/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000 - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;	/* 1; */
static int32_t rack_hw_rate_min = 0;	/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum timeout, in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250 ms in usecs */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;	/* How many max_segs the rwnd must be before we hold off sending */

/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;	/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;	/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* ie the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;	/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;	/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;	/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;	/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_collapsed_win_seen;
counter_u64_t rack_collapsed_win_rxt;
counter_u64_t rack_collapsed_win_rxt_bytes;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)
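/*
 * The two macros above: RACK_REXMTVAL() evaluates to t_srtt + 4 * t_rttvar,
 * floored at rack_rto_min, and RACK_TCPT_RANGESET() clamps (value + slop)
 * into the range [tvmin, tvmax].  E.g. (illustrative numbers only) with
 * value = 40000, slop = 1000, tvmin = 30000 and tvmax = 4000000 the result
 * is tv = 41000, since the sum already lies inside the range.
 */
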
static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack, int );
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

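/*
 * Swap the newreno beta/beta_ecn values currently in use by the
 * connection's CC module with the ones stashed in
 * rack->r_ctl.rc_saved_beta, saving the old values for a later restore.
 * The flex8 argument only tags the BB log record (the callers below pass
 * 3 when setting pacing values and 4 when restoring the old ones).  If
 * the connection is not using newreno, or any ctl_output call fails, the
 * swap is abandoned and the failure point is recorded in flex6.
 */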
static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old;
	struct tcpcb *tp;
	int error, failed = 0;

	tp = rack->rc_tp;
	if (tp->t_cc == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, we can't play games with beta! */
		failed = 1;
		goto out;
	}
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, not using new-reno so no swaps? */
		failed = 2;
		goto out;
	}
	/* Get the current values out */
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_GET;
	opt.name = CC_NEWRENO_BETA;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 3;
		goto out;
	}
	old.beta = opt.val;
	opt.name = CC_NEWRENO_BETA_ECN;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 4;
		goto out;
	}
	old.beta_ecn = opt.val;

	/* Now let's set in the values we have stored */
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 5;
		goto out;
	}
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
	if (error) {
		failed = 6;
		goto out;
	}
	/* Save off the values for restoral */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		struct newreno *ptr;

		ptr = ((struct newreno *)tp->t_ccv.cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = failed;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = flex8;
		tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set)
		return;
	/*
	 * Use the swap utility placing in 3 for flex8 to id a
	 * set of new values.
	 */
	rack->rc_pacing_cc_set = 1;
	rack_swap_beta_values(rack, 3);
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	if (rack->rc_pacing_cc_set == 0)
		return;
	/*
	 * Use the swap utility placing in 4 for flex8 to id a
	 * restoral of the old values.
	 */
	rack->rc_pacing_cc_set = 0;
	rack_swap_beta_values(rack, 4);
}

#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
	/* Keep in mind that t_maxpeakrate is in B/s. */
	uint64_t peak;

	peak = uqmax((tp->t_maxseg * 2),
	    (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
	tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif

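/*
 * Sysctl handler used for clearing the RACK counters: writing a 1 zeroes
 * every counter allocated below, while any other value (and plain reads)
 * leaves the counters untouched.
 */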
static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return error;

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enobuf_hw);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_persists_sends);
		counter_u64_zero(rack_persists_acks);
		counter_u64_zero(rack_persists_loss);
		counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
		counter_u64_zero(rack_adjust_map_bw);
#endif
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_fto_send);
		counter_u64_zero(rack_fto_rsm_send);
		counter_u64_zero(rack_extended_rfo);
		counter_u64_zero(rack_hw_pace_init_fail);
		counter_u64_zero(rack_hw_pace_lost);
		counter_u64_zero(rack_non_fto_send);
		counter_u64_zero(rack_nfto_resend);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		counter_u64_zero(rack_multi_single_eq);
		counter_u64_zero(rack_proc_non_comp_ack);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
		counter_u64_zero(rack_collapsed_win_rxt);
		counter_u64_zero(rack_collapsed_win_seen);
		counter_u64_zero(rack_collapsed_win_rxt_bytes);
	}
	rack_clear_counter = 0;
	return (0);
}

static void
rack_init_sysctls(void)
{
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;
	struct sysctl_oid *rack_pacing;
	struct sysctl_oid *rack_timely;
	struct sysctl_oid *rack_timers;
	struct sysctl_oid *rack_tlp;
	struct sysctl_oid *rack_misc;
	struct sysctl_oid *rack_features;
	struct sysctl_oid *rack_measure;
	struct sysctl_oid *rack_probertt;
	struct sysctl_oid *rack_hw_pacing;

	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Sack Attack Counters and Controls");
	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Counters");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	/* Probe rtt related controls */
	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "probertt",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "ProbeRTT related Controls");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
	    &rack_atexit_prtt_hbp, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
	    &rack_atexit_prtt, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
	    &rack_per_of_gp_probertt, 60,
	    "What percentage of goodput do we pace at in probertt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
	    &rack_per_of_gp_probertt_reduce, 10,
	    "What percentage of goodput do we reduce every gp_srtt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
	    &rack_per_of_gp_lowthresh, 40,
	    "What percentage of goodput do we allow the multiplier to fall to");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "time_between", CTLFLAG_RW,
	    &rack_time_between_probertt, 96000000,
	    "How many useconds between the lowest rtt falling must pass before we enter probertt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "safety", CTLFLAG_RW,
	    &rack_probe_rtt_safety_val, 2000000,
	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
	    &rack_probe_rtt_sets_cwnd, 0,
	    "Do we set the cwnd too (if always_lower is on)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
	    &rack_max_drain_wait, 2,
	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
	    &rack_must_drain, 1,
	    "We must drain this many gp_srtt's waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_entry, 1,
	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
838 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 839 SYSCTL_ADD_U32(&rack_sysctl_ctx, 840 SYSCTL_CHILDREN(rack_probertt), 841 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 842 &rack_probertt_use_min_rtt_exit, 0, 843 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 844 SYSCTL_ADD_U32(&rack_sysctl_ctx, 845 SYSCTL_CHILDREN(rack_probertt), 846 OID_AUTO, "length_div", CTLFLAG_RW, 847 &rack_probertt_gpsrtt_cnt_div, 0, 848 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 849 SYSCTL_ADD_U32(&rack_sysctl_ctx, 850 SYSCTL_CHILDREN(rack_probertt), 851 OID_AUTO, "length_mul", CTLFLAG_RW, 852 &rack_probertt_gpsrtt_cnt_mul, 0, 853 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 854 SYSCTL_ADD_U32(&rack_sysctl_ctx, 855 SYSCTL_CHILDREN(rack_probertt), 856 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 857 &rack_min_probertt_hold, 200000, 858 "What is the minimum time we hold probertt at target"); 859 SYSCTL_ADD_U32(&rack_sysctl_ctx, 860 SYSCTL_CHILDREN(rack_probertt), 861 OID_AUTO, "filter_life", CTLFLAG_RW, 862 &rack_probertt_filter_life, 10000000, 863 "What is the time for the filters life in useconds"); 864 SYSCTL_ADD_U32(&rack_sysctl_ctx, 865 SYSCTL_CHILDREN(rack_probertt), 866 OID_AUTO, "lower_within", CTLFLAG_RW, 867 &rack_probertt_lower_within, 10, 868 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 869 SYSCTL_ADD_U32(&rack_sysctl_ctx, 870 SYSCTL_CHILDREN(rack_probertt), 871 OID_AUTO, "must_move", CTLFLAG_RW, 872 &rack_min_rtt_movement, 250, 873 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 874 SYSCTL_ADD_U32(&rack_sysctl_ctx, 875 SYSCTL_CHILDREN(rack_probertt), 876 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 877 &rack_probertt_clear_is, 1, 878 "Do we clear I/S counts on exiting probe-rtt"); 879 SYSCTL_ADD_S32(&rack_sysctl_ctx, 880 SYSCTL_CHILDREN(rack_probertt), 881 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 882 &rack_max_drain_hbp, 1, 883 "How many extra drain gpsrtt's do we get in highly buffered paths"); 884 SYSCTL_ADD_S32(&rack_sysctl_ctx, 885 SYSCTL_CHILDREN(rack_probertt), 886 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 887 &rack_hbp_thresh, 3, 888 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 889 /* Pacing related sysctls */ 890 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 891 SYSCTL_CHILDREN(rack_sysctl_root), 892 OID_AUTO, 893 "pacing", 894 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 895 "Pacing related Controls"); 896 SYSCTL_ADD_S32(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_pacing), 898 OID_AUTO, "max_pace_over", CTLFLAG_RW, 899 &rack_max_per_above, 30, 900 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 901 SYSCTL_ADD_S32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_pacing), 903 OID_AUTO, "pace_to_one", CTLFLAG_RW, 904 &rack_pace_one_seg, 0, 905 "Do we allow low b/w pacing of 1MSS instead of two"); 906 SYSCTL_ADD_S32(&rack_sysctl_ctx, 907 SYSCTL_CHILDREN(rack_pacing), 908 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 909 &rack_limit_time_with_srtt, 0, 910 "Do we limit pacing time based on srtt"); 911 SYSCTL_ADD_S32(&rack_sysctl_ctx, 912 SYSCTL_CHILDREN(rack_pacing), 913 OID_AUTO, "init_win", CTLFLAG_RW, 914 &rack_default_init_window, 0, 915 "Do we have a rack initial window 0 = system default"); 916 SYSCTL_ADD_U16(&rack_sysctl_ctx, 917 SYSCTL_CHILDREN(rack_pacing), 918 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 919 
&rack_per_of_gp_ss, 250, 920 "If non zero, what percentage of goodput to pace at in slow start"); 921 SYSCTL_ADD_U16(&rack_sysctl_ctx, 922 SYSCTL_CHILDREN(rack_pacing), 923 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 924 &rack_per_of_gp_ca, 150, 925 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 926 SYSCTL_ADD_U16(&rack_sysctl_ctx, 927 SYSCTL_CHILDREN(rack_pacing), 928 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 929 &rack_per_of_gp_rec, 200, 930 "If non zero, what percentage of goodput to pace at in recovery"); 931 SYSCTL_ADD_S32(&rack_sysctl_ctx, 932 SYSCTL_CHILDREN(rack_pacing), 933 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 934 &rack_hptsi_segments, 40, 935 "What size is the max for TSO segments in pacing and burst mitigation"); 936 SYSCTL_ADD_S32(&rack_sysctl_ctx, 937 SYSCTL_CHILDREN(rack_pacing), 938 OID_AUTO, "burst_reduces", CTLFLAG_RW, 939 &rack_slot_reduction, 4, 940 "When doing only burst mitigation what is the reduce divisor"); 941 SYSCTL_ADD_S32(&rack_sysctl_ctx, 942 SYSCTL_CHILDREN(rack_sysctl_root), 943 OID_AUTO, "use_pacing", CTLFLAG_RW, 944 &rack_pace_every_seg, 0, 945 "If set we use pacing, if clear we use only the original burst mitigation"); 946 SYSCTL_ADD_U64(&rack_sysctl_ctx, 947 SYSCTL_CHILDREN(rack_pacing), 948 OID_AUTO, "rate_cap", CTLFLAG_RW, 949 &rack_bw_rate_cap, 0, 950 "If set we apply this value to the absolute rate cap used by pacing"); 951 SYSCTL_ADD_U8(&rack_sysctl_ctx, 952 SYSCTL_CHILDREN(rack_sysctl_root), 953 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 954 &rack_req_measurements, 1, 955 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 956 /* Hardware pacing */ 957 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 958 SYSCTL_CHILDREN(rack_sysctl_root), 959 OID_AUTO, 960 "hdwr_pacing", 961 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 962 "Pacing related Controls"); 963 SYSCTL_ADD_S32(&rack_sysctl_ctx, 964 SYSCTL_CHILDREN(rack_hw_pacing), 965 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 966 &rack_hw_rwnd_factor, 2, 967 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 968 SYSCTL_ADD_S32(&rack_sysctl_ctx, 969 SYSCTL_CHILDREN(rack_hw_pacing), 970 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 971 &rack_enobuf_hw_boost_mult, 2, 972 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 973 SYSCTL_ADD_S32(&rack_sysctl_ctx, 974 SYSCTL_CHILDREN(rack_hw_pacing), 975 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 976 &rack_enobuf_hw_max, 2, 977 "What is the max boost the pacing time if we see a ENOBUFS?"); 978 SYSCTL_ADD_S32(&rack_sysctl_ctx, 979 SYSCTL_CHILDREN(rack_hw_pacing), 980 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 981 &rack_enobuf_hw_min, 2, 982 "What is the min boost the pacing time if we see a ENOBUFS?"); 983 SYSCTL_ADD_S32(&rack_sysctl_ctx, 984 SYSCTL_CHILDREN(rack_hw_pacing), 985 OID_AUTO, "enable", CTLFLAG_RW, 986 &rack_enable_hw_pacing, 0, 987 "Should RACK attempt to use hw pacing?"); 988 SYSCTL_ADD_S32(&rack_sysctl_ctx, 989 SYSCTL_CHILDREN(rack_hw_pacing), 990 OID_AUTO, "rate_cap", CTLFLAG_RW, 991 &rack_hw_rate_caps, 1, 992 "Does the highest hardware pacing rate cap the rate we will send at??"); 993 SYSCTL_ADD_S32(&rack_sysctl_ctx, 994 SYSCTL_CHILDREN(rack_hw_pacing), 995 OID_AUTO, "rate_min", CTLFLAG_RW, 996 &rack_hw_rate_min, 0, 997 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 998 SYSCTL_ADD_S32(&rack_sysctl_ctx, 999 SYSCTL_CHILDREN(rack_hw_pacing), 1000 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1001 
&rack_hw_rate_to_low, 0, 1002 "If we fall below this rate, dis-engage hw pacing?"); 1003 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1004 SYSCTL_CHILDREN(rack_hw_pacing), 1005 OID_AUTO, "up_only", CTLFLAG_RW, 1006 &rack_hw_up_only, 1, 1007 "Do we allow hw pacing to lower the rate selected?"); 1008 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1009 SYSCTL_CHILDREN(rack_hw_pacing), 1010 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1011 &rack_hw_pace_extra_slots, 2, 1012 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1013 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_sysctl_root), 1015 OID_AUTO, 1016 "timely", 1017 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1018 "Rack Timely RTT Controls"); 1019 /* Timely based GP dynmics */ 1020 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_timely), 1022 OID_AUTO, "upper", CTLFLAG_RW, 1023 &rack_gp_per_bw_mul_up, 2, 1024 "Rack timely upper range for equal b/w (in percentage)"); 1025 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1026 SYSCTL_CHILDREN(rack_timely), 1027 OID_AUTO, "lower", CTLFLAG_RW, 1028 &rack_gp_per_bw_mul_down, 4, 1029 "Rack timely lower range for equal b/w (in percentage)"); 1030 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1031 SYSCTL_CHILDREN(rack_timely), 1032 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1033 &rack_gp_rtt_maxmul, 3, 1034 "Rack timely multiplier of lowest rtt for rtt_max"); 1035 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1036 SYSCTL_CHILDREN(rack_timely), 1037 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1038 &rack_gp_rtt_mindiv, 4, 1039 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1040 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1041 SYSCTL_CHILDREN(rack_timely), 1042 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1043 &rack_gp_rtt_minmul, 1, 1044 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1045 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1046 SYSCTL_CHILDREN(rack_timely), 1047 OID_AUTO, "decrease", CTLFLAG_RW, 1048 &rack_gp_decrease_per, 20, 1049 "Rack timely decrease percentage of our GP multiplication factor"); 1050 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1051 SYSCTL_CHILDREN(rack_timely), 1052 OID_AUTO, "increase", CTLFLAG_RW, 1053 &rack_gp_increase_per, 2, 1054 "Rack timely increase perentage of our GP multiplication factor"); 1055 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1056 SYSCTL_CHILDREN(rack_timely), 1057 OID_AUTO, "lowerbound", CTLFLAG_RW, 1058 &rack_per_lower_bound, 50, 1059 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1060 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1061 SYSCTL_CHILDREN(rack_timely), 1062 OID_AUTO, "upperboundss", CTLFLAG_RW, 1063 &rack_per_upper_bound_ss, 0, 1064 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1065 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1066 SYSCTL_CHILDREN(rack_timely), 1067 OID_AUTO, "upperboundca", CTLFLAG_RW, 1068 &rack_per_upper_bound_ca, 0, 1069 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1070 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1071 SYSCTL_CHILDREN(rack_timely), 1072 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1073 &rack_do_dyn_mul, 0, 1074 "Rack timely do we enable dynmaic timely goodput by default"); 1075 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1076 SYSCTL_CHILDREN(rack_timely), 1077 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1078 &rack_gp_no_rec_chg, 1, 1079 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1080 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1081 SYSCTL_CHILDREN(rack_timely), 1082 OID_AUTO, "red_clear_cnt", 
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
	    &rack_down_raise_thresh, 100,
	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
	    &rack_req_segs, 1,
	    "Bottom dragging if not these many segments outstanding and room");

	/* TLP and Rack related parameters */
	rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "tlp",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "TLP and Rack related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_rrr", CTLFLAG_RW,
	    &use_rack_rr, 1,
	    "Do we use Rack Rapid Recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "post_rec_labc", CTLFLAG_RW,
	    &rack_max_abc_post_recovery, 2,
	    "Since we do early recovery, do we override the l_abc to a value, if so what?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
	    &rack_non_rxt_use_cr, 0,
	    "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "limit", CTLFLAG_RW,
	    &rack_tlp_limit, 2,
	    "How many TLP's can be sent without sending new data");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_greater", CTLFLAG_RW,
	    &rack_tlp_use_greater, 1,
	    "Should we use the rack_rtt time if it's greater than srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10000,
	    "TLP minimum timeout per the specification (in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 0,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retran should we enter recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 60000000,
	    "Does reorder detection fade, if so how many microseconds (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1000,
	    "Extra RACK time (in microseconds) besides reordering thresh");

	/* Timer related controls */
	rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timers",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Timer related controls");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmin", CTLFLAG_RW,
	    &rack_persist_min, 250000,
	    "What is the minimum time in microseconds between persists");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmax", CTLFLAG_RW,
	    &rack_persist_max, 2000000,
	    "What is the largest delay in microseconds between persists");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 40000,
	    "Delayed ack time (40ms in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 30000,
	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 4000000,
	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1000,
	    "Minimum rack timeout in microseconds");
	/* Measure controls */
	rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "measure",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Measure related controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "wma_divisor", CTLFLAG_RW,
	    &rack_wma_divisor, 8,
	    "When doing b/w calculation what is the divisor for the WMA");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_cwnd", CTLFLAG_RW,
	    &rack_cwnd_block_ends_measure, 0,
	    "Does a cwnd just-return end the measurement window (app limited)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_rwnd", CTLFLAG_RW,
	    &rack_rwnd_block_ends_measure, 0,
	    "Does an rwnd just-return end the measurement window (app limited -- not persists)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_target", CTLFLAG_RW,
	    &rack_def_data_window, 20,
	    "What is the minimum target window (in mss) for a GP measurement");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "goal_bdp", CTLFLAG_RW,
	    &rack_goal_bdp, 2,
	    "What is the goal BDP to measure");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_srtts", CTLFLAG_RW,
	    &rack_min_srtts, 1,
	    "What is the minimum number of srtt's for a GP measurement");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
	    &rack_min_measure_usec, 0,
	    "What is the minimum time for a measurement, if 0 this is off");
	/* Features */
	rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "features",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Feature controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "cmpack", CTLFLAG_RW,
	    &rack_use_cmp_acks, 1,
	    "Should RACK have LRO send compressed acks");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "fsb", CTLFLAG_RW,
	    &rack_use_fsb, 1,
	    "Should RACK use the fast send block?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rfo", CTLFLAG_RW,
	    &rack_use_rfo, 1,
	    "Should RACK use rack_fast_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
	    &rack_use_rsm_rfo, 1,
	    "Should RACK use rack_fast_rsm_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
	    &rack_enable_mqueue_for_nonpaced, 0,
	    "Should RACK use mbuf queuing for non-paced connections");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "hystartplusplus", CTLFLAG_RW,
	    &rack_do_hystart, 0,
	    "Should RACK enable HyStart++ on connections?");
	/* Misc rack controls */
	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "misc",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Misc related controls");
#ifdef TCP_ACCOUNTING
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "tcp_acct", CTLFLAG_RW,
	    &rack_tcp_accounting, 0,
	    "Should we turn on TCP accounting for all rack sessions?");
#endif
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
	    &rack_apply_rtt_with_reduced_conf, 0,
	    "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW,
	    &rack_dsack_std_based, 3,
	    "How do we process dsack with respect to rack timers, bit field, 3 is standards based?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "prr_addback_max", CTLFLAG_RW,
	    &rack_prr_addbackmax, 2,
	    "What is the maximum number of MSS we allow to be added back if prr can't send all its data?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "stats_gets_ms", CTLFLAG_RW,
	    &rack_stats_gets_ms_rtt, 1,
	    "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "clientlowbuf", CTLFLAG_RW,
	    &rack_client_low_buf, 0,
	    "Client low buffer level (below this we are more aggressive in DGP exiting recovery; 0 = off)?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "defprofile", CTLFLAG_RW,
	    &rack_def_profile, 0,
	    "Should RACK use a default profile (0=no, num == profile num)?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "shared_cwnd", CTLFLAG_RW,
	    &rack_enable_shared_cwnd, 1,
	    "Should RACK try to use the shared cwnd on connections where allowed");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "limits_on_scwnd", CTLFLAG_RW,
	    &rack_limits_scwnd, 1,
	    "Should RACK place low end time limits on the shared cwnd feature");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "iMac_dack", CTLFLAG_RW,
	    &rack_use_imac_dack, 0,
	    "Should RACK try to emulate iMac delayed ack");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "no_prr", CTLFLAG_RW,
	    &rack_disable_prr, 0,
	    "Should RACK not use prr and only pace (must have pacing on)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "bb_verbose", CTLFLAG_RW,
	    &rack_verbose_logging, 0,
	    "Should RACK black box logging be verbose");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "data_after_close", CTLFLAG_RW,
	    &rack_ignore_data_after_close, 1,
	    "Do we hold off sending a RST until all pending data is ack'd");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "no_sack_needed", CTLFLAG_RW,
	    &rack_sack_not_required, 1,
	    "Do we allow rack to run on connections not supporting SACK");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "prr_sendalot", CTLFLAG_RW,
	    &rack_send_a_lot_in_prr, 1,
	    "Send a lot in prr");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "autoscale", CTLFLAG_RW,
	    &rack_autosndbuf_inc, 20,
	    "What percentage should rack scale up its snd buffer by?");
	/* Sack Attacker detection stuff */
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "detect_highsackratio", CTLFLAG_RW,
	    &rack_highest_sack_thresh_seen, 0,
	    "Highest sack to ack ratio seen");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "detect_highmoveratio", CTLFLAG_RW,
	    &rack_highest_move_thresh_seen, 0,
	    "Highest move to non-move ratio seen");
	rack_ack_total = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_attack),
	    OID_AUTO, "acktotal", CTLFLAG_RD,
	    &rack_ack_total,
	    "Total number of Ack's");
rack_express_sack = counter_u64_alloc(M_WAITOK); 1427 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1428 SYSCTL_CHILDREN(rack_attack), 1429 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1430 &rack_express_sack, 1431 "Total expresss number of Sack's"); 1432 rack_sack_total = counter_u64_alloc(M_WAITOK); 1433 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_attack), 1435 OID_AUTO, "sacktotal", CTLFLAG_RD, 1436 &rack_sack_total, 1437 "Total number of SACKs"); 1438 rack_move_none = counter_u64_alloc(M_WAITOK); 1439 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1440 SYSCTL_CHILDREN(rack_attack), 1441 OID_AUTO, "move_none", CTLFLAG_RD, 1442 &rack_move_none, 1443 "Total number of SACK index reuse of positions under threshold"); 1444 rack_move_some = counter_u64_alloc(M_WAITOK); 1445 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_attack), 1447 OID_AUTO, "move_some", CTLFLAG_RD, 1448 &rack_move_some, 1449 "Total number of SACK index reuse of positions over threshold"); 1450 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1451 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_attack), 1453 OID_AUTO, "attacks", CTLFLAG_RD, 1454 &rack_sack_attacks_detected, 1455 "Total number of SACK attackers that had sack disabled"); 1456 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1457 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1458 SYSCTL_CHILDREN(rack_attack), 1459 OID_AUTO, "reversed", CTLFLAG_RD, 1460 &rack_sack_attacks_reversed, 1461 "Total number of SACK attackers that were later determined false positive"); 1462 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1463 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1464 SYSCTL_CHILDREN(rack_attack), 1465 OID_AUTO, "nextmerge", CTLFLAG_RD, 1466 &rack_sack_used_next_merge, 1467 "Total number of times we used the next merge"); 1468 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1469 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1470 SYSCTL_CHILDREN(rack_attack), 1471 OID_AUTO, "prevmerge", CTLFLAG_RD, 1472 &rack_sack_used_prev_merge, 1473 "Total number of times we used the prev merge"); 1474 /* Counters */ 1475 rack_fto_send = counter_u64_alloc(M_WAITOK); 1476 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_counters), 1478 OID_AUTO, "fto_send", CTLFLAG_RD, 1479 &rack_fto_send, "Total number of rack_fast_output sends"); 1480 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1481 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1482 SYSCTL_CHILDREN(rack_counters), 1483 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1484 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1485 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1486 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_counters), 1488 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1489 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1490 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1491 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1492 SYSCTL_CHILDREN(rack_counters), 1493 OID_AUTO, "nfto_send", CTLFLAG_RD, 1494 &rack_non_fto_send, "Total number of rack_output first sends"); 1495 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1496 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1497 SYSCTL_CHILDREN(rack_counters), 1498 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1499 &rack_extended_rfo, "Total number of times we extended rfo"); 1500 1501 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1502 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_counters), 1504 OID_AUTO, "hwpace_init_fail", 
CTLFLAG_RD, 1505 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1506 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1507 1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1509 SYSCTL_CHILDREN(rack_counters), 1510 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1511 &rack_hw_pace_lost, "Total number of times we lost a hw pacing rate after it was initialized"); 1512 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1513 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1514 SYSCTL_CHILDREN(rack_counters), 1515 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1516 &rack_tlp_tot, 1517 "Total number of tail loss probe expirations"); 1518 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1519 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1520 SYSCTL_CHILDREN(rack_counters), 1521 OID_AUTO, "tlp_new", CTLFLAG_RD, 1522 &rack_tlp_newdata, 1523 "Total number of tail loss probe sending new data"); 1524 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1525 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1526 SYSCTL_CHILDREN(rack_counters), 1527 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1528 &rack_tlp_retran, 1529 "Total number of tail loss probe sending retransmitted data"); 1530 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1531 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_counters), 1533 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1534 &rack_tlp_retran_bytes, 1535 "Total bytes of tail loss probe sending retransmitted data"); 1536 rack_to_tot = counter_u64_alloc(M_WAITOK); 1537 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1538 SYSCTL_CHILDREN(rack_counters), 1539 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1540 &rack_to_tot, 1541 "Total number of times the rack timeout expired"); 1542 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1543 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1544 SYSCTL_CHILDREN(rack_counters), 1545 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1546 &rack_saw_enobuf, 1547 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1548 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1549 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1550 SYSCTL_CHILDREN(rack_counters), 1551 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1552 &rack_saw_enobuf_hw, 1553 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1554 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1555 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1556 SYSCTL_CHILDREN(rack_counters), 1557 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1558 &rack_saw_enetunreach, 1559 "Total number of times a send received an ENETUNREACH error"); 1560 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1561 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1562 SYSCTL_CHILDREN(rack_counters), 1563 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1564 &rack_hot_alloc, 1565 "Total allocations from the top of our list"); 1566 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1567 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1568 SYSCTL_CHILDREN(rack_counters), 1569 OID_AUTO, "allocs", CTLFLAG_RD, 1570 &rack_to_alloc, 1571 "Total allocations of tracking structures"); 1572 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_counters), 1575 OID_AUTO, "allochard", CTLFLAG_RD, 1576 &rack_to_alloc_hard, 1577 "Total allocations done with sleeping the hard way"); 1578 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1579 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1580 SYSCTL_CHILDREN(rack_counters), 1581 OID_AUTO, "allocemerg", CTLFLAG_RD, 1582 &rack_to_alloc_emerg, 1583 "Total allocations done from emergency cache"); 1584
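/*
 * Every statistic in this section follows the same pattern; a minimal
 * sketch of it is shown below with a purely hypothetical counter name:
 *
 *	rack_example_ctr = counter_u64_alloc(M_WAITOK);
 *	SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
 *	    SYSCTL_CHILDREN(rack_counters),
 *	    OID_AUTO, "example", CTLFLAG_RD,
 *	    &rack_example_ctr, "An example counter");
 *
 * The counter is then bumped at the event site with
 * counter_u64_add(rack_example_ctr, 1), can be read in-kernel with
 * counter_u64_fetch(), and must be released with counter_u64_free() in
 * rack_counter_destroy() below.
 */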
rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1585 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1586 SYSCTL_CHILDREN(rack_counters), 1587 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1588 &rack_to_alloc_limited, 1589 "Total allocations dropped due to limit"); 1590 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1591 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1592 SYSCTL_CHILDREN(rack_counters), 1593 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1594 &rack_alloc_limited_conns, 1595 "Connections with allocations dropped due to limit"); 1596 rack_split_limited = counter_u64_alloc(M_WAITOK); 1597 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1598 SYSCTL_CHILDREN(rack_counters), 1599 OID_AUTO, "split_limited", CTLFLAG_RD, 1600 &rack_split_limited, 1601 "Split allocations dropped due to limit"); 1602 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1603 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1604 SYSCTL_CHILDREN(rack_counters), 1605 OID_AUTO, "persist_sends", CTLFLAG_RD, 1606 &rack_persists_sends, 1607 "Number of times we sent a persist probe"); 1608 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1609 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1610 SYSCTL_CHILDREN(rack_counters), 1611 OID_AUTO, "persist_acks", CTLFLAG_RD, 1612 &rack_persists_acks, 1613 "Number of times a persist probe was acked"); 1614 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1615 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1616 SYSCTL_CHILDREN(rack_counters), 1617 OID_AUTO, "persist_loss", CTLFLAG_RD, 1618 &rack_persists_loss, 1619 "Number of times we detected a lost persist probe (no ack)"); 1620 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1621 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1622 SYSCTL_CHILDREN(rack_counters), 1623 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1624 &rack_persists_lost_ends, 1625 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1626 #ifdef INVARIANTS 1627 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1628 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1629 SYSCTL_CHILDREN(rack_counters), 1630 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1631 &rack_adjust_map_bw, 1632 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1633 #endif 1634 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1635 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_counters), 1637 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1638 &rack_multi_single_eq, 1639 "Number of compressed acks total represented"); 1640 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1641 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1642 SYSCTL_CHILDREN(rack_counters), 1643 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1644 &rack_proc_non_comp_ack, 1645 "Number of non compresseds acks that we processed"); 1646 1647 1648 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_counters), 1651 OID_AUTO, "sack_long", CTLFLAG_RD, 1652 &rack_sack_proc_all, 1653 "Total times we had to walk whole list for sack processing"); 1654 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_counters), 1657 OID_AUTO, "sack_restart", CTLFLAG_RD, 1658 &rack_sack_proc_restart, 1659 "Total times we had to walk whole list due to a restart"); 1660 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1662 SYSCTL_CHILDREN(rack_counters), 1663 OID_AUTO, "sack_short", CTLFLAG_RD, 1664 &rack_sack_proc_short, 1665 "Total 
times we took shortcut for sack processing"); 1666 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1668 SYSCTL_CHILDREN(rack_attack), 1669 OID_AUTO, "skipacked", CTLFLAG_RD, 1670 &rack_sack_skipped_acked, 1671 "Total number of times we skipped previously sacked"); 1672 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_attack), 1675 OID_AUTO, "ofsplit", CTLFLAG_RD, 1676 &rack_sack_splits, 1677 "Total number of times we did the old fashion tree split"); 1678 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1682 &rack_input_idle_reduces, 1683 "Total number of idle reductions on input"); 1684 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1688 &rack_collapsed_win_seen, 1689 "Total number of collapsed window events seen (where our window shrinks)"); 1690 1691 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1693 SYSCTL_CHILDREN(rack_counters), 1694 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1695 &rack_collapsed_win, 1696 "Total number of collapsed window events where we mark packets"); 1697 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1698 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1699 SYSCTL_CHILDREN(rack_counters), 1700 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1701 &rack_collapsed_win_rxt, 1702 "Total number of packets that were retransmitted"); 1703 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1704 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1705 SYSCTL_CHILDREN(rack_counters), 1706 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1707 &rack_collapsed_win_rxt_bytes, 1708 "Total number of bytes that were retransmitted"); 1709 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1711 SYSCTL_CHILDREN(rack_counters), 1712 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1713 &rack_try_scwnd, 1714 "Total number of scwnd attempts"); 1715 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1716 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1717 OID_AUTO, "outsize", CTLFLAG_RD, 1718 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1719 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1720 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1721 OID_AUTO, "opts", CTLFLAG_RD, 1722 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1723 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1724 SYSCTL_CHILDREN(rack_sysctl_root), 1725 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1726 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1727 } 1728 1729 static __inline int 1730 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1731 { 1732 if (SEQ_GEQ(b->r_start, a->r_start) && 1733 SEQ_LT(b->r_start, a->r_end)) { 1734 /* 1735 * The entry b is within the 1736 * block a. i.e.: 1737 * a -- |-------------| 1738 * b -- |----| 1739 * <or> 1740 * b -- |------| 1741 * <or> 1742 * b -- |-----------| 1743 */ 1744 return (0); 1745 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1746 /* 1747 * b falls as either the next 1748 * sequence block after a so a 1749 * is said to be smaller than b. 
1750 * i.e: 1751 * a -- |------| 1752 * b -- |--------| 1753 * or 1754 * b -- |-----| 1755 */ 1756 return (1); 1757 } 1758 /* 1759 * Whats left is where a is 1760 * larger than b. i.e: 1761 * a -- |-------| 1762 * b -- |---| 1763 * or even possibly 1764 * b -- |--------------| 1765 */ 1766 return (-1); 1767 } 1768 1769 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1770 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1771 1772 static uint32_t 1773 rc_init_window(struct tcp_rack *rack) 1774 { 1775 uint32_t win; 1776 1777 if (rack->rc_init_win == 0) { 1778 /* 1779 * Nothing set by the user, use the system stack 1780 * default. 1781 */ 1782 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1783 } 1784 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1785 return (win); 1786 } 1787 1788 static uint64_t 1789 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1790 { 1791 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1792 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1793 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1794 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1795 else 1796 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1797 } 1798 1799 static uint64_t 1800 rack_get_bw(struct tcp_rack *rack) 1801 { 1802 if (rack->use_fixed_rate) { 1803 /* Return the fixed pacing rate */ 1804 return (rack_get_fixed_pacing_bw(rack)); 1805 } 1806 if (rack->r_ctl.gp_bw == 0) { 1807 /* 1808 * We have yet no b/w measurement, 1809 * if we have a user set initial bw 1810 * return it. If we don't have that and 1811 * we have an srtt, use the tcp IW (10) to 1812 * calculate a fictional b/w over the SRTT 1813 * which is more or less a guess. Note 1814 * we don't use our IW from rack on purpose 1815 * so if we have like IW=30, we are not 1816 * calculating a "huge" b/w. 1817 */ 1818 uint64_t bw, srtt; 1819 if (rack->r_ctl.init_rate) 1820 return (rack->r_ctl.init_rate); 1821 1822 /* Has the user set a max peak rate? */ 1823 #ifdef NETFLIX_PEAKRATE 1824 if (rack->rc_tp->t_maxpeakrate) 1825 return (rack->rc_tp->t_maxpeakrate); 1826 #endif 1827 /* Ok lets come up with the IW guess, if we have a srtt */ 1828 if (rack->rc_tp->t_srtt == 0) { 1829 /* 1830 * Go with old pacing method 1831 * i.e. burst mitigation only. 1832 */ 1833 return (0); 1834 } 1835 /* Ok lets get the initial TCP win (not racks) */ 1836 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1837 srtt = (uint64_t)rack->rc_tp->t_srtt; 1838 bw *= (uint64_t)USECS_IN_SECOND; 1839 bw /= srtt; 1840 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1841 bw = rack->r_ctl.bw_rate_cap; 1842 return (bw); 1843 } else { 1844 uint64_t bw; 1845 1846 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1847 /* Averaging is done, we can return the value */ 1848 bw = rack->r_ctl.gp_bw; 1849 } else { 1850 /* Still doing initial average must calculate */ 1851 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1852 } 1853 #ifdef NETFLIX_PEAKRATE 1854 if ((rack->rc_tp->t_maxpeakrate) && 1855 (bw > rack->rc_tp->t_maxpeakrate)) { 1856 /* The user has set a peak rate to pace at 1857 * don't allow us to pace faster than that. 
1858 */ 1859 return (rack->rc_tp->t_maxpeakrate); 1860 } 1861 #endif 1862 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1863 bw = rack->r_ctl.bw_rate_cap; 1864 return (bw); 1865 } 1866 } 1867 1868 static uint16_t 1869 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1870 { 1871 if (rack->use_fixed_rate) { 1872 return (100); 1873 } else if (rack->in_probe_rtt && (rsm == NULL)) 1874 return (rack->r_ctl.rack_per_of_gp_probertt); 1875 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1876 rack->r_ctl.rack_per_of_gp_rec)) { 1877 if (rsm) { 1878 /* a retransmission always use the recovery rate */ 1879 return (rack->r_ctl.rack_per_of_gp_rec); 1880 } else if (rack->rack_rec_nonrxt_use_cr) { 1881 /* Directed to use the configured rate */ 1882 goto configured_rate; 1883 } else if (rack->rack_no_prr && 1884 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1885 /* No PRR, lets just use the b/w estimate only */ 1886 return (100); 1887 } else { 1888 /* 1889 * Here we may have a non-retransmit but we 1890 * have no overrides, so just use the recovery 1891 * rate (prr is in effect). 1892 */ 1893 return (rack->r_ctl.rack_per_of_gp_rec); 1894 } 1895 } 1896 configured_rate: 1897 /* For the configured rate we look at our cwnd vs the ssthresh */ 1898 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1899 return (rack->r_ctl.rack_per_of_gp_ss); 1900 else 1901 return (rack->r_ctl.rack_per_of_gp_ca); 1902 } 1903 1904 static void 1905 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1906 { 1907 /* 1908 * Types of logs (mod value) 1909 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1910 * 2 = a dsack round begins, persist is reset to 16. 1911 * 3 = a dsack round ends 1912 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1913 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1914 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
1915 */ 1916 if (tcp_bblogging_on(rack->rc_tp)) { 1917 union tcp_log_stackspecific log; 1918 struct timeval tv; 1919 1920 memset(&log, 0, sizeof(log)); 1921 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 1922 log.u_bbr.flex1 <<= 1; 1923 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 1924 log.u_bbr.flex1 <<= 1; 1925 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 1926 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 1927 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 1928 log.u_bbr.flex4 = flex4; 1929 log.u_bbr.flex5 = flex5; 1930 log.u_bbr.flex6 = flex6; 1931 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 1932 log.u_bbr.flex8 = mod; 1933 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1934 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1935 &rack->rc_inp->inp_socket->so_rcv, 1936 &rack->rc_inp->inp_socket->so_snd, 1937 RACK_DSACK_HANDLING, 0, 1938 0, &log, false, &tv); 1939 } 1940 } 1941 1942 static void 1943 rack_log_hdwr_pacing(struct tcp_rack *rack, 1944 uint64_t rate, uint64_t hw_rate, int line, 1945 int error, uint16_t mod) 1946 { 1947 if (tcp_bblogging_on(rack->rc_tp)) { 1948 union tcp_log_stackspecific log; 1949 struct timeval tv; 1950 const struct ifnet *ifp; 1951 1952 memset(&log, 0, sizeof(log)); 1953 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 1954 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 1955 if (rack->r_ctl.crte) { 1956 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 1957 } else if (rack->rc_inp->inp_route.ro_nh && 1958 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 1959 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 1960 } else 1961 ifp = NULL; 1962 if (ifp) { 1963 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 1964 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 1965 } 1966 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1967 log.u_bbr.bw_inuse = rate; 1968 log.u_bbr.flex5 = line; 1969 log.u_bbr.flex6 = error; 1970 log.u_bbr.flex7 = mod; 1971 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 1972 log.u_bbr.flex8 = rack->use_fixed_rate; 1973 log.u_bbr.flex8 <<= 1; 1974 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 1975 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 1976 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 1977 if (rack->r_ctl.crte) 1978 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 1979 else 1980 log.u_bbr.cur_del_rate = 0; 1981 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 1982 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1983 &rack->rc_inp->inp_socket->so_rcv, 1984 &rack->rc_inp->inp_socket->so_snd, 1985 BBR_LOG_HDWR_PACE, 0, 1986 0, &log, false, &tv); 1987 } 1988 } 1989 1990 static uint64_t 1991 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 1992 { 1993 /* 1994 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
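 * Roughly, the code below computes bw_est = (bw * gain) / 100, where
 * gain is the percentage returned by rack_get_output_gain(). The result
 * is floored at RACK_MIN_BW and, when hardware rate caps are enabled,
 * may be clamped to the interface's highest rate-table entry.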
1995 */ 1996 uint64_t bw_est, high_rate; 1997 uint64_t gain; 1998 1999 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2000 bw_est = bw * gain; 2001 bw_est /= (uint64_t)100; 2002 /* Never fall below the minimum (def 64kbps) */ 2003 if (bw_est < RACK_MIN_BW) 2004 bw_est = RACK_MIN_BW; 2005 if (rack->r_rack_hw_rate_caps) { 2006 /* Rate caps are in place */ 2007 if (rack->r_ctl.crte != NULL) { 2008 /* We have a hdwr rate already */ 2009 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2010 if (bw_est >= high_rate) { 2011 /* We are capping bw at the highest rate table entry */ 2012 rack_log_hdwr_pacing(rack, 2013 bw_est, high_rate, __LINE__, 2014 0, 3); 2015 bw_est = high_rate; 2016 if (capped) 2017 *capped = 1; 2018 } 2019 } else if ((rack->rack_hdrw_pacing == 0) && 2020 (rack->rack_hdw_pace_ena) && 2021 (rack->rack_attempt_hdwr_pace == 0) && 2022 (rack->rc_inp->inp_route.ro_nh != NULL) && 2023 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2024 /* 2025 * Special case, we have not yet attempted hardware 2026 * pacing, and yet we may, when we do, find out if we are 2027 * above the highest rate. We need to know the maxbw for the interface 2028 * in question (if it supports ratelimiting). We get back 2029 * a 0, if the interface is not found in the RL lists. 2030 */ 2031 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2032 if (high_rate) { 2033 /* Yep, we have a rate is it above this rate? */ 2034 if (bw_est > high_rate) { 2035 bw_est = high_rate; 2036 if (capped) 2037 *capped = 1; 2038 } 2039 } 2040 } 2041 } 2042 return (bw_est); 2043 } 2044 2045 static void 2046 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2047 { 2048 if (tcp_bblogging_on(rack->rc_tp)) { 2049 union tcp_log_stackspecific log; 2050 struct timeval tv; 2051 2052 if ((mod != 1) && (rack_verbose_logging == 0)) { 2053 /* 2054 * We get 3 values currently for mod 2055 * 1 - We are retransmitting and this tells the reason. 2056 * 2 - We are clearing a dup-ack count. 2057 * 3 - We are incrementing a dup-ack count. 2058 * 2059 * The clear/increment are only logged 2060 * if you have BBverbose on. 
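 * ("BBverbose" refers to rack_verbose_logging, exposed above as the
 * "bb_verbose" sysctl under the misc node.)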
2061 */ 2062 return; 2063 } 2064 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2065 log.u_bbr.flex1 = tsused; 2066 log.u_bbr.flex2 = thresh; 2067 log.u_bbr.flex3 = rsm->r_flags; 2068 log.u_bbr.flex4 = rsm->r_dupack; 2069 log.u_bbr.flex5 = rsm->r_start; 2070 log.u_bbr.flex6 = rsm->r_end; 2071 log.u_bbr.flex8 = mod; 2072 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2073 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2074 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2075 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2076 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2077 log.u_bbr.pacing_gain = rack->r_must_retran; 2078 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2079 &rack->rc_inp->inp_socket->so_rcv, 2080 &rack->rc_inp->inp_socket->so_snd, 2081 BBR_LOG_SETTINGS_CHG, 0, 2082 0, &log, false, &tv); 2083 } 2084 } 2085 2086 static void 2087 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2088 { 2089 if (tcp_bblogging_on(rack->rc_tp)) { 2090 union tcp_log_stackspecific log; 2091 struct timeval tv; 2092 2093 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2094 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2095 log.u_bbr.flex2 = to; 2096 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2097 log.u_bbr.flex4 = slot; 2098 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2099 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2100 log.u_bbr.flex7 = rack->rc_in_persist; 2101 log.u_bbr.flex8 = which; 2102 if (rack->rack_no_prr) 2103 log.u_bbr.pkts_out = 0; 2104 else 2105 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2106 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2107 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2108 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2109 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2110 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2111 log.u_bbr.pacing_gain = rack->r_must_retran; 2112 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2113 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2114 log.u_bbr.lost = rack_rto_min; 2115 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2116 &rack->rc_inp->inp_socket->so_rcv, 2117 &rack->rc_inp->inp_socket->so_snd, 2118 BBR_LOG_TIMERSTAR, 0, 2119 0, &log, false, &tv); 2120 } 2121 } 2122 2123 static void 2124 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2125 { 2126 if (tcp_bblogging_on(rack->rc_tp)) { 2127 union tcp_log_stackspecific log; 2128 struct timeval tv; 2129 2130 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2131 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2132 log.u_bbr.flex8 = to_num; 2133 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2134 log.u_bbr.flex2 = rack->rc_rack_rtt; 2135 if (rsm == NULL) 2136 log.u_bbr.flex3 = 0; 2137 else 2138 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2139 if (rack->rack_no_prr) 2140 log.u_bbr.flex5 = 0; 2141 else 2142 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2143 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2144 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2145 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2146 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2147 log.u_bbr.pacing_gain = rack->r_must_retran; 2148 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2149 &rack->rc_inp->inp_socket->so_rcv, 2150 &rack->rc_inp->inp_socket->so_snd, 2151 BBR_LOG_RTO, 0, 2152 0, &log, false, &tv); 2153 } 2154 } 2155 2156 static void 2157 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2158 struct rack_sendmap *prev, 2159 struct rack_sendmap *rsm, 2160 struct rack_sendmap *next, 2161 
int flag, uint32_t th_ack, int line) 2162 { 2163 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2164 union tcp_log_stackspecific log; 2165 struct timeval tv; 2166 2167 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2168 log.u_bbr.flex8 = flag; 2169 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2170 log.u_bbr.cur_del_rate = (uint64_t)prev; 2171 log.u_bbr.delRate = (uint64_t)rsm; 2172 log.u_bbr.rttProp = (uint64_t)next; 2173 log.u_bbr.flex7 = 0; 2174 if (prev) { 2175 log.u_bbr.flex1 = prev->r_start; 2176 log.u_bbr.flex2 = prev->r_end; 2177 log.u_bbr.flex7 |= 0x4; 2178 } 2179 if (rsm) { 2180 log.u_bbr.flex3 = rsm->r_start; 2181 log.u_bbr.flex4 = rsm->r_end; 2182 log.u_bbr.flex7 |= 0x2; 2183 } 2184 if (next) { 2185 log.u_bbr.flex5 = next->r_start; 2186 log.u_bbr.flex6 = next->r_end; 2187 log.u_bbr.flex7 |= 0x1; 2188 } 2189 log.u_bbr.applimited = line; 2190 log.u_bbr.pkts_out = th_ack; 2191 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2192 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2193 if (rack->rack_no_prr) 2194 log.u_bbr.lost = 0; 2195 else 2196 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2197 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2198 &rack->rc_inp->inp_socket->so_rcv, 2199 &rack->rc_inp->inp_socket->so_snd, 2200 TCP_LOG_MAPCHG, 0, 2201 0, &log, false, &tv); 2202 } 2203 } 2204 2205 static void 2206 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2207 struct rack_sendmap *rsm, int conf) 2208 { 2209 if (tcp_bblogging_on(tp)) { 2210 union tcp_log_stackspecific log; 2211 struct timeval tv; 2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2214 log.u_bbr.flex1 = t; 2215 log.u_bbr.flex2 = len; 2216 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2217 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2218 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2219 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2220 log.u_bbr.flex7 = conf; 2221 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2222 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2223 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2224 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2225 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2226 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2227 if (rsm) { 2228 log.u_bbr.pkt_epoch = rsm->r_start; 2229 log.u_bbr.lost = rsm->r_end; 2230 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2231 /* We loose any upper of the 24 bits */ 2232 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2233 } else { 2234 /* Its a SYN */ 2235 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2236 log.u_bbr.lost = 0; 2237 log.u_bbr.cwnd_gain = 0; 2238 log.u_bbr.pacing_gain = 0; 2239 } 2240 /* Write out general bits of interest rrs here */ 2241 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2242 log.u_bbr.use_lt_bw <<= 1; 2243 log.u_bbr.use_lt_bw |= rack->forced_ack; 2244 log.u_bbr.use_lt_bw <<= 1; 2245 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2246 log.u_bbr.use_lt_bw <<= 1; 2247 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2248 log.u_bbr.use_lt_bw <<= 1; 2249 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2250 log.u_bbr.use_lt_bw <<= 1; 2251 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2252 log.u_bbr.use_lt_bw <<= 1; 2253 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2254 log.u_bbr.use_lt_bw <<= 1; 2255 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2256 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2257 log.u_bbr.epoch = 
rack->r_ctl.rc_time_probertt_starts; 2258 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2259 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2260 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2261 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2262 log.u_bbr.bw_inuse <<= 32; 2263 if (rsm) 2264 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2265 TCP_LOG_EVENTP(tp, NULL, 2266 &rack->rc_inp->inp_socket->so_rcv, 2267 &rack->rc_inp->inp_socket->so_snd, 2268 BBR_LOG_BBRRTT, 0, 2269 0, &log, false, &tv); 2270 2271 2272 } 2273 } 2274 2275 static void 2276 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2277 { 2278 /* 2279 * Log the rtt sample we are 2280 * applying to the srtt algorithm in 2281 * useconds. 2282 */ 2283 if (tcp_bblogging_on(rack->rc_tp)) { 2284 union tcp_log_stackspecific log; 2285 struct timeval tv; 2286 2287 /* Convert our ms to a microsecond */ 2288 memset(&log, 0, sizeof(log)); 2289 log.u_bbr.flex1 = rtt; 2290 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2291 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2292 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2293 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2294 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2295 log.u_bbr.flex7 = 1; 2296 log.u_bbr.flex8 = rack->sack_attack_disable; 2297 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2298 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2299 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2300 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2301 log.u_bbr.pacing_gain = rack->r_must_retran; 2302 /* 2303 * We capture in delRate the upper 32 bits as 2304 * the confidence level we had declared, and the 2305 * lower 32 bits as the actual RTT using the arrival 2306 * timestamp. 
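 * For example (hypothetical values), a confidence of 1 paired with an
 * rs_us_rtt of 30000 usecs would be logged as
 * delRate = (1ULL << 32) | 30000.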
2307 */ 2308 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2309 log.u_bbr.delRate <<= 32; 2310 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2311 /* Lets capture all the things that make up t_rtxcur */ 2312 log.u_bbr.applimited = rack_rto_min; 2313 log.u_bbr.epoch = rack_rto_max; 2314 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2315 log.u_bbr.lost = rack_rto_min; 2316 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2317 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2318 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2319 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2320 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2321 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2322 &rack->rc_inp->inp_socket->so_rcv, 2323 &rack->rc_inp->inp_socket->so_snd, 2324 TCP_LOG_RTT, 0, 2325 0, &log, false, &tv); 2326 } 2327 } 2328 2329 static void 2330 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2331 { 2332 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2333 union tcp_log_stackspecific log; 2334 struct timeval tv; 2335 2336 /* Convert our ms to a microsecond */ 2337 memset(&log, 0, sizeof(log)); 2338 log.u_bbr.flex1 = rtt; 2339 log.u_bbr.flex2 = send_time; 2340 log.u_bbr.flex3 = ack_time; 2341 log.u_bbr.flex4 = where; 2342 log.u_bbr.flex7 = 2; 2343 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2344 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2345 &rack->rc_inp->inp_socket->so_rcv, 2346 &rack->rc_inp->inp_socket->so_snd, 2347 TCP_LOG_RTT, 0, 2348 0, &log, false, &tv); 2349 } 2350 } 2351 2352 2353 2354 static inline void 2355 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2356 { 2357 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 2358 union tcp_log_stackspecific log; 2359 struct timeval tv; 2360 2361 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2362 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2363 log.u_bbr.flex1 = line; 2364 log.u_bbr.flex2 = tick; 2365 log.u_bbr.flex3 = tp->t_maxunacktime; 2366 log.u_bbr.flex4 = tp->t_acktime; 2367 log.u_bbr.flex8 = event; 2368 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2369 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2370 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2371 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2372 log.u_bbr.pacing_gain = rack->r_must_retran; 2373 TCP_LOG_EVENTP(tp, NULL, 2374 &rack->rc_inp->inp_socket->so_rcv, 2375 &rack->rc_inp->inp_socket->so_snd, 2376 BBR_LOG_PROGRESS, 0, 2377 0, &log, false, &tv); 2378 } 2379 } 2380 2381 static void 2382 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2383 { 2384 if (tcp_bblogging_on(rack->rc_tp)) { 2385 union tcp_log_stackspecific log; 2386 2387 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2388 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2389 log.u_bbr.flex1 = slot; 2390 if (rack->rack_no_prr) 2391 log.u_bbr.flex2 = 0; 2392 else 2393 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2394 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2395 log.u_bbr.flex8 = rack->rc_in_persist; 2396 log.u_bbr.timeStamp = cts; 2397 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2398 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2399 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2400 log.u_bbr.pacing_gain = rack->r_must_retran; 2401 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2402 &rack->rc_inp->inp_socket->so_rcv, 2403 &rack->rc_inp->inp_socket->so_snd, 
2404 BBR_LOG_BBRSND, 0, 2405 0, &log, false, tv); 2406 } 2407 } 2408 2409 static void 2410 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2411 { 2412 if (tcp_bblogging_on(rack->rc_tp)) { 2413 union tcp_log_stackspecific log; 2414 struct timeval tv; 2415 2416 memset(&log, 0, sizeof(log)); 2417 log.u_bbr.flex1 = did_out; 2418 log.u_bbr.flex2 = nxt_pkt; 2419 log.u_bbr.flex3 = way_out; 2420 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2421 if (rack->rack_no_prr) 2422 log.u_bbr.flex5 = 0; 2423 else 2424 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2425 log.u_bbr.flex6 = nsegs; 2426 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2427 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2428 log.u_bbr.flex7 <<= 1; 2429 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2430 log.u_bbr.flex7 <<= 1; 2431 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2432 log.u_bbr.flex8 = rack->rc_in_persist; 2433 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2434 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2435 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2436 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2437 log.u_bbr.use_lt_bw <<= 1; 2438 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2439 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2440 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2441 log.u_bbr.pacing_gain = rack->r_must_retran; 2442 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2443 &rack->rc_inp->inp_socket->so_rcv, 2444 &rack->rc_inp->inp_socket->so_snd, 2445 BBR_LOG_DOSEG_DONE, 0, 2446 0, &log, false, &tv); 2447 } 2448 } 2449 2450 static void 2451 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2452 { 2453 if (tcp_bblogging_on(rack->rc_tp)) { 2454 union tcp_log_stackspecific log; 2455 struct timeval tv; 2456 2457 memset(&log, 0, sizeof(log)); 2458 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2459 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2460 log.u_bbr.flex4 = arg1; 2461 log.u_bbr.flex5 = arg2; 2462 log.u_bbr.flex6 = arg3; 2463 log.u_bbr.flex8 = frm; 2464 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2465 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2466 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2467 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2468 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2469 log.u_bbr.pacing_gain = rack->r_must_retran; 2470 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 2471 &tptosocket(tp)->so_snd, 2472 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 2473 } 2474 } 2475 2476 static void 2477 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2478 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2479 { 2480 if (tcp_bblogging_on(rack->rc_tp)) { 2481 union tcp_log_stackspecific log; 2482 struct timeval tv; 2483 2484 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2485 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2486 log.u_bbr.flex1 = slot; 2487 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2488 log.u_bbr.flex4 = reason; 2489 if (rack->rack_no_prr) 2490 log.u_bbr.flex5 = 0; 2491 else 2492 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2493 log.u_bbr.flex7 = hpts_calling; 2494 log.u_bbr.flex8 = rack->rc_in_persist; 2495 log.u_bbr.lt_epoch = cwnd_to_use; 2496 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2497 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, 
rack->r_ctl.rc_sacked); 2498 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2499 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2500 log.u_bbr.pacing_gain = rack->r_must_retran; 2501 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2502 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2503 &rack->rc_inp->inp_socket->so_rcv, 2504 &rack->rc_inp->inp_socket->so_snd, 2505 BBR_LOG_JUSTRET, 0, 2506 tlen, &log, false, &tv); 2507 } 2508 } 2509 2510 static void 2511 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2512 struct timeval *tv, uint32_t flags_on_entry) 2513 { 2514 if (tcp_bblogging_on(rack->rc_tp)) { 2515 union tcp_log_stackspecific log; 2516 2517 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2518 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2519 log.u_bbr.flex1 = line; 2520 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2521 log.u_bbr.flex3 = flags_on_entry; 2522 log.u_bbr.flex4 = us_cts; 2523 if (rack->rack_no_prr) 2524 log.u_bbr.flex5 = 0; 2525 else 2526 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2527 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2528 log.u_bbr.flex7 = hpts_removed; 2529 log.u_bbr.flex8 = 1; 2530 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2531 log.u_bbr.timeStamp = us_cts; 2532 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2533 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2534 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2535 log.u_bbr.pacing_gain = rack->r_must_retran; 2536 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2537 &rack->rc_inp->inp_socket->so_rcv, 2538 &rack->rc_inp->inp_socket->so_snd, 2539 BBR_LOG_TIMERCANC, 0, 2540 0, &log, false, tv); 2541 } 2542 } 2543 2544 static void 2545 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2546 uint32_t flex1, uint32_t flex2, 2547 uint32_t flex3, uint32_t flex4, 2548 uint32_t flex5, uint32_t flex6, 2549 uint16_t flex7, uint8_t mod) 2550 { 2551 if (tcp_bblogging_on(rack->rc_tp)) { 2552 union tcp_log_stackspecific log; 2553 struct timeval tv; 2554 2555 if (mod == 1) { 2556 /* No you can't use 1, its for the real to cancel */ 2557 return; 2558 } 2559 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2560 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2561 log.u_bbr.flex1 = flex1; 2562 log.u_bbr.flex2 = flex2; 2563 log.u_bbr.flex3 = flex3; 2564 log.u_bbr.flex4 = flex4; 2565 log.u_bbr.flex5 = flex5; 2566 log.u_bbr.flex6 = flex6; 2567 log.u_bbr.flex7 = flex7; 2568 log.u_bbr.flex8 = mod; 2569 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2570 &rack->rc_inp->inp_socket->so_rcv, 2571 &rack->rc_inp->inp_socket->so_snd, 2572 BBR_LOG_TIMERCANC, 0, 2573 0, &log, false, &tv); 2574 } 2575 } 2576 2577 static void 2578 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2579 { 2580 if (tcp_bblogging_on(rack->rc_tp)) { 2581 union tcp_log_stackspecific log; 2582 struct timeval tv; 2583 2584 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2585 log.u_bbr.flex1 = timers; 2586 log.u_bbr.flex2 = ret; 2587 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2588 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2589 log.u_bbr.flex5 = cts; 2590 if (rack->rack_no_prr) 2591 log.u_bbr.flex6 = 0; 2592 else 2593 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2594 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2595 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2596 log.u_bbr.pacing_gain = rack->r_must_retran; 2597 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2598 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2599 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2600 
&rack->rc_inp->inp_socket->so_rcv, 2601 &rack->rc_inp->inp_socket->so_snd, 2602 BBR_LOG_TO_PROCESS, 0, 2603 0, &log, false, &tv); 2604 } 2605 } 2606 2607 static void 2608 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2609 { 2610 if (tcp_bblogging_on(rack->rc_tp)) { 2611 union tcp_log_stackspecific log; 2612 struct timeval tv; 2613 2614 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2615 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2616 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2617 if (rack->rack_no_prr) 2618 log.u_bbr.flex3 = 0; 2619 else 2620 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2621 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2622 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2623 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2624 log.u_bbr.flex7 = line; 2625 log.u_bbr.flex8 = frm; 2626 log.u_bbr.pkts_out = orig_cwnd; 2627 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2628 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2629 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2630 log.u_bbr.use_lt_bw <<= 1; 2631 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2632 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2633 &rack->rc_inp->inp_socket->so_rcv, 2634 &rack->rc_inp->inp_socket->so_snd, 2635 BBR_LOG_BBRUPD, 0, 2636 0, &log, false, &tv); 2637 } 2638 } 2639 2640 #ifdef NETFLIX_EXP_DETECTION 2641 static void 2642 rack_log_sad(struct tcp_rack *rack, int event) 2643 { 2644 if (tcp_bblogging_on(rack->rc_tp)) { 2645 union tcp_log_stackspecific log; 2646 struct timeval tv; 2647 2648 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2649 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2650 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2651 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2652 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2653 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2654 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2655 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2656 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2657 log.u_bbr.lt_epoch |= rack->do_detection; 2658 log.u_bbr.applimited = tcp_map_minimum; 2659 log.u_bbr.flex7 = rack->sack_attack_disable; 2660 log.u_bbr.flex8 = event; 2661 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2662 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2663 log.u_bbr.delivered = tcp_sad_decay_val; 2664 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2665 &rack->rc_inp->inp_socket->so_rcv, 2666 &rack->rc_inp->inp_socket->so_snd, 2667 TCP_SAD_DETECTION, 0, 2668 0, &log, false, &tv); 2669 } 2670 } 2671 #endif 2672 2673 static void 2674 rack_counter_destroy(void) 2675 { 2676 counter_u64_free(rack_fto_send); 2677 counter_u64_free(rack_fto_rsm_send); 2678 counter_u64_free(rack_nfto_resend); 2679 counter_u64_free(rack_hw_pace_init_fail); 2680 counter_u64_free(rack_hw_pace_lost); 2681 counter_u64_free(rack_non_fto_send); 2682 counter_u64_free(rack_extended_rfo); 2683 counter_u64_free(rack_ack_total); 2684 counter_u64_free(rack_express_sack); 2685 counter_u64_free(rack_sack_total); 2686 counter_u64_free(rack_move_none); 2687 counter_u64_free(rack_move_some); 2688 counter_u64_free(rack_sack_attacks_detected); 2689 counter_u64_free(rack_sack_attacks_reversed); 2690 counter_u64_free(rack_sack_used_next_merge); 2691 counter_u64_free(rack_sack_used_prev_merge); 2692 counter_u64_free(rack_tlp_tot); 2693 counter_u64_free(rack_tlp_newdata); 2694 counter_u64_free(rack_tlp_retran); 2695 counter_u64_free(rack_tlp_retran_bytes); 2696 counter_u64_free(rack_to_tot); 2697 counter_u64_free(rack_saw_enobuf); 2698 
counter_u64_free(rack_saw_enobuf_hw); 2699 counter_u64_free(rack_saw_enetunreach); 2700 counter_u64_free(rack_hot_alloc); 2701 counter_u64_free(rack_to_alloc); 2702 counter_u64_free(rack_to_alloc_hard); 2703 counter_u64_free(rack_to_alloc_emerg); 2704 counter_u64_free(rack_to_alloc_limited); 2705 counter_u64_free(rack_alloc_limited_conns); 2706 counter_u64_free(rack_split_limited); 2707 counter_u64_free(rack_multi_single_eq); 2708 counter_u64_free(rack_proc_non_comp_ack); 2709 counter_u64_free(rack_sack_proc_all); 2710 counter_u64_free(rack_sack_proc_restart); 2711 counter_u64_free(rack_sack_proc_short); 2712 counter_u64_free(rack_sack_skipped_acked); 2713 counter_u64_free(rack_sack_splits); 2714 counter_u64_free(rack_input_idle_reduces); 2715 counter_u64_free(rack_collapsed_win); 2716 counter_u64_free(rack_collapsed_win_rxt); 2717 counter_u64_free(rack_collapsed_win_rxt_bytes); 2718 counter_u64_free(rack_collapsed_win_seen); 2719 counter_u64_free(rack_try_scwnd); 2720 counter_u64_free(rack_persists_sends); 2721 counter_u64_free(rack_persists_acks); 2722 counter_u64_free(rack_persists_loss); 2723 counter_u64_free(rack_persists_lost_ends); 2724 #ifdef INVARIANTS 2725 counter_u64_free(rack_adjust_map_bw); 2726 #endif 2727 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2728 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2729 } 2730 2731 static struct rack_sendmap * 2732 rack_alloc(struct tcp_rack *rack) 2733 { 2734 struct rack_sendmap *rsm; 2735 2736 /* 2737 * First get the top of the list it in 2738 * theory is the "hottest" rsm we have, 2739 * possibly just freed by ack processing. 2740 */ 2741 if (rack->rc_free_cnt > rack_free_cache) { 2742 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2743 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2744 counter_u64_add(rack_hot_alloc, 1); 2745 rack->rc_free_cnt--; 2746 return (rsm); 2747 } 2748 /* 2749 * Once we get under our free cache we probably 2750 * no longer have a "hot" one available. Lets 2751 * get one from UMA. 2752 */ 2753 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2754 if (rsm) { 2755 rack->r_ctl.rc_num_maps_alloced++; 2756 counter_u64_add(rack_to_alloc, 1); 2757 return (rsm); 2758 } 2759 /* 2760 * Dig in to our aux rsm's (the last two) since 2761 * UMA failed to get us one. 
2762 */ 2763 if (rack->rc_free_cnt) { 2764 counter_u64_add(rack_to_alloc_emerg, 1); 2765 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2766 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2767 rack->rc_free_cnt--; 2768 return (rsm); 2769 } 2770 return (NULL); 2771 } 2772 2773 static struct rack_sendmap * 2774 rack_alloc_full_limit(struct tcp_rack *rack) 2775 { 2776 if ((V_tcp_map_entries_limit > 0) && 2777 (rack->do_detection == 0) && 2778 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2779 counter_u64_add(rack_to_alloc_limited, 1); 2780 if (!rack->alloc_limit_reported) { 2781 rack->alloc_limit_reported = 1; 2782 counter_u64_add(rack_alloc_limited_conns, 1); 2783 } 2784 return (NULL); 2785 } 2786 return (rack_alloc(rack)); 2787 } 2788 2789 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2790 static struct rack_sendmap * 2791 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2792 { 2793 struct rack_sendmap *rsm; 2794 2795 if (limit_type) { 2796 /* currently there is only one limit type */ 2797 if (V_tcp_map_split_limit > 0 && 2798 (rack->do_detection == 0) && 2799 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2800 counter_u64_add(rack_split_limited, 1); 2801 if (!rack->alloc_limit_reported) { 2802 rack->alloc_limit_reported = 1; 2803 counter_u64_add(rack_alloc_limited_conns, 1); 2804 } 2805 return (NULL); 2806 } 2807 } 2808 2809 /* allocate and mark in the limit type, if set */ 2810 rsm = rack_alloc(rack); 2811 if (rsm != NULL && limit_type) { 2812 rsm->r_limit_type = limit_type; 2813 rack->r_ctl.rc_num_split_allocs++; 2814 } 2815 return (rsm); 2816 } 2817 2818 static void 2819 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2820 { 2821 if (rsm->r_flags & RACK_APP_LIMITED) { 2822 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2823 rack->r_ctl.rc_app_limited_cnt--; 2824 } 2825 } 2826 if (rsm->r_limit_type) { 2827 /* currently there is only one limit type */ 2828 rack->r_ctl.rc_num_split_allocs--; 2829 } 2830 if (rsm == rack->r_ctl.rc_first_appl) { 2831 if (rack->r_ctl.rc_app_limited_cnt == 0) 2832 rack->r_ctl.rc_first_appl = NULL; 2833 else { 2834 /* Follow the next one out */ 2835 struct rack_sendmap fe; 2836 2837 fe.r_start = rsm->r_nseq_appl; 2838 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2839 } 2840 } 2841 if (rsm == rack->r_ctl.rc_resend) 2842 rack->r_ctl.rc_resend = NULL; 2843 if (rsm == rack->r_ctl.rc_end_appl) 2844 rack->r_ctl.rc_end_appl = NULL; 2845 if (rack->r_ctl.rc_tlpsend == rsm) 2846 rack->r_ctl.rc_tlpsend = NULL; 2847 if (rack->r_ctl.rc_sacklast == rsm) 2848 rack->r_ctl.rc_sacklast = NULL; 2849 memset(rsm, 0, sizeof(struct rack_sendmap)); 2850 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2851 rack->rc_free_cnt++; 2852 } 2853 2854 static void 2855 rack_free_trim(struct tcp_rack *rack) 2856 { 2857 struct rack_sendmap *rsm; 2858 2859 /* 2860 * Free up all the tail entries until 2861 * we get our list down to the limit. 
2862 */ 2863 while (rack->rc_free_cnt > rack_free_cache) { 2864 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2865 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2866 rack->rc_free_cnt--; 2867 uma_zfree(rack_zone, rsm); 2868 } 2869 } 2870 2871 2872 static uint32_t 2873 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2874 { 2875 uint64_t srtt, bw, len, tim; 2876 uint32_t segsiz, def_len, minl; 2877 2878 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2879 def_len = rack_def_data_window * segsiz; 2880 if (rack->rc_gp_filled == 0) { 2881 /* 2882 * We have no measurement (IW is in flight?) so 2883 * we can only guess using our data_window sysctl 2884 * value (usually 20MSS). 2885 */ 2886 return (def_len); 2887 } 2888 /* 2889 * Now we have a number of factors to consider. 2890 * 2891 * 1) We have a desired BDP which is usually 2892 * at least 2. 2893 * 2) We have a minimum number of rtt's usually 1 SRTT 2894 * but we allow it too to be more. 2895 * 3) We want to make sure a measurement last N useconds (if 2896 * we have set rack_min_measure_usec. 2897 * 2898 * We handle the first concern here by trying to create a data 2899 * window of max(rack_def_data_window, DesiredBDP). The 2900 * second concern we handle in not letting the measurement 2901 * window end normally until at least the required SRTT's 2902 * have gone by which is done further below in 2903 * rack_enough_for_measurement(). Finally the third concern 2904 * we also handle here by calculating how long that time 2905 * would take at the current BW and then return the 2906 * max of our first calculation and that length. Note 2907 * that if rack_min_measure_usec is 0, we don't deal 2908 * with concern 3. Also for both Concern 1 and 3 an 2909 * application limited period could end the measurement 2910 * earlier. 2911 * 2912 * So lets calculate the BDP with the "known" b/w using 2913 * the SRTT has our rtt and then multiply it by the 2914 * goal. 2915 */ 2916 bw = rack_get_bw(rack); 2917 srtt = (uint64_t)tp->t_srtt; 2918 len = bw * srtt; 2919 len /= (uint64_t)HPTS_USEC_IN_SEC; 2920 len *= max(1, rack_goal_bdp); 2921 /* Now we need to round up to the nearest MSS */ 2922 len = roundup(len, segsiz); 2923 if (rack_min_measure_usec) { 2924 /* Now calculate our min length for this b/w */ 2925 tim = rack_min_measure_usec; 2926 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 2927 if (minl == 0) 2928 minl = 1; 2929 minl = roundup(minl, segsiz); 2930 if (len < minl) 2931 len = minl; 2932 } 2933 /* 2934 * Now if we have a very small window we want 2935 * to attempt to get the window that is 2936 * as small as possible. This happens on 2937 * low b/w connections and we don't want to 2938 * span huge numbers of rtt's between measurements. 2939 * 2940 * We basically include 2 over our "MIN window" so 2941 * that the measurement can be shortened (possibly) by 2942 * an ack'ed packet. 2943 */ 2944 if (len < def_len) 2945 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 2946 else 2947 return (max((uint32_t)len, def_len)); 2948 2949 } 2950 2951 static int 2952 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 2953 { 2954 uint32_t tim, srtts, segsiz; 2955 2956 /* 2957 * Has enough time passed for the GP measurement to be valid? 
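 * A rough summary of the checks below: the measurement is accepted if
 * everything sent has been acked, or, once enough bytes beyond gput_seq
 * have been covered, if we have reached the first app-limited point or
 * if at least rack_min_srtts * gp_srtt microseconds have elapsed since
 * gput_ts. Otherwise we keep waiting.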
2958 */ 2959 if ((tp->snd_max == tp->snd_una) || 2960 (th_ack == tp->snd_max)){ 2961 /* All is acked */ 2962 *quality = RACK_QUALITY_ALLACKED; 2963 return (1); 2964 } 2965 if (SEQ_LT(th_ack, tp->gput_seq)) { 2966 /* Not enough bytes yet */ 2967 return (0); 2968 } 2969 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2970 if (SEQ_LT(th_ack, tp->gput_ack) && 2971 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 2972 /* Not enough bytes yet */ 2973 return (0); 2974 } 2975 if (rack->r_ctl.rc_first_appl && 2976 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 2977 /* 2978 * We are up to the app limited send point 2979 * we have to measure irrespective of the time.. 2980 */ 2981 *quality = RACK_QUALITY_APPLIMITED; 2982 return (1); 2983 } 2984 /* Now what about time? */ 2985 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 2986 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 2987 if (tim >= srtts) { 2988 *quality = RACK_QUALITY_HIGH; 2989 return (1); 2990 } 2991 /* Nope not even a full SRTT has passed */ 2992 return (0); 2993 } 2994 2995 static void 2996 rack_log_timely(struct tcp_rack *rack, 2997 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 2998 uint64_t up_bnd, int line, uint8_t method) 2999 { 3000 if (tcp_bblogging_on(rack->rc_tp)) { 3001 union tcp_log_stackspecific log; 3002 struct timeval tv; 3003 3004 memset(&log, 0, sizeof(log)); 3005 log.u_bbr.flex1 = logged; 3006 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3007 log.u_bbr.flex2 <<= 4; 3008 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3009 log.u_bbr.flex2 <<= 4; 3010 log.u_bbr.flex2 |= rack->rc_gp_incr; 3011 log.u_bbr.flex2 <<= 4; 3012 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3013 log.u_bbr.flex3 = rack->rc_gp_incr; 3014 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3015 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3016 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3017 log.u_bbr.flex7 = rack->rc_gp_bwred; 3018 log.u_bbr.flex8 = method; 3019 log.u_bbr.cur_del_rate = cur_bw; 3020 log.u_bbr.delRate = low_bnd; 3021 log.u_bbr.bw_inuse = up_bnd; 3022 log.u_bbr.rttProp = rack_get_bw(rack); 3023 log.u_bbr.pkt_epoch = line; 3024 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3025 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3026 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3027 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3028 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3029 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3030 log.u_bbr.cwnd_gain <<= 1; 3031 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3032 log.u_bbr.cwnd_gain <<= 1; 3033 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3034 log.u_bbr.cwnd_gain <<= 1; 3035 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3036 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3037 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3038 &rack->rc_inp->inp_socket->so_rcv, 3039 &rack->rc_inp->inp_socket->so_snd, 3040 TCP_TIMELY_WORK, 0, 3041 0, &log, false, &tv); 3042 } 3043 } 3044 3045 static int 3046 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3047 { 3048 /* 3049 * Before we increase we need to know if 3050 * the estimate just made was less than 3051 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3052 * 3053 * If we already are pacing at a fast enough 3054 * rate to push us faster there is no sense of 3055 * increasing. 3056 * 3057 * We first caculate our actual pacing rate (ss or ca multiplier 3058 * times our cur_bw). 
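 * (For example, with a hypothetical multiplier of 120 and a cur_bw of
 * 10 Mbps, the actual pacing rate works out to 12 Mbps.)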
3059 * 3060 * Then we take the last measured rate and multipy by our 3061 * maximum pacing overage to give us a max allowable rate. 3062 * 3063 * If our act_rate is smaller than our max_allowable rate 3064 * then we should increase. Else we should hold steady. 3065 * 3066 */ 3067 uint64_t act_rate, max_allow_rate; 3068 3069 if (rack_timely_no_stopping) 3070 return (1); 3071 3072 if ((cur_bw == 0) || (last_bw_est == 0)) { 3073 /* 3074 * Initial startup case or 3075 * everything is acked case. 3076 */ 3077 rack_log_timely(rack, mult, cur_bw, 0, 0, 3078 __LINE__, 9); 3079 return (1); 3080 } 3081 if (mult <= 100) { 3082 /* 3083 * We can always pace at or slightly above our rate. 3084 */ 3085 rack_log_timely(rack, mult, cur_bw, 0, 0, 3086 __LINE__, 9); 3087 return (1); 3088 } 3089 act_rate = cur_bw * (uint64_t)mult; 3090 act_rate /= 100; 3091 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3092 max_allow_rate /= 100; 3093 if (act_rate < max_allow_rate) { 3094 /* 3095 * Here the rate we are actually pacing at 3096 * is smaller than 10% above our last measurement. 3097 * This means we are pacing below what we would 3098 * like to try to achieve (plus some wiggle room). 3099 */ 3100 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3101 __LINE__, 9); 3102 return (1); 3103 } else { 3104 /* 3105 * Here we are already pacing at least rack_max_per_above(10%) 3106 * what we are getting back. This indicates most likely 3107 * that we are being limited (cwnd/rwnd/app) and can't 3108 * get any more b/w. There is no sense of trying to 3109 * raise up the pacing rate its not speeding us up 3110 * and we already are pacing faster than we are getting. 3111 */ 3112 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3113 __LINE__, 8); 3114 return (0); 3115 } 3116 } 3117 3118 static void 3119 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3120 { 3121 /* 3122 * When we drag bottom, we want to assure 3123 * that no multiplier is below 1.0, if so 3124 * we want to restore it to at least that. 3125 */ 3126 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3127 /* This is unlikely we usually do not touch recovery */ 3128 rack->r_ctl.rack_per_of_gp_rec = 100; 3129 } 3130 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3131 rack->r_ctl.rack_per_of_gp_ca = 100; 3132 } 3133 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3134 rack->r_ctl.rack_per_of_gp_ss = 100; 3135 } 3136 } 3137 3138 static void 3139 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3140 { 3141 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3142 rack->r_ctl.rack_per_of_gp_ca = 100; 3143 } 3144 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3145 rack->r_ctl.rack_per_of_gp_ss = 100; 3146 } 3147 } 3148 3149 static void 3150 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3151 { 3152 int32_t calc, logged, plus; 3153 3154 logged = 0; 3155 3156 if (override) { 3157 /* 3158 * override is passed when we are 3159 * loosing b/w and making one last 3160 * gasp at trying to not loose out 3161 * to a new-reno flow. 3162 */ 3163 goto extra_boost; 3164 } 3165 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3166 if (rack->rc_gp_incr && 3167 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3168 /* 3169 * Reset and get 5 strokes more before the boost. Note 3170 * that the count is 0 based so we have to add one. 
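 * (Sketch with assumed numbers: if rack_gp_increase_per were 1, a
 *  normal pass adds 1 to whichever multipliers apply, while the
 *  boosted pass below adds 1 * RACK_TIMELY_CNT_BOOST, i.e. 5. In
 *  every case the result is clamped to 0xffff and, unless we have
 *  dragged bottom, to the configured per-state upper bound.)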
3171 */ 3172 extra_boost: 3173 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3174 rack->rc_gp_timely_inc_cnt = 0; 3175 } else 3176 plus = (uint32_t)rack_gp_increase_per; 3177 /* Must be at least 1% increase for true timely increases */ 3178 if ((plus < 1) && 3179 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3180 plus = 1; 3181 if (rack->rc_gp_saw_rec && 3182 (rack->rc_gp_no_rec_chg == 0) && 3183 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3184 rack->r_ctl.rack_per_of_gp_rec)) { 3185 /* We have been in recovery ding it too */ 3186 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3187 if (calc > 0xffff) 3188 calc = 0xffff; 3189 logged |= 1; 3190 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3191 if (rack_per_upper_bound_ss && 3192 (rack->rc_dragged_bottom == 0) && 3193 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3194 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3195 } 3196 if (rack->rc_gp_saw_ca && 3197 (rack->rc_gp_saw_ss == 0) && 3198 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3199 rack->r_ctl.rack_per_of_gp_ca)) { 3200 /* In CA */ 3201 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3202 if (calc > 0xffff) 3203 calc = 0xffff; 3204 logged |= 2; 3205 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3206 if (rack_per_upper_bound_ca && 3207 (rack->rc_dragged_bottom == 0) && 3208 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3209 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3210 } 3211 if (rack->rc_gp_saw_ss && 3212 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3213 rack->r_ctl.rack_per_of_gp_ss)) { 3214 /* In SS */ 3215 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3216 if (calc > 0xffff) 3217 calc = 0xffff; 3218 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3219 if (rack_per_upper_bound_ss && 3220 (rack->rc_dragged_bottom == 0) && 3221 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3222 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3223 logged |= 4; 3224 } 3225 if (logged && 3226 (rack->rc_gp_incr == 0)){ 3227 /* Go into increment mode */ 3228 rack->rc_gp_incr = 1; 3229 rack->rc_gp_timely_inc_cnt = 0; 3230 } 3231 if (rack->rc_gp_incr && 3232 logged && 3233 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3234 rack->rc_gp_timely_inc_cnt++; 3235 } 3236 rack_log_timely(rack, logged, plus, 0, 0, 3237 __LINE__, 1); 3238 } 3239 3240 static uint32_t 3241 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3242 { 3243 /* 3244 * norm_grad = rtt_diff / minrtt; 3245 * new_per = curper * (1 - B * norm_grad) 3246 * 3247 * B = rack_gp_decrease_per (default 10%) 3248 * rtt_dif = input var current rtt-diff 3249 * curper = input var current percentage 3250 * minrtt = from rack filter 3251 * 3252 */ 3253 uint64_t perf; 3254 3255 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3256 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3257 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3258 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3259 (uint64_t)1000000)) / 3260 (uint64_t)1000000); 3261 if (perf > curper) { 3262 /* TSNH */ 3263 perf = curper - 1; 3264 } 3265 return ((uint32_t)perf); 3266 } 3267 3268 static uint32_t 3269 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3270 { 3271 /* 3272 * highrttthresh 3273 * result = curper * (1 - (B * ( 1 - ------ )) 3274 * gp_srtt 3275 * 3276 * B = rack_gp_decrease_per (default 10%) 3277 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3278 */ 3279 uint64_t perf; 3280 uint32_t 
highrttthresh; 3281 3282 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3283 3284 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3285 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3286 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3287 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3288 return (perf); 3289 } 3290 3291 static void 3292 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3293 { 3294 uint64_t logvar, logvar2, logvar3; 3295 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3296 3297 if (rack->rc_gp_incr) { 3298 /* Turn off increment counting */ 3299 rack->rc_gp_incr = 0; 3300 rack->rc_gp_timely_inc_cnt = 0; 3301 } 3302 ss_red = ca_red = rec_red = 0; 3303 logged = 0; 3304 /* Calculate the reduction value */ 3305 if (rtt_diff < 0) { 3306 rtt_diff *= -1; 3307 } 3308 /* Must be at least 1% reduction */ 3309 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3310 /* We have been in recovery ding it too */ 3311 if (timely_says == 2) { 3312 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3313 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3314 if (alt < new_per) 3315 val = alt; 3316 else 3317 val = new_per; 3318 } else 3319 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3320 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3321 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3322 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3323 } else { 3324 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3325 rec_red = 0; 3326 } 3327 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3328 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3329 logged |= 1; 3330 } 3331 if (rack->rc_gp_saw_ss) { 3332 /* Sent in SS */ 3333 if (timely_says == 2) { 3334 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3335 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3336 if (alt < new_per) 3337 val = alt; 3338 else 3339 val = new_per; 3340 } else 3341 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3342 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3343 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3344 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3345 } else { 3346 ss_red = new_per; 3347 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3348 logvar = new_per; 3349 logvar <<= 32; 3350 logvar |= alt; 3351 logvar2 = (uint32_t)rtt; 3352 logvar2 <<= 32; 3353 logvar2 |= (uint32_t)rtt_diff; 3354 logvar3 = rack_gp_rtt_maxmul; 3355 logvar3 <<= 32; 3356 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3357 rack_log_timely(rack, timely_says, 3358 logvar2, logvar3, 3359 logvar, __LINE__, 10); 3360 } 3361 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3362 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3363 logged |= 4; 3364 } else if (rack->rc_gp_saw_ca) { 3365 /* Sent in CA */ 3366 if (timely_says == 2) { 3367 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3368 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3369 if (alt < new_per) 3370 val = alt; 3371 else 3372 val = new_per; 3373 } else 3374 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3375 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3376 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3377 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3378 } else { 3379 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3380 ca_red = 0; 3381 logvar = new_per; 3382 logvar <<= 32; 3383 logvar |= alt; 3384 logvar2 = (uint32_t)rtt; 3385 logvar2 <<= 32; 3386 logvar2 |= (uint32_t)rtt_diff; 3387 logvar3 = rack_gp_rtt_maxmul; 3388 logvar3 <<= 32; 3389 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3390 rack_log_timely(rack, timely_says, 3391 logvar2, logvar3, 3392 logvar, __LINE__, 10); 3393 } 3394 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3395 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3396 logged |= 2; 3397 } 3398 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3399 rack->rc_gp_timely_dec_cnt++; 3400 if (rack_timely_dec_clear && 3401 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3402 rack->rc_gp_timely_dec_cnt = 0; 3403 } 3404 logvar = ss_red; 3405 logvar <<= 32; 3406 logvar |= ca_red; 3407 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3408 __LINE__, 2); 3409 } 3410 3411 static void 3412 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3413 uint32_t rtt, uint32_t line, uint8_t reas) 3414 { 3415 if (tcp_bblogging_on(rack->rc_tp)) { 3416 union tcp_log_stackspecific log; 3417 struct timeval tv; 3418 3419 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3420 log.u_bbr.flex1 = line; 3421 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3422 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3423 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3424 log.u_bbr.flex5 = rtt; 3425 log.u_bbr.flex6 = rack->rc_highly_buffered; 3426 log.u_bbr.flex6 <<= 1; 3427 log.u_bbr.flex6 |= rack->forced_ack; 3428 log.u_bbr.flex6 <<= 1; 3429 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3430 log.u_bbr.flex6 <<= 1; 3431 log.u_bbr.flex6 |= rack->in_probe_rtt; 3432 log.u_bbr.flex6 <<= 1; 3433 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3434 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3435 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3436 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3437 log.u_bbr.flex8 = reas; 3438 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3439 log.u_bbr.delRate = rack_get_bw(rack); 3440 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3441 log.u_bbr.cur_del_rate <<= 32; 3442 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3443 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3444 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3445 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3446 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3447 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3448 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3449 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3450 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3451 log.u_bbr.rttProp = us_cts; 3452 log.u_bbr.rttProp <<= 32; 3453 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3454 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3455 &rack->rc_inp->inp_socket->so_rcv, 3456 &rack->rc_inp->inp_socket->so_snd, 3457 BBR_LOG_RTT_SHRINKS, 0, 3458 0, &log, false, &rack->r_ctl.act_rcv_time); 3459 } 3460 } 3461 3462 static void 3463 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3464 { 3465 uint64_t bwdp; 3466 3467 bwdp = rack_get_bw(rack); 3468 bwdp *= (uint64_t)rtt; 3469 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3470 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3471 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3472 /* 3473 * A window protocol must be able to have 4 packets 3474 * outstanding as the floor in order to function 3475 * (especially considering delayed ack :D). 3476 */ 3477 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3478 } 3479 } 3480 3481 static void 3482 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3483 { 3484 /** 3485 * ProbeRTT is a bit different in rack_pacing than in 3486 * BBR. It is like BBR in that it uses the lowering of 3487 * the RTT as a signal that we saw something new and 3488 * counts from there for how long between. But it is 3489 * different in that its quite simple. It does not 3490 * play with the cwnd and wait until we get down 3491 * to N segments outstanding and hold that for 3492 * 200ms. Instead it just sets the pacing reduction 3493 * rate to a set percentage (70 by default) and hold 3494 * that for a number of recent GP Srtt's. 3495 */ 3496 uint32_t segsiz; 3497 3498 if (rack->rc_gp_dyn_mul == 0) 3499 return; 3500 3501 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3502 /* We are idle */ 3503 return; 3504 } 3505 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3506 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3507 /* 3508 * Stop the goodput now, the idea here is 3509 * that future measurements with in_probe_rtt 3510 * won't register if they are not greater so 3511 * we want to get what info (if any) is available 3512 * now. 3513 */ 3514 rack_do_goodput_measurement(rack->rc_tp, rack, 3515 rack->rc_tp->snd_una, __LINE__, 3516 RACK_QUALITY_PROBERTT); 3517 } 3518 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3519 rack->r_ctl.rc_time_probertt_entered = us_cts; 3520 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3521 rack->r_ctl.rc_pace_min_segs); 3522 rack->in_probe_rtt = 1; 3523 rack->measure_saw_probe_rtt = 1; 3524 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3525 rack->r_ctl.rc_time_probertt_starts = 0; 3526 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3527 if (rack_probertt_use_min_rtt_entry) 3528 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3529 else 3530 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3531 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3532 __LINE__, RACK_RTTS_ENTERPROBE); 3533 } 3534 3535 static void 3536 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3537 { 3538 struct rack_sendmap *rsm; 3539 uint32_t segsiz; 3540 3541 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3542 rack->r_ctl.rc_pace_min_segs); 3543 rack->in_probe_rtt = 0; 3544 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3545 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3546 /* 3547 * Stop the goodput now, the idea here is 3548 * that future measurements with in_probe_rtt 3549 * won't register if they are not greater so 3550 * we want to get what info (if any) is available 3551 * now. 3552 */ 3553 rack_do_goodput_measurement(rack->rc_tp, rack, 3554 rack->rc_tp->snd_una, __LINE__, 3555 RACK_QUALITY_PROBERTT); 3556 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3557 /* 3558 * We don't have enough data to make a measurement. 3559 * So lets just stop and start here after exiting 3560 * probe-rtt. We probably are not interested in 3561 * the results anyway. 3562 */ 3563 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3564 } 3565 /* 3566 * Measurements through the current snd_max are going 3567 * to be limited by the slower pacing rate. 
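 * (The pacing multiplier was pulled down to the probe-rtt
 *  percentage, rack_per_of_gp_probertt, and possibly lower still
 *  while draining, so a goodput sample taken across those sends
 *  would understate what the path can really do.)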
3568 * 3569 * We need to mark these as app-limited so we 3570 * don't collapse the b/w. 3571 */ 3572 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3573 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3574 if (rack->r_ctl.rc_app_limited_cnt == 0) 3575 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3576 else { 3577 /* 3578 * Go out to the end app limited and mark 3579 * this new one as next and move the end_appl up 3580 * to this guy. 3581 */ 3582 if (rack->r_ctl.rc_end_appl) 3583 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3584 rack->r_ctl.rc_end_appl = rsm; 3585 } 3586 rsm->r_flags |= RACK_APP_LIMITED; 3587 rack->r_ctl.rc_app_limited_cnt++; 3588 } 3589 /* 3590 * Now, we need to examine our pacing rate multipliers. 3591 * If its under 100%, we need to kick it back up to 3592 * 100%. We also don't let it be over our "max" above 3593 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3594 * Note setting clamp_atexit_prtt to 0 has the effect 3595 * of setting CA/SS to 100% always at exit (which is 3596 * the default behavior). 3597 */ 3598 if (rack_probertt_clear_is) { 3599 rack->rc_gp_incr = 0; 3600 rack->rc_gp_bwred = 0; 3601 rack->rc_gp_timely_inc_cnt = 0; 3602 rack->rc_gp_timely_dec_cnt = 0; 3603 } 3604 /* Do we do any clamping at exit? */ 3605 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3606 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3607 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3608 } 3609 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3610 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3611 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3612 } 3613 /* 3614 * Lets set rtt_diff to 0, so that we will get a "boost" 3615 * after exiting. 3616 */ 3617 rack->r_ctl.rc_rtt_diff = 0; 3618 3619 /* Clear all flags so we start fresh */ 3620 rack->rc_tp->t_bytes_acked = 0; 3621 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 3622 /* 3623 * If configured to, set the cwnd and ssthresh to 3624 * our targets. 3625 */ 3626 if (rack_probe_rtt_sets_cwnd) { 3627 uint64_t ebdp; 3628 uint32_t setto; 3629 3630 /* Set ssthresh so we get into CA once we hit our target */ 3631 if (rack_probertt_use_min_rtt_exit == 1) { 3632 /* Set to min rtt */ 3633 rack_set_prtt_target(rack, segsiz, 3634 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3635 } else if (rack_probertt_use_min_rtt_exit == 2) { 3636 /* Set to current gp rtt */ 3637 rack_set_prtt_target(rack, segsiz, 3638 rack->r_ctl.rc_gp_srtt); 3639 } else if (rack_probertt_use_min_rtt_exit == 3) { 3640 /* Set to entry gp rtt */ 3641 rack_set_prtt_target(rack, segsiz, 3642 rack->r_ctl.rc_entry_gp_rtt); 3643 } else { 3644 uint64_t sum; 3645 uint32_t setval; 3646 3647 sum = rack->r_ctl.rc_entry_gp_rtt; 3648 sum *= 10; 3649 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3650 if (sum >= 20) { 3651 /* 3652 * A highly buffered path needs 3653 * cwnd space for timely to work. 3654 * Lets set things up as if 3655 * we are heading back here again. 3656 */ 3657 setval = rack->r_ctl.rc_entry_gp_rtt; 3658 } else if (sum >= 15) { 3659 /* 3660 * Lets take the smaller of the 3661 * two since we are just somewhat 3662 * buffered. 3663 */ 3664 setval = rack->r_ctl.rc_gp_srtt; 3665 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3666 setval = rack->r_ctl.rc_entry_gp_rtt; 3667 } else { 3668 /* 3669 * Here we are not highly buffered 3670 * and should pick the min we can to 3671 * keep from causing loss. 
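 * (Worked example of the ratio above, with made-up RTTs: an entry
 *  gp_rtt of 200ms against a current gp_srtt of 50ms gives
 *  sum = 200 * 10 / 50 = 40, i.e. highly buffered, so we target the
 *  entry rtt; an entry of 60ms against the same 50ms gives sum = 12
 *  and we land in this branch and use the filtered min rtt.)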
3672 */ 3673 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3674 } 3675 rack_set_prtt_target(rack, segsiz, 3676 setval); 3677 } 3678 if (rack_probe_rtt_sets_cwnd > 1) { 3679 /* There is a percentage here to boost */ 3680 ebdp = rack->r_ctl.rc_target_probertt_flight; 3681 ebdp *= rack_probe_rtt_sets_cwnd; 3682 ebdp /= 100; 3683 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3684 } else 3685 setto = rack->r_ctl.rc_target_probertt_flight; 3686 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3687 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3688 /* Enforce a min */ 3689 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3690 } 3691 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3692 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3693 } 3694 rack_log_rtt_shrinks(rack, us_cts, 3695 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3696 __LINE__, RACK_RTTS_EXITPROBE); 3697 /* Clear times last so log has all the info */ 3698 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3699 rack->r_ctl.rc_time_probertt_entered = us_cts; 3700 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3701 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3702 } 3703 3704 static void 3705 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3706 { 3707 /* Check in on probe-rtt */ 3708 if (rack->rc_gp_filled == 0) { 3709 /* We do not do p-rtt unless we have gp measurements */ 3710 return; 3711 } 3712 if (rack->in_probe_rtt) { 3713 uint64_t no_overflow; 3714 uint32_t endtime, must_stay; 3715 3716 if (rack->r_ctl.rc_went_idle_time && 3717 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3718 /* 3719 * We went idle during prtt, just exit now. 3720 */ 3721 rack_exit_probertt(rack, us_cts); 3722 } else if (rack_probe_rtt_safety_val && 3723 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3724 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3725 /* 3726 * Probe RTT safety value triggered! 3727 */ 3728 rack_log_rtt_shrinks(rack, us_cts, 3729 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3730 __LINE__, RACK_RTTS_SAFETY); 3731 rack_exit_probertt(rack, us_cts); 3732 } 3733 /* Calculate the max we will wait */ 3734 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3735 if (rack->rc_highly_buffered) 3736 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3737 /* Calculate the min we must wait */ 3738 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3739 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3740 TSTMP_LT(us_cts, endtime)) { 3741 uint32_t calc; 3742 /* Do we lower more? 
*/ 3743 no_exit: 3744 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3745 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3746 else 3747 calc = 0; 3748 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3749 if (calc) { 3750 /* Maybe */ 3751 calc *= rack_per_of_gp_probertt_reduce; 3752 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3753 /* Limit it too */ 3754 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3755 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3756 } 3757 /* We must reach target or the time set */ 3758 return; 3759 } 3760 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3761 if ((TSTMP_LT(us_cts, must_stay) && 3762 rack->rc_highly_buffered) || 3763 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3764 rack->r_ctl.rc_target_probertt_flight)) { 3765 /* We are not past the must_stay time */ 3766 goto no_exit; 3767 } 3768 rack_log_rtt_shrinks(rack, us_cts, 3769 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3770 __LINE__, RACK_RTTS_REACHTARGET); 3771 rack->r_ctl.rc_time_probertt_starts = us_cts; 3772 if (rack->r_ctl.rc_time_probertt_starts == 0) 3773 rack->r_ctl.rc_time_probertt_starts = 1; 3774 /* Restore back to our rate we want to pace at in prtt */ 3775 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3776 } 3777 /* 3778 * Setup our end time, some number of gp_srtts plus 200ms. 3779 */ 3780 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3781 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3782 if (rack_probertt_gpsrtt_cnt_div) 3783 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3784 else 3785 endtime = 0; 3786 endtime += rack_min_probertt_hold; 3787 endtime += rack->r_ctl.rc_time_probertt_starts; 3788 if (TSTMP_GEQ(us_cts, endtime)) { 3789 /* yes, exit probertt */ 3790 rack_exit_probertt(rack, us_cts); 3791 } 3792 3793 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3794 /* Go into probertt, its been too long since we went lower */ 3795 rack_enter_probertt(rack, us_cts); 3796 } 3797 } 3798 3799 static void 3800 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3801 uint32_t rtt, int32_t rtt_diff) 3802 { 3803 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3804 uint32_t losses; 3805 3806 if ((rack->rc_gp_dyn_mul == 0) || 3807 (rack->use_fixed_rate) || 3808 (rack->in_probe_rtt) || 3809 (rack->rc_always_pace == 0)) { 3810 /* No dynamic GP multiplier in play */ 3811 return; 3812 } 3813 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3814 cur_bw = rack_get_bw(rack); 3815 /* Calculate our up and down range */ 3816 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3817 up_bnd /= 100; 3818 up_bnd += rack->r_ctl.last_gp_comp_bw; 3819 3820 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3821 subfr /= 100; 3822 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3823 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3824 /* 3825 * This is the case where our RTT is above 3826 * the max target and we have been configured 3827 * to just do timely no bonus up stuff in that case. 3828 * 3829 * There are two configurations, set to 1, and we 3830 * just do timely if we are over our max. If its 3831 * set above 1 then we slam the multipliers down 3832 * to 100 and then decrement per timely. 
3833 */ 3834 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3835 __LINE__, 3); 3836 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3837 rack_validate_multipliers_at_or_below_100(rack); 3838 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3839 } else if ((last_bw_est < low_bnd) && !losses) { 3840 /* 3841 * We are decreasing this is a bit complicated this 3842 * means we are loosing ground. This could be 3843 * because another flow entered and we are competing 3844 * for b/w with it. This will push the RTT up which 3845 * makes timely unusable unless we want to get shoved 3846 * into a corner and just be backed off (the age 3847 * old problem with delay based CC). 3848 * 3849 * On the other hand if it was a route change we 3850 * would like to stay somewhat contained and not 3851 * blow out the buffers. 3852 */ 3853 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3854 __LINE__, 3); 3855 rack->r_ctl.last_gp_comp_bw = cur_bw; 3856 if (rack->rc_gp_bwred == 0) { 3857 /* Go into reduction counting */ 3858 rack->rc_gp_bwred = 1; 3859 rack->rc_gp_timely_dec_cnt = 0; 3860 } 3861 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3862 (timely_says == 0)) { 3863 /* 3864 * Push another time with a faster pacing 3865 * to try to gain back (we include override to 3866 * get a full raise factor). 3867 */ 3868 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3869 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3870 (timely_says == 0) || 3871 (rack_down_raise_thresh == 0)) { 3872 /* 3873 * Do an override up in b/w if we were 3874 * below the threshold or if the threshold 3875 * is zero we always do the raise. 3876 */ 3877 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3878 } else { 3879 /* Log it stays the same */ 3880 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3881 __LINE__, 11); 3882 } 3883 rack->rc_gp_timely_dec_cnt++; 3884 /* We are not incrementing really no-count */ 3885 rack->rc_gp_incr = 0; 3886 rack->rc_gp_timely_inc_cnt = 0; 3887 } else { 3888 /* 3889 * Lets just use the RTT 3890 * information and give up 3891 * pushing. 3892 */ 3893 goto use_timely; 3894 } 3895 } else if ((timely_says != 2) && 3896 !losses && 3897 (last_bw_est > up_bnd)) { 3898 /* 3899 * We are increasing b/w lets keep going, updating 3900 * our b/w and ignoring any timely input, unless 3901 * of course we are at our max raise (if there is one). 3902 */ 3903 3904 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3905 __LINE__, 3); 3906 rack->r_ctl.last_gp_comp_bw = cur_bw; 3907 if (rack->rc_gp_saw_ss && 3908 rack_per_upper_bound_ss && 3909 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3910 /* 3911 * In cases where we can't go higher 3912 * we should just use timely. 3913 */ 3914 goto use_timely; 3915 } 3916 if (rack->rc_gp_saw_ca && 3917 rack_per_upper_bound_ca && 3918 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 3919 /* 3920 * In cases where we can't go higher 3921 * we should just use timely. 
3922 */ 3923 goto use_timely; 3924 } 3925 rack->rc_gp_bwred = 0; 3926 rack->rc_gp_timely_dec_cnt = 0; 3927 /* You get a set number of pushes if timely is trying to reduce */ 3928 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 3929 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3930 } else { 3931 /* Log it stays the same */ 3932 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 3933 __LINE__, 12); 3934 } 3935 return; 3936 } else { 3937 /* 3938 * We are staying between the lower and upper range bounds 3939 * so use timely to decide. 3940 */ 3941 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3942 __LINE__, 3); 3943 use_timely: 3944 if (timely_says) { 3945 rack->rc_gp_incr = 0; 3946 rack->rc_gp_timely_inc_cnt = 0; 3947 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 3948 !losses && 3949 (last_bw_est < low_bnd)) { 3950 /* We are loosing ground */ 3951 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3952 rack->rc_gp_timely_dec_cnt++; 3953 /* We are not incrementing really no-count */ 3954 rack->rc_gp_incr = 0; 3955 rack->rc_gp_timely_inc_cnt = 0; 3956 } else 3957 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3958 } else { 3959 rack->rc_gp_bwred = 0; 3960 rack->rc_gp_timely_dec_cnt = 0; 3961 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3962 } 3963 } 3964 } 3965 3966 static int32_t 3967 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 3968 { 3969 int32_t timely_says; 3970 uint64_t log_mult, log_rtt_a_diff; 3971 3972 log_rtt_a_diff = rtt; 3973 log_rtt_a_diff <<= 32; 3974 log_rtt_a_diff |= (uint32_t)rtt_diff; 3975 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 3976 rack_gp_rtt_maxmul)) { 3977 /* Reduce the b/w multiplier */ 3978 timely_says = 2; 3979 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3980 log_mult <<= 32; 3981 log_mult |= prev_rtt; 3982 rack_log_timely(rack, timely_says, log_mult, 3983 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3984 log_rtt_a_diff, __LINE__, 4); 3985 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 3986 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 3987 max(rack_gp_rtt_mindiv , 1)))) { 3988 /* Increase the b/w multiplier */ 3989 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 3990 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 3991 max(rack_gp_rtt_mindiv , 1)); 3992 log_mult <<= 32; 3993 log_mult |= prev_rtt; 3994 timely_says = 0; 3995 rack_log_timely(rack, timely_says, log_mult , 3996 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3997 log_rtt_a_diff, __LINE__, 5); 3998 } else { 3999 /* 4000 * Use a gradient to find it the timely gradient 4001 * is: 4002 * grad = rc_rtt_diff / min_rtt; 4003 * 4004 * anything below or equal to 0 will be 4005 * a increase indication. Anything above 4006 * zero is a decrease. Note we take care 4007 * of the actual gradient calculation 4008 * in the reduction (its not needed for 4009 * increase). 
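 * (Illustration with assumed values: with a filtered min rtt of
 *  20ms and a rack_gp_rtt_maxmul of 3, any gp_srtt of 60ms or more
 *  already returned 2 (hard decrease) above; down here a zero or
 *  negative rc_rtt_diff returns 0 (increase) and a positive one
 *  returns 1 (decrease), with the actual gradient math deferred to
 *  rack_get_decrease().)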
4010 */ 4011 log_mult = prev_rtt; 4012 if (rtt_diff <= 0) { 4013 /* 4014 * Rttdiff is less than zero, increase the 4015 * b/w multiplier (its 0 or negative) 4016 */ 4017 timely_says = 0; 4018 rack_log_timely(rack, timely_says, log_mult, 4019 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4020 } else { 4021 /* Reduce the b/w multiplier */ 4022 timely_says = 1; 4023 rack_log_timely(rack, timely_says, log_mult, 4024 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4025 } 4026 } 4027 return (timely_says); 4028 } 4029 4030 static void 4031 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4032 tcp_seq th_ack, int line, uint8_t quality) 4033 { 4034 uint64_t tim, bytes_ps, ltim, stim, utim; 4035 uint32_t segsiz, bytes, reqbytes, us_cts; 4036 int32_t gput, new_rtt_diff, timely_says; 4037 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4038 int did_add = 0; 4039 4040 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4041 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4042 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4043 tim = us_cts - tp->gput_ts; 4044 else 4045 tim = 0; 4046 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4047 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4048 else 4049 stim = 0; 4050 /* 4051 * Use the larger of the send time or ack time. This prevents us 4052 * from being influenced by ack artifacts to come up with too 4053 * high of measurement. Note that since we are spanning over many more 4054 * bytes in most of our measurements hopefully that is less likely to 4055 * occur. 4056 */ 4057 if (tim > stim) 4058 utim = max(tim, 1); 4059 else 4060 utim = max(stim, 1); 4061 /* Lets get a msec time ltim too for the old stuff */ 4062 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4063 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4064 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4065 if ((tim == 0) && (stim == 0)) { 4066 /* 4067 * Invalid measurement time, maybe 4068 * all on one ack/one send? 4069 */ 4070 bytes = 0; 4071 bytes_ps = 0; 4072 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4073 0, 0, 0, 10, __LINE__, NULL, quality); 4074 goto skip_measurement; 4075 } 4076 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4077 /* We never made a us_rtt measurement? */ 4078 bytes = 0; 4079 bytes_ps = 0; 4080 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4081 0, 0, 0, 10, __LINE__, NULL, quality); 4082 goto skip_measurement; 4083 } 4084 /* 4085 * Calculate the maximum possible b/w this connection 4086 * could have. We base our calculation on the lowest 4087 * rtt we have seen during the measurement and the 4088 * largest rwnd the client has given us in that time. This 4089 * forms a BDP that is the maximum that we could ever 4090 * get to the client. Anything larger is not valid. 4091 * 4092 * I originally had code here that rejected measurements 4093 * where the time was less than 1/2 the latest us_rtt. 4094 * But after thinking on that I realized its wrong since 4095 * say you had a 150Mbps or even 1Gbps link, and you 4096 * were a long way away.. example I am in Europe (100ms rtt) 4097 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4098 * bytes my time would be 1.2ms, and yet my rtt would say 4099 * the measurement was invalid the time was < 50ms. The 4100 * same thing is true for 150Mb (8ms of time). 
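 * (The arithmetic: 150,000 bytes is 1,200,000 bits, which drains in
 *  1.2ms at 1Gbps and in 8ms at 150Mbps, both far below the 50ms
 *  that an rtt/2 test against a 100ms path would have demanded.)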
4101 * 4102 * A better way I realized is to look at what the maximum 4103 * the connection could possibly do. This is gated on 4104 * the lowest RTT we have seen and the highest rwnd. 4105 * We should in theory never exceed that, if we are 4106 * then something on the path is storing up packets 4107 * and then feeding them all at once to our endpoint 4108 * messing up our measurement. 4109 */ 4110 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4111 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4112 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4113 if (SEQ_LT(th_ack, tp->gput_seq)) { 4114 /* No measurement can be made */ 4115 bytes = 0; 4116 bytes_ps = 0; 4117 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4118 0, 0, 0, 10, __LINE__, NULL, quality); 4119 goto skip_measurement; 4120 } else 4121 bytes = (th_ack - tp->gput_seq); 4122 bytes_ps = (uint64_t)bytes; 4123 /* 4124 * Don't measure a b/w for pacing unless we have gotten at least 4125 * an initial windows worth of data in this measurement interval. 4126 * 4127 * Small numbers of bytes get badly influenced by delayed ack and 4128 * other artifacts. Note we take the initial window or our 4129 * defined minimum GP (defaulting to 10 which hopefully is the 4130 * IW). 4131 */ 4132 if (rack->rc_gp_filled == 0) { 4133 /* 4134 * The initial estimate is special. We 4135 * have blasted out an IW worth of packets 4136 * without a real valid ack ts results. We 4137 * then setup the app_limited_needs_set flag, 4138 * this should get the first ack in (probably 2 4139 * MSS worth) to be recorded as the timestamp. 4140 * We thus allow a smaller number of bytes i.e. 4141 * IW - 2MSS. 4142 */ 4143 reqbytes -= (2 * segsiz); 4144 /* Also lets fill previous for our first measurement to be neutral */ 4145 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4146 } 4147 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4148 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4149 rack->r_ctl.rc_app_limited_cnt, 4150 0, 0, 10, __LINE__, NULL, quality); 4151 goto skip_measurement; 4152 } 4153 /* 4154 * We now need to calculate the Timely like status so 4155 * we can update (possibly) the b/w multipliers. 4156 */ 4157 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4158 if (rack->rc_gp_filled == 0) { 4159 /* No previous reading */ 4160 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4161 } else { 4162 if (rack->measure_saw_probe_rtt == 0) { 4163 /* 4164 * We don't want a probertt to be counted 4165 * since it will be negative incorrectly. We 4166 * expect to be reducing the RTT when we 4167 * pace at a slower rate. 4168 */ 4169 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4170 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4171 } 4172 } 4173 timely_says = rack_make_timely_judgement(rack, 4174 rack->r_ctl.rc_gp_srtt, 4175 rack->r_ctl.rc_rtt_diff, 4176 rack->r_ctl.rc_prev_gp_srtt 4177 ); 4178 bytes_ps *= HPTS_USEC_IN_SEC; 4179 bytes_ps /= utim; 4180 if (bytes_ps > rack->r_ctl.last_max_bw) { 4181 /* 4182 * Something is on path playing 4183 * since this b/w is not possible based 4184 * on our BDP (highest rwnd and lowest rtt 4185 * we saw in the measurement window). 4186 * 4187 * Another option here would be to 4188 * instead skip the measurement. 
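 * (For scale, with made-up numbers: a peak rwnd of 1,000,000 bytes
 *  seen over a lowest rtt of 10,000 usecs puts last_max_bw at
 *  100,000,000 bytes/sec, and any sample above that is clamped to
 *  it just below.)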
4189 */ 4190 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4191 bytes_ps, rack->r_ctl.last_max_bw, 0, 4192 11, __LINE__, NULL, quality); 4193 bytes_ps = rack->r_ctl.last_max_bw; 4194 } 4195 /* We store gp for b/w in bytes per second */ 4196 if (rack->rc_gp_filled == 0) { 4197 /* Initial measurement */ 4198 if (bytes_ps) { 4199 rack->r_ctl.gp_bw = bytes_ps; 4200 rack->rc_gp_filled = 1; 4201 rack->r_ctl.num_measurements = 1; 4202 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4203 } else { 4204 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4205 rack->r_ctl.rc_app_limited_cnt, 4206 0, 0, 10, __LINE__, NULL, quality); 4207 } 4208 if (tcp_in_hpts(rack->rc_inp) && 4209 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4210 /* 4211 * Ok we can't trust the pacer in this case 4212 * where we transition from un-paced to paced. 4213 * Or for that matter when the burst mitigation 4214 * was making a wild guess and got it wrong. 4215 * Stop the pacer and clear up all the aggregate 4216 * delays etc. 4217 */ 4218 tcp_hpts_remove(rack->rc_inp); 4219 rack->r_ctl.rc_hpts_flags = 0; 4220 rack->r_ctl.rc_last_output_to = 0; 4221 } 4222 did_add = 2; 4223 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4224 /* Still a small number, run an average */ 4225 rack->r_ctl.gp_bw += bytes_ps; 4226 addpart = rack->r_ctl.num_measurements; 4227 rack->r_ctl.num_measurements++; 4228 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4229 /* We have collected enough to move forward */ 4230 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4231 } 4232 did_add = 3; 4233 } else { 4234 /* 4235 * We want to take 1/wma of the goodput and add it in to 7/8th 4236 * of the old value weighted by the srtt. So if your measurement 4237 * period is say 2 SRTT's long you would get 1/4 as the 4238 * value; if it was more like 1/2 an SRTT then you would get 1/16th. 4239 * 4240 * But we must be careful not to take too much i.e. if the 4241 * srtt is say 20ms and the measurement is taken over 4242 * 400ms our weight would be 400/20 i.e. 20. On the 4243 * other hand if we get a measurement over 1ms with a 4244 * 10ms rtt we only want to take a much smaller portion. 4245 */ 4246 if (rack->r_ctl.num_measurements < 0xff) { 4247 rack->r_ctl.num_measurements++; 4248 } 4249 srtt = (uint64_t)tp->t_srtt; 4250 if (srtt == 0) { 4251 /* 4252 * Strange, why did t_srtt go back to zero? 4253 */ 4254 if (rack->r_ctl.rc_rack_min_rtt) 4255 srtt = rack->r_ctl.rc_rack_min_rtt; 4256 else 4257 srtt = HPTS_USEC_IN_MSEC; 4258 } 4259 /* 4260 * XXXrrs: Note for reviewers, in playing with 4261 * dynamic pacing I discovered this GP calculation 4262 * as done originally leads to some undesired results. 4263 * Basically you can get longer measurements contributing 4264 * too much to the WMA. Thus I changed it, if you are doing 4265 * dynamic adjustments, to only do the apportioned adjustment 4266 * if we have a very small (time wise) measurement. Longer 4267 * measurements just get their weight (defaulting to 1/8) 4268 * added to the WMA. We may want to think about changing 4269 * this to always do that for both sides i.e. dynamic 4270 * and non-dynamic... but considering lots of folks 4271 * were playing with this I did not want to change the 4272 * calculation per se without your thoughts.. Lawrence? 4273 * Peter??
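 * (Concretely, in the dynamic case below with rack_wma_divisor at
 *  its notional 8: a sample spanning half an srtt replaces only
 *  utim/(srtt * 8) = 1/16 of the running gp_bw, while one spanning
 *  two srtts or more contributes the flat 1/8.)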
4274 */ 4275 if (rack->rc_gp_dyn_mul == 0) { 4276 subpart = rack->r_ctl.gp_bw * utim; 4277 subpart /= (srtt * 8); 4278 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4279 /* 4280 * The b/w update takes no more 4281 * away then 1/2 our running total 4282 * so factor it in. 4283 */ 4284 addpart = bytes_ps * utim; 4285 addpart /= (srtt * 8); 4286 } else { 4287 /* 4288 * Don't allow a single measurement 4289 * to account for more than 1/2 of the 4290 * WMA. This could happen on a retransmission 4291 * where utim becomes huge compared to 4292 * srtt (multiple retransmissions when using 4293 * the sending rate which factors in all the 4294 * transmissions from the first one). 4295 */ 4296 subpart = rack->r_ctl.gp_bw / 2; 4297 addpart = bytes_ps / 2; 4298 } 4299 resid_bw = rack->r_ctl.gp_bw - subpart; 4300 rack->r_ctl.gp_bw = resid_bw + addpart; 4301 did_add = 1; 4302 } else { 4303 if ((utim / srtt) <= 1) { 4304 /* 4305 * The b/w update was over a small period 4306 * of time. The idea here is to prevent a small 4307 * measurement time period from counting 4308 * too much. So we scale it based on the 4309 * time so it attributes less than 1/rack_wma_divisor 4310 * of its measurement. 4311 */ 4312 subpart = rack->r_ctl.gp_bw * utim; 4313 subpart /= (srtt * rack_wma_divisor); 4314 addpart = bytes_ps * utim; 4315 addpart /= (srtt * rack_wma_divisor); 4316 } else { 4317 /* 4318 * The scaled measurement was long 4319 * enough so lets just add in the 4320 * portion of the measurement i.e. 1/rack_wma_divisor 4321 */ 4322 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4323 addpart = bytes_ps / rack_wma_divisor; 4324 } 4325 if ((rack->measure_saw_probe_rtt == 0) || 4326 (bytes_ps > rack->r_ctl.gp_bw)) { 4327 /* 4328 * For probe-rtt we only add it in 4329 * if its larger, all others we just 4330 * add in. 4331 */ 4332 did_add = 1; 4333 resid_bw = rack->r_ctl.gp_bw - subpart; 4334 rack->r_ctl.gp_bw = resid_bw + addpart; 4335 } 4336 } 4337 } 4338 if ((rack->gp_ready == 0) && 4339 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4340 /* We have enough measurements now */ 4341 rack->gp_ready = 1; 4342 rack_set_cc_pacing(rack); 4343 if (rack->defer_options) 4344 rack_apply_deferred_options(rack); 4345 } 4346 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4347 rack_get_bw(rack), 22, did_add, NULL, quality); 4348 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4349 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4350 rack_update_multiplier(rack, timely_says, bytes_ps, 4351 rack->r_ctl.rc_gp_srtt, 4352 rack->r_ctl.rc_rtt_diff); 4353 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4354 rack_get_bw(rack), 3, line, NULL, quality); 4355 /* reset the gp srtt and setup the new prev */ 4356 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4357 /* Record the lost count for the next measurement */ 4358 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4359 /* 4360 * We restart our diffs based on the gpsrtt in the 4361 * measurement window. 4362 */ 4363 rack->rc_gp_rtt_set = 0; 4364 rack->rc_gp_saw_rec = 0; 4365 rack->rc_gp_saw_ca = 0; 4366 rack->rc_gp_saw_ss = 0; 4367 rack->rc_dragged_bottom = 0; 4368 skip_measurement: 4369 4370 #ifdef STATS 4371 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4372 gput); 4373 /* 4374 * XXXLAS: This is a temporary hack, and should be 4375 * chained off VOI_TCP_GPUT when stats(9) grows an 4376 * API to deal with chained VOIs. 
4377 */ 4378 if (tp->t_stats_gput_prev > 0) 4379 stats_voi_update_abs_s32(tp->t_stats, 4380 VOI_TCP_GPUT_ND, 4381 ((gput - tp->t_stats_gput_prev) * 100) / 4382 tp->t_stats_gput_prev); 4383 #endif 4384 tp->t_flags &= ~TF_GPUTINPROG; 4385 tp->t_stats_gput_prev = gput; 4386 /* 4387 * Now are we app limited now and there is space from where we 4388 * were to where we want to go? 4389 * 4390 * We don't do the other case i.e. non-applimited here since 4391 * the next send will trigger us picking up the missing data. 4392 */ 4393 if (rack->r_ctl.rc_first_appl && 4394 TCPS_HAVEESTABLISHED(tp->t_state) && 4395 rack->r_ctl.rc_app_limited_cnt && 4396 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4397 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4398 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4399 /* 4400 * Yep there is enough outstanding to make a measurement here. 4401 */ 4402 struct rack_sendmap *rsm, fe; 4403 4404 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4405 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4406 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4407 rack->app_limited_needs_set = 0; 4408 tp->gput_seq = th_ack; 4409 if (rack->in_probe_rtt) 4410 rack->measure_saw_probe_rtt = 1; 4411 else if ((rack->measure_saw_probe_rtt) && 4412 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4413 rack->measure_saw_probe_rtt = 0; 4414 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4415 /* There is a full window to gain info from */ 4416 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4417 } else { 4418 /* We can only measure up to the applimited point */ 4419 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4420 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4421 /* 4422 * We don't have enough to make a measurement. 4423 */ 4424 tp->t_flags &= ~TF_GPUTINPROG; 4425 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4426 0, 0, 0, 6, __LINE__, NULL, quality); 4427 return; 4428 } 4429 } 4430 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4431 /* 4432 * We will get no more data into the SB 4433 * this means we need to have the data available 4434 * before we start a measurement. 4435 */ 4436 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4437 /* Nope not enough data. */ 4438 return; 4439 } 4440 } 4441 tp->t_flags |= TF_GPUTINPROG; 4442 /* 4443 * Now we need to find the timestamp of the send at tp->gput_seq 4444 * for the send based measurement. 4445 */ 4446 fe.r_start = tp->gput_seq; 4447 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4448 if (rsm) { 4449 /* Ok send-based limit is set */ 4450 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4451 /* 4452 * Move back to include the earlier part 4453 * so our ack time lines up right (this may 4454 * make an overlapping measurement but thats 4455 * ok). 4456 */ 4457 tp->gput_seq = rsm->r_start; 4458 } 4459 if (rsm->r_flags & RACK_ACKED) 4460 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4461 else 4462 rack->app_limited_needs_set = 1; 4463 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4464 } else { 4465 /* 4466 * If we don't find the rsm due to some 4467 * send-limit set the current time, which 4468 * basically disables the send-limit. 
4469 */ 4470 struct timeval tv; 4471 4472 microuptime(&tv); 4473 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4474 } 4475 rack_log_pacing_delay_calc(rack, 4476 tp->gput_seq, 4477 tp->gput_ack, 4478 (uint64_t)rsm, 4479 tp->gput_ts, 4480 rack->r_ctl.rc_app_limited_cnt, 4481 9, 4482 __LINE__, NULL, quality); 4483 } 4484 } 4485 4486 /* 4487 * CC wrapper hook functions 4488 */ 4489 static void 4490 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4491 uint16_t type, int32_t recovery) 4492 { 4493 uint32_t prior_cwnd, acked; 4494 struct tcp_log_buffer *lgb = NULL; 4495 uint8_t labc_to_use, quality; 4496 4497 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4498 tp->t_ccv.nsegs = nsegs; 4499 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 4500 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4501 uint32_t max; 4502 4503 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4504 if (tp->t_ccv.bytes_this_ack > max) { 4505 tp->t_ccv.bytes_this_ack = max; 4506 } 4507 } 4508 #ifdef STATS 4509 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4510 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4511 #endif 4512 quality = RACK_QUALITY_NONE; 4513 if ((tp->t_flags & TF_GPUTINPROG) && 4514 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4515 /* Measure the Goodput */ 4516 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4517 #ifdef NETFLIX_PEAKRATE 4518 if ((type == CC_ACK) && 4519 (tp->t_maxpeakrate)) { 4520 /* 4521 * We update t_peakrate_thr. This gives us roughly 4522 * one update per round trip time. Note 4523 * it will only be used if pace_always is off i.e 4524 * we don't do this for paced flows. 4525 */ 4526 rack_update_peakrate_thr(tp); 4527 } 4528 #endif 4529 } 4530 /* Which way our we limited, if not cwnd limited no advance in CA */ 4531 if (tp->snd_cwnd <= tp->snd_wnd) 4532 tp->t_ccv.flags |= CCF_CWND_LIMITED; 4533 else 4534 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 4535 if (tp->snd_cwnd > tp->snd_ssthresh) { 4536 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 4537 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4538 /* For the setting of a window past use the actual scwnd we are using */ 4539 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4540 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4541 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 4542 } 4543 } else { 4544 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4545 tp->t_bytes_acked = 0; 4546 } 4547 prior_cwnd = tp->snd_cwnd; 4548 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4549 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4550 labc_to_use = rack->rc_labc; 4551 else 4552 labc_to_use = rack_max_abc_post_recovery; 4553 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 4554 union tcp_log_stackspecific log; 4555 struct timeval tv; 4556 4557 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4558 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4559 log.u_bbr.flex1 = th_ack; 4560 log.u_bbr.flex2 = tp->t_ccv.flags; 4561 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4562 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4563 log.u_bbr.flex5 = labc_to_use; 4564 log.u_bbr.flex6 = prior_cwnd; 4565 log.u_bbr.flex7 = V_tcp_do_newsack; 4566 log.u_bbr.flex8 = 1; 4567 lgb = tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4568 0, &log, false, NULL, NULL, 0, &tv); 4569 } 4570 if (CC_ALGO(tp)->ack_received != NULL) { 4571 /* XXXLAS: Find a way to live without this */ 4572 tp->t_ccv.curack = th_ack; 4573 
tp->t_ccv.labc = labc_to_use; 4574 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 4575 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 4576 } 4577 if (lgb) { 4578 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4579 } 4580 if (rack->r_must_retran) { 4581 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4582 /* 4583 * We now are beyond the rxt point so lets disable 4584 * the flag. 4585 */ 4586 rack->r_ctl.rc_out_at_rto = 0; 4587 rack->r_must_retran = 0; 4588 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4589 /* 4590 * Only decrement the rc_out_at_rto if the cwnd advances 4591 * at least a whole segment. Otherwise next time the peer 4592 * acks, we won't be able to send this generaly happens 4593 * when we are in Congestion Avoidance. 4594 */ 4595 if (acked <= rack->r_ctl.rc_out_at_rto){ 4596 rack->r_ctl.rc_out_at_rto -= acked; 4597 } else { 4598 rack->r_ctl.rc_out_at_rto = 0; 4599 } 4600 } 4601 } 4602 #ifdef STATS 4603 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4604 #endif 4605 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4606 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4607 } 4608 #ifdef NETFLIX_PEAKRATE 4609 /* we enforce max peak rate if it is set and we are not pacing */ 4610 if ((rack->rc_always_pace == 0) && 4611 tp->t_peakrate_thr && 4612 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4613 tp->snd_cwnd = tp->t_peakrate_thr; 4614 } 4615 #endif 4616 } 4617 4618 static void 4619 tcp_rack_partialack(struct tcpcb *tp) 4620 { 4621 struct tcp_rack *rack; 4622 4623 rack = (struct tcp_rack *)tp->t_fb_ptr; 4624 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4625 /* 4626 * If we are doing PRR and have enough 4627 * room to send <or> we are pacing and prr 4628 * is disabled we will want to see if we 4629 * can send data (by setting r_wanted_output to 4630 * true). 4631 */ 4632 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4633 rack->rack_no_prr) 4634 rack->r_wanted_output = 1; 4635 } 4636 4637 static void 4638 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4639 { 4640 struct tcp_rack *rack; 4641 uint32_t orig_cwnd; 4642 4643 orig_cwnd = tp->snd_cwnd; 4644 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4645 rack = (struct tcp_rack *)tp->t_fb_ptr; 4646 /* only alert CC if we alerted when we entered */ 4647 if (CC_ALGO(tp)->post_recovery != NULL) { 4648 tp->t_ccv.curack = th_ack; 4649 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 4650 if (tp->snd_cwnd < tp->snd_ssthresh) { 4651 /* 4652 * Rack has burst control and pacing 4653 * so lets not set this any lower than 4654 * snd_ssthresh per RFC-6582 (option 2). 
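 * (RFC 6582, section 3.2, gives two choices for deflating cwnd when
 *  recovery completes; option 2 is simply cwnd = ssthresh, which is
 *  what we enforce as a floor here, since pacing and burst control
 *  keep the resulting send from turning into a line-rate burst.)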
4655 */ 4656 tp->snd_cwnd = tp->snd_ssthresh; 4657 } 4658 } 4659 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 4660 union tcp_log_stackspecific log; 4661 struct timeval tv; 4662 4663 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4664 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4665 log.u_bbr.flex1 = th_ack; 4666 log.u_bbr.flex2 = tp->t_ccv.flags; 4667 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4668 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4669 log.u_bbr.flex5 = V_tcp_abc_l_var; 4670 log.u_bbr.flex6 = orig_cwnd; 4671 log.u_bbr.flex7 = V_tcp_do_newsack; 4672 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4673 log.u_bbr.flex8 = 2; 4674 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4675 0, &log, false, NULL, NULL, 0, &tv); 4676 } 4677 if ((rack->rack_no_prr == 0) && 4678 (rack->no_prr_addback == 0) && 4679 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4680 /* 4681 * Suck the next prr cnt back into cwnd, but 4682 * only do that if we are not application limited. 4683 */ 4684 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 4685 /* 4686 * We are allowed to add back to the cwnd the amount we did 4687 * not get out if: 4688 * a) no_prr_addback is off. 4689 * b) we are not app limited 4690 * c) we are doing prr 4691 * <and> 4692 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4693 */ 4694 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4695 rack->r_ctl.rc_prr_sndcnt); 4696 } 4697 rack->r_ctl.rc_prr_sndcnt = 0; 4698 rack_log_to_prr(rack, 1, 0, __LINE__); 4699 } 4700 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4701 tp->snd_recover = tp->snd_una; 4702 if (rack->r_ctl.dsack_persist) { 4703 rack->r_ctl.dsack_persist--; 4704 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4705 rack->r_ctl.num_dsack = 0; 4706 } 4707 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4708 } 4709 EXIT_RECOVERY(tp->t_flags); 4710 } 4711 4712 static void 4713 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4714 { 4715 struct tcp_rack *rack; 4716 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4717 4718 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4719 #ifdef STATS 4720 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4721 #endif 4722 if (IN_RECOVERY(tp->t_flags) == 0) { 4723 in_rec_at_entry = 0; 4724 ssthresh_enter = tp->snd_ssthresh; 4725 cwnd_enter = tp->snd_cwnd; 4726 } else 4727 in_rec_at_entry = 1; 4728 rack = (struct tcp_rack *)tp->t_fb_ptr; 4729 switch (type) { 4730 case CC_NDUPACK: 4731 tp->t_flags &= ~TF_WASFRECOVERY; 4732 tp->t_flags &= ~TF_WASCRECOVERY; 4733 if (!IN_FASTRECOVERY(tp->t_flags)) { 4734 rack->r_ctl.rc_prr_delivered = 0; 4735 rack->r_ctl.rc_prr_out = 0; 4736 if (rack->rack_no_prr == 0) { 4737 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4738 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4739 } 4740 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4741 tp->snd_recover = tp->snd_max; 4742 if (tp->t_flags2 & TF2_ECN_PERMIT) 4743 tp->t_flags2 |= TF2_ECN_SND_CWR; 4744 } 4745 break; 4746 case CC_ECN: 4747 if (!IN_CONGRECOVERY(tp->t_flags) || 4748 /* 4749 * Allow ECN reaction on ACK to CWR, if 4750 * that data segment was also CE marked. 
4751 */ 4752 SEQ_GEQ(ack, tp->snd_recover)) { 4753 EXIT_CONGRECOVERY(tp->t_flags); 4754 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4755 tp->snd_recover = tp->snd_max + 1; 4756 if (tp->t_flags2 & TF2_ECN_PERMIT) 4757 tp->t_flags2 |= TF2_ECN_SND_CWR; 4758 } 4759 break; 4760 case CC_RTO: 4761 tp->t_dupacks = 0; 4762 tp->t_bytes_acked = 0; 4763 EXIT_RECOVERY(tp->t_flags); 4764 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4765 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4766 orig_cwnd = tp->snd_cwnd; 4767 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4768 rack_log_to_prr(rack, 16, orig_cwnd, line); 4769 if (tp->t_flags2 & TF2_ECN_PERMIT) 4770 tp->t_flags2 |= TF2_ECN_SND_CWR; 4771 break; 4772 case CC_RTO_ERR: 4773 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4774 /* RTO was unnecessary, so reset everything. */ 4775 tp->snd_cwnd = tp->snd_cwnd_prev; 4776 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4777 tp->snd_recover = tp->snd_recover_prev; 4778 if (tp->t_flags & TF_WASFRECOVERY) { 4779 ENTER_FASTRECOVERY(tp->t_flags); 4780 tp->t_flags &= ~TF_WASFRECOVERY; 4781 } 4782 if (tp->t_flags & TF_WASCRECOVERY) { 4783 ENTER_CONGRECOVERY(tp->t_flags); 4784 tp->t_flags &= ~TF_WASCRECOVERY; 4785 } 4786 tp->snd_nxt = tp->snd_max; 4787 tp->t_badrxtwin = 0; 4788 break; 4789 } 4790 if ((CC_ALGO(tp)->cong_signal != NULL) && 4791 (type != CC_RTO)){ 4792 tp->t_ccv.curack = ack; 4793 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 4794 } 4795 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4796 rack_log_to_prr(rack, 15, cwnd_enter, line); 4797 rack->r_ctl.dsack_byte_cnt = 0; 4798 rack->r_ctl.retran_during_recovery = 0; 4799 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4800 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4801 rack->r_ent_rec_ns = 1; 4802 } 4803 } 4804 4805 static inline void 4806 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4807 { 4808 uint32_t i_cwnd; 4809 4810 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4811 4812 #ifdef NETFLIX_STATS 4813 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4814 if (tp->t_state == TCPS_ESTABLISHED) 4815 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4816 #endif 4817 if (CC_ALGO(tp)->after_idle != NULL) 4818 CC_ALGO(tp)->after_idle(&tp->t_ccv); 4819 4820 if (tp->snd_cwnd == 1) 4821 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4822 else 4823 i_cwnd = rc_init_window(rack); 4824 4825 /* 4826 * Being idle is no different than the initial window. If the cc 4827 * clamps it down below the initial window raise it to the initial 4828 * window. 4829 */ 4830 if (tp->snd_cwnd < i_cwnd) { 4831 tp->snd_cwnd = i_cwnd; 4832 } 4833 } 4834 4835 /* 4836 * Indicate whether this ack should be delayed. We can delay the ack if 4837 * following conditions are met: 4838 * - There is no delayed ack timer in progress. 4839 * - Our last ack wasn't a 0-sized window. We never want to delay 4840 * the ack that opens up a 0-sized window. 4841 * - LRO wasn't used for this segment. We make sure by checking that the 4842 * segment size is not larger than the MSS. 4843 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4844 * connection. 4845 */ 4846 #define DELAY_ACK(tp, tlen) \ 4847 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4848 ((tp->t_flags & TF_DELACK) == 0) && \ 4849 (tlen <= tp->t_maxseg) && \ 4850 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4851 4852 static struct rack_sendmap * 4853 rack_find_lowest_rsm(struct tcp_rack *rack) 4854 { 4855 struct rack_sendmap *rsm; 4856 4857 /* 4858 * Walk the time-order transmitted list looking for an rsm that is 4859 * not acked. 
This will be the one that was sent the longest time 4860 * ago that is still outstanding. 4861 */ 4862 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4863 if (rsm->r_flags & RACK_ACKED) { 4864 continue; 4865 } 4866 goto finish; 4867 } 4868 finish: 4869 return (rsm); 4870 } 4871 4872 static struct rack_sendmap * 4873 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 4874 { 4875 struct rack_sendmap *prsm; 4876 4877 /* 4878 * Walk the sequence order list backward until we arrive at 4879 * the highest seq not acked. In theory when this is called it 4880 * should be the last segment (though it may not be). 4881 */ 4882 prsm = rsm; 4883 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 4884 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 4885 continue; 4886 } 4887 return (prsm); 4888 } 4889 return (NULL); 4890 } 4891 4892 static uint32_t 4893 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 4894 { 4895 int32_t lro; 4896 uint32_t thresh; 4897 4898 /* 4899 * lro is the flag we use to determine if we have seen reordering. 4900 * If it gets set we have seen reordering. The reorder logic either 4901 * works in one of two ways: 4902 * 4903 * If reorder-fade is configured, then we track the last time we saw 4904 * re-ordering occur. If we reach the point where enough time has 4905 * passed we no longer consider reordering to be occurring. 4906 * 4907 * Or if reorder-fade is 0, then once we see reordering we consider 4908 * the connection to always be subject to reordering and just set lro 4909 * to 1. 4910 * 4911 * In the end if lro is non-zero we add the extra time for 4912 * reordering in. 4913 */ 4914 if (srtt == 0) 4915 srtt = 1; 4916 if (rack->r_ctl.rc_reorder_ts) { 4917 if (rack->r_ctl.rc_reorder_fade) { 4918 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 4919 lro = cts - rack->r_ctl.rc_reorder_ts; 4920 if (lro == 0) { 4921 /* 4922 * No time has passed since the last 4923 * reorder, mark it as reordering. 4924 */ 4925 lro = 1; 4926 } 4927 } else { 4928 /* Negative time? */ 4929 lro = 0; 4930 } 4931 if (lro > rack->r_ctl.rc_reorder_fade) { 4932 /* Turn off reordering seen too */ 4933 rack->r_ctl.rc_reorder_ts = 0; 4934 lro = 0; 4935 } 4936 } else { 4937 /* Reordering does not fade */ 4938 lro = 1; 4939 } 4940 } else { 4941 lro = 0; 4942 } 4943 if (rack->rc_rack_tmr_std_based == 0) { 4944 thresh = srtt + rack->r_ctl.rc_pkt_delay; 4945 } else { 4946 /* Standards based pkt-delay is 1/4 srtt */ 4947 thresh = srtt + (srtt >> 2); 4948 } 4949 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 4950 /* It must be set, if not you get 1/4 rtt */ 4951 if (rack->r_ctl.rc_reorder_shift) 4952 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 4953 else 4954 thresh += (srtt >> 2); 4955 } 4956 if (rack->rc_rack_use_dsack && 4957 lro && 4958 (rack->r_ctl.num_dsack > 0)) { 4959 /* 4960 * We only increase the reordering window if we 4961 * have seen reordering <and> we have a DSACK count.
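 *
 * Worked example with assumed values (illustration only): with
 * srtt = 40000 usecs and num_dsack = 2, the statement below adds
 * 2 * (40000 >> 2) = 20000 usecs to thresh. The result is then
 * capped at 2 * srtt (80000 usecs) and at rack_rto_max further down.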
4962 */ 4963 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 4964 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 4965 } 4966 /* SRTT * 2 is the ceiling */ 4967 if (thresh > (srtt * 2)) { 4968 thresh = srtt * 2; 4969 } 4970 /* And we don't want it above the RTO max either */ 4971 if (thresh > rack_rto_max) { 4972 thresh = rack_rto_max; 4973 } 4974 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 4975 return (thresh); 4976 } 4977 4978 static uint32_t 4979 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 4980 struct rack_sendmap *rsm, uint32_t srtt) 4981 { 4982 struct rack_sendmap *prsm; 4983 uint32_t thresh, len; 4984 int segsiz; 4985 4986 if (srtt == 0) 4987 srtt = 1; 4988 if (rack->r_ctl.rc_tlp_threshold) 4989 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 4990 else 4991 thresh = (srtt * 2); 4992 4993 /* Get the previous sent packet, if any */ 4994 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4995 len = rsm->r_end - rsm->r_start; 4996 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 4997 /* Exactly like the ID */ 4998 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 4999 uint32_t alt_thresh; 5000 /* 5001 * Compensate for delayed-ack with the d-ack time. 5002 */ 5003 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5004 if (alt_thresh > thresh) 5005 thresh = alt_thresh; 5006 } 5007 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5008 /* 2.1 behavior */ 5009 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5010 if (prsm && (len <= segsiz)) { 5011 /* 5012 * Two packets outstanding, thresh should be (2*srtt) + 5013 * possible inter-packet delay (if any). 5014 */ 5015 uint32_t inter_gap = 0; 5016 int idx, nidx; 5017 5018 idx = rsm->r_rtr_cnt - 1; 5019 nidx = prsm->r_rtr_cnt - 1; 5020 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5021 /* Yes it was sent later (or at the same time) */ 5022 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5023 } 5024 thresh += inter_gap; 5025 } else if (len <= segsiz) { 5026 /* 5027 * Possibly compensate for delayed-ack. 5028 */ 5029 uint32_t alt_thresh; 5030 5031 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5032 if (alt_thresh > thresh) 5033 thresh = alt_thresh; 5034 } 5035 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5036 /* 2.2 behavior */ 5037 if (len <= segsiz) { 5038 uint32_t alt_thresh; 5039 /* 5040 * Compensate for delayed-ack with the d-ack time. 5041 */ 5042 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5043 if (alt_thresh > thresh) 5044 thresh = alt_thresh; 5045 } 5046 } 5047 /* Not above an RTO */ 5048 if (thresh > tp->t_rxtcur) { 5049 thresh = tp->t_rxtcur; 5050 } 5051 /* Not above a RTO max */ 5052 if (thresh > rack_rto_max) { 5053 thresh = rack_rto_max; 5054 } 5055 /* Apply user supplied min TLP */ 5056 if (thresh < rack_tlp_min) { 5057 thresh = rack_tlp_min; 5058 } 5059 return (thresh); 5060 } 5061 5062 static uint32_t 5063 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5064 { 5065 /* 5066 * We want the rack_rtt which is the 5067 * last rtt we measured. However if that 5068 * does not exist we fallback to the srtt (which 5069 * we probably will never do) and then as a last 5070 * resort we use RACK_INITIAL_RTO if no srtt is 5071 * yet set. 
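 *
 * In short, the precedence implemented below is:
 *   rc_rack_rtt (last measured rack RTT) -> t_srtt -> RACK_INITIAL_RTO.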
5072 */ 5073 if (rack->rc_rack_rtt) 5074 return (rack->rc_rack_rtt); 5075 else if (tp->t_srtt == 0) 5076 return (RACK_INITIAL_RTO); 5077 return (tp->t_srtt); 5078 } 5079 5080 static struct rack_sendmap * 5081 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5082 { 5083 /* 5084 * Check to see that we don't need to fall into recovery. We will 5085 * need to do so if our oldest transmit is past the time we should 5086 * have had an ack. 5087 */ 5088 struct tcp_rack *rack; 5089 struct rack_sendmap *rsm; 5090 int32_t idx; 5091 uint32_t srtt, thresh; 5092 5093 rack = (struct tcp_rack *)tp->t_fb_ptr; 5094 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5095 return (NULL); 5096 } 5097 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5098 if (rsm == NULL) 5099 return (NULL); 5100 5101 5102 if (rsm->r_flags & RACK_ACKED) { 5103 rsm = rack_find_lowest_rsm(rack); 5104 if (rsm == NULL) 5105 return (NULL); 5106 } 5107 idx = rsm->r_rtr_cnt - 1; 5108 srtt = rack_grab_rtt(tp, rack); 5109 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5110 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5111 return (NULL); 5112 } 5113 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5114 return (NULL); 5115 } 5116 /* Ok if we reach here we are over-due and this guy can be sent */ 5117 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5118 return (rsm); 5119 } 5120 5121 static uint32_t 5122 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5123 { 5124 int32_t t; 5125 int32_t tt; 5126 uint32_t ret_val; 5127 5128 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5129 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5130 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5131 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5132 ret_val = (uint32_t)tt; 5133 return (ret_val); 5134 } 5135 5136 static uint32_t 5137 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5138 { 5139 /* 5140 * Start the FR timer, we do this based on getting the first one in 5141 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5142 * events we need to stop the running timer (if its running) before 5143 * starting the new one. 5144 */ 5145 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5146 uint32_t srtt_cur; 5147 int32_t idx; 5148 int32_t is_tlp_timer = 0; 5149 struct rack_sendmap *rsm; 5150 5151 if (rack->t_timers_stopped) { 5152 /* All timers have been stopped none are to run */ 5153 return (0); 5154 } 5155 if (rack->rc_in_persist) { 5156 /* We can't start any timer in persists */ 5157 return (rack_get_persists_timer_val(tp, rack)); 5158 } 5159 rack->rc_on_min_to = 0; 5160 if ((tp->t_state < TCPS_ESTABLISHED) || 5161 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5162 goto activate_rxt; 5163 } 5164 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5165 if ((rsm == NULL) || sup_rack) { 5166 /* Nothing on the send map or no rack */ 5167 activate_rxt: 5168 time_since_sent = 0; 5169 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5170 if (rsm) { 5171 /* 5172 * Should we discount the RTX timer any? 5173 * 5174 * We want to discount it the smallest amount. 5175 * If a timer (Rack/TLP or RXT) has gone off more 5176 * recently thats the discount we want to use (now - timer time). 5177 * If the retransmit of the oldest packet was more recent then 5178 * we want to use that (now - oldest-packet-last_transmit_time). 
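 *
 * Sketch with assumed timestamps (illustration only): if the last
 * Rack/TLP/RXT timer fired at t = 900 usecs, the oldest rsm was last
 * sent at t = 700 usecs and cts = 1000 usecs, then tstmp_touse = 900
 * (the more recent of the two, i.e. the smallest discount) and
 * time_since_sent = 1000 - 900 = 100 usecs, which is subtracted
 * from t_rxtcur below.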
5179 * 5180 */ 5181 idx = rsm->r_rtr_cnt - 1; 5182 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5183 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5184 else 5185 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5186 if (TSTMP_GT(cts, tstmp_touse)) 5187 time_since_sent = cts - tstmp_touse; 5188 } 5189 if (SEQ_LT(tp->snd_una, tp->snd_max) || 5190 sbavail(&tptosocket(tp)->so_snd)) { 5191 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5192 to = tp->t_rxtcur; 5193 if (to > time_since_sent) 5194 to -= time_since_sent; 5195 else 5196 to = rack->r_ctl.rc_min_to; 5197 if (to == 0) 5198 to = 1; 5199 /* Special case for KEEPINIT */ 5200 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5201 (TP_KEEPINIT(tp) != 0) && 5202 rsm) { 5203 /* 5204 * We have to put a ceiling on the rxt timer 5205 * of the keep-init timeout. 5206 */ 5207 uint32_t max_time, red; 5208 5209 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5210 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5211 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5212 if (red < max_time) 5213 max_time -= red; 5214 else 5215 max_time = 1; 5216 } 5217 /* Reduce timeout to the keep value if needed */ 5218 if (max_time < to) 5219 to = max_time; 5220 } 5221 return (to); 5222 } 5223 return (0); 5224 } 5225 if (rsm->r_flags & RACK_ACKED) { 5226 rsm = rack_find_lowest_rsm(rack); 5227 if (rsm == NULL) { 5228 /* No lowest? */ 5229 goto activate_rxt; 5230 } 5231 } 5232 if (rack->sack_attack_disable) { 5233 /* 5234 * We don't want to do 5235 * any TLP's if you are an attacker. 5236 * Though if you are doing what 5237 * is expected you may still have 5238 * SACK-PASSED marks. 5239 */ 5240 goto activate_rxt; 5241 } 5242 /* Convert from ms to usecs */ 5243 if ((rsm->r_flags & RACK_SACK_PASSED) || 5244 (rsm->r_flags & RACK_RWND_COLLAPSED) || 5245 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5246 if ((tp->t_flags & TF_SENTFIN) && 5247 ((tp->snd_max - tp->snd_una) == 1) && 5248 (rsm->r_flags & RACK_HAS_FIN)) { 5249 /* 5250 * We don't start a rack timer if all we have is a 5251 * FIN outstanding. 5252 */ 5253 goto activate_rxt; 5254 } 5255 if ((rack->use_rack_rr == 0) && 5256 (IN_FASTRECOVERY(tp->t_flags)) && 5257 (rack->rack_no_prr == 0) && 5258 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5259 /* 5260 * We are not cheating, in recovery and 5261 * not enough ack's to yet get our next 5262 * retransmission out. 5263 * 5264 * Note that classified attackers do not 5265 * get to use the rack-cheat. 5266 */ 5267 goto activate_tlp; 5268 } 5269 srtt = rack_grab_rtt(tp, rack); 5270 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5271 idx = rsm->r_rtr_cnt - 1; 5272 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5273 if (SEQ_GEQ(exp, cts)) { 5274 to = exp - cts; 5275 if (to < rack->r_ctl.rc_min_to) { 5276 to = rack->r_ctl.rc_min_to; 5277 if (rack->r_rr_config == 3) 5278 rack->rc_on_min_to = 1; 5279 } 5280 } else { 5281 to = rack->r_ctl.rc_min_to; 5282 if (rack->r_rr_config == 3) 5283 rack->rc_on_min_to = 1; 5284 } 5285 } else { 5286 /* Ok we need to do a TLP not RACK */ 5287 activate_tlp: 5288 if ((rack->rc_tlp_in_progress != 0) && 5289 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5290 /* 5291 * The previous send was a TLP and we have sent 5292 * N TLP's without sending new data. 5293 */ 5294 goto activate_rxt; 5295 } 5296 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5297 if (rsm == NULL) { 5298 /* We found no rsm to TLP with. 
*/ 5299 goto activate_rxt; 5300 } 5301 if (rsm->r_flags & RACK_HAS_FIN) { 5302 /* If its a FIN we dont do TLP */ 5303 rsm = NULL; 5304 goto activate_rxt; 5305 } 5306 idx = rsm->r_rtr_cnt - 1; 5307 time_since_sent = 0; 5308 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5309 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5310 else 5311 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5312 if (TSTMP_GT(cts, tstmp_touse)) 5313 time_since_sent = cts - tstmp_touse; 5314 is_tlp_timer = 1; 5315 if (tp->t_srtt) { 5316 if ((rack->rc_srtt_measure_made == 0) && 5317 (tp->t_srtt == 1)) { 5318 /* 5319 * If another stack as run and set srtt to 1, 5320 * then the srtt was 0, so lets use the initial. 5321 */ 5322 srtt = RACK_INITIAL_RTO; 5323 } else { 5324 srtt_cur = tp->t_srtt; 5325 srtt = srtt_cur; 5326 } 5327 } else 5328 srtt = RACK_INITIAL_RTO; 5329 /* 5330 * If the SRTT is not keeping up and the 5331 * rack RTT has spiked we want to use 5332 * the last RTT not the smoothed one. 5333 */ 5334 if (rack_tlp_use_greater && 5335 tp->t_srtt && 5336 (srtt < rack_grab_rtt(tp, rack))) { 5337 srtt = rack_grab_rtt(tp, rack); 5338 } 5339 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5340 if (thresh > time_since_sent) { 5341 to = thresh - time_since_sent; 5342 } else { 5343 to = rack->r_ctl.rc_min_to; 5344 rack_log_alt_to_to_cancel(rack, 5345 thresh, /* flex1 */ 5346 time_since_sent, /* flex2 */ 5347 tstmp_touse, /* flex3 */ 5348 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5349 (uint32_t)rsm->r_tim_lastsent[idx], 5350 srtt, 5351 idx, 99); 5352 } 5353 if (to < rack_tlp_min) { 5354 to = rack_tlp_min; 5355 } 5356 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5357 /* 5358 * If the TLP time works out to larger than the max 5359 * RTO lets not do TLP.. just RTO. 5360 */ 5361 goto activate_rxt; 5362 } 5363 } 5364 if (is_tlp_timer == 0) { 5365 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5366 } else { 5367 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5368 } 5369 if (to == 0) 5370 to = 1; 5371 return (to); 5372 } 5373 5374 static void 5375 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5376 { 5377 if (rack->rc_in_persist == 0) { 5378 if (tp->t_flags & TF_GPUTINPROG) { 5379 /* 5380 * Stop the goodput now, the calling of the 5381 * measurement function clears the flag. 
5382 */ 5383 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5384 RACK_QUALITY_PERSIST); 5385 } 5386 #ifdef NETFLIX_SHARED_CWND 5387 if (rack->r_ctl.rc_scw) { 5388 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5389 rack->rack_scwnd_is_idle = 1; 5390 } 5391 #endif 5392 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5393 if (rack->r_ctl.rc_went_idle_time == 0) 5394 rack->r_ctl.rc_went_idle_time = 1; 5395 rack_timer_cancel(tp, rack, cts, __LINE__); 5396 rack->r_ctl.persist_lost_ends = 0; 5397 rack->probe_not_answered = 0; 5398 rack->forced_ack = 0; 5399 tp->t_rxtshift = 0; 5400 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5401 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5402 rack->rc_in_persist = 1; 5403 } 5404 } 5405 5406 static void 5407 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5408 { 5409 if (tcp_in_hpts(rack->rc_inp)) { 5410 tcp_hpts_remove(rack->rc_inp); 5411 rack->r_ctl.rc_hpts_flags = 0; 5412 } 5413 #ifdef NETFLIX_SHARED_CWND 5414 if (rack->r_ctl.rc_scw) { 5415 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5416 rack->rack_scwnd_is_idle = 0; 5417 } 5418 #endif 5419 if (rack->rc_gp_dyn_mul && 5420 (rack->use_fixed_rate == 0) && 5421 (rack->rc_always_pace)) { 5422 /* 5423 * Do we count this as if a probe-rtt just 5424 * finished? 5425 */ 5426 uint32_t time_idle, idle_min; 5427 5428 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5429 idle_min = rack_min_probertt_hold; 5430 if (rack_probertt_gpsrtt_cnt_div) { 5431 uint64_t extra; 5432 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5433 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5434 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5435 idle_min += (uint32_t)extra; 5436 } 5437 if (time_idle >= idle_min) { 5438 /* Yes, we count it as a probe-rtt. 
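 *
 * Example with assumed values (illustration only): with
 * rack_min_probertt_hold = 200000 usecs, rc_gp_srtt = 20000 usecs,
 * rack_probertt_gpsrtt_cnt_mul = 3 and rack_probertt_gpsrtt_cnt_div = 4,
 * idle_min = 200000 + (20000 * 3) / 4 = 215000 usecs, so any idle
 * period of at least that length is treated like a probe-rtt.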
*/ 5439 uint32_t us_cts; 5440 5441 us_cts = tcp_get_usecs(NULL); 5442 if (rack->in_probe_rtt == 0) { 5443 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5444 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5445 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5446 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5447 } else { 5448 rack_exit_probertt(rack, us_cts); 5449 } 5450 } 5451 } 5452 rack->rc_in_persist = 0; 5453 rack->r_ctl.rc_went_idle_time = 0; 5454 tp->t_rxtshift = 0; 5455 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5456 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5457 rack->r_ctl.rc_agg_delayed = 0; 5458 rack->r_early = 0; 5459 rack->r_late = 0; 5460 rack->r_ctl.rc_agg_early = 0; 5461 } 5462 5463 static void 5464 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5465 struct hpts_diag *diag, struct timeval *tv) 5466 { 5467 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5468 union tcp_log_stackspecific log; 5469 5470 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5471 log.u_bbr.flex1 = diag->p_nxt_slot; 5472 log.u_bbr.flex2 = diag->p_cur_slot; 5473 log.u_bbr.flex3 = diag->slot_req; 5474 log.u_bbr.flex4 = diag->inp_hptsslot; 5475 log.u_bbr.flex5 = diag->slot_remaining; 5476 log.u_bbr.flex6 = diag->need_new_to; 5477 log.u_bbr.flex7 = diag->p_hpts_active; 5478 log.u_bbr.flex8 = diag->p_on_min_sleep; 5479 /* Hijack other fields as needed */ 5480 log.u_bbr.epoch = diag->have_slept; 5481 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5482 log.u_bbr.pkts_out = diag->co_ret; 5483 log.u_bbr.applimited = diag->hpts_sleep_time; 5484 log.u_bbr.delivered = diag->p_prev_slot; 5485 log.u_bbr.inflight = diag->p_runningslot; 5486 log.u_bbr.bw_inuse = diag->wheel_slot; 5487 log.u_bbr.rttProp = diag->wheel_cts; 5488 log.u_bbr.timeStamp = cts; 5489 log.u_bbr.delRate = diag->maxslots; 5490 log.u_bbr.cur_del_rate = diag->p_curtick; 5491 log.u_bbr.cur_del_rate <<= 32; 5492 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5493 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5494 &rack->rc_inp->inp_socket->so_rcv, 5495 &rack->rc_inp->inp_socket->so_snd, 5496 BBR_LOG_HPTSDIAG, 0, 5497 0, &log, false, tv); 5498 } 5499 5500 } 5501 5502 static void 5503 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5504 { 5505 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 5506 union tcp_log_stackspecific log; 5507 struct timeval tv; 5508 5509 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5510 log.u_bbr.flex1 = sb->sb_flags; 5511 log.u_bbr.flex2 = len; 5512 log.u_bbr.flex3 = sb->sb_state; 5513 log.u_bbr.flex8 = type; 5514 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5515 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5516 &rack->rc_inp->inp_socket->so_rcv, 5517 &rack->rc_inp->inp_socket->so_snd, 5518 TCP_LOG_SB_WAKE, 0, 5519 len, &log, false, &tv); 5520 } 5521 } 5522 5523 static void 5524 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5525 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5526 { 5527 struct hpts_diag diag; 5528 struct inpcb *inp = tptoinpcb(tp); 5529 struct timeval tv; 5530 uint32_t delayed_ack = 0; 5531 uint32_t hpts_timeout; 5532 uint32_t entry_slot = slot; 5533 uint8_t stopped; 5534 uint32_t left = 0; 5535 uint32_t us_cts; 5536 5537 if ((tp->t_state == TCPS_CLOSED) || 5538 (tp->t_state == TCPS_LISTEN)) { 5539 return; 5540 } 5541 if (tcp_in_hpts(inp)) { 5542 /* Already on the pacer */ 5543 return; 5544 } 5545 stopped = rack->rc_tmr_stopped; 5546 if 
(stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5547 left = rack->r_ctl.rc_timer_exp - cts; 5548 } 5549 rack->r_ctl.rc_timer_exp = 0; 5550 rack->r_ctl.rc_hpts_flags = 0; 5551 us_cts = tcp_get_usecs(&tv); 5552 /* Now early/late accounting */ 5553 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5554 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5555 /* 5556 * We have an early carry-over set; 5557 * we can always add more time so we 5558 * can always make this compensation. 5559 * 5560 * Note that if acks are allowed to wake us, do not 5561 * penalize the next timer for being woken 5562 * by an ack, aka the rc_agg_early (non-paced mode). 5563 */ 5564 slot += rack->r_ctl.rc_agg_early; 5565 rack->r_early = 0; 5566 rack->r_ctl.rc_agg_early = 0; 5567 } 5568 if (rack->r_late) { 5569 /* 5570 * This is harder, we can 5571 * compensate some but it 5572 * really depends on what 5573 * the current pacing time is. 5574 */ 5575 if (rack->r_ctl.rc_agg_delayed >= slot) { 5576 /* 5577 * We can't compensate for it all. 5578 * And we have to have some time 5579 * on the clock. We always have a min 5580 * 10 slots (10 x 10 i.e. 100 usecs). 5581 */ 5582 if (slot <= HPTS_TICKS_PER_SLOT) { 5583 /* We gain delay */ 5584 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5585 slot = HPTS_TICKS_PER_SLOT; 5586 } else { 5587 /* We take off some */ 5588 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5589 slot = HPTS_TICKS_PER_SLOT; 5590 } 5591 } else { 5592 slot -= rack->r_ctl.rc_agg_delayed; 5593 rack->r_ctl.rc_agg_delayed = 0; 5594 /* Make sure we have 100 useconds at minimum */ 5595 if (slot < HPTS_TICKS_PER_SLOT) { 5596 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5597 slot = HPTS_TICKS_PER_SLOT; 5598 } 5599 if (rack->r_ctl.rc_agg_delayed == 0) 5600 rack->r_late = 0; 5601 } 5602 } 5603 if (slot) { 5604 /* We are pacing too */ 5605 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5606 } 5607 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5608 #ifdef NETFLIX_EXP_DETECTION 5609 if (rack->sack_attack_disable && 5610 (slot < tcp_sad_pacing_interval)) { 5611 /* 5612 * We have a potential attacker on 5613 * the line. We have possibly some 5614 * (or no) pacing time set. We want to 5615 * slow down the processing of sacks by some 5616 * amount (if it is an attacker). Set the default 5617 * slot for attackers in place (unless the original 5618 * interval is longer). The interval is stored in 5619 * micro-seconds, the same units as slot. 5620 */ 5621 slot = tcp_sad_pacing_interval; 5622 } 5623 #endif 5624 if (tp->t_flags & TF_DELACK) { 5625 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5626 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5627 } 5628 if (delayed_ack && ((hpts_timeout == 0) || 5629 (delayed_ack < hpts_timeout))) 5630 hpts_timeout = delayed_ack; 5631 else 5632 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5633 /* 5634 * If no timers are going to run and we will fall off the hptsi 5635 * wheel, we resort to a keep-alive timer if it's configured. 5636 */ 5637 if ((hpts_timeout == 0) && 5638 (slot == 0)) { 5639 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5640 (tp->t_state <= TCPS_CLOSING)) { 5641 /* 5642 * Ok we have no timer (persists, rack, tlp, rxt or 5643 * del-ack), we don't have segments being paced. So 5644 * all that is left is the keepalive timer.
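 *
 * The selection below is, in sketch form:
 *   established     -> hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp))
 *   not established -> hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp))
 * and if we are in probe-rtt the timeout is shortened to
 * rack_min_probertt_hold so the wakeup doubles as the probe-rtt exit.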
5645 */ 5646 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5647 /* Get the established keep-alive time */ 5648 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5649 } else { 5650 /* 5651 * Get the initial setup keep-alive time, 5652 * note that this is probably not going to 5653 * happen, since rack will be running a rxt timer 5654 * if a SYN of some sort is outstanding. It is 5655 * actually handled in rack_timeout_rxt(). 5656 */ 5657 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5658 } 5659 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5660 if (rack->in_probe_rtt) { 5661 /* 5662 * We want to instead not wake up a long time from 5663 * now but to wake up about the time we would 5664 * exit probe-rtt and initiate a keep-alive ack. 5665 * This will get us out of probe-rtt and update 5666 * our min-rtt. 5667 */ 5668 hpts_timeout = rack_min_probertt_hold; 5669 } 5670 } 5671 } 5672 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5673 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5674 /* 5675 * RACK, TLP, persists and RXT timers all are restartable 5676 * based on actions input .. i.e we received a packet (ack 5677 * or sack) and that changes things (rw, or snd_una etc). 5678 * Thus we can restart them with a new value. For 5679 * keep-alive, delayed_ack we keep track of what was left 5680 * and restart the timer with a smaller value. 5681 */ 5682 if (left < hpts_timeout) 5683 hpts_timeout = left; 5684 } 5685 if (hpts_timeout) { 5686 /* 5687 * Hack alert for now we can't time-out over 2,147,483 5688 * seconds (a bit more than 596 hours), which is probably ok 5689 * :). 5690 */ 5691 if (hpts_timeout > 0x7ffffffe) 5692 hpts_timeout = 0x7ffffffe; 5693 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5694 } 5695 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5696 if ((rack->gp_ready == 0) && 5697 (rack->use_fixed_rate == 0) && 5698 (hpts_timeout < slot) && 5699 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5700 /* 5701 * We have no good estimate yet for the 5702 * old clunky burst mitigation or the 5703 * real pacing. And the tlp or rxt is smaller 5704 * than the pacing calculation. Lets not 5705 * pace that long since we know the calculation 5706 * so far is not accurate. 5707 */ 5708 slot = hpts_timeout; 5709 } 5710 /** 5711 * Turn off all the flags for queuing by default. The 5712 * flags have important meanings to what happens when 5713 * LRO interacts with the transport. Most likely (by default now) 5714 * mbuf_queueing and ack compression are on. So the transport 5715 * has a couple of flags that control what happens (if those 5716 * are not on then these flags won't have any effect since it 5717 * won't go through the queuing LRO path). 5718 * 5719 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5720 * pacing output, so don't disturb. But 5721 * it also means LRO can wake me if there 5722 * is a SACK arrival. 5723 * 5724 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5725 * with the above flag (QUEUE_READY) and 5726 * when present it says don't even wake me 5727 * if a SACK arrives. 5728 * 5729 * The idea behind these flags is that if we are pacing we 5730 * set the MBUF_QUEUE_READY and only get woken up if 5731 * a SACK arrives (which could change things) or if 5732 * our pacing timer expires. If, however, we have a rack 5733 * timer running, then we don't even want a sack to wake 5734 * us since the rack timer has to expire before we can send. 
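 *
 * A rough summary of what the code below sets (the code itself is
 * authoritative):
 *   pacing slot only                            -> INP_MBUF_QUEUE_READY
 *   pacing slot + rack timer (r_rr_config != 3) -> both flags
 *   rc_ack_can_sendout_data                     -> neither flag
 *   timer only, no pacing slot                  -> neither flag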
5735 * 5736 * Other cases should usually have none of the flags set 5737 * so LRO can call into us. 5738 */ 5739 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5740 if (slot) { 5741 rack->r_ctl.rc_last_output_to = us_cts + slot; 5742 /* 5743 * A pacing timer (slot) is being set, in 5744 * such a case we cannot send (we are blocked by 5745 * the timer). So lets tell LRO that it should not 5746 * wake us unless there is a SACK. Note this only 5747 * will be effective if mbuf queueing is on or 5748 * compressed acks are being processed. 5749 */ 5750 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5751 /* 5752 * But wait if we have a Rack timer running 5753 * even a SACK should not disturb us (with 5754 * the exception of r_rr_config 3). 5755 */ 5756 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5757 (rack->r_rr_config != 3)) 5758 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5759 if (rack->rc_ack_can_sendout_data) { 5760 /* 5761 * Ahh but wait, this is that special case 5762 * where the pacing timer can be disturbed 5763 * backout the changes (used for non-paced 5764 * burst limiting). 5765 */ 5766 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5767 } 5768 if ((rack->use_rack_rr) && 5769 (rack->r_rr_config < 2) && 5770 ((hpts_timeout) && (hpts_timeout < slot))) { 5771 /* 5772 * Arrange for the hpts to kick back in after the 5773 * t-o if the t-o does not cause a send. 5774 */ 5775 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5776 __LINE__, &diag); 5777 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5778 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5779 } else { 5780 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), 5781 __LINE__, &diag); 5782 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5783 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5784 } 5785 } else if (hpts_timeout) { 5786 /* 5787 * With respect to inp_flags2 here, lets let any new acks wake 5788 * us up here. Since we are not pacing (no pacing timer), output 5789 * can happen so we should let it. If its a Rack timer, then any inbound 5790 * packet probably won't change the sending (we will be blocked) 5791 * but it may change the prr stats so letting it in (the set defaults 5792 * at the start of this block) are good enough. 5793 */ 5794 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5795 __LINE__, &diag); 5796 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5797 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5798 } else { 5799 /* No timer starting */ 5800 #ifdef INVARIANTS 5801 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5802 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5803 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5804 } 5805 #endif 5806 } 5807 rack->rc_tmr_stopped = 0; 5808 if (slot) 5809 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5810 } 5811 5812 /* 5813 * RACK Timer, here we simply do logging and house keeping. 5814 * the normal rack_output() function will call the 5815 * appropriate thing to check if we need to do a RACK retransmit. 5816 * We return 1, saying don't proceed with rack_output only 5817 * when all timers have been stopped (destroyed PCB?). 5818 */ 5819 static int 5820 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5821 { 5822 /* 5823 * This timer simply provides an internal trigger to send out data. 5824 * The check_recovery_mode call will see if there are needed 5825 * retransmissions, if so we will enter fast-recovery. 
The output 5826 * call may or may not do the same thing depending on sysctl 5827 * settings. 5828 */ 5829 struct rack_sendmap *rsm; 5830 5831 counter_u64_add(rack_to_tot, 1); 5832 if (rack->r_state && (rack->r_state != tp->t_state)) 5833 rack_set_state(tp, rack); 5834 rack->rc_on_min_to = 0; 5835 rsm = rack_check_recovery_mode(tp, cts); 5836 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5837 if (rsm) { 5838 rack->r_ctl.rc_resend = rsm; 5839 rack->r_timer_override = 1; 5840 if (rack->use_rack_rr) { 5841 /* 5842 * Don't accumulate extra pacing delay 5843 * we are allowing the rack timer to 5844 * over-ride pacing i.e. rrr takes precedence 5845 * if the pacing interval is longer than the rrr 5846 * time (in other words we get the min pacing 5847 * time versus rrr pacing time). 5848 */ 5849 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5850 } 5851 } 5852 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5853 if (rsm == NULL) { 5854 /* restart a timer and return 1 */ 5855 rack_start_hpts_timer(rack, tp, cts, 5856 0, 0, 0); 5857 return (1); 5858 } 5859 return (0); 5860 } 5861 5862 static void 5863 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5864 { 5865 if (rsm->m->m_len > rsm->orig_m_len) { 5866 /* 5867 * Mbuf grew, caused by sbcompress, our offset does 5868 * not change. 5869 */ 5870 rsm->orig_m_len = rsm->m->m_len; 5871 } else if (rsm->m->m_len < rsm->orig_m_len) { 5872 /* 5873 * Mbuf shrank, trimmed off the top by an ack, our 5874 * offset changes. 5875 */ 5876 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5877 rsm->orig_m_len = rsm->m->m_len; 5878 } 5879 } 5880 5881 static void 5882 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5883 { 5884 struct mbuf *m; 5885 uint32_t soff; 5886 5887 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5888 /* Fix up the orig_m_len and possibly the mbuf offset */ 5889 rack_adjust_orig_mlen(src_rsm); 5890 } 5891 m = src_rsm->m; 5892 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5893 while (soff >= m->m_len) { 5894 /* Move out past this mbuf */ 5895 soff -= m->m_len; 5896 m = m->m_next; 5897 KASSERT((m != NULL), 5898 ("rsm:%p nrsm:%p hit at soff:%u null m", 5899 src_rsm, rsm, soff)); 5900 } 5901 rsm->m = m; 5902 rsm->soff = soff; 5903 rsm->orig_m_len = m->m_len; 5904 } 5905 5906 static __inline void 5907 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5908 struct rack_sendmap *rsm, uint32_t start) 5909 { 5910 int idx; 5911 5912 nrsm->r_start = start; 5913 nrsm->r_end = rsm->r_end; 5914 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 5915 nrsm->r_flags = rsm->r_flags; 5916 nrsm->r_dupack = rsm->r_dupack; 5917 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 5918 nrsm->r_rtr_bytes = 0; 5919 nrsm->r_fas = rsm->r_fas; 5920 rsm->r_end = nrsm->r_start; 5921 nrsm->r_just_ret = rsm->r_just_ret; 5922 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 5923 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 5924 } 5925 /* Now if we have SYN flag we keep it on the left edge */ 5926 if (nrsm->r_flags & RACK_HAS_SYN) 5927 nrsm->r_flags &= ~RACK_HAS_SYN; 5928 /* Now if we have a FIN flag we keep it on the right edge */ 5929 if (rsm->r_flags & RACK_HAS_FIN) 5930 rsm->r_flags &= ~RACK_HAS_FIN; 5931 /* Push bit must go to the right edge as well */ 5932 if (rsm->r_flags & RACK_HAD_PUSH) 5933 rsm->r_flags &= ~RACK_HAD_PUSH; 5934 /* Clone over the state of the hw_tls flag */ 5935 nrsm->r_hw_tls = rsm->r_hw_tls; 5936 /* 5937 * Now we need to find nrsm's new location in the mbuf chain 5938 * we basically calculate a 
new offset, which is soff + 5939 * how much is left in original rsm. Then we walk out the mbuf 5940 * chain to find the righ position, it may be the same mbuf 5941 * or maybe not. 5942 */ 5943 KASSERT(((rsm->m != NULL) || 5944 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 5945 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 5946 if (rsm->m) 5947 rack_setup_offset_for_rsm(rsm, nrsm); 5948 } 5949 5950 static struct rack_sendmap * 5951 rack_merge_rsm(struct tcp_rack *rack, 5952 struct rack_sendmap *l_rsm, 5953 struct rack_sendmap *r_rsm) 5954 { 5955 /* 5956 * We are merging two ack'd RSM's, 5957 * the l_rsm is on the left (lower seq 5958 * values) and the r_rsm is on the right 5959 * (higher seq value). The simplest way 5960 * to merge these is to move the right 5961 * one into the left. I don't think there 5962 * is any reason we need to try to find 5963 * the oldest (or last oldest retransmitted). 5964 */ 5965 #ifdef INVARIANTS 5966 struct rack_sendmap *rm; 5967 #endif 5968 rack_log_map_chg(rack->rc_tp, rack, NULL, 5969 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 5970 l_rsm->r_end = r_rsm->r_end; 5971 if (l_rsm->r_dupack < r_rsm->r_dupack) 5972 l_rsm->r_dupack = r_rsm->r_dupack; 5973 if (r_rsm->r_rtr_bytes) 5974 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 5975 if (r_rsm->r_in_tmap) { 5976 /* This really should not happen */ 5977 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 5978 r_rsm->r_in_tmap = 0; 5979 } 5980 5981 /* Now the flags */ 5982 if (r_rsm->r_flags & RACK_HAS_FIN) 5983 l_rsm->r_flags |= RACK_HAS_FIN; 5984 if (r_rsm->r_flags & RACK_TLP) 5985 l_rsm->r_flags |= RACK_TLP; 5986 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 5987 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 5988 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 5989 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 5990 /* 5991 * If both are app-limited then let the 5992 * free lower the count. If right is app 5993 * limited and left is not, transfer. 5994 */ 5995 l_rsm->r_flags |= RACK_APP_LIMITED; 5996 r_rsm->r_flags &= ~RACK_APP_LIMITED; 5997 if (r_rsm == rack->r_ctl.rc_first_appl) 5998 rack->r_ctl.rc_first_appl = l_rsm; 5999 } 6000 #ifndef INVARIANTS 6001 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6002 #else 6003 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6004 if (rm != r_rsm) { 6005 panic("removing head in rack:%p rsm:%p rm:%p", 6006 rack, r_rsm, rm); 6007 } 6008 #endif 6009 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6010 /* Transfer the split limit to the map we free */ 6011 r_rsm->r_limit_type = l_rsm->r_limit_type; 6012 l_rsm->r_limit_type = 0; 6013 } 6014 rack_free(rack, r_rsm); 6015 return (l_rsm); 6016 } 6017 6018 /* 6019 * TLP Timer, here we simply setup what segment we want to 6020 * have the TLP expire on, the normal rack_output() will then 6021 * send it out. 6022 * 6023 * We return 1, saying don't proceed with rack_output only 6024 * when all timers have been stopped (destroyed PCB?). 6025 */ 6026 static int 6027 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6028 { 6029 /* 6030 * Tail Loss Probe. 
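 *
 * In rough outline (the code below is authoritative): if new data is
 * available and a full segment of it fits in the receive window we
 * arrange to send one new MSS; otherwise we fall through to
 * need_retran and pick the highest un-acked segment, splitting it
 * down to one MSS if necessary.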
6031 */ 6032 struct rack_sendmap *rsm = NULL; 6033 #ifdef INVARIANTS 6034 struct rack_sendmap *insret; 6035 #endif 6036 struct socket *so = tptosocket(tp); 6037 uint32_t amm; 6038 uint32_t out, avail; 6039 int collapsed_win = 0; 6040 6041 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6042 /* Its not time yet */ 6043 return (0); 6044 } 6045 if (ctf_progress_timeout_check(tp, true)) { 6046 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6047 return (-ETIMEDOUT); /* tcp_drop() */ 6048 } 6049 /* 6050 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6051 * need to figure out how to force a full MSS segment out. 6052 */ 6053 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6054 rack->r_ctl.retran_during_recovery = 0; 6055 rack->r_ctl.dsack_byte_cnt = 0; 6056 counter_u64_add(rack_tlp_tot, 1); 6057 if (rack->r_state && (rack->r_state != tp->t_state)) 6058 rack_set_state(tp, rack); 6059 avail = sbavail(&so->so_snd); 6060 out = tp->snd_max - tp->snd_una; 6061 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6062 /* special case, we need a retransmission */ 6063 collapsed_win = 1; 6064 goto need_retran; 6065 } 6066 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6067 rack->r_ctl.dsack_persist--; 6068 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6069 rack->r_ctl.num_dsack = 0; 6070 } 6071 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6072 } 6073 if ((tp->t_flags & TF_GPUTINPROG) && 6074 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6075 /* 6076 * If this is the second in a row 6077 * TLP and we are doing a measurement 6078 * its time to abandon the measurement. 6079 * Something is likely broken on 6080 * the clients network and measuring a 6081 * broken network does us no good. 6082 */ 6083 tp->t_flags &= ~TF_GPUTINPROG; 6084 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6085 rack->r_ctl.rc_gp_srtt /*flex1*/, 6086 tp->gput_seq, 6087 0, 0, 18, __LINE__, NULL, 0); 6088 } 6089 /* 6090 * Check our send oldest always settings, and if 6091 * there is an oldest to send jump to the need_retran. 6092 */ 6093 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6094 goto need_retran; 6095 6096 if (avail > out) { 6097 /* New data is available */ 6098 amm = avail - out; 6099 if (amm > ctf_fixed_maxseg(tp)) { 6100 amm = ctf_fixed_maxseg(tp); 6101 if ((amm + out) > tp->snd_wnd) { 6102 /* We are rwnd limited */ 6103 goto need_retran; 6104 } 6105 } else if (amm < ctf_fixed_maxseg(tp)) { 6106 /* not enough to fill a MTU */ 6107 goto need_retran; 6108 } 6109 if (IN_FASTRECOVERY(tp->t_flags)) { 6110 /* Unlikely */ 6111 if (rack->rack_no_prr == 0) { 6112 if (out + amm <= tp->snd_wnd) { 6113 rack->r_ctl.rc_prr_sndcnt = amm; 6114 rack->r_ctl.rc_tlp_new_data = amm; 6115 rack_log_to_prr(rack, 4, 0, __LINE__); 6116 } 6117 } else 6118 goto need_retran; 6119 } else { 6120 /* Set the send-new override */ 6121 if (out + amm <= tp->snd_wnd) 6122 rack->r_ctl.rc_tlp_new_data = amm; 6123 else 6124 goto need_retran; 6125 } 6126 rack->r_ctl.rc_tlpsend = NULL; 6127 counter_u64_add(rack_tlp_newdata, 1); 6128 goto send; 6129 } 6130 need_retran: 6131 /* 6132 * Ok we need to arrange the last un-acked segment to be re-sent, or 6133 * optionally the first un-acked segment. 
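 *
 * Selection sketch (mirrors the code below):
 *   - rack_always_send_oldest: take the head of rc_tmap.
 *   - otherwise: take RB_MAX of the map, stepping past ACKED/FIN
 *     entries via rack_find_high_nonack().
 *   - collapsed window: take the highest rsm the peer still accepted,
 *     i.e. the last one without RACK_RWND_COLLAPSED set.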
6134 */ 6135 if (collapsed_win == 0) { 6136 if (rack_always_send_oldest) 6137 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6138 else { 6139 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6140 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6141 rsm = rack_find_high_nonack(rack, rsm); 6142 } 6143 } 6144 if (rsm == NULL) { 6145 #ifdef TCP_BLACKBOX 6146 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6147 #endif 6148 goto out; 6149 } 6150 } else { 6151 /* 6152 * We must find the last segment 6153 * that was acceptable by the client. 6154 */ 6155 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6156 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6157 /* Found one */ 6158 break; 6159 } 6160 } 6161 if (rsm == NULL) { 6162 /* None? if so send the first */ 6163 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6164 if (rsm == NULL) { 6165 #ifdef TCP_BLACKBOX 6166 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6167 #endif 6168 goto out; 6169 } 6170 } 6171 } 6172 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6173 /* 6174 * We need to split this the last segment in two. 6175 */ 6176 struct rack_sendmap *nrsm; 6177 6178 nrsm = rack_alloc_full_limit(rack); 6179 if (nrsm == NULL) { 6180 /* 6181 * No memory to split, we will just exit and punt 6182 * off to the RXT timer. 6183 */ 6184 goto out; 6185 } 6186 rack_clone_rsm(rack, nrsm, rsm, 6187 (rsm->r_end - ctf_fixed_maxseg(tp))); 6188 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6189 #ifndef INVARIANTS 6190 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6191 #else 6192 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6193 if (insret != NULL) { 6194 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6195 nrsm, insret, rack, rsm); 6196 } 6197 #endif 6198 if (rsm->r_in_tmap) { 6199 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6200 nrsm->r_in_tmap = 1; 6201 } 6202 rsm = nrsm; 6203 } 6204 rack->r_ctl.rc_tlpsend = rsm; 6205 send: 6206 /* Make sure output path knows we are doing a TLP */ 6207 *doing_tlp = 1; 6208 rack->r_timer_override = 1; 6209 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6210 return (0); 6211 out: 6212 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6213 return (0); 6214 } 6215 6216 /* 6217 * Delayed ack Timer, here we simply need to setup the 6218 * ACK_NOW flag and remove the DELACK flag. From there 6219 * the output routine will send the ack out. 6220 * 6221 * We only return 1, saying don't proceed, if all timers 6222 * are stopped (destroyed PCB?). 6223 */ 6224 static int 6225 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6226 { 6227 6228 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6229 tp->t_flags &= ~TF_DELACK; 6230 tp->t_flags |= TF_ACKNOW; 6231 KMOD_TCPSTAT_INC(tcps_delack); 6232 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6233 return (0); 6234 } 6235 6236 /* 6237 * Persists timer, here we simply send the 6238 * same thing as a keepalive will. 6239 * the one byte send. 6240 * 6241 * We only return 1, saying don't proceed, if all timers 6242 * are stopped (destroyed PCB?). 
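 *
 * The probe itself is the tcp_respond() call below, sent with
 * sequence number snd_una - 1 so the zero-length segment falls
 * outside the receive window and forces the peer to respond with a
 * fresh window update.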
6243 */ 6244 static int 6245 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6246 { 6247 struct tcptemp *t_template; 6248 int32_t retval = 1; 6249 6250 if (rack->rc_in_persist == 0) 6251 return (0); 6252 if (ctf_progress_timeout_check(tp, false)) { 6253 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6254 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6255 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6256 return (-ETIMEDOUT); /* tcp_drop() */ 6257 } 6258 /* 6259 * Persistence timer into zero window. Force a byte to be output, if 6260 * possible. 6261 */ 6262 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6263 /* 6264 * Hack: if the peer is dead/unreachable, we do not time out if the 6265 * window is closed. After a full backoff, drop the connection if 6266 * the idle time (no responses to probes) reaches the maximum 6267 * backoff that we would use if retransmitting. 6268 */ 6269 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6270 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6271 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6272 KMOD_TCPSTAT_INC(tcps_persistdrop); 6273 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6274 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6275 retval = -ETIMEDOUT; /* tcp_drop() */ 6276 goto out; 6277 } 6278 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6279 tp->snd_una == tp->snd_max) 6280 rack_exit_persist(tp, rack, cts); 6281 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6282 /* 6283 * If the user has closed the socket then drop a persisting 6284 * connection after a much reduced timeout. 6285 */ 6286 if (tp->t_state > TCPS_CLOSE_WAIT && 6287 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6288 KMOD_TCPSTAT_INC(tcps_persistdrop); 6289 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6290 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6291 retval = -ETIMEDOUT; /* tcp_drop() */ 6292 goto out; 6293 } 6294 t_template = tcpip_maketemplate(rack->rc_inp); 6295 if (t_template) { 6296 /* only set it if we were answered */ 6297 if (rack->forced_ack == 0) { 6298 rack->forced_ack = 1; 6299 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6300 } else { 6301 rack->probe_not_answered = 1; 6302 counter_u64_add(rack_persists_loss, 1); 6303 rack->r_ctl.persist_lost_ends++; 6304 } 6305 counter_u64_add(rack_persists_sends, 1); 6306 tcp_respond(tp, t_template->tt_ipgen, 6307 &t_template->tt_t, (struct mbuf *)NULL, 6308 tp->rcv_nxt, tp->snd_una - 1, 0); 6309 /* This sends an ack */ 6310 if (tp->t_flags & TF_DELACK) 6311 tp->t_flags &= ~TF_DELACK; 6312 free(t_template, M_TEMP); 6313 } 6314 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6315 tp->t_rxtshift++; 6316 out: 6317 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6318 rack_start_hpts_timer(rack, tp, cts, 6319 0, 0, 0); 6320 return (retval); 6321 } 6322 6323 /* 6324 * If a keepalive goes off, we had no other timers 6325 * happening. We always return 1 here since this 6326 * routine either drops the connection or sends 6327 * out a segment with respond. 6328 */ 6329 static int 6330 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6331 { 6332 struct tcptemp *t_template; 6333 struct inpcb *inp = tptoinpcb(tp); 6334 6335 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6336 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6337 /* 6338 * Keep-alive timer went off; send something or drop connection if 6339 * idle for too long. 
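 *
 * "Too long" below means ticks - t_rcvtime >= TP_KEEPIDLE(tp) +
 * TP_MAXIDLE(tp); otherwise we send the same snd_una - 1 style probe
 * the persist timer uses.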
6340 */ 6341 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6342 if (tp->t_state < TCPS_ESTABLISHED) 6343 goto dropit; 6344 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6345 tp->t_state <= TCPS_CLOSING) { 6346 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6347 goto dropit; 6348 /* 6349 * Send a packet designed to force a response if the peer is 6350 * up and reachable: either an ACK if the connection is 6351 * still alive, or an RST if the peer has closed the 6352 * connection due to timeout or reboot. Using sequence 6353 * number tp->snd_una-1 causes the transmitted zero-length 6354 * segment to lie outside the receive window; by the 6355 * protocol spec, this requires the correspondent TCP to 6356 * respond. 6357 */ 6358 KMOD_TCPSTAT_INC(tcps_keepprobe); 6359 t_template = tcpip_maketemplate(inp); 6360 if (t_template) { 6361 if (rack->forced_ack == 0) { 6362 rack->forced_ack = 1; 6363 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6364 } else { 6365 rack->probe_not_answered = 1; 6366 } 6367 tcp_respond(tp, t_template->tt_ipgen, 6368 &t_template->tt_t, (struct mbuf *)NULL, 6369 tp->rcv_nxt, tp->snd_una - 1, 0); 6370 free(t_template, M_TEMP); 6371 } 6372 } 6373 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6374 return (1); 6375 dropit: 6376 KMOD_TCPSTAT_INC(tcps_keepdrops); 6377 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6378 return (-ETIMEDOUT); /* tcp_drop() */ 6379 } 6380 6381 /* 6382 * Retransmit helper function, clear up all the ack 6383 * flags and take care of important book keeping. 6384 */ 6385 static void 6386 rack_remxt_tmr(struct tcpcb *tp) 6387 { 6388 /* 6389 * The retransmit timer went off, all sack'd blocks must be 6390 * un-acked. 6391 */ 6392 struct rack_sendmap *rsm, *trsm = NULL; 6393 struct tcp_rack *rack; 6394 6395 rack = (struct tcp_rack *)tp->t_fb_ptr; 6396 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6397 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6398 if (rack->r_state && (rack->r_state != tp->t_state)) 6399 rack_set_state(tp, rack); 6400 /* 6401 * Ideally we would like to be able to 6402 * mark SACK-PASS on anything not acked here. 6403 * 6404 * However, if we do that we would burst out 6405 * all that data 1ms apart. This would be unwise, 6406 * so for now we will just let the normal rxt timer 6407 * and tlp timer take care of it. 6408 * 6409 * Also we really need to stick them back in sequence 6410 * order. This way we send in the proper order and any 6411 * sacks that come floating in will "re-ack" the data. 6412 * To do this we zap the tmap with an INIT and then 6413 * walk through and place every rsm in the RB tree 6414 * back in its seq ordered place. 
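 *
 * Rebuild sketch (what the loop below does): after TAILQ_INIT we walk
 * the RB tree in sequence order, appending each rsm after the previous
 * one, clearing RACK_ACKED/RACK_SACK_PASSED and marking it
 * RACK_MUST_RXT so it is again eligible for retransmission.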
6415 */ 6416 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6417 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6418 rsm->r_dupack = 0; 6419 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6420 /* We must re-add it back to the tlist */ 6421 if (trsm == NULL) { 6422 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6423 } else { 6424 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6425 } 6426 rsm->r_in_tmap = 1; 6427 trsm = rsm; 6428 if (rsm->r_flags & RACK_ACKED) 6429 rsm->r_flags |= RACK_WAS_ACKED; 6430 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 6431 rsm->r_flags |= RACK_MUST_RXT; 6432 } 6433 /* Clear the count (we just un-acked them) */ 6434 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6435 rack->r_ctl.rc_sacked = 0; 6436 rack->r_ctl.rc_sacklast = NULL; 6437 rack->r_ctl.rc_agg_delayed = 0; 6438 rack->r_early = 0; 6439 rack->r_ctl.rc_agg_early = 0; 6440 rack->r_late = 0; 6441 /* Clear the tlp rtx mark */ 6442 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6443 if (rack->r_ctl.rc_resend != NULL) 6444 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6445 rack->r_ctl.rc_prr_sndcnt = 0; 6446 rack_log_to_prr(rack, 6, 0, __LINE__); 6447 rack->r_timer_override = 1; 6448 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6449 #ifdef NETFLIX_EXP_DETECTION 6450 || (rack->sack_attack_disable != 0) 6451 #endif 6452 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6453 /* 6454 * For non-sack customers new data 6455 * needs to go out as retransmits until 6456 * we retransmit up to snd_max. 6457 */ 6458 rack->r_must_retran = 1; 6459 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6460 rack->r_ctl.rc_sacked); 6461 } 6462 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6463 } 6464 6465 static void 6466 rack_convert_rtts(struct tcpcb *tp) 6467 { 6468 if (tp->t_srtt > 1) { 6469 uint32_t val, frac; 6470 6471 val = tp->t_srtt >> TCP_RTT_SHIFT; 6472 frac = tp->t_srtt & 0x1f; 6473 tp->t_srtt = TICKS_2_USEC(val); 6474 /* 6475 * frac is the fractional part of the srtt (if any) 6476 * but its in ticks and every bit represents 6477 * 1/32nd of a hz. 6478 */ 6479 if (frac) { 6480 if (hz == 1000) { 6481 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6482 } else { 6483 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6484 } 6485 tp->t_srtt += frac; 6486 } 6487 } 6488 if (tp->t_rttvar) { 6489 uint32_t val, frac; 6490 6491 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6492 frac = tp->t_rttvar & 0x1f; 6493 tp->t_rttvar = TICKS_2_USEC(val); 6494 /* 6495 * frac is the fractional part of the srtt (if any) 6496 * but its in ticks and every bit represents 6497 * 1/32nd of a hz. 
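 *
 * Worked example for the srtt case above, assuming TCP_RTT_SHIFT is 5
 * and hz = 1000 (illustration only): a stored value of 42 gives
 * val = 42 >> 5 = 1 tick = 1000 usecs and frac = 42 & 0x1f = 10,
 * which converts to 10 * 1000 / 32 = 312 usecs, for a total of
 * roughly 1312 usecs.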
6498 */ 6499 if (frac) { 6500 if (hz == 1000) { 6501 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6502 } else { 6503 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6504 } 6505 tp->t_rttvar += frac; 6506 } 6507 } 6508 tp->t_rxtcur = RACK_REXMTVAL(tp); 6509 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6510 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6511 } 6512 if (tp->t_rxtcur > rack_rto_max) { 6513 tp->t_rxtcur = rack_rto_max; 6514 } 6515 } 6516 6517 static void 6518 rack_cc_conn_init(struct tcpcb *tp) 6519 { 6520 struct tcp_rack *rack; 6521 uint32_t srtt; 6522 6523 rack = (struct tcp_rack *)tp->t_fb_ptr; 6524 srtt = tp->t_srtt; 6525 cc_conn_init(tp); 6526 /* 6527 * Now convert to rack's internal format, 6528 * if required. 6529 */ 6530 if ((srtt == 0) && (tp->t_srtt != 0)) 6531 rack_convert_rtts(tp); 6532 /* 6533 * We want a chance to stay in slowstart as 6534 * we create a connection. TCP spec says that 6535 * initially ssthresh is infinite. For our 6536 * purposes that is the snd_wnd. 6537 */ 6538 if (tp->snd_ssthresh < tp->snd_wnd) { 6539 tp->snd_ssthresh = tp->snd_wnd; 6540 } 6541 /* 6542 * We also want to assure a IW worth of 6543 * data can get inflight. 6544 */ 6545 if (rc_init_window(rack) < tp->snd_cwnd) 6546 tp->snd_cwnd = rc_init_window(rack); 6547 } 6548 6549 /* 6550 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6551 * we will setup to retransmit the lowest seq number outstanding. 6552 */ 6553 static int 6554 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6555 { 6556 struct inpcb *inp = tptoinpcb(tp); 6557 int32_t rexmt; 6558 int32_t retval = 0; 6559 bool isipv6; 6560 6561 if ((tp->t_flags & TF_GPUTINPROG) && 6562 (tp->t_rxtshift)) { 6563 /* 6564 * We have had a second timeout 6565 * measurements on successive rxt's are not profitable. 6566 * It is unlikely to be of any use (the network is 6567 * broken or the client went away). 6568 */ 6569 tp->t_flags &= ~TF_GPUTINPROG; 6570 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6571 rack->r_ctl.rc_gp_srtt /*flex1*/, 6572 tp->gput_seq, 6573 0, 0, 18, __LINE__, NULL, 0); 6574 } 6575 if (ctf_progress_timeout_check(tp, false)) { 6576 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6577 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6578 return (-ETIMEDOUT); /* tcp_drop() */ 6579 } 6580 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6581 rack->r_ctl.retran_during_recovery = 0; 6582 rack->rc_ack_required = 1; 6583 rack->r_ctl.dsack_byte_cnt = 0; 6584 if (IN_FASTRECOVERY(tp->t_flags)) 6585 tp->t_flags |= TF_WASFRECOVERY; 6586 else 6587 tp->t_flags &= ~TF_WASFRECOVERY; 6588 if (IN_CONGRECOVERY(tp->t_flags)) 6589 tp->t_flags |= TF_WASCRECOVERY; 6590 else 6591 tp->t_flags &= ~TF_WASCRECOVERY; 6592 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6593 (tp->snd_una == tp->snd_max)) { 6594 /* Nothing outstanding .. nothing to do */ 6595 return (0); 6596 } 6597 if (rack->r_ctl.dsack_persist) { 6598 rack->r_ctl.dsack_persist--; 6599 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6600 rack->r_ctl.num_dsack = 0; 6601 } 6602 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6603 } 6604 /* 6605 * Rack can only run one timer at a time, so we cannot 6606 * run a KEEPINIT (gating SYN sending) and a retransmit 6607 * timer for the SYN. 
So if we are in a front state and 6608 * have a KEEPINIT timer we need to check the first transmit 6609 * against now to see if we have exceeded the KEEPINIT time 6610 * (if one is set). 6611 */ 6612 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6613 (TP_KEEPINIT(tp) != 0)) { 6614 struct rack_sendmap *rsm; 6615 6616 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6617 if (rsm) { 6618 /* Ok we have something outstanding to test keepinit with */ 6619 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6620 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6621 /* We have exceeded the KEEPINIT time */ 6622 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6623 goto drop_it; 6624 } 6625 } 6626 } 6627 /* 6628 * Retransmission timer went off. Message has not been acked within 6629 * retransmit interval. Back off to a longer retransmit interval 6630 * and retransmit one segment. 6631 */ 6632 rack_remxt_tmr(tp); 6633 if ((rack->r_ctl.rc_resend == NULL) || 6634 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6635 /* 6636 * If the rwnd collapsed on 6637 * the one we are retransmitting 6638 * it does not count against the 6639 * rxt count. 6640 */ 6641 tp->t_rxtshift++; 6642 } 6643 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6644 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6645 drop_it: 6646 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6647 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6648 /* XXXGL: previously t_softerror was casted to uint16_t */ 6649 MPASS(tp->t_softerror >= 0); 6650 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6651 goto out; /* tcp_drop() */ 6652 } 6653 if (tp->t_state == TCPS_SYN_SENT) { 6654 /* 6655 * If the SYN was retransmitted, indicate CWND to be limited 6656 * to 1 segment in cc_conn_init(). 6657 */ 6658 tp->snd_cwnd = 1; 6659 } else if (tp->t_rxtshift == 1) { 6660 /* 6661 * first retransmit; record ssthresh and cwnd so they can be 6662 * recovered if this turns out to be a "bad" retransmit. A 6663 * retransmit is considered "bad" if an ACK for this segment 6664 * is received within RTT/2 interval; the assumption here is 6665 * that the ACK was already in flight. See "On Estimating 6666 * End-to-End Network Path Properties" by Allman and Paxson 6667 * for more details. 6668 */ 6669 tp->snd_cwnd_prev = tp->snd_cwnd; 6670 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6671 tp->snd_recover_prev = tp->snd_recover; 6672 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6673 tp->t_flags |= TF_PREVVALID; 6674 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6675 tp->t_flags &= ~TF_PREVVALID; 6676 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6677 if ((tp->t_state == TCPS_SYN_SENT) || 6678 (tp->t_state == TCPS_SYN_RECEIVED)) 6679 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6680 else 6681 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6682 6683 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6684 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6685 /* 6686 * We enter the path for PLMTUD if connection is established or, if 6687 * connection is FIN_WAIT_1 status, reason for the last is that if 6688 * amount of data we send is very small, we could send it in couple 6689 * of packets and process straight to FIN. In that case we won't 6690 * catch ESTABLISHED state. 6691 */ 6692 #ifdef INET6 6693 isipv6 = (inp->inp_vflag & INP_IPV6) ? 
true : false; 6694 #else 6695 isipv6 = false; 6696 #endif 6697 if (((V_tcp_pmtud_blackhole_detect == 1) || 6698 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6699 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6700 ((tp->t_state == TCPS_ESTABLISHED) || 6701 (tp->t_state == TCPS_FIN_WAIT_1))) { 6702 /* 6703 * Idea here is that at each stage of mtu probe (usually, 6704 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6705 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6706 * should take care of that. 6707 */ 6708 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6709 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6710 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6711 tp->t_rxtshift % 2 == 0)) { 6712 /* 6713 * Enter Path MTU Black-hole Detection mechanism: - 6714 * Disable Path MTU Discovery (IP "DF" bit). - 6715 * Reduce MTU to lower value than what we negotiated 6716 * with peer. 6717 */ 6718 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6719 /* Record that we may have found a black hole. */ 6720 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6721 /* Keep track of previous MSS. */ 6722 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6723 } 6724 6725 /* 6726 * Reduce the MSS to blackhole value or to the 6727 * default in an attempt to retransmit. 6728 */ 6729 #ifdef INET6 6730 if (isipv6 && 6731 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6732 /* Use the sysctl tuneable blackhole MSS. */ 6733 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6734 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6735 } else if (isipv6) { 6736 /* Use the default MSS. */ 6737 tp->t_maxseg = V_tcp_v6mssdflt; 6738 /* 6739 * Disable Path MTU Discovery when we switch 6740 * to minmss. 6741 */ 6742 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6743 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6744 } 6745 #endif 6746 #if defined(INET6) && defined(INET) 6747 else 6748 #endif 6749 #ifdef INET 6750 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6751 /* Use the sysctl tuneable blackhole MSS. */ 6752 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6753 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6754 } else { 6755 /* Use the default MSS. */ 6756 tp->t_maxseg = V_tcp_mssdflt; 6757 /* 6758 * Disable Path MTU Discovery when we switch 6759 * to minmss. 6760 */ 6761 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6762 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6763 } 6764 #endif 6765 } else { 6766 /* 6767 * If further retransmissions are still unsuccessful 6768 * with a lowered MTU, maybe this isn't a blackhole 6769 * and we restore the previous MSS and blackhole 6770 * detection flags. The limit '6' is determined by 6771 * giving each probe stage (1448, 1188, 524) 2 6772 * chances to recover. 6773 */ 6774 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6775 (tp->t_rxtshift >= 6)) { 6776 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6777 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6778 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6779 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6780 } 6781 } 6782 } 6783 /* 6784 * Disable RFC1323 and SACK if we haven't got any response to 6785 * our third SYN to work-around some broken terminal servers 6786 * (most of which have hopefully been retired) that have bad VJ 6787 * header compression code which trashes TCP segments containing 6788 * unknown-to-them TCP options. 
6789 */ 6790 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6791 (tp->t_rxtshift == 3)) 6792 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6793 /* 6794 * If we backed off this far, our srtt estimate is probably bogus. 6795 * Clobber it so we'll take the next rtt measurement as our srtt; 6796 * move the current srtt into rttvar to keep the current retransmit 6797 * times until then. 6798 */ 6799 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6800 #ifdef INET6 6801 if ((inp->inp_vflag & INP_IPV6) != 0) 6802 in6_losing(inp); 6803 else 6804 #endif 6805 in_losing(inp); 6806 tp->t_rttvar += tp->t_srtt; 6807 tp->t_srtt = 0; 6808 } 6809 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6810 tp->snd_recover = tp->snd_max; 6811 tp->t_flags |= TF_ACKNOW; 6812 tp->t_rtttime = 0; 6813 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6814 out: 6815 return (retval); 6816 } 6817 6818 static int 6819 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6820 { 6821 int32_t ret = 0; 6822 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6823 6824 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6825 (tp->t_flags & TF_GPUTINPROG)) { 6826 /* 6827 * We have a goodput in progress 6828 * and we have entered a late state. 6829 * Do we have enough data in the sb 6830 * to handle the GPUT request? 6831 */ 6832 uint32_t bytes; 6833 6834 bytes = tp->gput_ack - tp->gput_seq; 6835 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6836 bytes += tp->gput_seq - tp->snd_una; 6837 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 6838 /* 6839 * There are not enough bytes in the socket 6840 * buffer that have been sent to cover this 6841 * measurement. Cancel it. 6842 */ 6843 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6844 rack->r_ctl.rc_gp_srtt /*flex1*/, 6845 tp->gput_seq, 6846 0, 0, 18, __LINE__, NULL, 0); 6847 tp->t_flags &= ~TF_GPUTINPROG; 6848 } 6849 } 6850 if (timers == 0) { 6851 return (0); 6852 } 6853 if (tp->t_state == TCPS_LISTEN) { 6854 /* no timers on listen sockets */ 6855 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6856 return (0); 6857 return (1); 6858 } 6859 if ((timers & PACE_TMR_RACK) && 6860 rack->rc_on_min_to) { 6861 /* 6862 * For the rack timer when we 6863 * are on a min-timeout (which means rrr_conf = 3) 6864 * we don't want to check the timer. It may 6865 * be going off for a pace and thats ok we 6866 * want to send the retransmit (if its ready). 6867 * 6868 * If its on a normal rack timer (non-min) then 6869 * we will check if its expired. 6870 */ 6871 goto skip_time_check; 6872 } 6873 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6874 uint32_t left; 6875 6876 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6877 ret = -1; 6878 rack_log_to_processing(rack, cts, ret, 0); 6879 return (0); 6880 } 6881 if (hpts_calling == 0) { 6882 /* 6883 * A user send or queued mbuf (sack) has called us? We 6884 * return 0 and let the pacing guards 6885 * deal with it if they should or 6886 * should not cause a send. 6887 */ 6888 ret = -2; 6889 rack_log_to_processing(rack, cts, ret, 0); 6890 return (0); 6891 } 6892 /* 6893 * Ok our timer went off early and we are not paced false 6894 * alarm, go back to sleep. 
6895 */ 6896 ret = -3; 6897 left = rack->r_ctl.rc_timer_exp - cts; 6898 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left)); 6899 rack_log_to_processing(rack, cts, ret, left); 6900 return (1); 6901 } 6902 skip_time_check: 6903 rack->rc_tmr_stopped = 0; 6904 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6905 if (timers & PACE_TMR_DELACK) { 6906 ret = rack_timeout_delack(tp, rack, cts); 6907 } else if (timers & PACE_TMR_RACK) { 6908 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6909 rack->r_fast_output = 0; 6910 ret = rack_timeout_rack(tp, rack, cts); 6911 } else if (timers & PACE_TMR_TLP) { 6912 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6913 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 6914 } else if (timers & PACE_TMR_RXT) { 6915 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6916 rack->r_fast_output = 0; 6917 ret = rack_timeout_rxt(tp, rack, cts); 6918 } else if (timers & PACE_TMR_PERSIT) { 6919 ret = rack_timeout_persist(tp, rack, cts); 6920 } else if (timers & PACE_TMR_KEEP) { 6921 ret = rack_timeout_keepalive(tp, rack, cts); 6922 } 6923 rack_log_to_processing(rack, cts, ret, timers); 6924 return (ret); 6925 } 6926 6927 static void 6928 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6929 { 6930 struct timeval tv; 6931 uint32_t us_cts, flags_on_entry; 6932 uint8_t hpts_removed = 0; 6933 6934 flags_on_entry = rack->r_ctl.rc_hpts_flags; 6935 us_cts = tcp_get_usecs(&tv); 6936 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 6937 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 6938 ((tp->snd_max - tp->snd_una) == 0))) { 6939 tcp_hpts_remove(rack->rc_inp); 6940 hpts_removed = 1; 6941 /* If we were not delayed cancel out the flag. */ 6942 if ((tp->snd_max - tp->snd_una) == 0) 6943 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6944 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6945 } 6946 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 6947 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6948 if (tcp_in_hpts(rack->rc_inp) && 6949 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 6950 /* 6951 * Canceling timer's when we have no output being 6952 * paced. We also must remove ourselves from the 6953 * hpts. 6954 */ 6955 tcp_hpts_remove(rack->rc_inp); 6956 hpts_removed = 1; 6957 } 6958 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 6959 } 6960 if (hpts_removed == 0) 6961 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6962 } 6963 6964 static int 6965 rack_stopall(struct tcpcb *tp) 6966 { 6967 struct tcp_rack *rack; 6968 rack = (struct tcp_rack *)tp->t_fb_ptr; 6969 rack->t_timers_stopped = 1; 6970 return (0); 6971 } 6972 6973 static void 6974 rack_stop_all_timers(struct tcpcb *tp) 6975 { 6976 struct tcp_rack *rack; 6977 6978 /* 6979 * Assure no timers are running. 
6980 */ 6981 if (tcp_timer_active(tp, TT_PERSIST)) { 6982 /* We enter in persists, set the flag appropriately */ 6983 rack = (struct tcp_rack *)tp->t_fb_ptr; 6984 rack->rc_in_persist = 1; 6985 } 6986 } 6987 6988 static void 6989 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 6990 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 6991 { 6992 int32_t idx; 6993 6994 rsm->r_rtr_cnt++; 6995 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6996 rsm->r_dupack = 0; 6997 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 6998 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 6999 rsm->r_flags |= RACK_OVERMAX; 7000 } 7001 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7002 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7003 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7004 } 7005 idx = rsm->r_rtr_cnt - 1; 7006 rsm->r_tim_lastsent[idx] = ts; 7007 /* 7008 * Here we don't add in the len of send, since its already 7009 * in snduna <->snd_max. 7010 */ 7011 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7012 rack->r_ctl.rc_sacked); 7013 if (rsm->r_flags & RACK_ACKED) { 7014 /* Problably MTU discovery messing with us */ 7015 rsm->r_flags &= ~RACK_ACKED; 7016 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7017 } 7018 if (rsm->r_in_tmap) { 7019 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7020 rsm->r_in_tmap = 0; 7021 } 7022 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7023 rsm->r_in_tmap = 1; 7024 /* Take off the must retransmit flag, if its on */ 7025 if (rsm->r_flags & RACK_MUST_RXT) { 7026 if (rack->r_must_retran) 7027 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7028 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7029 /* 7030 * We have retransmitted all we need. Clear 7031 * any must retransmit flags. 7032 */ 7033 rack->r_must_retran = 0; 7034 rack->r_ctl.rc_out_at_rto = 0; 7035 } 7036 rsm->r_flags &= ~RACK_MUST_RXT; 7037 } 7038 if (rsm->r_flags & RACK_SACK_PASSED) { 7039 /* We have retransmitted due to the SACK pass */ 7040 rsm->r_flags &= ~RACK_SACK_PASSED; 7041 rsm->r_flags |= RACK_WAS_SACKPASS; 7042 } 7043 } 7044 7045 static uint32_t 7046 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7047 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7048 { 7049 /* 7050 * We (re-)transmitted starting at rsm->r_start for some length 7051 * (possibly less than r_end. 7052 */ 7053 struct rack_sendmap *nrsm; 7054 #ifdef INVARIANTS 7055 struct rack_sendmap *insret; 7056 #endif 7057 uint32_t c_end; 7058 int32_t len; 7059 7060 len = *lenp; 7061 c_end = rsm->r_start + len; 7062 if (SEQ_GEQ(c_end, rsm->r_end)) { 7063 /* 7064 * We retransmitted the whole piece or more than the whole 7065 * slopping into the next rsm. 7066 */ 7067 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7068 if (c_end == rsm->r_end) { 7069 *lenp = 0; 7070 return (0); 7071 } else { 7072 int32_t act_len; 7073 7074 /* Hangs over the end return whats left */ 7075 act_len = rsm->r_end - rsm->r_start; 7076 *lenp = (len - act_len); 7077 return (rsm->r_end); 7078 } 7079 /* We don't get out of this block. */ 7080 } 7081 /* 7082 * Here we retransmitted less than the whole thing which means we 7083 * have to split this into what was transmitted and what was not. 7084 */ 7085 nrsm = rack_alloc_full_limit(rack); 7086 if (nrsm == NULL) { 7087 /* 7088 * We can't get memory, so lets not proceed. 7089 */ 7090 *lenp = 0; 7091 return (0); 7092 } 7093 /* 7094 * So here we are going to take the original rsm and make it what we 7095 * retransmitted. 
nrsm will be the tail portion we did not 7096 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7097 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7098 * 1, 6 and the new piece will be 6, 11. 7099 */ 7100 rack_clone_rsm(rack, nrsm, rsm, c_end); 7101 nrsm->r_dupack = 0; 7102 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7103 #ifndef INVARIANTS 7104 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7105 #else 7106 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7107 if (insret != NULL) { 7108 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7109 nrsm, insret, rack, rsm); 7110 } 7111 #endif 7112 if (rsm->r_in_tmap) { 7113 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7114 nrsm->r_in_tmap = 1; 7115 } 7116 rsm->r_flags &= (~RACK_HAS_FIN); 7117 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7118 /* Log a split of rsm into rsm and nrsm */ 7119 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7120 *lenp = 0; 7121 return (0); 7122 } 7123 7124 static void 7125 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7126 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7127 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7128 { 7129 struct tcp_rack *rack; 7130 struct rack_sendmap *rsm, *nrsm, fe; 7131 #ifdef INVARIANTS 7132 struct rack_sendmap *insret; 7133 #endif 7134 register uint32_t snd_max, snd_una; 7135 7136 /* 7137 * Add to the RACK log of packets in flight or retransmitted. If 7138 * there is a TS option we will use the TS echoed, if not we will 7139 * grab a TS. 7140 * 7141 * Retransmissions will increment the count and move the ts to its 7142 * proper place. Note that if options do not include TS's then we 7143 * won't be able to effectively use the ACK for an RTT on a retran. 7144 * 7145 * Notes about r_start and r_end. Lets consider a send starting at 7146 * sequence 1 for 10 bytes. In such an example the r_start would be 7147 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7148 * This means that r_end is actually the first sequence for the next 7149 * slot (11). 7150 * 7151 */ 7152 /* 7153 * If err is set what do we do XXXrrs? should we not add the thing? 7154 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7155 * i.e. proceed with add ** do this for now. 7156 */ 7157 INP_WLOCK_ASSERT(tptoinpcb(tp)); 7158 if (err) 7159 /* 7160 * We don't log errors -- we could but snd_max does not 7161 * advance in this case either. 7162 */ 7163 return; 7164 7165 if (th_flags & TH_RST) { 7166 /* 7167 * We don't log resets and we return immediately from 7168 * sending 7169 */ 7170 return; 7171 } 7172 rack = (struct tcp_rack *)tp->t_fb_ptr; 7173 snd_una = tp->snd_una; 7174 snd_max = tp->snd_max; 7175 if (th_flags & (TH_SYN | TH_FIN)) { 7176 /* 7177 * The call to rack_log_output is made before bumping 7178 * snd_max. This means we can record one extra byte on a SYN 7179 * or FIN if seq_out is adding more on and a FIN is present 7180 * (and we are not resending). 7181 */ 7182 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7183 len++; 7184 if (th_flags & TH_FIN) 7185 len++; 7186 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7187 /* 7188 * The add/update as not been done for the FIN/SYN 7189 * yet. 7190 */ 7191 snd_max = tp->snd_nxt; 7192 } 7193 } 7194 if (SEQ_LEQ((seq_out + len), snd_una)) { 7195 /* Are sending an old segment to induce an ack (keep-alive)? 
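		 * (seq_out + len at or below snd_una means every byte of
		 * this send is already acked, so there is nothing new to
		 * track in the map.)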
*/ 7196 return; 7197 } 7198 if (SEQ_LT(seq_out, snd_una)) { 7199 /* huh? should we panic? */ 7200 uint32_t end; 7201 7202 end = seq_out + len; 7203 seq_out = snd_una; 7204 if (SEQ_GEQ(end, seq_out)) 7205 len = end - seq_out; 7206 else 7207 len = 0; 7208 } 7209 if (len == 0) { 7210 /* We don't log zero window probes */ 7211 return; 7212 } 7213 if (IN_FASTRECOVERY(tp->t_flags)) { 7214 rack->r_ctl.rc_prr_out += len; 7215 } 7216 /* First question is it a retransmission or new? */ 7217 if (seq_out == snd_max) { 7218 /* Its new */ 7219 again: 7220 rsm = rack_alloc(rack); 7221 if (rsm == NULL) { 7222 /* 7223 * Hmm out of memory and the tcb got destroyed while 7224 * we tried to wait. 7225 */ 7226 return; 7227 } 7228 if (th_flags & TH_FIN) { 7229 rsm->r_flags = RACK_HAS_FIN|add_flag; 7230 } else { 7231 rsm->r_flags = add_flag; 7232 } 7233 if (hw_tls) 7234 rsm->r_hw_tls = 1; 7235 rsm->r_tim_lastsent[0] = cts; 7236 rsm->r_rtr_cnt = 1; 7237 rsm->r_rtr_bytes = 0; 7238 if (th_flags & TH_SYN) { 7239 /* The data space is one beyond snd_una */ 7240 rsm->r_flags |= RACK_HAS_SYN; 7241 } 7242 rsm->r_start = seq_out; 7243 rsm->r_end = rsm->r_start + len; 7244 rsm->r_dupack = 0; 7245 /* 7246 * save off the mbuf location that 7247 * sndmbuf_noadv returned (which is 7248 * where we started copying from).. 7249 */ 7250 rsm->m = s_mb; 7251 rsm->soff = s_moff; 7252 /* 7253 * Here we do add in the len of send, since its not yet 7254 * reflected in in snduna <->snd_max 7255 */ 7256 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7257 rack->r_ctl.rc_sacked) + 7258 (rsm->r_end - rsm->r_start)); 7259 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7260 if (rsm->m) { 7261 if (rsm->m->m_len <= rsm->soff) { 7262 /* 7263 * XXXrrs Question, will this happen? 7264 * 7265 * If sbsndptr is set at the correct place 7266 * then s_moff should always be somewhere 7267 * within rsm->m. But if the sbsndptr was 7268 * off then that won't be true. If it occurs 7269 * we need to walkout to the correct location. 7270 */ 7271 struct mbuf *lm; 7272 7273 lm = rsm->m; 7274 while (lm->m_len <= rsm->soff) { 7275 rsm->soff -= lm->m_len; 7276 lm = lm->m_next; 7277 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7278 __func__, rack, s_moff, s_mb, rsm->soff)); 7279 } 7280 rsm->m = lm; 7281 } 7282 rsm->orig_m_len = rsm->m->m_len; 7283 } else 7284 rsm->orig_m_len = 0; 7285 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7286 /* Log a new rsm */ 7287 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7288 #ifndef INVARIANTS 7289 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7290 #else 7291 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7292 if (insret != NULL) { 7293 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7294 nrsm, insret, rack, rsm); 7295 } 7296 #endif 7297 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7298 rsm->r_in_tmap = 1; 7299 /* 7300 * Special case detection, is there just a single 7301 * packet outstanding when we are not in recovery? 7302 * 7303 * If this is true mark it so. 7304 */ 7305 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7306 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7307 struct rack_sendmap *prsm; 7308 7309 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7310 if (prsm) 7311 prsm->r_one_out_nr = 1; 7312 } 7313 return; 7314 } 7315 /* 7316 * If we reach here its a retransmission and we need to find it. 
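	 * We try the caller's hint (hintrsm) first and otherwise fall
	 * back to an RB-tree lookup keyed on seq_out.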
7317 */ 7318 memset(&fe, 0, sizeof(fe)); 7319 more: 7320 if (hintrsm && (hintrsm->r_start == seq_out)) { 7321 rsm = hintrsm; 7322 hintrsm = NULL; 7323 } else { 7324 /* No hints sorry */ 7325 rsm = NULL; 7326 } 7327 if ((rsm) && (rsm->r_start == seq_out)) { 7328 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7329 if (len == 0) { 7330 return; 7331 } else { 7332 goto more; 7333 } 7334 } 7335 /* Ok it was not the last pointer go through it the hard way. */ 7336 refind: 7337 fe.r_start = seq_out; 7338 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7339 if (rsm) { 7340 if (rsm->r_start == seq_out) { 7341 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7342 if (len == 0) { 7343 return; 7344 } else { 7345 goto refind; 7346 } 7347 } 7348 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7349 /* Transmitted within this piece */ 7350 /* 7351 * Ok we must split off the front and then let the 7352 * update do the rest 7353 */ 7354 nrsm = rack_alloc_full_limit(rack); 7355 if (nrsm == NULL) { 7356 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7357 return; 7358 } 7359 /* 7360 * copy rsm to nrsm and then trim the front of rsm 7361 * to not include this part. 7362 */ 7363 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7364 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7365 #ifndef INVARIANTS 7366 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7367 #else 7368 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7369 if (insret != NULL) { 7370 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7371 nrsm, insret, rack, rsm); 7372 } 7373 #endif 7374 if (rsm->r_in_tmap) { 7375 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7376 nrsm->r_in_tmap = 1; 7377 } 7378 rsm->r_flags &= (~RACK_HAS_FIN); 7379 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7380 if (len == 0) { 7381 return; 7382 } else if (len > 0) 7383 goto refind; 7384 } 7385 } 7386 /* 7387 * Hmm not found in map did they retransmit both old and on into the 7388 * new? 7389 */ 7390 if (seq_out == tp->snd_max) { 7391 goto again; 7392 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7393 #ifdef INVARIANTS 7394 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7395 seq_out, len, tp->snd_una, tp->snd_max); 7396 printf("Starting Dump of all rack entries\n"); 7397 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7398 printf("rsm:%p start:%u end:%u\n", 7399 rsm, rsm->r_start, rsm->r_end); 7400 } 7401 printf("Dump complete\n"); 7402 panic("seq_out not found rack:%p tp:%p", 7403 rack, tp); 7404 #endif 7405 } else { 7406 #ifdef INVARIANTS 7407 /* 7408 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7409 * flag) 7410 */ 7411 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7412 seq_out, len, tp->snd_max, tp); 7413 #endif 7414 } 7415 } 7416 7417 /* 7418 * Record one of the RTT updates from an ack into 7419 * our sample structure. 
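 * A single ack can yield several samples (one per newly acked or
 * sacked rsm); we keep the lowest, the highest and a running sum so
 * that the commit step can apply whichever value the configured
 * rc_rate_sample_method asks for.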
 */

static void
tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
    int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
{
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
	}
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
	}
	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
		if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
			rack->r_ctl.rc_gp_lowrtt = us_rtt;
		if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
			rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
	}
	if ((confidence == 1) &&
	    ((rsm == NULL) ||
	     (rsm->r_just_ret) ||
	     (rsm->r_one_out_nr &&
	      len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
		/*
		 * If the rsm had a just-return hit on it then we can't
		 * trust the rtt measurement for buffer determination.
		 * Note that a confidence of 2 indicates SACK'd, which
		 * overrides the r_just_ret or the r_one_out_nr. If it
		 * was a CUM-ACK and we had only two outstanding but get
		 * an ack for only one, that also lowers our confidence.
		 */
		confidence = 0;
	}
	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
		if (rack->r_ctl.rack_rs.confidence == 0) {
			/*
			 * We take anything with no current confidence
			 * saved.
			 */
			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
			rack->r_ctl.rack_rs.confidence = confidence;
			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
		} else if (confidence || rack->r_ctl.rack_rs.confidence) {
			/*
			 * Once we have a confident number,
			 * we can update it with a smaller
			 * value since this confident number
			 * may include the DSACK time until
			 * the next segment (the second one) arrived.
			 */
			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
			rack->r_ctl.rack_rs.confidence = confidence;
			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
		}
	}
	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
	rack->r_ctl.rack_rs.rs_rtt_cnt++;
}
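/*
 * Illustrative sketch only (kept out of the build): a userland-style
 * model of the sample/commit pair, using hypothetical names, that
 * shows how the per-ack samples are reduced to lowest/highest/sum
 * and then folded into the smoothed values with 1/8 gains as the
 * commit function below does.
 */
#if 0
struct rtt_sample_set {
	uint32_t lowest;	/* smallest RTT carried by this ack */
	uint32_t highest;	/* largest RTT carried by this ack */
	uint64_t total;		/* running sum, for the average method */
	uint32_t cnt;
};

static void
rtt_sample_add(struct rtt_sample_set *rs, uint32_t rtt)
{
	if (rs->cnt == 0 || rtt < rs->lowest)
		rs->lowest = rtt;
	if (rs->cnt == 0 || rtt > rs->highest)
		rs->highest = rtt;
	rs->total += rtt;
	rs->cnt++;
}

static void
rtt_sample_commit(struct rtt_sample_set *rs, int32_t *srtt, int32_t *rttvar)
{
	int32_t rtt, delta;

	if (rs->cnt == 0)
		return;
	rtt = (int32_t)rs->lowest;	/* USE_RTT_LOW style selection */
	if (*srtt != 0) {
		delta = *srtt - rtt;
		*srtt -= (*srtt >> 3);
		*srtt += (rtt >> 3);
		if (delta < 0)
			delta = -delta;
		*rttvar -= (*rttvar >> 3);
		*rttvar += (delta >> 3);
	} else {
		/* First sample: seed srtt and set rttvar to half of it. */
		*srtt = rtt;
		*rttvar = rtt >> 1;
	}
}
#endif

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.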
7490 */ 7491 static void 7492 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7493 { 7494 int32_t delta; 7495 int32_t rtt; 7496 7497 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7498 /* No valid sample */ 7499 return; 7500 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7501 /* We are to use the lowest RTT seen in a single ack */ 7502 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7503 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7504 /* We are to use the highest RTT seen in a single ack */ 7505 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7506 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7507 /* We are to use the average RTT seen in a single ack */ 7508 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7509 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7510 } else { 7511 #ifdef INVARIANTS 7512 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7513 #endif 7514 return; 7515 } 7516 if (rtt == 0) 7517 rtt = 1; 7518 if (rack->rc_gp_rtt_set == 0) { 7519 /* 7520 * With no RTT we have to accept 7521 * even one we are not confident of. 7522 */ 7523 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7524 rack->rc_gp_rtt_set = 1; 7525 } else if (rack->r_ctl.rack_rs.confidence) { 7526 /* update the running gp srtt */ 7527 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7528 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7529 } 7530 if (rack->r_ctl.rack_rs.confidence) { 7531 /* 7532 * record the low and high for highly buffered path computation, 7533 * we only do this if we are confident (not a retransmission). 7534 */ 7535 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7536 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7537 } 7538 if (rack->rc_highly_buffered == 0) { 7539 /* 7540 * Currently once we declare a path has 7541 * highly buffered there is no going 7542 * back, which may be a problem... 7543 */ 7544 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7545 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7546 rack->r_ctl.rc_highest_us_rtt, 7547 rack->r_ctl.rc_lowest_us_rtt, 7548 RACK_RTTS_SEEHBP); 7549 rack->rc_highly_buffered = 1; 7550 } 7551 } 7552 } 7553 if ((rack->r_ctl.rack_rs.confidence) || 7554 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7555 /* 7556 * If we are highly confident of it <or> it was 7557 * never retransmitted we accept it as the last us_rtt. 7558 */ 7559 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7560 /* The lowest rtt can be set if its was not retransmited */ 7561 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7562 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7563 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7564 rack->r_ctl.rc_lowest_us_rtt = 1; 7565 } 7566 } 7567 rack = (struct tcp_rack *)tp->t_fb_ptr; 7568 if (tp->t_srtt != 0) { 7569 /* 7570 * We keep a simple srtt in microseconds, like our rtt 7571 * measurement. We don't need to do any tricks with shifting 7572 * etc. Instead we just add in 1/8th of the new measurement 7573 * and subtract out 1/8 of the old srtt. We do the same with 7574 * the variance after finding the absolute value of the 7575 * difference between this sample and the current srtt. 
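		 *
		 * For example, srtt = 40000 usec and a new rtt of
		 * 32000 usec gives srtt = 40000 - 5000 + 4000 = 39000;
		 * with rttvar = 8000 the |delta| of 8000 leaves rttvar
		 * at 8000 - 1000 + 1000 = 8000.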
7576 */ 7577 delta = tp->t_srtt - rtt; 7578 /* Take off 1/8th of the current sRTT */ 7579 tp->t_srtt -= (tp->t_srtt >> 3); 7580 /* Add in 1/8th of the new RTT just measured */ 7581 tp->t_srtt += (rtt >> 3); 7582 if (tp->t_srtt <= 0) 7583 tp->t_srtt = 1; 7584 /* Now lets make the absolute value of the variance */ 7585 if (delta < 0) 7586 delta = -delta; 7587 /* Subtract out 1/8th */ 7588 tp->t_rttvar -= (tp->t_rttvar >> 3); 7589 /* Add in 1/8th of the new variance we just saw */ 7590 tp->t_rttvar += (delta >> 3); 7591 if (tp->t_rttvar <= 0) 7592 tp->t_rttvar = 1; 7593 } else { 7594 /* 7595 * No rtt measurement yet - use the unsmoothed rtt. Set the 7596 * variance to half the rtt (so our first retransmit happens 7597 * at 3*rtt). 7598 */ 7599 tp->t_srtt = rtt; 7600 tp->t_rttvar = rtt >> 1; 7601 } 7602 rack->rc_srtt_measure_made = 1; 7603 KMOD_TCPSTAT_INC(tcps_rttupdated); 7604 if (tp->t_rttupdated < UCHAR_MAX) 7605 tp->t_rttupdated++; 7606 #ifdef STATS 7607 if (rack_stats_gets_ms_rtt == 0) { 7608 /* Send in the microsecond rtt used for rxt timeout purposes */ 7609 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7610 } else if (rack_stats_gets_ms_rtt == 1) { 7611 /* Send in the millisecond rtt used for rxt timeout purposes */ 7612 int32_t ms_rtt; 7613 7614 /* Round up */ 7615 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7616 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7617 } else if (rack_stats_gets_ms_rtt == 2) { 7618 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7619 int32_t ms_rtt; 7620 7621 /* Round up */ 7622 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7623 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7624 } else { 7625 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7626 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7627 } 7628 7629 #endif 7630 /* 7631 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7632 * way we do the smoothing, srtt and rttvar will each average +1/2 7633 * tick of bias. When we compute the retransmit timer, we want 1/2 7634 * tick of rounding and 1 extra tick because of +-1/2 tick 7635 * uncertainty in the firing of the timer. The bias will give us 7636 * exactly the 1.5 tick we need. But, because the bias is 7637 * statistical, we have to test that we don't drop below the minimum 7638 * feasible timer (which is 2 ticks). 7639 */ 7640 tp->t_rxtshift = 0; 7641 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7642 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7643 rack_log_rtt_sample(rack, rtt); 7644 tp->t_softerror = 0; 7645 } 7646 7647 7648 static void 7649 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7650 { 7651 /* 7652 * Apply to filter the inbound us-rtt at us_cts. 7653 */ 7654 uint32_t old_rtt; 7655 7656 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7657 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7658 us_rtt, us_cts); 7659 if (old_rtt > us_rtt) { 7660 /* We just hit a new lower rtt time */ 7661 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7662 __LINE__, RACK_RTTS_NEWRTT); 7663 /* 7664 * Only count it if its lower than what we saw within our 7665 * calculated range. 
7666 */ 7667 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7668 if (rack_probertt_lower_within && 7669 rack->rc_gp_dyn_mul && 7670 (rack->use_fixed_rate == 0) && 7671 (rack->rc_always_pace)) { 7672 /* 7673 * We are seeing a new lower rtt very close 7674 * to the time that we would have entered probe-rtt. 7675 * This is probably due to the fact that a peer flow 7676 * has entered probe-rtt. Lets go in now too. 7677 */ 7678 uint32_t val; 7679 7680 val = rack_probertt_lower_within * rack_time_between_probertt; 7681 val /= 100; 7682 if ((rack->in_probe_rtt == 0) && 7683 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7684 rack_enter_probertt(rack, us_cts); 7685 } 7686 } 7687 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7688 } 7689 } 7690 } 7691 7692 static int 7693 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7694 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7695 { 7696 uint32_t us_rtt; 7697 int32_t i, all; 7698 uint32_t t, len_acked; 7699 7700 if ((rsm->r_flags & RACK_ACKED) || 7701 (rsm->r_flags & RACK_WAS_ACKED)) 7702 /* Already done */ 7703 return (0); 7704 if (rsm->r_no_rtt_allowed) { 7705 /* Not allowed */ 7706 return (0); 7707 } 7708 if (ack_type == CUM_ACKED) { 7709 if (SEQ_GT(th_ack, rsm->r_end)) { 7710 len_acked = rsm->r_end - rsm->r_start; 7711 all = 1; 7712 } else { 7713 len_acked = th_ack - rsm->r_start; 7714 all = 0; 7715 } 7716 } else { 7717 len_acked = rsm->r_end - rsm->r_start; 7718 all = 0; 7719 } 7720 if (rsm->r_rtr_cnt == 1) { 7721 7722 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7723 if ((int)t <= 0) 7724 t = 1; 7725 if (!tp->t_rttlow || tp->t_rttlow > t) 7726 tp->t_rttlow = t; 7727 if (!rack->r_ctl.rc_rack_min_rtt || 7728 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7729 rack->r_ctl.rc_rack_min_rtt = t; 7730 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7731 rack->r_ctl.rc_rack_min_rtt = 1; 7732 } 7733 } 7734 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7735 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7736 else 7737 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7738 if (us_rtt == 0) 7739 us_rtt = 1; 7740 if (CC_ALGO(tp)->rttsample != NULL) { 7741 /* Kick the RTT to the CC */ 7742 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7743 } 7744 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7745 if (ack_type == SACKED) { 7746 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7747 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7748 } else { 7749 /* 7750 * We need to setup what our confidence 7751 * is in this ack. 7752 * 7753 * If the rsm was app limited and it is 7754 * less than a mss in length (the end 7755 * of the send) then we have a gap. If we 7756 * were app limited but say we were sending 7757 * multiple MSS's then we are more confident 7758 * int it. 7759 * 7760 * When we are not app-limited then we see if 7761 * the rsm is being included in the current 7762 * measurement, we tell this by the app_limited_needs_set 7763 * flag. 7764 * 7765 * Note that being cwnd blocked is not applimited 7766 * as well as the pacing delay between packets which 7767 * are sending only 1 or 2 MSS's also will show up 7768 * in the RTT. 
We probably need to examine this algorithm 7769 * a bit more and enhance it to account for the delay 7770 * between rsm's. We could do that by saving off the 7771 * pacing delay of each rsm (in an rsm) and then 7772 * factoring that in somehow though for now I am 7773 * not sure how :) 7774 */ 7775 int calc_conf = 0; 7776 7777 if (rsm->r_flags & RACK_APP_LIMITED) { 7778 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7779 calc_conf = 0; 7780 else 7781 calc_conf = 1; 7782 } else if (rack->app_limited_needs_set == 0) { 7783 calc_conf = 1; 7784 } else { 7785 calc_conf = 0; 7786 } 7787 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7788 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7789 calc_conf, rsm, rsm->r_rtr_cnt); 7790 } 7791 if ((rsm->r_flags & RACK_TLP) && 7792 (!IN_FASTRECOVERY(tp->t_flags))) { 7793 /* Segment was a TLP and our retrans matched */ 7794 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7795 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7796 } 7797 } 7798 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7799 /* New more recent rack_tmit_time */ 7800 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7801 rack->rc_rack_rtt = t; 7802 } 7803 return (1); 7804 } 7805 /* 7806 * We clear the soft/rxtshift since we got an ack. 7807 * There is no assurance we will call the commit() function 7808 * so we need to clear these to avoid incorrect handling. 7809 */ 7810 tp->t_rxtshift = 0; 7811 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7812 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7813 tp->t_softerror = 0; 7814 if (to && (to->to_flags & TOF_TS) && 7815 (ack_type == CUM_ACKED) && 7816 (to->to_tsecr) && 7817 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7818 /* 7819 * Now which timestamp does it match? In this block the ACK 7820 * must be coming from a previous transmission. 7821 */ 7822 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7823 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7824 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7825 if ((int)t <= 0) 7826 t = 1; 7827 if (CC_ALGO(tp)->rttsample != NULL) { 7828 /* 7829 * Kick the RTT to the CC, here 7830 * we lie a bit in that we know the 7831 * retransmission is correct even though 7832 * we retransmitted. This is because 7833 * we match the timestamps. 7834 */ 7835 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7836 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7837 else 7838 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7839 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7840 } 7841 if ((i + 1) < rsm->r_rtr_cnt) { 7842 /* 7843 * The peer ack'd from our previous 7844 * transmission. We have a spurious 7845 * retransmission and thus we dont 7846 * want to update our rack_rtt. 7847 * 7848 * Hmm should there be a CC revert here? 
					 *
					 */
					return (0);
				}
				if (!tp->t_rttlow || tp->t_rttlow > t)
					tp->t_rttlow = t;
				if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
					rack->r_ctl.rc_rack_min_rtt = t;
					if (rack->r_ctl.rc_rack_min_rtt == 0) {
						rack->r_ctl.rc_rack_min_rtt = 1;
					}
				}
				if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time,
				    (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
					/* New more recent rack_tmit_time */
					rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)];
					rack->rc_rack_rtt = t;
				}
				rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3);
				tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm,
				    rsm->r_rtr_cnt);
				return (1);
			}
		}
		goto ts_not_found;
	} else {
		/*
		 * OK, it's a SACK block that we retransmitted, or a Windows
		 * machine without timestamps. We can tell nothing from the
		 * timestamp, since either it's not there or it reflects the
		 * time the peer last received a segment that moved its
		 * cum-ack point forward.
		 */
	ts_not_found:
		i = rsm->r_rtr_cnt - 1;
		t = cts - (uint32_t)rsm->r_tim_lastsent[i];
		if ((int)t <= 0)
			t = 1;
		if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
			/*
			 * We retransmitted and the ack came back in less
			 * than the smallest rtt we have observed. We most
			 * likely did an improper retransmit as outlined in
			 * 6.2 Step 2 point 2 in the rack-draft so we
			 * don't want to update our rack_rtt. We in
			 * theory (in future) might want to think about reverting our
			 * cwnd state but we won't for now.
			 */
			return (0);
		} else if (rack->r_ctl.rc_rack_min_rtt) {
			/*
			 * We retransmitted it and the retransmit did the
			 * job.
			 */
			if (!rack->r_ctl.rc_rack_min_rtt ||
			    SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) {
				rack->r_ctl.rc_rack_min_rtt = t;
				if (rack->r_ctl.rc_rack_min_rtt == 0) {
					rack->r_ctl.rc_rack_min_rtt = 1;
				}
			}
			if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) {
				/* New more recent rack_tmit_time */
				rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i];
				rack->rc_rack_rtt = t;
			}
			return (1);
		}
	}
	return (0);
}

/*
 * Mark the SACK_PASSED flag on all entries prior to rsm, send-wise.
 */
static void
rack_log_sack_passed(struct tcpcb *tp,
    struct tcp_rack *rack, struct rack_sendmap *rsm)
{
	struct rack_sendmap *nrsm;

	nrsm = rsm;
	TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap,
	    rack_head, r_tnext) {
		if (nrsm == rsm) {
			/* Skip the original segment; it is acked */
			continue;
		}
		if (nrsm->r_flags & RACK_ACKED) {
			/*
			 * Skip ack'd segments, though we
			 * should not see these, since tmap
			 * should not have ack'd segments.
			 */
			continue;
		}
		if (nrsm->r_flags & RACK_RWND_COLLAPSED) {
			/*
			 * If the peer dropped the rwnd on
			 * these then we don't worry about them.
			 */
			continue;
		}
		if (nrsm->r_flags & RACK_SACK_PASSED) {
			/*
			 * We found one that is already marked
			 * passed, we have been here before and
			 * so all others below this are marked.
7956 */ 7957 break; 7958 } 7959 nrsm->r_flags |= RACK_SACK_PASSED; 7960 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 7961 } 7962 } 7963 7964 static void 7965 rack_need_set_test(struct tcpcb *tp, 7966 struct tcp_rack *rack, 7967 struct rack_sendmap *rsm, 7968 tcp_seq th_ack, 7969 int line, 7970 int use_which) 7971 { 7972 7973 if ((tp->t_flags & TF_GPUTINPROG) && 7974 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 7975 /* 7976 * We were app limited, and this ack 7977 * butts up or goes beyond the point where we want 7978 * to start our next measurement. We need 7979 * to record the new gput_ts as here and 7980 * possibly update the start sequence. 7981 */ 7982 uint32_t seq, ts; 7983 7984 if (rsm->r_rtr_cnt > 1) { 7985 /* 7986 * This is a retransmit, can we 7987 * really make any assessment at this 7988 * point? We are not really sure of 7989 * the timestamp, is it this or the 7990 * previous transmission? 7991 * 7992 * Lets wait for something better that 7993 * is not retransmitted. 7994 */ 7995 return; 7996 } 7997 seq = tp->gput_seq; 7998 ts = tp->gput_ts; 7999 rack->app_limited_needs_set = 0; 8000 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8001 /* Do we start at a new end? */ 8002 if ((use_which == RACK_USE_BEG) && 8003 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8004 /* 8005 * When we get an ACK that just eats 8006 * up some of the rsm, we set RACK_USE_BEG 8007 * since whats at r_start (i.e. th_ack) 8008 * is left unacked and thats where the 8009 * measurement not starts. 8010 */ 8011 tp->gput_seq = rsm->r_start; 8012 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8013 } 8014 if ((use_which == RACK_USE_END) && 8015 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8016 /* 8017 * We use the end when the cumack 8018 * is moving forward and completely 8019 * deleting the rsm passed so basically 8020 * r_end holds th_ack. 8021 * 8022 * For SACK's we also want to use the end 8023 * since this piece just got sacked and 8024 * we want to target anything after that 8025 * in our measurement. 8026 */ 8027 tp->gput_seq = rsm->r_end; 8028 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8029 } 8030 if (use_which == RACK_USE_END_OR_THACK) { 8031 /* 8032 * special case for ack moving forward, 8033 * not a sack, we need to move all the 8034 * way up to where this ack cum-ack moves 8035 * to. 8036 */ 8037 if (SEQ_GT(th_ack, rsm->r_end)) 8038 tp->gput_seq = th_ack; 8039 else 8040 tp->gput_seq = rsm->r_end; 8041 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8042 } 8043 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8044 /* 8045 * We moved beyond this guy's range, re-calculate 8046 * the new end point. 8047 */ 8048 if (rack->rc_gp_filled == 0) { 8049 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8050 } else { 8051 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8052 } 8053 } 8054 /* 8055 * We are moving the goal post, we may be able to clear the 8056 * measure_saw_probe_rtt flag. 
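		 * (It only clears once we are no longer in probe-rtt and
		 * the new start sequence is at or beyond the snd_max we
		 * recorded when probe-rtt exited, i.e.
		 * rc_probertt_sndmax_atexit below.)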
8057 */ 8058 if ((rack->in_probe_rtt == 0) && 8059 (rack->measure_saw_probe_rtt) && 8060 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8061 rack->measure_saw_probe_rtt = 0; 8062 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8063 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8064 if (rack->rc_gp_filled && 8065 ((tp->gput_ack - tp->gput_seq) < 8066 max(rc_init_window(rack), (MIN_GP_WIN * 8067 ctf_fixed_maxseg(tp))))) { 8068 uint32_t ideal_amount; 8069 8070 ideal_amount = rack_get_measure_window(tp, rack); 8071 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 8072 /* 8073 * There is no sense of continuing this measurement 8074 * because its too small to gain us anything we 8075 * trust. Skip it and that way we can start a new 8076 * measurement quicker. 8077 */ 8078 tp->t_flags &= ~TF_GPUTINPROG; 8079 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8080 0, 0, 0, 6, __LINE__, NULL, 0); 8081 } else { 8082 /* 8083 * Reset the window further out. 8084 */ 8085 tp->gput_ack = tp->gput_seq + ideal_amount; 8086 } 8087 } 8088 } 8089 } 8090 8091 static inline int 8092 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8093 { 8094 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8095 /* Behind our TLP definition or right at */ 8096 return (0); 8097 } 8098 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8099 /* The start is beyond or right at our end of TLP definition */ 8100 return (0); 8101 } 8102 /* It has to be a sub-part of the original TLP recorded */ 8103 return (1); 8104 } 8105 8106 8107 static uint32_t 8108 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8109 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8110 { 8111 uint32_t start, end, changed = 0; 8112 struct rack_sendmap stack_map; 8113 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8114 #ifdef INVARIANTS 8115 struct rack_sendmap *insret; 8116 #endif 8117 int32_t used_ref = 1; 8118 int moved = 0; 8119 8120 start = sack->start; 8121 end = sack->end; 8122 rsm = *prsm; 8123 memset(&fe, 0, sizeof(fe)); 8124 do_rest_ofb: 8125 if ((rsm == NULL) || 8126 (SEQ_LT(end, rsm->r_start)) || 8127 (SEQ_GEQ(start, rsm->r_end)) || 8128 (SEQ_LT(start, rsm->r_start))) { 8129 /* 8130 * We are not in the right spot, 8131 * find the correct spot in the tree. 8132 */ 8133 used_ref = 0; 8134 fe.r_start = start; 8135 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8136 moved++; 8137 } 8138 if (rsm == NULL) { 8139 /* TSNH */ 8140 goto out; 8141 } 8142 /* Ok we have an ACK for some piece of this rsm */ 8143 if (rsm->r_start != start) { 8144 if ((rsm->r_flags & RACK_ACKED) == 0) { 8145 /* 8146 * Before any splitting or hookery is 8147 * done is it a TLP of interest i.e. rxt? 8148 */ 8149 if ((rsm->r_flags & RACK_TLP) && 8150 (rsm->r_rtr_cnt > 1)) { 8151 /* 8152 * We are splitting a rxt TLP, check 8153 * if we need to save off the start/end 8154 */ 8155 if (rack->rc_last_tlp_acked_set && 8156 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8157 /* 8158 * We already turned this on since we are inside 8159 * the previous one was a partially sack now we 8160 * are getting another one (maybe all of it). 8161 * 8162 */ 8163 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8164 /* 8165 * Lets make sure we have all of it though. 
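					 * (i.e. widen last_tlp_acked_start/end so the
					 * recorded block covers this rsm as well; for
					 * example a recorded [1000, 1460) and an rsm
					 * of [860, 1460) moves the start back to 860.)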
8166 */ 8167 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8168 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8169 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8170 rack->r_ctl.last_tlp_acked_end); 8171 } 8172 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8173 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8174 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8175 rack->r_ctl.last_tlp_acked_end); 8176 } 8177 } else { 8178 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8179 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8180 rack->rc_last_tlp_past_cumack = 0; 8181 rack->rc_last_tlp_acked_set = 1; 8182 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8183 } 8184 } 8185 /** 8186 * Need to split this in two pieces the before and after, 8187 * the before remains in the map, the after must be 8188 * added. In other words we have: 8189 * rsm |--------------| 8190 * sackblk |-------> 8191 * rsm will become 8192 * rsm |---| 8193 * and nrsm will be the sacked piece 8194 * nrsm |----------| 8195 * 8196 * But before we start down that path lets 8197 * see if the sack spans over on top of 8198 * the next guy and it is already sacked. 8199 * 8200 */ 8201 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8202 if (next && (next->r_flags & RACK_ACKED) && 8203 SEQ_GEQ(end, next->r_start)) { 8204 /** 8205 * So the next one is already acked, and 8206 * we can thus by hookery use our stack_map 8207 * to reflect the piece being sacked and 8208 * then adjust the two tree entries moving 8209 * the start and ends around. So we start like: 8210 * rsm |------------| (not-acked) 8211 * next |-----------| (acked) 8212 * sackblk |--------> 8213 * We want to end like so: 8214 * rsm |------| (not-acked) 8215 * next |-----------------| (acked) 8216 * nrsm |-----| 8217 * Where nrsm is a temporary stack piece we 8218 * use to update all the gizmos. 8219 */ 8220 /* Copy up our fudge block */ 8221 nrsm = &stack_map; 8222 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8223 /* Now adjust our tree blocks */ 8224 rsm->r_end = start; 8225 next->r_start = start; 8226 /* Now we must adjust back where next->m is */ 8227 rack_setup_offset_for_rsm(rsm, next); 8228 8229 /* We don't need to adjust rsm, it did not change */ 8230 /* Clear out the dup ack count of the remainder */ 8231 rsm->r_dupack = 0; 8232 rsm->r_just_ret = 0; 8233 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8234 /* Now lets make sure our fudge block is right */ 8235 nrsm->r_start = start; 8236 /* Now lets update all the stats and such */ 8237 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8238 if (rack->app_limited_needs_set) 8239 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8240 changed += (nrsm->r_end - nrsm->r_start); 8241 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8242 if (nrsm->r_flags & RACK_SACK_PASSED) { 8243 rack->r_ctl.rc_reorder_ts = cts; 8244 } 8245 /* 8246 * Now we want to go up from rsm (the 8247 * one left un-acked) to the next one 8248 * in the tmap. We do this so when 8249 * we walk backwards we include marking 8250 * sack-passed on rsm (The one passed in 8251 * is skipped since it is generally called 8252 * on something sacked before removing it 8253 * from the tmap). 8254 */ 8255 if (rsm->r_in_tmap) { 8256 nrsm = TAILQ_NEXT(rsm, r_tnext); 8257 /* 8258 * Now that we have the next 8259 * one walk backwards from there. 
8260 */ 8261 if (nrsm && nrsm->r_in_tmap) 8262 rack_log_sack_passed(tp, rack, nrsm); 8263 } 8264 /* Now are we done? */ 8265 if (SEQ_LT(end, next->r_end) || 8266 (end == next->r_end)) { 8267 /* Done with block */ 8268 goto out; 8269 } 8270 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8271 counter_u64_add(rack_sack_used_next_merge, 1); 8272 /* Postion for the next block */ 8273 start = next->r_end; 8274 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8275 if (rsm == NULL) 8276 goto out; 8277 } else { 8278 /** 8279 * We can't use any hookery here, so we 8280 * need to split the map. We enter like 8281 * so: 8282 * rsm |--------| 8283 * sackblk |-----> 8284 * We will add the new block nrsm and 8285 * that will be the new portion, and then 8286 * fall through after reseting rsm. So we 8287 * split and look like this: 8288 * rsm |----| 8289 * sackblk |-----> 8290 * nrsm |---| 8291 * We then fall through reseting 8292 * rsm to nrsm, so the next block 8293 * picks it up. 8294 */ 8295 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8296 if (nrsm == NULL) { 8297 /* 8298 * failed XXXrrs what can we do but loose the sack 8299 * info? 8300 */ 8301 goto out; 8302 } 8303 counter_u64_add(rack_sack_splits, 1); 8304 rack_clone_rsm(rack, nrsm, rsm, start); 8305 rsm->r_just_ret = 0; 8306 #ifndef INVARIANTS 8307 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8308 #else 8309 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8310 if (insret != NULL) { 8311 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8312 nrsm, insret, rack, rsm); 8313 } 8314 #endif 8315 if (rsm->r_in_tmap) { 8316 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8317 nrsm->r_in_tmap = 1; 8318 } 8319 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8320 rsm->r_flags &= (~RACK_HAS_FIN); 8321 /* Position us to point to the new nrsm that starts the sack blk */ 8322 rsm = nrsm; 8323 } 8324 } else { 8325 /* Already sacked this piece */ 8326 counter_u64_add(rack_sack_skipped_acked, 1); 8327 moved++; 8328 if (end == rsm->r_end) { 8329 /* Done with block */ 8330 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8331 goto out; 8332 } else if (SEQ_LT(end, rsm->r_end)) { 8333 /* A partial sack to a already sacked block */ 8334 moved++; 8335 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8336 goto out; 8337 } else { 8338 /* 8339 * The end goes beyond this guy 8340 * reposition the start to the 8341 * next block. 8342 */ 8343 start = rsm->r_end; 8344 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8345 if (rsm == NULL) 8346 goto out; 8347 } 8348 } 8349 } 8350 if (SEQ_GEQ(end, rsm->r_end)) { 8351 /** 8352 * The end of this block is either beyond this guy or right 8353 * at this guy. I.e.: 8354 * rsm --- |-----| 8355 * end |-----| 8356 * <or> 8357 * end |---------| 8358 */ 8359 if ((rsm->r_flags & RACK_ACKED) == 0) { 8360 /* 8361 * Is it a TLP of interest? 8362 */ 8363 if ((rsm->r_flags & RACK_TLP) && 8364 (rsm->r_rtr_cnt > 1)) { 8365 /* 8366 * We are splitting a rxt TLP, check 8367 * if we need to save off the start/end 8368 */ 8369 if (rack->rc_last_tlp_acked_set && 8370 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8371 /* 8372 * We already turned this on since we are inside 8373 * the previous one was a partially sack now we 8374 * are getting another one (maybe all of it). 
8375 */ 8376 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8377 /* 8378 * Lets make sure we have all of it though. 8379 */ 8380 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8381 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8382 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8383 rack->r_ctl.last_tlp_acked_end); 8384 } 8385 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8386 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8387 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8388 rack->r_ctl.last_tlp_acked_end); 8389 } 8390 } else { 8391 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8392 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8393 rack->rc_last_tlp_past_cumack = 0; 8394 rack->rc_last_tlp_acked_set = 1; 8395 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8396 } 8397 } 8398 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8399 changed += (rsm->r_end - rsm->r_start); 8400 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8401 if (rsm->r_in_tmap) /* should be true */ 8402 rack_log_sack_passed(tp, rack, rsm); 8403 /* Is Reordering occuring? */ 8404 if (rsm->r_flags & RACK_SACK_PASSED) { 8405 rsm->r_flags &= ~RACK_SACK_PASSED; 8406 rack->r_ctl.rc_reorder_ts = cts; 8407 } 8408 if (rack->app_limited_needs_set) 8409 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8410 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8411 rsm->r_flags |= RACK_ACKED; 8412 if (rsm->r_in_tmap) { 8413 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8414 rsm->r_in_tmap = 0; 8415 } 8416 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8417 } else { 8418 counter_u64_add(rack_sack_skipped_acked, 1); 8419 moved++; 8420 } 8421 if (end == rsm->r_end) { 8422 /* This block only - done, setup for next */ 8423 goto out; 8424 } 8425 /* 8426 * There is more not coverend by this rsm move on 8427 * to the next block in the RB tree. 8428 */ 8429 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8430 start = rsm->r_end; 8431 rsm = nrsm; 8432 if (rsm == NULL) 8433 goto out; 8434 goto do_rest_ofb; 8435 } 8436 /** 8437 * The end of this sack block is smaller than 8438 * our rsm i.e.: 8439 * rsm --- |-----| 8440 * end |--| 8441 */ 8442 if ((rsm->r_flags & RACK_ACKED) == 0) { 8443 /* 8444 * Is it a TLP of interest? 8445 */ 8446 if ((rsm->r_flags & RACK_TLP) && 8447 (rsm->r_rtr_cnt > 1)) { 8448 /* 8449 * We are splitting a rxt TLP, check 8450 * if we need to save off the start/end 8451 */ 8452 if (rack->rc_last_tlp_acked_set && 8453 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8454 /* 8455 * We already turned this on since we are inside 8456 * the previous one was a partially sack now we 8457 * are getting another one (maybe all of it). 8458 */ 8459 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8460 /* 8461 * Lets make sure we have all of it though. 
8462 */ 8463 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8464 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8465 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8466 rack->r_ctl.last_tlp_acked_end); 8467 } 8468 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8469 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8470 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8471 rack->r_ctl.last_tlp_acked_end); 8472 } 8473 } else { 8474 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8475 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8476 rack->rc_last_tlp_past_cumack = 0; 8477 rack->rc_last_tlp_acked_set = 1; 8478 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8479 } 8480 } 8481 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8482 if (prev && 8483 (prev->r_flags & RACK_ACKED)) { 8484 /** 8485 * Goal, we want the right remainder of rsm to shrink 8486 * in place and span from (rsm->r_start = end) to rsm->r_end. 8487 * We want to expand prev to go all the way 8488 * to prev->r_end <- end. 8489 * so in the tree we have before: 8490 * prev |--------| (acked) 8491 * rsm |-------| (non-acked) 8492 * sackblk |-| 8493 * We churn it so we end up with 8494 * prev |----------| (acked) 8495 * rsm |-----| (non-acked) 8496 * nrsm |-| (temporary) 8497 * 8498 * Note if either prev/rsm is a TLP we don't 8499 * do this. 8500 */ 8501 nrsm = &stack_map; 8502 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8503 prev->r_end = end; 8504 rsm->r_start = end; 8505 /* Now adjust nrsm (stack copy) to be 8506 * the one that is the small 8507 * piece that was "sacked". 8508 */ 8509 nrsm->r_end = end; 8510 rsm->r_dupack = 0; 8511 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8512 /* 8513 * Now that the rsm has had its start moved forward 8514 * lets go ahead and get its new place in the world. 8515 */ 8516 rack_setup_offset_for_rsm(prev, rsm); 8517 /* 8518 * Now nrsm is our new little piece 8519 * that is acked (which was merged 8520 * to prev). Update the rtt and changed 8521 * based on that. Also check for reordering. 8522 */ 8523 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8524 if (rack->app_limited_needs_set) 8525 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8526 changed += (nrsm->r_end - nrsm->r_start); 8527 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8528 if (nrsm->r_flags & RACK_SACK_PASSED) { 8529 rack->r_ctl.rc_reorder_ts = cts; 8530 } 8531 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8532 rsm = prev; 8533 counter_u64_add(rack_sack_used_prev_merge, 1); 8534 } else { 8535 /** 8536 * This is the case where our previous 8537 * block is not acked either, so we must 8538 * split the block in two. 8539 */ 8540 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8541 if (nrsm == NULL) { 8542 /* failed rrs what can we do but loose the sack info? */ 8543 goto out; 8544 } 8545 if ((rsm->r_flags & RACK_TLP) && 8546 (rsm->r_rtr_cnt > 1)) { 8547 /* 8548 * We are splitting a rxt TLP, check 8549 * if we need to save off the start/end 8550 */ 8551 if (rack->rc_last_tlp_acked_set && 8552 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8553 /* 8554 * We already turned this on since this block is inside 8555 * the previous one was a partially sack now we 8556 * are getting another one (maybe all of it). 8557 */ 8558 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8559 /* 8560 * Lets make sure we have all of it though. 
8561 */ 8562 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8563 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8564 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8565 rack->r_ctl.last_tlp_acked_end); 8566 } 8567 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8568 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8569 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8570 rack->r_ctl.last_tlp_acked_end); 8571 } 8572 } else { 8573 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8574 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8575 rack->rc_last_tlp_acked_set = 1; 8576 rack->rc_last_tlp_past_cumack = 0; 8577 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8578 } 8579 } 8580 /** 8581 * In this case nrsm becomes 8582 * nrsm->r_start = end; 8583 * nrsm->r_end = rsm->r_end; 8584 * which is un-acked. 8585 * <and> 8586 * rsm->r_end = nrsm->r_start; 8587 * i.e. the remaining un-acked 8588 * piece is left on the left 8589 * hand side. 8590 * 8591 * So we start like this 8592 * rsm |----------| (not acked) 8593 * sackblk |---| 8594 * build it so we have 8595 * rsm |---| (acked) 8596 * nrsm |------| (not acked) 8597 */ 8598 counter_u64_add(rack_sack_splits, 1); 8599 rack_clone_rsm(rack, nrsm, rsm, end); 8600 rsm->r_flags &= (~RACK_HAS_FIN); 8601 rsm->r_just_ret = 0; 8602 #ifndef INVARIANTS 8603 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8604 #else 8605 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8606 if (insret != NULL) { 8607 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8608 nrsm, insret, rack, rsm); 8609 } 8610 #endif 8611 if (rsm->r_in_tmap) { 8612 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8613 nrsm->r_in_tmap = 1; 8614 } 8615 nrsm->r_dupack = 0; 8616 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8617 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8618 changed += (rsm->r_end - rsm->r_start); 8619 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8620 if (rsm->r_in_tmap) /* should be true */ 8621 rack_log_sack_passed(tp, rack, rsm); 8622 /* Is Reordering occuring? */ 8623 if (rsm->r_flags & RACK_SACK_PASSED) { 8624 rsm->r_flags &= ~RACK_SACK_PASSED; 8625 rack->r_ctl.rc_reorder_ts = cts; 8626 } 8627 if (rack->app_limited_needs_set) 8628 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8629 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8630 rsm->r_flags |= RACK_ACKED; 8631 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8632 if (rsm->r_in_tmap) { 8633 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8634 rsm->r_in_tmap = 0; 8635 } 8636 } 8637 } else if (start != end){ 8638 /* 8639 * The block was already acked. 8640 */ 8641 counter_u64_add(rack_sack_skipped_acked, 1); 8642 moved++; 8643 } 8644 out: 8645 if (rsm && 8646 ((rsm->r_flags & RACK_TLP) == 0) && 8647 (rsm->r_flags & RACK_ACKED)) { 8648 /* 8649 * Now can we merge where we worked 8650 * with either the previous or 8651 * next block? 8652 */ 8653 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8654 while (next) { 8655 if (next->r_flags & RACK_TLP) 8656 break; 8657 if (next->r_flags & RACK_ACKED) { 8658 /* yep this and next can be merged */ 8659 rsm = rack_merge_rsm(rack, rsm, next); 8660 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8661 } else 8662 break; 8663 } 8664 /* Now what about the previous? 
*/ 8665 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8666 while (prev) { 8667 if (prev->r_flags & RACK_TLP) 8668 break; 8669 if (prev->r_flags & RACK_ACKED) { 8670 /* yep the previous and this can be merged */ 8671 rsm = rack_merge_rsm(rack, prev, rsm); 8672 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8673 } else 8674 break; 8675 } 8676 } 8677 if (used_ref == 0) { 8678 counter_u64_add(rack_sack_proc_all, 1); 8679 } else { 8680 counter_u64_add(rack_sack_proc_short, 1); 8681 } 8682 /* Save off the next one for quick reference. */ 8683 if (rsm) 8684 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8685 else 8686 nrsm = NULL; 8687 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8688 /* Pass back the moved. */ 8689 *moved_two = moved; 8690 return (changed); 8691 } 8692 8693 static void inline 8694 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8695 { 8696 struct rack_sendmap *tmap; 8697 8698 tmap = NULL; 8699 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8700 /* Its no longer sacked, mark it so */ 8701 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8702 #ifdef INVARIANTS 8703 if (rsm->r_in_tmap) { 8704 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8705 rack, rsm, rsm->r_flags); 8706 } 8707 #endif 8708 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8709 /* Rebuild it into our tmap */ 8710 if (tmap == NULL) { 8711 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8712 tmap = rsm; 8713 } else { 8714 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8715 tmap = rsm; 8716 } 8717 tmap->r_in_tmap = 1; 8718 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8719 } 8720 /* 8721 * Now lets possibly clear the sack filter so we start 8722 * recognizing sacks that cover this area. 8723 */ 8724 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8725 8726 } 8727 8728 static void 8729 rack_do_decay(struct tcp_rack *rack) 8730 { 8731 struct timeval res; 8732 8733 #define timersub(tvp, uvp, vvp) \ 8734 do { \ 8735 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8736 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8737 if ((vvp)->tv_usec < 0) { \ 8738 (vvp)->tv_sec--; \ 8739 (vvp)->tv_usec += 1000000; \ 8740 } \ 8741 } while (0) 8742 8743 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8744 #undef timersub 8745 8746 rack->r_ctl.input_pkt++; 8747 if ((rack->rc_in_persist) || 8748 (res.tv_sec >= 1) || 8749 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8750 /* 8751 * Check for decay of non-SAD, 8752 * we want all SAD detection metrics to 8753 * decay 1/4 per second (or more) passed. 8754 */ 8755 #ifdef NETFLIX_EXP_DETECTION 8756 uint32_t pkt_delta; 8757 8758 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8759 #endif 8760 /* Update our saved tracking values */ 8761 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8762 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8763 /* Now do we escape without decay? */ 8764 #ifdef NETFLIX_EXP_DETECTION 8765 if (rack->rc_in_persist || 8766 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8767 (pkt_delta < tcp_sad_low_pps)){ 8768 /* 8769 * We don't decay idle connections 8770 * or ones that have a low input pps. 
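 *
 * As a rough, illustrative note (example numbers only): with the
 * intended decay of roughly 1/4 per elapsed second noted above, a
 * sack_count that has grown to 1000 would be pulled back toward
 * ~750 on the next qualifying pass, and ack_count shrinks by the
 * same factor, so the SACK-to-ACK ratio used by the detection
 * logic later in this file is preserved rather than reset.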
8771 */ 8772 return; 8773 } 8774 /* Decay the counters */ 8775 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8776 tcp_sad_decay_val); 8777 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8778 tcp_sad_decay_val); 8779 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8780 tcp_sad_decay_val); 8781 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8782 tcp_sad_decay_val); 8783 #endif 8784 } 8785 } 8786 8787 static void 8788 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8789 { 8790 struct rack_sendmap *rsm; 8791 #ifdef INVARIANTS 8792 struct rack_sendmap *rm; 8793 #endif 8794 8795 /* 8796 * The ACK point is advancing to th_ack, we must drop off 8797 * the packets in the rack log and calculate any eligble 8798 * RTT's. 8799 */ 8800 rack->r_wanted_output = 1; 8801 8802 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 8803 if ((rack->rc_last_tlp_acked_set == 1)&& 8804 (rack->rc_last_tlp_past_cumack == 1) && 8805 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8806 /* 8807 * We have reached the point where our last rack 8808 * tlp retransmit sequence is ahead of the cum-ack. 8809 * This can only happen when the cum-ack moves all 8810 * the way around (its been a full 2^^31+1 bytes 8811 * or more since we sent a retransmitted TLP). Lets 8812 * turn off the valid flag since its not really valid. 8813 * 8814 * Note since sack's also turn on this event we have 8815 * a complication, we have to wait to age it out until 8816 * the cum-ack is by the TLP before checking which is 8817 * what the next else clause does. 8818 */ 8819 rack_log_dsack_event(rack, 9, __LINE__, 8820 rack->r_ctl.last_tlp_acked_start, 8821 rack->r_ctl.last_tlp_acked_end); 8822 rack->rc_last_tlp_acked_set = 0; 8823 rack->rc_last_tlp_past_cumack = 0; 8824 } else if ((rack->rc_last_tlp_acked_set == 1) && 8825 (rack->rc_last_tlp_past_cumack == 0) && 8826 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8827 /* 8828 * It is safe to start aging TLP's out. 8829 */ 8830 rack->rc_last_tlp_past_cumack = 1; 8831 } 8832 /* We do the same for the tlp send seq as well */ 8833 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8834 (rack->rc_last_sent_tlp_past_cumack == 1) && 8835 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8836 rack_log_dsack_event(rack, 9, __LINE__, 8837 rack->r_ctl.last_sent_tlp_seq, 8838 (rack->r_ctl.last_sent_tlp_seq + 8839 rack->r_ctl.last_sent_tlp_len)); 8840 rack->rc_last_sent_tlp_seq_valid = 0; 8841 rack->rc_last_sent_tlp_past_cumack = 0; 8842 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8843 (rack->rc_last_sent_tlp_past_cumack == 0) && 8844 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8845 /* 8846 * It is safe to start aging TLP's send. 8847 */ 8848 rack->rc_last_sent_tlp_past_cumack = 1; 8849 } 8850 more: 8851 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8852 if (rsm == NULL) { 8853 if ((th_ack - 1) == tp->iss) { 8854 /* 8855 * For the SYN incoming case we will not 8856 * have called tcp_output for the sending of 8857 * the SYN, so there will be no map. All 8858 * other cases should probably be a panic. 
8859 */ 8860 return; 8861 } 8862 if (tp->t_flags & TF_SENTFIN) { 8863 /* if we sent a FIN we often will not have a map */ 8864 return; 8865 } 8866 #ifdef INVARIANTS 8867 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8868 tp, 8869 tp->t_state, th_ack, rack, 8870 tp->snd_una, tp->snd_max, tp->snd_nxt); 8871 #endif 8872 return; 8873 } 8874 if (SEQ_LT(th_ack, rsm->r_start)) { 8875 /* Huh, the map is missing this */ 8876 #ifdef INVARIANTS 8877 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 8878 rsm->r_start, 8879 th_ack, tp->t_state, rack->r_state); 8880 #endif 8881 return; 8882 } 8883 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 8884 8885 /* Now was it a retransmitted TLP? */ 8886 if ((rsm->r_flags & RACK_TLP) && 8887 (rsm->r_rtr_cnt > 1)) { 8888 /* 8889 * Yes, this rsm was a TLP and retransmitted, remember that 8890 * since if a DSACK comes back on this we don't want 8891 * to think of it as a reordered segment. This may 8892 * get updated again with possibly even other TLPs 8893 * in flight, but that's ok. Only when we don't send 8894 * a retransmitted TLP for 1/2 the sequence space 8895 * will it get turned off (above). 8896 */ 8897 if (rack->rc_last_tlp_acked_set && 8898 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8899 /* 8900 * We already turned this on since the end matches, 8901 * the previous one was a partial ack; now we 8902 * are getting another one (maybe all of it). 8903 */ 8904 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8905 /* 8906 * Let's make sure we have all of it though. 8907 */ 8908 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8909 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8910 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8911 rack->r_ctl.last_tlp_acked_end); 8912 } 8913 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8914 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8915 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8916 rack->r_ctl.last_tlp_acked_end); 8917 } 8918 } else { 8919 rack->rc_last_tlp_past_cumack = 1; 8920 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8921 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8922 rack->rc_last_tlp_acked_set = 1; 8923 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8924 } 8925 } 8926 /* Now do we consume the whole thing? */ 8927 if (SEQ_GEQ(th_ack, rsm->r_end)) { 8928 /* It's all consumed.
*/ 8929 uint32_t left; 8930 uint8_t newly_acked; 8931 8932 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8933 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8934 rsm->r_rtr_bytes = 0; 8935 /* Record the time of highest cumack sent */ 8936 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8937 #ifndef INVARIANTS 8938 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8939 #else 8940 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8941 if (rm != rsm) { 8942 panic("removing head in rack:%p rsm:%p rm:%p", 8943 rack, rsm, rm); 8944 } 8945 #endif 8946 if (rsm->r_in_tmap) { 8947 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8948 rsm->r_in_tmap = 0; 8949 } 8950 newly_acked = 1; 8951 if (rsm->r_flags & RACK_ACKED) { 8952 /* 8953 * It was acked on the scoreboard -- remove 8954 * it from total 8955 */ 8956 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8957 newly_acked = 0; 8958 } else if (rsm->r_flags & RACK_SACK_PASSED) { 8959 /* 8960 * There are segments ACKED on the 8961 * scoreboard further up. We are seeing 8962 * reordering. 8963 */ 8964 rsm->r_flags &= ~RACK_SACK_PASSED; 8965 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8966 rsm->r_flags |= RACK_ACKED; 8967 rack->r_ctl.rc_reorder_ts = cts; 8968 if (rack->r_ent_rec_ns) { 8969 /* 8970 * We have sent no more, and we saw an sack 8971 * then ack arrive. 8972 */ 8973 rack->r_might_revert = 1; 8974 } 8975 } 8976 if ((rsm->r_flags & RACK_TO_REXT) && 8977 (tp->t_flags & TF_RCVD_TSTMP) && 8978 (to->to_flags & TOF_TS) && 8979 (to->to_tsecr != 0) && 8980 (tp->t_flags & TF_PREVVALID)) { 8981 /* 8982 * We can use the timestamp to see 8983 * if this retransmission was from the 8984 * first transmit. If so we made a mistake. 8985 */ 8986 tp->t_flags &= ~TF_PREVVALID; 8987 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 8988 /* The first transmit is what this ack is for */ 8989 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 8990 } 8991 } 8992 left = th_ack - rsm->r_end; 8993 if (rack->app_limited_needs_set && newly_acked) 8994 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 8995 /* Free back to zone */ 8996 rack_free(rack, rsm); 8997 if (left) { 8998 goto more; 8999 } 9000 /* Check for reneging */ 9001 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9002 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9003 /* 9004 * The peer has moved snd_una up to 9005 * the edge of this send, i.e. one 9006 * that it had previously acked. The only 9007 * way that can be true if the peer threw 9008 * away data (space issues) that it had 9009 * previously sacked (else it would have 9010 * given us snd_una up to (rsm->r_end). 9011 * We need to undo the acked markings here. 9012 * 9013 * Note we have to look to make sure th_ack is 9014 * our rsm->r_start in case we get an old ack 9015 * where th_ack is behind snd_una. 9016 */ 9017 rack_peer_reneges(rack, rsm, th_ack); 9018 } 9019 return; 9020 } 9021 if (rsm->r_flags & RACK_ACKED) { 9022 /* 9023 * It was acked on the scoreboard -- remove it from 9024 * total for the part being cum-acked. 9025 */ 9026 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9027 } 9028 /* 9029 * Clear the dup ack count for 9030 * the piece that remains. 9031 */ 9032 rsm->r_dupack = 0; 9033 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9034 if (rsm->r_rtr_bytes) { 9035 /* 9036 * It was retransmitted adjust the 9037 * sack holes for what was acked. 
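 *
 * Worked example (illustrative numbers only): if this rsm covers
 * sequences 1000-3000 and its first 1000 bytes were retransmitted
 * (r_rtr_bytes == 1000), a cum-ack advancing to 2000 gives an
 * ack_am of 1000, so both rc_holes_rxt and this rsm's r_rtr_bytes
 * are reduced by those 1000 bytes below.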
9038 */ 9039 int ack_am; 9040 9041 ack_am = (th_ack - rsm->r_start); 9042 if (ack_am >= rsm->r_rtr_bytes) { 9043 rack->r_ctl.rc_holes_rxt -= ack_am; 9044 rsm->r_rtr_bytes -= ack_am; 9045 } 9046 } 9047 /* 9048 * Update where the piece starts and record 9049 * the time of send of highest cumack sent. 9050 */ 9051 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9052 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9053 /* Now we need to move our offset forward too */ 9054 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9055 /* Fix up the orig_m_len and possibly the mbuf offset */ 9056 rack_adjust_orig_mlen(rsm); 9057 } 9058 rsm->soff += (th_ack - rsm->r_start); 9059 rsm->r_start = th_ack; 9060 /* Now do we need to move the mbuf fwd too? */ 9061 if (rsm->m) { 9062 while (rsm->soff >= rsm->m->m_len) { 9063 rsm->soff -= rsm->m->m_len; 9064 rsm->m = rsm->m->m_next; 9065 KASSERT((rsm->m != NULL), 9066 (" nrsm:%p hit at soff:%u null m", 9067 rsm, rsm->soff)); 9068 } 9069 rsm->orig_m_len = rsm->m->m_len; 9070 } 9071 if (rack->app_limited_needs_set) 9072 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9073 } 9074 9075 static void 9076 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9077 { 9078 struct rack_sendmap *rsm; 9079 int sack_pass_fnd = 0; 9080 9081 if (rack->r_might_revert) { 9082 /* 9083 * Ok we have reordering, have not sent anything, we 9084 * might want to revert the congestion state if nothing 9085 * further has SACK_PASSED on it. Lets check. 9086 * 9087 * We also get here when we have DSACKs come in for 9088 * all the data that we FR'd. Note that a rxt or tlp 9089 * timer clears this from happening. 9090 */ 9091 9092 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9093 if (rsm->r_flags & RACK_SACK_PASSED) { 9094 sack_pass_fnd = 1; 9095 break; 9096 } 9097 } 9098 if (sack_pass_fnd == 0) { 9099 /* 9100 * We went into recovery 9101 * incorrectly due to reordering! 9102 */ 9103 int orig_cwnd; 9104 9105 rack->r_ent_rec_ns = 0; 9106 orig_cwnd = tp->snd_cwnd; 9107 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9108 tp->snd_recover = tp->snd_una; 9109 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9110 EXIT_RECOVERY(tp->t_flags); 9111 } 9112 rack->r_might_revert = 0; 9113 } 9114 } 9115 9116 #ifdef NETFLIX_EXP_DETECTION 9117 static void 9118 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9119 { 9120 if ((rack->do_detection || tcp_force_detection) && 9121 tcp_sack_to_ack_thresh && 9122 tcp_sack_to_move_thresh && 9123 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9124 /* 9125 * We have thresholds set to find 9126 * possible attackers and disable sack. 9127 * Check them. 
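 *
 * Both ratios below are scaled by 1000. As an illustrative
 * example only: 840 counted SACKs against 120 counted ACKs gives
 * an ackratio of 7000, and 600 "extra movement" SACKs out of 800
 * total moves gives a moveratio of 750; each is then compared
 * against its configured threshold before SACK processing is
 * disabled.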
9128 */ 9129 uint64_t ackratio, moveratio, movetotal; 9130 9131 /* Log detecting */ 9132 rack_log_sad(rack, 1); 9133 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9134 ackratio *= (uint64_t)(1000); 9135 if (rack->r_ctl.ack_count) 9136 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9137 else { 9138 /* We really should not hit here */ 9139 ackratio = 1000; 9140 } 9141 if ((rack->sack_attack_disable == 0) && 9142 (ackratio > rack_highest_sack_thresh_seen)) 9143 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9144 movetotal = rack->r_ctl.sack_moved_extra; 9145 movetotal += rack->r_ctl.sack_noextra_move; 9146 moveratio = rack->r_ctl.sack_moved_extra; 9147 moveratio *= (uint64_t)1000; 9148 if (movetotal) 9149 moveratio /= movetotal; 9150 else { 9151 /* No moves, thats pretty good */ 9152 moveratio = 0; 9153 } 9154 if ((rack->sack_attack_disable == 0) && 9155 (moveratio > rack_highest_move_thresh_seen)) 9156 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9157 if (rack->sack_attack_disable == 0) { 9158 if ((ackratio > tcp_sack_to_ack_thresh) && 9159 (moveratio > tcp_sack_to_move_thresh)) { 9160 /* Disable sack processing */ 9161 rack->sack_attack_disable = 1; 9162 if (rack->r_rep_attack == 0) { 9163 rack->r_rep_attack = 1; 9164 counter_u64_add(rack_sack_attacks_detected, 1); 9165 } 9166 tcp_trace_point(rack->rc_tp, TCP_TP_SAD_TRIGGERED); 9167 /* Clamp the cwnd at flight size */ 9168 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9169 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9170 rack_log_sad(rack, 2); 9171 } 9172 } else { 9173 /* We are sack-disabled check for false positives */ 9174 if ((ackratio <= tcp_restoral_thresh) || 9175 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9176 rack->sack_attack_disable = 0; 9177 rack_log_sad(rack, 3); 9178 /* Restart counting */ 9179 rack->r_ctl.sack_count = 0; 9180 rack->r_ctl.sack_moved_extra = 0; 9181 rack->r_ctl.sack_noextra_move = 1; 9182 rack->r_ctl.ack_count = max(1, 9183 (bytes_this_ack / segsiz)); 9184 9185 if (rack->r_rep_reverse == 0) { 9186 rack->r_rep_reverse = 1; 9187 counter_u64_add(rack_sack_attacks_reversed, 1); 9188 } 9189 /* Restore the cwnd */ 9190 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9191 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9192 } 9193 } 9194 } 9195 } 9196 #endif 9197 9198 static int 9199 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9200 { 9201 9202 uint32_t am, l_end; 9203 int was_tlp = 0; 9204 9205 if (SEQ_GT(end, start)) 9206 am = end - start; 9207 else 9208 am = 0; 9209 if ((rack->rc_last_tlp_acked_set ) && 9210 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9211 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9212 /* 9213 * The DSACK is because of a TLP which we don't 9214 * do anything with the reordering window over since 9215 * it was not reordering that caused the DSACK but 9216 * our previous retransmit TLP. 9217 */ 9218 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9219 was_tlp = 1; 9220 goto skip_dsack_round; 9221 } 9222 if (rack->rc_last_sent_tlp_seq_valid) { 9223 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9224 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9225 (SEQ_LEQ(end, l_end))) { 9226 /* 9227 * This dsack is from the last sent TLP, ignore it 9228 * for reordering purposes. 
9229 */ 9230 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9231 was_tlp = 1; 9232 goto skip_dsack_round; 9233 } 9234 } 9235 if (rack->rc_dsack_round_seen == 0) { 9236 rack->rc_dsack_round_seen = 1; 9237 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9238 rack->r_ctl.num_dsack++; 9239 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9240 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9241 } 9242 skip_dsack_round: 9243 /* 9244 * We keep track of how many DSACK blocks we get 9245 * after a recovery incident. 9246 */ 9247 rack->r_ctl.dsack_byte_cnt += am; 9248 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9249 rack->r_ctl.retran_during_recovery && 9250 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9251 /* 9252 * A false recovery's most likely culprit is reordering. If 9253 * nothing else is missing we need to revert. 9254 */ 9255 rack->r_might_revert = 1; 9256 rack_handle_might_revert(rack->rc_tp, rack); 9257 rack->r_might_revert = 0; 9258 rack->r_ctl.retran_during_recovery = 0; 9259 rack->r_ctl.dsack_byte_cnt = 0; 9260 } 9261 return (was_tlp); 9262 } 9263 9264 static uint32_t 9265 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 9266 { 9267 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 9268 } 9269 9270 static int32_t 9271 rack_compute_pipe(struct tcpcb *tp) 9272 { 9273 return ((int32_t)do_rack_compute_pipe(tp, 9274 (struct tcp_rack *)tp->t_fb_ptr, 9275 tp->snd_una)); 9276 } 9277 9278 static void 9279 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9280 { 9281 /* Deal with changed and PRR here (in recovery only) */ 9282 uint32_t pipe, snd_una; 9283 9284 rack->r_ctl.rc_prr_delivered += changed; 9285 9286 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9287 /* 9288 * It is all outstanding, we are application limited 9289 * and thus we don't need more room to send anything. 9290 * Note we use tp->snd_una here and not th_ack because 9291 * the data has not yet been cut from the sb.
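 *
 * Otherwise prr_sndcnt is computed below in the spirit of
 * Proportional Rate Reduction (RFC 6937). Rough worked example
 * (illustrative numbers only): with snd_ssthresh = 14600 bytes,
 * rc_prr_recovery_fs = 29200 bytes, rc_prr_delivered = 7300 and
 * rc_prr_out = 0, while pipe is still above ssthresh we get
 * sndcnt = (7300 * 14600) / 29200 + 1 = 3651 bytes, i.e. roughly
 * half of the newly delivered data may be sent while the flight
 * drains toward ssthresh.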
9292 */ 9293 rack->r_ctl.rc_prr_sndcnt = 0; 9294 return; 9295 } 9296 /* Compute prr_sndcnt */ 9297 if (SEQ_GT(tp->snd_una, th_ack)) { 9298 snd_una = tp->snd_una; 9299 } else { 9300 snd_una = th_ack; 9301 } 9302 pipe = do_rack_compute_pipe(tp, rack, snd_una); 9303 if (pipe > tp->snd_ssthresh) { 9304 long sndcnt; 9305 9306 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9307 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9308 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9309 else { 9310 rack->r_ctl.rc_prr_sndcnt = 0; 9311 rack_log_to_prr(rack, 9, 0, __LINE__); 9312 sndcnt = 0; 9313 } 9314 sndcnt++; 9315 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9316 sndcnt -= rack->r_ctl.rc_prr_out; 9317 else 9318 sndcnt = 0; 9319 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9320 rack_log_to_prr(rack, 10, 0, __LINE__); 9321 } else { 9322 uint32_t limit; 9323 9324 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9325 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9326 else 9327 limit = 0; 9328 if (changed > limit) 9329 limit = changed; 9330 limit += ctf_fixed_maxseg(tp); 9331 if (tp->snd_ssthresh > pipe) { 9332 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9333 rack_log_to_prr(rack, 11, 0, __LINE__); 9334 } else { 9335 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9336 rack_log_to_prr(rack, 12, 0, __LINE__); 9337 } 9338 } 9339 } 9340 9341 static void 9342 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9343 { 9344 uint32_t changed; 9345 struct tcp_rack *rack; 9346 struct rack_sendmap *rsm; 9347 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9348 register uint32_t th_ack; 9349 int32_t i, j, k, num_sack_blks = 0; 9350 uint32_t cts, acked, ack_point; 9351 int loop_start = 0, moved_two = 0; 9352 uint32_t tsused; 9353 9354 9355 INP_WLOCK_ASSERT(tptoinpcb(tp)); 9356 if (tcp_get_flags(th) & TH_RST) { 9357 /* We don't log resets */ 9358 return; 9359 } 9360 rack = (struct tcp_rack *)tp->t_fb_ptr; 9361 cts = tcp_get_usecs(NULL); 9362 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9363 changed = 0; 9364 th_ack = th->th_ack; 9365 if (rack->sack_attack_disable == 0) 9366 rack_do_decay(rack); 9367 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9368 /* 9369 * You only get credit for 9370 * MSS and greater (and you get extra 9371 * credit for larger cum-ack moves). 9372 */ 9373 int ac; 9374 9375 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9376 rack->r_ctl.ack_count += ac; 9377 counter_u64_add(rack_ack_total, ac); 9378 } 9379 if (rack->r_ctl.ack_count > 0xfff00000) { 9380 /* 9381 * reduce the number to keep us under 9382 * a uint32_t. 9383 */ 9384 rack->r_ctl.ack_count /= 2; 9385 rack->r_ctl.sack_count /= 2; 9386 } 9387 if (SEQ_GT(th_ack, tp->snd_una)) { 9388 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9389 tp->t_acktime = ticks; 9390 } 9391 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9392 changed = th_ack - rsm->r_start; 9393 if (changed) { 9394 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9395 } 9396 if ((to->to_flags & TOF_SACK) == 0) { 9397 /* We are done nothing left and no sack. */ 9398 rack_handle_might_revert(tp, rack); 9399 /* 9400 * For cases where we struck a dup-ack 9401 * with no SACK, add to the changes so 9402 * PRR will work right. 
9403 */ 9404 if (dup_ack_struck && (changed == 0)) { 9405 changed += ctf_fixed_maxseg(rack->rc_tp); 9406 } 9407 goto out; 9408 } 9409 /* Sack block processing */ 9410 if (SEQ_GT(th_ack, tp->snd_una)) 9411 ack_point = th_ack; 9412 else 9413 ack_point = tp->snd_una; 9414 for (i = 0; i < to->to_nsacks; i++) { 9415 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9416 &sack, sizeof(sack)); 9417 sack.start = ntohl(sack.start); 9418 sack.end = ntohl(sack.end); 9419 if (SEQ_GT(sack.end, sack.start) && 9420 SEQ_GT(sack.start, ack_point) && 9421 SEQ_LT(sack.start, tp->snd_max) && 9422 SEQ_GT(sack.end, ack_point) && 9423 SEQ_LEQ(sack.end, tp->snd_max)) { 9424 sack_blocks[num_sack_blks] = sack; 9425 num_sack_blks++; 9426 } else if (SEQ_LEQ(sack.start, th_ack) && 9427 SEQ_LEQ(sack.end, th_ack)) { 9428 int was_tlp; 9429 9430 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9431 /* 9432 * Its a D-SACK block. 9433 */ 9434 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9435 } 9436 } 9437 if (rack->rc_dsack_round_seen) { 9438 /* Is the dsack roound over? */ 9439 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9440 /* Yes it is */ 9441 rack->rc_dsack_round_seen = 0; 9442 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9443 } 9444 } 9445 /* 9446 * Sort the SACK blocks so we can update the rack scoreboard with 9447 * just one pass. 9448 */ 9449 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9450 num_sack_blks, th->th_ack); 9451 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9452 if (num_sack_blks == 0) { 9453 /* Nothing to sack (DSACKs?) */ 9454 goto out_with_totals; 9455 } 9456 if (num_sack_blks < 2) { 9457 /* Only one, we don't need to sort */ 9458 goto do_sack_work; 9459 } 9460 /* Sort the sacks */ 9461 for (i = 0; i < num_sack_blks; i++) { 9462 for (j = i + 1; j < num_sack_blks; j++) { 9463 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9464 sack = sack_blocks[i]; 9465 sack_blocks[i] = sack_blocks[j]; 9466 sack_blocks[j] = sack; 9467 } 9468 } 9469 } 9470 /* 9471 * Now are any of the sack block ends the same (yes some 9472 * implementations send these)? 9473 */ 9474 again: 9475 if (num_sack_blks == 0) 9476 goto out_with_totals; 9477 if (num_sack_blks > 1) { 9478 for (i = 0; i < num_sack_blks; i++) { 9479 for (j = i + 1; j < num_sack_blks; j++) { 9480 if (sack_blocks[i].end == sack_blocks[j].end) { 9481 /* 9482 * Ok these two have the same end we 9483 * want the smallest end and then 9484 * throw away the larger and start 9485 * again. 9486 */ 9487 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9488 /* 9489 * The second block covers 9490 * more area use that 9491 */ 9492 sack_blocks[i].start = sack_blocks[j].start; 9493 } 9494 /* 9495 * Now collapse out the dup-sack and 9496 * lower the count 9497 */ 9498 for (k = (j + 1); k < num_sack_blks; k++) { 9499 sack_blocks[j].start = sack_blocks[k].start; 9500 sack_blocks[j].end = sack_blocks[k].end; 9501 j++; 9502 } 9503 num_sack_blks--; 9504 goto again; 9505 } 9506 } 9507 } 9508 } 9509 do_sack_work: 9510 /* 9511 * First lets look to see if 9512 * we have retransmitted and 9513 * can use the transmit next? 9514 */ 9515 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9516 if (rsm && 9517 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9518 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9519 /* 9520 * We probably did the FR and the next 9521 * SACK in continues as we would expect. 
9522 */ 9523 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9524 if (acked) { 9525 rack->r_wanted_output = 1; 9526 changed += acked; 9527 } 9528 if (num_sack_blks == 1) { 9529 /* 9530 * This is what we would expect from 9531 * a normal implementation to happen 9532 * after we have retransmitted the FR, 9533 * i.e the sack-filter pushes down 9534 * to 1 block and the next to be retransmitted 9535 * is the sequence in the sack block (has more 9536 * are acked). Count this as ACK'd data to boost 9537 * up the chances of recovering any false positives. 9538 */ 9539 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9540 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9541 counter_u64_add(rack_express_sack, 1); 9542 if (rack->r_ctl.ack_count > 0xfff00000) { 9543 /* 9544 * reduce the number to keep us under 9545 * a uint32_t. 9546 */ 9547 rack->r_ctl.ack_count /= 2; 9548 rack->r_ctl.sack_count /= 2; 9549 } 9550 goto out_with_totals; 9551 } else { 9552 /* 9553 * Start the loop through the 9554 * rest of blocks, past the first block. 9555 */ 9556 moved_two = 0; 9557 loop_start = 1; 9558 } 9559 } 9560 /* Its a sack of some sort */ 9561 rack->r_ctl.sack_count++; 9562 if (rack->r_ctl.sack_count > 0xfff00000) { 9563 /* 9564 * reduce the number to keep us under 9565 * a uint32_t. 9566 */ 9567 rack->r_ctl.ack_count /= 2; 9568 rack->r_ctl.sack_count /= 2; 9569 } 9570 counter_u64_add(rack_sack_total, 1); 9571 if (rack->sack_attack_disable) { 9572 /* An attacker disablement is in place */ 9573 if (num_sack_blks > 1) { 9574 rack->r_ctl.sack_count += (num_sack_blks - 1); 9575 rack->r_ctl.sack_moved_extra++; 9576 counter_u64_add(rack_move_some, 1); 9577 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9578 rack->r_ctl.sack_moved_extra /= 2; 9579 rack->r_ctl.sack_noextra_move /= 2; 9580 } 9581 } 9582 goto out; 9583 } 9584 rsm = rack->r_ctl.rc_sacklast; 9585 for (i = loop_start; i < num_sack_blks; i++) { 9586 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9587 if (acked) { 9588 rack->r_wanted_output = 1; 9589 changed += acked; 9590 } 9591 if (moved_two) { 9592 /* 9593 * If we did not get a SACK for at least a MSS and 9594 * had to move at all, or if we moved more than our 9595 * threshold, it counts against the "extra" move. 9596 */ 9597 rack->r_ctl.sack_moved_extra += moved_two; 9598 counter_u64_add(rack_move_some, 1); 9599 } else { 9600 /* 9601 * else we did not have to move 9602 * any more than we would expect. 9603 */ 9604 rack->r_ctl.sack_noextra_move++; 9605 counter_u64_add(rack_move_none, 1); 9606 } 9607 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9608 /* 9609 * If the SACK was not a full MSS then 9610 * we add to sack_count the number of 9611 * MSS's (or possibly more than 9612 * a MSS if its a TSO send) we had to skip by. 9613 */ 9614 rack->r_ctl.sack_count += moved_two; 9615 counter_u64_add(rack_sack_total, moved_two); 9616 } 9617 /* 9618 * Now we need to setup for the next 9619 * round. First we make sure we won't 9620 * exceed the size of our uint32_t on 9621 * the various counts, and then clear out 9622 * moved_two. 
9623 */ 9624 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9625 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9626 rack->r_ctl.sack_moved_extra /= 2; 9627 rack->r_ctl.sack_noextra_move /= 2; 9628 } 9629 if (rack->r_ctl.sack_count > 0xfff00000) { 9630 rack->r_ctl.ack_count /= 2; 9631 rack->r_ctl.sack_count /= 2; 9632 } 9633 moved_two = 0; 9634 } 9635 out_with_totals: 9636 if (num_sack_blks > 1) { 9637 /* 9638 * You get an extra stroke if 9639 * you have more than one sack-blk, this 9640 * could be where we are skipping forward 9641 * and the sack-filter is still working, or 9642 * it could be an attacker constantly 9643 * moving us. 9644 */ 9645 rack->r_ctl.sack_moved_extra++; 9646 counter_u64_add(rack_move_some, 1); 9647 } 9648 out: 9649 #ifdef NETFLIX_EXP_DETECTION 9650 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9651 #endif 9652 if (changed) { 9653 /* Something changed cancel the rack timer */ 9654 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9655 } 9656 tsused = tcp_get_usecs(NULL); 9657 rsm = tcp_rack_output(tp, rack, tsused); 9658 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9659 rsm && 9660 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9661 /* Enter recovery */ 9662 entered_recovery = 1; 9663 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9664 /* 9665 * When we enter recovery we need to assure we send 9666 * one packet. 9667 */ 9668 if (rack->rack_no_prr == 0) { 9669 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9670 rack_log_to_prr(rack, 8, 0, __LINE__); 9671 } 9672 rack->r_timer_override = 1; 9673 rack->r_early = 0; 9674 rack->r_ctl.rc_agg_early = 0; 9675 } else if (IN_FASTRECOVERY(tp->t_flags) && 9676 rsm && 9677 (rack->r_rr_config == 3)) { 9678 /* 9679 * Assure we can output and we get no 9680 * remembered pace time except the retransmit. 9681 */ 9682 rack->r_timer_override = 1; 9683 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9684 rack->r_ctl.rc_resend = rsm; 9685 } 9686 if (IN_FASTRECOVERY(tp->t_flags) && 9687 (rack->rack_no_prr == 0) && 9688 (entered_recovery == 0)) { 9689 rack_update_prr(tp, rack, changed, th_ack); 9690 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9691 ((tcp_in_hpts(rack->rc_inp) == 0) && 9692 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9693 /* 9694 * If you are pacing output you don't want 9695 * to override. 9696 */ 9697 rack->r_early = 0; 9698 rack->r_ctl.rc_agg_early = 0; 9699 rack->r_timer_override = 1; 9700 } 9701 } 9702 } 9703 9704 static void 9705 rack_strike_dupack(struct tcp_rack *rack) 9706 { 9707 struct rack_sendmap *rsm; 9708 9709 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9710 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9711 rsm = TAILQ_NEXT(rsm, r_tnext); 9712 if (rsm->r_flags & RACK_MUST_RXT) { 9713 /* Sendmap entries that are marked to 9714 * be retransmitted do not need dupack's 9715 * struck. We get these marks for a number 9716 * of reasons (rxt timeout with no sack, 9717 * mtu change, or rwnd collapses). When 9718 * these events occur, we know we must retransmit 9719 * them and mark the sendmap entries. Dupack counting 9720 * is not needed since we are already set to retransmit 9721 * it as soon as we can. 9722 */ 9723 continue; 9724 } 9725 } 9726 if (rsm && (rsm->r_dupack < 0xff)) { 9727 rsm->r_dupack++; 9728 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9729 struct timeval tv; 9730 uint32_t cts; 9731 /* 9732 * Here we see if we need to retransmit. 
For 9733 * a SACK type connection if enough time has passed 9734 * we will get a return of the rsm. For a non-sack 9735 * connection we will get the rsm returned if the 9736 * dupack value is 3 or more. 9737 */ 9738 cts = tcp_get_usecs(&tv); 9739 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9740 if (rack->r_ctl.rc_resend != NULL) { 9741 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9742 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9743 rack->rc_tp->snd_una, __LINE__); 9744 } 9745 rack->r_wanted_output = 1; 9746 rack->r_timer_override = 1; 9747 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9748 } 9749 } else { 9750 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9751 } 9752 } 9753 } 9754 9755 static void 9756 rack_check_bottom_drag(struct tcpcb *tp, 9757 struct tcp_rack *rack, 9758 struct socket *so, int32_t acked) 9759 { 9760 uint32_t segsiz, minseg; 9761 9762 segsiz = ctf_fixed_maxseg(tp); 9763 minseg = segsiz; 9764 9765 if (tp->snd_max == tp->snd_una) { 9766 /* 9767 * We are doing dynamic pacing and we are way 9768 * under. Basically everything got acked while 9769 * we were still waiting on the pacer to expire. 9770 * 9771 * This means we need to boost the b/w in 9772 * addition to any earlier boosting of 9773 * the multiplier. 9774 */ 9775 rack->rc_dragged_bottom = 1; 9776 rack_validate_multipliers_at_or_above100(rack); 9777 /* 9778 * Lets use the segment bytes acked plus 9779 * the lowest RTT seen as the basis to 9780 * form a b/w estimate. This will be off 9781 * due to the fact that the true estimate 9782 * should be around 1/2 the time of the RTT 9783 * but we can settle for that. 9784 */ 9785 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9786 acked) { 9787 uint64_t bw, calc_bw, rtt; 9788 9789 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9790 if (rtt == 0) { 9791 /* no us sample is there a ms one? */ 9792 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9793 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9794 } else { 9795 goto no_measurement; 9796 } 9797 } 9798 bw = acked; 9799 calc_bw = bw * 1000000; 9800 calc_bw /= rtt; 9801 if (rack->r_ctl.last_max_bw && 9802 (rack->r_ctl.last_max_bw < calc_bw)) { 9803 /* 9804 * If we have a last calculated max bw 9805 * enforce it. 9806 */ 9807 calc_bw = rack->r_ctl.last_max_bw; 9808 } 9809 /* now plop it in */ 9810 if (rack->rc_gp_filled == 0) { 9811 if (calc_bw > ONE_POINT_TWO_MEG) { 9812 /* 9813 * If we have no measurement 9814 * don't let us set in more than 9815 * 1.2Mbps. If we are still too 9816 * low after pacing with this we 9817 * will hopefully have a max b/w 9818 * available to sanity check things. 
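 *
 * For instance (illustrative numbers): 14600 bytes acked
 * against an RTT sample of 50000 usecs gives
 * calc_bw = 14600 * 1000000 / 50000 = 292000 bytes/sec
 * (about 2.3Mbps), which this clamp would pull back to the
 * 1.2Mbps cap until a real measurement is in place.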
9819 */ 9820 calc_bw = ONE_POINT_TWO_MEG; 9821 } 9822 rack->r_ctl.rc_rtt_diff = 0; 9823 rack->r_ctl.gp_bw = calc_bw; 9824 rack->rc_gp_filled = 1; 9825 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9826 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9827 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9828 } else if (calc_bw > rack->r_ctl.gp_bw) { 9829 rack->r_ctl.rc_rtt_diff = 0; 9830 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9831 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9832 rack->r_ctl.gp_bw = calc_bw; 9833 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9834 } else 9835 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9836 if ((rack->gp_ready == 0) && 9837 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9838 /* We have enough measurements now */ 9839 rack->gp_ready = 1; 9840 rack_set_cc_pacing(rack); 9841 if (rack->defer_options) 9842 rack_apply_deferred_options(rack); 9843 } 9844 /* 9845 * For acks over 1mss we do a extra boost to simulate 9846 * where we would get 2 acks (we want 110 for the mul). 9847 */ 9848 if (acked > segsiz) 9849 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9850 } else { 9851 /* 9852 * zero rtt possibly?, settle for just an old increase. 9853 */ 9854 no_measurement: 9855 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9856 } 9857 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9858 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9859 minseg)) && 9860 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9861 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9862 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9863 (segsiz * rack_req_segs))) { 9864 /* 9865 * We are doing dynamic GP pacing and 9866 * we have everything except 1MSS or less 9867 * bytes left out. We are still pacing away. 9868 * And there is data that could be sent, This 9869 * means we are inserting delayed ack time in 9870 * our measurements because we are pacing too slow. 9871 */ 9872 rack_validate_multipliers_at_or_above100(rack); 9873 rack->rc_dragged_bottom = 1; 9874 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9875 } 9876 } 9877 9878 9879 9880 static void 9881 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9882 { 9883 /* 9884 * The fast output path is enabled and we 9885 * have moved the cumack forward. Lets see if 9886 * we can expand forward the fast path length by 9887 * that amount. What we would ideally like to 9888 * do is increase the number of bytes in the 9889 * fast path block (left_to_send) by the 9890 * acked amount. However we have to gate that 9891 * by two factors: 9892 * 1) The amount outstanding and the rwnd of the peer 9893 * (i.e. we don't want to exceed the rwnd of the peer). 9894 * <and> 9895 * 2) The amount of data left in the socket buffer (i.e. 9896 * we can't send beyond what is in the buffer). 9897 * 9898 * Note that this does not take into account any increase 9899 * in the cwnd. We will only extend the fast path by 9900 * what was acked. 
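 *
 * Example with illustrative numbers: 100000 bytes sitting in the
 * socket buffer, 40000 bytes outstanding and a 64000 byte peer
 * window gives a gating value of min(60000, 24000) = 24000 below,
 * so an ACK for 8000 bytes may grow left_to_send only while the
 * new total (acked + left_to_send) stays at or under 24000.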
9901 */ 9902 uint32_t new_total, gating_val; 9903 9904 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 9905 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 9906 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 9907 if (new_total <= gating_val) { 9908 /* We can increase left_to_send by the acked amount */ 9909 counter_u64_add(rack_extended_rfo, 1); 9910 rack->r_ctl.fsb.left_to_send = new_total; 9911 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 9912 ("rack:%p left_to_send:%u sbavail:%u out:%u", 9913 rack, rack->r_ctl.fsb.left_to_send, 9914 sbavail(&rack->rc_inp->inp_socket->so_snd), 9915 (tp->snd_max - tp->snd_una))); 9916 9917 } 9918 } 9919 9920 static void 9921 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 9922 { 9923 /* 9924 * Here any sendmap entry that points to the 9925 * beginning mbuf must be adjusted to the correct 9926 * offset. This must be called with: 9927 * 1) The socket buffer locked 9928 * 2) snd_una adjusted to its new position. 9929 * 9930 * Note that (2) implies rack_ack_received has also 9931 * been called. 9932 * 9933 * We grab the first mbuf in the socket buffer and 9934 * then go through the front of the sendmap, recalculating 9935 * the stored offset for any sendmap entry that has 9936 * that mbuf. We must use the sb functions to do this 9937 * since its possible an add was done has well as 9938 * the subtraction we may have just completed. This should 9939 * not be a penalty though, since we just referenced the sb 9940 * to go in and trim off the mbufs that we freed (of course 9941 * there will be a penalty for the sendmap references though). 9942 */ 9943 struct mbuf *m; 9944 struct rack_sendmap *rsm; 9945 9946 SOCKBUF_LOCK_ASSERT(sb); 9947 m = sb->sb_mb; 9948 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9949 if ((rsm == NULL) || (m == NULL)) { 9950 /* Nothing outstanding */ 9951 return; 9952 } 9953 while (rsm->m && (rsm->m == m)) { 9954 /* one to adjust */ 9955 #ifdef INVARIANTS 9956 struct mbuf *tm; 9957 uint32_t soff; 9958 9959 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 9960 if (rsm->orig_m_len != m->m_len) { 9961 rack_adjust_orig_mlen(rsm); 9962 } 9963 if (rsm->soff != soff) { 9964 /* 9965 * This is not a fatal error, we anticipate it 9966 * might happen (the else code), so we count it here 9967 * so that under invariant we can see that it really 9968 * does happen. 9969 */ 9970 counter_u64_add(rack_adjust_map_bw, 1); 9971 } 9972 rsm->m = tm; 9973 rsm->soff = soff; 9974 if (tm) 9975 rsm->orig_m_len = rsm->m->m_len; 9976 else 9977 rsm->orig_m_len = 0; 9978 #else 9979 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 9980 if (rsm->m) 9981 rsm->orig_m_len = rsm->m->m_len; 9982 else 9983 rsm->orig_m_len = 0; 9984 #endif 9985 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 9986 rsm); 9987 if (rsm == NULL) 9988 break; 9989 } 9990 } 9991 9992 /* 9993 * Return value of 1, we do not need to call rack_process_data(). 9994 * return value of 0, rack_process_data can be called. 9995 * For ret_val if its 0 the TCP is locked, if its non-zero 9996 * its unlocked and probably unsafe to touch the TCB. 
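 *
 * A caller therefore typically looks roughly like this sketch
 * (names such as ourfinisacked/ret_val are the caller's locals):
 *
 *	if (rack_process_ack(m, th, so, tp, &to, tiwin, tlen,
 *	    &ourfinisacked, thflags, &ret_val))
 *		return (ret_val);
 *	(otherwise it is safe to fall through to rack_process_data())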
9997 */ 9998 static int 9999 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10000 struct tcpcb *tp, struct tcpopt *to, 10001 uint32_t tiwin, int32_t tlen, 10002 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10003 { 10004 int32_t ourfinisacked = 0; 10005 int32_t nsegs, acked_amount; 10006 int32_t acked; 10007 struct mbuf *mfree; 10008 struct tcp_rack *rack; 10009 int32_t under_pacing = 0; 10010 int32_t recovery = 0; 10011 10012 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10013 10014 rack = (struct tcp_rack *)tp->t_fb_ptr; 10015 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10016 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10017 &rack->r_ctl.challenge_ack_ts, 10018 &rack->r_ctl.challenge_ack_cnt); 10019 rack->r_wanted_output = 1; 10020 return (1); 10021 } 10022 if (rack->gp_ready && 10023 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10024 under_pacing = 1; 10025 } 10026 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10027 int in_rec, dup_ack_struck = 0; 10028 10029 in_rec = IN_FASTRECOVERY(tp->t_flags); 10030 if (rack->rc_in_persist) { 10031 tp->t_rxtshift = 0; 10032 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10033 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10034 } 10035 if ((th->th_ack == tp->snd_una) && 10036 (tiwin == tp->snd_wnd) && 10037 ((to->to_flags & TOF_SACK) == 0)) { 10038 rack_strike_dupack(rack); 10039 dup_ack_struck = 1; 10040 } 10041 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10042 } 10043 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10044 /* 10045 * Old ack, behind (or duplicate to) the last one rcv'd 10046 * Note: We mark reordering is occuring if its 10047 * less than and we have not closed our window. 10048 */ 10049 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10050 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10051 } 10052 return (0); 10053 } 10054 /* 10055 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10056 * something we sent. 10057 */ 10058 if (tp->t_flags & TF_NEEDSYN) { 10059 /* 10060 * T/TCP: Connection was half-synchronized, and our SYN has 10061 * been ACK'd (so connection is now fully synchronized). Go 10062 * to non-starred state, increment snd_una for ACK of SYN, 10063 * and check if we can do window scaling. 10064 */ 10065 tp->t_flags &= ~TF_NEEDSYN; 10066 tp->snd_una++; 10067 /* Do window scaling? */ 10068 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10069 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10070 tp->rcv_scale = tp->request_r_scale; 10071 /* Send window already scaled. */ 10072 } 10073 } 10074 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10075 10076 acked = BYTES_THIS_ACK(tp, th); 10077 if (acked) { 10078 /* 10079 * Any time we move the cum-ack forward clear 10080 * keep-alive tied probe-not-answered. The 10081 * persists clears its own on entry. 10082 */ 10083 rack->probe_not_answered = 0; 10084 } 10085 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10086 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10087 /* 10088 * If we just performed our first retransmit, and the ACK arrives 10089 * within our recovery window, then it was a mistake to do the 10090 * retransmit in the first place. Recover our original cwnd and 10091 * ssthresh, and proceed to transmit where we left off. 
10092 */ 10093 if ((tp->t_flags & TF_PREVVALID) && 10094 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10095 tp->t_flags &= ~TF_PREVVALID; 10096 if (tp->t_rxtshift == 1 && 10097 (int)(ticks - tp->t_badrxtwin) < 0) 10098 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10099 } 10100 if (acked) { 10101 /* assure we are not backed off */ 10102 tp->t_rxtshift = 0; 10103 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10104 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10105 rack->rc_tlp_in_progress = 0; 10106 rack->r_ctl.rc_tlp_cnt_out = 0; 10107 /* 10108 * If it is the RXT timer we want to 10109 * stop it, so we can restart a TLP. 10110 */ 10111 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10112 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10113 #ifdef NETFLIX_HTTP_LOGGING 10114 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10115 #endif 10116 } 10117 /* 10118 * If we have a timestamp reply, update smoothed round trip time. If 10119 * no timestamp is present but transmit timer is running and timed 10120 * sequence number was acked, update smoothed round trip time. Since 10121 * we now have an rtt measurement, cancel the timer backoff (cf., 10122 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10123 * timer. 10124 * 10125 * Some boxes send broken timestamp replies during the SYN+ACK 10126 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10127 * and blow up the retransmit timer. 10128 */ 10129 /* 10130 * If all outstanding data is acked, stop retransmit timer and 10131 * remember to restart (more output or persist). If there is more 10132 * data to be acked, restart retransmit timer, using current 10133 * (possibly backed-off) value. 10134 */ 10135 if (acked == 0) { 10136 if (ofia) 10137 *ofia = ourfinisacked; 10138 return (0); 10139 } 10140 if (IN_RECOVERY(tp->t_flags)) { 10141 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10142 (SEQ_LT(th->th_ack, tp->snd_max))) { 10143 tcp_rack_partialack(tp); 10144 } else { 10145 rack_post_recovery(tp, th->th_ack); 10146 recovery = 1; 10147 } 10148 } 10149 /* 10150 * Let the congestion control algorithm update congestion control 10151 * related information. This typically means increasing the 10152 * congestion window. 10153 */ 10154 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10155 SOCKBUF_LOCK(&so->so_snd); 10156 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10157 tp->snd_wnd -= acked_amount; 10158 mfree = sbcut_locked(&so->so_snd, acked_amount); 10159 if ((sbused(&so->so_snd) == 0) && 10160 (acked > acked_amount) && 10161 (tp->t_state >= TCPS_FIN_WAIT_1) && 10162 (tp->t_flags & TF_SENTFIN)) { 10163 /* 10164 * We must be sure our fin 10165 * was sent and acked (we can be 10166 * in FIN_WAIT_1 without having 10167 * sent the fin). 10168 */ 10169 ourfinisacked = 1; 10170 } 10171 tp->snd_una = th->th_ack; 10172 if (acked_amount && sbavail(&so->so_snd)) 10173 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10174 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10175 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10176 sowwakeup_locked(so); 10177 m_freem(mfree); 10178 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10179 tp->snd_recover = tp->snd_una; 10180 10181 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10182 tp->snd_nxt = tp->snd_una; 10183 } 10184 if (under_pacing && 10185 (rack->use_fixed_rate == 0) && 10186 (rack->in_probe_rtt == 0) && 10187 rack->rc_gp_dyn_mul && 10188 rack->rc_always_pace) { 10189 /* Check if we are dragging bottom */ 10190 rack_check_bottom_drag(tp, rack, so, acked); 10191 } 10192 if (tp->snd_una == tp->snd_max) { 10193 /* Nothing left outstanding */ 10194 tp->t_flags &= ~TF_PREVVALID; 10195 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10196 rack->r_ctl.retran_during_recovery = 0; 10197 rack->r_ctl.dsack_byte_cnt = 0; 10198 if (rack->r_ctl.rc_went_idle_time == 0) 10199 rack->r_ctl.rc_went_idle_time = 1; 10200 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10201 if (sbavail(&tptosocket(tp)->so_snd) == 0) 10202 tp->t_acktime = 0; 10203 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10204 /* Set need output so persist might get set */ 10205 rack->r_wanted_output = 1; 10206 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10207 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10208 (sbavail(&so->so_snd) == 0) && 10209 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10210 /* 10211 * The socket was gone and the 10212 * peer sent data (now or in the past), time to 10213 * reset him. 10214 */ 10215 *ret_val = 1; 10216 /* tcp_close will kill the inp pre-log the Reset */ 10217 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10218 tp = tcp_close(tp); 10219 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10220 return (1); 10221 } 10222 } 10223 if (ofia) 10224 *ofia = ourfinisacked; 10225 return (0); 10226 } 10227 10228 10229 static void 10230 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 10231 int dir, uint32_t flags, struct rack_sendmap *rsm) 10232 { 10233 if (tcp_bblogging_on(rack->rc_tp)) { 10234 union tcp_log_stackspecific log; 10235 struct timeval tv; 10236 10237 memset(&log, 0, sizeof(log)); 10238 log.u_bbr.flex1 = cnt; 10239 log.u_bbr.flex2 = split; 10240 log.u_bbr.flex3 = out; 10241 log.u_bbr.flex4 = line; 10242 log.u_bbr.flex5 = rack->r_must_retran; 10243 log.u_bbr.flex6 = flags; 10244 log.u_bbr.flex7 = rack->rc_has_collapsed; 10245 log.u_bbr.flex8 = dir; /* 10246 * 1 is collapsed, 0 is uncollapsed, 10247 * 2 is log of a rsm being marked, 3 is a split. 10248 */ 10249 if (rsm == NULL) 10250 log.u_bbr.rttProp = 0; 10251 else 10252 log.u_bbr.rttProp = (uint64_t)rsm; 10253 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10254 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10255 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10256 &rack->rc_inp->inp_socket->so_rcv, 10257 &rack->rc_inp->inp_socket->so_snd, 10258 TCP_RACK_LOG_COLLAPSE, 0, 10259 0, &log, false, &tv); 10260 } 10261 } 10262 10263 static void 10264 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line) 10265 { 10266 /* 10267 * Here all we do is mark the collapsed point and set the flag. 10268 * This may happen again and again, but there is no 10269 * sense splitting our map until we know where the 10270 * peer finally lands in the collapse. 
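 *
 * The collapse point recorded below is just the right edge of the
 * peer's advertised window:
 *
 *	last_collapse_point = snd_una + snd_wnd;
 *	high_collapse_point = snd_max;
 *
 * rack_un_collapse_window() later uses last_collapse_point to
 * decide whether the sendmap must be split and from where the
 * RACK_RWND_COLLAPSED flag is applied.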
10271 */ 10272 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); 10273 if ((rack->rc_has_collapsed == 0) || 10274 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd))) 10275 counter_u64_add(rack_collapsed_win_seen, 1); 10276 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10277 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 10278 rack->rc_has_collapsed = 1; 10279 rack->r_collapse_point_valid = 1; 10280 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 10281 } 10282 10283 static void 10284 rack_un_collapse_window(struct tcp_rack *rack, int line) 10285 { 10286 struct rack_sendmap *nrsm, *rsm, fe; 10287 int cnt = 0, split = 0; 10288 #ifdef INVARIANTS 10289 struct rack_sendmap *insret; 10290 #endif 10291 10292 memset(&fe, 0, sizeof(fe)); 10293 rack->rc_has_collapsed = 0; 10294 fe.r_start = rack->r_ctl.last_collapse_point; 10295 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10296 if (rsm == NULL) { 10297 /* Nothing to do maybe the peer ack'ed it all */ 10298 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10299 return; 10300 } 10301 /* Now do we need to split this one? */ 10302 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 10303 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 10304 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 10305 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10306 if (nrsm == NULL) { 10307 /* We can't get a rsm, mark all? */ 10308 nrsm = rsm; 10309 goto no_split; 10310 } 10311 /* Clone it */ 10312 split = 1; 10313 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 10314 #ifndef INVARIANTS 10315 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10316 #else 10317 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10318 if (insret != NULL) { 10319 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10320 nrsm, insret, rack, rsm); 10321 } 10322 #endif 10323 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 10324 rack->r_ctl.last_collapse_point, __LINE__); 10325 if (rsm->r_in_tmap) { 10326 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10327 nrsm->r_in_tmap = 1; 10328 } 10329 /* 10330 * Set in the new RSM as the 10331 * collapsed starting point 10332 */ 10333 rsm = nrsm; 10334 } 10335 no_split: 10336 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10337 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10338 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 10339 cnt++; 10340 } 10341 if (cnt) { 10342 counter_u64_add(rack_collapsed_win, 1); 10343 } 10344 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10345 } 10346 10347 static void 10348 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10349 int32_t tlen, int32_t tfo_syn) 10350 { 10351 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10352 if (rack->rc_dack_mode && 10353 (tlen > 500) && 10354 (rack->rc_dack_toggle == 1)) { 10355 goto no_delayed_ack; 10356 } 10357 rack_timer_cancel(tp, rack, 10358 rack->r_ctl.rc_rcvtime, __LINE__); 10359 tp->t_flags |= TF_DELACK; 10360 } else { 10361 no_delayed_ack: 10362 rack->r_wanted_output = 1; 10363 tp->t_flags |= TF_ACKNOW; 10364 if (rack->rc_dack_mode) { 10365 if (tp->t_flags & TF_DELACK) 10366 rack->rc_dack_toggle = 1; 10367 else 10368 rack->rc_dack_toggle = 0; 10369 } 10370 } 10371 } 10372 10373 static void 10374 rack_validate_fo_sendwin_up(struct 
tcpcb *tp, struct tcp_rack *rack) 10375 { 10376 /* 10377 * If fast output is in progress, lets validate that 10378 * the new window did not shrink on us and make it 10379 * so fast output should end. 10380 */ 10381 if (rack->r_fast_output) { 10382 uint32_t out; 10383 10384 /* 10385 * Calculate what we will send if left as is 10386 * and compare that to our send window. 10387 */ 10388 out = ctf_outstanding(tp); 10389 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10390 /* ok we have an issue */ 10391 if (out >= tp->snd_wnd) { 10392 /* Turn off fast output the window is met or collapsed */ 10393 rack->r_fast_output = 0; 10394 } else { 10395 /* we have some room left */ 10396 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10397 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10398 /* If not at least 1 full segment never mind */ 10399 rack->r_fast_output = 0; 10400 } 10401 } 10402 } 10403 } 10404 } 10405 10406 10407 /* 10408 * Return value of 1, the TCB is unlocked and most 10409 * likely gone, return value of 0, the TCP is still 10410 * locked. 10411 */ 10412 static int 10413 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10414 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10415 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10416 { 10417 /* 10418 * Update window information. Don't look at window if no ACK: TAC's 10419 * send garbage on first SYN. 10420 */ 10421 int32_t nsegs; 10422 int32_t tfo_syn; 10423 struct tcp_rack *rack; 10424 10425 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10426 10427 rack = (struct tcp_rack *)tp->t_fb_ptr; 10428 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10429 if ((thflags & TH_ACK) && 10430 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10431 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10432 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10433 /* keep track of pure window updates */ 10434 if (tlen == 0 && 10435 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10436 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10437 tp->snd_wnd = tiwin; 10438 rack_validate_fo_sendwin_up(tp, rack); 10439 tp->snd_wl1 = th->th_seq; 10440 tp->snd_wl2 = th->th_ack; 10441 if (tp->snd_wnd > tp->max_sndwnd) 10442 tp->max_sndwnd = tp->snd_wnd; 10443 rack->r_wanted_output = 1; 10444 } else if (thflags & TH_ACK) { 10445 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10446 tp->snd_wnd = tiwin; 10447 rack_validate_fo_sendwin_up(tp, rack); 10448 tp->snd_wl1 = th->th_seq; 10449 tp->snd_wl2 = th->th_ack; 10450 } 10451 } 10452 if (tp->snd_wnd < ctf_outstanding(tp)) 10453 /* The peer collapsed the window */ 10454 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10455 else if (rack->rc_has_collapsed) 10456 rack_un_collapse_window(rack, __LINE__); 10457 if ((rack->r_collapse_point_valid) && 10458 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 10459 rack->r_collapse_point_valid = 0; 10460 /* Was persist timer active and now we have window space? */ 10461 if ((rack->rc_in_persist != 0) && 10462 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10463 rack->r_ctl.rc_pace_min_segs))) { 10464 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10465 tp->snd_nxt = tp->snd_max; 10466 /* Make sure we output to start the timer */ 10467 rack->r_wanted_output = 1; 10468 } 10469 /* Do we enter persists? 
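 * Loosely, the entry test below is:
 *
 *	snd_wnd < min(rc_high_rwnd / 2, rc_pace_min_segs) &&
 *	established && (nothing outstanding || window collapsed) &&
 *	sbavail(so_snd) > snd_wnd
 *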
*/ 10470 if ((rack->rc_in_persist == 0) && 10471 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10472 TCPS_HAVEESTABLISHED(tp->t_state) && 10473 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10474 sbavail(&tptosocket(tp)->so_snd) && 10475 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10476 /* 10477 * Here the rwnd is less than 10478 * the pacing size, we are established, 10479 * nothing is outstanding, and there is 10480 * data to send. Enter persists. 10481 */ 10482 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10483 } 10484 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10485 m_freem(m); 10486 return (0); 10487 } 10488 /* 10489 * don't process the URG bit, ignore them drag 10490 * along the up. 10491 */ 10492 tp->rcv_up = tp->rcv_nxt; 10493 10494 /* 10495 * Process the segment text, merging it into the TCP sequencing 10496 * queue, and arranging for acknowledgment of receipt if necessary. 10497 * This process logically involves adjusting tp->rcv_wnd as data is 10498 * presented to the user (this happens in tcp_usrreq.c, case 10499 * PRU_RCVD). If a FIN has already been received on this connection 10500 * then we just ignore the text. 10501 */ 10502 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10503 IS_FASTOPEN(tp->t_flags)); 10504 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10505 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10506 tcp_seq save_start = th->th_seq; 10507 tcp_seq save_rnxt = tp->rcv_nxt; 10508 int save_tlen = tlen; 10509 10510 m_adj(m, drop_hdrlen); /* delayed header drop */ 10511 /* 10512 * Insert segment which includes th into TCP reassembly 10513 * queue with control block tp. Set thflags to whether 10514 * reassembly now includes a segment with FIN. This handles 10515 * the common case inline (segment is the next to be 10516 * received on an established connection, and the queue is 10517 * empty), avoiding linkage into and removal from the queue 10518 * and repetition of various conversions. Set DELACK for 10519 * segments received in order, but ack immediately when 10520 * segments are out of order (so fast retransmit can work). 10521 */ 10522 if (th->th_seq == tp->rcv_nxt && 10523 SEGQ_EMPTY(tp) && 10524 (TCPS_HAVEESTABLISHED(tp->t_state) || 10525 tfo_syn)) { 10526 #ifdef NETFLIX_SB_LIMITS 10527 u_int mcnt, appended; 10528 10529 if (so->so_rcv.sb_shlim) { 10530 mcnt = m_memcnt(m); 10531 appended = 0; 10532 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10533 CFO_NOSLEEP, NULL) == false) { 10534 counter_u64_add(tcp_sb_shlim_fails, 1); 10535 m_freem(m); 10536 return (0); 10537 } 10538 } 10539 #endif 10540 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10541 tp->rcv_nxt += tlen; 10542 if (tlen && 10543 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10544 (tp->t_fbyte_in == 0)) { 10545 tp->t_fbyte_in = ticks; 10546 if (tp->t_fbyte_in == 0) 10547 tp->t_fbyte_in = 1; 10548 if (tp->t_fbyte_out && tp->t_fbyte_in) 10549 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10550 } 10551 thflags = tcp_get_flags(th) & TH_FIN; 10552 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10553 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10554 SOCKBUF_LOCK(&so->so_rcv); 10555 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10556 m_freem(m); 10557 } else 10558 #ifdef NETFLIX_SB_LIMITS 10559 appended = 10560 #endif 10561 sbappendstream_locked(&so->so_rcv, m, 0); 10562 10563 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10564 /* NB: sorwakeup_locked() does an implicit unlock. 
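 * The lock in question is the receive socket buffer lock taken
 * above, so no explicit SOCKBUF_UNLOCK(&so->so_rcv) follows.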
*/ 10565 sorwakeup_locked(so); 10566 #ifdef NETFLIX_SB_LIMITS 10567 if (so->so_rcv.sb_shlim && appended != mcnt) 10568 counter_fo_release(so->so_rcv.sb_shlim, 10569 mcnt - appended); 10570 #endif 10571 } else { 10572 /* 10573 * XXX: Due to the header drop above "th" is 10574 * theoretically invalid by now. Fortunately 10575 * m_adj() doesn't actually frees any mbufs when 10576 * trimming from the head. 10577 */ 10578 tcp_seq temp = save_start; 10579 10580 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10581 tp->t_flags |= TF_ACKNOW; 10582 if (tp->t_flags & TF_WAKESOR) { 10583 tp->t_flags &= ~TF_WAKESOR; 10584 /* NB: sorwakeup_locked() does an implicit unlock. */ 10585 sorwakeup_locked(so); 10586 } 10587 } 10588 if ((tp->t_flags & TF_SACK_PERMIT) && 10589 (save_tlen > 0) && 10590 TCPS_HAVEESTABLISHED(tp->t_state)) { 10591 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10592 /* 10593 * DSACK actually handled in the fastpath 10594 * above. 10595 */ 10596 RACK_OPTS_INC(tcp_sack_path_1); 10597 tcp_update_sack_list(tp, save_start, 10598 save_start + save_tlen); 10599 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10600 if ((tp->rcv_numsacks >= 1) && 10601 (tp->sackblks[0].end == save_start)) { 10602 /* 10603 * Partial overlap, recorded at todrop 10604 * above. 10605 */ 10606 RACK_OPTS_INC(tcp_sack_path_2a); 10607 tcp_update_sack_list(tp, 10608 tp->sackblks[0].start, 10609 tp->sackblks[0].end); 10610 } else { 10611 RACK_OPTS_INC(tcp_sack_path_2b); 10612 tcp_update_dsack_list(tp, save_start, 10613 save_start + save_tlen); 10614 } 10615 } else if (tlen >= save_tlen) { 10616 /* Update of sackblks. */ 10617 RACK_OPTS_INC(tcp_sack_path_3); 10618 tcp_update_dsack_list(tp, save_start, 10619 save_start + save_tlen); 10620 } else if (tlen > 0) { 10621 RACK_OPTS_INC(tcp_sack_path_4); 10622 tcp_update_dsack_list(tp, save_start, 10623 save_start + tlen); 10624 } 10625 } 10626 } else { 10627 m_freem(m); 10628 thflags &= ~TH_FIN; 10629 } 10630 10631 /* 10632 * If FIN is received ACK the FIN and let the user know that the 10633 * connection is closing. 10634 */ 10635 if (thflags & TH_FIN) { 10636 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10637 /* The socket upcall is handled by socantrcvmore. */ 10638 socantrcvmore(so); 10639 /* 10640 * If connection is half-synchronized (ie NEEDSYN 10641 * flag on) then delay ACK, so it may be piggybacked 10642 * when SYN is sent. Otherwise, since we received a 10643 * FIN then no more input can be expected, send ACK 10644 * now. 10645 */ 10646 if (tp->t_flags & TF_NEEDSYN) { 10647 rack_timer_cancel(tp, rack, 10648 rack->r_ctl.rc_rcvtime, __LINE__); 10649 tp->t_flags |= TF_DELACK; 10650 } else { 10651 tp->t_flags |= TF_ACKNOW; 10652 } 10653 tp->rcv_nxt++; 10654 } 10655 switch (tp->t_state) { 10656 /* 10657 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10658 * CLOSE_WAIT state. 10659 */ 10660 case TCPS_SYN_RECEIVED: 10661 tp->t_starttime = ticks; 10662 /* FALLTHROUGH */ 10663 case TCPS_ESTABLISHED: 10664 rack_timer_cancel(tp, rack, 10665 rack->r_ctl.rc_rcvtime, __LINE__); 10666 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10667 break; 10668 10669 /* 10670 * If still in FIN_WAIT_1 STATE FIN has not been 10671 * acked so enter the CLOSING state. 10672 */ 10673 case TCPS_FIN_WAIT_1: 10674 rack_timer_cancel(tp, rack, 10675 rack->r_ctl.rc_rcvtime, __LINE__); 10676 tcp_state_change(tp, TCPS_CLOSING); 10677 break; 10678 10679 /* 10680 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10681 * starting the time-wait timer, turning off the 10682 * other standard timers. 
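 *
 * Note the return (1) after tcp_twstart() below: per this
 * function's contract a return of 1 means the tcpcb is unlocked
 * and most likely gone, so the caller must not touch it again.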
10683 */ 10684 case TCPS_FIN_WAIT_2: 10685 rack_timer_cancel(tp, rack, 10686 rack->r_ctl.rc_rcvtime, __LINE__); 10687 tcp_twstart(tp); 10688 return (1); 10689 } 10690 } 10691 /* 10692 * Return any desired output. 10693 */ 10694 if ((tp->t_flags & TF_ACKNOW) || 10695 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10696 rack->r_wanted_output = 1; 10697 } 10698 return (0); 10699 } 10700 10701 /* 10702 * Here nothing is really faster, its just that we 10703 * have broken out the fast-data path also just like 10704 * the fast-ack. 10705 */ 10706 static int 10707 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10708 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10709 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10710 { 10711 int32_t nsegs; 10712 int32_t newsize = 0; /* automatic sockbuf scaling */ 10713 struct tcp_rack *rack; 10714 #ifdef NETFLIX_SB_LIMITS 10715 u_int mcnt, appended; 10716 #endif 10717 10718 /* 10719 * If last ACK falls within this segment's sequence numbers, record 10720 * the timestamp. NOTE that the test is modified according to the 10721 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10722 */ 10723 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10724 return (0); 10725 } 10726 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10727 return (0); 10728 } 10729 if (tiwin && tiwin != tp->snd_wnd) { 10730 return (0); 10731 } 10732 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10733 return (0); 10734 } 10735 if (__predict_false((to->to_flags & TOF_TS) && 10736 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10737 return (0); 10738 } 10739 if (__predict_false((th->th_ack != tp->snd_una))) { 10740 return (0); 10741 } 10742 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10743 return (0); 10744 } 10745 if ((to->to_flags & TOF_TS) != 0 && 10746 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10747 tp->ts_recent_age = tcp_ts_getticks(); 10748 tp->ts_recent = to->to_tsval; 10749 } 10750 rack = (struct tcp_rack *)tp->t_fb_ptr; 10751 /* 10752 * This is a pure, in-sequence data packet with nothing on the 10753 * reassembly queue and we have enough buffer space to take it. 10754 */ 10755 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10756 10757 #ifdef NETFLIX_SB_LIMITS 10758 if (so->so_rcv.sb_shlim) { 10759 mcnt = m_memcnt(m); 10760 appended = 0; 10761 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10762 CFO_NOSLEEP, NULL) == false) { 10763 counter_u64_add(tcp_sb_shlim_fails, 1); 10764 m_freem(m); 10765 return (1); 10766 } 10767 } 10768 #endif 10769 /* Clean receiver SACK report if present */ 10770 if (tp->rcv_numsacks) 10771 tcp_clean_sackreport(tp); 10772 KMOD_TCPSTAT_INC(tcps_preddat); 10773 tp->rcv_nxt += tlen; 10774 if (tlen && 10775 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10776 (tp->t_fbyte_in == 0)) { 10777 tp->t_fbyte_in = ticks; 10778 if (tp->t_fbyte_in == 0) 10779 tp->t_fbyte_in = 1; 10780 if (tp->t_fbyte_out && tp->t_fbyte_in) 10781 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10782 } 10783 /* 10784 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10785 */ 10786 tp->snd_wl1 = th->th_seq; 10787 /* 10788 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10789 */ 10790 tp->rcv_up = tp->rcv_nxt; 10791 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10792 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10793 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10794 10795 /* Add data to socket buffer. 
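 * If tcp_autorcvbuf() suggested a larger receive buffer (newsize
 * above), it is applied via sbreserve_locked() first; if that
 * fails, SB_AUTOSIZE is cleared so we stop trying to grow it.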
*/ 10796 SOCKBUF_LOCK(&so->so_rcv); 10797 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10798 m_freem(m); 10799 } else { 10800 /* 10801 * Set new socket buffer size. Give up when limit is 10802 * reached. 10803 */ 10804 if (newsize) 10805 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 10806 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10807 m_adj(m, drop_hdrlen); /* delayed header drop */ 10808 #ifdef NETFLIX_SB_LIMITS 10809 appended = 10810 #endif 10811 sbappendstream_locked(&so->so_rcv, m, 0); 10812 ctf_calc_rwin(so, tp); 10813 } 10814 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10815 /* NB: sorwakeup_locked() does an implicit unlock. */ 10816 sorwakeup_locked(so); 10817 #ifdef NETFLIX_SB_LIMITS 10818 if (so->so_rcv.sb_shlim && mcnt != appended) 10819 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10820 #endif 10821 rack_handle_delayed_ack(tp, rack, tlen, 0); 10822 if (tp->snd_una == tp->snd_max) 10823 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10824 return (1); 10825 } 10826 10827 /* 10828 * This subfunction is used to try to highly optimize the 10829 * fast path. We again allow window updates that are 10830 * in sequence to remain in the fast-path. We also add 10831 * in the __predict's to attempt to help the compiler. 10832 * Note that if we return a 0, then we can *not* process 10833 * it and the caller should push the packet into the 10834 * slow-path. 10835 */ 10836 static int 10837 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10838 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10839 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10840 { 10841 int32_t acked; 10842 int32_t nsegs; 10843 int32_t under_pacing = 0; 10844 struct tcp_rack *rack; 10845 10846 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10847 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10848 return (0); 10849 } 10850 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10851 /* Above what we have sent? */ 10852 return (0); 10853 } 10854 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10855 /* We are retransmitting */ 10856 return (0); 10857 } 10858 if (__predict_false(tiwin == 0)) { 10859 /* zero window */ 10860 return (0); 10861 } 10862 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10863 /* We need a SYN or a FIN, unlikely.. */ 10864 return (0); 10865 } 10866 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10867 /* Timestamp is behind .. old ack with seq wrap? */ 10868 return (0); 10869 } 10870 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10871 /* Still recovering */ 10872 return (0); 10873 } 10874 rack = (struct tcp_rack *)tp->t_fb_ptr; 10875 if (rack->r_ctl.rc_sacked) { 10876 /* We have sack holes on our scoreboard */ 10877 return (0); 10878 } 10879 /* Ok if we reach here, we can process a fast-ack */ 10880 if (rack->gp_ready && 10881 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10882 under_pacing = 1; 10883 } 10884 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10885 rack_log_ack(tp, to, th, 0, 0); 10886 /* Did the window get updated? */ 10887 if (tiwin != tp->snd_wnd) { 10888 tp->snd_wnd = tiwin; 10889 rack_validate_fo_sendwin_up(tp, rack); 10890 tp->snd_wl1 = th->th_seq; 10891 if (tp->snd_wnd > tp->max_sndwnd) 10892 tp->max_sndwnd = tp->snd_wnd; 10893 } 10894 /* Do we exit persists? 
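 * i.e. leave persist once the peer's window has re-opened to at
 * least min(rc_high_rwnd / 2, rc_pace_min_segs), which mirrors the
 * persist-entry test just below it.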
*/ 10895 if ((rack->rc_in_persist != 0) && 10896 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10897 rack->r_ctl.rc_pace_min_segs))) { 10898 rack_exit_persist(tp, rack, cts); 10899 } 10900 /* Do we enter persists? */ 10901 if ((rack->rc_in_persist == 0) && 10902 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10903 TCPS_HAVEESTABLISHED(tp->t_state) && 10904 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10905 sbavail(&tptosocket(tp)->so_snd) && 10906 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10907 /* 10908 * Here the rwnd is less than 10909 * the pacing size, we are established, 10910 * nothing is outstanding, and there is 10911 * data to send. Enter persists. 10912 */ 10913 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10914 } 10915 /* 10916 * If last ACK falls within this segment's sequence numbers, record 10917 * the timestamp. NOTE that the test is modified according to the 10918 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10919 */ 10920 if ((to->to_flags & TOF_TS) != 0 && 10921 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10922 tp->ts_recent_age = tcp_ts_getticks(); 10923 tp->ts_recent = to->to_tsval; 10924 } 10925 /* 10926 * This is a pure ack for outstanding data. 10927 */ 10928 KMOD_TCPSTAT_INC(tcps_predack); 10929 10930 /* 10931 * "bad retransmit" recovery. 10932 */ 10933 if ((tp->t_flags & TF_PREVVALID) && 10934 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10935 tp->t_flags &= ~TF_PREVVALID; 10936 if (tp->t_rxtshift == 1 && 10937 (int)(ticks - tp->t_badrxtwin) < 0) 10938 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10939 } 10940 /* 10941 * Recalculate the transmit timer / rtt. 10942 * 10943 * Some boxes send broken timestamp replies during the SYN+ACK 10944 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10945 * and blow up the retransmit timer. 10946 */ 10947 acked = BYTES_THIS_ACK(tp, th); 10948 10949 #ifdef TCP_HHOOK 10950 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 10951 hhook_run_tcp_est_in(tp, th, to); 10952 #endif 10953 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10954 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10955 if (acked) { 10956 struct mbuf *mfree; 10957 10958 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 10959 SOCKBUF_LOCK(&so->so_snd); 10960 mfree = sbcut_locked(&so->so_snd, acked); 10961 tp->snd_una = th->th_ack; 10962 /* Note we want to hold the sb lock through the sendmap adjust */ 10963 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10964 /* Wake up the socket if we have room to write more */ 10965 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10966 sowwakeup_locked(so); 10967 m_freem(mfree); 10968 tp->t_rxtshift = 0; 10969 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10970 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10971 rack->rc_tlp_in_progress = 0; 10972 rack->r_ctl.rc_tlp_cnt_out = 0; 10973 /* 10974 * If it is the RXT timer we want to 10975 * stop it, so we can restart a TLP. 10976 */ 10977 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10978 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10979 #ifdef NETFLIX_HTTP_LOGGING 10980 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10981 #endif 10982 } 10983 /* 10984 * Let the congestion control algorithm update congestion control 10985 * related information. This typically means increasing the 10986 * congestion window. 
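 *
 * In this fast path that update already happened above, where
 * rack_ack_received() was called with CC_ACK for the acked bytes;
 * what follows here is the collapsed-window bookkeeping.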
10987 */ 10988 if (tp->snd_wnd < ctf_outstanding(tp)) { 10989 /* The peer collapsed the window */ 10990 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10991 } else if (rack->rc_has_collapsed) 10992 rack_un_collapse_window(rack, __LINE__); 10993 if ((rack->r_collapse_point_valid) && 10994 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 10995 rack->r_collapse_point_valid = 0; 10996 /* 10997 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 10998 */ 10999 tp->snd_wl2 = th->th_ack; 11000 tp->t_dupacks = 0; 11001 m_freem(m); 11002 /* ND6_HINT(tp); *//* Some progress has been made. */ 11003 11004 /* 11005 * If all outstanding data are acked, stop retransmit timer, 11006 * otherwise restart timer using current (possibly backed-off) 11007 * value. If process is waiting for space, wakeup/selwakeup/signal. 11008 * If data are ready to send, let tcp_output decide between more 11009 * output or persist. 11010 */ 11011 if (under_pacing && 11012 (rack->use_fixed_rate == 0) && 11013 (rack->in_probe_rtt == 0) && 11014 rack->rc_gp_dyn_mul && 11015 rack->rc_always_pace) { 11016 /* Check if we are dragging bottom */ 11017 rack_check_bottom_drag(tp, rack, so, acked); 11018 } 11019 if (tp->snd_una == tp->snd_max) { 11020 tp->t_flags &= ~TF_PREVVALID; 11021 rack->r_ctl.retran_during_recovery = 0; 11022 rack->r_ctl.dsack_byte_cnt = 0; 11023 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11024 if (rack->r_ctl.rc_went_idle_time == 0) 11025 rack->r_ctl.rc_went_idle_time = 1; 11026 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11027 if (sbavail(&tptosocket(tp)->so_snd) == 0) 11028 tp->t_acktime = 0; 11029 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11030 } 11031 if (acked && rack->r_fast_output) 11032 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11033 if (sbavail(&so->so_snd)) { 11034 rack->r_wanted_output = 1; 11035 } 11036 return (1); 11037 } 11038 11039 /* 11040 * Return value of 1, the TCB is unlocked and most 11041 * likely gone, return value of 0, the TCP is still 11042 * locked. 11043 */ 11044 static int 11045 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11046 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11047 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11048 { 11049 int32_t ret_val = 0; 11050 int32_t todrop; 11051 int32_t ourfinisacked = 0; 11052 struct tcp_rack *rack; 11053 11054 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11055 11056 ctf_calc_rwin(so, tp); 11057 /* 11058 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11059 * SYN, drop the input. if seg contains a RST, then drop the 11060 * connection. if seg does not contain SYN, then drop it. Otherwise 11061 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11062 * tp->irs if seg contains ack then advance tp->snd_una if seg 11063 * contains an ECE and ECN support is enabled, the stream is ECN 11064 * capable. if SYN has been acked change to ESTABLISHED else 11065 * SYN_RCVD state arrange for segment to be acked (eventually) 11066 * continue processing rest of data/controls. 
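 *
 * The first check below is the standard acceptability test for an
 * ACK arriving in SYN_SENT, roughly:
 *
 *	if ((thflags & TH_ACK) &&
 *	    (th_ack <= iss || th_ack > snd_max))
 *		drop the segment and send a reset;
 *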
11067 */ 11068 if ((thflags & TH_ACK) && 11069 (SEQ_LEQ(th->th_ack, tp->iss) || 11070 SEQ_GT(th->th_ack, tp->snd_max))) { 11071 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11072 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11073 return (1); 11074 } 11075 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11076 TCP_PROBE5(connect__refused, NULL, tp, 11077 mtod(m, const char *), tp, th); 11078 tp = tcp_drop(tp, ECONNREFUSED); 11079 ctf_do_drop(m, tp); 11080 return (1); 11081 } 11082 if (thflags & TH_RST) { 11083 ctf_do_drop(m, tp); 11084 return (1); 11085 } 11086 if (!(thflags & TH_SYN)) { 11087 ctf_do_drop(m, tp); 11088 return (1); 11089 } 11090 tp->irs = th->th_seq; 11091 tcp_rcvseqinit(tp); 11092 rack = (struct tcp_rack *)tp->t_fb_ptr; 11093 if (thflags & TH_ACK) { 11094 int tfo_partial = 0; 11095 11096 KMOD_TCPSTAT_INC(tcps_connects); 11097 soisconnected(so); 11098 #ifdef MAC 11099 mac_socketpeer_set_from_mbuf(m, so); 11100 #endif 11101 /* Do window scaling on this connection? */ 11102 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11103 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11104 tp->rcv_scale = tp->request_r_scale; 11105 } 11106 tp->rcv_adv += min(tp->rcv_wnd, 11107 TCP_MAXWIN << tp->rcv_scale); 11108 /* 11109 * If not all the data that was sent in the TFO SYN 11110 * has been acked, resend the remainder right away. 11111 */ 11112 if (IS_FASTOPEN(tp->t_flags) && 11113 (tp->snd_una != tp->snd_max)) { 11114 tp->snd_nxt = th->th_ack; 11115 tfo_partial = 1; 11116 } 11117 /* 11118 * If there's data, delay ACK; if there's also a FIN ACKNOW 11119 * will be turned on later. 11120 */ 11121 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11122 rack_timer_cancel(tp, rack, 11123 rack->r_ctl.rc_rcvtime, __LINE__); 11124 tp->t_flags |= TF_DELACK; 11125 } else { 11126 rack->r_wanted_output = 1; 11127 tp->t_flags |= TF_ACKNOW; 11128 rack->rc_dack_toggle = 0; 11129 } 11130 11131 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11132 11133 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11134 /* 11135 * We advance snd_una for the 11136 * fast open case. If th_ack is 11137 * acknowledging data beyond 11138 * snd_una we can't just call 11139 * ack-processing since the 11140 * data stream in our send-map 11141 * will start at snd_una + 1 (one 11142 * beyond the SYN). If its just 11143 * equal we don't need to do that 11144 * and there is no send_map. 11145 */ 11146 tp->snd_una++; 11147 } 11148 /* 11149 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11150 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11151 */ 11152 tp->t_starttime = ticks; 11153 if (tp->t_flags & TF_NEEDFIN) { 11154 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11155 tp->t_flags &= ~TF_NEEDFIN; 11156 thflags &= ~TH_SYN; 11157 } else { 11158 tcp_state_change(tp, TCPS_ESTABLISHED); 11159 TCP_PROBE5(connect__established, NULL, tp, 11160 mtod(m, const char *), tp, th); 11161 rack_cc_conn_init(tp); 11162 } 11163 } else { 11164 /* 11165 * Received initial SYN in SYN-SENT[*] state => simultaneous 11166 * open. If segment contains CC option and there is a 11167 * cached CC, apply TAO test. If it succeeds, connection is * 11168 * half-synchronized. Otherwise, do 3-way handshake: 11169 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11170 * there was no CC option, clear cached CC value. 11171 */ 11172 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 11173 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11174 } 11175 /* 11176 * Advance th->th_seq to correspond to first data byte. 
If data, 11177 * trim to stay within window, dropping FIN if necessary. 11178 */ 11179 th->th_seq++; 11180 if (tlen > tp->rcv_wnd) { 11181 todrop = tlen - tp->rcv_wnd; 11182 m_adj(m, -todrop); 11183 tlen = tp->rcv_wnd; 11184 thflags &= ~TH_FIN; 11185 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11186 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11187 } 11188 tp->snd_wl1 = th->th_seq - 1; 11189 tp->rcv_up = th->th_seq; 11190 /* 11191 * Client side of transaction: already sent SYN and data. If the 11192 * remote host used T/TCP to validate the SYN, our data will be 11193 * ACK'd; if so, enter normal data segment processing in the middle 11194 * of step 5, ack processing. Otherwise, goto step 6. 11195 */ 11196 if (thflags & TH_ACK) { 11197 /* For syn-sent we need to possibly update the rtt */ 11198 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11199 uint32_t t, mcts; 11200 11201 mcts = tcp_ts_getticks(); 11202 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11203 if (!tp->t_rttlow || tp->t_rttlow > t) 11204 tp->t_rttlow = t; 11205 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11206 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11207 tcp_rack_xmit_timer_commit(rack, tp); 11208 } 11209 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11210 return (ret_val); 11211 /* We may have changed to FIN_WAIT_1 above */ 11212 if (tp->t_state == TCPS_FIN_WAIT_1) { 11213 /* 11214 * In FIN_WAIT_1 STATE in addition to the processing 11215 * for the ESTABLISHED state if our FIN is now 11216 * acknowledged then enter FIN_WAIT_2. 11217 */ 11218 if (ourfinisacked) { 11219 /* 11220 * If we can't receive any more data, then 11221 * closing user can proceed. Starting the 11222 * timer is contrary to the specification, 11223 * but if we don't get a FIN we'll hang 11224 * forever. 11225 * 11226 * XXXjl: we should release the tp also, and 11227 * use a compressed state. 11228 */ 11229 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11230 soisdisconnected(so); 11231 tcp_timer_activate(tp, TT_2MSL, 11232 (tcp_fast_finwait2_recycle ? 11233 tcp_finwait2_timeout : 11234 TP_MAXIDLE(tp))); 11235 } 11236 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11237 } 11238 } 11239 } 11240 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11241 tiwin, thflags, nxt_pkt)); 11242 } 11243 11244 /* 11245 * Return value of 1, the TCB is unlocked and most 11246 * likely gone, return value of 0, the TCP is still 11247 * locked. 11248 */ 11249 static int 11250 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11251 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11252 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11253 { 11254 struct tcp_rack *rack; 11255 int32_t ret_val = 0; 11256 int32_t ourfinisacked = 0; 11257 11258 ctf_calc_rwin(so, tp); 11259 if ((thflags & TH_ACK) && 11260 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11261 SEQ_GT(th->th_ack, tp->snd_max))) { 11262 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11263 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11264 return (1); 11265 } 11266 rack = (struct tcp_rack *)tp->t_fb_ptr; 11267 if (IS_FASTOPEN(tp->t_flags)) { 11268 /* 11269 * When a TFO connection is in SYN_RECEIVED, the 11270 * only valid packets are the initial SYN, a 11271 * retransmit/copy of the initial SYN (possibly with 11272 * a subset of the original data), a valid ACK, a 11273 * FIN, or a RST. 
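 * Anything else is dealt with below: a SYN|ACK gets a reset, a
 * bare SYN is dropped when a retransmit, TLP or RACK timer is
 * pending (it is a duplicate of the original), and a segment
 * carrying none of ACK, FIN or RST is simply dropped.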
11274 */ 11275 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11276 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11277 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11278 return (1); 11279 } else if (thflags & TH_SYN) { 11280 /* non-initial SYN is ignored */ 11281 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11282 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11283 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11284 ctf_do_drop(m, NULL); 11285 return (0); 11286 } 11287 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11288 ctf_do_drop(m, NULL); 11289 return (0); 11290 } 11291 } 11292 11293 if ((thflags & TH_RST) || 11294 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11295 return (__ctf_process_rst(m, th, so, tp, 11296 &rack->r_ctl.challenge_ack_ts, 11297 &rack->r_ctl.challenge_ack_cnt)); 11298 /* 11299 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11300 * it's less than ts_recent, drop it. 11301 */ 11302 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11303 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11304 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11305 return (ret_val); 11306 } 11307 /* 11308 * In the SYN-RECEIVED state, validate that the packet belongs to 11309 * this connection before trimming the data to fit the receive 11310 * window. Check the sequence number versus IRS since we know the 11311 * sequence numbers haven't wrapped. This is a partial fix for the 11312 * "LAND" DoS attack. 11313 */ 11314 if (SEQ_LT(th->th_seq, tp->irs)) { 11315 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11316 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11317 return (1); 11318 } 11319 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11320 &rack->r_ctl.challenge_ack_ts, 11321 &rack->r_ctl.challenge_ack_cnt)) { 11322 return (ret_val); 11323 } 11324 /* 11325 * If last ACK falls within this segment's sequence numbers, record 11326 * its timestamp. NOTE: 1) That the test incorporates suggestions 11327 * from the latest proposal of the tcplw@cray.com list (Braden 11328 * 1993/04/26). 2) That updating only on newer timestamps interferes 11329 * with our earlier PAWS tests, so this check should be solely 11330 * predicated on the sequence space of this segment. 3) That we 11331 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11332 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11333 * SEG.Len, This modified check allows us to overcome RFC1323's 11334 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11335 * p.869. In such cases, we can still calculate the RTT correctly 11336 * when RCV.NXT == Last.ACK.Sent. 11337 */ 11338 if ((to->to_flags & TOF_TS) != 0 && 11339 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11340 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11341 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11342 tp->ts_recent_age = tcp_ts_getticks(); 11343 tp->ts_recent = to->to_tsval; 11344 } 11345 tp->snd_wnd = tiwin; 11346 rack_validate_fo_sendwin_up(tp, rack); 11347 /* 11348 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11349 * is on (half-synchronized state), then queue data for later 11350 * processing; else drop segment and return. 
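 * Note that for a TFO connection rack_cc_conn_init() is still run
 * below even without the ACK bit, presumably because a TFO server
 * can already be sending data while in SYN_RECEIVED (see the
 * cc_conn_init() remark a little further down).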
11351 */ 11352 if ((thflags & TH_ACK) == 0) { 11353 if (IS_FASTOPEN(tp->t_flags)) { 11354 rack_cc_conn_init(tp); 11355 } 11356 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11357 tiwin, thflags, nxt_pkt)); 11358 } 11359 KMOD_TCPSTAT_INC(tcps_connects); 11360 if (tp->t_flags & TF_SONOTCONN) { 11361 tp->t_flags &= ~TF_SONOTCONN; 11362 soisconnected(so); 11363 } 11364 /* Do window scaling? */ 11365 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11366 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11367 tp->rcv_scale = tp->request_r_scale; 11368 } 11369 /* 11370 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11371 * FIN-WAIT-1 11372 */ 11373 tp->t_starttime = ticks; 11374 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11375 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11376 tp->t_tfo_pending = NULL; 11377 } 11378 if (tp->t_flags & TF_NEEDFIN) { 11379 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11380 tp->t_flags &= ~TF_NEEDFIN; 11381 } else { 11382 tcp_state_change(tp, TCPS_ESTABLISHED); 11383 TCP_PROBE5(accept__established, NULL, tp, 11384 mtod(m, const char *), tp, th); 11385 /* 11386 * TFO connections call cc_conn_init() during SYN 11387 * processing. Calling it again here for such connections 11388 * is not harmless as it would undo the snd_cwnd reduction 11389 * that occurs when a TFO SYN|ACK is retransmitted. 11390 */ 11391 if (!IS_FASTOPEN(tp->t_flags)) 11392 rack_cc_conn_init(tp); 11393 } 11394 /* 11395 * Account for the ACK of our SYN prior to 11396 * regular ACK processing below, except for 11397 * simultaneous SYN, which is handled later. 11398 */ 11399 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11400 tp->snd_una++; 11401 /* 11402 * If segment contains data or ACK, will call tcp_reass() later; if 11403 * not, do so now to pass queued data to user. 11404 */ 11405 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11406 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11407 (struct mbuf *)0); 11408 if (tp->t_flags & TF_WAKESOR) { 11409 tp->t_flags &= ~TF_WAKESOR; 11410 /* NB: sorwakeup_locked() does an implicit unlock. */ 11411 sorwakeup_locked(so); 11412 } 11413 } 11414 tp->snd_wl1 = th->th_seq - 1; 11415 /* For syn-recv we need to possibly update the rtt */ 11416 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11417 uint32_t t, mcts; 11418 11419 mcts = tcp_ts_getticks(); 11420 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11421 if (!tp->t_rttlow || tp->t_rttlow > t) 11422 tp->t_rttlow = t; 11423 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11424 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11425 tcp_rack_xmit_timer_commit(rack, tp); 11426 } 11427 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11428 return (ret_val); 11429 } 11430 if (tp->t_state == TCPS_FIN_WAIT_1) { 11431 /* We could have went to FIN_WAIT_1 (or EST) above */ 11432 /* 11433 * In FIN_WAIT_1 STATE in addition to the processing for the 11434 * ESTABLISHED state if our FIN is now acknowledged then 11435 * enter FIN_WAIT_2. 11436 */ 11437 if (ourfinisacked) { 11438 /* 11439 * If we can't receive any more data, then closing 11440 * user can proceed. Starting the timer is contrary 11441 * to the specification, but if we don't get a FIN 11442 * we'll hang forever. 11443 * 11444 * XXXjl: we should release the tp also, and use a 11445 * compressed state. 
11446 */ 11447 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11448 soisdisconnected(so); 11449 tcp_timer_activate(tp, TT_2MSL, 11450 (tcp_fast_finwait2_recycle ? 11451 tcp_finwait2_timeout : 11452 TP_MAXIDLE(tp))); 11453 } 11454 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11455 } 11456 } 11457 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11458 tiwin, thflags, nxt_pkt)); 11459 } 11460 11461 /* 11462 * Return value of 1, the TCB is unlocked and most 11463 * likely gone, return value of 0, the TCP is still 11464 * locked. 11465 */ 11466 static int 11467 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11468 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11469 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11470 { 11471 int32_t ret_val = 0; 11472 struct tcp_rack *rack; 11473 11474 /* 11475 * Header prediction: check for the two common cases of a 11476 * uni-directional data xfer. If the packet has no control flags, 11477 * is in-sequence, the window didn't change and we're not 11478 * retransmitting, it's a candidate. If the length is zero and the 11479 * ack moved forward, we're the sender side of the xfer. Just free 11480 * the data acked & wake any higher level process that was blocked 11481 * waiting for space. If the length is non-zero and the ack didn't 11482 * move, we're the receiver side. If we're getting packets in-order 11483 * (the reassembly queue is empty), add the data toc The socket 11484 * buffer and note that we need a delayed ack. Make sure that the 11485 * hidden state-flags are also off. Since we check for 11486 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11487 */ 11488 rack = (struct tcp_rack *)tp->t_fb_ptr; 11489 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11490 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11491 __predict_true(SEGQ_EMPTY(tp)) && 11492 __predict_true(th->th_seq == tp->rcv_nxt)) { 11493 if (tlen == 0) { 11494 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11495 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11496 return (0); 11497 } 11498 } else { 11499 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11500 tiwin, nxt_pkt, iptos)) { 11501 return (0); 11502 } 11503 } 11504 } 11505 ctf_calc_rwin(so, tp); 11506 11507 if ((thflags & TH_RST) || 11508 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11509 return (__ctf_process_rst(m, th, so, tp, 11510 &rack->r_ctl.challenge_ack_ts, 11511 &rack->r_ctl.challenge_ack_cnt)); 11512 11513 /* 11514 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11515 * synchronized state. 11516 */ 11517 if (thflags & TH_SYN) { 11518 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11519 return (ret_val); 11520 } 11521 /* 11522 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11523 * it's less than ts_recent, drop it. 11524 */ 11525 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11526 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11527 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11528 return (ret_val); 11529 } 11530 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11531 &rack->r_ctl.challenge_ack_ts, 11532 &rack->r_ctl.challenge_ack_cnt)) { 11533 return (ret_val); 11534 } 11535 /* 11536 * If last ACK falls within this segment's sequence numbers, record 11537 * its timestamp. NOTE: 1) That the test incorporates suggestions 11538 * from the latest proposal of the tcplw@cray.com list (Braden 11539 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11540 * with our earlier PAWS tests, so this check should be solely 11541 * predicated on the sequence space of this segment. 3) That we 11542 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11543 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11544 * SEG.Len, This modified check allows us to overcome RFC1323's 11545 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11546 * p.869. In such cases, we can still calculate the RTT correctly 11547 * when RCV.NXT == Last.ACK.Sent. 11548 */ 11549 if ((to->to_flags & TOF_TS) != 0 && 11550 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11551 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11552 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11553 tp->ts_recent_age = tcp_ts_getticks(); 11554 tp->ts_recent = to->to_tsval; 11555 } 11556 /* 11557 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11558 * is on (half-synchronized state), then queue data for later 11559 * processing; else drop segment and return. 11560 */ 11561 if ((thflags & TH_ACK) == 0) { 11562 if (tp->t_flags & TF_NEEDSYN) { 11563 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11564 tiwin, thflags, nxt_pkt)); 11565 11566 } else if (tp->t_flags & TF_ACKNOW) { 11567 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11568 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11569 return (ret_val); 11570 } else { 11571 ctf_do_drop(m, NULL); 11572 return (0); 11573 } 11574 } 11575 /* 11576 * Ack processing. 11577 */ 11578 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11579 return (ret_val); 11580 } 11581 if (sbavail(&so->so_snd)) { 11582 if (ctf_progress_timeout_check(tp, true)) { 11583 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11584 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11585 return (1); 11586 } 11587 } 11588 /* State changes only happen in rack_process_data() */ 11589 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11590 tiwin, thflags, nxt_pkt)); 11591 } 11592 11593 /* 11594 * Return value of 1, the TCB is unlocked and most 11595 * likely gone, return value of 0, the TCP is still 11596 * locked. 11597 */ 11598 static int 11599 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11600 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11601 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11602 { 11603 int32_t ret_val = 0; 11604 struct tcp_rack *rack; 11605 11606 rack = (struct tcp_rack *)tp->t_fb_ptr; 11607 ctf_calc_rwin(so, tp); 11608 if ((thflags & TH_RST) || 11609 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11610 return (__ctf_process_rst(m, th, so, tp, 11611 &rack->r_ctl.challenge_ack_ts, 11612 &rack->r_ctl.challenge_ack_cnt)); 11613 /* 11614 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11615 * synchronized state. 11616 */ 11617 if (thflags & TH_SYN) { 11618 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11619 return (ret_val); 11620 } 11621 /* 11622 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11623 * it's less than ts_recent, drop it. 
11624 */ 11625 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11626 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11627 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11628 return (ret_val); 11629 } 11630 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11631 &rack->r_ctl.challenge_ack_ts, 11632 &rack->r_ctl.challenge_ack_cnt)) { 11633 return (ret_val); 11634 } 11635 /* 11636 * If last ACK falls within this segment's sequence numbers, record 11637 * its timestamp. NOTE: 1) That the test incorporates suggestions 11638 * from the latest proposal of the tcplw@cray.com list (Braden 11639 * 1993/04/26). 2) That updating only on newer timestamps interferes 11640 * with our earlier PAWS tests, so this check should be solely 11641 * predicated on the sequence space of this segment. 3) That we 11642 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11643 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11644 * SEG.Len, This modified check allows us to overcome RFC1323's 11645 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11646 * p.869. In such cases, we can still calculate the RTT correctly 11647 * when RCV.NXT == Last.ACK.Sent. 11648 */ 11649 if ((to->to_flags & TOF_TS) != 0 && 11650 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11651 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11652 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11653 tp->ts_recent_age = tcp_ts_getticks(); 11654 tp->ts_recent = to->to_tsval; 11655 } 11656 /* 11657 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11658 * is on (half-synchronized state), then queue data for later 11659 * processing; else drop segment and return. 11660 */ 11661 if ((thflags & TH_ACK) == 0) { 11662 if (tp->t_flags & TF_NEEDSYN) { 11663 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11664 tiwin, thflags, nxt_pkt)); 11665 11666 } else if (tp->t_flags & TF_ACKNOW) { 11667 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11668 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11669 return (ret_val); 11670 } else { 11671 ctf_do_drop(m, NULL); 11672 return (0); 11673 } 11674 } 11675 /* 11676 * Ack processing. 
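 * (see rack_process_ack() above: a non-zero return means ret_val is
 * already set and the tcpcb may be unlocked and gone).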
11677 */ 11678 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11679 return (ret_val); 11680 } 11681 if (sbavail(&so->so_snd)) { 11682 if (ctf_progress_timeout_check(tp, true)) { 11683 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11684 tp, tick, PROGRESS_DROP, __LINE__); 11685 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11686 return (1); 11687 } 11688 } 11689 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11690 tiwin, thflags, nxt_pkt)); 11691 } 11692 11693 static int 11694 rack_check_data_after_close(struct mbuf *m, 11695 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11696 { 11697 struct tcp_rack *rack; 11698 11699 rack = (struct tcp_rack *)tp->t_fb_ptr; 11700 if (rack->rc_allow_data_af_clo == 0) { 11701 close_now: 11702 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11703 /* tcp_close will kill the inp pre-log the Reset */ 11704 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11705 tp = tcp_close(tp); 11706 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11707 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11708 return (1); 11709 } 11710 if (sbavail(&so->so_snd) == 0) 11711 goto close_now; 11712 /* Ok we allow data that is ignored and a followup reset */ 11713 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11714 tp->rcv_nxt = th->th_seq + *tlen; 11715 tp->t_flags2 |= TF2_DROP_AF_DATA; 11716 rack->r_wanted_output = 1; 11717 *tlen = 0; 11718 return (0); 11719 } 11720 11721 /* 11722 * Return value of 1, the TCB is unlocked and most 11723 * likely gone, return value of 0, the TCP is still 11724 * locked. 11725 */ 11726 static int 11727 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11728 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11729 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11730 { 11731 int32_t ret_val = 0; 11732 int32_t ourfinisacked = 0; 11733 struct tcp_rack *rack; 11734 11735 rack = (struct tcp_rack *)tp->t_fb_ptr; 11736 ctf_calc_rwin(so, tp); 11737 11738 if ((thflags & TH_RST) || 11739 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11740 return (__ctf_process_rst(m, th, so, tp, 11741 &rack->r_ctl.challenge_ack_ts, 11742 &rack->r_ctl.challenge_ack_cnt)); 11743 /* 11744 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11745 * synchronized state. 11746 */ 11747 if (thflags & TH_SYN) { 11748 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11749 return (ret_val); 11750 } 11751 /* 11752 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11753 * it's less than ts_recent, drop it. 11754 */ 11755 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11756 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11757 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11758 return (ret_val); 11759 } 11760 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11761 &rack->r_ctl.challenge_ack_ts, 11762 &rack->r_ctl.challenge_ack_cnt)) { 11763 return (ret_val); 11764 } 11765 /* 11766 * If new data are received on a connection after the user processes 11767 * are gone, then RST the other end. 11768 */ 11769 if ((tp->t_flags & TF_CLOSED) && tlen && 11770 rack_check_data_after_close(m, tp, &tlen, th, so)) 11771 return (1); 11772 /* 11773 * If last ACK falls within this segment's sequence numbers, record 11774 * its timestamp. NOTE: 1) That the test incorporates suggestions 11775 * from the latest proposal of the tcplw@cray.com list (Braden 11776 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11777 * with our earlier PAWS tests, so this check should be solely 11778 * predicated on the sequence space of this segment. 3) That we 11779 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11780 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11781 * SEG.Len, This modified check allows us to overcome RFC1323's 11782 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11783 * p.869. In such cases, we can still calculate the RTT correctly 11784 * when RCV.NXT == Last.ACK.Sent. 11785 */ 11786 if ((to->to_flags & TOF_TS) != 0 && 11787 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11788 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11789 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11790 tp->ts_recent_age = tcp_ts_getticks(); 11791 tp->ts_recent = to->to_tsval; 11792 } 11793 /* 11794 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11795 * is on (half-synchronized state), then queue data for later 11796 * processing; else drop segment and return. 11797 */ 11798 if ((thflags & TH_ACK) == 0) { 11799 if (tp->t_flags & TF_NEEDSYN) { 11800 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11801 tiwin, thflags, nxt_pkt)); 11802 } else if (tp->t_flags & TF_ACKNOW) { 11803 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11804 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11805 return (ret_val); 11806 } else { 11807 ctf_do_drop(m, NULL); 11808 return (0); 11809 } 11810 } 11811 /* 11812 * Ack processing. 11813 */ 11814 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11815 return (ret_val); 11816 } 11817 if (ourfinisacked) { 11818 /* 11819 * If we can't receive any more data, then closing user can 11820 * proceed. Starting the timer is contrary to the 11821 * specification, but if we don't get a FIN we'll hang 11822 * forever. 11823 * 11824 * XXXjl: we should release the tp also, and use a 11825 * compressed state. 11826 */ 11827 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11828 soisdisconnected(so); 11829 tcp_timer_activate(tp, TT_2MSL, 11830 (tcp_fast_finwait2_recycle ? 11831 tcp_finwait2_timeout : 11832 TP_MAXIDLE(tp))); 11833 } 11834 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11835 } 11836 if (sbavail(&so->so_snd)) { 11837 if (ctf_progress_timeout_check(tp, true)) { 11838 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11839 tp, tick, PROGRESS_DROP, __LINE__); 11840 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11841 return (1); 11842 } 11843 } 11844 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11845 tiwin, thflags, nxt_pkt)); 11846 } 11847 11848 /* 11849 * Return value of 1, the TCB is unlocked and most 11850 * likely gone, return value of 0, the TCP is still 11851 * locked. 
11852 */ 11853 static int 11854 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11855 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11856 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11857 { 11858 int32_t ret_val = 0; 11859 int32_t ourfinisacked = 0; 11860 struct tcp_rack *rack; 11861 11862 rack = (struct tcp_rack *)tp->t_fb_ptr; 11863 ctf_calc_rwin(so, tp); 11864 11865 if ((thflags & TH_RST) || 11866 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11867 return (__ctf_process_rst(m, th, so, tp, 11868 &rack->r_ctl.challenge_ack_ts, 11869 &rack->r_ctl.challenge_ack_cnt)); 11870 /* 11871 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11872 * synchronized state. 11873 */ 11874 if (thflags & TH_SYN) { 11875 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11876 return (ret_val); 11877 } 11878 /* 11879 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11880 * it's less than ts_recent, drop it. 11881 */ 11882 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11883 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11884 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11885 return (ret_val); 11886 } 11887 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11888 &rack->r_ctl.challenge_ack_ts, 11889 &rack->r_ctl.challenge_ack_cnt)) { 11890 return (ret_val); 11891 } 11892 /* 11893 * If new data are received on a connection after the user processes 11894 * are gone, then RST the other end. 11895 */ 11896 if ((tp->t_flags & TF_CLOSED) && tlen && 11897 rack_check_data_after_close(m, tp, &tlen, th, so)) 11898 return (1); 11899 /* 11900 * If last ACK falls within this segment's sequence numbers, record 11901 * its timestamp. NOTE: 1) That the test incorporates suggestions 11902 * from the latest proposal of the tcplw@cray.com list (Braden 11903 * 1993/04/26). 2) That updating only on newer timestamps interferes 11904 * with our earlier PAWS tests, so this check should be solely 11905 * predicated on the sequence space of this segment. 3) That we 11906 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11907 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11908 * SEG.Len, This modified check allows us to overcome RFC1323's 11909 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11910 * p.869. In such cases, we can still calculate the RTT correctly 11911 * when RCV.NXT == Last.ACK.Sent. 11912 */ 11913 if ((to->to_flags & TOF_TS) != 0 && 11914 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11915 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11916 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11917 tp->ts_recent_age = tcp_ts_getticks(); 11918 tp->ts_recent = to->to_tsval; 11919 } 11920 /* 11921 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11922 * is on (half-synchronized state), then queue data for later 11923 * processing; else drop segment and return. 11924 */ 11925 if ((thflags & TH_ACK) == 0) { 11926 if (tp->t_flags & TF_NEEDSYN) { 11927 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11928 tiwin, thflags, nxt_pkt)); 11929 } else if (tp->t_flags & TF_ACKNOW) { 11930 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11931 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11932 return (ret_val); 11933 } else { 11934 ctf_do_drop(m, NULL); 11935 return (0); 11936 } 11937 } 11938 /* 11939 * Ack processing. 
11940 */ 11941 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11942 return (ret_val); 11943 } 11944 if (ourfinisacked) { 11945 tcp_twstart(tp); 11946 m_freem(m); 11947 return (1); 11948 } 11949 if (sbavail(&so->so_snd)) { 11950 if (ctf_progress_timeout_check(tp, true)) { 11951 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11952 tp, tick, PROGRESS_DROP, __LINE__); 11953 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11954 return (1); 11955 } 11956 } 11957 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11958 tiwin, thflags, nxt_pkt)); 11959 } 11960 11961 /* 11962 * Return value of 1, the TCB is unlocked and most 11963 * likely gone, return value of 0, the TCP is still 11964 * locked. 11965 */ 11966 static int 11967 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11968 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11969 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11970 { 11971 int32_t ret_val = 0; 11972 int32_t ourfinisacked = 0; 11973 struct tcp_rack *rack; 11974 11975 rack = (struct tcp_rack *)tp->t_fb_ptr; 11976 ctf_calc_rwin(so, tp); 11977 11978 if ((thflags & TH_RST) || 11979 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11980 return (__ctf_process_rst(m, th, so, tp, 11981 &rack->r_ctl.challenge_ack_ts, 11982 &rack->r_ctl.challenge_ack_cnt)); 11983 /* 11984 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11985 * synchronized state. 11986 */ 11987 if (thflags & TH_SYN) { 11988 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11989 return (ret_val); 11990 } 11991 /* 11992 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11993 * it's less than ts_recent, drop it. 11994 */ 11995 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11996 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11997 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11998 return (ret_val); 11999 } 12000 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12001 &rack->r_ctl.challenge_ack_ts, 12002 &rack->r_ctl.challenge_ack_cnt)) { 12003 return (ret_val); 12004 } 12005 /* 12006 * If new data are received on a connection after the user processes 12007 * are gone, then RST the other end. 12008 */ 12009 if ((tp->t_flags & TF_CLOSED) && tlen && 12010 rack_check_data_after_close(m, tp, &tlen, th, so)) 12011 return (1); 12012 /* 12013 * If last ACK falls within this segment's sequence numbers, record 12014 * its timestamp. NOTE: 1) That the test incorporates suggestions 12015 * from the latest proposal of the tcplw@cray.com list (Braden 12016 * 1993/04/26). 2) That updating only on newer timestamps interferes 12017 * with our earlier PAWS tests, so this check should be solely 12018 * predicated on the sequence space of this segment. 3) That we 12019 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12020 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12021 * SEG.Len, This modified check allows us to overcome RFC1323's 12022 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12023 * p.869. In such cases, we can still calculate the RTT correctly 12024 * when RCV.NXT == Last.ACK.Sent. 
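	 * As a small illustration: a bare ACK arriving with SEG.SEQ equal
	 * to Last.ACK.Sent and SEG.Len of zero fails the strict RFC1323
	 * test (Last.ACK.Sent < SEG.SEQ + SEG.Len) but passes the modified
	 * one, so its timestamp is still recorded in ts_recent below.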
12025 */ 12026 if ((to->to_flags & TOF_TS) != 0 && 12027 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12028 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12029 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12030 tp->ts_recent_age = tcp_ts_getticks(); 12031 tp->ts_recent = to->to_tsval; 12032 } 12033 /* 12034 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12035 * is on (half-synchronized state), then queue data for later 12036 * processing; else drop segment and return. 12037 */ 12038 if ((thflags & TH_ACK) == 0) { 12039 if (tp->t_flags & TF_NEEDSYN) { 12040 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12041 tiwin, thflags, nxt_pkt)); 12042 } else if (tp->t_flags & TF_ACKNOW) { 12043 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12044 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12045 return (ret_val); 12046 } else { 12047 ctf_do_drop(m, NULL); 12048 return (0); 12049 } 12050 } 12051 /* 12052 * case TCPS_LAST_ACK: Ack processing. 12053 */ 12054 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12055 return (ret_val); 12056 } 12057 if (ourfinisacked) { 12058 tp = tcp_close(tp); 12059 ctf_do_drop(m, tp); 12060 return (1); 12061 } 12062 if (sbavail(&so->so_snd)) { 12063 if (ctf_progress_timeout_check(tp, true)) { 12064 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12065 tp, tick, PROGRESS_DROP, __LINE__); 12066 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12067 return (1); 12068 } 12069 } 12070 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12071 tiwin, thflags, nxt_pkt)); 12072 } 12073 12074 /* 12075 * Return value of 1, the TCB is unlocked and most 12076 * likely gone, return value of 0, the TCP is still 12077 * locked. 12078 */ 12079 static int 12080 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12081 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12082 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12083 { 12084 int32_t ret_val = 0; 12085 int32_t ourfinisacked = 0; 12086 struct tcp_rack *rack; 12087 12088 rack = (struct tcp_rack *)tp->t_fb_ptr; 12089 ctf_calc_rwin(so, tp); 12090 12091 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12092 if ((thflags & TH_RST) || 12093 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12094 return (__ctf_process_rst(m, th, so, tp, 12095 &rack->r_ctl.challenge_ack_ts, 12096 &rack->r_ctl.challenge_ack_cnt)); 12097 /* 12098 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12099 * synchronized state. 12100 */ 12101 if (thflags & TH_SYN) { 12102 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12103 return (ret_val); 12104 } 12105 /* 12106 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12107 * it's less than ts_recent, drop it. 12108 */ 12109 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12110 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12111 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12112 return (ret_val); 12113 } 12114 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12115 &rack->r_ctl.challenge_ack_ts, 12116 &rack->r_ctl.challenge_ack_cnt)) { 12117 return (ret_val); 12118 } 12119 /* 12120 * If new data are received on a connection after the user processes 12121 * are gone, then RST the other end. 
12122 */ 12123 if ((tp->t_flags & TF_CLOSED) && tlen && 12124 rack_check_data_after_close(m, tp, &tlen, th, so)) 12125 return (1); 12126 /* 12127 * If last ACK falls within this segment's sequence numbers, record 12128 * its timestamp. NOTE: 1) That the test incorporates suggestions 12129 * from the latest proposal of the tcplw@cray.com list (Braden 12130 * 1993/04/26). 2) That updating only on newer timestamps interferes 12131 * with our earlier PAWS tests, so this check should be solely 12132 * predicated on the sequence space of this segment. 3) That we 12133 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12134 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12135 * SEG.Len, This modified check allows us to overcome RFC1323's 12136 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12137 * p.869. In such cases, we can still calculate the RTT correctly 12138 * when RCV.NXT == Last.ACK.Sent. 12139 */ 12140 if ((to->to_flags & TOF_TS) != 0 && 12141 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12142 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12143 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12144 tp->ts_recent_age = tcp_ts_getticks(); 12145 tp->ts_recent = to->to_tsval; 12146 } 12147 /* 12148 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12149 * is on (half-synchronized state), then queue data for later 12150 * processing; else drop segment and return. 12151 */ 12152 if ((thflags & TH_ACK) == 0) { 12153 if (tp->t_flags & TF_NEEDSYN) { 12154 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12155 tiwin, thflags, nxt_pkt)); 12156 } else if (tp->t_flags & TF_ACKNOW) { 12157 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12158 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12159 return (ret_val); 12160 } else { 12161 ctf_do_drop(m, NULL); 12162 return (0); 12163 } 12164 } 12165 /* 12166 * Ack processing. 
12167 */ 12168 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12169 return (ret_val); 12170 } 12171 if (sbavail(&so->so_snd)) { 12172 if (ctf_progress_timeout_check(tp, true)) { 12173 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12174 tp, tick, PROGRESS_DROP, __LINE__); 12175 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12176 return (1); 12177 } 12178 } 12179 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12180 tiwin, thflags, nxt_pkt)); 12181 } 12182 12183 static void inline 12184 rack_clear_rate_sample(struct tcp_rack *rack) 12185 { 12186 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12187 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12188 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12189 } 12190 12191 static void 12192 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12193 { 12194 uint64_t bw_est, rate_wanted; 12195 int chged = 0; 12196 uint32_t user_max, orig_min, orig_max; 12197 12198 orig_min = rack->r_ctl.rc_pace_min_segs; 12199 orig_max = rack->r_ctl.rc_pace_max_segs; 12200 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12201 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12202 chged = 1; 12203 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12204 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12205 if (user_max != rack->r_ctl.rc_pace_max_segs) 12206 chged = 1; 12207 } 12208 if (rack->rc_force_max_seg) { 12209 rack->r_ctl.rc_pace_max_segs = user_max; 12210 } else if (rack->use_fixed_rate) { 12211 bw_est = rack_get_bw(rack); 12212 if ((rack->r_ctl.crte == NULL) || 12213 (bw_est != rack->r_ctl.crte->rate)) { 12214 rack->r_ctl.rc_pace_max_segs = user_max; 12215 } else { 12216 /* We are pacing right at the hardware rate */ 12217 uint32_t segsiz; 12218 12219 segsiz = min(ctf_fixed_maxseg(tp), 12220 rack->r_ctl.rc_pace_min_segs); 12221 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12222 tp, bw_est, segsiz, 0, 12223 rack->r_ctl.crte, NULL); 12224 } 12225 } else if (rack->rc_always_pace) { 12226 if (rack->r_ctl.gp_bw || 12227 #ifdef NETFLIX_PEAKRATE 12228 rack->rc_tp->t_maxpeakrate || 12229 #endif 12230 rack->r_ctl.init_rate) { 12231 /* We have a rate of some sort set */ 12232 uint32_t orig; 12233 12234 bw_est = rack_get_bw(rack); 12235 orig = rack->r_ctl.rc_pace_max_segs; 12236 if (fill_override) 12237 rate_wanted = *fill_override; 12238 else 12239 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12240 if (rate_wanted) { 12241 /* We have something */ 12242 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12243 rate_wanted, 12244 ctf_fixed_maxseg(rack->rc_tp)); 12245 } else 12246 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12247 if (orig != rack->r_ctl.rc_pace_max_segs) 12248 chged = 1; 12249 } else if ((rack->r_ctl.gp_bw == 0) && 12250 (rack->r_ctl.rc_pace_max_segs == 0)) { 12251 /* 12252 * If we have nothing limit us to bursting 12253 * out IW sized pieces. 
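			 * rc_init_window() presumably hands back the initial
			 * window in bytes (a handful of MSS, e.g. the
			 * RFC 6928 style 10 * MSS bound), so a connection
			 * with no bandwidth estimate never bursts more than
			 * roughly one IW per pacing send.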
12254 */ 12255 chged = 1; 12256 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12257 } 12258 } 12259 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12260 chged = 1; 12261 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12262 } 12263 if (chged) 12264 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12265 } 12266 12267 12268 static void 12269 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12270 { 12271 #ifdef INET6 12272 struct ip6_hdr *ip6 = NULL; 12273 #endif 12274 #ifdef INET 12275 struct ip *ip = NULL; 12276 #endif 12277 struct udphdr *udp = NULL; 12278 12279 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12280 #ifdef INET6 12281 if (rack->r_is_v6) { 12282 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12283 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12284 if (tp->t_port) { 12285 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12286 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12287 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12288 udp->uh_dport = tp->t_port; 12289 rack->r_ctl.fsb.udp = udp; 12290 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12291 } else 12292 { 12293 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12294 rack->r_ctl.fsb.udp = NULL; 12295 } 12296 tcpip_fillheaders(rack->rc_inp, 12297 tp->t_port, 12298 ip6, rack->r_ctl.fsb.th); 12299 } else 12300 #endif /* INET6 */ 12301 #ifdef INET 12302 { 12303 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12304 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12305 if (tp->t_port) { 12306 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12307 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12308 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12309 udp->uh_dport = tp->t_port; 12310 rack->r_ctl.fsb.udp = udp; 12311 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12312 } else 12313 { 12314 rack->r_ctl.fsb.udp = NULL; 12315 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12316 } 12317 tcpip_fillheaders(rack->rc_inp, 12318 tp->t_port, 12319 ip, rack->r_ctl.fsb.th); 12320 } 12321 #endif 12322 rack->r_fsb_inited = 1; 12323 } 12324 12325 static int 12326 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12327 { 12328 /* 12329 * Allocate the larger of spaces V6 if available else just 12330 * V4 and include udphdr (overbook) 12331 */ 12332 #ifdef INET6 12333 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12334 #else 12335 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12336 #endif 12337 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12338 M_TCPFSB, M_NOWAIT|M_ZERO); 12339 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12340 return (ENOMEM); 12341 } 12342 rack->r_fsb_inited = 0; 12343 return (0); 12344 } 12345 12346 static int 12347 rack_init(struct tcpcb *tp) 12348 { 12349 struct inpcb *inp = tptoinpcb(tp); 12350 struct tcp_rack *rack = NULL; 12351 #ifdef INVARIANTS 12352 struct rack_sendmap *insret; 12353 #endif 12354 uint32_t iwin, snt, us_cts; 12355 int err; 12356 12357 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12358 if (tp->t_fb_ptr == NULL) { 12359 /* 12360 * We need to allocate memory but cant. The INP and INP_INFO 12361 * locks and they are recursive (happens during setup. 
So a 12362 * scheme to drop the locks fails :( 12363 * 12364 */ 12365 return (ENOMEM); 12366 } 12367 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12368 12369 rack = (struct tcp_rack *)tp->t_fb_ptr; 12370 RB_INIT(&rack->r_ctl.rc_mtree); 12371 TAILQ_INIT(&rack->r_ctl.rc_free); 12372 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12373 rack->rc_tp = tp; 12374 rack->rc_inp = inp; 12375 /* Set the flag */ 12376 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 12377 /* Probably not needed but lets be sure */ 12378 rack_clear_rate_sample(rack); 12379 /* 12380 * Save off the default values, socket options will poke 12381 * at these if pacing is not on or we have not yet 12382 * reached where pacing is on (gp_ready/fixed enabled). 12383 * When they get set into the CC module (when gp_ready 12384 * is enabled or we enable fixed) then we will set these 12385 * values into the CC and place in here the old values 12386 * so we have a restoral. Then we will set the flag 12387 * rc_pacing_cc_set. That way whenever we turn off pacing 12388 * or switch off this stack, we will know to go restore 12389 * the saved values. 12390 */ 12391 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12392 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12393 /* We want abe like behavior as well */ 12394 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12395 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12396 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12397 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12398 rack->r_ctl.roundends = tp->snd_max; 12399 if (use_rack_rr) 12400 rack->use_rack_rr = 1; 12401 if (V_tcp_delack_enabled) 12402 tp->t_delayed_ack = 1; 12403 else 12404 tp->t_delayed_ack = 0; 12405 #ifdef TCP_ACCOUNTING 12406 if (rack_tcp_accounting) { 12407 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12408 } 12409 #endif 12410 if (rack_enable_shared_cwnd) 12411 rack->rack_enable_scwnd = 1; 12412 rack->rc_user_set_max_segs = rack_hptsi_segments; 12413 rack->rc_force_max_seg = 0; 12414 if (rack_use_imac_dack) 12415 rack->rc_dack_mode = 1; 12416 TAILQ_INIT(&rack->r_ctl.opt_list); 12417 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12418 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12419 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12420 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12421 rack->r_ctl.rc_highest_us_rtt = 0; 12422 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12423 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12424 if (rack_use_cmp_acks) 12425 rack->r_use_cmp_ack = 1; 12426 if (rack_disable_prr) 12427 rack->rack_no_prr = 1; 12428 if (rack_gp_no_rec_chg) 12429 rack->rc_gp_no_rec_chg = 1; 12430 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12431 rack->rc_always_pace = 1; 12432 if (rack->use_fixed_rate || rack->gp_ready) 12433 rack_set_cc_pacing(rack); 12434 } else 12435 rack->rc_always_pace = 0; 12436 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12437 rack->r_mbuf_queue = 1; 12438 else 12439 rack->r_mbuf_queue = 0; 12440 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12441 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12442 else 12443 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12444 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12445 if (rack_limits_scwnd) 12446 rack->r_limit_scw = 1; 12447 else 12448 rack->r_limit_scw = 0; 12449 rack->rc_labc = V_tcp_abc_l_var; 12450 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12451 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12452 rack->r_ctl.rc_rate_sample_method = 
rack_rate_sample_method; 12453 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12454 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12455 rack->r_ctl.rc_min_to = rack_min_to; 12456 microuptime(&rack->r_ctl.act_rcv_time); 12457 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12458 rack->rc_init_win = rack_default_init_window; 12459 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12460 if (rack_hw_up_only) 12461 rack->r_up_only = 1; 12462 if (rack_do_dyn_mul) { 12463 /* When dynamic adjustment is on CA needs to start at 100% */ 12464 rack->rc_gp_dyn_mul = 1; 12465 if (rack_do_dyn_mul >= 100) 12466 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12467 } else 12468 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12469 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12470 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12471 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12472 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12473 rack_probertt_filter_life); 12474 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12475 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12476 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12477 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12478 rack->r_ctl.rc_time_probertt_starts = 0; 12479 if (rack_dsack_std_based & 0x1) { 12480 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12481 rack->rc_rack_tmr_std_based = 1; 12482 } 12483 if (rack_dsack_std_based & 0x2) { 12484 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12485 rack->rc_rack_use_dsack = 1; 12486 } 12487 /* We require at least one measurement, even if the sysctl is 0 */ 12488 if (rack_req_measurements) 12489 rack->r_ctl.req_measurements = rack_req_measurements; 12490 else 12491 rack->r_ctl.req_measurements = 1; 12492 if (rack_enable_hw_pacing) 12493 rack->rack_hdw_pace_ena = 1; 12494 if (rack_hw_rate_caps) 12495 rack->r_rack_hw_rate_caps = 1; 12496 /* Do we force on detection? */ 12497 #ifdef NETFLIX_EXP_DETECTION 12498 if (tcp_force_detection) 12499 rack->do_detection = 1; 12500 else 12501 #endif 12502 rack->do_detection = 0; 12503 if (rack_non_rxt_use_cr) 12504 rack->rack_rec_nonrxt_use_cr = 1; 12505 err = rack_init_fsb(tp, rack); 12506 if (err) { 12507 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12508 tp->t_fb_ptr = NULL; 12509 return (err); 12510 } 12511 if (tp->snd_una != tp->snd_max) { 12512 /* Create a send map for the current outstanding data */ 12513 struct rack_sendmap *rsm; 12514 12515 rsm = rack_alloc(rack); 12516 if (rsm == NULL) { 12517 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12518 tp->t_fb_ptr = NULL; 12519 return (ENOMEM); 12520 } 12521 rsm->r_no_rtt_allowed = 1; 12522 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12523 rsm->r_rtr_cnt = 1; 12524 rsm->r_rtr_bytes = 0; 12525 if (tp->t_flags & TF_SENTFIN) 12526 rsm->r_flags |= RACK_HAS_FIN; 12527 if ((tp->snd_una == tp->iss) && 12528 !TCPS_HAVEESTABLISHED(tp->t_state)) 12529 rsm->r_flags |= RACK_HAS_SYN; 12530 rsm->r_start = tp->snd_una; 12531 rsm->r_end = tp->snd_max; 12532 rsm->r_dupack = 0; 12533 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12534 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12535 if (rsm->m) 12536 rsm->orig_m_len = rsm->m->m_len; 12537 else 12538 rsm->orig_m_len = 0; 12539 } else { 12540 /* 12541 * This can happen if we have a stand-alone FIN or 12542 * SYN. 
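			 * A SYN or FIN occupies sequence space but carries no
			 * socket-buffer data, so there is no mbuf to hang off
			 * this map entry; the rsm just tracks the sequence
			 * range with m set to NULL.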
			 */
			rsm->m = NULL;
			rsm->orig_m_len = 0;
			rsm->soff = 0;
		}
#ifndef INVARIANTS
		(void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
#else
		insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
		if (insret != NULL) {
			panic("Insert in rb tree fails ret:%p rack:%p rsm:%p",
			    insret, rack, rsm);
		}
#endif
		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext);
		rsm->r_in_tmap = 1;
	}
	/*
	 * Timers in Rack are kept in microseconds so let's
	 * convert any initial incoming variables
	 * from ticks into usecs. Note that we
	 * also change the values of t_srtt and t_rttvar, if
	 * they are non-zero. They are kept with a 5
	 * bit decimal so we have to carefully convert
	 * these to get the full precision.
	 */
	rack_convert_rtts(tp);
	tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow);
	if (rack_do_hystart) {
		tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
		if (rack_do_hystart > 1)
			tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
		if (rack_do_hystart > 2)
			tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
	}
	if (rack_def_profile)
		rack_set_profile(rack, rack_def_profile);
	/* Cancel the GP measurement in progress */
	tp->t_flags &= ~TF_GPUTINPROG;
	if (SEQ_GT(tp->snd_max, tp->iss))
		snt = tp->snd_max - tp->iss;
	else
		snt = 0;
	iwin = rc_init_window(rack);
	if (snt < iwin) {
		/*
		 * We are not past the initial window
		 * so we need to make sure cwnd is
		 * correct.
		 */
		if (tp->snd_cwnd < iwin)
			tp->snd_cwnd = iwin;
		/*
		 * If we are within the initial window
		 * we want ssthresh to be unlimited. Setting
		 * it to the rwnd (which the default stack does
		 * and older racks) is not really a good idea
		 * since we want to be in SS and grow both the
		 * cwnd and the rwnd (via dynamic rwnd growth). If
		 * we set it to the rwnd then as the peer grows its
		 * rwnd we will be stuck in CA and never hit SS.
		 *
		 * It's far better to raise it up high (this takes the
		 * risk that there has been a loss already, probably
		 * we should have an indicator in all stacks of loss
		 * but we don't), but considering the normal use this
		 * is a risk worth taking. The consequences of not
		 * hitting SS are far worse than going one more time
		 * into it early on (before we have sent even an IW).
		 * It is highly unlikely that we will have had a loss
		 * before getting the IW out.
		 */
		tp->snd_ssthresh = 0xffffffff;
	}
	rack_stop_all_timers(tp);
	/* Let's set up the fsb block */
	rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0);
	rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur,
	    __LINE__, RACK_RTTS_INIT);
	return (0);
}

static int
rack_handoff_ok(struct tcpcb *tp)
{
	if ((tp->t_state == TCPS_CLOSED) ||
	    (tp->t_state == TCPS_LISTEN)) {
		/* Sure no problem though it may not stick */
		return (0);
	}
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED)) {
		/*
		 * We really don't know if you support SACK,
		 * you have to get to ESTAB or beyond to tell.
		 */
		return (EAGAIN);
	}
	if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) {
		/*
		 * Rack will only send a FIN after all data is acknowledged.
		 * So in this case we have more data outstanding. We can't
		 * switch stacks until either all data and only the FIN
		 * is left (in which case rack_init() now knows how
		 * to deal with that) <or> all is acknowledged and we
		 * are only left with incoming data, though why you
		 * would want to switch to rack after all data is acknowledged
		 * I have no idea (rrs)!
		 */
		return (EAGAIN);
	}
	if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required) {
		return (0);
	}
	/*
	 * If we reach here we don't do SACK on this connection so we can
	 * never do rack.
	 */
	return (EINVAL);
}

static void
rack_fini(struct tcpcb *tp, int32_t tcb_is_purged)
{
	struct inpcb *inp = tptoinpcb(tp);

	if (tp->t_fb_ptr) {
		struct tcp_rack *rack;
		struct rack_sendmap *rsm, *nrsm;
#ifdef INVARIANTS
		struct rack_sendmap *rm;
#endif

		rack = (struct tcp_rack *)tp->t_fb_ptr;
		if (tp->t_in_pkt) {
			/*
			 * It is unsafe to process the packets since a
			 * reset may be lurking in them (it's rare but it
			 * can occur). If we were to find a RST, then we
			 * would end up dropping the connection and the
			 * INP lock, so when we return the caller (tcp_usrreq)
			 * will blow up when it tries to unlock the inp.
			 */
			struct mbuf *save, *m;

			m = tp->t_in_pkt;
			tp->t_in_pkt = NULL;
			tp->t_tail_pkt = NULL;
			while (m) {
				save = m->m_nextpkt;
				m->m_nextpkt = NULL;
				m_freem(m);
				m = save;
			}
		}
		tp->t_flags &= ~TF_FORCEDATA;
#ifdef NETFLIX_SHARED_CWND
		if (rack->r_ctl.rc_scw) {
			uint32_t limit;

			if (rack->r_limit_scw)
				limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
			else
				limit = 0;
			tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
			    rack->r_ctl.rc_scw_index,
			    limit);
			rack->r_ctl.rc_scw = NULL;
		}
#endif
		if (rack->r_ctl.fsb.tcp_ip_hdr) {
			free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
			rack->r_ctl.fsb.tcp_ip_hdr = NULL;
			rack->r_ctl.fsb.th = NULL;
		}
		/* Convert back to ticks, carrying the 5-bit fractional remainder. */
		if (tp->t_srtt > 1) {
			uint32_t val, frac;

			val = USEC_2_TICKS(tp->t_srtt);
			frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
			tp->t_srtt = val << TCP_RTT_SHIFT;
			/*
			 * frac is the fractional part left over from
			 * converting to hz and shifting. We need to
			 * convert this to the 5 bit remainder.
			 */
			if (frac) {
				if (hz == 1000) {
					frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
				} else {
					frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_SEC);
				}
				tp->t_srtt += frac;
			}
		}
		if (tp->t_rttvar) {
			uint32_t val, frac;

			val = USEC_2_TICKS(tp->t_rttvar);
			frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
			tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
			/*
			 * frac is the fractional part left over from
			 * converting to hz and shifting. We need to
			 * convert this to the 5 bit remainder.
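			 * Illustrative arithmetic, assuming hz == 1000 and
			 * TCP_RTT_SCALE == 32: a leftover of 500 usec below
			 * one tick becomes (500 * 32) / 1000 == 16, i.e. half
			 * a tick expressed in the 5-bit fraction.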
12751 */ 12752 if (frac) { 12753 if (hz == 1000) { 12754 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12755 } else { 12756 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12757 } 12758 tp->t_rttvar += frac; 12759 } 12760 } 12761 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12762 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12763 if (rack->rc_always_pace) { 12764 tcp_decrement_paced_conn(); 12765 rack_undo_cc_pacing(rack); 12766 rack->rc_always_pace = 0; 12767 } 12768 /* Clean up any options if they were not applied */ 12769 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12770 struct deferred_opt_list *dol; 12771 12772 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12773 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12774 free(dol, M_TCPDO); 12775 } 12776 /* rack does not use force data but other stacks may clear it */ 12777 if (rack->r_ctl.crte != NULL) { 12778 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12779 rack->rack_hdrw_pacing = 0; 12780 rack->r_ctl.crte = NULL; 12781 } 12782 #ifdef TCP_BLACKBOX 12783 tcp_log_flowend(tp); 12784 #endif 12785 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12786 #ifndef INVARIANTS 12787 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12788 #else 12789 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12790 if (rm != rsm) { 12791 panic("At fini, rack:%p rsm:%p rm:%p", 12792 rack, rsm, rm); 12793 } 12794 #endif 12795 uma_zfree(rack_zone, rsm); 12796 } 12797 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12798 while (rsm) { 12799 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12800 uma_zfree(rack_zone, rsm); 12801 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12802 } 12803 rack->rc_free_cnt = 0; 12804 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12805 tp->t_fb_ptr = NULL; 12806 } 12807 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12808 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12809 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12810 inp->inp_flags2 &= ~INP_MBUF_ACKCMP; 12811 /* Cancel the GP measurement in progress */ 12812 tp->t_flags &= ~TF_GPUTINPROG; 12813 inp->inp_flags2 &= ~INP_MBUF_L_ACKS; 12814 /* Make sure snd_nxt is correctly set */ 12815 tp->snd_nxt = tp->snd_max; 12816 } 12817 12818 static void 12819 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12820 { 12821 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12822 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 12823 } 12824 switch (tp->t_state) { 12825 case TCPS_SYN_SENT: 12826 rack->r_state = TCPS_SYN_SENT; 12827 rack->r_substate = rack_do_syn_sent; 12828 break; 12829 case TCPS_SYN_RECEIVED: 12830 rack->r_state = TCPS_SYN_RECEIVED; 12831 rack->r_substate = rack_do_syn_recv; 12832 break; 12833 case TCPS_ESTABLISHED: 12834 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12835 rack->r_state = TCPS_ESTABLISHED; 12836 rack->r_substate = rack_do_established; 12837 break; 12838 case TCPS_CLOSE_WAIT: 12839 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12840 rack->r_state = TCPS_CLOSE_WAIT; 12841 rack->r_substate = rack_do_close_wait; 12842 break; 12843 case TCPS_FIN_WAIT_1: 12844 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12845 rack->r_state = TCPS_FIN_WAIT_1; 12846 rack->r_substate = rack_do_fin_wait_1; 12847 break; 12848 case TCPS_CLOSING: 12849 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12850 rack->r_state = TCPS_CLOSING; 12851 rack->r_substate = rack_do_closing; 12852 break; 12853 case TCPS_LAST_ACK: 12854 
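		/*
		 * As with the other post-handshake states, the pacing
		 * segment sizes are re-derived on entry here; by now the
		 * negotiated MSS is known, which is presumably why each of
		 * these cases calls rack_set_pace_segments() first.
		 */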
rack_set_pace_segments(tp, rack, __LINE__, NULL); 12855 rack->r_state = TCPS_LAST_ACK; 12856 rack->r_substate = rack_do_lastack; 12857 break; 12858 case TCPS_FIN_WAIT_2: 12859 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12860 rack->r_state = TCPS_FIN_WAIT_2; 12861 rack->r_substate = rack_do_fin_wait_2; 12862 break; 12863 case TCPS_LISTEN: 12864 case TCPS_CLOSED: 12865 case TCPS_TIME_WAIT: 12866 default: 12867 break; 12868 }; 12869 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12870 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12871 12872 } 12873 12874 static void 12875 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12876 { 12877 /* 12878 * We received an ack, and then did not 12879 * call send or were bounced out due to the 12880 * hpts was running. Now a timer is up as well, is 12881 * it the right timer? 12882 */ 12883 struct rack_sendmap *rsm; 12884 int tmr_up; 12885 12886 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12887 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12888 return; 12889 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12890 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12891 (tmr_up == PACE_TMR_RXT)) { 12892 /* Should be an RXT */ 12893 return; 12894 } 12895 if (rsm == NULL) { 12896 /* Nothing outstanding? */ 12897 if (tp->t_flags & TF_DELACK) { 12898 if (tmr_up == PACE_TMR_DELACK) 12899 /* We are supposed to have delayed ack up and we do */ 12900 return; 12901 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12902 /* 12903 * if we hit enobufs then we would expect the possibility 12904 * of nothing outstanding and the RXT up (and the hptsi timer). 12905 */ 12906 return; 12907 } else if (((V_tcp_always_keepalive || 12908 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12909 (tp->t_state <= TCPS_CLOSING)) && 12910 (tmr_up == PACE_TMR_KEEP) && 12911 (tp->snd_max == tp->snd_una)) { 12912 /* We should have keep alive up and we do */ 12913 return; 12914 } 12915 } 12916 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12917 ((tmr_up == PACE_TMR_TLP) || 12918 (tmr_up == PACE_TMR_RACK) || 12919 (tmr_up == PACE_TMR_RXT))) { 12920 /* 12921 * Either a Rack, TLP or RXT is fine if we 12922 * have outstanding data. 12923 */ 12924 return; 12925 } else if (tmr_up == PACE_TMR_DELACK) { 12926 /* 12927 * If the delayed ack was going to go off 12928 * before the rtx/tlp/rack timer were going to 12929 * expire, then that would be the timer in control. 12930 * Note we don't check the time here trusting the 12931 * code is correct. 12932 */ 12933 return; 12934 } 12935 /* 12936 * Ok the timer originally started is not what we want now. 12937 * We will force the hpts to be stopped if any, and restart 12938 * with the slot set to what was in the saved slot. 
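	 * (If a pacing output was scheduled but has not fired yet, the
	 * un-elapsed portion appears to be credited to rc_agg_early below
	 * before the connection is pulled off the pacer, so the early-send
	 * accounting stays consistent.)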
12939 */ 12940 if (tcp_in_hpts(rack->rc_inp)) { 12941 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12942 uint32_t us_cts; 12943 12944 us_cts = tcp_get_usecs(NULL); 12945 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12946 rack->r_early = 1; 12947 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12948 } 12949 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 12950 } 12951 tcp_hpts_remove(rack->rc_inp); 12952 } 12953 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12954 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12955 } 12956 12957 12958 static void 12959 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 12960 { 12961 if ((SEQ_LT(tp->snd_wl1, seq) || 12962 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 12963 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 12964 /* keep track of pure window updates */ 12965 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 12966 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12967 tp->snd_wnd = tiwin; 12968 rack_validate_fo_sendwin_up(tp, rack); 12969 tp->snd_wl1 = seq; 12970 tp->snd_wl2 = ack; 12971 if (tp->snd_wnd > tp->max_sndwnd) 12972 tp->max_sndwnd = tp->snd_wnd; 12973 rack->r_wanted_output = 1; 12974 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 12975 tp->snd_wnd = tiwin; 12976 rack_validate_fo_sendwin_up(tp, rack); 12977 tp->snd_wl1 = seq; 12978 tp->snd_wl2 = ack; 12979 } else { 12980 /* Not a valid win update */ 12981 return; 12982 } 12983 /* Do we exit persists? */ 12984 if ((rack->rc_in_persist != 0) && 12985 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12986 rack->r_ctl.rc_pace_min_segs))) { 12987 rack_exit_persist(tp, rack, cts); 12988 } 12989 /* Do we enter persists? */ 12990 if ((rack->rc_in_persist == 0) && 12991 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12992 TCPS_HAVEESTABLISHED(tp->t_state) && 12993 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 12994 sbavail(&tptosocket(tp)->so_snd) && 12995 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 12996 /* 12997 * Here the rwnd is less than 12998 * the pacing size, we are established, 12999 * nothing is outstanding, and there is 13000 * data to send. Enter persists. 
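		 * Illustrative numbers: with rc_high_rwnd at 64KB and a
		 * pacing minimum of one 1448-byte segment, the persist
		 * threshold works out to min(32KB, 1448) == 1448 bytes of
		 * peer window.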
		 */
		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
	}
}

static void
rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
{

	if (tcp_bblogging_on(rack->rc_tp)) {
		struct inpcb *inp = tptoinpcb(tp);
		union tcp_log_stackspecific log;
		struct timeval ltv;
		char tcp_hdr_buf[60];
		struct tcphdr *th;
		struct timespec ts;
		uint32_t orig_snd_una;
		uint8_t xx = 0;

#ifdef NETFLIX_HTTP_LOGGING
		struct http_sendfile_track *http_req;

		if (SEQ_GT(ae->ack, tp->snd_una)) {
			http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
		} else {
			http_req = tcp_http_find_req_for_seq(tp, ae->ack);
		}
#endif
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
		if (rack->rack_no_prr == 0)
			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		else
			log.u_bbr.flex1 = 0;
		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
		log.u_bbr.use_lt_bw <<= 1;
		log.u_bbr.use_lt_bw |= rack->r_might_revert;
		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
		log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
		log.u_bbr.pkts_out = tp->t_maxseg;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex7 = 1;
		log.u_bbr.lost = ae->flags;
		log.u_bbr.cwnd_gain = ackval;
		log.u_bbr.pacing_gain = 0x2;
		if (ae->flags & TSTMP_HDWR) {
			/* Record the hardware timestamp if present */
			log.u_bbr.flex3 = M_TSTMP;
			ts.tv_sec = ae->timestamp / 1000000000;
			ts.tv_nsec = ae->timestamp % 1000000000;
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
		} else if (ae->flags & TSTMP_LRO) {
			/* Record the LRO arrival timestamp */
			log.u_bbr.flex3 = M_TSTMP_LRO;
			ts.tv_sec = ae->timestamp / 1000000000;
			ts.tv_nsec = ae->timestamp % 1000000000;
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
		/* Log the rcv time */
		log.u_bbr.delRate = ae->timestamp;
#ifdef NETFLIX_HTTP_LOGGING
		log.u_bbr.applimited = tp->t_http_closed;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_open;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_req;
		if (http_req) {
			/* Copy out any client req info */
			/* seconds */
			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
			/* useconds */
			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
			log.u_bbr.rttProp = http_req->timestamp;
			log.u_bbr.cur_del_rate = http_req->start;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
				log.u_bbr.flex8 |= 1;
			} else {
				log.u_bbr.flex8 |= 2;
				log.u_bbr.bw_inuse = http_req->end;
			}
			log.u_bbr.flex6 = http_req->start_seq;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
				log.u_bbr.flex8 |= 4;
				log.u_bbr.epoch = http_req->end_seq;
			}
		}
#endif
		memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
		th = (struct tcphdr *)tcp_hdr_buf;
		th->th_seq = ae->seq;
		th->th_ack = ae->ack;
		th->th_win = ae->win;
		/* Now fill in the ports */
		th->th_sport = inp->inp_fport;
		th->th_dport = inp->inp_lport;
		tcp_set_flags(th, ae->flags);
		/* Now do we have a timestamp option? */
		if (ae->flags & HAS_TSTMP) {
			u_char *cp;
			uint32_t val;

			th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
			cp = (u_char *)(th + 1);
			*cp = TCPOPT_NOP;
			cp++;
			*cp = TCPOPT_NOP;
			cp++;
			*cp = TCPOPT_TIMESTAMP;
			cp++;
			*cp = TCPOLEN_TIMESTAMP;
			cp++;
			val = htonl(ae->ts_value);
			bcopy((char *)&val,
			    (char *)cp, sizeof(uint32_t));
			val = htonl(ae->ts_echo);
			bcopy((char *)&val,
			    (char *)(cp + 4), sizeof(uint32_t));
		} else
			th->th_off = (sizeof(struct tcphdr) >> 2);

		/*
		 * For sane logging we need to play a little trick.
		 * If the ack were fully processed we would have moved
		 * snd_una to high_seq, but since compressed acks are
		 * processed in two phases, at this point (logging) snd_una
		 * won't be advanced. So we would see multiple acks showing
		 * the advancement. We can prevent that by "pretending" that
		 * snd_una was advanced and then un-advancing it so that the
		 * logging code has the right value for tlb_snd_una.
		 */
		if (tp->snd_una != high_seq) {
			orig_snd_una = tp->snd_una;
			tp->snd_una = high_seq;
			xx = 1;
		} else
			xx = 0;
		TCP_LOG_EVENTP(tp, th,
		    &tptosocket(tp)->so_rcv,
		    &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
		    0, &log, true, &ltv);
		if (xx) {
			tp->snd_una = orig_snd_una;
		}
	}

}

static void
rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
{
	uint32_t us_rtt;
	/*
	 * A persist or keep-alive was forced out, update our
	 * min rtt time. Note that we now worry about lost responses.
	 * When a subsequent keep-alive or persist times out
	 * and forced_ack is still on, then the last probe
	 * was not responded to. In such cases we have a
	 * sysctl that controls the behavior. Either we apply
	 * the rtt but with reduced confidence (0), or we just
	 * plain don't apply the rtt estimate. Having data flow
	 * will clear the probe_not_answered flag, i.e. the cum-ack
	 * moves forward <or> we exit and re-enter persists.
	 */

	rack->forced_ack = 0;
	rack->rc_tp->t_rxtshift = 0;
	if ((rack->rc_in_persist &&
	    (tiwin == rack->rc_tp->snd_wnd)) ||
	    (rack->rc_in_persist == 0)) {
		/*
		 * In persists only apply the RTT update if this is
		 * a response to our window probe. And that
		 * means the rwnd sent must match the current
		 * snd_wnd. If it does not, then we got a
		 * window update ack instead. For keepalive
		 * we allow the answer no matter what the window.
		 *
		 * Note that if probe_not_answered is set then
		 * the forced_ack_ts is the oldest one, i.e. from the first
		 * probe sent that might have been lost. This assures
		 * us that if we do calculate an RTT it errs on the long
		 * side rather than being too short.
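		 * So if, say, two probes went out and only the later one was
		 * answered, measuring from the first probe's timestamp can
		 * only overestimate the RTT, which is why such samples are
		 * fed in below with confidence 0 (when the sysctl allows
		 * them at all) rather than trusted fully.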
13188 */ 13189 if (rack->rc_in_persist) 13190 counter_u64_add(rack_persists_acks, 1); 13191 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13192 if (us_rtt == 0) 13193 us_rtt = 1; 13194 if (rack->probe_not_answered == 0) { 13195 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13196 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13197 } else { 13198 /* We have a retransmitted probe here too */ 13199 if (rack_apply_rtt_with_reduced_conf) { 13200 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13201 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13202 } 13203 } 13204 } 13205 } 13206 13207 static int 13208 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13209 { 13210 /* 13211 * Handle a "special" compressed ack mbuf. Each incoming 13212 * ack has only four possible dispositions: 13213 * 13214 * A) It moves the cum-ack forward 13215 * B) It is behind the cum-ack. 13216 * C) It is a window-update ack. 13217 * D) It is a dup-ack. 13218 * 13219 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13220 * in the incoming mbuf. We also need to still pay attention 13221 * to nxt_pkt since there may be another packet after this 13222 * one. 13223 */ 13224 #ifdef TCP_ACCOUNTING 13225 uint64_t ts_val; 13226 uint64_t rdstc; 13227 #endif 13228 int segsiz; 13229 struct timespec ts; 13230 struct tcp_rack *rack; 13231 struct tcp_ackent *ae; 13232 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13233 int cnt, i, did_out, ourfinisacked = 0; 13234 struct tcpopt to_holder, *to = NULL; 13235 #ifdef TCP_ACCOUNTING 13236 int win_up_req = 0; 13237 #endif 13238 int nsegs = 0; 13239 int under_pacing = 1; 13240 int recovery = 0; 13241 #ifdef TCP_ACCOUNTING 13242 sched_pin(); 13243 #endif 13244 rack = (struct tcp_rack *)tp->t_fb_ptr; 13245 if (rack->gp_ready && 13246 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13247 under_pacing = 0; 13248 else 13249 under_pacing = 1; 13250 13251 if (rack->r_state != tp->t_state) 13252 rack_set_state(tp, rack); 13253 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13254 (tp->t_flags & TF_GPUTINPROG)) { 13255 /* 13256 * We have a goodput in progress 13257 * and we have entered a late state. 13258 * Do we have enough data in the sb 13259 * to handle the GPUT request? 13260 */ 13261 uint32_t bytes; 13262 13263 bytes = tp->gput_ack - tp->gput_seq; 13264 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13265 bytes += tp->gput_seq - tp->snd_una; 13266 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 13267 /* 13268 * There are not enough bytes in the socket 13269 * buffer that have been sent to cover this 13270 * measurement. Cancel it. 
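			 * (The arithmetic above: the measurement still needs
			 * gput_ack - gput_seq bytes, plus any gap between
			 * snd_una and gput_seq that has not been reached yet;
			 * if the send buffer cannot supply that much, the
			 * sample could never complete in these late states.)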
13271 */ 13272 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13273 rack->r_ctl.rc_gp_srtt /*flex1*/, 13274 tp->gput_seq, 13275 0, 0, 18, __LINE__, NULL, 0); 13276 tp->t_flags &= ~TF_GPUTINPROG; 13277 } 13278 } 13279 to = &to_holder; 13280 to->to_flags = 0; 13281 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13282 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13283 cnt = m->m_len / sizeof(struct tcp_ackent); 13284 counter_u64_add(rack_multi_single_eq, cnt); 13285 high_seq = tp->snd_una; 13286 the_win = tp->snd_wnd; 13287 win_seq = tp->snd_wl1; 13288 win_upd_ack = tp->snd_wl2; 13289 cts = tcp_tv_to_usectick(tv); 13290 ms_cts = tcp_tv_to_mssectick(tv); 13291 rack->r_ctl.rc_rcvtime = cts; 13292 segsiz = ctf_fixed_maxseg(tp); 13293 if ((rack->rc_gp_dyn_mul) && 13294 (rack->use_fixed_rate == 0) && 13295 (rack->rc_always_pace)) { 13296 /* Check in on probertt */ 13297 rack_check_probe_rtt(rack, cts); 13298 } 13299 for (i = 0; i < cnt; i++) { 13300 #ifdef TCP_ACCOUNTING 13301 ts_val = get_cyclecount(); 13302 #endif 13303 rack_clear_rate_sample(rack); 13304 ae = ((mtod(m, struct tcp_ackent *)) + i); 13305 /* Setup the window */ 13306 tiwin = ae->win << tp->snd_scale; 13307 if (tiwin > rack->r_ctl.rc_high_rwnd) 13308 rack->r_ctl.rc_high_rwnd = tiwin; 13309 /* figure out the type of ack */ 13310 if (SEQ_LT(ae->ack, high_seq)) { 13311 /* Case B*/ 13312 ae->ack_val_set = ACK_BEHIND; 13313 } else if (SEQ_GT(ae->ack, high_seq)) { 13314 /* Case A */ 13315 ae->ack_val_set = ACK_CUMACK; 13316 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13317 /* Case D */ 13318 ae->ack_val_set = ACK_DUPACK; 13319 } else { 13320 /* Case C */ 13321 ae->ack_val_set = ACK_RWND; 13322 } 13323 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13324 /* Validate timestamp */ 13325 if (ae->flags & HAS_TSTMP) { 13326 /* Setup for a timestamp */ 13327 to->to_flags = TOF_TS; 13328 ae->ts_echo -= tp->ts_offset; 13329 to->to_tsecr = ae->ts_echo; 13330 to->to_tsval = ae->ts_value; 13331 /* 13332 * If echoed timestamp is later than the current time, fall back to 13333 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13334 * were used when this connection was established. 13335 */ 13336 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13337 to->to_tsecr = 0; 13338 if (tp->ts_recent && 13339 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13340 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13341 #ifdef TCP_ACCOUNTING 13342 rdstc = get_cyclecount(); 13343 if (rdstc > ts_val) { 13344 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13345 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13346 } 13347 } 13348 #endif 13349 continue; 13350 } 13351 } 13352 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13353 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13354 tp->ts_recent_age = tcp_ts_getticks(); 13355 tp->ts_recent = ae->ts_value; 13356 } 13357 } else { 13358 /* Setup for a no options */ 13359 to->to_flags = 0; 13360 } 13361 /* Update the rcv time and perform idle reduction possibly */ 13362 if (tp->t_idle_reduce && 13363 (tp->snd_max == tp->snd_una) && 13364 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13365 counter_u64_add(rack_input_idle_reduces, 1); 13366 rack_cc_after_idle(rack, tp); 13367 } 13368 tp->t_rcvtime = ticks; 13369 /* Now what about ECN of a chain of pure ACKs? 
*/ 13370 if (tcp_ecn_input_segment(tp, ae->flags, 0, 13371 tcp_packets_this_ack(tp, ae->ack), 13372 ae->codepoint)) 13373 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13374 #ifdef TCP_ACCOUNTING 13375 /* Count for the specific type of ack in */ 13376 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13377 tp->tcp_cnt_counters[ae->ack_val_set]++; 13378 } 13379 #endif 13380 /* 13381 * Note how we could move up these in the determination 13382 * above, but we don't so that way the timestamp checks (and ECN) 13383 * is done first before we do any processing on the ACK. 13384 * The non-compressed path through the code has this 13385 * weakness (noted by @jtl) that it actually does some 13386 * processing before verifying the timestamp information. 13387 * We don't take that path here which is why we set 13388 * the ack_val_set first, do the timestamp and ecn 13389 * processing, and then look at what we have setup. 13390 */ 13391 if (ae->ack_val_set == ACK_BEHIND) { 13392 /* 13393 * Case B flag reordering, if window is not closed 13394 * or it could be a keep-alive or persists 13395 */ 13396 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13397 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13398 } 13399 } else if (ae->ack_val_set == ACK_DUPACK) { 13400 /* Case D */ 13401 rack_strike_dupack(rack); 13402 } else if (ae->ack_val_set == ACK_RWND) { 13403 /* Case C */ 13404 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13405 ts.tv_sec = ae->timestamp / 1000000000; 13406 ts.tv_nsec = ae->timestamp % 1000000000; 13407 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13408 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13409 } else { 13410 rack->r_ctl.act_rcv_time = *tv; 13411 } 13412 if (rack->forced_ack) { 13413 rack_handle_probe_response(rack, tiwin, 13414 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13415 } 13416 #ifdef TCP_ACCOUNTING 13417 win_up_req = 1; 13418 #endif 13419 win_upd_ack = ae->ack; 13420 win_seq = ae->seq; 13421 the_win = tiwin; 13422 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13423 } else { 13424 /* Case A */ 13425 if (SEQ_GT(ae->ack, tp->snd_max)) { 13426 /* 13427 * We just send an ack since the incoming 13428 * ack is beyond the largest seq we sent. 
13429 */ 13430 if ((tp->t_flags & TF_ACKNOW) == 0) { 13431 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13432 if (tp->t_flags & TF_ACKNOW) 13433 rack->r_wanted_output = 1; 13434 } 13435 } else { 13436 nsegs++; 13437 /* If the window changed setup to update */ 13438 if (tiwin != tp->snd_wnd) { 13439 win_upd_ack = ae->ack; 13440 win_seq = ae->seq; 13441 the_win = tiwin; 13442 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13443 } 13444 #ifdef TCP_ACCOUNTING 13445 /* Account for the acks */ 13446 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13447 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13448 } 13449 #endif 13450 high_seq = ae->ack; 13451 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 13452 union tcp_log_stackspecific log; 13453 struct timeval tv; 13454 13455 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13456 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13457 log.u_bbr.flex1 = high_seq; 13458 log.u_bbr.flex2 = rack->r_ctl.roundends; 13459 log.u_bbr.flex3 = rack->r_ctl.current_round; 13460 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13461 log.u_bbr.flex8 = 8; 13462 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13463 0, &log, false, NULL, NULL, 0, &tv); 13464 } 13465 /* 13466 * The draft (v3) calls for us to use SEQ_GEQ, but that 13467 * causes issues when we are just going app limited. Let's 13468 * instead use SEQ_GT <or> where it's equal but more data 13469 * is outstanding. 13470 */ 13471 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13472 ((high_seq == rack->r_ctl.roundends) && 13473 SEQ_GT(tp->snd_max, tp->snd_una))) { 13474 rack->r_ctl.current_round++; 13475 rack->r_ctl.roundends = tp->snd_max; 13476 if (CC_ALGO(tp)->newround != NULL) { 13477 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 13478 } 13479 } 13480 /* Setup our act_rcv_time */ 13481 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13482 ts.tv_sec = ae->timestamp / 1000000000; 13483 ts.tv_nsec = ae->timestamp % 1000000000; 13484 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13485 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13486 } else { 13487 rack->r_ctl.act_rcv_time = *tv; 13488 } 13489 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13490 if (rack->rc_dsack_round_seen) { 13491 /* Is the dsack round over? 
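A DSACK round ends once the cumulative ack reaches r_ctl.dsack_round_end; at that point clear rc_dsack_round_seen and log the event.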
*/ 13492 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13493 /* Yes it is */ 13494 rack->rc_dsack_round_seen = 0; 13495 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13496 } 13497 } 13498 } 13499 } 13500 /* And lets be sure to commit the rtt measurements for this ack */ 13501 tcp_rack_xmit_timer_commit(rack, tp); 13502 #ifdef TCP_ACCOUNTING 13503 rdstc = get_cyclecount(); 13504 if (rdstc > ts_val) { 13505 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13506 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13507 if (ae->ack_val_set == ACK_CUMACK) 13508 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13509 } 13510 } 13511 #endif 13512 } 13513 #ifdef TCP_ACCOUNTING 13514 ts_val = get_cyclecount(); 13515 #endif 13516 /* Tend to any collapsed window */ 13517 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 13518 /* The peer collapsed the window */ 13519 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__); 13520 } else if (rack->rc_has_collapsed) 13521 rack_un_collapse_window(rack, __LINE__); 13522 if ((rack->r_collapse_point_valid) && 13523 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 13524 rack->r_collapse_point_valid = 0; 13525 acked_amount = acked = (high_seq - tp->snd_una); 13526 if (acked) { 13527 /* 13528 * Clear the probe not answered flag 13529 * since cum-ack moved forward. 13530 */ 13531 rack->probe_not_answered = 0; 13532 if (rack->sack_attack_disable == 0) 13533 rack_do_decay(rack); 13534 if (acked >= segsiz) { 13535 /* 13536 * You only get credit for 13537 * MSS and greater (and you get extra 13538 * credit for larger cum-ack moves). 13539 */ 13540 int ac; 13541 13542 ac = acked / segsiz; 13543 rack->r_ctl.ack_count += ac; 13544 counter_u64_add(rack_ack_total, ac); 13545 } 13546 if (rack->r_ctl.ack_count > 0xfff00000) { 13547 /* 13548 * reduce the number to keep us under 13549 * a uint32_t. 13550 */ 13551 rack->r_ctl.ack_count /= 2; 13552 rack->r_ctl.sack_count /= 2; 13553 } 13554 if (tp->t_flags & TF_NEEDSYN) { 13555 /* 13556 * T/TCP: Connection was half-synchronized, and our SYN has 13557 * been ACK'd (so connection is now fully synchronized). Go 13558 * to non-starred state, increment snd_una for ACK of SYN, 13559 * and check if we can do window scaling. 13560 */ 13561 tp->t_flags &= ~TF_NEEDSYN; 13562 tp->snd_una++; 13563 acked_amount = acked = (high_seq - tp->snd_una); 13564 } 13565 if (acked > sbavail(&so->so_snd)) 13566 acked_amount = sbavail(&so->so_snd); 13567 #ifdef NETFLIX_EXP_DETECTION 13568 /* 13569 * We only care on a cum-ack move if we are in a sack-disabled 13570 * state. We have already added in to the ack_count, and we never 13571 * would disable on a cum-ack move, so we only care to do the 13572 * detection if it may "undo" it, i.e. we were in disabled already. 
13573 */ 13574 if (rack->sack_attack_disable) 13575 rack_do_detection(tp, rack, acked_amount, segsiz); 13576 #endif 13577 if (IN_FASTRECOVERY(tp->t_flags) && 13578 (rack->rack_no_prr == 0)) 13579 rack_update_prr(tp, rack, acked_amount, high_seq); 13580 if (IN_RECOVERY(tp->t_flags)) { 13581 if (SEQ_LT(high_seq, tp->snd_recover) && 13582 (SEQ_LT(high_seq, tp->snd_max))) { 13583 tcp_rack_partialack(tp); 13584 } else { 13585 rack_post_recovery(tp, high_seq); 13586 recovery = 1; 13587 } 13588 } 13589 /* Handle the rack-log-ack part (sendmap) */ 13590 if ((sbused(&so->so_snd) == 0) && 13591 (acked > acked_amount) && 13592 (tp->t_state >= TCPS_FIN_WAIT_1) && 13593 (tp->t_flags & TF_SENTFIN)) { 13594 /* 13595 * We must be sure our fin 13596 * was sent and acked (we can be 13597 * in FIN_WAIT_1 without having 13598 * sent the fin). 13599 */ 13600 ourfinisacked = 1; 13601 /* 13602 * Lets make sure snd_una is updated 13603 * since most likely acked_amount = 0 (it 13604 * should be). 13605 */ 13606 tp->snd_una = high_seq; 13607 } 13608 /* Did we make a RTO error? */ 13609 if ((tp->t_flags & TF_PREVVALID) && 13610 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13611 tp->t_flags &= ~TF_PREVVALID; 13612 if (tp->t_rxtshift == 1 && 13613 (int)(ticks - tp->t_badrxtwin) < 0) 13614 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13615 } 13616 /* Handle the data in the socket buffer */ 13617 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13618 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13619 if (acked_amount > 0) { 13620 struct mbuf *mfree; 13621 13622 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13623 SOCKBUF_LOCK(&so->so_snd); 13624 mfree = sbcut_locked(&so->so_snd, acked_amount); 13625 tp->snd_una = high_seq; 13626 /* Note we want to hold the sb lock through the sendmap adjust */ 13627 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13628 /* Wake up the socket if we have room to write more */ 13629 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13630 sowwakeup_locked(so); 13631 m_freem(mfree); 13632 } 13633 /* update progress */ 13634 tp->t_acktime = ticks; 13635 rack_log_progress_event(rack, tp, tp->t_acktime, 13636 PROGRESS_UPDATE, __LINE__); 13637 /* Clear out shifts and such */ 13638 tp->t_rxtshift = 0; 13639 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13640 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13641 rack->rc_tlp_in_progress = 0; 13642 rack->r_ctl.rc_tlp_cnt_out = 0; 13643 /* Send recover and snd_nxt must be dragged along */ 13644 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13645 tp->snd_recover = tp->snd_una; 13646 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13647 tp->snd_nxt = tp->snd_una; 13648 /* 13649 * If the RXT timer is running we want to 13650 * stop it, so we can restart a TLP (or new RXT). 
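* The timer audit / hpts start logic at the end of this function will then arm whichever timer is appropriate once the ack has been fully processed.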
13651 */ 13652 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13653 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13654 #ifdef NETFLIX_HTTP_LOGGING 13655 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13656 #endif 13657 tp->snd_wl2 = high_seq; 13658 tp->t_dupacks = 0; 13659 if (under_pacing && 13660 (rack->use_fixed_rate == 0) && 13661 (rack->in_probe_rtt == 0) && 13662 rack->rc_gp_dyn_mul && 13663 rack->rc_always_pace) { 13664 /* Check if we are dragging bottom */ 13665 rack_check_bottom_drag(tp, rack, so, acked); 13666 } 13667 if (tp->snd_una == tp->snd_max) { 13668 tp->t_flags &= ~TF_PREVVALID; 13669 rack->r_ctl.retran_during_recovery = 0; 13670 rack->r_ctl.dsack_byte_cnt = 0; 13671 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13672 if (rack->r_ctl.rc_went_idle_time == 0) 13673 rack->r_ctl.rc_went_idle_time = 1; 13674 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13675 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13676 tp->t_acktime = 0; 13677 /* Set so we might enter persists... */ 13678 rack->r_wanted_output = 1; 13679 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13680 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13681 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13682 (sbavail(&so->so_snd) == 0) && 13683 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13684 /* 13685 * The socket was gone and the 13686 * peer sent data (not now in the past), time to 13687 * reset him. 13688 */ 13689 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13690 /* tcp_close will kill the inp pre-log the Reset */ 13691 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13692 #ifdef TCP_ACCOUNTING 13693 rdstc = get_cyclecount(); 13694 if (rdstc > ts_val) { 13695 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13696 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13697 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13698 } 13699 } 13700 #endif 13701 m_freem(m); 13702 tp = tcp_close(tp); 13703 if (tp == NULL) { 13704 #ifdef TCP_ACCOUNTING 13705 sched_unpin(); 13706 #endif 13707 return (1); 13708 } 13709 /* 13710 * We would normally do drop-with-reset which would 13711 * send back a reset. We can't since we don't have 13712 * all the needed bits. Instead lets arrange for 13713 * a call to tcp_output(). That way since we 13714 * are in the closed state we will generate a reset. 13715 * 13716 * Note if tcp_accounting is on we don't unpin since 13717 * we do that after the goto label. 13718 */ 13719 goto send_out_a_rst; 13720 } 13721 if ((sbused(&so->so_snd) == 0) && 13722 (tp->t_state >= TCPS_FIN_WAIT_1) && 13723 (tp->t_flags & TF_SENTFIN)) { 13724 /* 13725 * If we can't receive any more data, then closing user can 13726 * proceed. Starting the timer is contrary to the 13727 * specification, but if we don't get a FIN we'll hang 13728 * forever. 13729 * 13730 */ 13731 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13732 soisdisconnected(so); 13733 tcp_timer_activate(tp, TT_2MSL, 13734 (tcp_fast_finwait2_recycle ? 13735 tcp_finwait2_timeout : 13736 TP_MAXIDLE(tp))); 13737 } 13738 if (ourfinisacked == 0) { 13739 /* 13740 * We don't change to fin-wait-2 if we have our fin acked 13741 * which means we are probably in TCPS_CLOSING. 
13742 */ 13743 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13744 } 13745 } 13746 } 13747 /* Wake up the socket if we have room to write more */ 13748 if (sbavail(&so->so_snd)) { 13749 rack->r_wanted_output = 1; 13750 if (ctf_progress_timeout_check(tp, true)) { 13751 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13752 tp, tick, PROGRESS_DROP, __LINE__); 13753 /* 13754 * We cheat here and don't send a RST, we should send one 13755 * when the pacer drops the connection. 13756 */ 13757 #ifdef TCP_ACCOUNTING 13758 rdstc = get_cyclecount(); 13759 if (rdstc > ts_val) { 13760 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13761 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13762 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13763 } 13764 } 13765 sched_unpin(); 13766 #endif 13767 (void)tcp_drop(tp, ETIMEDOUT); 13768 m_freem(m); 13769 return (1); 13770 } 13771 } 13772 if (ourfinisacked) { 13773 switch(tp->t_state) { 13774 case TCPS_CLOSING: 13775 #ifdef TCP_ACCOUNTING 13776 rdstc = get_cyclecount(); 13777 if (rdstc > ts_val) { 13778 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13779 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13780 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13781 } 13782 } 13783 sched_unpin(); 13784 #endif 13785 tcp_twstart(tp); 13786 m_freem(m); 13787 return (1); 13788 break; 13789 case TCPS_LAST_ACK: 13790 #ifdef TCP_ACCOUNTING 13791 rdstc = get_cyclecount(); 13792 if (rdstc > ts_val) { 13793 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13794 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13795 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13796 } 13797 } 13798 sched_unpin(); 13799 #endif 13800 tp = tcp_close(tp); 13801 ctf_do_drop(m, tp); 13802 return (1); 13803 break; 13804 case TCPS_FIN_WAIT_1: 13805 #ifdef TCP_ACCOUNTING 13806 rdstc = get_cyclecount(); 13807 if (rdstc > ts_val) { 13808 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13809 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13810 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13811 } 13812 } 13813 #endif 13814 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13815 soisdisconnected(so); 13816 tcp_timer_activate(tp, TT_2MSL, 13817 (tcp_fast_finwait2_recycle ? 13818 tcp_finwait2_timeout : 13819 TP_MAXIDLE(tp))); 13820 } 13821 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13822 break; 13823 default: 13824 break; 13825 } 13826 } 13827 if (rack->r_fast_output) { 13828 /* 13829 * We re doing fast output.. can we expand that? 
13830 */ 13831 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13832 } 13833 #ifdef TCP_ACCOUNTING 13834 rdstc = get_cyclecount(); 13835 if (rdstc > ts_val) { 13836 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13837 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13838 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13839 } 13840 } 13841 13842 } else if (win_up_req) { 13843 rdstc = get_cyclecount(); 13844 if (rdstc > ts_val) { 13845 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13846 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13847 } 13848 } 13849 #endif 13850 } 13851 /* Now is there a next packet, if so we are done */ 13852 m_freem(m); 13853 did_out = 0; 13854 if (nxt_pkt) { 13855 #ifdef TCP_ACCOUNTING 13856 sched_unpin(); 13857 #endif 13858 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13859 return (0); 13860 } 13861 rack_handle_might_revert(tp, rack); 13862 ctf_calc_rwin(so, tp); 13863 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13864 send_out_a_rst: 13865 if (tcp_output(tp) < 0) { 13866 #ifdef TCP_ACCOUNTING 13867 sched_unpin(); 13868 #endif 13869 return (1); 13870 } 13871 did_out = 1; 13872 } 13873 rack_free_trim(rack); 13874 #ifdef TCP_ACCOUNTING 13875 sched_unpin(); 13876 #endif 13877 rack_timer_audit(tp, rack, &so->so_snd); 13878 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13879 return (0); 13880 } 13881 13882 13883 static int 13884 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13885 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13886 int32_t nxt_pkt, struct timeval *tv) 13887 { 13888 struct inpcb *inp = tptoinpcb(tp); 13889 #ifdef TCP_ACCOUNTING 13890 uint64_t ts_val; 13891 #endif 13892 int32_t thflags, retval, did_out = 0; 13893 int32_t way_out = 0; 13894 /* 13895 * cts - is the current time from tv (caller gets ts) in microseconds. 13896 * ms_cts - is the current time from tv in milliseconds. 13897 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13898 */ 13899 uint32_t cts, us_cts, ms_cts; 13900 uint32_t tiwin, high_seq; 13901 struct timespec ts; 13902 struct tcpopt to; 13903 struct tcp_rack *rack; 13904 struct rack_sendmap *rsm; 13905 int32_t prev_state = 0; 13906 #ifdef TCP_ACCOUNTING 13907 int ack_val_set = 0xf; 13908 #endif 13909 int nsegs; 13910 13911 NET_EPOCH_ASSERT(); 13912 INP_WLOCK_ASSERT(inp); 13913 13914 /* 13915 * tv passed from common code is from either M_TSTMP_LRO or 13916 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13917 */ 13918 rack = (struct tcp_rack *)tp->t_fb_ptr; 13919 if (m->m_flags & M_ACKCMP) { 13920 /* 13921 * All compressed ack's are ack's by definition so 13922 * remove any ack required flag and then do the processing. 13923 */ 13924 rack->rc_ack_required = 0; 13925 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13926 } 13927 if (m->m_flags & M_ACKCMP) { 13928 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 13929 } 13930 cts = tcp_tv_to_usectick(tv); 13931 ms_cts = tcp_tv_to_mssectick(tv); 13932 nsegs = m->m_pkthdr.lro_nsegs; 13933 counter_u64_add(rack_proc_non_comp_ack, 1); 13934 thflags = tcp_get_flags(th); 13935 #ifdef TCP_ACCOUNTING 13936 sched_pin(); 13937 if (thflags & TH_ACK) 13938 ts_val = get_cyclecount(); 13939 #endif 13940 if ((m->m_flags & M_TSTMP) || 13941 (m->m_flags & M_TSTMP_LRO)) { 13942 mbuf_tstmp2timespec(m, &ts); 13943 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13944 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13945 } else 13946 rack->r_ctl.act_rcv_time = *tv; 13947 kern_prefetch(rack, &prev_state); 13948 prev_state = 0; 13949 /* 13950 * Unscale the window into a 32-bit value. For the SYN_SENT state 13951 * the scale is zero. 13952 */ 13953 tiwin = th->th_win << tp->snd_scale; 13954 #ifdef TCP_ACCOUNTING 13955 if (thflags & TH_ACK) { 13956 /* 13957 * We have a tradeoff here. We can either do what we are 13958 * doing i.e. pinning to this CPU and then doing the accounting 13959 * <or> we could do a critical enter, setup the rdtsc and cpu 13960 * as in below, and then validate we are on the same CPU on 13961 * exit. I have choosen to not do the critical enter since 13962 * that often will gain you a context switch, and instead lock 13963 * us (line above this if) to the same CPU with sched_pin(). This 13964 * means we may be context switched out for a higher priority 13965 * interupt but we won't be moved to another CPU. 13966 * 13967 * If this occurs (which it won't very often since we most likely 13968 * are running this code in interupt context and only a higher 13969 * priority will bump us ... clock?) we will falsely add in 13970 * to the time the interupt processing time plus the ack processing 13971 * time. This is ok since its a rare event. 13972 */ 13973 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 13974 ctf_fixed_maxseg(tp)); 13975 } 13976 #endif 13977 /* 13978 * Parse options on any incoming segment. 13979 */ 13980 memset(&to, 0, sizeof(to)); 13981 tcp_dooptions(&to, (u_char *)(th + 1), 13982 (th->th_off << 2) - sizeof(struct tcphdr), 13983 (thflags & TH_SYN) ? TO_SYN : 0); 13984 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 13985 __func__)); 13986 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 13987 __func__)); 13988 13989 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13990 (tp->t_flags & TF_GPUTINPROG)) { 13991 /* 13992 * We have a goodput in progress 13993 * and we have entered a late state. 13994 * Do we have enough data in the sb 13995 * to handle the GPUT request? 13996 */ 13997 uint32_t bytes; 13998 13999 bytes = tp->gput_ack - tp->gput_seq; 14000 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14001 bytes += tp->gput_seq - tp->snd_una; 14002 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 14003 /* 14004 * There are not enough bytes in the socket 14005 * buffer that have been sent to cover this 14006 * measurement. Cancel it. 
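* This mirrors the cancellation in the compressed-ack path above: log the decision (method 18) and clear TF_GPUTINPROG.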
14007 */ 14008 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14009 rack->r_ctl.rc_gp_srtt /*flex1*/, 14010 tp->gput_seq, 14011 0, 0, 18, __LINE__, NULL, 0); 14012 tp->t_flags &= ~TF_GPUTINPROG; 14013 } 14014 } 14015 high_seq = th->th_ack; 14016 if (tcp_bblogging_on(rack->rc_tp)) { 14017 union tcp_log_stackspecific log; 14018 struct timeval ltv; 14019 #ifdef NETFLIX_HTTP_LOGGING 14020 struct http_sendfile_track *http_req; 14021 14022 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14023 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14024 } else { 14025 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14026 } 14027 #endif 14028 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14029 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14030 if (rack->rack_no_prr == 0) 14031 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14032 else 14033 log.u_bbr.flex1 = 0; 14034 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14035 log.u_bbr.use_lt_bw <<= 1; 14036 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14037 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14038 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14039 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14040 log.u_bbr.flex3 = m->m_flags; 14041 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14042 log.u_bbr.lost = thflags; 14043 log.u_bbr.pacing_gain = 0x1; 14044 #ifdef TCP_ACCOUNTING 14045 log.u_bbr.cwnd_gain = ack_val_set; 14046 #endif 14047 log.u_bbr.flex7 = 2; 14048 if (m->m_flags & M_TSTMP) { 14049 /* Record the hardware timestamp if present */ 14050 mbuf_tstmp2timespec(m, &ts); 14051 ltv.tv_sec = ts.tv_sec; 14052 ltv.tv_usec = ts.tv_nsec / 1000; 14053 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 14054 } else if (m->m_flags & M_TSTMP_LRO) { 14055 /* Record the LRO the arrival timestamp */ 14056 mbuf_tstmp2timespec(m, &ts); 14057 ltv.tv_sec = ts.tv_sec; 14058 ltv.tv_usec = ts.tv_nsec / 1000; 14059 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 14060 } 14061 log.u_bbr.timeStamp = tcp_get_usecs(<v); 14062 /* Log the rcv time */ 14063 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14064 #ifdef NETFLIX_HTTP_LOGGING 14065 log.u_bbr.applimited = tp->t_http_closed; 14066 log.u_bbr.applimited <<= 8; 14067 log.u_bbr.applimited |= tp->t_http_open; 14068 log.u_bbr.applimited <<= 8; 14069 log.u_bbr.applimited |= tp->t_http_req; 14070 if (http_req) { 14071 /* Copy out any client req info */ 14072 /* seconds */ 14073 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14074 /* useconds */ 14075 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14076 log.u_bbr.rttProp = http_req->timestamp; 14077 log.u_bbr.cur_del_rate = http_req->start; 14078 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14079 log.u_bbr.flex8 |= 1; 14080 } else { 14081 log.u_bbr.flex8 |= 2; 14082 log.u_bbr.bw_inuse = http_req->end; 14083 } 14084 log.u_bbr.flex6 = http_req->start_seq; 14085 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14086 log.u_bbr.flex8 |= 4; 14087 log.u_bbr.epoch = http_req->end_seq; 14088 } 14089 } 14090 #endif 14091 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14092 tlen, &log, true, <v); 14093 } 14094 /* Remove ack required flag if set, we have one */ 14095 if (thflags & TH_ACK) 14096 rack->rc_ack_required = 0; 14097 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14098 way_out = 4; 14099 retval = 0; 14100 m_freem(m); 14101 goto done_with_input; 14102 } 14103 /* 14104 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14105 * check SEQ.ACK first as 
described on page 66 of RFC 793, section 3.9. 14106 */ 14107 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14108 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14109 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14110 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14111 #ifdef TCP_ACCOUNTING 14112 sched_unpin(); 14113 #endif 14114 return (1); 14115 } 14116 /* 14117 * If timestamps were negotiated during SYN/ACK and a 14118 * segment without a timestamp is received, silently drop 14119 * the segment, unless it is a RST segment or missing timestamps are 14120 * tolerated. 14121 * See section 3.2 of RFC 7323. 14122 */ 14123 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14124 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14125 way_out = 5; 14126 retval = 0; 14127 m_freem(m); 14128 goto done_with_input; 14129 } 14130 14131 /* 14132 * Segment received on connection. Reset idle time and keep-alive 14133 * timer. XXX: This should be done after segment validation to 14134 * ignore broken/spoofed segs. 14135 */ 14136 if (tp->t_idle_reduce && 14137 (tp->snd_max == tp->snd_una) && 14138 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14139 counter_u64_add(rack_input_idle_reduces, 1); 14140 rack_cc_after_idle(rack, tp); 14141 } 14142 tp->t_rcvtime = ticks; 14143 #ifdef STATS 14144 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14145 #endif 14146 if (tiwin > rack->r_ctl.rc_high_rwnd) 14147 rack->r_ctl.rc_high_rwnd = tiwin; 14148 /* 14149 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14150 * this to occur after we've validated the segment. 14151 */ 14152 if (tcp_ecn_input_segment(tp, thflags, tlen, 14153 tcp_packets_this_ack(tp, th->th_ack), 14154 iptos)) 14155 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14156 14157 /* 14158 * If echoed timestamp is later than the current time, fall back to 14159 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14160 * were used when this connection was established. 14161 */ 14162 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14163 to.to_tsecr -= tp->ts_offset; 14164 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14165 to.to_tsecr = 0; 14166 } 14167 14168 /* 14169 * If its the first time in we need to take care of options and 14170 * verify we can do SACK for rack! 14171 */ 14172 if (rack->r_state == 0) { 14173 /* Should be init'd by rack_init() */ 14174 KASSERT(rack->rc_inp != NULL, 14175 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14176 if (rack->rc_inp == NULL) { 14177 rack->rc_inp = inp; 14178 } 14179 14180 /* 14181 * Process options only when we get SYN/ACK back. The SYN 14182 * case for incoming connections is handled in tcp_syncache. 14183 * According to RFC1323 the window field in a SYN (i.e., a 14184 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14185 * this is traditional behavior, may need to be cleaned up. 14186 */ 14187 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14188 /* Handle parallel SYN for ECN */ 14189 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14190 if ((to.to_flags & TOF_SCALE) && 14191 (tp->t_flags & TF_REQ_SCALE)) { 14192 tp->t_flags |= TF_RCVD_SCALE; 14193 tp->snd_scale = to.to_wscale; 14194 } else 14195 tp->t_flags &= ~TF_REQ_SCALE; 14196 /* 14197 * Initial send window. It will be updated with the 14198 * next incoming segment to the scaled value. 
14199 */ 14200 tp->snd_wnd = th->th_win; 14201 rack_validate_fo_sendwin_up(tp, rack); 14202 if ((to.to_flags & TOF_TS) && 14203 (tp->t_flags & TF_REQ_TSTMP)) { 14204 tp->t_flags |= TF_RCVD_TSTMP; 14205 tp->ts_recent = to.to_tsval; 14206 tp->ts_recent_age = cts; 14207 } else 14208 tp->t_flags &= ~TF_REQ_TSTMP; 14209 if (to.to_flags & TOF_MSS) { 14210 tcp_mss(tp, to.to_mss); 14211 } 14212 if ((tp->t_flags & TF_SACK_PERMIT) && 14213 (to.to_flags & TOF_SACKPERM) == 0) 14214 tp->t_flags &= ~TF_SACK_PERMIT; 14215 if (IS_FASTOPEN(tp->t_flags)) { 14216 if (to.to_flags & TOF_FASTOPEN) { 14217 uint16_t mss; 14218 14219 if (to.to_flags & TOF_MSS) 14220 mss = to.to_mss; 14221 else 14222 if ((inp->inp_vflag & INP_IPV6) != 0) 14223 mss = TCP6_MSS; 14224 else 14225 mss = TCP_MSS; 14226 tcp_fastopen_update_cache(tp, mss, 14227 to.to_tfo_len, to.to_tfo_cookie); 14228 } else 14229 tcp_fastopen_disable_path(tp); 14230 } 14231 } 14232 /* 14233 * At this point we are at the initial call. Here we decide 14234 * if we are doing RACK or not. We do this by seeing if 14235 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14236 * The code now does do dup-ack counting so if you don't 14237 * switch back you won't get rack & TLP, but you will still 14238 * get this stack. 14239 */ 14240 14241 if ((rack_sack_not_required == 0) && 14242 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14243 tcp_switch_back_to_default(tp); 14244 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14245 tlen, iptos); 14246 #ifdef TCP_ACCOUNTING 14247 sched_unpin(); 14248 #endif 14249 return (1); 14250 } 14251 tcp_set_hpts(inp); 14252 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14253 } 14254 if (thflags & TH_FIN) 14255 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14256 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14257 if ((rack->rc_gp_dyn_mul) && 14258 (rack->use_fixed_rate == 0) && 14259 (rack->rc_always_pace)) { 14260 /* Check in on probertt */ 14261 rack_check_probe_rtt(rack, us_cts); 14262 } 14263 rack_clear_rate_sample(rack); 14264 if ((rack->forced_ack) && 14265 ((tcp_get_flags(th) & TH_RST) == 0)) { 14266 rack_handle_probe_response(rack, tiwin, us_cts); 14267 } 14268 /* 14269 * This is the one exception case where we set the rack state 14270 * always. All other times (timers etc) we must have a rack-state 14271 * set (so we assure we have done the checks above for SACK). 14272 */ 14273 rack->r_ctl.rc_rcvtime = cts; 14274 if (rack->r_state != tp->t_state) 14275 rack_set_state(tp, rack); 14276 if (SEQ_GT(th->th_ack, tp->snd_una) && 14277 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14278 kern_prefetch(rsm, &prev_state); 14279 prev_state = rack->r_state; 14280 retval = (*rack->r_substate) (m, th, so, 14281 tp, &to, drop_hdrlen, 14282 tlen, tiwin, thflags, nxt_pkt, iptos); 14283 if (retval == 0) { 14284 /* 14285 * If retval is 1 the tcb is unlocked and most likely the tp 14286 * is gone. 14287 */ 14288 INP_WLOCK_ASSERT(inp); 14289 if ((rack->rc_gp_dyn_mul) && 14290 (rack->rc_always_pace) && 14291 (rack->use_fixed_rate == 0) && 14292 rack->in_probe_rtt && 14293 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14294 /* 14295 * If we are going for target, lets recheck before 14296 * we output. 14297 */ 14298 rack_check_probe_rtt(rack, us_cts); 14299 } 14300 if (rack->set_pacing_done_a_iw == 0) { 14301 /* How much has been acked? 
*/ 14302 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14303 /* We have enough to set in the pacing segment size */ 14304 rack->set_pacing_done_a_iw = 1; 14305 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14306 } 14307 } 14308 tcp_rack_xmit_timer_commit(rack, tp); 14309 #ifdef TCP_ACCOUNTING 14310 /* 14311 * If we set the ack_val_set to what ack processing we are doing 14312 * we also want to track how many cycles we burned. Note 14313 * the bits after tcp_output we let be "free". This is because 14314 * we are also tracking the tcp_output times as well. Note the 14315 * use of 0xf here since we only have 11 counters (0 - 0xa) and 14316 * 0xf cannot be returned and is what we initialize it to, to 14317 * indicate we are not doing the tabulations. 14318 */ 14319 if (ack_val_set != 0xf) { 14320 uint64_t crtsc; 14321 14322 crtsc = get_cyclecount(); 14323 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14324 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14325 } 14326 } 14327 #endif 14328 if (nxt_pkt == 0) { 14329 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14330 do_output_now: 14331 if (tcp_output(tp) < 0) 14332 return (1); 14333 did_out = 1; 14334 } 14335 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14336 rack_free_trim(rack); 14337 } 14338 /* Update any rounds needed */ 14339 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { 14340 union tcp_log_stackspecific log; 14341 struct timeval tv; 14342 14343 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14344 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14345 log.u_bbr.flex1 = high_seq; 14346 log.u_bbr.flex2 = rack->r_ctl.roundends; 14347 log.u_bbr.flex3 = rack->r_ctl.current_round; 14348 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14349 log.u_bbr.flex8 = 9; 14350 tcp_log_event(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14351 0, &log, false, NULL, NULL, 0, &tv); 14352 } 14353 /* 14354 * The draft (v3) calls for us to use SEQ_GEQ, but that 14355 * causes issues when we are just going app limited. Let's 14356 * instead use SEQ_GT <or> where it's equal but more data 14357 * is outstanding. 14358 */ 14359 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14360 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14361 rack->r_ctl.current_round++; 14362 rack->r_ctl.roundends = tp->snd_max; 14363 if (CC_ALGO(tp)->newround != NULL) { 14364 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 14365 } 14366 } 14367 if ((nxt_pkt == 0) && 14368 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14369 (SEQ_GT(tp->snd_max, tp->snd_una) || 14370 (tp->t_flags & TF_DELACK) || 14371 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14372 (tp->t_state <= TCPS_CLOSING)))) { 14373 /* We could not send (probably in the hpts but stopped the timer earlier)? 
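If we are still queued in hpts waiting on a pacer output there is nothing to do; otherwise pull ourselves out of hpts, note whether the pacer was early or late, and start the appropriate timer.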
*/ 14374 if ((tp->snd_max == tp->snd_una) && 14375 ((tp->t_flags & TF_DELACK) == 0) && 14376 (tcp_in_hpts(rack->rc_inp)) && 14377 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14378 /* keep alive not needed if we are hptsi output yet */ 14379 ; 14380 } else { 14381 int late = 0; 14382 if (tcp_in_hpts(inp)) { 14383 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14384 us_cts = tcp_get_usecs(NULL); 14385 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14386 rack->r_early = 1; 14387 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14388 } else 14389 late = 1; 14390 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14391 } 14392 tcp_hpts_remove(inp); 14393 } 14394 if (late && (did_out == 0)) { 14395 /* 14396 * We are late in the sending 14397 * and we did not call the output 14398 * (this probably should not happen). 14399 */ 14400 goto do_output_now; 14401 } 14402 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14403 } 14404 way_out = 1; 14405 } else if (nxt_pkt == 0) { 14406 /* Do we have the correct timer running? */ 14407 rack_timer_audit(tp, rack, &so->so_snd); 14408 way_out = 2; 14409 } 14410 done_with_input: 14411 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14412 if (did_out) 14413 rack->r_wanted_output = 0; 14414 } 14415 #ifdef TCP_ACCOUNTING 14416 sched_unpin(); 14417 #endif 14418 return (retval); 14419 } 14420 14421 void 14422 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14423 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14424 { 14425 struct timeval tv; 14426 14427 /* First lets see if we have old packets */ 14428 if (tp->t_in_pkt) { 14429 if (ctf_do_queued_segments(so, tp, 1)) { 14430 m_freem(m); 14431 return; 14432 } 14433 } 14434 if (m->m_flags & M_TSTMP_LRO) { 14435 mbuf_tstmp2timeval(m, &tv); 14436 } else { 14437 /* Should not be should we kassert instead? */ 14438 tcp_get_usecs(&tv); 14439 } 14440 if (rack_do_segment_nounlock(m, th, so, tp, 14441 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14442 INP_WUNLOCK(tptoinpcb(tp)); 14443 } 14444 } 14445 14446 struct rack_sendmap * 14447 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14448 { 14449 struct rack_sendmap *rsm = NULL; 14450 int32_t idx; 14451 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14452 14453 /* Return the next guy to be re-transmitted */ 14454 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14455 return (NULL); 14456 } 14457 if (tp->t_flags & TF_SENTFIN) { 14458 /* retran the end FIN? */ 14459 return (NULL); 14460 } 14461 /* ok lets look at this one */ 14462 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14463 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14464 return (rsm); 14465 } 14466 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14467 goto check_it; 14468 } 14469 rsm = rack_find_lowest_rsm(rack); 14470 if (rsm == NULL) { 14471 return (NULL); 14472 } 14473 check_it: 14474 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14475 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14476 /* 14477 * No sack so we automatically do the 3 strikes and 14478 * retransmit (no rack timer would be started). 
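* That is, with a peer that did not negotiate SACK we fall back to the classic dup-ack threshold (DUP_ACK_THRESHOLD) to declare the segment lost.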
14479 */ 14480 14481 return (rsm); 14482 } 14483 if (rsm->r_flags & RACK_ACKED) { 14484 return (NULL); 14485 } 14486 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14487 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14488 /* Its not yet ready */ 14489 return (NULL); 14490 } 14491 srtt = rack_grab_rtt(tp, rack); 14492 idx = rsm->r_rtr_cnt - 1; 14493 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14494 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14495 if ((tsused == ts_low) || 14496 (TSTMP_LT(tsused, ts_low))) { 14497 /* No time since sending */ 14498 return (NULL); 14499 } 14500 if ((tsused - ts_low) < thresh) { 14501 /* It has not been long enough yet */ 14502 return (NULL); 14503 } 14504 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14505 ((rsm->r_flags & RACK_SACK_PASSED) && 14506 (rack->sack_attack_disable == 0))) { 14507 /* 14508 * We have passed the dup-ack threshold <or> 14509 * a SACK has indicated this is missing. 14510 * Note that if you are a declared attacker 14511 * it is only the dup-ack threshold that 14512 * will cause retransmits. 14513 */ 14514 /* log retransmit reason */ 14515 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14516 rack->r_fast_output = 0; 14517 return (rsm); 14518 } 14519 return (NULL); 14520 } 14521 14522 static void 14523 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14524 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14525 int line, struct rack_sendmap *rsm, uint8_t quality) 14526 { 14527 if (tcp_bblogging_on(rack->rc_tp)) { 14528 union tcp_log_stackspecific log; 14529 struct timeval tv; 14530 14531 memset(&log, 0, sizeof(log)); 14532 log.u_bbr.flex1 = slot; 14533 log.u_bbr.flex2 = len; 14534 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14535 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14536 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14537 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14538 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14539 log.u_bbr.use_lt_bw <<= 1; 14540 log.u_bbr.use_lt_bw |= rack->r_late; 14541 log.u_bbr.use_lt_bw <<= 1; 14542 log.u_bbr.use_lt_bw |= rack->r_early; 14543 log.u_bbr.use_lt_bw <<= 1; 14544 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14545 log.u_bbr.use_lt_bw <<= 1; 14546 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14547 log.u_bbr.use_lt_bw <<= 1; 14548 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14549 log.u_bbr.use_lt_bw <<= 1; 14550 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14551 log.u_bbr.use_lt_bw <<= 1; 14552 log.u_bbr.use_lt_bw |= rack->gp_ready; 14553 log.u_bbr.pkt_epoch = line; 14554 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14555 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14556 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14557 log.u_bbr.bw_inuse = bw_est; 14558 log.u_bbr.delRate = bw; 14559 if (rack->r_ctl.gp_bw == 0) 14560 log.u_bbr.cur_del_rate = 0; 14561 else 14562 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14563 log.u_bbr.rttProp = len_time; 14564 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14565 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14566 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14567 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14568 /* We are in slow start */ 14569 log.u_bbr.flex7 = 1; 14570 } else { 14571 /* we are on congestion avoidance */ 14572 log.u_bbr.flex7 = 0; 14573 } 14574 log.u_bbr.flex8 = method; 14575 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14576 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, 
rack->r_ctl.rc_sacked); 14577 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14578 log.u_bbr.cwnd_gain <<= 1; 14579 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14580 log.u_bbr.cwnd_gain <<= 1; 14581 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14582 log.u_bbr.bbr_substate = quality; 14583 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14584 &rack->rc_inp->inp_socket->so_rcv, 14585 &rack->rc_inp->inp_socket->so_snd, 14586 BBR_LOG_HPTSI_CALC, 0, 14587 0, &log, false, &tv); 14588 } 14589 } 14590 14591 static uint32_t 14592 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14593 { 14594 uint32_t new_tso, user_max; 14595 14596 user_max = rack->rc_user_set_max_segs * mss; 14597 if (rack->rc_force_max_seg) { 14598 return (user_max); 14599 } 14600 if (rack->use_fixed_rate && 14601 ((rack->r_ctl.crte == NULL) || 14602 (bw != rack->r_ctl.crte->rate))) { 14603 /* Use the user mss since we are not exactly matched */ 14604 return (user_max); 14605 } 14606 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14607 if (new_tso > user_max) 14608 new_tso = user_max; 14609 return (new_tso); 14610 } 14611 14612 static int32_t 14613 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14614 { 14615 uint64_t lentim, fill_bw; 14616 14617 /* Lets first see if we are full, if so continue with normal rate */ 14618 rack->r_via_fill_cw = 0; 14619 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14620 return (slot); 14621 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14622 return (slot); 14623 if (rack->r_ctl.rc_last_us_rtt == 0) 14624 return (slot); 14625 if (rack->rc_pace_fill_if_rttin_range && 14626 (rack->r_ctl.rc_last_us_rtt >= 14627 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14628 /* The rtt is huge, N * smallest, lets not fill */ 14629 return (slot); 14630 } 14631 /* 14632 * first lets calculate the b/w based on the last us-rtt 14633 * and the sndwnd. 14634 */ 14635 fill_bw = rack->r_ctl.cwnd_to_use; 14636 /* Take the rwnd if its smaller */ 14637 if (fill_bw > rack->rc_tp->snd_wnd) 14638 fill_bw = rack->rc_tp->snd_wnd; 14639 if (rack->r_fill_less_agg) { 14640 /* 14641 * Now take away the inflight (this will reduce our 14642 * aggressiveness and yeah, if we get that much out in 1RTT 14643 * we will have had acks come back and still be behind). 
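* As a rough worked example of the conversion done below (ignoring this inflight subtraction, numbers purely illustrative): with a usable window of 100000 bytes and a last RTT of 20000 usecs the fill rate is 100000 * 1000000 / 20000 = 5000000 bytes/sec, so a 14480 byte send maps to about 14480 * 1000000 / 5000000 = 2896 usecs of pacing delay.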
14644 */ 14645 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14646 } 14647 /* Now lets make it into a b/w */ 14648 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14649 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14650 /* We are below the min b/w */ 14651 if (non_paced) 14652 *rate_wanted = fill_bw; 14653 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14654 return (slot); 14655 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14656 fill_bw = rack->r_ctl.bw_rate_cap; 14657 rack->r_via_fill_cw = 1; 14658 if (rack->r_rack_hw_rate_caps && 14659 (rack->r_ctl.crte != NULL)) { 14660 uint64_t high_rate; 14661 14662 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14663 if (fill_bw > high_rate) { 14664 /* We are capping bw at the highest rate table entry */ 14665 if (*rate_wanted > high_rate) { 14666 /* The original rate was also capped */ 14667 rack->r_via_fill_cw = 0; 14668 } 14669 rack_log_hdwr_pacing(rack, 14670 fill_bw, high_rate, __LINE__, 14671 0, 3); 14672 fill_bw = high_rate; 14673 if (capped) 14674 *capped = 1; 14675 } 14676 } else if ((rack->r_ctl.crte == NULL) && 14677 (rack->rack_hdrw_pacing == 0) && 14678 (rack->rack_hdw_pace_ena) && 14679 rack->r_rack_hw_rate_caps && 14680 (rack->rack_attempt_hdwr_pace == 0) && 14681 (rack->rc_inp->inp_route.ro_nh != NULL) && 14682 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14683 /* 14684 * Ok we may have a first attempt that is greater than our top rate 14685 * lets check. 14686 */ 14687 uint64_t high_rate; 14688 14689 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14690 if (high_rate) { 14691 if (fill_bw > high_rate) { 14692 fill_bw = high_rate; 14693 if (capped) 14694 *capped = 1; 14695 } 14696 } 14697 } 14698 /* 14699 * Ok fill_bw holds our mythical b/w to fill the cwnd 14700 * in a rtt, what does that time wise equate too? 14701 */ 14702 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14703 lentim /= fill_bw; 14704 *rate_wanted = fill_bw; 14705 if (non_paced || (lentim < slot)) { 14706 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14707 0, lentim, 12, __LINE__, NULL, 0); 14708 return ((int32_t)lentim); 14709 } else 14710 return (slot); 14711 } 14712 14713 static int32_t 14714 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14715 { 14716 uint64_t srtt; 14717 int32_t slot = 0; 14718 int can_start_hw_pacing = 1; 14719 int err; 14720 14721 if (rack->rc_always_pace == 0) { 14722 /* 14723 * We use the most optimistic possible cwnd/srtt for 14724 * sending calculations. This will make our 14725 * calculation anticipate getting more through 14726 * quicker then possible. But thats ok we don't want 14727 * the peer to have a gap in data sending. 14728 */ 14729 uint64_t cwnd, tr_perms = 0; 14730 int32_t reduce = 0; 14731 14732 old_method: 14733 /* 14734 * We keep no precise pacing with the old method 14735 * instead we use the pacer to mitigate bursts. 
14736 */ 14737 if (rack->r_ctl.rc_rack_min_rtt) 14738 srtt = rack->r_ctl.rc_rack_min_rtt; 14739 else 14740 srtt = max(tp->t_srtt, 1); 14741 if (rack->r_ctl.rc_rack_largest_cwnd) 14742 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14743 else 14744 cwnd = rack->r_ctl.cwnd_to_use; 14745 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14746 tr_perms = (cwnd * 1000) / srtt; 14747 if (tr_perms == 0) { 14748 tr_perms = ctf_fixed_maxseg(tp); 14749 } 14750 /* 14751 * Calculate how long this will take to drain, if 14752 * the calculation comes out to zero, thats ok we 14753 * will use send_a_lot to possibly spin around for 14754 * more increasing tot_len_this_send to the point 14755 * that its going to require a pace, or we hit the 14756 * cwnd. Which in that case we are just waiting for 14757 * a ACK. 14758 */ 14759 slot = len / tr_perms; 14760 /* Now do we reduce the time so we don't run dry? */ 14761 if (slot && rack_slot_reduction) { 14762 reduce = (slot / rack_slot_reduction); 14763 if (reduce < slot) { 14764 slot -= reduce; 14765 } else 14766 slot = 0; 14767 } 14768 slot *= HPTS_USEC_IN_MSEC; 14769 if (rack->rc_pace_to_cwnd) { 14770 uint64_t rate_wanted = 0; 14771 14772 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14773 rack->rc_ack_can_sendout_data = 1; 14774 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14775 } else 14776 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14777 } else { 14778 uint64_t bw_est, res, lentim, rate_wanted; 14779 uint32_t orig_val, segs, oh; 14780 int capped = 0; 14781 int prev_fill; 14782 14783 if ((rack->r_rr_config == 1) && rsm) { 14784 return (rack->r_ctl.rc_min_to); 14785 } 14786 if (rack->use_fixed_rate) { 14787 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14788 } else if ((rack->r_ctl.init_rate == 0) && 14789 #ifdef NETFLIX_PEAKRATE 14790 (rack->rc_tp->t_maxpeakrate == 0) && 14791 #endif 14792 (rack->r_ctl.gp_bw == 0)) { 14793 /* no way to yet do an estimate */ 14794 bw_est = rate_wanted = 0; 14795 } else { 14796 bw_est = rack_get_bw(rack); 14797 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14798 } 14799 if ((bw_est == 0) || (rate_wanted == 0) || 14800 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14801 /* 14802 * No way yet to make a b/w estimate or 14803 * our raise is set incorrectly. 14804 */ 14805 goto old_method; 14806 } 14807 /* We need to account for all the overheads */ 14808 segs = (len + segsiz - 1) / segsiz; 14809 /* 14810 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14811 * and how much data we put in each packet. Yes this 14812 * means we may be off if we are larger than 1500 bytes 14813 * or smaller. But this just makes us more conservative. 14814 */ 14815 if (rack_hw_rate_min && 14816 (bw_est < rack_hw_rate_min)) 14817 can_start_hw_pacing = 0; 14818 if (ETHERNET_SEGMENT_SIZE > segsiz) 14819 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14820 else 14821 oh = 0; 14822 segs *= oh; 14823 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14824 res = lentim / rate_wanted; 14825 slot = (uint32_t)res; 14826 orig_val = rack->r_ctl.rc_pace_max_segs; 14827 if (rack->r_ctl.crte == NULL) { 14828 /* 14829 * Only do this if we are not hardware pacing 14830 * since if we are doing hw-pacing below we will 14831 * set make a call after setting up or changing 14832 * the rate. 
14833 */ 14834 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14835 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14836 /* 14837 * We lost our rate somehow, this can happen 14838 * if the interface changed underneath us. 14839 */ 14840 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14841 rack->r_ctl.crte = NULL; 14842 /* Lets re-allow attempting to setup pacing */ 14843 rack->rack_hdrw_pacing = 0; 14844 rack->rack_attempt_hdwr_pace = 0; 14845 rack_log_hdwr_pacing(rack, 14846 rate_wanted, bw_est, __LINE__, 14847 0, 6); 14848 } 14849 /* Did we change the TSO size, if so log it */ 14850 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14851 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14852 prev_fill = rack->r_via_fill_cw; 14853 if ((rack->rc_pace_to_cwnd) && 14854 (capped == 0) && 14855 (rack->use_fixed_rate == 0) && 14856 (rack->in_probe_rtt == 0) && 14857 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14858 /* 14859 * We want to pace at our rate *or* faster to 14860 * fill the cwnd to the max if its not full. 14861 */ 14862 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14863 } 14864 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14865 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14866 if ((rack->rack_hdw_pace_ena) && 14867 (can_start_hw_pacing > 0) && 14868 (rack->rack_hdrw_pacing == 0) && 14869 (rack->rack_attempt_hdwr_pace == 0)) { 14870 /* 14871 * Lets attempt to turn on hardware pacing 14872 * if we can. 14873 */ 14874 rack->rack_attempt_hdwr_pace = 1; 14875 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14876 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14877 rate_wanted, 14878 RS_PACING_GEQ, 14879 &err, &rack->r_ctl.crte_prev_rate); 14880 if (rack->r_ctl.crte) { 14881 rack->rack_hdrw_pacing = 1; 14882 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14883 0, rack->r_ctl.crte, 14884 NULL); 14885 rack_log_hdwr_pacing(rack, 14886 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14887 err, 0); 14888 rack->r_ctl.last_hw_bw_req = rate_wanted; 14889 } else { 14890 counter_u64_add(rack_hw_pace_init_fail, 1); 14891 } 14892 } else if (rack->rack_hdrw_pacing && 14893 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14894 /* Do we need to adjust our rate? */ 14895 const struct tcp_hwrate_limit_table *nrte; 14896 14897 if (rack->r_up_only && 14898 (rate_wanted < rack->r_ctl.crte->rate)) { 14899 /** 14900 * We have four possible states here 14901 * having to do with the previous time 14902 * and this time. 14903 * previous | this-time 14904 * A) 0 | 0 -- fill_cw not in the picture 14905 * B) 1 | 0 -- we were doing a fill-cw but now are not 14906 * C) 1 | 1 -- all rates from fill_cw 14907 * D) 0 | 1 -- we were doing non-fill and now we are filling 14908 * 14909 * For case A, C and D we don't allow a drop. But for 14910 * case B where we now our on our steady rate we do 14911 * allow a drop. 14912 * 14913 */ 14914 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14915 goto done_w_hdwr; 14916 } 14917 if ((rate_wanted > rack->r_ctl.crte->rate) || 14918 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 14919 if (rack_hw_rate_to_low && 14920 (bw_est < rack_hw_rate_to_low)) { 14921 /* 14922 * The pacing rate is too low for hardware, but 14923 * do allow hardware pacing to be restarted. 
14924 */ 14925 rack_log_hdwr_pacing(rack, 14926 bw_est, rack->r_ctl.crte->rate, __LINE__, 14927 0, 5); 14928 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14929 rack->r_ctl.crte = NULL; 14930 rack->rack_attempt_hdwr_pace = 0; 14931 rack->rack_hdrw_pacing = 0; 14932 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14933 goto done_w_hdwr; 14934 } 14935 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 14936 rack->rc_tp, 14937 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14938 rate_wanted, 14939 RS_PACING_GEQ, 14940 &err, &rack->r_ctl.crte_prev_rate); 14941 if (nrte == NULL) { 14942 /* Lost the rate */ 14943 rack->rack_hdrw_pacing = 0; 14944 rack->r_ctl.crte = NULL; 14945 rack_log_hdwr_pacing(rack, 14946 rate_wanted, 0, __LINE__, 14947 err, 1); 14948 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14949 counter_u64_add(rack_hw_pace_lost, 1); 14950 } else if (nrte != rack->r_ctl.crte) { 14951 rack->r_ctl.crte = nrte; 14952 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 14953 segsiz, 0, 14954 rack->r_ctl.crte, 14955 NULL); 14956 rack_log_hdwr_pacing(rack, 14957 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14958 err, 2); 14959 rack->r_ctl.last_hw_bw_req = rate_wanted; 14960 } 14961 } else { 14962 /* We just need to adjust the segment size */ 14963 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14964 rack_log_hdwr_pacing(rack, 14965 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14966 0, 4); 14967 rack->r_ctl.last_hw_bw_req = rate_wanted; 14968 } 14969 } 14970 } 14971 if ((rack->r_ctl.crte != NULL) && 14972 (rack->r_ctl.crte->rate == rate_wanted)) { 14973 /* 14974 * We need to add a extra if the rates 14975 * are exactly matched. The idea is 14976 * we want the software to make sure the 14977 * queue is empty before adding more, this 14978 * gives us N MSS extra pace times where 14979 * N is our sysctl 14980 */ 14981 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 14982 } 14983 done_w_hdwr: 14984 if (rack_limit_time_with_srtt && 14985 (rack->use_fixed_rate == 0) && 14986 #ifdef NETFLIX_PEAKRATE 14987 (rack->rc_tp->t_maxpeakrate == 0) && 14988 #endif 14989 (rack->rack_hdrw_pacing == 0)) { 14990 /* 14991 * Sanity check, we do not allow the pacing delay 14992 * to be longer than the SRTT of the path. If it is 14993 * a slow path, then adding a packet should increase 14994 * the RTT and compensate for this i.e. the srtt will 14995 * be greater so the allowed pacing time will be greater. 14996 * 14997 * Note this restriction is not for where a peak rate 14998 * is set, we are doing fixed pacing or hardware pacing. 14999 */ 15000 if (rack->rc_tp->t_srtt) 15001 srtt = rack->rc_tp->t_srtt; 15002 else 15003 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15004 if (srtt < (uint64_t)slot) { 15005 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15006 slot = srtt; 15007 } 15008 } 15009 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15010 } 15011 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15012 /* 15013 * If this rate is seeing enobufs when it 15014 * goes to send then either the nic is out 15015 * of gas or we are mis-estimating the time 15016 * somehow and not letting the queue empty 15017 * completely. Lets add to the pacing time. 
15018 */ 15019 int hw_boost_delay; 15020 15021 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15022 if (hw_boost_delay > rack_enobuf_hw_max) 15023 hw_boost_delay = rack_enobuf_hw_max; 15024 else if (hw_boost_delay < rack_enobuf_hw_min) 15025 hw_boost_delay = rack_enobuf_hw_min; 15026 slot += hw_boost_delay; 15027 } 15028 return (slot); 15029 } 15030 15031 static void 15032 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15033 tcp_seq startseq, uint32_t sb_offset) 15034 { 15035 struct rack_sendmap *my_rsm = NULL; 15036 struct rack_sendmap fe; 15037 15038 if (tp->t_state < TCPS_ESTABLISHED) { 15039 /* 15040 * We don't start any measurements if we are 15041 * not at least established. 15042 */ 15043 return; 15044 } 15045 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15046 /* 15047 * We will get no more data into the SB 15048 * this means we need to have the data available 15049 * before we start a measurement. 15050 */ 15051 15052 if (sbavail(&tptosocket(tp)->so_snd) < 15053 max(rc_init_window(rack), 15054 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15055 /* Nope not enough data */ 15056 return; 15057 } 15058 } 15059 tp->t_flags |= TF_GPUTINPROG; 15060 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15061 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15062 tp->gput_seq = startseq; 15063 rack->app_limited_needs_set = 0; 15064 if (rack->in_probe_rtt) 15065 rack->measure_saw_probe_rtt = 1; 15066 else if ((rack->measure_saw_probe_rtt) && 15067 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15068 rack->measure_saw_probe_rtt = 0; 15069 if (rack->rc_gp_filled) 15070 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15071 else { 15072 /* Special case initial measurement */ 15073 struct timeval tv; 15074 15075 tp->gput_ts = tcp_get_usecs(&tv); 15076 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15077 } 15078 /* 15079 * We take a guess out into the future, 15080 * if we have no measurement and no 15081 * initial rate, we measure the first 15082 * initial-windows worth of data to 15083 * speed up getting some GP measurement and 15084 * thus start pacing. 15085 */ 15086 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15087 rack->app_limited_needs_set = 1; 15088 tp->gput_ack = startseq + max(rc_init_window(rack), 15089 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15090 rack_log_pacing_delay_calc(rack, 15091 tp->gput_seq, 15092 tp->gput_ack, 15093 0, 15094 tp->gput_ts, 15095 rack->r_ctl.rc_app_limited_cnt, 15096 9, 15097 __LINE__, NULL, 0); 15098 return; 15099 } 15100 if (sb_offset) { 15101 /* 15102 * We are out somewhere in the sb 15103 * can we use the already outstanding data? 15104 */ 15105 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15106 /* 15107 * Yes first one is good and in this case 15108 * the tp->gput_ts is correctly set based on 15109 * the last ack that arrived (no need to 15110 * set things up when an ack comes in). 15111 */ 15112 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15113 if ((my_rsm == NULL) || 15114 (my_rsm->r_rtr_cnt != 1)) { 15115 /* retransmission? */ 15116 goto use_latest; 15117 } 15118 } else { 15119 if (rack->r_ctl.rc_first_appl == NULL) { 15120 /* 15121 * If rc_first_appl is NULL 15122 * then the cnt should be 0. 15123 * This is probably an error, maybe 15124 * a KASSERT would be approprate. 
15125 */ 15126 goto use_latest; 15127 } 15128 /* 15129 * If we have a marker pointer to the last one that is 15130 * app limited we can use that, but we need to set 15131 * things up so that when it gets ack'ed we record 15132 * the ack time (if its not already acked). 15133 */ 15134 rack->app_limited_needs_set = 1; 15135 /* 15136 * We want to get to the rsm that is either 15137 * next with space i.e. over 1 MSS or the one 15138 * after that (after the app-limited). 15139 */ 15140 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15141 rack->r_ctl.rc_first_appl); 15142 if (my_rsm) { 15143 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15144 /* Have to use the next one */ 15145 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15146 my_rsm); 15147 else { 15148 /* Use after the first MSS of it is acked */ 15149 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15150 goto start_set; 15151 } 15152 } 15153 if ((my_rsm == NULL) || 15154 (my_rsm->r_rtr_cnt != 1)) { 15155 /* 15156 * Either its a retransmit or 15157 * the last is the app-limited one. 15158 */ 15159 goto use_latest; 15160 } 15161 } 15162 tp->gput_seq = my_rsm->r_start; 15163 start_set: 15164 if (my_rsm->r_flags & RACK_ACKED) { 15165 /* 15166 * This one has been acked use the arrival ack time 15167 */ 15168 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15169 rack->app_limited_needs_set = 0; 15170 } 15171 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15172 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15173 rack_log_pacing_delay_calc(rack, 15174 tp->gput_seq, 15175 tp->gput_ack, 15176 (uint64_t)my_rsm, 15177 tp->gput_ts, 15178 rack->r_ctl.rc_app_limited_cnt, 15179 9, 15180 __LINE__, NULL, 0); 15181 return; 15182 } 15183 15184 use_latest: 15185 /* 15186 * We don't know how long we may have been 15187 * idle or if this is the first-send. Lets 15188 * setup the flag so we will trim off 15189 * the first ack'd data so we get a true 15190 * measurement. 15191 */ 15192 rack->app_limited_needs_set = 1; 15193 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15194 /* Find this guy so we can pull the send time */ 15195 fe.r_start = startseq; 15196 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15197 if (my_rsm) { 15198 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15199 if (my_rsm->r_flags & RACK_ACKED) { 15200 /* 15201 * Unlikely since its probably what was 15202 * just transmitted (but I am paranoid). 15203 */ 15204 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15205 rack->app_limited_needs_set = 0; 15206 } 15207 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15208 /* This also is unlikely */ 15209 tp->gput_seq = my_rsm->r_start; 15210 } 15211 } else { 15212 /* 15213 * TSNH unless we have some send-map limit, 15214 * and even at that it should not be hitting 15215 * that limit (we should have stopped sending). 
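 * If it does happen, fall back to the current uptime for the goodput output timestamp.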
15216 */ 15217 struct timeval tv; 15218 15219 microuptime(&tv); 15220 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15221 } 15222 rack_log_pacing_delay_calc(rack, 15223 tp->gput_seq, 15224 tp->gput_ack, 15225 (uint64_t)my_rsm, 15226 tp->gput_ts, 15227 rack->r_ctl.rc_app_limited_cnt, 15228 9, __LINE__, NULL, 0); 15229 } 15230 15231 static inline uint32_t 15232 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15233 uint32_t avail, int32_t sb_offset) 15234 { 15235 uint32_t len; 15236 uint32_t sendwin; 15237 15238 if (tp->snd_wnd > cwnd_to_use) 15239 sendwin = cwnd_to_use; 15240 else 15241 sendwin = tp->snd_wnd; 15242 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15243 /* We never want to go over our peers rcv-window */ 15244 len = 0; 15245 } else { 15246 uint32_t flight; 15247 15248 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15249 if (flight >= sendwin) { 15250 /* 15251 * We have in flight what we are allowed by cwnd (if 15252 * it was rwnd blocking it would have hit above out 15253 * >= tp->snd_wnd). 15254 */ 15255 return (0); 15256 } 15257 len = sendwin - flight; 15258 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15259 /* We would send too much (beyond the rwnd) */ 15260 len = tp->snd_wnd - ctf_outstanding(tp); 15261 } 15262 if ((len + sb_offset) > avail) { 15263 /* 15264 * We don't have that much in the SB, how much is 15265 * there? 15266 */ 15267 len = avail - sb_offset; 15268 } 15269 } 15270 return (len); 15271 } 15272 15273 static void 15274 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15275 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15276 int rsm_is_null, int optlen, int line, uint16_t mode) 15277 { 15278 if (tcp_bblogging_on(rack->rc_tp)) { 15279 union tcp_log_stackspecific log; 15280 struct timeval tv; 15281 15282 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15283 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15284 log.u_bbr.flex1 = error; 15285 log.u_bbr.flex2 = flags; 15286 log.u_bbr.flex3 = rsm_is_null; 15287 log.u_bbr.flex4 = ipoptlen; 15288 log.u_bbr.flex5 = tp->rcv_numsacks; 15289 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15290 log.u_bbr.flex7 = optlen; 15291 log.u_bbr.flex8 = rack->r_fsb_inited; 15292 log.u_bbr.applimited = rack->r_fast_output; 15293 log.u_bbr.bw_inuse = rack_get_bw(rack); 15294 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15295 log.u_bbr.cwnd_gain = mode; 15296 log.u_bbr.pkts_out = orig_len; 15297 log.u_bbr.lt_epoch = len; 15298 log.u_bbr.delivered = line; 15299 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15300 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15301 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15302 len, &log, false, NULL, NULL, 0, &tv); 15303 } 15304 } 15305 15306 15307 static struct mbuf * 15308 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15309 struct rack_fast_send_blk *fsb, 15310 int32_t seglimit, int32_t segsize, int hw_tls) 15311 { 15312 #ifdef KERN_TLS 15313 struct ktls_session *tls, *ntls; 15314 #ifdef INVARIANTS 15315 struct mbuf *start; 15316 #endif 15317 #endif 15318 struct mbuf *m, *n, **np, *smb; 15319 struct mbuf *top; 15320 int32_t off, soff; 15321 int32_t len = *plen; 15322 int32_t fragsize; 15323 int32_t len_cp = 0; 15324 uint32_t mlen, frags; 15325 15326 soff = off = the_off; 15327 smb = m = the_m; 15328 np = ⊤ 15329 top = NULL; 15330 #ifdef KERN_TLS 15331 if (hw_tls && (m->m_flags & M_EXTPG)) 15332 tls = m->m_epg_tls; 15333 else 
15334 tls = NULL; 15335 #ifdef INVARIANTS 15336 start = m; 15337 #endif 15338 #endif 15339 while (len > 0) { 15340 if (m == NULL) { 15341 *plen = len_cp; 15342 break; 15343 } 15344 #ifdef KERN_TLS 15345 if (hw_tls) { 15346 if (m->m_flags & M_EXTPG) 15347 ntls = m->m_epg_tls; 15348 else 15349 ntls = NULL; 15350 15351 /* 15352 * Avoid mixing TLS records with handshake 15353 * data or TLS records from different 15354 * sessions. 15355 */ 15356 if (tls != ntls) { 15357 MPASS(m != start); 15358 *plen = len_cp; 15359 break; 15360 } 15361 } 15362 #endif 15363 mlen = min(len, m->m_len - off); 15364 if (seglimit) { 15365 /* 15366 * For M_EXTPG mbufs, add 3 segments 15367 * + 1 in case we are crossing page boundaries 15368 * + 2 in case the TLS hdr/trailer are used 15369 * It is cheaper to just add the segments 15370 * than it is to take the cache miss to look 15371 * at the mbuf ext_pgs state in detail. 15372 */ 15373 if (m->m_flags & M_EXTPG) { 15374 fragsize = min(segsize, PAGE_SIZE); 15375 frags = 3; 15376 } else { 15377 fragsize = segsize; 15378 frags = 0; 15379 } 15380 15381 /* Break if we really can't fit anymore. */ 15382 if ((frags + 1) >= seglimit) { 15383 *plen = len_cp; 15384 break; 15385 } 15386 15387 /* 15388 * Reduce size if you can't copy the whole 15389 * mbuf. If we can't copy the whole mbuf, also 15390 * adjust len so the loop will end after this 15391 * mbuf. 15392 */ 15393 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15394 mlen = (seglimit - frags - 1) * fragsize; 15395 len = mlen; 15396 *plen = len_cp + len; 15397 } 15398 frags += howmany(mlen, fragsize); 15399 if (frags == 0) 15400 frags++; 15401 seglimit -= frags; 15402 KASSERT(seglimit > 0, 15403 ("%s: seglimit went too low", __func__)); 15404 } 15405 n = m_get(M_NOWAIT, m->m_type); 15406 *np = n; 15407 if (n == NULL) 15408 goto nospace; 15409 n->m_len = mlen; 15410 soff += mlen; 15411 len_cp += n->m_len; 15412 if (m->m_flags & (M_EXT|M_EXTPG)) { 15413 n->m_data = m->m_data + off; 15414 mb_dupcl(n, m); 15415 } else { 15416 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15417 (u_int)n->m_len); 15418 } 15419 len -= n->m_len; 15420 off = 0; 15421 m = m->m_next; 15422 np = &n->m_next; 15423 if (len || (soff == smb->m_len)) { 15424 /* 15425 * We have more so we move forward or 15426 * we have consumed the entire mbuf and 15427 * len has fell to 0. 15428 */ 15429 soff = 0; 15430 smb = m; 15431 } 15432 15433 } 15434 if (fsb != NULL) { 15435 fsb->m = smb; 15436 fsb->off = soff; 15437 if (smb) { 15438 /* 15439 * Save off the size of the mbuf. We do 15440 * this so that we can recognize when it 15441 * has been trimmed by sbcut() as acks 15442 * come in. 15443 */ 15444 fsb->o_m_len = smb->m_len; 15445 } else { 15446 /* 15447 * This is the case where the next mbuf went to NULL. This 15448 * means with this copy we have sent everything in the sb. 15449 * In theory we could clear the fast_output flag, but lets 15450 * not since its possible that we could get more added 15451 * and acks that call the extend function which would let 15452 * us send more. 15453 */ 15454 fsb->o_m_len = 0; 15455 } 15456 } 15457 return (top); 15458 nospace: 15459 if (top) 15460 m_freem(top); 15461 return (NULL); 15462 15463 } 15464 15465 /* 15466 * This is a copy of m_copym(), taking the TSO segment size/limit 15467 * constraints into account, and advancing the sndptr as it goes. 
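 * It also compensates for data that sbcut() trimmed off the front of the current mbuf (as acks arrived) by comparing the saved o_m_len with the live m_len.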
15468 */ 15469 static struct mbuf * 15470 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15471 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15472 { 15473 struct mbuf *m, *n; 15474 int32_t soff; 15475 15476 soff = rack->r_ctl.fsb.off; 15477 m = rack->r_ctl.fsb.m; 15478 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15479 /* 15480 * The mbuf had the front of it chopped off by an ack; 15481 * we need to adjust the soff/off by that difference. 15482 */ 15483 uint32_t delta; 15484 15485 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15486 soff -= delta; 15487 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15488 /* 15489 * The mbuf was expanded, probably by 15490 * an m_compress. Just update o_m_len. 15491 */ 15492 rack->r_ctl.fsb.o_m_len = m->m_len; 15493 } 15494 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15495 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15496 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15497 __FUNCTION__, 15498 rack, *plen, m, m->m_len)); 15499 /* Save off the right location before we copy and advance */ 15500 *s_soff = soff; 15501 *s_mb = rack->r_ctl.fsb.m; 15502 n = rack_fo_base_copym(m, soff, plen, 15503 &rack->r_ctl.fsb, 15504 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15505 return (n); 15506 } 15507 15508 static int 15509 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15510 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15511 { 15512 /* 15513 * Enter the fast retransmit path. We are given that a sched_pin is 15514 * in place (if accounting is compiled in) and the cycle count taken 15515 * at the entry is in the ts_val. The concept here is that the rsm 15516 * now holds the mbuf offsets and such so we can directly transmit 15517 * without a lot of overhead; the len field is already set to 15518 * prohibit us from sending too much (usually it's 1 MSS).
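 * Only the timestamp option is regenerated on this path; the base TCP/IP header is filled in from the fsb template built earlier.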
15519 */ 15520 struct ip *ip = NULL; 15521 struct udphdr *udp = NULL; 15522 struct tcphdr *th = NULL; 15523 struct mbuf *m = NULL; 15524 struct inpcb *inp; 15525 uint8_t *cpto; 15526 struct tcp_log_buffer *lgb; 15527 #ifdef TCP_ACCOUNTING 15528 uint64_t crtsc; 15529 int cnt_thru = 1; 15530 #endif 15531 struct tcpopt to; 15532 u_char opt[TCP_MAXOLEN]; 15533 uint32_t hdrlen, optlen; 15534 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 15535 uint16_t flags; 15536 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15537 uint32_t if_hw_tsomaxsegsize; 15538 15539 #ifdef INET6 15540 struct ip6_hdr *ip6 = NULL; 15541 15542 if (rack->r_is_v6) { 15543 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15544 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15545 } else 15546 #endif /* INET6 */ 15547 { 15548 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15549 hdrlen = sizeof(struct tcpiphdr); 15550 } 15551 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15552 goto failed; 15553 } 15554 if (doing_tlp) { 15555 /* Its a TLP add the flag, it may already be there but be sure */ 15556 rsm->r_flags |= RACK_TLP; 15557 } else { 15558 /* If it was a TLP it is not not on this retransmit */ 15559 rsm->r_flags &= ~RACK_TLP; 15560 } 15561 startseq = rsm->r_start; 15562 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15563 inp = rack->rc_inp; 15564 to.to_flags = 0; 15565 flags = tcp_outflags[tp->t_state]; 15566 if (flags & (TH_SYN|TH_RST)) { 15567 goto failed; 15568 } 15569 if (rsm->r_flags & RACK_HAS_FIN) { 15570 /* We can't send a FIN here */ 15571 goto failed; 15572 } 15573 if (flags & TH_FIN) { 15574 /* We never send a FIN */ 15575 flags &= ~TH_FIN; 15576 } 15577 if (tp->t_flags & TF_RCVD_TSTMP) { 15578 to.to_tsval = ms_cts + tp->ts_offset; 15579 to.to_tsecr = tp->ts_recent; 15580 to.to_flags = TOF_TS; 15581 } 15582 optlen = tcp_addoptions(&to, opt); 15583 hdrlen += optlen; 15584 udp = rack->r_ctl.fsb.udp; 15585 if (udp) 15586 hdrlen += sizeof(struct udphdr); 15587 if (rack->r_ctl.rc_pace_max_segs) 15588 max_val = rack->r_ctl.rc_pace_max_segs; 15589 else if (rack->rc_user_set_max_segs) 15590 max_val = rack->rc_user_set_max_segs * segsiz; 15591 else 15592 max_val = len; 15593 if ((tp->t_flags & TF_TSO) && 15594 V_tcp_do_tso && 15595 (len > segsiz) && 15596 (tp->t_port == 0)) 15597 tso = 1; 15598 #ifdef INET6 15599 if (MHLEN < hdrlen + max_linkhdr) 15600 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15601 else 15602 #endif 15603 m = m_gethdr(M_NOWAIT, MT_DATA); 15604 if (m == NULL) 15605 goto failed; 15606 m->m_data += max_linkhdr; 15607 m->m_len = hdrlen; 15608 th = rack->r_ctl.fsb.th; 15609 /* Establish the len to send */ 15610 if (len > max_val) 15611 len = max_val; 15612 if ((tso) && (len + optlen > tp->t_maxseg)) { 15613 uint32_t if_hw_tsomax; 15614 int32_t max_len; 15615 15616 /* extract TSO information */ 15617 if_hw_tsomax = tp->t_tsomax; 15618 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15619 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15620 /* 15621 * Check if we should limit by maximum payload 15622 * length: 15623 */ 15624 if (if_hw_tsomax != 0) { 15625 /* compute maximum TSO length */ 15626 max_len = (if_hw_tsomax - hdrlen - 15627 max_linkhdr); 15628 if (max_len <= 0) { 15629 goto failed; 15630 } else if (len > max_len) { 15631 len = max_len; 15632 } 15633 } 15634 if (len <= segsiz) { 15635 /* 15636 * In case there are too many small fragments don't 15637 * use TSO: 15638 */ 15639 tso = 0; 15640 } 15641 } else { 15642 tso = 0; 15643 } 15644 if ((tso == 0) && (len > 
segsiz)) 15645 len = segsiz; 15646 if ((len == 0) || 15647 (len <= MHLEN - hdrlen - max_linkhdr)) { 15648 goto failed; 15649 } 15650 th->th_seq = htonl(rsm->r_start); 15651 th->th_ack = htonl(tp->rcv_nxt); 15652 /* 15653 * The PUSH bit should only be applied 15654 * if the full retransmission is made. If 15655 * we are sending less than this is the 15656 * left hand edge and should not have 15657 * the PUSH bit. 15658 */ 15659 if ((rsm->r_flags & RACK_HAD_PUSH) && 15660 (len == (rsm->r_end - rsm->r_start))) 15661 flags |= TH_PUSH; 15662 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15663 if (th->th_win == 0) { 15664 tp->t_sndzerowin++; 15665 tp->t_flags |= TF_RXWIN0SENT; 15666 } else 15667 tp->t_flags &= ~TF_RXWIN0SENT; 15668 if (rsm->r_flags & RACK_TLP) { 15669 /* 15670 * TLP should not count in retran count, but 15671 * in its own bin 15672 */ 15673 counter_u64_add(rack_tlp_retran, 1); 15674 counter_u64_add(rack_tlp_retran_bytes, len); 15675 } else { 15676 tp->t_sndrexmitpack++; 15677 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15678 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15679 } 15680 #ifdef STATS 15681 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15682 len); 15683 #endif 15684 if (rsm->m == NULL) 15685 goto failed; 15686 if (rsm->orig_m_len != rsm->m->m_len) { 15687 /* Fix up the orig_m_len and possibly the mbuf offset */ 15688 rack_adjust_orig_mlen(rsm); 15689 } 15690 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15691 if (len <= segsiz) { 15692 /* 15693 * Must have ran out of mbufs for the copy 15694 * shorten it to no longer need tso. Lets 15695 * not put on sendalot since we are low on 15696 * mbufs. 15697 */ 15698 tso = 0; 15699 } 15700 if ((m->m_next == NULL) || (len <= 0)){ 15701 goto failed; 15702 } 15703 if (udp) { 15704 if (rack->r_is_v6) 15705 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15706 else 15707 ulen = hdrlen + len - sizeof(struct ip); 15708 udp->uh_ulen = htons(ulen); 15709 } 15710 m->m_pkthdr.rcvif = (struct ifnet *)0; 15711 if (TCPS_HAVERCVDSYN(tp->t_state) && 15712 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 15713 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15714 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15715 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15716 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15717 #ifdef INET6 15718 if (rack->r_is_v6) { 15719 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15720 ip6->ip6_flow |= htonl(ect << 20); 15721 } 15722 else 15723 #endif 15724 { 15725 ip->ip_tos &= ~IPTOS_ECN_MASK; 15726 ip->ip_tos |= ect; 15727 } 15728 } 15729 tcp_set_flags(th, flags); 15730 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15731 #ifdef INET6 15732 if (rack->r_is_v6) { 15733 if (tp->t_port) { 15734 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15735 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15736 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15737 th->th_sum = htons(0); 15738 UDPSTAT_INC(udps_opackets); 15739 } else { 15740 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15741 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15742 th->th_sum = in6_cksum_pseudo(ip6, 15743 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15744 0); 15745 } 15746 } 15747 #endif 15748 #if defined(INET6) && defined(INET) 15749 else 15750 #endif 15751 #ifdef INET 15752 { 15753 if (tp->t_port) { 15754 m->m_pkthdr.csum_flags = CSUM_UDP; 15755 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15756 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15757 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15758 th->th_sum = htons(0); 15759 UDPSTAT_INC(udps_opackets); 15760 } else { 15761 m->m_pkthdr.csum_flags = CSUM_TCP; 15762 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15763 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15764 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15765 IPPROTO_TCP + len + optlen)); 15766 } 15767 /* IP version must be set here for ipv4/ipv6 checking later */ 15768 KASSERT(ip->ip_v == IPVERSION, 15769 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15770 } 15771 #endif 15772 if (tso) { 15773 KASSERT(len > tp->t_maxseg - optlen, 15774 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15775 m->m_pkthdr.csum_flags |= CSUM_TSO; 15776 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15777 } 15778 #ifdef INET6 15779 if (rack->r_is_v6) { 15780 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15781 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15782 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15783 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15784 else 15785 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15786 } 15787 #endif 15788 #if defined(INET) && defined(INET6) 15789 else 15790 #endif 15791 #ifdef INET 15792 { 15793 ip->ip_len = htons(m->m_pkthdr.len); 15794 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15795 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15796 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15797 if (tp->t_port == 0 || len < V_tcp_minmss) { 15798 ip->ip_off |= htons(IP_DF); 15799 } 15800 } else { 15801 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15802 } 15803 } 15804 #endif 15805 /* Time to copy in our header */ 15806 cpto = mtod(m, uint8_t *); 15807 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15808 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15809 if (optlen) { 15810 bcopy(opt, th + 1, optlen); 15811 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15812 } else { 15813 th->th_off = sizeof(struct tcphdr) >> 2; 15814 } 15815 if (tcp_bblogging_on(rack->rc_tp)) { 15816 union tcp_log_stackspecific log; 15817 15818 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 15819 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 15820 counter_u64_add(rack_collapsed_win_rxt, 1); 15821 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 15822 } 15823 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15824 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15825 if (rack->rack_no_prr) 15826 log.u_bbr.flex1 = 0; 15827 else 15828 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15829 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15830 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15831 log.u_bbr.flex4 = max_val; 15832 log.u_bbr.flex5 = 0; 15833 /* Save off the early/late values */ 15834 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15835 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15836 log.u_bbr.bw_inuse = rack_get_bw(rack); 15837 if (doing_tlp == 0) 15838 log.u_bbr.flex8 = 1; 15839 else 15840 log.u_bbr.flex8 = 2; 15841 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15842 log.u_bbr.flex7 = 55; 15843 log.u_bbr.pkts_out = tp->t_maxseg; 15844 log.u_bbr.timeStamp = cts; 15845 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15846 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15847 log.u_bbr.delivered = 0; 15848 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15849 len, &log, false, NULL, NULL, 0, tv); 15850 } else 15851 lgb = 
NULL; 15852 #ifdef INET6 15853 if (rack->r_is_v6) { 15854 error = ip6_output(m, NULL, 15855 &inp->inp_route6, 15856 0, NULL, NULL, inp); 15857 } 15858 else 15859 #endif 15860 #ifdef INET 15861 { 15862 error = ip_output(m, NULL, 15863 &inp->inp_route, 15864 0, 0, inp); 15865 } 15866 #endif 15867 m = NULL; 15868 if (lgb) { 15869 lgb->tlb_errno = error; 15870 lgb = NULL; 15871 } 15872 if (error) { 15873 goto failed; 15874 } 15875 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15876 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15877 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15878 rack->rc_tlp_in_progress = 1; 15879 rack->r_ctl.rc_tlp_cnt_out++; 15880 } 15881 if (error == 0) { 15882 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15883 if (doing_tlp) { 15884 rack->rc_last_sent_tlp_past_cumack = 0; 15885 rack->rc_last_sent_tlp_seq_valid = 1; 15886 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15887 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15888 } 15889 } 15890 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15891 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15892 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15893 rack->r_ctl.retran_during_recovery += len; 15894 { 15895 int idx; 15896 15897 idx = (len / segsiz) + 3; 15898 if (idx >= TCP_MSS_ACCT_ATIMER) 15899 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15900 else 15901 counter_u64_add(rack_out_size[idx], 1); 15902 } 15903 if (tp->t_rtttime == 0) { 15904 tp->t_rtttime = ticks; 15905 tp->t_rtseq = startseq; 15906 KMOD_TCPSTAT_INC(tcps_segstimed); 15907 } 15908 counter_u64_add(rack_fto_rsm_send, 1); 15909 if (error && (error == ENOBUFS)) { 15910 if (rack->r_ctl.crte != NULL) { 15911 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 15912 } else 15913 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 15914 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15915 if (rack->rc_enobuf < 0x7f) 15916 rack->rc_enobuf++; 15917 if (slot < (10 * HPTS_USEC_IN_MSEC)) 15918 slot = 10 * HPTS_USEC_IN_MSEC; 15919 } else 15920 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 15921 if ((slot == 0) || 15922 (rack->rc_always_pace == 0) || 15923 (rack->r_rr_config == 1)) { 15924 /* 15925 * We have no pacing set or we 15926 * are using old-style rack or 15927 * we are overridden to use the old 1ms pacing. 15928 */ 15929 slot = rack->r_ctl.rc_min_to; 15930 } 15931 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 15932 #ifdef TCP_ACCOUNTING 15933 crtsc = get_cyclecount(); 15934 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15935 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 15936 } 15937 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15938 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 15939 } 15940 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15941 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 15942 } 15943 sched_unpin(); 15944 #endif 15945 return (0); 15946 failed: 15947 if (m) 15948 m_free(m); 15949 return (-1); 15950 } 15951 15952 static void 15953 rack_sndbuf_autoscale(struct tcp_rack *rack) 15954 { 15955 /* 15956 * Automatic sizing of send socket buffer. Often the send buffer 15957 * size is not optimally adjusted to the actual network conditions 15958 * at hand (delay bandwidth product). Setting the buffer size too 15959 * small limits throughput on links with high bandwidth and high 15960 * delay (eg. trans-continental/oceanic links). 
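 * (For example, a 100 Mbit/s path with a 100 msec RTT needs on the order of 1.25 Mbytes in flight to stay full.)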
Setting the 15961 * buffer size too big consumes too much real kernel memory, 15962 * especially with many connections on busy servers. 15963 * 15964 * The criteria to step up the send buffer one notch are: 15965 * 1. receive window of remote host is larger than send buffer 15966 * (with a fudge factor of 5/4th); 15967 * 2. send buffer is filled to 7/8th with data (so we actually 15968 * have data to make use of it); 15969 * 3. send buffer fill has not hit maximal automatic size; 15970 * 4. our send window (slow start and cogestion controlled) is 15971 * larger than sent but unacknowledged data in send buffer. 15972 * 15973 * Note that the rack version moves things much faster since 15974 * we want to avoid hitting cache lines in the rack_fast_output() 15975 * path so this is called much less often and thus moves 15976 * the SB forward by a percentage. 15977 */ 15978 struct socket *so; 15979 struct tcpcb *tp; 15980 uint32_t sendwin, scaleup; 15981 15982 tp = rack->rc_tp; 15983 so = rack->rc_inp->inp_socket; 15984 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 15985 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 15986 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 15987 sbused(&so->so_snd) >= 15988 (so->so_snd.sb_hiwat / 8 * 7) && 15989 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 15990 sendwin >= (sbused(&so->so_snd) - 15991 (tp->snd_nxt - tp->snd_una))) { 15992 if (rack_autosndbuf_inc) 15993 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 15994 else 15995 scaleup = V_tcp_autosndbuf_inc; 15996 if (scaleup < V_tcp_autosndbuf_inc) 15997 scaleup = V_tcp_autosndbuf_inc; 15998 scaleup += so->so_snd.sb_hiwat; 15999 if (scaleup > V_tcp_autosndbuf_max) 16000 scaleup = V_tcp_autosndbuf_max; 16001 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 16002 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16003 } 16004 } 16005 } 16006 16007 static int 16008 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16009 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16010 { 16011 /* 16012 * Enter to do fast output. We are given that the sched_pin is 16013 * in place (if accounting is compiled in) and the cycle count taken 16014 * at entry is in place in ts_val. The idea here is that 16015 * we know how many more bytes needs to be sent (presumably either 16016 * during pacing or to fill the cwnd and that was greater than 16017 * the max-burst). We have how much to send and all the info we 16018 * need to just send. 
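 * When rack_output() takes this path it has already checked that fast output is armed, no TLP is in progress and rcv_numsacks is zero.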
16019 */ 16020 #ifdef INET 16021 struct ip *ip = NULL; 16022 #endif 16023 struct udphdr *udp = NULL; 16024 struct tcphdr *th = NULL; 16025 struct mbuf *m, *s_mb; 16026 struct inpcb *inp; 16027 uint8_t *cpto; 16028 struct tcp_log_buffer *lgb; 16029 #ifdef TCP_ACCOUNTING 16030 uint64_t crtsc; 16031 #endif 16032 struct tcpopt to; 16033 u_char opt[TCP_MAXOLEN]; 16034 uint32_t hdrlen, optlen; 16035 #ifdef TCP_ACCOUNTING 16036 int cnt_thru = 1; 16037 #endif 16038 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16039 uint16_t flags; 16040 uint32_t s_soff; 16041 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16042 uint32_t if_hw_tsomaxsegsize; 16043 uint16_t add_flag = RACK_SENT_FP; 16044 #ifdef INET6 16045 struct ip6_hdr *ip6 = NULL; 16046 16047 if (rack->r_is_v6) { 16048 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16049 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16050 } else 16051 #endif /* INET6 */ 16052 { 16053 #ifdef INET 16054 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16055 hdrlen = sizeof(struct tcpiphdr); 16056 #endif 16057 } 16058 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16059 m = NULL; 16060 goto failed; 16061 } 16062 startseq = tp->snd_max; 16063 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16064 inp = rack->rc_inp; 16065 len = rack->r_ctl.fsb.left_to_send; 16066 to.to_flags = 0; 16067 flags = rack->r_ctl.fsb.tcp_flags; 16068 if (tp->t_flags & TF_RCVD_TSTMP) { 16069 to.to_tsval = ms_cts + tp->ts_offset; 16070 to.to_tsecr = tp->ts_recent; 16071 to.to_flags = TOF_TS; 16072 } 16073 optlen = tcp_addoptions(&to, opt); 16074 hdrlen += optlen; 16075 udp = rack->r_ctl.fsb.udp; 16076 if (udp) 16077 hdrlen += sizeof(struct udphdr); 16078 if (rack->r_ctl.rc_pace_max_segs) 16079 max_val = rack->r_ctl.rc_pace_max_segs; 16080 else if (rack->rc_user_set_max_segs) 16081 max_val = rack->rc_user_set_max_segs * segsiz; 16082 else 16083 max_val = len; 16084 if ((tp->t_flags & TF_TSO) && 16085 V_tcp_do_tso && 16086 (len > segsiz) && 16087 (tp->t_port == 0)) 16088 tso = 1; 16089 again: 16090 #ifdef INET6 16091 if (MHLEN < hdrlen + max_linkhdr) 16092 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16093 else 16094 #endif 16095 m = m_gethdr(M_NOWAIT, MT_DATA); 16096 if (m == NULL) 16097 goto failed; 16098 m->m_data += max_linkhdr; 16099 m->m_len = hdrlen; 16100 th = rack->r_ctl.fsb.th; 16101 /* Establish the len to send */ 16102 if (len > max_val) 16103 len = max_val; 16104 if ((tso) && (len + optlen > tp->t_maxseg)) { 16105 uint32_t if_hw_tsomax; 16106 int32_t max_len; 16107 16108 /* extract TSO information */ 16109 if_hw_tsomax = tp->t_tsomax; 16110 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16111 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16112 /* 16113 * Check if we should limit by maximum payload 16114 * length: 16115 */ 16116 if (if_hw_tsomax != 0) { 16117 /* compute maximum TSO length */ 16118 max_len = (if_hw_tsomax - hdrlen - 16119 max_linkhdr); 16120 if (max_len <= 0) { 16121 goto failed; 16122 } else if (len > max_len) { 16123 len = max_len; 16124 } 16125 } 16126 if (len <= segsiz) { 16127 /* 16128 * In case there are too many small fragments don't 16129 * use TSO: 16130 */ 16131 tso = 0; 16132 } 16133 } else { 16134 tso = 0; 16135 } 16136 if ((tso == 0) && (len > segsiz)) 16137 len = segsiz; 16138 if ((len == 0) || 16139 (len <= MHLEN - hdrlen - max_linkhdr)) { 16140 goto failed; 16141 } 16142 sb_offset = tp->snd_max - tp->snd_una; 16143 th->th_seq = htonl(tp->snd_max); 16144 th->th_ack = htonl(tp->rcv_nxt); 16145 
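/* Advertise the receive window cached in the fsb, scaled for the wire. */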
th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16146 if (th->th_win == 0) { 16147 tp->t_sndzerowin++; 16148 tp->t_flags |= TF_RXWIN0SENT; 16149 } else 16150 tp->t_flags &= ~TF_RXWIN0SENT; 16151 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16152 KMOD_TCPSTAT_INC(tcps_sndpack); 16153 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16154 #ifdef STATS 16155 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16156 len); 16157 #endif 16158 if (rack->r_ctl.fsb.m == NULL) 16159 goto failed; 16160 16161 /* s_mb and s_soff are saved for rack_log_output */ 16162 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16163 &s_mb, &s_soff); 16164 if (len <= segsiz) { 16165 /* 16166 * Must have ran out of mbufs for the copy 16167 * shorten it to no longer need tso. Lets 16168 * not put on sendalot since we are low on 16169 * mbufs. 16170 */ 16171 tso = 0; 16172 } 16173 if (rack->r_ctl.fsb.rfo_apply_push && 16174 (len == rack->r_ctl.fsb.left_to_send)) { 16175 flags |= TH_PUSH; 16176 add_flag |= RACK_HAD_PUSH; 16177 } 16178 if ((m->m_next == NULL) || (len <= 0)){ 16179 goto failed; 16180 } 16181 if (udp) { 16182 if (rack->r_is_v6) 16183 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16184 else 16185 ulen = hdrlen + len - sizeof(struct ip); 16186 udp->uh_ulen = htons(ulen); 16187 } 16188 m->m_pkthdr.rcvif = (struct ifnet *)0; 16189 if (TCPS_HAVERCVDSYN(tp->t_state) && 16190 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 16191 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16192 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16193 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16194 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16195 #ifdef INET6 16196 if (rack->r_is_v6) { 16197 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16198 ip6->ip6_flow |= htonl(ect << 20); 16199 } 16200 else 16201 #endif 16202 { 16203 #ifdef INET 16204 ip->ip_tos &= ~IPTOS_ECN_MASK; 16205 ip->ip_tos |= ect; 16206 #endif 16207 } 16208 } 16209 tcp_set_flags(th, flags); 16210 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16211 #ifdef INET6 16212 if (rack->r_is_v6) { 16213 if (tp->t_port) { 16214 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16215 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16216 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16217 th->th_sum = htons(0); 16218 UDPSTAT_INC(udps_opackets); 16219 } else { 16220 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16221 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16222 th->th_sum = in6_cksum_pseudo(ip6, 16223 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16224 0); 16225 } 16226 } 16227 #endif 16228 #if defined(INET6) && defined(INET) 16229 else 16230 #endif 16231 #ifdef INET 16232 { 16233 if (tp->t_port) { 16234 m->m_pkthdr.csum_flags = CSUM_UDP; 16235 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16236 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16237 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16238 th->th_sum = htons(0); 16239 UDPSTAT_INC(udps_opackets); 16240 } else { 16241 m->m_pkthdr.csum_flags = CSUM_TCP; 16242 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16243 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16244 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16245 IPPROTO_TCP + len + optlen)); 16246 } 16247 /* IP version must be set here for ipv4/ipv6 checking later */ 16248 KASSERT(ip->ip_v == IPVERSION, 16249 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16250 } 16251 #endif 16252 if (tso) { 16253 KASSERT(len > tp->t_maxseg - optlen, 16254 ("%s: 
len <= tso_segsz tp:%p", __func__, tp)); 16255 m->m_pkthdr.csum_flags |= CSUM_TSO; 16256 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16257 } 16258 #ifdef INET6 16259 if (rack->r_is_v6) { 16260 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16261 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16262 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16263 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16264 else 16265 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16266 } 16267 #endif 16268 #if defined(INET) && defined(INET6) 16269 else 16270 #endif 16271 #ifdef INET 16272 { 16273 ip->ip_len = htons(m->m_pkthdr.len); 16274 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16275 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16276 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16277 if (tp->t_port == 0 || len < V_tcp_minmss) { 16278 ip->ip_off |= htons(IP_DF); 16279 } 16280 } else { 16281 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16282 } 16283 } 16284 #endif 16285 /* Time to copy in our header */ 16286 cpto = mtod(m, uint8_t *); 16287 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16288 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16289 if (optlen) { 16290 bcopy(opt, th + 1, optlen); 16291 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16292 } else { 16293 th->th_off = sizeof(struct tcphdr) >> 2; 16294 } 16295 if (tcp_bblogging_on(rack->rc_tp)) { 16296 union tcp_log_stackspecific log; 16297 16298 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16299 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16300 if (rack->rack_no_prr) 16301 log.u_bbr.flex1 = 0; 16302 else 16303 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16304 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16305 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16306 log.u_bbr.flex4 = max_val; 16307 log.u_bbr.flex5 = 0; 16308 /* Save off the early/late values */ 16309 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16310 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16311 log.u_bbr.bw_inuse = rack_get_bw(rack); 16312 log.u_bbr.flex8 = 0; 16313 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16314 log.u_bbr.flex7 = 44; 16315 log.u_bbr.pkts_out = tp->t_maxseg; 16316 log.u_bbr.timeStamp = cts; 16317 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16318 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16319 log.u_bbr.delivered = 0; 16320 lgb = tcp_log_event(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16321 len, &log, false, NULL, NULL, 0, tv); 16322 } else 16323 lgb = NULL; 16324 #ifdef INET6 16325 if (rack->r_is_v6) { 16326 error = ip6_output(m, NULL, 16327 &inp->inp_route6, 16328 0, NULL, NULL, inp); 16329 } 16330 #endif 16331 #if defined(INET) && defined(INET6) 16332 else 16333 #endif 16334 #ifdef INET 16335 { 16336 error = ip_output(m, NULL, 16337 &inp->inp_route, 16338 0, 0, inp); 16339 } 16340 #endif 16341 if (lgb) { 16342 lgb->tlb_errno = error; 16343 lgb = NULL; 16344 } 16345 if (error) { 16346 *send_err = error; 16347 m = NULL; 16348 goto failed; 16349 } 16350 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16351 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16352 m = NULL; 16353 if (tp->snd_una == tp->snd_max) { 16354 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16355 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16356 tp->t_acktime = ticks; 16357 } 16358 if (error == 0) 16359 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16360 16361 rack->forced_ack = 0; /* If we send something zap the 
FA flag */ 16362 tot_len += len; 16363 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16364 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16365 tp->snd_max += len; 16366 tp->snd_nxt = tp->snd_max; 16367 { 16368 int idx; 16369 16370 idx = (len / segsiz) + 3; 16371 if (idx >= TCP_MSS_ACCT_ATIMER) 16372 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16373 else 16374 counter_u64_add(rack_out_size[idx], 1); 16375 } 16376 if (len <= rack->r_ctl.fsb.left_to_send) 16377 rack->r_ctl.fsb.left_to_send -= len; 16378 else 16379 rack->r_ctl.fsb.left_to_send = 0; 16380 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16381 rack->r_fast_output = 0; 16382 rack->r_ctl.fsb.left_to_send = 0; 16383 /* At the end of fast_output scale up the sb */ 16384 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16385 rack_sndbuf_autoscale(rack); 16386 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16387 } 16388 if (tp->t_rtttime == 0) { 16389 tp->t_rtttime = ticks; 16390 tp->t_rtseq = startseq; 16391 KMOD_TCPSTAT_INC(tcps_segstimed); 16392 } 16393 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16394 (max_val > len) && 16395 (tso == 0)) { 16396 max_val -= len; 16397 len = segsiz; 16398 th = rack->r_ctl.fsb.th; 16399 #ifdef TCP_ACCOUNTING 16400 cnt_thru++; 16401 #endif 16402 goto again; 16403 } 16404 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16405 counter_u64_add(rack_fto_send, 1); 16406 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16407 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16408 #ifdef TCP_ACCOUNTING 16409 crtsc = get_cyclecount(); 16410 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16411 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16412 } 16413 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16414 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16415 } 16416 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16417 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16418 } 16419 sched_unpin(); 16420 #endif 16421 return (0); 16422 failed: 16423 if (m) 16424 m_free(m); 16425 rack->r_fast_output = 0; 16426 return (-1); 16427 } 16428 16429 static struct rack_sendmap * 16430 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 16431 { 16432 struct rack_sendmap *rsm = NULL; 16433 struct rack_sendmap fe; 16434 int thresh; 16435 16436 restart: 16437 fe.r_start = rack->r_ctl.last_collapse_point; 16438 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 16439 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 16440 /* Nothing, strange turn off validity */ 16441 rack->r_collapse_point_valid = 0; 16442 return (NULL); 16443 } 16444 /* Can we send it yet? */ 16445 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 16446 /* 16447 * Receiver window has not grown enough for 16448 * the segment to be put on the wire. 16449 */ 16450 return (NULL); 16451 } 16452 if (rsm->r_flags & RACK_ACKED) { 16453 /* 16454 * It has been sacked, lets move to the 16455 * next one if possible. 16456 */ 16457 rack->r_ctl.last_collapse_point = rsm->r_end; 16458 /* Are we done? */ 16459 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16460 rack->r_ctl.high_collapse_point)) { 16461 rack->r_collapse_point_valid = 0; 16462 return (NULL); 16463 } 16464 goto restart; 16465 } 16466 /* Now has it been long enough ? 
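 (i.e. has more than the RACK retransmit threshold, computed from the current RTT, elapsed since this rsm was last sent?)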
*/ 16467 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 16468 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 16469 rack_log_collapse(rack, rsm->r_start, 16470 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16471 thresh, __LINE__, 6, rsm->r_flags, rsm); 16472 return (rsm); 16473 } 16474 /* Not enough time */ 16475 rack_log_collapse(rack, rsm->r_start, 16476 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16477 thresh, __LINE__, 7, rsm->r_flags, rsm); 16478 return (NULL); 16479 } 16480 16481 static int 16482 rack_output(struct tcpcb *tp) 16483 { 16484 struct socket *so; 16485 uint32_t recwin; 16486 uint32_t sb_offset, s_moff = 0; 16487 int32_t len, error = 0; 16488 uint16_t flags; 16489 struct mbuf *m, *s_mb = NULL; 16490 struct mbuf *mb; 16491 uint32_t if_hw_tsomaxsegcount = 0; 16492 uint32_t if_hw_tsomaxsegsize; 16493 int32_t segsiz, minseg; 16494 long tot_len_this_send = 0; 16495 #ifdef INET 16496 struct ip *ip = NULL; 16497 #endif 16498 struct udphdr *udp = NULL; 16499 struct tcp_rack *rack; 16500 struct tcphdr *th; 16501 uint8_t pass = 0; 16502 uint8_t mark = 0; 16503 uint8_t wanted_cookie = 0; 16504 u_char opt[TCP_MAXOLEN]; 16505 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16506 uint32_t rack_seq; 16507 16508 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16509 unsigned ipsec_optlen = 0; 16510 16511 #endif 16512 int32_t idle, sendalot; 16513 int32_t sub_from_prr = 0; 16514 volatile int32_t sack_rxmit; 16515 struct rack_sendmap *rsm = NULL; 16516 int32_t tso, mtu; 16517 struct tcpopt to; 16518 int32_t slot = 0; 16519 int32_t sup_rack = 0; 16520 uint32_t cts, ms_cts, delayed, early; 16521 uint16_t add_flag = RACK_SENT_SP; 16522 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16523 uint8_t hpts_calling, doing_tlp = 0; 16524 uint32_t cwnd_to_use, pace_max_seg; 16525 int32_t do_a_prefetch = 0; 16526 int32_t prefetch_rsm = 0; 16527 int32_t orig_len = 0; 16528 struct timeval tv; 16529 int32_t prefetch_so_done = 0; 16530 struct tcp_log_buffer *lgb; 16531 struct inpcb *inp = tptoinpcb(tp); 16532 struct sockbuf *sb; 16533 uint64_t ts_val = 0; 16534 #ifdef TCP_ACCOUNTING 16535 uint64_t crtsc; 16536 #endif 16537 #ifdef INET6 16538 struct ip6_hdr *ip6 = NULL; 16539 int32_t isipv6; 16540 #endif 16541 bool hw_tls = false; 16542 16543 NET_EPOCH_ASSERT(); 16544 INP_WLOCK_ASSERT(inp); 16545 16546 /* setup and take the cache hits here */ 16547 rack = (struct tcp_rack *)tp->t_fb_ptr; 16548 #ifdef TCP_ACCOUNTING 16549 sched_pin(); 16550 ts_val = get_cyclecount(); 16551 #endif 16552 hpts_calling = inp->inp_hpts_calls; 16553 #ifdef TCP_OFFLOAD 16554 if (tp->t_flags & TF_TOE) { 16555 #ifdef TCP_ACCOUNTING 16556 sched_unpin(); 16557 #endif 16558 return (tcp_offload_output(tp)); 16559 } 16560 #endif 16561 /* 16562 * For TFO connections in SYN_RECEIVED, only allow the initial 16563 * SYN|ACK and those sent by the retransmit timer. 
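 * The code below returns without sending when the SYN|ACK is already out and no retransmit (rc_resend) is pending.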
16564 */ 16565 if (IS_FASTOPEN(tp->t_flags) && 16566 (tp->t_state == TCPS_SYN_RECEIVED) && 16567 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16568 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16569 #ifdef TCP_ACCOUNTING 16570 sched_unpin(); 16571 #endif 16572 return (0); 16573 } 16574 #ifdef INET6 16575 if (rack->r_state) { 16576 /* Use the cache line loaded if possible */ 16577 isipv6 = rack->r_is_v6; 16578 } else { 16579 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16580 } 16581 #endif 16582 early = 0; 16583 cts = tcp_get_usecs(&tv); 16584 ms_cts = tcp_tv_to_mssectick(&tv); 16585 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16586 tcp_in_hpts(rack->rc_inp)) { 16587 /* 16588 * We are on the hpts for some timer but not hptsi output. 16589 * Remove from the hpts unconditionally. 16590 */ 16591 rack_timer_cancel(tp, rack, cts, __LINE__); 16592 } 16593 /* Are we pacing and late? */ 16594 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16595 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16596 /* We are delayed */ 16597 delayed = cts - rack->r_ctl.rc_last_output_to; 16598 } else { 16599 delayed = 0; 16600 } 16601 /* Do the timers, which may override the pacer */ 16602 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16603 int retval; 16604 16605 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16606 &doing_tlp); 16607 if (retval != 0) { 16608 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16609 #ifdef TCP_ACCOUNTING 16610 sched_unpin(); 16611 #endif 16612 /* 16613 * If timers want tcp_drop(), then pass error out, 16614 * otherwise suppress it. 16615 */ 16616 return (retval < 0 ? retval : 0); 16617 } 16618 } 16619 if (rack->rc_in_persist) { 16620 if (tcp_in_hpts(rack->rc_inp) == 0) { 16621 /* Timer is not running */ 16622 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16623 } 16624 #ifdef TCP_ACCOUNTING 16625 sched_unpin(); 16626 #endif 16627 return (0); 16628 } 16629 if ((rack->rc_ack_required == 1) && 16630 (rack->r_timer_override == 0)){ 16631 /* A timeout occurred and no ack has arrived */ 16632 if (tcp_in_hpts(rack->rc_inp) == 0) { 16633 /* Timer is not running */ 16634 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16635 } 16636 #ifdef TCP_ACCOUNTING 16637 sched_unpin(); 16638 #endif 16639 return (0); 16640 } 16641 if ((rack->r_timer_override) || 16642 (rack->rc_ack_can_sendout_data) || 16643 (delayed) || 16644 (tp->t_state < TCPS_ESTABLISHED)) { 16645 rack->rc_ack_can_sendout_data = 0; 16646 if (tcp_in_hpts(rack->rc_inp)) 16647 tcp_hpts_remove(rack->rc_inp); 16648 } else if (tcp_in_hpts(rack->rc_inp)) { 16649 /* 16650 * On the hpts you can't pass even if ACKNOW is on, we will 16651 * when the hpts fires. 
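 * Just account the blocked time and return; the deferred send happens when the hpts callback runs.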
16652 */ 16653 #ifdef TCP_ACCOUNTING 16654 crtsc = get_cyclecount(); 16655 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16656 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16657 } 16658 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16659 tp->tcp_cnt_counters[SND_BLOCKED]++; 16660 } 16661 sched_unpin(); 16662 #endif 16663 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16664 return (0); 16665 } 16666 rack->rc_inp->inp_hpts_calls = 0; 16667 /* Finish out both pacing early and late accounting */ 16668 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16669 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16670 early = rack->r_ctl.rc_last_output_to - cts; 16671 } else 16672 early = 0; 16673 if (delayed) { 16674 rack->r_ctl.rc_agg_delayed += delayed; 16675 rack->r_late = 1; 16676 } else if (early) { 16677 rack->r_ctl.rc_agg_early += early; 16678 rack->r_early = 1; 16679 } 16680 /* Now that early/late accounting is done turn off the flag */ 16681 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16682 rack->r_wanted_output = 0; 16683 rack->r_timer_override = 0; 16684 if ((tp->t_state != rack->r_state) && 16685 TCPS_HAVEESTABLISHED(tp->t_state)) { 16686 rack_set_state(tp, rack); 16687 } 16688 if ((rack->r_fast_output) && 16689 (doing_tlp == 0) && 16690 (tp->rcv_numsacks == 0)) { 16691 int ret; 16692 16693 error = 0; 16694 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16695 if (ret >= 0) 16696 return(ret); 16697 else if (error) { 16698 inp = rack->rc_inp; 16699 so = inp->inp_socket; 16700 sb = &so->so_snd; 16701 goto nomore; 16702 } 16703 } 16704 inp = rack->rc_inp; 16705 /* 16706 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16707 * only allow the initial SYN or SYN|ACK and those sent 16708 * by the retransmit timer. 16709 */ 16710 if (IS_FASTOPEN(tp->t_flags) && 16711 ((tp->t_state == TCPS_SYN_RECEIVED) || 16712 (tp->t_state == TCPS_SYN_SENT)) && 16713 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16714 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16715 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16716 so = inp->inp_socket; 16717 sb = &so->so_snd; 16718 goto just_return_nolock; 16719 } 16720 /* 16721 * Determine length of data that should be transmitted, and flags 16722 * that will be used. If there is some data or critical controls 16723 * (SYN, RST) to send, then transmit; otherwise, investigate 16724 * further. 
16725 */ 16726 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16727 if (tp->t_idle_reduce) { 16728 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16729 rack_cc_after_idle(rack, tp); 16730 } 16731 tp->t_flags &= ~TF_LASTIDLE; 16732 if (idle) { 16733 if (tp->t_flags & TF_MORETOCOME) { 16734 tp->t_flags |= TF_LASTIDLE; 16735 idle = 0; 16736 } 16737 } 16738 if ((tp->snd_una == tp->snd_max) && 16739 rack->r_ctl.rc_went_idle_time && 16740 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16741 idle = cts - rack->r_ctl.rc_went_idle_time; 16742 if (idle > rack_min_probertt_hold) { 16743 /* Count as a probe rtt */ 16744 if (rack->in_probe_rtt == 0) { 16745 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16746 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16747 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16748 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16749 } else { 16750 rack_exit_probertt(rack, cts); 16751 } 16752 } 16753 idle = 0; 16754 } 16755 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16756 rack_init_fsb_block(tp, rack); 16757 again: 16758 /* 16759 * If we've recently taken a timeout, snd_max will be greater than 16760 * snd_nxt. There may be SACK information that allows us to avoid 16761 * resending already delivered data. Adjust snd_nxt accordingly. 16762 */ 16763 sendalot = 0; 16764 cts = tcp_get_usecs(&tv); 16765 ms_cts = tcp_tv_to_mssectick(&tv); 16766 tso = 0; 16767 mtu = 0; 16768 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16769 minseg = segsiz; 16770 if (rack->r_ctl.rc_pace_max_segs == 0) 16771 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16772 else 16773 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16774 sb_offset = tp->snd_max - tp->snd_una; 16775 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16776 flags = tcp_outflags[tp->t_state]; 16777 while (rack->rc_free_cnt < rack_free_cache) { 16778 rsm = rack_alloc(rack); 16779 if (rsm == NULL) { 16780 if (inp->inp_hpts_calls) 16781 /* Retry in a ms */ 16782 slot = (1 * HPTS_USEC_IN_MSEC); 16783 so = inp->inp_socket; 16784 sb = &so->so_snd; 16785 goto just_return_nolock; 16786 } 16787 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16788 rack->rc_free_cnt++; 16789 rsm = NULL; 16790 } 16791 if (inp->inp_hpts_calls) 16792 inp->inp_hpts_calls = 0; 16793 sack_rxmit = 0; 16794 len = 0; 16795 rsm = NULL; 16796 if (flags & TH_RST) { 16797 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16798 so = inp->inp_socket; 16799 sb = &so->so_snd; 16800 goto send; 16801 } 16802 if (rack->r_ctl.rc_resend) { 16803 /* Retransmit timer */ 16804 rsm = rack->r_ctl.rc_resend; 16805 rack->r_ctl.rc_resend = NULL; 16806 len = rsm->r_end - rsm->r_start; 16807 sack_rxmit = 1; 16808 sendalot = 0; 16809 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16810 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16811 __func__, __LINE__, 16812 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16813 sb_offset = rsm->r_start - tp->snd_una; 16814 if (len >= segsiz) 16815 len = segsiz; 16816 } else if (rack->r_collapse_point_valid && 16817 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 16818 /* 16819 * If an RSM is returned then enough time has passed 16820 * for us to retransmit it. Move up the collapse point, 16821 * since this rsm has its chance to retransmit now. 
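 * If that advance consumes the last collapsed sequence, the collapse point is invalidated just below.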
16822 */ 16823 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); 16824 rack->r_ctl.last_collapse_point = rsm->r_end; 16825 /* Are we done? */ 16826 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16827 rack->r_ctl.high_collapse_point)) 16828 rack->r_collapse_point_valid = 0; 16829 sack_rxmit = 1; 16830 /* We are not doing a TLP */ 16831 doing_tlp = 0; 16832 len = rsm->r_end - rsm->r_start; 16833 sb_offset = rsm->r_start - tp->snd_una; 16834 sendalot = 0; 16835 if ((rack->full_size_rxt == 0) && 16836 (rack->shape_rxt_to_pacing_min == 0) && 16837 (len >= segsiz)) 16838 len = segsiz; 16839 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16840 /* We have a retransmit that takes precedence */ 16841 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16842 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 16843 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16844 /* Enter recovery if not induced by a time-out */ 16845 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 16846 } 16847 #ifdef INVARIANTS 16848 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16849 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16850 tp, rack, rsm, rsm->r_start, tp->snd_una); 16851 } 16852 #endif 16853 len = rsm->r_end - rsm->r_start; 16854 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16855 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16856 __func__, __LINE__, 16857 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16858 sb_offset = rsm->r_start - tp->snd_una; 16859 sendalot = 0; 16860 if (len >= segsiz) 16861 len = segsiz; 16862 if (len > 0) { 16863 sack_rxmit = 1; 16864 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16865 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16866 min(len, segsiz)); 16867 } 16868 } else if (rack->r_ctl.rc_tlpsend) { 16869 /* Tail loss probe */ 16870 long cwin; 16871 long tlen; 16872 16873 /* 16874 * Check if we can do a TLP with a RACK'd packet 16875 * this can happen if we are not doing the rack 16876 * cheat and we skipped to a TLP and it 16877 * went off. 16878 */ 16879 rsm = rack->r_ctl.rc_tlpsend; 16880 /* We are doing a TLP make sure the flag is preent */ 16881 rsm->r_flags |= RACK_TLP; 16882 rack->r_ctl.rc_tlpsend = NULL; 16883 sack_rxmit = 1; 16884 tlen = rsm->r_end - rsm->r_start; 16885 if (tlen > segsiz) 16886 tlen = segsiz; 16887 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16888 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16889 __func__, __LINE__, 16890 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16891 sb_offset = rsm->r_start - tp->snd_una; 16892 cwin = min(tp->snd_wnd, tlen); 16893 len = cwin; 16894 } 16895 if (rack->r_must_retran && 16896 (doing_tlp == 0) && 16897 (SEQ_GT(tp->snd_max, tp->snd_una)) && 16898 (rsm == NULL)) { 16899 /* 16900 * There are two different ways that we 16901 * can get into this block: 16902 * a) This is a non-sack connection, we had a time-out 16903 * and thus r_must_retran was set and everything 16904 * left outstanding as been marked for retransmit. 16905 * b) The MTU of the path shrank, so that everything 16906 * was marked to be retransmitted with the smaller 16907 * mtu and r_must_retran was set. 16908 * 16909 * This means that we expect the sendmap (outstanding) 16910 * to all be marked must. We can use the tmap to 16911 * look at them. 16912 * 16913 */ 16914 int sendwin, flight; 16915 16916 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 16917 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 16918 if (flight >= sendwin) { 16919 /* 16920 * We can't send yet. 
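 * Everything that min(snd_wnd, snd_cwnd) allows is already in flight, so just return and wait for acks.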
16921 */ 16922 so = inp->inp_socket; 16923 sb = &so->so_snd; 16924 goto just_return_nolock; 16925 } 16926 /* 16927 * This is the case a/b mentioned above. All 16928 * outstanding/not-acked should be marked. 16929 * We can use the tmap to find them. 16930 */ 16931 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16932 if (rsm == NULL) { 16933 /* TSNH */ 16934 rack->r_must_retran = 0; 16935 rack->r_ctl.rc_out_at_rto = 0; 16936 so = inp->inp_socket; 16937 sb = &so->so_snd; 16938 goto just_return_nolock; 16939 } 16940 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 16941 /* 16942 * The first one does not have the flag, did we collapse 16943 * further up in our list? 16944 */ 16945 rack->r_must_retran = 0; 16946 rack->r_ctl.rc_out_at_rto = 0; 16947 rsm = NULL; 16948 sack_rxmit = 0; 16949 } else { 16950 sack_rxmit = 1; 16951 len = rsm->r_end - rsm->r_start; 16952 sb_offset = rsm->r_start - tp->snd_una; 16953 sendalot = 0; 16954 if ((rack->full_size_rxt == 0) && 16955 (rack->shape_rxt_to_pacing_min == 0) && 16956 (len >= segsiz)) 16957 len = segsiz; 16958 /* 16959 * Delay removing the flag RACK_MUST_RXT so 16960 * that the fastpath for retransmit will 16961 * work with this rsm. 16962 */ 16963 } 16964 } 16965 /* 16966 * Enforce a connection sendmap count limit if set 16967 * as long as we are not retransmiting. 16968 */ 16969 if ((rsm == NULL) && 16970 (rack->do_detection == 0) && 16971 (V_tcp_map_entries_limit > 0) && 16972 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 16973 counter_u64_add(rack_to_alloc_limited, 1); 16974 if (!rack->alloc_limit_reported) { 16975 rack->alloc_limit_reported = 1; 16976 counter_u64_add(rack_alloc_limited_conns, 1); 16977 } 16978 so = inp->inp_socket; 16979 sb = &so->so_snd; 16980 goto just_return_nolock; 16981 } 16982 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 16983 /* we are retransmitting the fin */ 16984 len--; 16985 if (len) { 16986 /* 16987 * When retransmitting data do *not* include the 16988 * FIN. This could happen from a TLP probe. 
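 * Only when the retransmission covers nothing but the FIN's own
 * sequence slot (len drops to zero above) is TH_FIN left set so
 * the FIN itself is resent.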
16989 */ 16990 flags &= ~TH_FIN; 16991 } 16992 } 16993 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 16994 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 16995 int ret; 16996 16997 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 16998 if (ret == 0) 16999 return (0); 17000 } 17001 so = inp->inp_socket; 17002 sb = &so->so_snd; 17003 if (do_a_prefetch == 0) { 17004 kern_prefetch(sb, &do_a_prefetch); 17005 do_a_prefetch = 1; 17006 } 17007 #ifdef NETFLIX_SHARED_CWND 17008 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17009 rack->rack_enable_scwnd) { 17010 /* We are doing cwnd sharing */ 17011 if (rack->gp_ready && 17012 (rack->rack_attempted_scwnd == 0) && 17013 (rack->r_ctl.rc_scw == NULL) && 17014 tp->t_lib) { 17015 /* The pcbid is in, lets make an attempt */ 17016 counter_u64_add(rack_try_scwnd, 1); 17017 rack->rack_attempted_scwnd = 1; 17018 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17019 &rack->r_ctl.rc_scw_index, 17020 segsiz); 17021 } 17022 if (rack->r_ctl.rc_scw && 17023 (rack->rack_scwnd_is_idle == 1) && 17024 sbavail(&so->so_snd)) { 17025 /* we are no longer out of data */ 17026 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17027 rack->rack_scwnd_is_idle = 0; 17028 } 17029 if (rack->r_ctl.rc_scw) { 17030 /* First lets update and get the cwnd */ 17031 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17032 rack->r_ctl.rc_scw_index, 17033 tp->snd_cwnd, tp->snd_wnd, segsiz); 17034 } 17035 } 17036 #endif 17037 /* 17038 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17039 * state flags. 17040 */ 17041 if (tp->t_flags & TF_NEEDFIN) 17042 flags |= TH_FIN; 17043 if (tp->t_flags & TF_NEEDSYN) 17044 flags |= TH_SYN; 17045 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17046 void *end_rsm; 17047 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17048 if (end_rsm) 17049 kern_prefetch(end_rsm, &prefetch_rsm); 17050 prefetch_rsm = 1; 17051 } 17052 SOCKBUF_LOCK(sb); 17053 /* 17054 * If snd_nxt == snd_max and we have transmitted a FIN, the 17055 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17056 * negative length. This can also occur when TCP opens up its 17057 * congestion window while receiving additional duplicate acks after 17058 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17059 * the fast-retransmit. 17060 * 17061 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17062 * set to snd_una, the sb_offset will be 0, and the length may wind 17063 * up 0. 17064 * 17065 * If sack_rxmit is true we are retransmitting from the scoreboard 17066 * in which case len is already set. 
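 * Otherwise len is computed below from what the socket buffer, the
 * congestion window and, while in recovery, the PRR budget allow
 * us to send as new data.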
17067 */ 17068 if ((sack_rxmit == 0) && 17069 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17070 uint32_t avail; 17071 17072 avail = sbavail(sb); 17073 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17074 sb_offset = tp->snd_nxt - tp->snd_una; 17075 else 17076 sb_offset = 0; 17077 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17078 if (rack->r_ctl.rc_tlp_new_data) { 17079 /* TLP is forcing out new data */ 17080 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17081 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17082 } 17083 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17084 if (tp->snd_wnd > sb_offset) 17085 len = tp->snd_wnd - sb_offset; 17086 else 17087 len = 0; 17088 } else { 17089 len = rack->r_ctl.rc_tlp_new_data; 17090 } 17091 rack->r_ctl.rc_tlp_new_data = 0; 17092 } else { 17093 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17094 } 17095 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17096 /* 17097 * For prr=off, we need to send only 1 MSS 17098 * at a time. We do this because another sack could 17099 * be arriving that causes us to send retransmits and 17100 * we don't want to be on a long pace due to a larger send 17101 * that keeps us from sending out the retransmit. 17102 */ 17103 len = segsiz; 17104 } 17105 } else { 17106 uint32_t outstanding; 17107 /* 17108 * We are inside of a Fast recovery episode, this 17109 * is caused by a SACK or 3 dup acks. At this point 17110 * we have sent all the retransmissions and we rely 17111 * on PRR to dictate what we will send in the form of 17112 * new data. 17113 */ 17114 17115 outstanding = tp->snd_max - tp->snd_una; 17116 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17117 if (tp->snd_wnd > outstanding) { 17118 len = tp->snd_wnd - outstanding; 17119 /* Check to see if we have the data */ 17120 if ((sb_offset + len) > avail) { 17121 /* It does not all fit */ 17122 if (avail > sb_offset) 17123 len = avail - sb_offset; 17124 else 17125 len = 0; 17126 } 17127 } else { 17128 len = 0; 17129 } 17130 } else if (avail > sb_offset) { 17131 len = avail - sb_offset; 17132 } else { 17133 len = 0; 17134 } 17135 if (len > 0) { 17136 if (len > rack->r_ctl.rc_prr_sndcnt) { 17137 len = rack->r_ctl.rc_prr_sndcnt; 17138 } 17139 if (len > 0) { 17140 sub_from_prr = 1; 17141 } 17142 } 17143 if (len > segsiz) { 17144 /* 17145 * We should never send more than a MSS when 17146 * retransmitting or sending new data in prr 17147 * mode unless the override flag is on. Most 17148 * likely the PRR algorithm is not going to 17149 * let us send a lot as well :-) 17150 */ 17151 if (rack->r_ctl.rc_prr_sendalot == 0) { 17152 len = segsiz; 17153 } 17154 } else if (len < segsiz) { 17155 /* 17156 * Do we send any? The idea here is if the 17157 * send empty's the socket buffer we want to 17158 * do it. However if not then lets just wait 17159 * for our prr_sndcnt to get bigger. 17160 */ 17161 long leftinsb; 17162 17163 leftinsb = sbavail(sb) - sb_offset; 17164 if (leftinsb > len) { 17165 /* This send does not empty the sb */ 17166 len = 0; 17167 } 17168 } 17169 } 17170 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17171 /* 17172 * If you have not established 17173 * and are not doing FAST OPEN 17174 * no data please. 
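 * In practice this keeps pre-established transmissions down to the
 * bare handshake segments; only a TFO exchange that is permitted to
 * carry a cookie and data gets payload before the handshake
 * completes.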
17175 */ 17176 if ((sack_rxmit == 0) && 17177 (!IS_FASTOPEN(tp->t_flags))){ 17178 len = 0; 17179 sb_offset = 0; 17180 } 17181 } 17182 if (prefetch_so_done == 0) { 17183 kern_prefetch(so, &prefetch_so_done); 17184 prefetch_so_done = 1; 17185 } 17186 /* 17187 * Lop off SYN bit if it has already been sent. However, if this is 17188 * SYN-SENT state and if segment contains data and if we don't know 17189 * that foreign host supports TAO, suppress sending segment. 17190 */ 17191 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17192 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17193 /* 17194 * When sending additional segments following a TFO SYN|ACK, 17195 * do not include the SYN bit. 17196 */ 17197 if (IS_FASTOPEN(tp->t_flags) && 17198 (tp->t_state == TCPS_SYN_RECEIVED)) 17199 flags &= ~TH_SYN; 17200 } 17201 /* 17202 * Be careful not to send data and/or FIN on SYN segments. This 17203 * measure is needed to prevent interoperability problems with not 17204 * fully conformant TCP implementations. 17205 */ 17206 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17207 len = 0; 17208 flags &= ~TH_FIN; 17209 } 17210 /* 17211 * On TFO sockets, ensure no data is sent in the following cases: 17212 * 17213 * - When retransmitting SYN|ACK on a passively-created socket 17214 * 17215 * - When retransmitting SYN on an actively created socket 17216 * 17217 * - When sending a zero-length cookie (cookie request) on an 17218 * actively created socket 17219 * 17220 * - When the socket is in the CLOSED state (RST is being sent) 17221 */ 17222 if (IS_FASTOPEN(tp->t_flags) && 17223 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17224 ((tp->t_state == TCPS_SYN_SENT) && 17225 (tp->t_tfo_client_cookie_len == 0)) || 17226 (flags & TH_RST))) { 17227 sack_rxmit = 0; 17228 len = 0; 17229 } 17230 /* Without fast-open there should never be data sent on a SYN */ 17231 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17232 tp->snd_nxt = tp->iss; 17233 len = 0; 17234 } 17235 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17236 /* We only send 1 MSS if we have a DSACK block */ 17237 add_flag |= RACK_SENT_W_DSACK; 17238 len = segsiz; 17239 } 17240 orig_len = len; 17241 if (len <= 0) { 17242 /* 17243 * If FIN has been sent but not acked, but we haven't been 17244 * called to retransmit, len will be < 0. Otherwise, window 17245 * shrank after we sent into it. If window shrank to 0, 17246 * cancel pending retransmit, pull snd_nxt back to (closed) 17247 * window, and set the persist timer if it isn't already 17248 * going. If the window didn't close completely, just wait 17249 * for an ACK. 17250 * 17251 * We also do a general check here to ensure that we will 17252 * set the persist timer when we have data to send, but a 17253 * 0-byte window. This makes sure the persist timer is set 17254 * even if the packet hits one of the "goto send" lines 17255 * below. 17256 */ 17257 len = 0; 17258 if ((tp->snd_wnd == 0) && 17259 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17260 (tp->snd_una == tp->snd_max) && 17261 (sb_offset < (int)sbavail(sb))) { 17262 rack_enter_persist(tp, rack, cts); 17263 } 17264 } else if ((rsm == NULL) && 17265 (doing_tlp == 0) && 17266 (len < pace_max_seg)) { 17267 /* 17268 * We are not sending a maximum sized segment for 17269 * some reason. Should we not send anything (think 17270 * sws or persists)? 
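 * The branches below are a pacing-aware form of silly window
 * avoidance: a tiny receive window, a cwnd that will not pass a
 * minimum pacing segment, a filled send window, or hardware pacing
 * still waiting on returning acks each cause us to hold off rather
 * than dribble out a short send, possibly entering persists when
 * nothing is outstanding.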
17271 */ 17272 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17273 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17274 (len < minseg) && 17275 (len < (int)(sbavail(sb) - sb_offset))) { 17276 /* 17277 * Here the rwnd is less than 17278 * the minimum pacing size, this is not a retransmit, 17279 * we are established and 17280 * the send is not the last in the socket buffer 17281 * we send nothing, and we may enter persists 17282 * if nothing is outstanding. 17283 */ 17284 len = 0; 17285 if (tp->snd_max == tp->snd_una) { 17286 /* 17287 * Nothing out we can 17288 * go into persists. 17289 */ 17290 rack_enter_persist(tp, rack, cts); 17291 } 17292 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17293 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17294 (len < (int)(sbavail(sb) - sb_offset)) && 17295 (len < minseg)) { 17296 /* 17297 * Here we are not retransmitting, and 17298 * the cwnd is not so small that we could 17299 * not send at least a min size (rxt timer 17300 * not having gone off), We have 2 segments or 17301 * more already in flight, its not the tail end 17302 * of the socket buffer and the cwnd is blocking 17303 * us from sending out a minimum pacing segment size. 17304 * Lets not send anything. 17305 */ 17306 len = 0; 17307 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17308 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17309 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17310 (len < (int)(sbavail(sb) - sb_offset)) && 17311 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17312 /* 17313 * Here we have a send window but we have 17314 * filled it up and we can't send another pacing segment. 17315 * We also have in flight more than 2 segments 17316 * and we are not completing the sb i.e. we allow 17317 * the last bytes of the sb to go out even if 17318 * its not a full pacing segment. 17319 */ 17320 len = 0; 17321 } else if ((rack->r_ctl.crte != NULL) && 17322 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17323 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17324 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17325 (len < (int)(sbavail(sb) - sb_offset))) { 17326 /* 17327 * Here we are doing hardware pacing, this is not a TLP, 17328 * we are not sending a pace max segment size, there is rwnd 17329 * room to send at least N pace_max_seg, the cwnd is greater 17330 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17331 * more segments in flight and its not the tail of the socket buffer. 17332 * 17333 * We don't want to send instead we need to get more ack's in to 17334 * allow us to send a full pacing segment. Normally, if we are pacing 17335 * about the right speed, we should have finished our pacing 17336 * send as most of the acks have come back if we are at the 17337 * right rate. This is a bit fuzzy since return path delay 17338 * can delay the acks, which is why we want to make sure we 17339 * have cwnd space to have a bit more than a max pace segments in flight. 17340 * 17341 * If we have not gotten our acks back we are pacing at too high a 17342 * rate delaying will not hurt and will bring our GP estimate down by 17343 * injecting the delay. If we don't do this we will send 17344 * 2 MSS out in response to the acks being clocked in which 17345 * defeats the point of hw-pacing (i.e. to help us get 17346 * larger TSO's out). 17347 */ 17348 len = 0; 17349 17350 } 17351 17352 } 17353 /* len will be >= 0 after this point. 
*/ 17354 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17355 rack_sndbuf_autoscale(rack); 17356 /* 17357 * Decide if we can use TCP Segmentation Offloading (if supported by 17358 * hardware). 17359 * 17360 * TSO may only be used if we are in a pure bulk sending state. The 17361 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17362 * options prevent using TSO. With TSO the TCP header is the same 17363 * (except for the sequence number) for all generated packets. This 17364 * makes it impossible to transmit any options which vary per 17365 * generated segment or packet. 17366 * 17367 * IPv4 handling has a clear separation of ip options and ip header 17368 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17369 * the right thing below to provide length of just ip options and thus 17370 * checking for ipoptlen is enough to decide if ip options are present. 17371 */ 17372 ipoptlen = 0; 17373 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17374 /* 17375 * Pre-calculate here as we save another lookup into the darknesses 17376 * of IPsec that way and can actually decide if TSO is ok. 17377 */ 17378 #ifdef INET6 17379 if (isipv6 && IPSEC_ENABLED(ipv6)) 17380 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 17381 #ifdef INET 17382 else 17383 #endif 17384 #endif /* INET6 */ 17385 #ifdef INET 17386 if (IPSEC_ENABLED(ipv4)) 17387 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 17388 #endif /* INET */ 17389 #endif 17390 17391 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17392 ipoptlen += ipsec_optlen; 17393 #endif 17394 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17395 (tp->t_port == 0) && 17396 ((tp->t_flags & TF_SIGNATURE) == 0) && 17397 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17398 ipoptlen == 0) 17399 tso = 1; 17400 { 17401 uint32_t outstanding __unused; 17402 17403 outstanding = tp->snd_max - tp->snd_una; 17404 if (tp->t_flags & TF_SENTFIN) { 17405 /* 17406 * If we sent a fin, snd_max is 1 higher than 17407 * snd_una 17408 */ 17409 outstanding--; 17410 } 17411 if (sack_rxmit) { 17412 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17413 flags &= ~TH_FIN; 17414 } else { 17415 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17416 sbused(sb))) 17417 flags &= ~TH_FIN; 17418 } 17419 } 17420 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17421 (long)TCP_MAXWIN << tp->rcv_scale); 17422 17423 /* 17424 * Sender silly window avoidance. We transmit under the following 17425 * conditions when len is non-zero: 17426 * 17427 * - We have a full segment (or more with TSO) - This is the last 17428 * buffer in a write()/send() and we are either idle or running 17429 * NODELAY - we've timed out (e.g. persist timer) - we have more 17430 * then 1/2 the maximum send window's worth of data (receiver may be 17431 * limited the window size) - we need to retransmit 17432 */ 17433 if (len) { 17434 if (len >= segsiz) { 17435 goto send; 17436 } 17437 /* 17438 * NOTE! on localhost connections an 'ack' from the remote 17439 * end may occur synchronously with the output and cause us 17440 * to flush a buffer queued with moretocome. 
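 * The numeric 'pass' values set before each goto send are purely
 * diagnostic; they are folded into the BB log record later so a
 * trace can show which rule released this transmission.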
XXX 17441 * 17442 */ 17443 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17444 (idle || (tp->t_flags & TF_NODELAY)) && 17445 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17446 (tp->t_flags & TF_NOPUSH) == 0) { 17447 pass = 2; 17448 goto send; 17449 } 17450 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17451 pass = 22; 17452 goto send; 17453 } 17454 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17455 pass = 4; 17456 goto send; 17457 } 17458 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17459 pass = 5; 17460 goto send; 17461 } 17462 if (sack_rxmit) { 17463 pass = 6; 17464 goto send; 17465 } 17466 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17467 (ctf_outstanding(tp) < (segsiz * 2))) { 17468 /* 17469 * We have less than two MSS outstanding (delayed ack) 17470 * and our rwnd will not let us send a full sized 17471 * MSS. Lets go ahead and let this small segment 17472 * out because we want to try to have at least two 17473 * packets inflight to not be caught by delayed ack. 17474 */ 17475 pass = 12; 17476 goto send; 17477 } 17478 } 17479 /* 17480 * Sending of standalone window updates. 17481 * 17482 * Window updates are important when we close our window due to a 17483 * full socket buffer and are opening it again after the application 17484 * reads data from it. Once the window has opened again and the 17485 * remote end starts to send again the ACK clock takes over and 17486 * provides the most current window information. 17487 * 17488 * We must avoid the silly window syndrome whereas every read from 17489 * the receive buffer, no matter how small, causes a window update 17490 * to be sent. We also should avoid sending a flurry of window 17491 * updates when the socket buffer had queued a lot of data and the 17492 * application is doing small reads. 17493 * 17494 * Prevent a flurry of pointless window updates by only sending an 17495 * update when we can increase the advertized window by more than 17496 * 1/4th of the socket buffer capacity. When the buffer is getting 17497 * full or is very small be more aggressive and send an update 17498 * whenever we can increase by two mss sized segments. In all other 17499 * situations the ACK's to new incoming data will carry further 17500 * window increases. 17501 * 17502 * Don't send an independent window update if a delayed ACK is 17503 * pending (it will get piggy-backed on it) or the remote side 17504 * already has done a half-close and won't send more data. Skip 17505 * this if the connection is in T/TCP half-open state. 17506 */ 17507 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17508 !(tp->t_flags & TF_DELACK) && 17509 !TCPS_HAVERCVDFIN(tp->t_state)) { 17510 /* 17511 * "adv" is the amount we could increase the window, taking 17512 * into account that we are limited by TCP_MAXWIN << 17513 * tp->rcv_scale. 17514 */ 17515 int32_t adv; 17516 int oldwin; 17517 17518 adv = recwin; 17519 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17520 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17521 if (adv > oldwin) 17522 adv -= oldwin; 17523 else { 17524 /* We can't increase the window */ 17525 adv = 0; 17526 } 17527 } else 17528 oldwin = 0; 17529 17530 /* 17531 * If the new window size ends up being the same as or less 17532 * than the old size when it is scaled, then don't force 17533 * a window update. 
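 * With a window scale of, say, 7 the peer only sees the window in
 * 128 byte units, so a smaller increase may not change the value
 * we would put on the wire at all.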
17534 */ 17535 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17536 goto dontupdate; 17537 17538 if (adv >= (int32_t)(2 * segsiz) && 17539 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17540 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17541 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17542 pass = 7; 17543 goto send; 17544 } 17545 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17546 pass = 23; 17547 goto send; 17548 } 17549 } 17550 dontupdate: 17551 17552 /* 17553 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17554 * is also a catch-all for the retransmit timer timeout case. 17555 */ 17556 if (tp->t_flags & TF_ACKNOW) { 17557 pass = 8; 17558 goto send; 17559 } 17560 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17561 pass = 9; 17562 goto send; 17563 } 17564 /* 17565 * If our state indicates that FIN should be sent and we have not 17566 * yet done so, then we need to send. 17567 */ 17568 if ((flags & TH_FIN) && 17569 (tp->snd_nxt == tp->snd_una)) { 17570 pass = 11; 17571 goto send; 17572 } 17573 /* 17574 * No reason to send a segment, just return. 17575 */ 17576 just_return: 17577 SOCKBUF_UNLOCK(sb); 17578 just_return_nolock: 17579 { 17580 int app_limited = CTF_JR_SENT_DATA; 17581 17582 if (tot_len_this_send > 0) { 17583 /* Make sure snd_nxt is up to max */ 17584 rack->r_ctl.fsb.recwin = recwin; 17585 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17586 if ((error == 0) && 17587 rack_use_rfo && 17588 ((flags & (TH_SYN|TH_FIN)) == 0) && 17589 (ipoptlen == 0) && 17590 (tp->snd_nxt == tp->snd_max) && 17591 (tp->rcv_numsacks == 0) && 17592 rack->r_fsb_inited && 17593 TCPS_HAVEESTABLISHED(tp->t_state) && 17594 (rack->r_must_retran == 0) && 17595 ((tp->t_flags & TF_NEEDFIN) == 0) && 17596 (len > 0) && (orig_len > 0) && 17597 (orig_len > len) && 17598 ((orig_len - len) >= segsiz) && 17599 ((optlen == 0) || 17600 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17601 /* We can send at least one more MSS using our fsb */ 17602 17603 rack->r_fast_output = 1; 17604 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17605 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17606 rack->r_ctl.fsb.tcp_flags = flags; 17607 rack->r_ctl.fsb.left_to_send = orig_len - len; 17608 if (hw_tls) 17609 rack->r_ctl.fsb.hw_tls = 1; 17610 else 17611 rack->r_ctl.fsb.hw_tls = 0; 17612 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17613 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17614 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17615 (tp->snd_max - tp->snd_una))); 17616 if (rack->r_ctl.fsb.left_to_send < segsiz) 17617 rack->r_fast_output = 0; 17618 else { 17619 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17620 rack->r_ctl.fsb.rfo_apply_push = 1; 17621 else 17622 rack->r_ctl.fsb.rfo_apply_push = 0; 17623 } 17624 } else 17625 rack->r_fast_output = 0; 17626 17627 17628 rack_log_fsb(rack, tp, so, flags, 17629 ipoptlen, orig_len, len, 0, 17630 1, optlen, __LINE__, 1); 17631 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17632 tp->snd_nxt = tp->snd_max; 17633 } else { 17634 int end_window = 0; 17635 uint32_t seq = tp->gput_ack; 17636 17637 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17638 if (rsm) { 17639 /* 17640 * Mark the last sent that we just-returned (hinting 17641 * that delayed ack may play a role in any rtt measurement). 
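 * An rtt sample taken off a just-returned send is suspect because
 * the peer may sit on its delayed-ack timer instead of acking
 * immediately, as it would for a steady stream of segments.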
17642 */ 17643 rsm->r_just_ret = 1; 17644 } 17645 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17646 rack->r_ctl.rc_agg_delayed = 0; 17647 rack->r_early = 0; 17648 rack->r_late = 0; 17649 rack->r_ctl.rc_agg_early = 0; 17650 if ((ctf_outstanding(tp) + 17651 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17652 minseg)) >= tp->snd_wnd) { 17653 /* We are limited by the rwnd */ 17654 app_limited = CTF_JR_RWND_LIMITED; 17655 if (IN_FASTRECOVERY(tp->t_flags)) 17656 rack->r_ctl.rc_prr_sndcnt = 0; 17657 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17658 /* We are limited by whats available -- app limited */ 17659 app_limited = CTF_JR_APP_LIMITED; 17660 if (IN_FASTRECOVERY(tp->t_flags)) 17661 rack->r_ctl.rc_prr_sndcnt = 0; 17662 } else if ((idle == 0) && 17663 ((tp->t_flags & TF_NODELAY) == 0) && 17664 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17665 (len < segsiz)) { 17666 /* 17667 * No delay is not on and the 17668 * user is sending less than 1MSS. This 17669 * brings out SWS avoidance so we 17670 * don't send. Another app-limited case. 17671 */ 17672 app_limited = CTF_JR_APP_LIMITED; 17673 } else if (tp->t_flags & TF_NOPUSH) { 17674 /* 17675 * The user has requested no push of 17676 * the last segment and we are 17677 * at the last segment. Another app 17678 * limited case. 17679 */ 17680 app_limited = CTF_JR_APP_LIMITED; 17681 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17682 /* Its the cwnd */ 17683 app_limited = CTF_JR_CWND_LIMITED; 17684 } else if (IN_FASTRECOVERY(tp->t_flags) && 17685 (rack->rack_no_prr == 0) && 17686 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17687 app_limited = CTF_JR_PRR; 17688 } else { 17689 /* Now why here are we not sending? */ 17690 #ifdef NOW 17691 #ifdef INVARIANTS 17692 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17693 #endif 17694 #endif 17695 app_limited = CTF_JR_ASSESSING; 17696 } 17697 /* 17698 * App limited in some fashion, for our pacing GP 17699 * measurements we don't want any gap (even cwnd). 17700 * Close down the measurement window. 17701 */ 17702 if (rack_cwnd_block_ends_measure && 17703 ((app_limited == CTF_JR_CWND_LIMITED) || 17704 (app_limited == CTF_JR_PRR))) { 17705 /* 17706 * The reason we are not sending is 17707 * the cwnd (or prr). We have been configured 17708 * to end the measurement window in 17709 * this case. 17710 */ 17711 end_window = 1; 17712 } else if (rack_rwnd_block_ends_measure && 17713 (app_limited == CTF_JR_RWND_LIMITED)) { 17714 /* 17715 * We are rwnd limited and have been 17716 * configured to end the measurement 17717 * window in this case. 17718 */ 17719 end_window = 1; 17720 } else if (app_limited == CTF_JR_APP_LIMITED) { 17721 /* 17722 * A true application limited period, we have 17723 * ran out of data. 17724 */ 17725 end_window = 1; 17726 } else if (app_limited == CTF_JR_ASSESSING) { 17727 /* 17728 * In the assessing case we hit the end of 17729 * the if/else and had no known reason 17730 * This will panic us under invariants.. 17731 * 17732 * If we get this out in logs we need to 17733 * investagate which reason we missed. 17734 */ 17735 end_window = 1; 17736 } 17737 if (end_window) { 17738 uint8_t log = 0; 17739 17740 /* Adjust the Gput measurement */ 17741 if ((tp->t_flags & TF_GPUTINPROG) && 17742 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17743 tp->gput_ack = tp->snd_max; 17744 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17745 /* 17746 * There is not enough to measure. 
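 * The clamped goodput window now spans fewer than MIN_GP_WIN
 * segments, so any bandwidth estimate taken from it would be
 * noise; abandon the measurement instead.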
17747 */ 17748 tp->t_flags &= ~TF_GPUTINPROG; 17749 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17750 rack->r_ctl.rc_gp_srtt /*flex1*/, 17751 tp->gput_seq, 17752 0, 0, 18, __LINE__, NULL, 0); 17753 } else 17754 log = 1; 17755 } 17756 /* Mark the last packet has app limited */ 17757 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17758 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17759 if (rack->r_ctl.rc_app_limited_cnt == 0) 17760 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17761 else { 17762 /* 17763 * Go out to the end app limited and mark 17764 * this new one as next and move the end_appl up 17765 * to this guy. 17766 */ 17767 if (rack->r_ctl.rc_end_appl) 17768 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17769 rack->r_ctl.rc_end_appl = rsm; 17770 } 17771 rsm->r_flags |= RACK_APP_LIMITED; 17772 rack->r_ctl.rc_app_limited_cnt++; 17773 } 17774 if (log) 17775 rack_log_pacing_delay_calc(rack, 17776 rack->r_ctl.rc_app_limited_cnt, seq, 17777 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17778 } 17779 } 17780 /* Check if we need to go into persists or not */ 17781 if ((tp->snd_max == tp->snd_una) && 17782 TCPS_HAVEESTABLISHED(tp->t_state) && 17783 sbavail(sb) && 17784 (sbavail(sb) > tp->snd_wnd) && 17785 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17786 /* Yes lets make sure to move to persist before timer-start */ 17787 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17788 } 17789 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17790 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17791 } 17792 #ifdef NETFLIX_SHARED_CWND 17793 if ((sbavail(sb) == 0) && 17794 rack->r_ctl.rc_scw) { 17795 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17796 rack->rack_scwnd_is_idle = 1; 17797 } 17798 #endif 17799 #ifdef TCP_ACCOUNTING 17800 if (tot_len_this_send > 0) { 17801 crtsc = get_cyclecount(); 17802 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17803 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17804 } 17805 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17806 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17807 } 17808 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17809 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17810 } 17811 } else { 17812 crtsc = get_cyclecount(); 17813 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17814 tp->tcp_cnt_counters[SND_LIMITED]++; 17815 } 17816 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17817 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17818 } 17819 } 17820 sched_unpin(); 17821 #endif 17822 return (0); 17823 17824 send: 17825 if (rsm || sack_rxmit) 17826 counter_u64_add(rack_nfto_resend, 1); 17827 else 17828 counter_u64_add(rack_non_fto_send, 1); 17829 if ((flags & TH_FIN) && 17830 sbavail(sb)) { 17831 /* 17832 * We do not transmit a FIN 17833 * with data outstanding. We 17834 * need to make it so all data 17835 * is acked first. 
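 * (More precisely, with data still sitting in the send buffer; the
 * FIN will ride on, or follow, the segment that carries the last
 * byte of that data.)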
17836 */ 17837 flags &= ~TH_FIN; 17838 } 17839 /* Enforce stack imposed max seg size if we have one */ 17840 if (rack->r_ctl.rc_pace_max_segs && 17841 (len > rack->r_ctl.rc_pace_max_segs)) { 17842 mark = 1; 17843 len = rack->r_ctl.rc_pace_max_segs; 17844 } 17845 SOCKBUF_LOCK_ASSERT(sb); 17846 if (len > 0) { 17847 if (len >= segsiz) 17848 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17849 else 17850 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17851 } 17852 /* 17853 * Before ESTABLISHED, force sending of initial options unless TCP 17854 * set not to do any options. NOTE: we assume that the IP/TCP header 17855 * plus TCP options always fit in a single mbuf, leaving room for a 17856 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17857 * + optlen <= MCLBYTES 17858 */ 17859 optlen = 0; 17860 #ifdef INET6 17861 if (isipv6) 17862 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17863 else 17864 #endif 17865 hdrlen = sizeof(struct tcpiphdr); 17866 17867 /* 17868 * Compute options for segment. We only have to care about SYN and 17869 * established connection segments. Options for SYN-ACK segments 17870 * are handled in TCP syncache. 17871 */ 17872 to.to_flags = 0; 17873 if ((tp->t_flags & TF_NOOPT) == 0) { 17874 /* Maximum segment size. */ 17875 if (flags & TH_SYN) { 17876 tp->snd_nxt = tp->iss; 17877 to.to_mss = tcp_mssopt(&inp->inp_inc); 17878 if (tp->t_port) 17879 to.to_mss -= V_tcp_udp_tunneling_overhead; 17880 to.to_flags |= TOF_MSS; 17881 17882 /* 17883 * On SYN or SYN|ACK transmits on TFO connections, 17884 * only include the TFO option if it is not a 17885 * retransmit, as the presence of the TFO option may 17886 * have caused the original SYN or SYN|ACK to have 17887 * been dropped by a middlebox. 17888 */ 17889 if (IS_FASTOPEN(tp->t_flags) && 17890 (tp->t_rxtshift == 0)) { 17891 if (tp->t_state == TCPS_SYN_RECEIVED) { 17892 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17893 to.to_tfo_cookie = 17894 (u_int8_t *)&tp->t_tfo_cookie.server; 17895 to.to_flags |= TOF_FASTOPEN; 17896 wanted_cookie = 1; 17897 } else if (tp->t_state == TCPS_SYN_SENT) { 17898 to.to_tfo_len = 17899 tp->t_tfo_client_cookie_len; 17900 to.to_tfo_cookie = 17901 tp->t_tfo_cookie.client; 17902 to.to_flags |= TOF_FASTOPEN; 17903 wanted_cookie = 1; 17904 /* 17905 * If we wind up having more data to 17906 * send with the SYN than can fit in 17907 * one segment, don't send any more 17908 * until the SYN|ACK comes back from 17909 * the other end. 17910 */ 17911 sendalot = 0; 17912 } 17913 } 17914 } 17915 /* Window scaling. */ 17916 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 17917 to.to_wscale = tp->request_r_scale; 17918 to.to_flags |= TOF_SCALE; 17919 } 17920 /* Timestamps. */ 17921 if ((tp->t_flags & TF_RCVD_TSTMP) || 17922 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 17923 to.to_tsval = ms_cts + tp->ts_offset; 17924 to.to_tsecr = tp->ts_recent; 17925 to.to_flags |= TOF_TS; 17926 } 17927 /* Set receive buffer autosizing timestamp. */ 17928 if (tp->rfbuf_ts == 0 && 17929 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 17930 tp->rfbuf_ts = tcp_ts_getticks(); 17931 /* Selective ACK's. */ 17932 if (tp->t_flags & TF_SACK_PERMIT) { 17933 if (flags & TH_SYN) 17934 to.to_flags |= TOF_SACKPERM; 17935 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 17936 tp->rcv_numsacks > 0) { 17937 to.to_flags |= TOF_SACK; 17938 to.to_nsacks = tp->rcv_numsacks; 17939 to.to_sacks = (u_char *)tp->sackblks; 17940 } 17941 } 17942 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17943 /* TCP-MD5 (RFC2385). 
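 * Setting TOF_SIGNATURE here only reserves option space; the
 * digest itself is computed once the headers are built, via
 * TCPMD5_OUTPUT() further down.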
*/ 17944 if (tp->t_flags & TF_SIGNATURE) 17945 to.to_flags |= TOF_SIGNATURE; 17946 #endif /* TCP_SIGNATURE */ 17947 17948 /* Processing the options. */ 17949 hdrlen += optlen = tcp_addoptions(&to, opt); 17950 /* 17951 * If we wanted a TFO option to be added, but it was unable 17952 * to fit, ensure no data is sent. 17953 */ 17954 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 17955 !(to.to_flags & TOF_FASTOPEN)) 17956 len = 0; 17957 } 17958 if (tp->t_port) { 17959 if (V_tcp_udp_tunneling_port == 0) { 17960 /* The port was removed?? */ 17961 SOCKBUF_UNLOCK(&so->so_snd); 17962 #ifdef TCP_ACCOUNTING 17963 crtsc = get_cyclecount(); 17964 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17965 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 17966 } 17967 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17968 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 17969 } 17970 sched_unpin(); 17971 #endif 17972 return (EHOSTUNREACH); 17973 } 17974 hdrlen += sizeof(struct udphdr); 17975 } 17976 #ifdef INET6 17977 if (isipv6) 17978 ipoptlen = ip6_optlen(inp); 17979 else 17980 #endif 17981 if (inp->inp_options) 17982 ipoptlen = inp->inp_options->m_len - 17983 offsetof(struct ipoption, ipopt_list); 17984 else 17985 ipoptlen = 0; 17986 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17987 ipoptlen += ipsec_optlen; 17988 #endif 17989 17990 /* 17991 * Adjust data length if insertion of options will bump the packet 17992 * length beyond the t_maxseg length. Clear the FIN bit because we 17993 * cut off the tail of the segment. 17994 */ 17995 if (len + optlen + ipoptlen > tp->t_maxseg) { 17996 if (tso) { 17997 uint32_t if_hw_tsomax; 17998 uint32_t moff; 17999 int32_t max_len; 18000 18001 /* extract TSO information */ 18002 if_hw_tsomax = tp->t_tsomax; 18003 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18004 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18005 KASSERT(ipoptlen == 0, 18006 ("%s: TSO can't do IP options", __func__)); 18007 18008 /* 18009 * Check if we should limit by maximum payload 18010 * length: 18011 */ 18012 if (if_hw_tsomax != 0) { 18013 /* compute maximum TSO length */ 18014 max_len = (if_hw_tsomax - hdrlen - 18015 max_linkhdr); 18016 if (max_len <= 0) { 18017 len = 0; 18018 } else if (len > max_len) { 18019 sendalot = 1; 18020 len = max_len; 18021 mark = 2; 18022 } 18023 } 18024 /* 18025 * Prevent the last segment from being fractional 18026 * unless the send sockbuf can be emptied: 18027 */ 18028 max_len = (tp->t_maxseg - optlen); 18029 if ((sb_offset + len) < sbavail(sb)) { 18030 moff = len % (u_int)max_len; 18031 if (moff != 0) { 18032 mark = 3; 18033 len -= moff; 18034 } 18035 } 18036 /* 18037 * In case there are too many small fragments don't 18038 * use TSO: 18039 */ 18040 if (len <= segsiz) { 18041 mark = 4; 18042 tso = 0; 18043 } 18044 /* 18045 * Send the FIN in a separate segment after the bulk 18046 * sending is done. We don't trust the TSO 18047 * implementations to clear the FIN flag on all but 18048 * the last segment. 18049 */ 18050 if (tp->t_flags & TF_NEEDFIN) { 18051 sendalot = 4; 18052 } 18053 } else { 18054 mark = 5; 18055 if (optlen + ipoptlen >= tp->t_maxseg) { 18056 /* 18057 * Since we don't have enough space to put 18058 * the IP header chain and the TCP header in 18059 * one packet as required by RFC 7112, don't 18060 * send it. Also ensure that at least one 18061 * byte of the payload can be put into the 18062 * TCP segment. 
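 * Failing the send with EMSGSIZE lets the normal error path below
 * clean up and reschedule rather than emitting a segment whose
 * header chain would crowd out every byte of payload.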
18063 */ 18064 SOCKBUF_UNLOCK(&so->so_snd); 18065 error = EMSGSIZE; 18066 sack_rxmit = 0; 18067 goto out; 18068 } 18069 len = tp->t_maxseg - optlen - ipoptlen; 18070 sendalot = 5; 18071 } 18072 } else { 18073 tso = 0; 18074 mark = 6; 18075 } 18076 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18077 ("%s: len > IP_MAXPACKET", __func__)); 18078 #ifdef DIAGNOSTIC 18079 #ifdef INET6 18080 if (max_linkhdr + hdrlen > MCLBYTES) 18081 #else 18082 if (max_linkhdr + hdrlen > MHLEN) 18083 #endif 18084 panic("tcphdr too big"); 18085 #endif 18086 18087 /* 18088 * This KASSERT is here to catch edge cases at a well defined place. 18089 * Before, those had triggered (random) panic conditions further 18090 * down. 18091 */ 18092 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18093 if ((len == 0) && 18094 (flags & TH_FIN) && 18095 (sbused(sb))) { 18096 /* 18097 * We have outstanding data, don't send a fin by itself!. 18098 */ 18099 goto just_return; 18100 } 18101 /* 18102 * Grab a header mbuf, attaching a copy of data to be transmitted, 18103 * and initialize the header from the template for sends on this 18104 * connection. 18105 */ 18106 hw_tls = tp->t_nic_ktls_xmit != 0; 18107 if (len) { 18108 uint32_t max_val; 18109 uint32_t moff; 18110 18111 if (rack->r_ctl.rc_pace_max_segs) 18112 max_val = rack->r_ctl.rc_pace_max_segs; 18113 else if (rack->rc_user_set_max_segs) 18114 max_val = rack->rc_user_set_max_segs * segsiz; 18115 else 18116 max_val = len; 18117 /* 18118 * We allow a limit on sending with hptsi. 18119 */ 18120 if (len > max_val) { 18121 mark = 7; 18122 len = max_val; 18123 } 18124 #ifdef INET6 18125 if (MHLEN < hdrlen + max_linkhdr) 18126 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18127 else 18128 #endif 18129 m = m_gethdr(M_NOWAIT, MT_DATA); 18130 18131 if (m == NULL) { 18132 SOCKBUF_UNLOCK(sb); 18133 error = ENOBUFS; 18134 sack_rxmit = 0; 18135 goto out; 18136 } 18137 m->m_data += max_linkhdr; 18138 m->m_len = hdrlen; 18139 18140 /* 18141 * Start the m_copy functions from the closest mbuf to the 18142 * sb_offset in the socket buffer chain. 18143 */ 18144 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18145 s_mb = mb; 18146 s_moff = moff; 18147 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18148 m_copydata(mb, moff, (int)len, 18149 mtod(m, caddr_t)+hdrlen); 18150 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18151 sbsndptr_adv(sb, mb, len); 18152 m->m_len += len; 18153 } else { 18154 struct sockbuf *msb; 18155 18156 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18157 msb = NULL; 18158 else 18159 msb = sb; 18160 m->m_next = tcp_m_copym( 18161 mb, moff, &len, 18162 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18163 ((rsm == NULL) ? hw_tls : 0) 18164 #ifdef NETFLIX_COPY_ARGS 18165 , &s_mb, &s_moff 18166 #endif 18167 ); 18168 if (len <= (tp->t_maxseg - optlen)) { 18169 /* 18170 * Must have ran out of mbufs for the copy 18171 * shorten it to no longer need tso. Lets 18172 * not put on sendalot since we are low on 18173 * mbufs. 
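 * tcp_m_copym() trims len when it cannot allocate enough mbufs, so
 * the payload that survived may now fit in a single segment and no
 * longer justify TSO.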
18174 */ 18175 tso = 0; 18176 } 18177 if (m->m_next == NULL) { 18178 SOCKBUF_UNLOCK(sb); 18179 (void)m_free(m); 18180 error = ENOBUFS; 18181 sack_rxmit = 0; 18182 goto out; 18183 } 18184 } 18185 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18186 if (rsm && (rsm->r_flags & RACK_TLP)) { 18187 /* 18188 * TLP should not count in retran count, but 18189 * in its own bin 18190 */ 18191 counter_u64_add(rack_tlp_retran, 1); 18192 counter_u64_add(rack_tlp_retran_bytes, len); 18193 } else { 18194 tp->t_sndrexmitpack++; 18195 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18196 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18197 } 18198 #ifdef STATS 18199 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18200 len); 18201 #endif 18202 } else { 18203 KMOD_TCPSTAT_INC(tcps_sndpack); 18204 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18205 #ifdef STATS 18206 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18207 len); 18208 #endif 18209 } 18210 /* 18211 * If we're sending everything we've got, set PUSH. (This 18212 * will keep happy those implementations which only give 18213 * data to the user when a buffer fills or a PUSH comes in.) 18214 */ 18215 if (sb_offset + len == sbused(sb) && 18216 sbused(sb) && 18217 !(flags & TH_SYN)) { 18218 flags |= TH_PUSH; 18219 add_flag |= RACK_HAD_PUSH; 18220 } 18221 18222 SOCKBUF_UNLOCK(sb); 18223 } else { 18224 SOCKBUF_UNLOCK(sb); 18225 if (tp->t_flags & TF_ACKNOW) 18226 KMOD_TCPSTAT_INC(tcps_sndacks); 18227 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18228 KMOD_TCPSTAT_INC(tcps_sndctrl); 18229 else 18230 KMOD_TCPSTAT_INC(tcps_sndwinup); 18231 18232 m = m_gethdr(M_NOWAIT, MT_DATA); 18233 if (m == NULL) { 18234 error = ENOBUFS; 18235 sack_rxmit = 0; 18236 goto out; 18237 } 18238 #ifdef INET6 18239 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18240 MHLEN >= hdrlen) { 18241 M_ALIGN(m, hdrlen); 18242 } else 18243 #endif 18244 m->m_data += max_linkhdr; 18245 m->m_len = hdrlen; 18246 } 18247 SOCKBUF_UNLOCK_ASSERT(sb); 18248 m->m_pkthdr.rcvif = (struct ifnet *)0; 18249 #ifdef MAC 18250 mac_inpcb_create_mbuf(inp, m); 18251 #endif 18252 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18253 #ifdef INET6 18254 if (isipv6) 18255 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18256 else 18257 #endif /* INET6 */ 18258 #ifdef INET 18259 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18260 #endif 18261 th = rack->r_ctl.fsb.th; 18262 udp = rack->r_ctl.fsb.udp; 18263 if (udp) { 18264 #ifdef INET6 18265 if (isipv6) 18266 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18267 else 18268 #endif /* INET6 */ 18269 ulen = hdrlen + len - sizeof(struct ip); 18270 udp->uh_ulen = htons(ulen); 18271 } 18272 } else { 18273 #ifdef INET6 18274 if (isipv6) { 18275 ip6 = mtod(m, struct ip6_hdr *); 18276 if (tp->t_port) { 18277 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18278 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18279 udp->uh_dport = tp->t_port; 18280 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18281 udp->uh_ulen = htons(ulen); 18282 th = (struct tcphdr *)(udp + 1); 18283 } else 18284 th = (struct tcphdr *)(ip6 + 1); 18285 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18286 } else 18287 #endif /* INET6 */ 18288 { 18289 #ifdef INET 18290 ip = mtod(m, struct ip *); 18291 if (tp->t_port) { 18292 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18293 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18294 udp->uh_dport = tp->t_port; 18295 ulen = hdrlen + len - sizeof(struct ip); 18296 udp->uh_ulen = htons(ulen); 18297 th = (struct 
tcphdr *)(udp + 1); 18298 } else 18299 th = (struct tcphdr *)(ip + 1); 18300 tcpip_fillheaders(inp, tp->t_port, ip, th); 18301 #endif 18302 } 18303 } 18304 /* 18305 * Fill in fields, remembering maximum advertised window for use in 18306 * delaying messages about window sizes. If resending a FIN, be sure 18307 * not to use a new sequence number. 18308 */ 18309 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18310 tp->snd_nxt == tp->snd_max) 18311 tp->snd_nxt--; 18312 /* 18313 * If we are starting a connection, send ECN setup SYN packet. If we 18314 * are on a retransmit, we may resend those bits a number of times 18315 * as per RFC 3168. 18316 */ 18317 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18318 flags |= tcp_ecn_output_syn_sent(tp); 18319 } 18320 /* Also handle parallel SYN for ECN */ 18321 if (TCPS_HAVERCVDSYN(tp->t_state) && 18322 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18323 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18324 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18325 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18326 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18327 #ifdef INET6 18328 if (isipv6) { 18329 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18330 ip6->ip6_flow |= htonl(ect << 20); 18331 } 18332 else 18333 #endif 18334 { 18335 #ifdef INET 18336 ip->ip_tos &= ~IPTOS_ECN_MASK; 18337 ip->ip_tos |= ect; 18338 #endif 18339 } 18340 } 18341 /* 18342 * If we are doing retransmissions, then snd_nxt will not reflect 18343 * the first unsent octet. For ACK only packets, we do not want the 18344 * sequence number of the retransmitted packet, we want the sequence 18345 * number of the next unsent octet. So, if there is no data (and no 18346 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18347 * ti_seq. But if we are in persist state, snd_max might reflect 18348 * one byte beyond the right edge of the window, so use snd_nxt in 18349 * that case, since we know we aren't doing a retransmission. 18350 * (retransmit and persist are mutually exclusive...) 18351 */ 18352 if (sack_rxmit == 0) { 18353 if (len || (flags & (TH_SYN | TH_FIN))) { 18354 th->th_seq = htonl(tp->snd_nxt); 18355 rack_seq = tp->snd_nxt; 18356 } else { 18357 th->th_seq = htonl(tp->snd_max); 18358 rack_seq = tp->snd_max; 18359 } 18360 } else { 18361 th->th_seq = htonl(rsm->r_start); 18362 rack_seq = rsm->r_start; 18363 } 18364 th->th_ack = htonl(tp->rcv_nxt); 18365 tcp_set_flags(th, flags); 18366 /* 18367 * Calculate receive window. Don't shrink window, but avoid silly 18368 * window syndrome. 18369 * If a RST segment is sent, advertise a window of zero. 18370 */ 18371 if (flags & TH_RST) { 18372 recwin = 0; 18373 } else { 18374 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18375 recwin < (long)segsiz) { 18376 recwin = 0; 18377 } 18378 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18379 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18380 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18381 } 18382 18383 /* 18384 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18385 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18386 * handled in syncache. 18387 */ 18388 if (flags & TH_SYN) 18389 th->th_win = htons((u_short) 18390 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18391 else { 18392 /* Avoid shrinking window with window scaling. */ 18393 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18394 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18395 } 18396 /* 18397 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18398 * window. 
This may cause the remote transmitter to stall. This 18399 * flag tells soreceive() to disable delayed acknowledgements when 18400 * draining the buffer. This can occur if the receiver is 18401 * attempting to read more data than can be buffered prior to 18402 * transmitting on the connection. 18403 */ 18404 if (th->th_win == 0) { 18405 tp->t_sndzerowin++; 18406 tp->t_flags |= TF_RXWIN0SENT; 18407 } else 18408 tp->t_flags &= ~TF_RXWIN0SENT; 18409 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18410 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18411 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18412 uint8_t *cpto; 18413 18414 cpto = mtod(m, uint8_t *); 18415 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18416 /* 18417 * We have just copied in: 18418 * IP/IP6 18419 * <optional udphdr> 18420 * tcphdr (no options) 18421 * 18422 * We need to grab the correct pointers into the mbuf 18423 * for both the tcp header, and possibly the udp header (if tunneling). 18424 * We do this by using the offset in the copy buffer and adding it 18425 * to the mbuf base pointer (cpto). 18426 */ 18427 #ifdef INET6 18428 if (isipv6) 18429 ip6 = mtod(m, struct ip6_hdr *); 18430 else 18431 #endif /* INET6 */ 18432 #ifdef INET 18433 ip = mtod(m, struct ip *); 18434 #endif 18435 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18436 /* If we have a udp header lets set it into the mbuf as well */ 18437 if (udp) 18438 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18439 } 18440 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18441 if (to.to_flags & TOF_SIGNATURE) { 18442 /* 18443 * Calculate MD5 signature and put it into the place 18444 * determined before. 18445 * NOTE: since TCP options buffer doesn't point into 18446 * mbuf's data, calculate offset and use it. 18447 */ 18448 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18449 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18450 /* 18451 * Do not send segment if the calculation of MD5 18452 * digest has failed. 18453 */ 18454 goto out; 18455 } 18456 } 18457 #endif 18458 if (optlen) { 18459 bcopy(opt, th + 1, optlen); 18460 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18461 } 18462 /* 18463 * Put TCP length in extended header, and then checksum extended 18464 * header and data. 18465 */ 18466 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18467 #ifdef INET6 18468 if (isipv6) { 18469 /* 18470 * ip6_plen is not need to be filled now, and will be filled 18471 * in ip6_output. 
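 * Only the pseudo-header portion of the checksum is computed in
 * software below; csum_flags and csum_data tell the driver (or the
 * software fallback) where to finish the TCP or UDP checksum.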
18472 */ 18473 if (tp->t_port) { 18474 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18475 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18476 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18477 th->th_sum = htons(0); 18478 UDPSTAT_INC(udps_opackets); 18479 } else { 18480 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18481 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18482 th->th_sum = in6_cksum_pseudo(ip6, 18483 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18484 0); 18485 } 18486 } 18487 #endif 18488 #if defined(INET6) && defined(INET) 18489 else 18490 #endif 18491 #ifdef INET 18492 { 18493 if (tp->t_port) { 18494 m->m_pkthdr.csum_flags = CSUM_UDP; 18495 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18496 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18497 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18498 th->th_sum = htons(0); 18499 UDPSTAT_INC(udps_opackets); 18500 } else { 18501 m->m_pkthdr.csum_flags = CSUM_TCP; 18502 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18503 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18504 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18505 IPPROTO_TCP + len + optlen)); 18506 } 18507 /* IP version must be set here for ipv4/ipv6 checking later */ 18508 KASSERT(ip->ip_v == IPVERSION, 18509 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18510 } 18511 #endif 18512 /* 18513 * Enable TSO and specify the size of the segments. The TCP pseudo 18514 * header checksum is always provided. XXX: Fixme: This is currently 18515 * not the case for IPv6. 18516 */ 18517 if (tso) { 18518 KASSERT(len > tp->t_maxseg - optlen, 18519 ("%s: len <= tso_segsz", __func__)); 18520 m->m_pkthdr.csum_flags |= CSUM_TSO; 18521 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18522 } 18523 KASSERT(len + hdrlen == m_length(m, NULL), 18524 ("%s: mbuf chain different than expected: %d + %u != %u", 18525 __func__, len, hdrlen, m_length(m, NULL))); 18526 18527 #ifdef TCP_HHOOK 18528 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18529 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18530 #endif 18531 /* We're getting ready to send; log now. 
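 * The entry snapshots the pacing state (PRR count, pacing segment
 * bounds, early/late aggregates, the cwnd in use) together with the
 * mark/pass pair recorded above, so a captured trace can explain
 * why this particular segment was released.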
*/ 18532 if (tcp_bblogging_on(rack->rc_tp)) { 18533 union tcp_log_stackspecific log; 18534 18535 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18536 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18537 if (rack->rack_no_prr) 18538 log.u_bbr.flex1 = 0; 18539 else 18540 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18541 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18542 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18543 log.u_bbr.flex4 = orig_len; 18544 /* Save off the early/late values */ 18545 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18546 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18547 log.u_bbr.bw_inuse = rack_get_bw(rack); 18548 log.u_bbr.flex8 = 0; 18549 if (rsm) { 18550 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18551 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18552 counter_u64_add(rack_collapsed_win_rxt, 1); 18553 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18554 } 18555 if (doing_tlp) 18556 log.u_bbr.flex8 = 2; 18557 else 18558 log.u_bbr.flex8 = 1; 18559 } else { 18560 if (doing_tlp) 18561 log.u_bbr.flex8 = 3; 18562 else 18563 log.u_bbr.flex8 = 0; 18564 } 18565 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18566 log.u_bbr.flex7 = mark; 18567 log.u_bbr.flex7 <<= 8; 18568 log.u_bbr.flex7 |= pass; 18569 log.u_bbr.pkts_out = tp->t_maxseg; 18570 log.u_bbr.timeStamp = cts; 18571 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18572 log.u_bbr.lt_epoch = cwnd_to_use; 18573 log.u_bbr.delivered = sendalot; 18574 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18575 len, &log, false, NULL, NULL, 0, &tv); 18576 } else 18577 lgb = NULL; 18578 18579 /* 18580 * Fill in IP length and desired time to live and send to IP level. 18581 * There should be a better way to handle ttl and tos; we could keep 18582 * them in the template, but need a way to checksum without them. 18583 */ 18584 /* 18585 * m->m_pkthdr.len should have been set before cksum calcuration, 18586 * because in6_cksum() need it. 18587 */ 18588 #ifdef INET6 18589 if (isipv6) { 18590 /* 18591 * we separately set hoplimit for every segment, since the 18592 * user might want to change the value via setsockopt. Also, 18593 * desired default hop limit might be changed via Neighbor 18594 * Discovery. 18595 */ 18596 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18597 18598 /* 18599 * Set the packet size here for the benefit of DTrace 18600 * probes. ip6_output() will set it properly; it's supposed 18601 * to include the option header lengths as well. 18602 */ 18603 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18604 18605 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18606 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18607 else 18608 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18609 18610 if (tp->t_state == TCPS_SYN_SENT) 18611 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18612 18613 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18614 /* TODO: IPv6 IP6TOS_ECT bit on */ 18615 error = ip6_output(m, 18616 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18617 inp->in6p_outputopts, 18618 #else 18619 NULL, 18620 #endif 18621 &inp->inp_route6, 18622 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18623 NULL, NULL, inp); 18624 18625 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18626 mtu = inp->inp_route6.ro_nh->nh_mtu; 18627 } 18628 #endif /* INET6 */ 18629 #if defined(INET) && defined(INET6) 18630 else 18631 #endif 18632 #ifdef INET 18633 { 18634 ip->ip_len = htons(m->m_pkthdr.len); 18635 #ifdef INET6 18636 if (inp->inp_vflag & INP_IPV6PROTO) 18637 ip->ip_ttl = in6_selecthlim(inp, NULL); 18638 #endif /* INET6 */ 18639 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18640 /* 18641 * If we do path MTU discovery, then we set DF on every 18642 * packet. This might not be the best thing to do according 18643 * to RFC3390 Section 2. However the tcp hostcache migitates 18644 * the problem so it affects only the first tcp connection 18645 * with a host. 18646 * 18647 * NB: Don't set DF on small MTU/MSS to have a safe 18648 * fallback. 18649 */ 18650 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18651 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18652 if (tp->t_port == 0 || len < V_tcp_minmss) { 18653 ip->ip_off |= htons(IP_DF); 18654 } 18655 } else { 18656 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18657 } 18658 18659 if (tp->t_state == TCPS_SYN_SENT) 18660 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18661 18662 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18663 18664 error = ip_output(m, 18665 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18666 inp->inp_options, 18667 #else 18668 NULL, 18669 #endif 18670 &inp->inp_route, 18671 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18672 inp); 18673 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18674 mtu = inp->inp_route.ro_nh->nh_mtu; 18675 } 18676 #endif /* INET */ 18677 18678 out: 18679 if (lgb) { 18680 lgb->tlb_errno = error; 18681 lgb = NULL; 18682 } 18683 /* 18684 * In transmit state, time the transmission and arrange for the 18685 * retransmit. In persist state, just set snd_max. 
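 * From here the return value of ip_output()/ip6_output() drives
 * everything: on success we account the send, charge the PRR
 * budget and advance snd_nxt/snd_max; on failure we drop into the
 * error switch below and let the pacer or timer retry.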
18686 */ 18687 if (error == 0) { 18688 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18689 if (rsm && doing_tlp) { 18690 rack->rc_last_sent_tlp_past_cumack = 0; 18691 rack->rc_last_sent_tlp_seq_valid = 1; 18692 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18693 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18694 } 18695 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18696 if (rsm && (doing_tlp == 0)) { 18697 /* Set we retransmitted */ 18698 rack->rc_gp_saw_rec = 1; 18699 } else { 18700 if (cwnd_to_use > tp->snd_ssthresh) { 18701 /* Set we sent in CA */ 18702 rack->rc_gp_saw_ca = 1; 18703 } else { 18704 /* Set we sent in SS */ 18705 rack->rc_gp_saw_ss = 1; 18706 } 18707 } 18708 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18709 (tp->t_flags & TF_SACK_PERMIT) && 18710 tp->rcv_numsacks > 0) 18711 tcp_clean_dsack_blocks(tp); 18712 tot_len_this_send += len; 18713 if (len == 0) 18714 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18715 else if (len == 1) { 18716 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18717 } else if (len > 1) { 18718 int idx; 18719 18720 idx = (len / segsiz) + 3; 18721 if (idx >= TCP_MSS_ACCT_ATIMER) 18722 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18723 else 18724 counter_u64_add(rack_out_size[idx], 1); 18725 } 18726 } 18727 if ((rack->rack_no_prr == 0) && 18728 sub_from_prr && 18729 (error == 0)) { 18730 if (rack->r_ctl.rc_prr_sndcnt >= len) 18731 rack->r_ctl.rc_prr_sndcnt -= len; 18732 else 18733 rack->r_ctl.rc_prr_sndcnt = 0; 18734 } 18735 sub_from_prr = 0; 18736 if (doing_tlp) { 18737 /* Make sure the TLP is added */ 18738 add_flag |= RACK_TLP; 18739 } else if (rsm) { 18740 /* If its a resend without TLP then it must not have the flag */ 18741 rsm->r_flags &= ~RACK_TLP; 18742 } 18743 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18744 rack_to_usec_ts(&tv), 18745 rsm, add_flag, s_mb, s_moff, hw_tls); 18746 18747 18748 if ((error == 0) && 18749 (len > 0) && 18750 (tp->snd_una == tp->snd_max)) 18751 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18752 { 18753 tcp_seq startseq = tp->snd_nxt; 18754 18755 /* Track our lost count */ 18756 if (rsm && (doing_tlp == 0)) 18757 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18758 /* 18759 * Advance snd_nxt over sequence space of this segment. 18760 */ 18761 if (error) 18762 /* We don't log or do anything with errors */ 18763 goto nomore; 18764 if (doing_tlp == 0) { 18765 if (rsm == NULL) { 18766 /* 18767 * Not a retransmission of some 18768 * sort, new data is going out so 18769 * clear our TLP count and flag. 18770 */ 18771 rack->rc_tlp_in_progress = 0; 18772 rack->r_ctl.rc_tlp_cnt_out = 0; 18773 } 18774 } else { 18775 /* 18776 * We have just sent a TLP, mark that it is true 18777 * and make sure our in progress is set so we 18778 * continue to check the count. 18779 */ 18780 rack->rc_tlp_in_progress = 1; 18781 rack->r_ctl.rc_tlp_cnt_out++; 18782 } 18783 if (flags & (TH_SYN | TH_FIN)) { 18784 if (flags & TH_SYN) 18785 tp->snd_nxt++; 18786 if (flags & TH_FIN) { 18787 tp->snd_nxt++; 18788 tp->t_flags |= TF_SENTFIN; 18789 } 18790 } 18791 /* In the ENOBUFS case we do *not* update snd_max */ 18792 if (sack_rxmit) 18793 goto nomore; 18794 18795 tp->snd_nxt += len; 18796 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18797 if (tp->snd_una == tp->snd_max) { 18798 /* 18799 * Update the time we just added data since 18800 * none was outstanding. 
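 * (t_acktime is what the progress checks key off of; immediately
 * after this the code advances snd_max, arms the legacy
 * t_rtttime/t_rtseq RTT sample if nothing is currently being
 * timed, and may start a goodput measurement via
 * rack_start_gp_measurement() when none is in progress.)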
18801 */ 18802 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18803 tp->t_acktime = ticks; 18804 } 18805 tp->snd_max = tp->snd_nxt; 18806 /* 18807 * Time this transmission if not a retransmission and 18808 * not currently timing anything. 18809 * This is only relevant in case of switching back to 18810 * the base stack. 18811 */ 18812 if (tp->t_rtttime == 0) { 18813 tp->t_rtttime = ticks; 18814 tp->t_rtseq = startseq; 18815 KMOD_TCPSTAT_INC(tcps_segstimed); 18816 } 18817 if (len && 18818 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18819 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18820 } 18821 /* 18822 * If we are doing FO we need to update the mbuf position and subtract 18823 * this happens when the peer sends us duplicate information and 18824 * we thus want to send a DSACK. 18825 * 18826 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18827 * turned off? If not then we are going to echo multiple DSACK blocks 18828 * out (with the TSO), which we should not be doing. 18829 */ 18830 if (rack->r_fast_output && len) { 18831 if (rack->r_ctl.fsb.left_to_send > len) 18832 rack->r_ctl.fsb.left_to_send -= len; 18833 else 18834 rack->r_ctl.fsb.left_to_send = 0; 18835 if (rack->r_ctl.fsb.left_to_send < segsiz) 18836 rack->r_fast_output = 0; 18837 if (rack->r_fast_output) { 18838 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18839 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18840 } 18841 } 18842 } 18843 nomore: 18844 if (error) { 18845 rack->r_ctl.rc_agg_delayed = 0; 18846 rack->r_early = 0; 18847 rack->r_late = 0; 18848 rack->r_ctl.rc_agg_early = 0; 18849 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18850 /* 18851 * Failures do not advance the seq counter above. For the 18852 * case of ENOBUFS we will fall out and retry in 1ms with 18853 * the hpts. Everything else will just have to retransmit 18854 * with the timer. 18855 * 18856 * In any case, we do not want to loop around for another 18857 * send without a good reason. 18858 */ 18859 sendalot = 0; 18860 switch (error) { 18861 case EPERM: 18862 tp->t_softerror = error; 18863 #ifdef TCP_ACCOUNTING 18864 crtsc = get_cyclecount(); 18865 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18866 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18867 } 18868 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18869 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18870 } 18871 sched_unpin(); 18872 #endif 18873 return (error); 18874 case ENOBUFS: 18875 /* 18876 * Pace us right away to retry in a some 18877 * time 18878 */ 18879 if (rack->r_ctl.crte != NULL) { 18880 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); 18881 } else 18882 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); 18883 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18884 if (rack->rc_enobuf < 0x7f) 18885 rack->rc_enobuf++; 18886 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18887 slot = 10 * HPTS_USEC_IN_MSEC; 18888 if (rack->r_ctl.crte != NULL) { 18889 counter_u64_add(rack_saw_enobuf_hw, 1); 18890 tcp_rl_log_enobuf(rack->r_ctl.crte); 18891 } 18892 counter_u64_add(rack_saw_enobuf, 1); 18893 goto enobufs; 18894 case EMSGSIZE: 18895 /* 18896 * For some reason the interface we used initially 18897 * to send segments changed to another or lowered 18898 * its MTU. If TSO was active we either got an 18899 * interface without TSO capabilits or TSO was 18900 * turned off. If we obtained mtu from ip_output() 18901 * then update it and try again. 
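 * When ip_output() did not hand back an MTU we simply park the
 * connection on the hpts for a short 10ms pause and return,
 * letting the normal timer machinery drive the retry.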
18902 */ 18903 if (tso) 18904 tp->t_flags &= ~TF_TSO; 18905 if (mtu != 0) { 18906 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18907 goto again; 18908 } 18909 slot = 10 * HPTS_USEC_IN_MSEC; 18910 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18911 #ifdef TCP_ACCOUNTING 18912 crtsc = get_cyclecount(); 18913 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18914 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18915 } 18916 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18917 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18918 } 18919 sched_unpin(); 18920 #endif 18921 return (error); 18922 case ENETUNREACH: 18923 counter_u64_add(rack_saw_enetunreach, 1); 18924 case EHOSTDOWN: 18925 case EHOSTUNREACH: 18926 case ENETDOWN: 18927 if (TCPS_HAVERCVDSYN(tp->t_state)) { 18928 tp->t_softerror = error; 18929 } 18930 /* FALLTHROUGH */ 18931 default: 18932 slot = 10 * HPTS_USEC_IN_MSEC; 18933 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18934 #ifdef TCP_ACCOUNTING 18935 crtsc = get_cyclecount(); 18936 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18937 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18938 } 18939 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18940 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18941 } 18942 sched_unpin(); 18943 #endif 18944 return (error); 18945 } 18946 } else { 18947 rack->rc_enobuf = 0; 18948 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18949 rack->r_ctl.retran_during_recovery += len; 18950 } 18951 KMOD_TCPSTAT_INC(tcps_sndtotal); 18952 18953 /* 18954 * Data sent (as far as we can tell). If this advertises a larger 18955 * window than any other segment, then remember the size of the 18956 * advertised window. Any pending ACK has now been sent. 18957 */ 18958 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 18959 tp->rcv_adv = tp->rcv_nxt + recwin; 18960 18961 tp->last_ack_sent = tp->rcv_nxt; 18962 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18963 enobufs: 18964 if (sendalot) { 18965 /* Do we need to turn off sendalot? */ 18966 if (rack->r_ctl.rc_pace_max_segs && 18967 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 18968 /* We hit our max. */ 18969 sendalot = 0; 18970 } else if ((rack->rc_user_set_max_segs) && 18971 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 18972 /* We hit the user defined max */ 18973 sendalot = 0; 18974 } 18975 } 18976 if ((error == 0) && (flags & TH_FIN)) 18977 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 18978 if (flags & TH_RST) { 18979 /* 18980 * We don't send again after sending a RST. 18981 */ 18982 slot = 0; 18983 sendalot = 0; 18984 if (error == 0) 18985 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 18986 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 18987 /* 18988 * Get our pacing rate, if an error 18989 * occurred in sending (ENOBUF) we would 18990 * hit the else if with slot preset. Other 18991 * errors return. 18992 */ 18993 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 18994 } 18995 if (rsm && 18996 (rsm->r_flags & RACK_HAS_SYN) == 0 && 18997 rack->use_rack_rr) { 18998 /* Its a retransmit and we use the rack cheat? */ 18999 if ((slot == 0) || 19000 (rack->rc_always_pace == 0) || 19001 (rack->r_rr_config == 1)) { 19002 /* 19003 * We have no pacing set or we 19004 * are using old-style rack or 19005 * we are overridden to use the old 1ms pacing. 
19006 */ 19007 slot = rack->r_ctl.rc_min_to; 19008 } 19009 } 19010 /* We have sent clear the flag */ 19011 rack->r_ent_rec_ns = 0; 19012 if (rack->r_must_retran) { 19013 if (rsm) { 19014 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19015 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19016 /* 19017 * We have retransmitted all. 19018 */ 19019 rack->r_must_retran = 0; 19020 rack->r_ctl.rc_out_at_rto = 0; 19021 } 19022 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19023 /* 19024 * Sending new data will also kill 19025 * the loop. 19026 */ 19027 rack->r_must_retran = 0; 19028 rack->r_ctl.rc_out_at_rto = 0; 19029 } 19030 } 19031 rack->r_ctl.fsb.recwin = recwin; 19032 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19033 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19034 /* 19035 * We hit an RTO and now have past snd_max at the RTO 19036 * clear all the WAS flags. 19037 */ 19038 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19039 } 19040 if (slot) { 19041 /* set the rack tcb into the slot N */ 19042 if ((error == 0) && 19043 rack_use_rfo && 19044 ((flags & (TH_SYN|TH_FIN)) == 0) && 19045 (rsm == NULL) && 19046 (tp->snd_nxt == tp->snd_max) && 19047 (ipoptlen == 0) && 19048 (tp->rcv_numsacks == 0) && 19049 rack->r_fsb_inited && 19050 TCPS_HAVEESTABLISHED(tp->t_state) && 19051 (rack->r_must_retran == 0) && 19052 ((tp->t_flags & TF_NEEDFIN) == 0) && 19053 (len > 0) && (orig_len > 0) && 19054 (orig_len > len) && 19055 ((orig_len - len) >= segsiz) && 19056 ((optlen == 0) || 19057 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19058 /* We can send at least one more MSS using our fsb */ 19059 19060 rack->r_fast_output = 1; 19061 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19062 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19063 rack->r_ctl.fsb.tcp_flags = flags; 19064 rack->r_ctl.fsb.left_to_send = orig_len - len; 19065 if (hw_tls) 19066 rack->r_ctl.fsb.hw_tls = 1; 19067 else 19068 rack->r_ctl.fsb.hw_tls = 0; 19069 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19070 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19071 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19072 (tp->snd_max - tp->snd_una))); 19073 if (rack->r_ctl.fsb.left_to_send < segsiz) 19074 rack->r_fast_output = 0; 19075 else { 19076 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19077 rack->r_ctl.fsb.rfo_apply_push = 1; 19078 else 19079 rack->r_ctl.fsb.rfo_apply_push = 0; 19080 } 19081 } else 19082 rack->r_fast_output = 0; 19083 rack_log_fsb(rack, tp, so, flags, 19084 ipoptlen, orig_len, len, error, 19085 (rsm == NULL), optlen, __LINE__, 2); 19086 } else if (sendalot) { 19087 int ret; 19088 19089 sack_rxmit = 0; 19090 if ((error == 0) && 19091 rack_use_rfo && 19092 ((flags & (TH_SYN|TH_FIN)) == 0) && 19093 (rsm == NULL) && 19094 (ipoptlen == 0) && 19095 (tp->rcv_numsacks == 0) && 19096 (tp->snd_nxt == tp->snd_max) && 19097 (rack->r_must_retran == 0) && 19098 rack->r_fsb_inited && 19099 TCPS_HAVEESTABLISHED(tp->t_state) && 19100 ((tp->t_flags & TF_NEEDFIN) == 0) && 19101 (len > 0) && (orig_len > 0) && 19102 (orig_len > len) && 19103 ((orig_len - len) >= segsiz) && 19104 ((optlen == 0) || 19105 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19106 /* we can use fast_output for more */ 19107 19108 rack->r_fast_output = 1; 19109 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19110 
rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19111 rack->r_ctl.fsb.tcp_flags = flags; 19112 rack->r_ctl.fsb.left_to_send = orig_len - len; 19113 if (hw_tls) 19114 rack->r_ctl.fsb.hw_tls = 1; 19115 else 19116 rack->r_ctl.fsb.hw_tls = 0; 19117 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19118 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19119 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19120 (tp->snd_max - tp->snd_una))); 19121 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19122 rack->r_fast_output = 0; 19123 } 19124 if (rack->r_fast_output) { 19125 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19126 rack->r_ctl.fsb.rfo_apply_push = 1; 19127 else 19128 rack->r_ctl.fsb.rfo_apply_push = 0; 19129 rack_log_fsb(rack, tp, so, flags, 19130 ipoptlen, orig_len, len, error, 19131 (rsm == NULL), optlen, __LINE__, 3); 19132 error = 0; 19133 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19134 if (ret >= 0) 19135 return (ret); 19136 else if (error) 19137 goto nomore; 19138 19139 } 19140 } 19141 goto again; 19142 } 19143 /* Assure when we leave that snd_nxt will point to top */ 19144 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19145 tp->snd_nxt = tp->snd_max; 19146 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19147 #ifdef TCP_ACCOUNTING 19148 crtsc = get_cyclecount() - ts_val; 19149 if (tot_len_this_send) { 19150 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19151 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19152 } 19153 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19154 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19155 } 19156 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19157 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19158 } 19159 } else { 19160 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19161 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19162 } 19163 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19164 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19165 } 19166 } 19167 sched_unpin(); 19168 #endif 19169 if (error == ENOBUFS) 19170 error = 0; 19171 return (error); 19172 } 19173 19174 static void 19175 rack_update_seg(struct tcp_rack *rack) 19176 { 19177 uint32_t orig_val; 19178 19179 orig_val = rack->r_ctl.rc_pace_max_segs; 19180 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19181 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19182 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19183 } 19184 19185 static void 19186 rack_mtu_change(struct tcpcb *tp) 19187 { 19188 /* 19189 * The MSS may have changed 19190 */ 19191 struct tcp_rack *rack; 19192 struct rack_sendmap *rsm; 19193 19194 rack = (struct tcp_rack *)tp->t_fb_ptr; 19195 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19196 /* 19197 * The MTU has changed we need to resend everything 19198 * since all we have sent is lost. We first fix 19199 * up the mtu though. 
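 * After the pacing segment sizes are refreshed below we run the
 * same path as a retransmit timeout (minus the cwnd reduction),
 * flag every outstanding rack_sendmap entry RACK_MUST_RXT and
 * record the flight size and snd_max at this point so that
 * r_must_retran can be cleared once everything has been resent.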
19200 */ 19201 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19202 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19203 rack_remxt_tmr(tp); 19204 rack->r_fast_output = 0; 19205 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19206 rack->r_ctl.rc_sacked); 19207 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19208 rack->r_must_retran = 1; 19209 /* Mark all inflight to needing to be rxt'd */ 19210 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19211 rsm->r_flags |= RACK_MUST_RXT; 19212 } 19213 } 19214 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19215 /* We don't use snd_nxt to retransmit */ 19216 tp->snd_nxt = tp->snd_max; 19217 } 19218 19219 static int 19220 rack_set_profile(struct tcp_rack *rack, int prof) 19221 { 19222 int err = EINVAL; 19223 if (prof == 1) { 19224 /* pace_always=1 */ 19225 if (rack->rc_always_pace == 0) { 19226 if (tcp_can_enable_pacing() == 0) 19227 return (EBUSY); 19228 } 19229 rack->rc_always_pace = 1; 19230 if (rack->use_fixed_rate || rack->gp_ready) 19231 rack_set_cc_pacing(rack); 19232 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19233 rack->rack_attempt_hdwr_pace = 0; 19234 /* cmpack=1 */ 19235 if (rack_use_cmp_acks) 19236 rack->r_use_cmp_ack = 1; 19237 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19238 rack->r_use_cmp_ack) 19239 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19240 /* scwnd=1 */ 19241 rack->rack_enable_scwnd = 1; 19242 /* dynamic=100 */ 19243 rack->rc_gp_dyn_mul = 1; 19244 /* gp_inc_ca */ 19245 rack->r_ctl.rack_per_of_gp_ca = 100; 19246 /* rrr_conf=3 */ 19247 rack->r_rr_config = 3; 19248 /* npush=2 */ 19249 rack->r_ctl.rc_no_push_at_mrtt = 2; 19250 /* fillcw=1 */ 19251 rack->rc_pace_to_cwnd = 1; 19252 rack->rc_pace_fill_if_rttin_range = 0; 19253 rack->rtt_limit_mul = 0; 19254 /* noprr=1 */ 19255 rack->rack_no_prr = 1; 19256 /* lscwnd=1 */ 19257 rack->r_limit_scw = 1; 19258 /* gp_inc_rec */ 19259 rack->r_ctl.rack_per_of_gp_rec = 90; 19260 err = 0; 19261 19262 } else if (prof == 3) { 19263 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19264 /* pace_always=1 */ 19265 if (rack->rc_always_pace == 0) { 19266 if (tcp_can_enable_pacing() == 0) 19267 return (EBUSY); 19268 } 19269 rack->rc_always_pace = 1; 19270 if (rack->use_fixed_rate || rack->gp_ready) 19271 rack_set_cc_pacing(rack); 19272 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19273 rack->rack_attempt_hdwr_pace = 0; 19274 /* cmpack=1 */ 19275 if (rack_use_cmp_acks) 19276 rack->r_use_cmp_ack = 1; 19277 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19278 rack->r_use_cmp_ack) 19279 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19280 /* scwnd=1 */ 19281 rack->rack_enable_scwnd = 1; 19282 /* dynamic=100 */ 19283 rack->rc_gp_dyn_mul = 1; 19284 /* gp_inc_ca */ 19285 rack->r_ctl.rack_per_of_gp_ca = 100; 19286 /* rrr_conf=3 */ 19287 rack->r_rr_config = 3; 19288 /* npush=2 */ 19289 rack->r_ctl.rc_no_push_at_mrtt = 2; 19290 /* fillcw=2 */ 19291 rack->rc_pace_to_cwnd = 1; 19292 rack->r_fill_less_agg = 1; 19293 rack->rc_pace_fill_if_rttin_range = 0; 19294 rack->rtt_limit_mul = 0; 19295 /* noprr=1 */ 19296 rack->rack_no_prr = 1; 19297 /* lscwnd=1 */ 19298 rack->r_limit_scw = 1; 19299 /* gp_inc_rec */ 19300 rack->r_ctl.rack_per_of_gp_rec = 90; 19301 err = 0; 19302 19303 19304 } else if (prof == 2) { 19305 /* cmpack=1 */ 19306 if (rack->rc_always_pace == 0) { 19307 if (tcp_can_enable_pacing() == 0) 19308 return (EBUSY); 19309 } 19310 rack->rc_always_pace = 1; 19311 if (rack->use_fixed_rate || rack->gp_ready) 19312 
rack_set_cc_pacing(rack); 19313 rack->r_use_cmp_ack = 1; 19314 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19315 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19316 /* pace_always=1 */ 19317 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19318 /* scwnd=1 */ 19319 rack->rack_enable_scwnd = 1; 19320 /* dynamic=100 */ 19321 rack->rc_gp_dyn_mul = 1; 19322 rack->r_ctl.rack_per_of_gp_ca = 100; 19323 /* rrr_conf=3 */ 19324 rack->r_rr_config = 3; 19325 /* npush=2 */ 19326 rack->r_ctl.rc_no_push_at_mrtt = 2; 19327 /* fillcw=1 */ 19328 rack->rc_pace_to_cwnd = 1; 19329 rack->rc_pace_fill_if_rttin_range = 0; 19330 rack->rtt_limit_mul = 0; 19331 /* noprr=1 */ 19332 rack->rack_no_prr = 1; 19333 /* lscwnd=0 */ 19334 rack->r_limit_scw = 0; 19335 err = 0; 19336 } else if (prof == 0) { 19337 /* This changes things back to the default settings */ 19338 err = 0; 19339 if (rack->rc_always_pace) { 19340 tcp_decrement_paced_conn(); 19341 rack_undo_cc_pacing(rack); 19342 rack->rc_always_pace = 0; 19343 } 19344 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19345 rack->rc_always_pace = 1; 19346 if (rack->use_fixed_rate || rack->gp_ready) 19347 rack_set_cc_pacing(rack); 19348 } else 19349 rack->rc_always_pace = 0; 19350 if (rack_dsack_std_based & 0x1) { 19351 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19352 rack->rc_rack_tmr_std_based = 1; 19353 } 19354 if (rack_dsack_std_based & 0x2) { 19355 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19356 rack->rc_rack_use_dsack = 1; 19357 } 19358 if (rack_use_cmp_acks) 19359 rack->r_use_cmp_ack = 1; 19360 else 19361 rack->r_use_cmp_ack = 0; 19362 if (rack_disable_prr) 19363 rack->rack_no_prr = 1; 19364 else 19365 rack->rack_no_prr = 0; 19366 if (rack_gp_no_rec_chg) 19367 rack->rc_gp_no_rec_chg = 1; 19368 else 19369 rack->rc_gp_no_rec_chg = 0; 19370 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19371 rack->r_mbuf_queue = 1; 19372 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19373 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19374 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19375 } else { 19376 rack->r_mbuf_queue = 0; 19377 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19378 } 19379 if (rack_enable_shared_cwnd) 19380 rack->rack_enable_scwnd = 1; 19381 else 19382 rack->rack_enable_scwnd = 0; 19383 if (rack_do_dyn_mul) { 19384 /* When dynamic adjustment is on CA needs to start at 100% */ 19385 rack->rc_gp_dyn_mul = 1; 19386 if (rack_do_dyn_mul >= 100) 19387 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19388 } else { 19389 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19390 rack->rc_gp_dyn_mul = 0; 19391 } 19392 rack->r_rr_config = 0; 19393 rack->r_ctl.rc_no_push_at_mrtt = 0; 19394 rack->rc_pace_to_cwnd = 0; 19395 rack->rc_pace_fill_if_rttin_range = 0; 19396 rack->rtt_limit_mul = 0; 19397 19398 if (rack_enable_hw_pacing) 19399 rack->rack_hdw_pace_ena = 1; 19400 else 19401 rack->rack_hdw_pace_ena = 0; 19402 if (rack_disable_prr) 19403 rack->rack_no_prr = 1; 19404 else 19405 rack->rack_no_prr = 0; 19406 if (rack_limits_scwnd) 19407 rack->r_limit_scw = 1; 19408 else 19409 rack->r_limit_scw = 0; 19410 err = 0; 19411 } 19412 return (err); 19413 } 19414 19415 static int 19416 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19417 { 19418 struct deferred_opt_list *dol; 19419 19420 dol = malloc(sizeof(struct deferred_opt_list), 19421 M_TCPFSB, M_NOWAIT|M_ZERO); 19422 if (dol == NULL) { 19423 /* 19424 * No space yikes -- fail out.. 
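 * Returning 0 tells rack_set_sockopt() that the option could not
 * be queued; it reports that to the caller as ENOMEM.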
19425 */ 19426 return (0); 19427 } 19428 dol->optname = sopt_name; 19429 dol->optval = loptval; 19430 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19431 return (1); 19432 } 19433 19434 static int 19435 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19436 uint32_t optval, uint64_t loptval) 19437 { 19438 struct epoch_tracker et; 19439 struct sockopt sopt; 19440 struct cc_newreno_opts opt; 19441 struct inpcb *inp = tptoinpcb(tp); 19442 uint64_t val; 19443 int error = 0; 19444 uint16_t ca, ss; 19445 19446 switch (sopt_name) { 19447 19448 case TCP_RACK_DSACK_OPT: 19449 RACK_OPTS_INC(tcp_rack_dsack_opt); 19450 if (optval & 0x1) { 19451 rack->rc_rack_tmr_std_based = 1; 19452 } else { 19453 rack->rc_rack_tmr_std_based = 0; 19454 } 19455 if (optval & 0x2) { 19456 rack->rc_rack_use_dsack = 1; 19457 } else { 19458 rack->rc_rack_use_dsack = 0; 19459 } 19460 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19461 break; 19462 case TCP_RACK_PACING_BETA: 19463 RACK_OPTS_INC(tcp_rack_beta); 19464 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19465 /* This only works for newreno. */ 19466 error = EINVAL; 19467 break; 19468 } 19469 if (rack->rc_pacing_cc_set) { 19470 /* 19471 * Set them into the real CC module 19472 * whats in the rack pcb is the old values 19473 * to be used on restoral/ 19474 */ 19475 sopt.sopt_dir = SOPT_SET; 19476 opt.name = CC_NEWRENO_BETA; 19477 opt.val = optval; 19478 if (CC_ALGO(tp)->ctl_output != NULL) 19479 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19480 else { 19481 error = ENOENT; 19482 break; 19483 } 19484 } else { 19485 /* 19486 * Not pacing yet so set it into our local 19487 * rack pcb storage. 19488 */ 19489 rack->r_ctl.rc_saved_beta.beta = optval; 19490 } 19491 break; 19492 case TCP_RACK_TIMER_SLOP: 19493 RACK_OPTS_INC(tcp_rack_timer_slop); 19494 rack->r_ctl.timer_slop = optval; 19495 if (rack->rc_tp->t_srtt) { 19496 /* 19497 * If we have an SRTT lets update t_rxtcur 19498 * to have the new slop. 19499 */ 19500 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19501 rack_rto_min, rack_rto_max, 19502 rack->r_ctl.timer_slop); 19503 } 19504 break; 19505 case TCP_RACK_PACING_BETA_ECN: 19506 RACK_OPTS_INC(tcp_rack_beta_ecn); 19507 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19508 /* This only works for newreno. */ 19509 error = EINVAL; 19510 break; 19511 } 19512 if (rack->rc_pacing_cc_set) { 19513 /* 19514 * Set them into the real CC module 19515 * whats in the rack pcb is the old values 19516 * to be used on restoral/ 19517 */ 19518 sopt.sopt_dir = SOPT_SET; 19519 opt.name = CC_NEWRENO_BETA_ECN; 19520 opt.val = optval; 19521 if (CC_ALGO(tp)->ctl_output != NULL) 19522 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19523 else 19524 error = ENOENT; 19525 } else { 19526 /* 19527 * Not pacing yet so set it into our local 19528 * rack pcb storage. 
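 * The saved beta_ecn is applied to the CC module later, once
 * pacing is actually enabled and rack_set_cc_pacing() installs
 * the saved values.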
19529 */ 19530 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19531 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19532 } 19533 break; 19534 case TCP_DEFER_OPTIONS: 19535 RACK_OPTS_INC(tcp_defer_opt); 19536 if (optval) { 19537 if (rack->gp_ready) { 19538 /* Too late */ 19539 error = EINVAL; 19540 break; 19541 } 19542 rack->defer_options = 1; 19543 } else 19544 rack->defer_options = 0; 19545 break; 19546 case TCP_RACK_MEASURE_CNT: 19547 RACK_OPTS_INC(tcp_rack_measure_cnt); 19548 if (optval && (optval <= 0xff)) { 19549 rack->r_ctl.req_measurements = optval; 19550 } else 19551 error = EINVAL; 19552 break; 19553 case TCP_REC_ABC_VAL: 19554 RACK_OPTS_INC(tcp_rec_abc_val); 19555 if (optval > 0) 19556 rack->r_use_labc_for_rec = 1; 19557 else 19558 rack->r_use_labc_for_rec = 0; 19559 break; 19560 case TCP_RACK_ABC_VAL: 19561 RACK_OPTS_INC(tcp_rack_abc_val); 19562 if ((optval > 0) && (optval < 255)) 19563 rack->rc_labc = optval; 19564 else 19565 error = EINVAL; 19566 break; 19567 case TCP_HDWR_UP_ONLY: 19568 RACK_OPTS_INC(tcp_pacing_up_only); 19569 if (optval) 19570 rack->r_up_only = 1; 19571 else 19572 rack->r_up_only = 0; 19573 break; 19574 case TCP_PACING_RATE_CAP: 19575 RACK_OPTS_INC(tcp_pacing_rate_cap); 19576 rack->r_ctl.bw_rate_cap = loptval; 19577 break; 19578 case TCP_RACK_PROFILE: 19579 RACK_OPTS_INC(tcp_profile); 19580 error = rack_set_profile(rack, optval); 19581 break; 19582 case TCP_USE_CMP_ACKS: 19583 RACK_OPTS_INC(tcp_use_cmp_acks); 19584 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19585 /* You can't turn it off once its on! */ 19586 error = EINVAL; 19587 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19588 rack->r_use_cmp_ack = 1; 19589 rack->r_mbuf_queue = 1; 19590 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19591 } 19592 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19593 inp->inp_flags2 |= INP_MBUF_ACKCMP; 19594 break; 19595 case TCP_SHARED_CWND_TIME_LIMIT: 19596 RACK_OPTS_INC(tcp_lscwnd); 19597 if (optval) 19598 rack->r_limit_scw = 1; 19599 else 19600 rack->r_limit_scw = 0; 19601 break; 19602 case TCP_RACK_PACE_TO_FILL: 19603 RACK_OPTS_INC(tcp_fillcw); 19604 if (optval == 0) 19605 rack->rc_pace_to_cwnd = 0; 19606 else { 19607 rack->rc_pace_to_cwnd = 1; 19608 if (optval > 1) 19609 rack->r_fill_less_agg = 1; 19610 } 19611 if ((optval >= rack_gp_rtt_maxmul) && 19612 rack_gp_rtt_maxmul && 19613 (optval < 0xf)) { 19614 rack->rc_pace_fill_if_rttin_range = 1; 19615 rack->rtt_limit_mul = optval; 19616 } else { 19617 rack->rc_pace_fill_if_rttin_range = 0; 19618 rack->rtt_limit_mul = 0; 19619 } 19620 break; 19621 case TCP_RACK_NO_PUSH_AT_MAX: 19622 RACK_OPTS_INC(tcp_npush); 19623 if (optval == 0) 19624 rack->r_ctl.rc_no_push_at_mrtt = 0; 19625 else if (optval < 0xff) 19626 rack->r_ctl.rc_no_push_at_mrtt = optval; 19627 else 19628 error = EINVAL; 19629 break; 19630 case TCP_SHARED_CWND_ENABLE: 19631 RACK_OPTS_INC(tcp_rack_scwnd); 19632 if (optval == 0) 19633 rack->rack_enable_scwnd = 0; 19634 else 19635 rack->rack_enable_scwnd = 1; 19636 break; 19637 case TCP_RACK_MBUF_QUEUE: 19638 /* Now do we use the LRO mbuf-queue feature */ 19639 RACK_OPTS_INC(tcp_rack_mbufq); 19640 if (optval || rack->r_use_cmp_ack) 19641 rack->r_mbuf_queue = 1; 19642 else 19643 rack->r_mbuf_queue = 0; 19644 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19645 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19646 else 19647 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19648 break; 19649 case TCP_RACK_NONRXT_CFG_RATE: 19650 
RACK_OPTS_INC(tcp_rack_cfg_rate); 19651 if (optval == 0) 19652 rack->rack_rec_nonrxt_use_cr = 0; 19653 else 19654 rack->rack_rec_nonrxt_use_cr = 1; 19655 break; 19656 case TCP_NO_PRR: 19657 RACK_OPTS_INC(tcp_rack_noprr); 19658 if (optval == 0) 19659 rack->rack_no_prr = 0; 19660 else if (optval == 1) 19661 rack->rack_no_prr = 1; 19662 else if (optval == 2) 19663 rack->no_prr_addback = 1; 19664 else 19665 error = EINVAL; 19666 break; 19667 case TCP_TIMELY_DYN_ADJ: 19668 RACK_OPTS_INC(tcp_timely_dyn); 19669 if (optval == 0) 19670 rack->rc_gp_dyn_mul = 0; 19671 else { 19672 rack->rc_gp_dyn_mul = 1; 19673 if (optval >= 100) { 19674 /* 19675 * If the user sets something 100 or more 19676 * its the gp_ca value. 19677 */ 19678 rack->r_ctl.rack_per_of_gp_ca = optval; 19679 } 19680 } 19681 break; 19682 case TCP_RACK_DO_DETECTION: 19683 RACK_OPTS_INC(tcp_rack_do_detection); 19684 if (optval == 0) 19685 rack->do_detection = 0; 19686 else 19687 rack->do_detection = 1; 19688 break; 19689 case TCP_RACK_TLP_USE: 19690 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19691 error = EINVAL; 19692 break; 19693 } 19694 RACK_OPTS_INC(tcp_tlp_use); 19695 rack->rack_tlp_threshold_use = optval; 19696 break; 19697 case TCP_RACK_TLP_REDUCE: 19698 /* RACK TLP cwnd reduction (bool) */ 19699 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19700 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19701 break; 19702 /* Pacing related ones */ 19703 case TCP_RACK_PACE_ALWAYS: 19704 /* 19705 * zero is old rack method, 1 is new 19706 * method using a pacing rate. 19707 */ 19708 RACK_OPTS_INC(tcp_rack_pace_always); 19709 if (optval > 0) { 19710 if (rack->rc_always_pace) { 19711 error = EALREADY; 19712 break; 19713 } else if (tcp_can_enable_pacing()) { 19714 rack->rc_always_pace = 1; 19715 if (rack->use_fixed_rate || rack->gp_ready) 19716 rack_set_cc_pacing(rack); 19717 } 19718 else { 19719 error = ENOSPC; 19720 break; 19721 } 19722 } else { 19723 if (rack->rc_always_pace) { 19724 tcp_decrement_paced_conn(); 19725 rack->rc_always_pace = 0; 19726 rack_undo_cc_pacing(rack); 19727 } 19728 } 19729 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19730 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19731 else 19732 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19733 /* A rate may be set irate or other, if so set seg size */ 19734 rack_update_seg(rack); 19735 break; 19736 case TCP_BBR_RACK_INIT_RATE: 19737 RACK_OPTS_INC(tcp_initial_rate); 19738 val = optval; 19739 /* Change from kbits per second to bytes per second */ 19740 val *= 1000; 19741 val /= 8; 19742 rack->r_ctl.init_rate = val; 19743 if (rack->rc_init_win != rack_default_init_window) { 19744 uint32_t win, snt; 19745 19746 /* 19747 * Options don't always get applied 19748 * in the order you think. So in order 19749 * to assure we update a cwnd we need 19750 * to check and see if we are still 19751 * where we should raise the cwnd. 
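 * Illustrative example (numbers invented): if rc_init_window()
 * works out to 10 * 1460 = 14600 bytes and only 2920 bytes have
 * been sent past the ISS, the check below lifts snd_cwnd to
 * 14600, provided it is still below that.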
19752 */ 19753 win = rc_init_window(rack); 19754 if (SEQ_GT(tp->snd_max, tp->iss)) 19755 snt = tp->snd_max - tp->iss; 19756 else 19757 snt = 0; 19758 if ((snt < win) && 19759 (tp->snd_cwnd < win)) 19760 tp->snd_cwnd = win; 19761 } 19762 if (rack->rc_always_pace) 19763 rack_update_seg(rack); 19764 break; 19765 case TCP_BBR_IWINTSO: 19766 RACK_OPTS_INC(tcp_initial_win); 19767 if (optval && (optval <= 0xff)) { 19768 uint32_t win, snt; 19769 19770 rack->rc_init_win = optval; 19771 win = rc_init_window(rack); 19772 if (SEQ_GT(tp->snd_max, tp->iss)) 19773 snt = tp->snd_max - tp->iss; 19774 else 19775 snt = 0; 19776 if ((snt < win) && 19777 (tp->t_srtt | 19778 #ifdef NETFLIX_PEAKRATE 19779 tp->t_maxpeakrate | 19780 #endif 19781 rack->r_ctl.init_rate)) { 19782 /* 19783 * We are not past the initial window 19784 * and we have some bases for pacing, 19785 * so we need to possibly adjust up 19786 * the cwnd. Note even if we don't set 19787 * the cwnd, its still ok to raise the rc_init_win 19788 * which can be used coming out of idle when we 19789 * would have a rate. 19790 */ 19791 if (tp->snd_cwnd < win) 19792 tp->snd_cwnd = win; 19793 } 19794 if (rack->rc_always_pace) 19795 rack_update_seg(rack); 19796 } else 19797 error = EINVAL; 19798 break; 19799 case TCP_RACK_FORCE_MSEG: 19800 RACK_OPTS_INC(tcp_rack_force_max_seg); 19801 if (optval) 19802 rack->rc_force_max_seg = 1; 19803 else 19804 rack->rc_force_max_seg = 0; 19805 break; 19806 case TCP_RACK_PACE_MAX_SEG: 19807 /* Max segments size in a pace in bytes */ 19808 RACK_OPTS_INC(tcp_rack_max_seg); 19809 rack->rc_user_set_max_segs = optval; 19810 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19811 break; 19812 case TCP_RACK_PACE_RATE_REC: 19813 /* Set the fixed pacing rate in Bytes per second ca */ 19814 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19815 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19816 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19817 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19818 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19819 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19820 rack->use_fixed_rate = 1; 19821 if (rack->rc_always_pace) 19822 rack_set_cc_pacing(rack); 19823 rack_log_pacing_delay_calc(rack, 19824 rack->r_ctl.rc_fixed_pacing_rate_ss, 19825 rack->r_ctl.rc_fixed_pacing_rate_ca, 19826 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19827 __LINE__, NULL,0); 19828 break; 19829 19830 case TCP_RACK_PACE_RATE_SS: 19831 /* Set the fixed pacing rate in Bytes per second ca */ 19832 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19833 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19834 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19835 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19836 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19837 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19838 rack->use_fixed_rate = 1; 19839 if (rack->rc_always_pace) 19840 rack_set_cc_pacing(rack); 19841 rack_log_pacing_delay_calc(rack, 19842 rack->r_ctl.rc_fixed_pacing_rate_ss, 19843 rack->r_ctl.rc_fixed_pacing_rate_ca, 19844 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19845 __LINE__, NULL, 0); 19846 break; 19847 19848 case TCP_RACK_PACE_RATE_CA: 19849 /* Set the fixed pacing rate in Bytes per second ca */ 19850 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19851 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19852 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19853 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19854 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19855 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19856 rack->use_fixed_rate = 1; 19857 
if (rack->rc_always_pace) 19858 rack_set_cc_pacing(rack); 19859 rack_log_pacing_delay_calc(rack, 19860 rack->r_ctl.rc_fixed_pacing_rate_ss, 19861 rack->r_ctl.rc_fixed_pacing_rate_ca, 19862 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19863 __LINE__, NULL, 0); 19864 break; 19865 case TCP_RACK_GP_INCREASE_REC: 19866 RACK_OPTS_INC(tcp_gp_inc_rec); 19867 rack->r_ctl.rack_per_of_gp_rec = optval; 19868 rack_log_pacing_delay_calc(rack, 19869 rack->r_ctl.rack_per_of_gp_ss, 19870 rack->r_ctl.rack_per_of_gp_ca, 19871 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19872 __LINE__, NULL, 0); 19873 break; 19874 case TCP_RACK_GP_INCREASE_CA: 19875 RACK_OPTS_INC(tcp_gp_inc_ca); 19876 ca = optval; 19877 if (ca < 100) { 19878 /* 19879 * We don't allow any reduction 19880 * over the GP b/w. 19881 */ 19882 error = EINVAL; 19883 break; 19884 } 19885 rack->r_ctl.rack_per_of_gp_ca = ca; 19886 rack_log_pacing_delay_calc(rack, 19887 rack->r_ctl.rack_per_of_gp_ss, 19888 rack->r_ctl.rack_per_of_gp_ca, 19889 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19890 __LINE__, NULL, 0); 19891 break; 19892 case TCP_RACK_GP_INCREASE_SS: 19893 RACK_OPTS_INC(tcp_gp_inc_ss); 19894 ss = optval; 19895 if (ss < 100) { 19896 /* 19897 * We don't allow any reduction 19898 * over the GP b/w. 19899 */ 19900 error = EINVAL; 19901 break; 19902 } 19903 rack->r_ctl.rack_per_of_gp_ss = ss; 19904 rack_log_pacing_delay_calc(rack, 19905 rack->r_ctl.rack_per_of_gp_ss, 19906 rack->r_ctl.rack_per_of_gp_ca, 19907 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19908 __LINE__, NULL, 0); 19909 break; 19910 case TCP_RACK_RR_CONF: 19911 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 19912 if (optval && optval <= 3) 19913 rack->r_rr_config = optval; 19914 else 19915 rack->r_rr_config = 0; 19916 break; 19917 case TCP_HDWR_RATE_CAP: 19918 RACK_OPTS_INC(tcp_hdwr_rate_cap); 19919 if (optval) { 19920 if (rack->r_rack_hw_rate_caps == 0) 19921 rack->r_rack_hw_rate_caps = 1; 19922 else 19923 error = EALREADY; 19924 } else { 19925 rack->r_rack_hw_rate_caps = 0; 19926 } 19927 break; 19928 case TCP_BBR_HDWR_PACE: 19929 RACK_OPTS_INC(tcp_hdwr_pacing); 19930 if (optval){ 19931 if (rack->rack_hdrw_pacing == 0) { 19932 rack->rack_hdw_pace_ena = 1; 19933 rack->rack_attempt_hdwr_pace = 0; 19934 } else 19935 error = EALREADY; 19936 } else { 19937 rack->rack_hdw_pace_ena = 0; 19938 #ifdef RATELIMIT 19939 if (rack->r_ctl.crte != NULL) { 19940 rack->rack_hdrw_pacing = 0; 19941 rack->rack_attempt_hdwr_pace = 0; 19942 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 19943 rack->r_ctl.crte = NULL; 19944 } 19945 #endif 19946 } 19947 break; 19948 /* End Pacing related ones */ 19949 case TCP_RACK_PRR_SENDALOT: 19950 /* Allow PRR to send more than one seg */ 19951 RACK_OPTS_INC(tcp_rack_prr_sendalot); 19952 rack->r_ctl.rc_prr_sendalot = optval; 19953 break; 19954 case TCP_RACK_MIN_TO: 19955 /* Minimum time between rack t-o's in ms */ 19956 RACK_OPTS_INC(tcp_rack_min_to); 19957 rack->r_ctl.rc_min_to = optval; 19958 break; 19959 case TCP_RACK_EARLY_SEG: 19960 /* If early recovery max segments */ 19961 RACK_OPTS_INC(tcp_rack_early_seg); 19962 rack->r_ctl.rc_early_recovery_segs = optval; 19963 break; 19964 case TCP_RACK_ENABLE_HYSTART: 19965 { 19966 if (optval) { 19967 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 19968 if (rack_do_hystart > RACK_HYSTART_ON) 19969 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 19970 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 19971 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 19972 } else { 19973 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 19974 
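/*
 * Note that on the enable side above the extra flags come from
 * the rack_do_hystart sysctl level, not from optval itself:
 * above RACK_HYSTART_ON it also sets CCF_HYSTART_CAN_SH_CWND,
 * and above RACK_HYSTART_ON_W_SC it sets CCF_HYSTART_CONS_SSTH
 * as well.
 */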
} 19975 } 19976 break; 19977 case TCP_RACK_REORD_THRESH: 19978 /* RACK reorder threshold (shift amount) */ 19979 RACK_OPTS_INC(tcp_rack_reord_thresh); 19980 if ((optval > 0) && (optval < 31)) 19981 rack->r_ctl.rc_reorder_shift = optval; 19982 else 19983 error = EINVAL; 19984 break; 19985 case TCP_RACK_REORD_FADE: 19986 /* Does reordering fade after ms time */ 19987 RACK_OPTS_INC(tcp_rack_reord_fade); 19988 rack->r_ctl.rc_reorder_fade = optval; 19989 break; 19990 case TCP_RACK_TLP_THRESH: 19991 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 19992 RACK_OPTS_INC(tcp_rack_tlp_thresh); 19993 if (optval) 19994 rack->r_ctl.rc_tlp_threshold = optval; 19995 else 19996 error = EINVAL; 19997 break; 19998 case TCP_BBR_USE_RACK_RR: 19999 RACK_OPTS_INC(tcp_rack_rr); 20000 if (optval) 20001 rack->use_rack_rr = 1; 20002 else 20003 rack->use_rack_rr = 0; 20004 break; 20005 case TCP_FAST_RSM_HACK: 20006 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20007 if (optval) 20008 rack->fast_rsm_hack = 1; 20009 else 20010 rack->fast_rsm_hack = 0; 20011 break; 20012 case TCP_RACK_PKT_DELAY: 20013 /* RACK added ms i.e. rack-rtt + reord + N */ 20014 RACK_OPTS_INC(tcp_rack_pkt_delay); 20015 rack->r_ctl.rc_pkt_delay = optval; 20016 break; 20017 case TCP_DELACK: 20018 RACK_OPTS_INC(tcp_rack_delayed_ack); 20019 if (optval == 0) 20020 tp->t_delayed_ack = 0; 20021 else 20022 tp->t_delayed_ack = 1; 20023 if (tp->t_flags & TF_DELACK) { 20024 tp->t_flags &= ~TF_DELACK; 20025 tp->t_flags |= TF_ACKNOW; 20026 NET_EPOCH_ENTER(et); 20027 rack_output(tp); 20028 NET_EPOCH_EXIT(et); 20029 } 20030 break; 20031 20032 case TCP_BBR_RACK_RTT_USE: 20033 RACK_OPTS_INC(tcp_rack_rtt_use); 20034 if ((optval != USE_RTT_HIGH) && 20035 (optval != USE_RTT_LOW) && 20036 (optval != USE_RTT_AVG)) 20037 error = EINVAL; 20038 else 20039 rack->r_ctl.rc_rate_sample_method = optval; 20040 break; 20041 case TCP_DATA_AFTER_CLOSE: 20042 RACK_OPTS_INC(tcp_data_after_close); 20043 if (optval) 20044 rack->rc_allow_data_af_clo = 1; 20045 else 20046 rack->rc_allow_data_af_clo = 0; 20047 break; 20048 default: 20049 break; 20050 } 20051 #ifdef NETFLIX_STATS 20052 tcp_log_socket_option(tp, sopt_name, optval, error); 20053 #endif 20054 return (error); 20055 } 20056 20057 20058 static void 20059 rack_apply_deferred_options(struct tcp_rack *rack) 20060 { 20061 struct deferred_opt_list *dol, *sdol; 20062 uint32_t s_optval; 20063 20064 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20065 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20066 /* Disadvantage of deferal is you loose the error return */ 20067 s_optval = (uint32_t)dol->optval; 20068 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20069 free(dol, M_TCPDO); 20070 } 20071 } 20072 20073 static void 20074 rack_hw_tls_change(struct tcpcb *tp, int chg) 20075 { 20076 /* Update HW tls state */ 20077 struct tcp_rack *rack; 20078 20079 rack = (struct tcp_rack *)tp->t_fb_ptr; 20080 if (chg) 20081 rack->r_ctl.fsb.hw_tls = 1; 20082 else 20083 rack->r_ctl.fsb.hw_tls = 0; 20084 } 20085 20086 static int 20087 rack_pru_options(struct tcpcb *tp, int flags) 20088 { 20089 if (flags & PRUS_OOB) 20090 return (EOPNOTSUPP); 20091 return (0); 20092 } 20093 20094 static struct tcp_function_block __tcp_rack = { 20095 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20096 .tfb_tcp_output = rack_output, 20097 .tfb_do_queued_segments = ctf_do_queued_segments, 20098 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20099 .tfb_tcp_do_segment = rack_do_segment, 20100 .tfb_tcp_ctloutput = rack_ctloutput, 20101 
.tfb_tcp_fb_init = rack_init, 20102 .tfb_tcp_fb_fini = rack_fini, 20103 .tfb_tcp_timer_stop_all = rack_stopall, 20104 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20105 .tfb_tcp_handoff_ok = rack_handoff_ok, 20106 .tfb_tcp_mtu_chg = rack_mtu_change, 20107 .tfb_pru_options = rack_pru_options, 20108 .tfb_hwtls_change = rack_hw_tls_change, 20109 .tfb_compute_pipe = rack_compute_pipe, 20110 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 20111 }; 20112 20113 /* 20114 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20115 * socket option arguments. When it re-acquires the lock after the copy, it 20116 * has to revalidate that the connection is still valid for the socket 20117 * option. 20118 */ 20119 static int 20120 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt) 20121 { 20122 #ifdef INET6 20123 struct ip6_hdr *ip6; 20124 #endif 20125 #ifdef INET 20126 struct ip *ip; 20127 #endif 20128 struct tcpcb *tp; 20129 struct tcp_rack *rack; 20130 uint64_t loptval; 20131 int32_t error = 0, optval; 20132 20133 tp = intotcpcb(inp); 20134 rack = (struct tcp_rack *)tp->t_fb_ptr; 20135 if (rack == NULL) { 20136 INP_WUNLOCK(inp); 20137 return (EINVAL); 20138 } 20139 #ifdef INET6 20140 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20141 #endif 20142 #ifdef INET 20143 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20144 #endif 20145 20146 switch (sopt->sopt_level) { 20147 #ifdef INET6 20148 case IPPROTO_IPV6: 20149 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20150 switch (sopt->sopt_name) { 20151 case IPV6_USE_MIN_MTU: 20152 tcp6_use_min_mtu(tp); 20153 break; 20154 case IPV6_TCLASS: 20155 /* 20156 * The DSCP codepoint has changed, update the fsb. 20157 */ 20158 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20159 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20160 break; 20161 } 20162 INP_WUNLOCK(inp); 20163 return (0); 20164 #endif 20165 #ifdef INET 20166 case IPPROTO_IP: 20167 switch (sopt->sopt_name) { 20168 case IP_TOS: 20169 /* 20170 * The DSCP codepoint has changed, update the fsb. 20171 */ 20172 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20173 break; 20174 case IP_TTL: 20175 /* 20176 * The TTL has changed, update the fsb. 20177 */ 20178 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20179 break; 20180 } 20181 INP_WUNLOCK(inp); 20182 return (0); 20183 #endif 20184 } 20185 20186 switch (sopt->sopt_name) { 20187 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20188 /* Pacing related ones */ 20189 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20190 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20191 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20192 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20193 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20194 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20195 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20196 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20197 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20198 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20199 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20200 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20201 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20202 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20203 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20204 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20205 /* End pacing related */ 20206 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20207 case TCP_DELACK: /* URL:delack (in base TCP i.e. 
tcp_hints along with cc etc ) */ 20208 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20209 case TCP_RACK_MIN_TO: /* URL:min_to */ 20210 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20211 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20212 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20213 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20214 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20215 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20216 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20217 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20218 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20219 case TCP_NO_PRR: /* URL:noprr */ 20220 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20221 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20222 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20223 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20224 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20225 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20226 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20227 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20228 case TCP_RACK_PROFILE: /* URL:profile */ 20229 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20230 case TCP_RACK_ABC_VAL: /* URL:labc */ 20231 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20232 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20233 case TCP_DEFER_OPTIONS: /* URL:defer */ 20234 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20235 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20236 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20237 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20238 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20239 break; 20240 default: 20241 /* Filter off all unknown options to the base stack */ 20242 return (tcp_default_ctloutput(inp, sopt)); 20243 break; 20244 } 20245 INP_WUNLOCK(inp); 20246 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20247 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20248 /* 20249 * We truncate it down to 32 bits for the socket-option trace this 20250 * means rates > 34Gbps won't show right, but thats probably ok. 
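 * (2^32 bytes per second is roughly 4.3e9 B/s, i.e. about
 * 34 Gbit/s, which is where that figure comes from.)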
20251 */ 20252 optval = (uint32_t)loptval; 20253 } else { 20254 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20255 /* Save it in 64 bit form too */ 20256 loptval = optval; 20257 } 20258 if (error) 20259 return (error); 20260 INP_WLOCK(inp); 20261 if (inp->inp_flags & INP_DROPPED) { 20262 INP_WUNLOCK(inp); 20263 return (ECONNRESET); 20264 } 20265 if (tp->t_fb != &__tcp_rack) { 20266 INP_WUNLOCK(inp); 20267 return (ENOPROTOOPT); 20268 } 20269 if (rack->defer_options && (rack->gp_ready == 0) && 20270 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20271 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20272 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20273 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20274 /* Options are beind deferred */ 20275 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20276 INP_WUNLOCK(inp); 20277 return (0); 20278 } else { 20279 /* No memory to defer, fail */ 20280 INP_WUNLOCK(inp); 20281 return (ENOMEM); 20282 } 20283 } 20284 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20285 INP_WUNLOCK(inp); 20286 return (error); 20287 } 20288 20289 static void 20290 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20291 { 20292 20293 INP_WLOCK_ASSERT(tptoinpcb(tp)); 20294 bzero(ti, sizeof(*ti)); 20295 20296 ti->tcpi_state = tp->t_state; 20297 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20298 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20299 if (tp->t_flags & TF_SACK_PERMIT) 20300 ti->tcpi_options |= TCPI_OPT_SACK; 20301 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20302 ti->tcpi_options |= TCPI_OPT_WSCALE; 20303 ti->tcpi_snd_wscale = tp->snd_scale; 20304 ti->tcpi_rcv_wscale = tp->rcv_scale; 20305 } 20306 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 20307 ti->tcpi_options |= TCPI_OPT_ECN; 20308 if (tp->t_flags & TF_FASTOPEN) 20309 ti->tcpi_options |= TCPI_OPT_TFO; 20310 /* still kept in ticks is t_rcvtime */ 20311 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20312 /* Since we hold everything in precise useconds this is easy */ 20313 ti->tcpi_rtt = tp->t_srtt; 20314 ti->tcpi_rttvar = tp->t_rttvar; 20315 ti->tcpi_rto = tp->t_rxtcur; 20316 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20317 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20318 /* 20319 * FreeBSD-specific extension fields for tcp_info. 20320 */ 20321 ti->tcpi_rcv_space = tp->rcv_wnd; 20322 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20323 ti->tcpi_snd_wnd = tp->snd_wnd; 20324 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 20325 ti->tcpi_snd_nxt = tp->snd_nxt; 20326 ti->tcpi_snd_mss = tp->t_maxseg; 20327 ti->tcpi_rcv_mss = tp->t_maxseg; 20328 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 20329 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 20330 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 20331 #ifdef NETFLIX_STATS 20332 ti->tcpi_total_tlp = tp->t_sndtlppack; 20333 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 20334 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 20335 #endif 20336 #ifdef TCP_OFFLOAD 20337 if (tp->t_flags & TF_TOE) { 20338 ti->tcpi_options |= TCPI_OPT_TOE; 20339 tcp_offload_tcp_info(tp, ti); 20340 } 20341 #endif 20342 } 20343 20344 static int 20345 rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt) 20346 { 20347 struct tcpcb *tp; 20348 struct tcp_rack *rack; 20349 int32_t error, optval; 20350 uint64_t val, loptval; 20351 struct tcp_info ti; 20352 /* 20353 * Because all our options are either boolean or an int, we can just 20354 * pull everything into optval and then unlock and copy. If we ever 20355 * add a option that is not a int, then this will have quite an 20356 * impact to this routine. 20357 */ 20358 error = 0; 20359 tp = intotcpcb(inp); 20360 rack = (struct tcp_rack *)tp->t_fb_ptr; 20361 if (rack == NULL) { 20362 INP_WUNLOCK(inp); 20363 return (EINVAL); 20364 } 20365 switch (sopt->sopt_name) { 20366 case TCP_INFO: 20367 /* First get the info filled */ 20368 rack_fill_info(tp, &ti); 20369 /* Fix up the rtt related fields if needed */ 20370 INP_WUNLOCK(inp); 20371 error = sooptcopyout(sopt, &ti, sizeof ti); 20372 return (error); 20373 /* 20374 * Beta is the congestion control value for NewReno that influences how 20375 * much of a backoff happens when loss is detected. It is normally set 20376 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 20377 * when you exit recovery. 20378 */ 20379 case TCP_RACK_PACING_BETA: 20380 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 20381 error = EINVAL; 20382 else if (rack->rc_pacing_cc_set == 0) 20383 optval = rack->r_ctl.rc_saved_beta.beta; 20384 else { 20385 /* 20386 * Reach out into the CC data and report back what 20387 * I have previously set. Yeah it looks hackish but 20388 * we don't want to report the saved values. 20389 */ 20390 if (tp->t_ccv.cc_data) 20391 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; 20392 else 20393 error = EINVAL; 20394 } 20395 break; 20396 /* 20397 * Beta_ecn is the congestion control value for NewReno that influences how 20398 * much of a backoff happens when a ECN mark is detected. It is normally set 20399 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 20400 * you exit recovery. Note that classic ECN has a beta of 50, it is only 20401 * ABE Ecn that uses this "less" value, but we do too with pacing :) 20402 */ 20403 20404 case TCP_RACK_PACING_BETA_ECN: 20405 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) 20406 error = EINVAL; 20407 else if (rack->rc_pacing_cc_set == 0) 20408 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 20409 else { 20410 /* 20411 * Reach out into the CC data and report back what 20412 * I have previously set. Yeah it looks hackish but 20413 * we don't want to report the saved values. 
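 * (i.e. once pacing has been set up we report the live beta_ecn
 * out of the newreno private data, matching what the set path
 * earlier in this file wrote via CC_NEWRENO_BETA_ECN.)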
20414 */ 20415 if (tp->t_ccv.cc_data) 20416 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; 20417 else 20418 error = EINVAL; 20419 } 20420 break; 20421 case TCP_RACK_DSACK_OPT: 20422 optval = 0; 20423 if (rack->rc_rack_tmr_std_based) { 20424 optval |= 1; 20425 } 20426 if (rack->rc_rack_use_dsack) { 20427 optval |= 2; 20428 } 20429 break; 20430 case TCP_RACK_ENABLE_HYSTART: 20431 { 20432 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { 20433 optval = RACK_HYSTART_ON; 20434 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) 20435 optval = RACK_HYSTART_ON_W_SC; 20436 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) 20437 optval = RACK_HYSTART_ON_W_SC_C; 20438 } else { 20439 optval = RACK_HYSTART_OFF; 20440 } 20441 } 20442 break; 20443 case TCP_FAST_RSM_HACK: 20444 optval = rack->fast_rsm_hack; 20445 break; 20446 case TCP_DEFER_OPTIONS: 20447 optval = rack->defer_options; 20448 break; 20449 case TCP_RACK_MEASURE_CNT: 20450 optval = rack->r_ctl.req_measurements; 20451 break; 20452 case TCP_REC_ABC_VAL: 20453 optval = rack->r_use_labc_for_rec; 20454 break; 20455 case TCP_RACK_ABC_VAL: 20456 optval = rack->rc_labc; 20457 break; 20458 case TCP_HDWR_UP_ONLY: 20459 optval= rack->r_up_only; 20460 break; 20461 case TCP_PACING_RATE_CAP: 20462 loptval = rack->r_ctl.bw_rate_cap; 20463 break; 20464 case TCP_RACK_PROFILE: 20465 /* You cannot retrieve a profile, its write only */ 20466 error = EINVAL; 20467 break; 20468 case TCP_USE_CMP_ACKS: 20469 optval = rack->r_use_cmp_ack; 20470 break; 20471 case TCP_RACK_PACE_TO_FILL: 20472 optval = rack->rc_pace_to_cwnd; 20473 if (optval && rack->r_fill_less_agg) 20474 optval++; 20475 break; 20476 case TCP_RACK_NO_PUSH_AT_MAX: 20477 optval = rack->r_ctl.rc_no_push_at_mrtt; 20478 break; 20479 case TCP_SHARED_CWND_ENABLE: 20480 optval = rack->rack_enable_scwnd; 20481 break; 20482 case TCP_RACK_NONRXT_CFG_RATE: 20483 optval = rack->rack_rec_nonrxt_use_cr; 20484 break; 20485 case TCP_NO_PRR: 20486 if (rack->rack_no_prr == 1) 20487 optval = 1; 20488 else if (rack->no_prr_addback == 1) 20489 optval = 2; 20490 else 20491 optval = 0; 20492 break; 20493 case TCP_RACK_DO_DETECTION: 20494 optval = rack->do_detection; 20495 break; 20496 case TCP_RACK_MBUF_QUEUE: 20497 /* Now do we use the LRO mbuf-queue feature */ 20498 optval = rack->r_mbuf_queue; 20499 break; 20500 case TCP_TIMELY_DYN_ADJ: 20501 optval = rack->rc_gp_dyn_mul; 20502 break; 20503 case TCP_BBR_IWINTSO: 20504 optval = rack->rc_init_win; 20505 break; 20506 case TCP_RACK_TLP_REDUCE: 20507 /* RACK TLP cwnd reduction (bool) */ 20508 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 20509 break; 20510 case TCP_BBR_RACK_INIT_RATE: 20511 val = rack->r_ctl.init_rate; 20512 /* convert to kbits per sec */ 20513 val *= 8; 20514 val /= 1000; 20515 optval = (uint32_t)val; 20516 break; 20517 case TCP_RACK_FORCE_MSEG: 20518 optval = rack->rc_force_max_seg; 20519 break; 20520 case TCP_RACK_PACE_MAX_SEG: 20521 /* Max segments in a pace */ 20522 optval = rack->rc_user_set_max_segs; 20523 break; 20524 case TCP_RACK_PACE_ALWAYS: 20525 /* Use the always pace method */ 20526 optval = rack->rc_always_pace; 20527 break; 20528 case TCP_RACK_PRR_SENDALOT: 20529 /* Allow PRR to send more than one seg */ 20530 optval = rack->r_ctl.rc_prr_sendalot; 20531 break; 20532 case TCP_RACK_MIN_TO: 20533 /* Minimum time between rack t-o's in ms */ 20534 optval = rack->r_ctl.rc_min_to; 20535 break; 20536 case TCP_RACK_EARLY_SEG: 20537 /* If early recovery max segments */ 20538 optval = rack->r_ctl.rc_early_recovery_segs; 20539 break; 20540 case 
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold, i.e. srtt + (srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms, i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}
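/*
 * Illustrative (userland) sketch of querying one of the read-only RACK
 * options handled above; the socket "s" and the error handling are
 * assumptions for the example, not part of this file:
 *
 *	int optval;
 *	socklen_t len = sizeof(optval);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_MAX_SEG,
 *	    &optval, &len) == 0)
 *		printf("pacing max segments: %d\n", optval);
 *
 * TCP_PACING_RATE_CAP is the one exception: it copies out a 64-bit value,
 * so a uint64_t (and its size) must be passed instead of an int.
 */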
static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);

#endif /* #if defined(INET) || defined(INET6) */
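/*
 * Rough usage sketch (not part of this file): once built, the stack is
 * typically loaded with "kldload tcp_rack" and made the system default via
 * the net.inet.tcp.functions_default sysctl; net.inet.tcp.functions_available
 * lists what has been registered. A single connection can also opt in with
 * the stock TCP_FUNCTION_BLK socket option, e.g.:
 *
 *	struct tcp_function_set fs;
 *
 *	memset(&fs, 0, sizeof(fs));
 *	strlcpy(fs.function_set_name, "rack", sizeof(fs.function_set_name));
 *	setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &fs, sizeof(fs));
 *
 * Exact knob names depend on the kernel version in use.
 */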