/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)


MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC 4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
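/*
 * Illustrative sketch only (hypothetical names, never compiled): the
 * decomposition described above boils down to "require SACK, keep the
 * per-state handler in sync with the tcb, then dispatch to it".  None
 * of the identifiers below (other than TF_SACK_PERMIT) are real symbols
 * from this file.
 */
#if 0
static int
example_rack_dispatch(struct tcpcb *tp, struct example_rack *er)
{
	/* Mandate SACK; otherwise hand the connection back to the default stack. */
	if ((tp->t_flags & TF_SACK_PERMIT) == 0)
		return (example_handoff_default(tp));
	/* Keep the per-state handler in step with tp->t_state ... */
	example_sync_state(tp, er);
	/* ... then call the state-specific do_segment, e.g. an established handler. */
	return (er->r_state_handler(tp, er));
}
#endif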
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs without new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;		/* 1; */
static int32_t rack_hw_rate_min = 0;		/* 1500000;*/
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum rack timeout, in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;		/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250 ms in usec's */
static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms; the backoff
 * goes 12 times, so that ends up being a total of 122.850
 * seconds before a connection is killed.
 */
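/*
 * For reference, that figure is just the doubled-up backoff summed from
 * the 30ms base: 30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * 4095 = 122.85s.
 */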
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;		/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* i.e. the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;
counter_u64_t rack_large_ackcmp;
counter_u64_t rack_small_ackcmp;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

#define MAX_NUM_OF_CNTS 13
counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
382 counter_u64_t rack_sack_used_next_merge; 383 counter_u64_t rack_sack_splits; 384 counter_u64_t rack_sack_used_prev_merge; 385 counter_u64_t rack_sack_skipped_acked; 386 counter_u64_t rack_ack_total; 387 counter_u64_t rack_express_sack; 388 counter_u64_t rack_sack_total; 389 counter_u64_t rack_move_none; 390 counter_u64_t rack_move_some; 391 392 counter_u64_t rack_used_tlpmethod; 393 counter_u64_t rack_used_tlpmethod2; 394 counter_u64_t rack_enter_tlp_calc; 395 counter_u64_t rack_input_idle_reduces; 396 counter_u64_t rack_collapsed_win; 397 counter_u64_t rack_tlp_does_nada; 398 counter_u64_t rack_try_scwnd; 399 counter_u64_t rack_hw_pace_init_fail; 400 counter_u64_t rack_hw_pace_lost; 401 counter_u64_t rack_sbsndptr_right; 402 counter_u64_t rack_sbsndptr_wrong; 403 404 /* Temp CPU counters */ 405 counter_u64_t rack_find_high; 406 407 counter_u64_t rack_progress_drops; 408 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 409 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 410 411 412 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 413 414 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 415 (tv) = (value) + slop; \ 416 if ((u_long)(tv) < (u_long)(tvmin)) \ 417 (tv) = (tvmin); \ 418 if ((u_long)(tv) > (u_long)(tvmax)) \ 419 (tv) = (tvmax); \ 420 } while (0) 421 422 static void 423 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 424 425 static int 426 rack_process_ack(struct mbuf *m, struct tcphdr *th, 427 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 428 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 429 static int 430 rack_process_data(struct mbuf *m, struct tcphdr *th, 431 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 432 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 433 static void 434 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 435 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 436 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 437 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 438 uint8_t limit_type); 439 static struct rack_sendmap * 440 rack_check_recovery_mode(struct tcpcb *tp, 441 uint32_t tsused); 442 static void 443 rack_cong_signal(struct tcpcb *tp, 444 uint32_t type, uint32_t ack); 445 static void rack_counter_destroy(void); 446 static int 447 rack_ctloutput(struct socket *so, struct sockopt *sopt, 448 struct inpcb *inp, struct tcpcb *tp); 449 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 450 static void 451 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 452 static void 453 rack_do_segment(struct mbuf *m, struct tcphdr *th, 454 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 455 uint8_t iptos); 456 static void rack_dtor(void *mem, int32_t size, void *arg); 457 static void 458 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 459 uint32_t flex1, uint32_t flex2, 460 uint32_t flex3, uint32_t flex4, 461 uint32_t flex5, uint32_t flex6, 462 uint16_t flex7, uint8_t mod); 463 464 static void 465 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 466 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 467 struct rack_sendmap *rsm, uint8_t quality); 468 static struct rack_sendmap * 469 rack_find_high_nonack(struct tcp_rack *rack, 470 struct rack_sendmap *rsm); 471 static struct 
rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 472 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 473 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 474 static int 475 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 476 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 477 static void 478 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 479 tcp_seq th_ack, int line, uint8_t quality); 480 static uint32_t 481 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 482 static int32_t rack_handoff_ok(struct tcpcb *tp); 483 static int32_t rack_init(struct tcpcb *tp); 484 static void rack_init_sysctls(void); 485 static void 486 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 487 struct tcphdr *th, int entered_rec, int dup_ack_struck); 488 static void 489 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 490 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts, 491 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 492 493 static void 494 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 495 struct rack_sendmap *rsm); 496 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 497 static int32_t rack_output(struct tcpcb *tp); 498 499 static uint32_t 500 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 501 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 502 uint32_t cts, int *moved_two); 503 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 504 static void rack_remxt_tmr(struct tcpcb *tp); 505 static int 506 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 507 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 508 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 509 static int32_t rack_stopall(struct tcpcb *tp); 510 static void 511 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 512 uint32_t delta); 513 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 514 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 515 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 516 static uint32_t 517 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 518 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 519 static void 520 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 521 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 522 static int 523 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 524 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 525 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 526 static int 527 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 528 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 529 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 530 static int 531 rack_do_closing(struct mbuf *m, struct tcphdr *th, 532 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 533 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 534 static int 535 rack_do_established(struct mbuf *m, struct tcphdr *th, 536 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 537 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, 
uint8_t iptos); 538 static int 539 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 540 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 541 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 542 static int 543 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 544 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 545 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 546 static int 547 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 548 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 549 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 550 static int 551 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 552 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 553 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 554 static int 555 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 556 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 557 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 558 static int 559 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 560 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 561 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 562 struct rack_sendmap * 563 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 564 uint32_t tsused); 565 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 566 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 567 static void 568 tcp_rack_partialack(struct tcpcb *tp); 569 static int 570 rack_set_profile(struct tcp_rack *rack, int prof); 571 static void 572 rack_apply_deferred_options(struct tcp_rack *rack); 573 574 int32_t rack_clear_counter=0; 575 576 static void 577 rack_set_cc_pacing(struct tcp_rack *rack) 578 { 579 struct sockopt sopt; 580 struct cc_newreno_opts opt; 581 struct newreno old, *ptr; 582 struct tcpcb *tp; 583 int error; 584 585 if (rack->rc_pacing_cc_set) 586 return; 587 588 tp = rack->rc_tp; 589 if (tp->cc_algo == NULL) { 590 /* Tcb is leaving */ 591 printf("No cc algorithm?\n"); 592 return; 593 } 594 rack->rc_pacing_cc_set = 1; 595 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 596 /* Not new-reno we can't play games with beta! */ 597 goto out; 598 } 599 ptr = ((struct newreno *)tp->ccv->cc_data); 600 if (CC_ALGO(tp)->ctl_output == NULL) { 601 /* Huh, why does new_reno no longer have a set function? */ 602 printf("no ctl_output for algo:%s\n", tp->cc_algo->name); 603 goto out; 604 } 605 if (ptr == NULL) { 606 /* Just the default values */ 607 old.beta = V_newreno_beta_ecn; 608 old.beta_ecn = V_newreno_beta_ecn; 609 old.newreno_flags = 0; 610 } else { 611 old.beta = ptr->beta; 612 old.beta_ecn = ptr->beta_ecn; 613 old.newreno_flags = ptr->newreno_flags; 614 } 615 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 616 sopt.sopt_dir = SOPT_SET; 617 opt.name = CC_NEWRENO_BETA; 618 opt.val = rack->r_ctl.rc_saved_beta.beta; 619 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 620 if (error) { 621 printf("Error returned by ctl_output %d\n", error); 622 goto out; 623 } 624 /* 625 * Hack alert we need to set in our newreno_flags 626 * so that Abe behavior is also applied. 
	 */
	((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		printf("Error returned by ctl_output %d\n", error);
		goto out;
	}
	/* Save off the original values for restoration */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		if (ptr) {
			log.u_bbr.flex1 = ptr->beta;
			log.u_bbr.flex2 = ptr->beta_ecn;
			log.u_bbr.flex3 = ptr->newreno_flags;
		}
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 3;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
			       0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	struct newreno old, *ptr;
	struct tcpcb *tp;

	if (rack->rc_pacing_cc_set == 0)
		return;
	tp = rack->rc_tp;
	rack->rc_pacing_cc_set = 0;
	if (tp->cc_algo == NULL)
		/* Tcb is leaving */
		return;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, nothing to do! */
		return;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (ptr == NULL) {
		/*
		 * This happens at rack_fini() if the
		 * cc module gets freed on us. In that
		 * case we lose our "new" settings but
		 * that's ok, since the tcb is going away anyway.
690 */ 691 return; 692 } 693 /* Grab out our set values */ 694 memcpy(&old, ptr, sizeof(struct newreno)); 695 /* Copy back in the original values */ 696 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 697 /* Now save back the values we had set in (for when pacing is restored) */ 698 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 699 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 700 union tcp_log_stackspecific log; 701 struct timeval tv; 702 703 ptr = ((struct newreno *)tp->ccv->cc_data); 704 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 705 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 706 log.u_bbr.flex1 = ptr->beta; 707 log.u_bbr.flex2 = ptr->beta_ecn; 708 log.u_bbr.flex3 = ptr->newreno_flags; 709 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 710 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 711 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 712 log.u_bbr.flex7 = rack->gp_ready; 713 log.u_bbr.flex7 <<= 1; 714 log.u_bbr.flex7 |= rack->use_fixed_rate; 715 log.u_bbr.flex7 <<= 1; 716 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 717 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 718 log.u_bbr.flex8 = 4; 719 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 720 0, &log, false, NULL, NULL, 0, &tv); 721 } 722 } 723 724 #ifdef NETFLIX_PEAKRATE 725 static inline void 726 rack_update_peakrate_thr(struct tcpcb *tp) 727 { 728 /* Keep in mind that t_maxpeakrate is in B/s. */ 729 uint64_t peak; 730 peak = uqmax((tp->t_maxseg * 2), 731 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 732 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 733 } 734 #endif 735 736 static int 737 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 738 { 739 uint32_t stat; 740 int32_t error; 741 int i; 742 743 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 744 if (error || req->newptr == NULL) 745 return error; 746 747 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 748 if (error) 749 return (error); 750 if (stat == 1) { 751 #ifdef INVARIANTS 752 printf("Clearing RACK counters\n"); 753 #endif 754 counter_u64_zero(rack_badfr); 755 counter_u64_zero(rack_badfr_bytes); 756 counter_u64_zero(rack_rtm_prr_retran); 757 counter_u64_zero(rack_rtm_prr_newdata); 758 counter_u64_zero(rack_timestamp_mismatch); 759 counter_u64_zero(rack_reorder_seen); 760 counter_u64_zero(rack_tlp_tot); 761 counter_u64_zero(rack_tlp_newdata); 762 counter_u64_zero(rack_tlp_retran); 763 counter_u64_zero(rack_tlp_retran_bytes); 764 counter_u64_zero(rack_tlp_retran_fail); 765 counter_u64_zero(rack_to_tot); 766 counter_u64_zero(rack_to_arm_rack); 767 counter_u64_zero(rack_to_arm_tlp); 768 counter_u64_zero(rack_paced_segments); 769 counter_u64_zero(rack_calc_zero); 770 counter_u64_zero(rack_calc_nonzero); 771 counter_u64_zero(rack_unpaced_segments); 772 counter_u64_zero(rack_saw_enobuf); 773 counter_u64_zero(rack_saw_enobuf_hw); 774 counter_u64_zero(rack_saw_enetunreach); 775 counter_u64_zero(rack_per_timer_hole); 776 counter_u64_zero(rack_large_ackcmp); 777 counter_u64_zero(rack_small_ackcmp); 778 #ifdef INVARIANTS 779 counter_u64_zero(rack_adjust_map_bw); 780 #endif 781 counter_u64_zero(rack_to_alloc_hard); 782 counter_u64_zero(rack_to_alloc_emerg); 783 counter_u64_zero(rack_sack_proc_all); 784 counter_u64_zero(rack_fto_send); 785 counter_u64_zero(rack_fto_rsm_send); 786 counter_u64_zero(rack_extended_rfo); 787 counter_u64_zero(rack_hw_pace_init_fail); 788 counter_u64_zero(rack_hw_pace_lost); 789 
counter_u64_zero(rack_sbsndptr_wrong); 790 counter_u64_zero(rack_sbsndptr_right); 791 counter_u64_zero(rack_non_fto_send); 792 counter_u64_zero(rack_nfto_resend); 793 counter_u64_zero(rack_sack_proc_short); 794 counter_u64_zero(rack_sack_proc_restart); 795 counter_u64_zero(rack_to_alloc); 796 counter_u64_zero(rack_to_alloc_limited); 797 counter_u64_zero(rack_alloc_limited_conns); 798 counter_u64_zero(rack_split_limited); 799 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 800 counter_u64_zero(rack_proc_comp_ack[i]); 801 } 802 counter_u64_zero(rack_multi_single_eq); 803 counter_u64_zero(rack_proc_non_comp_ack); 804 counter_u64_zero(rack_find_high); 805 counter_u64_zero(rack_sack_attacks_detected); 806 counter_u64_zero(rack_sack_attacks_reversed); 807 counter_u64_zero(rack_sack_used_next_merge); 808 counter_u64_zero(rack_sack_used_prev_merge); 809 counter_u64_zero(rack_sack_splits); 810 counter_u64_zero(rack_sack_skipped_acked); 811 counter_u64_zero(rack_ack_total); 812 counter_u64_zero(rack_express_sack); 813 counter_u64_zero(rack_sack_total); 814 counter_u64_zero(rack_move_none); 815 counter_u64_zero(rack_move_some); 816 counter_u64_zero(rack_used_tlpmethod); 817 counter_u64_zero(rack_used_tlpmethod2); 818 counter_u64_zero(rack_enter_tlp_calc); 819 counter_u64_zero(rack_progress_drops); 820 counter_u64_zero(rack_tlp_does_nada); 821 counter_u64_zero(rack_try_scwnd); 822 counter_u64_zero(rack_collapsed_win); 823 } 824 rack_clear_counter = 0; 825 return (0); 826 } 827 828 static void 829 rack_init_sysctls(void) 830 { 831 int i; 832 struct sysctl_oid *rack_counters; 833 struct sysctl_oid *rack_attack; 834 struct sysctl_oid *rack_pacing; 835 struct sysctl_oid *rack_timely; 836 struct sysctl_oid *rack_timers; 837 struct sysctl_oid *rack_tlp; 838 struct sysctl_oid *rack_misc; 839 struct sysctl_oid *rack_features; 840 struct sysctl_oid *rack_measure; 841 struct sysctl_oid *rack_probertt; 842 struct sysctl_oid *rack_hw_pacing; 843 844 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 845 SYSCTL_CHILDREN(rack_sysctl_root), 846 OID_AUTO, 847 "sack_attack", 848 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 849 "Rack Sack Attack Counters and Controls"); 850 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 851 SYSCTL_CHILDREN(rack_sysctl_root), 852 OID_AUTO, 853 "stats", 854 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 855 "Rack Counters"); 856 SYSCTL_ADD_S32(&rack_sysctl_ctx, 857 SYSCTL_CHILDREN(rack_sysctl_root), 858 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 859 &rack_rate_sample_method , USE_RTT_LOW, 860 "What method should we use for rate sampling 0=high, 1=low "); 861 /* Probe rtt related controls */ 862 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 863 SYSCTL_CHILDREN(rack_sysctl_root), 864 OID_AUTO, 865 "probertt", 866 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 867 "ProbeRTT related Controls"); 868 SYSCTL_ADD_U16(&rack_sysctl_ctx, 869 SYSCTL_CHILDREN(rack_probertt), 870 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 871 &rack_atexit_prtt_hbp, 130, 872 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 873 SYSCTL_ADD_U16(&rack_sysctl_ctx, 874 SYSCTL_CHILDREN(rack_probertt), 875 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 876 &rack_atexit_prtt, 130, 877 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 878 SYSCTL_ADD_U16(&rack_sysctl_ctx, 879 SYSCTL_CHILDREN(rack_probertt), 880 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 881 &rack_per_of_gp_probertt, 60, 882 "What percentage of goodput do we pace at in probertt"); 883 SYSCTL_ADD_U16(&rack_sysctl_ctx, 884 
SYSCTL_CHILDREN(rack_probertt), 885 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 886 &rack_per_of_gp_probertt_reduce, 10, 887 "What percentage of goodput do we reduce every gp_srtt"); 888 SYSCTL_ADD_U16(&rack_sysctl_ctx, 889 SYSCTL_CHILDREN(rack_probertt), 890 OID_AUTO, "gp_per_low", CTLFLAG_RW, 891 &rack_per_of_gp_lowthresh, 40, 892 "What percentage of goodput do we allow the multiplier to fall to"); 893 SYSCTL_ADD_U32(&rack_sysctl_ctx, 894 SYSCTL_CHILDREN(rack_probertt), 895 OID_AUTO, "time_between", CTLFLAG_RW, 896 & rack_time_between_probertt, 96000000, 897 "How many useconds between the lowest rtt falling must past before we enter probertt"); 898 SYSCTL_ADD_U32(&rack_sysctl_ctx, 899 SYSCTL_CHILDREN(rack_probertt), 900 OID_AUTO, "safety", CTLFLAG_RW, 901 &rack_probe_rtt_safety_val, 2000000, 902 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 903 SYSCTL_ADD_U32(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_probertt), 905 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 906 &rack_probe_rtt_sets_cwnd, 0, 907 "Do we set the cwnd too (if always_lower is on)"); 908 SYSCTL_ADD_U32(&rack_sysctl_ctx, 909 SYSCTL_CHILDREN(rack_probertt), 910 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 911 &rack_max_drain_wait, 2, 912 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 913 SYSCTL_ADD_U32(&rack_sysctl_ctx, 914 SYSCTL_CHILDREN(rack_probertt), 915 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 916 &rack_must_drain, 1, 917 "We must drain this many gp_srtt's waiting for flight to reach goal"); 918 SYSCTL_ADD_U32(&rack_sysctl_ctx, 919 SYSCTL_CHILDREN(rack_probertt), 920 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 921 &rack_probertt_use_min_rtt_entry, 1, 922 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 923 SYSCTL_ADD_U32(&rack_sysctl_ctx, 924 SYSCTL_CHILDREN(rack_probertt), 925 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 926 &rack_probertt_use_min_rtt_exit, 0, 927 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 928 SYSCTL_ADD_U32(&rack_sysctl_ctx, 929 SYSCTL_CHILDREN(rack_probertt), 930 OID_AUTO, "length_div", CTLFLAG_RW, 931 &rack_probertt_gpsrtt_cnt_div, 0, 932 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 933 SYSCTL_ADD_U32(&rack_sysctl_ctx, 934 SYSCTL_CHILDREN(rack_probertt), 935 OID_AUTO, "length_mul", CTLFLAG_RW, 936 &rack_probertt_gpsrtt_cnt_mul, 0, 937 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 938 SYSCTL_ADD_U32(&rack_sysctl_ctx, 939 SYSCTL_CHILDREN(rack_probertt), 940 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 941 &rack_min_probertt_hold, 200000, 942 "What is the minimum time we hold probertt at target"); 943 SYSCTL_ADD_U32(&rack_sysctl_ctx, 944 SYSCTL_CHILDREN(rack_probertt), 945 OID_AUTO, "filter_life", CTLFLAG_RW, 946 &rack_probertt_filter_life, 10000000, 947 "What is the time for the filters life in useconds"); 948 SYSCTL_ADD_U32(&rack_sysctl_ctx, 949 SYSCTL_CHILDREN(rack_probertt), 950 OID_AUTO, "lower_within", CTLFLAG_RW, 951 &rack_probertt_lower_within, 10, 952 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 953 SYSCTL_ADD_U32(&rack_sysctl_ctx, 954 SYSCTL_CHILDREN(rack_probertt), 955 OID_AUTO, "must_move", CTLFLAG_RW, 956 &rack_min_rtt_movement, 250, 957 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 958 SYSCTL_ADD_U32(&rack_sysctl_ctx, 959 SYSCTL_CHILDREN(rack_probertt), 960 OID_AUTO, "clear_is_cnts", 
CTLFLAG_RW, 961 &rack_probertt_clear_is, 1, 962 "Do we clear I/S counts on exiting probe-rtt"); 963 SYSCTL_ADD_S32(&rack_sysctl_ctx, 964 SYSCTL_CHILDREN(rack_probertt), 965 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 966 &rack_max_drain_hbp, 1, 967 "How many extra drain gpsrtt's do we get in highly buffered paths"); 968 SYSCTL_ADD_S32(&rack_sysctl_ctx, 969 SYSCTL_CHILDREN(rack_probertt), 970 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 971 &rack_hbp_thresh, 3, 972 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 973 /* Pacing related sysctls */ 974 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 975 SYSCTL_CHILDREN(rack_sysctl_root), 976 OID_AUTO, 977 "pacing", 978 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 979 "Pacing related Controls"); 980 SYSCTL_ADD_S32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_pacing), 982 OID_AUTO, "max_pace_over", CTLFLAG_RW, 983 &rack_max_per_above, 30, 984 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 985 SYSCTL_ADD_S32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_pacing), 987 OID_AUTO, "pace_to_one", CTLFLAG_RW, 988 &rack_pace_one_seg, 0, 989 "Do we allow low b/w pacing of 1MSS instead of two"); 990 SYSCTL_ADD_S32(&rack_sysctl_ctx, 991 SYSCTL_CHILDREN(rack_pacing), 992 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 993 &rack_limit_time_with_srtt, 0, 994 "Do we limit pacing time based on srtt"); 995 SYSCTL_ADD_S32(&rack_sysctl_ctx, 996 SYSCTL_CHILDREN(rack_pacing), 997 OID_AUTO, "init_win", CTLFLAG_RW, 998 &rack_default_init_window, 0, 999 "Do we have a rack initial window 0 = system default"); 1000 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1001 SYSCTL_CHILDREN(rack_pacing), 1002 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1003 &rack_per_of_gp_ss, 250, 1004 "If non zero, what percentage of goodput to pace at in slow start"); 1005 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1006 SYSCTL_CHILDREN(rack_pacing), 1007 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1008 &rack_per_of_gp_ca, 150, 1009 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1010 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1011 SYSCTL_CHILDREN(rack_pacing), 1012 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1013 &rack_per_of_gp_rec, 200, 1014 "If non zero, what percentage of goodput to pace at in recovery"); 1015 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1016 SYSCTL_CHILDREN(rack_pacing), 1017 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1018 &rack_hptsi_segments, 40, 1019 "What size is the max for TSO segments in pacing and burst mitigation"); 1020 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_pacing), 1022 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1023 &rack_slot_reduction, 4, 1024 "When doing only burst mitigation what is the reduce divisor"); 1025 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1026 SYSCTL_CHILDREN(rack_sysctl_root), 1027 OID_AUTO, "use_pacing", CTLFLAG_RW, 1028 &rack_pace_every_seg, 0, 1029 "If set we use pacing, if clear we use only the original burst mitigation"); 1030 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1031 SYSCTL_CHILDREN(rack_pacing), 1032 OID_AUTO, "rate_cap", CTLFLAG_RW, 1033 &rack_bw_rate_cap, 0, 1034 "If set we apply this value to the absolute rate cap used by pacing"); 1035 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1036 SYSCTL_CHILDREN(rack_sysctl_root), 1037 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1038 &rack_req_measurements, 1, 1039 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1040 /* Hardware pacing */ 1041 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1042 SYSCTL_CHILDREN(rack_sysctl_root), 1043 OID_AUTO, 1044 "hdwr_pacing", 1045 
CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1046 "Pacing related Controls"); 1047 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1048 SYSCTL_CHILDREN(rack_hw_pacing), 1049 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1050 &rack_hw_rwnd_factor, 2, 1051 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1052 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1053 SYSCTL_CHILDREN(rack_hw_pacing), 1054 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1055 &rack_enobuf_hw_boost_mult, 2, 1056 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1057 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1058 SYSCTL_CHILDREN(rack_hw_pacing), 1059 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1060 &rack_enobuf_hw_max, 2, 1061 "What is the max boost the pacing time if we see a ENOBUFS?"); 1062 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1063 SYSCTL_CHILDREN(rack_hw_pacing), 1064 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1065 &rack_enobuf_hw_min, 2, 1066 "What is the min boost the pacing time if we see a ENOBUFS?"); 1067 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1068 SYSCTL_CHILDREN(rack_hw_pacing), 1069 OID_AUTO, "enable", CTLFLAG_RW, 1070 &rack_enable_hw_pacing, 0, 1071 "Should RACK attempt to use hw pacing?"); 1072 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1073 SYSCTL_CHILDREN(rack_hw_pacing), 1074 OID_AUTO, "rate_cap", CTLFLAG_RW, 1075 &rack_hw_rate_caps, 1, 1076 "Does the highest hardware pacing rate cap the rate we will send at??"); 1077 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1078 SYSCTL_CHILDREN(rack_hw_pacing), 1079 OID_AUTO, "rate_min", CTLFLAG_RW, 1080 &rack_hw_rate_min, 0, 1081 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1082 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1083 SYSCTL_CHILDREN(rack_hw_pacing), 1084 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1085 &rack_hw_rate_to_low, 0, 1086 "If we fall below this rate, dis-engage hw pacing?"); 1087 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1088 SYSCTL_CHILDREN(rack_hw_pacing), 1089 OID_AUTO, "up_only", CTLFLAG_RW, 1090 &rack_hw_up_only, 1, 1091 "Do we allow hw pacing to lower the rate selected?"); 1092 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1093 SYSCTL_CHILDREN(rack_hw_pacing), 1094 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1095 &rack_hw_pace_extra_slots, 2, 1096 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1097 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1098 SYSCTL_CHILDREN(rack_sysctl_root), 1099 OID_AUTO, 1100 "timely", 1101 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1102 "Rack Timely RTT Controls"); 1103 /* Timely based GP dynmics */ 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_timely), 1106 OID_AUTO, "upper", CTLFLAG_RW, 1107 &rack_gp_per_bw_mul_up, 2, 1108 "Rack timely upper range for equal b/w (in percentage)"); 1109 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_timely), 1111 OID_AUTO, "lower", CTLFLAG_RW, 1112 &rack_gp_per_bw_mul_down, 4, 1113 "Rack timely lower range for equal b/w (in percentage)"); 1114 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_timely), 1116 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1117 &rack_gp_rtt_maxmul, 3, 1118 "Rack timely multipler of lowest rtt for rtt_max"); 1119 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_timely), 1121 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1122 &rack_gp_rtt_mindiv, 4, 1123 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_timely), 1126 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1127 &rack_gp_rtt_minmul, 1, 
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 20,
	    "Rack timely decrease percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
	    &rack_gp_no_rec_chg, 1,
	    "Rack timely do we prohibit the recovery multiplier from being lowered");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
	    &rack_timely_dec_clear, 6,
	    "Rack timely what threshold do we count to before another boost during b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
	    &rack_down_raise_thresh, 100,
	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_timely), 1206 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1207 &rack_req_segs, 1, 1208 "Bottom dragging if not these many segments outstanding and room"); 1209 1210 /* TLP and Rack related parameters */ 1211 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1212 SYSCTL_CHILDREN(rack_sysctl_root), 1213 OID_AUTO, 1214 "tlp", 1215 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1216 "TLP and Rack related Controls"); 1217 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1218 SYSCTL_CHILDREN(rack_tlp), 1219 OID_AUTO, "use_rrr", CTLFLAG_RW, 1220 &use_rack_rr, 1, 1221 "Do we use Rack Rapid Recovery"); 1222 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1223 SYSCTL_CHILDREN(rack_tlp), 1224 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1225 &rack_max_abc_post_recovery, 2, 1226 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1227 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1228 SYSCTL_CHILDREN(rack_tlp), 1229 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1230 &rack_non_rxt_use_cr, 0, 1231 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1232 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1233 SYSCTL_CHILDREN(rack_tlp), 1234 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1235 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1236 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1237 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1238 SYSCTL_CHILDREN(rack_tlp), 1239 OID_AUTO, "limit", CTLFLAG_RW, 1240 &rack_tlp_limit, 2, 1241 "How many TLP's can be sent without sending new data"); 1242 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1243 SYSCTL_CHILDREN(rack_tlp), 1244 OID_AUTO, "use_greater", CTLFLAG_RW, 1245 &rack_tlp_use_greater, 1, 1246 "Should we use the rack_rtt time if its greater than srtt"); 1247 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1248 SYSCTL_CHILDREN(rack_tlp), 1249 OID_AUTO, "tlpminto", CTLFLAG_RW, 1250 &rack_tlp_min, 10000, 1251 "TLP minimum timeout per the specification (in microseconds)"); 1252 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1253 SYSCTL_CHILDREN(rack_tlp), 1254 OID_AUTO, "send_oldest", CTLFLAG_RW, 1255 &rack_always_send_oldest, 0, 1256 "Should we always send the oldest TLP and RACK-TLP"); 1257 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1258 SYSCTL_CHILDREN(rack_tlp), 1259 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1260 &rack_limited_retran, 0, 1261 "How many times can a rack timeout drive out sends"); 1262 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1263 SYSCTL_CHILDREN(rack_tlp), 1264 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1265 &rack_lower_cwnd_at_tlp, 0, 1266 "When a TLP completes a retran should we enter recovery"); 1267 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1268 SYSCTL_CHILDREN(rack_tlp), 1269 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1270 &rack_reorder_thresh, 2, 1271 "What factor for rack will be added when seeing reordering (shift right)"); 1272 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1273 SYSCTL_CHILDREN(rack_tlp), 1274 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1275 &rack_tlp_thresh, 1, 1276 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1277 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1278 SYSCTL_CHILDREN(rack_tlp), 1279 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1280 &rack_reorder_fade, 60000000, 1281 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1282 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1283 SYSCTL_CHILDREN(rack_tlp), 1284 OID_AUTO, "pktdelay", CTLFLAG_RW, 1285 &rack_pkt_delay, 1000, 1286 "Extra RACK time (in microseconds) besides reordering thresh"); 1287 1288 /* Timer related controls */ 1289 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1290 SYSCTL_CHILDREN(rack_sysctl_root), 1291 OID_AUTO, 1292 "timers", 
1293 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1294 "Timer related controls"); 1295 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1296 SYSCTL_CHILDREN(rack_timers), 1297 OID_AUTO, "persmin", CTLFLAG_RW, 1298 &rack_persist_min, 250000, 1299 "What is the minimum time in microseconds between persists"); 1300 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1301 SYSCTL_CHILDREN(rack_timers), 1302 OID_AUTO, "persmax", CTLFLAG_RW, 1303 &rack_persist_max, 2000000, 1304 "What is the largest delay in microseconds between persists"); 1305 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1306 SYSCTL_CHILDREN(rack_timers), 1307 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1308 &rack_delayed_ack_time, 40000, 1309 "Delayed ack time (40ms in microseconds)"); 1310 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1311 SYSCTL_CHILDREN(rack_timers), 1312 OID_AUTO, "minrto", CTLFLAG_RW, 1313 &rack_rto_min, 30000, 1314 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1315 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1316 SYSCTL_CHILDREN(rack_timers), 1317 OID_AUTO, "maxrto", CTLFLAG_RW, 1318 &rack_rto_max, 4000000, 1319 "Maxiumum RTO in microseconds -- should be at least as large as min_rto"); 1320 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1321 SYSCTL_CHILDREN(rack_timers), 1322 OID_AUTO, "minto", CTLFLAG_RW, 1323 &rack_min_to, 1000, 1324 "Minimum rack timeout in microseconds"); 1325 /* Measure controls */ 1326 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1327 SYSCTL_CHILDREN(rack_sysctl_root), 1328 OID_AUTO, 1329 "measure", 1330 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1331 "Measure related controls"); 1332 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1333 SYSCTL_CHILDREN(rack_measure), 1334 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1335 &rack_wma_divisor, 8, 1336 "When doing b/w calculation what is the divisor for the WMA"); 1337 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1338 SYSCTL_CHILDREN(rack_measure), 1339 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1340 &rack_cwnd_block_ends_measure, 0, 1341 "Does a cwnd just-return end the measurement window (app limited)"); 1342 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1343 SYSCTL_CHILDREN(rack_measure), 1344 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1345 &rack_rwnd_block_ends_measure, 0, 1346 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1347 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1348 SYSCTL_CHILDREN(rack_measure), 1349 OID_AUTO, "min_target", CTLFLAG_RW, 1350 &rack_def_data_window, 20, 1351 "What is the minimum target window (in mss) for a GP measurements"); 1352 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1353 SYSCTL_CHILDREN(rack_measure), 1354 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1355 &rack_goal_bdp, 2, 1356 "What is the goal BDP to measure"); 1357 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_measure), 1359 OID_AUTO, "min_srtts", CTLFLAG_RW, 1360 &rack_min_srtts, 1, 1361 "What is the goal BDP to measure"); 1362 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1363 SYSCTL_CHILDREN(rack_measure), 1364 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1365 &rack_min_measure_usec, 0, 1366 "What is the Minimum time time for a measurement if 0, this is off"); 1367 /* Features */ 1368 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1369 SYSCTL_CHILDREN(rack_sysctl_root), 1370 OID_AUTO, 1371 "features", 1372 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1373 "Feature controls"); 1374 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1375 SYSCTL_CHILDREN(rack_features), 1376 OID_AUTO, "cmpack", CTLFLAG_RW, 1377 &rack_use_cmp_acks, 1, 1378 "Should RACK have LRO send compressed acks"); 1379 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1380 SYSCTL_CHILDREN(rack_features), 1381 OID_AUTO, "fsb", CTLFLAG_RW, 1382 &rack_use_fsb, 1, 1383 
"Should RACK use the fast send block?"); 1384 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1385 SYSCTL_CHILDREN(rack_features), 1386 OID_AUTO, "rfo", CTLFLAG_RW, 1387 &rack_use_rfo, 1, 1388 "Should RACK use rack_fast_output()?"); 1389 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1390 SYSCTL_CHILDREN(rack_features), 1391 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1392 &rack_use_rsm_rfo, 1, 1393 "Should RACK use rack_fast_rsm_output()?"); 1394 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1395 SYSCTL_CHILDREN(rack_features), 1396 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1397 &rack_enable_mqueue_for_nonpaced, 0, 1398 "Should RACK use mbuf queuing for non-paced connections"); 1399 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1400 SYSCTL_CHILDREN(rack_features), 1401 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1402 &rack_do_hystart, 0, 1403 "Should RACK enable HyStart++ on connections?"); 1404 /* Misc rack controls */ 1405 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1406 SYSCTL_CHILDREN(rack_sysctl_root), 1407 OID_AUTO, 1408 "misc", 1409 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1410 "Misc related controls"); 1411 #ifdef TCP_ACCOUNTING 1412 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1413 SYSCTL_CHILDREN(rack_misc), 1414 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1415 &rack_tcp_accounting, 0, 1416 "Should we turn on TCP accounting for all rack sessions?"); 1417 #endif 1418 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1419 SYSCTL_CHILDREN(rack_misc), 1420 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1421 &rack_dsack_std_based, 3, 1422 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1423 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1424 SYSCTL_CHILDREN(rack_misc), 1425 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1426 &rack_prr_addbackmax, 2, 1427 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1428 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1429 SYSCTL_CHILDREN(rack_misc), 1430 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1431 &rack_stats_gets_ms_rtt, 1, 1432 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1433 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_misc), 1435 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1436 &rack_client_low_buf, 0, 1437 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1438 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1439 SYSCTL_CHILDREN(rack_misc), 1440 OID_AUTO, "defprofile", CTLFLAG_RW, 1441 &rack_def_profile, 0, 1442 "Should RACK use a default profile (0=no, num == profile num)?"); 1443 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1444 SYSCTL_CHILDREN(rack_misc), 1445 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1446 &rack_enable_shared_cwnd, 1, 1447 "Should RACK try to use the shared cwnd on connections where allowed"); 1448 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1449 SYSCTL_CHILDREN(rack_misc), 1450 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1451 &rack_limits_scwnd, 1, 1452 "Should RACK place low end time limits on the shared cwnd feature"); 1453 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1454 SYSCTL_CHILDREN(rack_misc), 1455 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1456 &rack_use_imac_dack, 0, 1457 "Should RACK try to emulate iMac delayed ack"); 1458 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1459 SYSCTL_CHILDREN(rack_misc), 1460 OID_AUTO, "no_prr", CTLFLAG_RW, 1461 &rack_disable_prr, 0, 1462 "Should RACK not use prr and only pace (must have pacing on)"); 1463 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1464 SYSCTL_CHILDREN(rack_misc), 1465 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1466 &rack_verbose_logging, 0, 1467 "Should RACK black box logging be verbose"); 1468 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1469 SYSCTL_CHILDREN(rack_misc), 1470 OID_AUTO, "data_after_close", CTLFLAG_RW, 1471 &rack_ignore_data_after_close, 1, 1472 "Do we hold off sending a RST until all pending data is ack'd"); 1473 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1474 SYSCTL_CHILDREN(rack_misc), 1475 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1476 &rack_sack_not_required, 1, 1477 "Do we allow rack to run on connections not supporting SACK"); 1478 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1479 SYSCTL_CHILDREN(rack_misc), 1480 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1481 &rack_send_a_lot_in_prr, 1, 1482 "Send a lot in prr"); 1483 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1484 SYSCTL_CHILDREN(rack_misc), 1485 OID_AUTO, "autoscale", CTLFLAG_RW, 1486 &rack_autosndbuf_inc, 20, 1487 "What percentage should rack scale up its snd buffer by?"); 1488 /* Sack Attacker detection stuff */ 1489 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1490 SYSCTL_CHILDREN(rack_attack), 1491 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1492 &rack_highest_sack_thresh_seen, 0, 1493 "Highest sack to ack ratio seen"); 1494 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1495 SYSCTL_CHILDREN(rack_attack), 1496 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1497 &rack_highest_move_thresh_seen, 0, 1498 "Highest move to non-move ratio seen"); 1499 rack_ack_total = counter_u64_alloc(M_WAITOK); 1500 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1501 SYSCTL_CHILDREN(rack_attack), 1502 OID_AUTO, "acktotal", CTLFLAG_RD, 1503 &rack_ack_total, 1504 "Total number of Ack's"); 1505 rack_express_sack = counter_u64_alloc(M_WAITOK); 1506 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1507 SYSCTL_CHILDREN(rack_attack), 1508 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1509 &rack_express_sack, 1510 "Total expresss number of Sack's"); 1511 rack_sack_total = counter_u64_alloc(M_WAITOK); 1512 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1513 SYSCTL_CHILDREN(rack_attack), 1514 OID_AUTO, "sacktotal", CTLFLAG_RD, 1515 &rack_sack_total, 1516 "Total number of SACKs"); 1517 rack_move_none = counter_u64_alloc(M_WAITOK); 1518 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1519 SYSCTL_CHILDREN(rack_attack), 1520 OID_AUTO, "move_none", CTLFLAG_RD, 1521 &rack_move_none, 1522 "Total number of SACK index reuse of postions under threshold"); 1523 rack_move_some = counter_u64_alloc(M_WAITOK); 1524 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1525 SYSCTL_CHILDREN(rack_attack), 1526 OID_AUTO, "move_some", CTLFLAG_RD, 1527 &rack_move_some, 1528 "Total number of SACK index reuse of postions over threshold"); 1529 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1530 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1531 SYSCTL_CHILDREN(rack_attack), 1532 OID_AUTO, "attacks", CTLFLAG_RD, 1533 &rack_sack_attacks_detected, 1534 "Total number of SACK attackers that had sack disabled"); 1535 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1536 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1537 SYSCTL_CHILDREN(rack_attack), 1538 OID_AUTO, "reversed", CTLFLAG_RD, 1539 &rack_sack_attacks_reversed, 1540 "Total number of SACK attackers that were later determined false positive"); 1541 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1542 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1543 SYSCTL_CHILDREN(rack_attack), 1544 OID_AUTO, "nextmerge", CTLFLAG_RD, 1545 &rack_sack_used_next_merge, 1546 "Total number of times we used the next merge"); 1547 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1548 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1549 SYSCTL_CHILDREN(rack_attack), 1550 OID_AUTO, "prevmerge", CTLFLAG_RD, 1551 
&rack_sack_used_prev_merge, 1552 "Total number of times we used the prev merge"); 1553 /* Counters */ 1554 rack_fto_send = counter_u64_alloc(M_WAITOK); 1555 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1556 SYSCTL_CHILDREN(rack_counters), 1557 OID_AUTO, "fto_send", CTLFLAG_RD, 1558 &rack_fto_send, "Total number of rack_fast_output sends"); 1559 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1560 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1561 SYSCTL_CHILDREN(rack_counters), 1562 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1563 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1564 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1565 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1566 SYSCTL_CHILDREN(rack_counters), 1567 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1568 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1569 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1570 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1571 SYSCTL_CHILDREN(rack_counters), 1572 OID_AUTO, "nfto_send", CTLFLAG_RD, 1573 &rack_non_fto_send, "Total number of rack_output first sends"); 1574 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1575 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1576 SYSCTL_CHILDREN(rack_counters), 1577 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1578 &rack_extended_rfo, "Total number of times we extended rfo"); 1579 1580 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1581 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_counters), 1583 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1584 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1585 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1586 1587 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1588 SYSCTL_CHILDREN(rack_counters), 1589 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1590 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1591 rack_badfr = counter_u64_alloc(M_WAITOK); 1592 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1593 SYSCTL_CHILDREN(rack_counters), 1594 OID_AUTO, "badfr", CTLFLAG_RD, 1595 &rack_badfr, "Total number of bad FRs"); 1596 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 1597 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1598 SYSCTL_CHILDREN(rack_counters), 1599 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 1600 &rack_badfr_bytes, "Total number of bad FRs"); 1601 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 1602 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1603 SYSCTL_CHILDREN(rack_counters), 1604 OID_AUTO, "prrsndret", CTLFLAG_RD, 1605 &rack_rtm_prr_retran, 1606 "Total number of prr based retransmits"); 1607 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 1608 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1609 SYSCTL_CHILDREN(rack_counters), 1610 OID_AUTO, "prrsndnew", CTLFLAG_RD, 1611 &rack_rtm_prr_newdata, 1612 "Total number of prr based new transmits"); 1613 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 1614 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1615 SYSCTL_CHILDREN(rack_counters), 1616 OID_AUTO, "tsnf", CTLFLAG_RD, 1617 &rack_timestamp_mismatch, 1618 "Total number of timestamps that we could not find the reported ts"); 1619 rack_find_high = counter_u64_alloc(M_WAITOK); 1620 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_counters), 1622 OID_AUTO, "findhigh", CTLFLAG_RD, 1623 &rack_find_high, 1624 "Total number of FIN causing find-high"); 1625 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 1626 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1627 SYSCTL_CHILDREN(rack_counters), 1628 OID_AUTO, "reordering", CTLFLAG_RD, 1629 
&rack_reorder_seen, 1630 "Total number of times we added delay due to reordering"); 1631 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1632 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1633 SYSCTL_CHILDREN(rack_counters), 1634 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1635 &rack_tlp_tot, 1636 "Total number of tail loss probe expirations"); 1637 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1638 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1639 SYSCTL_CHILDREN(rack_counters), 1640 OID_AUTO, "tlp_new", CTLFLAG_RD, 1641 &rack_tlp_newdata, 1642 "Total number of tail loss probe sending new data"); 1643 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1644 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1645 SYSCTL_CHILDREN(rack_counters), 1646 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1647 &rack_tlp_retran, 1648 "Total number of tail loss probe sending retransmitted data"); 1649 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1650 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_counters), 1652 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1653 &rack_tlp_retran_bytes, 1654 "Total bytes of tail loss probe sending retransmitted data"); 1655 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 1656 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1657 SYSCTL_CHILDREN(rack_counters), 1658 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 1659 &rack_tlp_retran_fail, 1660 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 1661 rack_to_tot = counter_u64_alloc(M_WAITOK); 1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1663 SYSCTL_CHILDREN(rack_counters), 1664 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1665 &rack_to_tot, 1666 "Total number of times the rack to expired"); 1667 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 1668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1669 SYSCTL_CHILDREN(rack_counters), 1670 OID_AUTO, "arm_rack", CTLFLAG_RD, 1671 &rack_to_arm_rack, 1672 "Total number of times the rack timer armed"); 1673 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 1674 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1675 SYSCTL_CHILDREN(rack_counters), 1676 OID_AUTO, "arm_tlp", CTLFLAG_RD, 1677 &rack_to_arm_tlp, 1678 "Total number of times the tlp timer armed"); 1679 rack_calc_zero = counter_u64_alloc(M_WAITOK); 1680 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 1681 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1682 SYSCTL_CHILDREN(rack_counters), 1683 OID_AUTO, "calc_zero", CTLFLAG_RD, 1684 &rack_calc_zero, 1685 "Total number of times pacing time worked out to zero"); 1686 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1687 SYSCTL_CHILDREN(rack_counters), 1688 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 1689 &rack_calc_nonzero, 1690 "Total number of times pacing time worked out to non-zero"); 1691 rack_paced_segments = counter_u64_alloc(M_WAITOK); 1692 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1693 SYSCTL_CHILDREN(rack_counters), 1694 OID_AUTO, "paced", CTLFLAG_RD, 1695 &rack_paced_segments, 1696 "Total number of times a segment send caused hptsi"); 1697 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 1698 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1699 SYSCTL_CHILDREN(rack_counters), 1700 OID_AUTO, "unpaced", CTLFLAG_RD, 1701 &rack_unpaced_segments, 1702 "Total number of times a segment did not cause hptsi"); 1703 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1704 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1705 SYSCTL_CHILDREN(rack_counters), 1706 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1707 &rack_saw_enobuf, 1708 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1709 
rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1711 SYSCTL_CHILDREN(rack_counters), 1712 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1713 &rack_saw_enobuf_hw, 1714 "Total number of times a send returned enobuf for hdwr paced connections"); 1715 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1716 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1717 SYSCTL_CHILDREN(rack_counters), 1718 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1719 &rack_saw_enetunreach, 1720 "Total number of times a send received a enetunreachable"); 1721 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1722 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1723 SYSCTL_CHILDREN(rack_counters), 1724 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1725 &rack_hot_alloc, 1726 "Total allocations from the top of our list"); 1727 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1728 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1729 SYSCTL_CHILDREN(rack_counters), 1730 OID_AUTO, "allocs", CTLFLAG_RD, 1731 &rack_to_alloc, 1732 "Total allocations of tracking structures"); 1733 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1734 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1735 SYSCTL_CHILDREN(rack_counters), 1736 OID_AUTO, "allochard", CTLFLAG_RD, 1737 &rack_to_alloc_hard, 1738 "Total allocations done with sleeping the hard way"); 1739 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1740 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1741 SYSCTL_CHILDREN(rack_counters), 1742 OID_AUTO, "allocemerg", CTLFLAG_RD, 1743 &rack_to_alloc_emerg, 1744 "Total allocations done from emergency cache"); 1745 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1746 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1747 SYSCTL_CHILDREN(rack_counters), 1748 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1749 &rack_to_alloc_limited, 1750 "Total allocations dropped due to limit"); 1751 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1752 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1753 SYSCTL_CHILDREN(rack_counters), 1754 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1755 &rack_alloc_limited_conns, 1756 "Connections with allocations dropped due to limit"); 1757 rack_split_limited = counter_u64_alloc(M_WAITOK); 1758 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1759 SYSCTL_CHILDREN(rack_counters), 1760 OID_AUTO, "split_limited", CTLFLAG_RD, 1761 &rack_split_limited, 1762 "Split allocations dropped due to limit"); 1763 1764 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 1765 char name[32]; 1766 sprintf(name, "cmp_ack_cnt_%d", i); 1767 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK); 1768 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1769 SYSCTL_CHILDREN(rack_counters), 1770 OID_AUTO, name, CTLFLAG_RD, 1771 &rack_proc_comp_ack[i], 1772 "Number of compressed acks we processed"); 1773 } 1774 rack_large_ackcmp = counter_u64_alloc(M_WAITOK); 1775 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1776 SYSCTL_CHILDREN(rack_counters), 1777 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD, 1778 &rack_large_ackcmp, 1779 "Number of TCP connections with large mbuf's for compressed acks"); 1780 rack_small_ackcmp = counter_u64_alloc(M_WAITOK); 1781 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1782 SYSCTL_CHILDREN(rack_counters), 1783 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD, 1784 &rack_small_ackcmp, 1785 "Number of TCP connections with small mbuf's for compressed acks"); 1786 #ifdef INVARIANTS 1787 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1788 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1789 SYSCTL_CHILDREN(rack_counters), 1790 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1791 &rack_adjust_map_bw, 1792 "Number 
of times we hit the case where the sb went up and down on a sendmap entry"); 1793 #endif 1794 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1795 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1796 SYSCTL_CHILDREN(rack_counters), 1797 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1798 &rack_multi_single_eq, 1799 "Total number of acks represented by compressed acks"); 1800 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1801 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1802 SYSCTL_CHILDREN(rack_counters), 1803 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1804 &rack_proc_non_comp_ack, 1805 "Number of non-compressed acks that we processed"); 1806 1807 1808 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1809 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1810 SYSCTL_CHILDREN(rack_counters), 1811 OID_AUTO, "sack_long", CTLFLAG_RD, 1812 &rack_sack_proc_all, 1813 "Total times we had to walk the whole list for sack processing"); 1814 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1815 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1816 SYSCTL_CHILDREN(rack_counters), 1817 OID_AUTO, "sack_restart", CTLFLAG_RD, 1818 &rack_sack_proc_restart, 1819 "Total times we had to walk the whole list due to a restart"); 1820 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1821 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1822 SYSCTL_CHILDREN(rack_counters), 1823 OID_AUTO, "sack_short", CTLFLAG_RD, 1824 &rack_sack_proc_short, 1825 "Total times we took shortcut for sack processing"); 1826 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 1827 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1828 SYSCTL_CHILDREN(rack_counters), 1829 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD, 1830 &rack_enter_tlp_calc, 1831 "Total times we called calc-tlp"); 1832 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 1833 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1834 SYSCTL_CHILDREN(rack_counters), 1835 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 1836 &rack_used_tlpmethod, 1837 "Total number of times we hit TLP method 1"); 1838 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 1839 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1840 SYSCTL_CHILDREN(rack_counters), 1841 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 1842 &rack_used_tlpmethod2, 1843 "Total number of times we hit TLP method 2"); 1844 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1845 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1846 SYSCTL_CHILDREN(rack_attack), 1847 OID_AUTO, "skipacked", CTLFLAG_RD, 1848 &rack_sack_skipped_acked, 1849 "Total number of times we skipped previously sacked"); 1850 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1851 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1852 SYSCTL_CHILDREN(rack_attack), 1853 OID_AUTO, "ofsplit", CTLFLAG_RD, 1854 &rack_sack_splits, 1855 "Total number of times we did the old fashioned tree split"); 1856 rack_progress_drops = counter_u64_alloc(M_WAITOK); 1857 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1858 SYSCTL_CHILDREN(rack_counters), 1859 OID_AUTO, "prog_drops", CTLFLAG_RD, 1860 &rack_progress_drops, 1861 "Total number of progress drops"); 1862 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1863 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1864 SYSCTL_CHILDREN(rack_counters), 1865 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1866 &rack_input_idle_reduces, 1867 "Total number of idle reductions on input"); 1868 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1869 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1870 SYSCTL_CHILDREN(rack_counters), 1871 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1872 &rack_collapsed_win, 1873 "Total number of collapsed windows"); 1874 rack_tlp_does_nada =
counter_u64_alloc(M_WAITOK); 1875 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1876 SYSCTL_CHILDREN(rack_counters), 1877 OID_AUTO, "tlp_nada", CTLFLAG_RD, 1878 &rack_tlp_does_nada, 1879 "Total number of nada tlp calls"); 1880 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1881 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1882 SYSCTL_CHILDREN(rack_counters), 1883 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1884 &rack_try_scwnd, 1885 "Total number of scwnd attempts"); 1886 1887 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1888 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1889 SYSCTL_CHILDREN(rack_counters), 1890 OID_AUTO, "timer_hole", CTLFLAG_RD, 1891 &rack_per_timer_hole, 1892 "Total persists start in timer hole"); 1893 1894 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK); 1895 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1896 SYSCTL_CHILDREN(rack_counters), 1897 OID_AUTO, "sndptr_wrong", CTLFLAG_RD, 1898 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorret"); 1899 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK); 1900 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1901 SYSCTL_CHILDREN(rack_counters), 1902 OID_AUTO, "sndptr_right", CTLFLAG_RD, 1903 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was corret"); 1904 1905 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1906 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1907 OID_AUTO, "outsize", CTLFLAG_RD, 1908 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1909 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1910 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1911 OID_AUTO, "opts", CTLFLAG_RD, 1912 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1913 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1914 SYSCTL_CHILDREN(rack_sysctl_root), 1915 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1916 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1917 } 1918 1919 static __inline int 1920 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1921 { 1922 if (SEQ_GEQ(b->r_start, a->r_start) && 1923 SEQ_LT(b->r_start, a->r_end)) { 1924 /* 1925 * The entry b is within the 1926 * block a. i.e.: 1927 * a -- |-------------| 1928 * b -- |----| 1929 * <or> 1930 * b -- |------| 1931 * <or> 1932 * b -- |-----------| 1933 */ 1934 return (0); 1935 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1936 /* 1937 * b falls as either the next 1938 * sequence block after a so a 1939 * is said to be smaller than b. 1940 * i.e: 1941 * a -- |------| 1942 * b -- |--------| 1943 * or 1944 * b -- |-----| 1945 */ 1946 return (1); 1947 } 1948 /* 1949 * Whats left is where a is 1950 * larger than b. i.e: 1951 * a -- |-------| 1952 * b -- |---| 1953 * or even possibly 1954 * b -- |--------------| 1955 */ 1956 return (-1); 1957 } 1958 1959 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1960 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1961 1962 static uint32_t 1963 rc_init_window(struct tcp_rack *rack) 1964 { 1965 uint32_t win; 1966 1967 if (rack->rc_init_win == 0) { 1968 /* 1969 * Nothing set by the user, use the system stack 1970 * default. 
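 * (tcp_compute_initwnd() below is the base stack's normal
 * initial-window calculation; with the usual RFC 6928 style
 * defaults that works out to roughly 10 * MSS, subject to the
 * stack's configured limits.)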
1971 */ 1972 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1973 } 1974 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1975 return (win); 1976 } 1977 1978 static uint64_t 1979 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1980 { 1981 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1982 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1983 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1984 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1985 else 1986 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1987 } 1988 1989 static uint64_t 1990 rack_get_bw(struct tcp_rack *rack) 1991 { 1992 if (rack->use_fixed_rate) { 1993 /* Return the fixed pacing rate */ 1994 return (rack_get_fixed_pacing_bw(rack)); 1995 } 1996 if (rack->r_ctl.gp_bw == 0) { 1997 /* 1998 * We have yet no b/w measurement, 1999 * if we have a user set initial bw 2000 * return it. If we don't have that and 2001 * we have an srtt, use the tcp IW (10) to 2002 * calculate a fictional b/w over the SRTT 2003 * which is more or less a guess. Note 2004 * we don't use our IW from rack on purpose 2005 * so if we have like IW=30, we are not 2006 * calculating a "huge" b/w. 2007 */ 2008 uint64_t bw, srtt; 2009 if (rack->r_ctl.init_rate) 2010 return (rack->r_ctl.init_rate); 2011 2012 /* Has the user set a max peak rate? */ 2013 #ifdef NETFLIX_PEAKRATE 2014 if (rack->rc_tp->t_maxpeakrate) 2015 return (rack->rc_tp->t_maxpeakrate); 2016 #endif 2017 /* Ok lets come up with the IW guess, if we have a srtt */ 2018 if (rack->rc_tp->t_srtt == 0) { 2019 /* 2020 * Go with old pacing method 2021 * i.e. burst mitigation only. 2022 */ 2023 return (0); 2024 } 2025 /* Ok lets get the initial TCP win (not racks) */ 2026 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2027 srtt = (uint64_t)rack->rc_tp->t_srtt; 2028 bw *= (uint64_t)USECS_IN_SECOND; 2029 bw /= srtt; 2030 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2031 bw = rack->r_ctl.bw_rate_cap; 2032 return (bw); 2033 } else { 2034 uint64_t bw; 2035 2036 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2037 /* Averaging is done, we can return the value */ 2038 bw = rack->r_ctl.gp_bw; 2039 } else { 2040 /* Still doing initial average must calculate */ 2041 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 2042 } 2043 #ifdef NETFLIX_PEAKRATE 2044 if ((rack->rc_tp->t_maxpeakrate) && 2045 (bw > rack->rc_tp->t_maxpeakrate)) { 2046 /* The user has set a peak rate to pace at 2047 * don't allow us to pace faster than that. 
2048 */ 2049 return (rack->rc_tp->t_maxpeakrate); 2050 } 2051 #endif 2052 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2053 bw = rack->r_ctl.bw_rate_cap; 2054 return (bw); 2055 } 2056 } 2057 2058 static uint16_t 2059 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2060 { 2061 if (rack->use_fixed_rate) { 2062 return (100); 2063 } else if (rack->in_probe_rtt && (rsm == NULL)) 2064 return (rack->r_ctl.rack_per_of_gp_probertt); 2065 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2066 rack->r_ctl.rack_per_of_gp_rec)) { 2067 if (rsm) { 2068 /* a retransmission always use the recovery rate */ 2069 return (rack->r_ctl.rack_per_of_gp_rec); 2070 } else if (rack->rack_rec_nonrxt_use_cr) { 2071 /* Directed to use the configured rate */ 2072 goto configured_rate; 2073 } else if (rack->rack_no_prr && 2074 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2075 /* No PRR, lets just use the b/w estimate only */ 2076 return (100); 2077 } else { 2078 /* 2079 * Here we may have a non-retransmit but we 2080 * have no overrides, so just use the recovery 2081 * rate (prr is in effect). 2082 */ 2083 return (rack->r_ctl.rack_per_of_gp_rec); 2084 } 2085 } 2086 configured_rate: 2087 /* For the configured rate we look at our cwnd vs the ssthresh */ 2088 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2089 return (rack->r_ctl.rack_per_of_gp_ss); 2090 else 2091 return (rack->r_ctl.rack_per_of_gp_ca); 2092 } 2093 2094 static void 2095 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2096 { 2097 /* 2098 * Types of logs (mod value) 2099 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2100 * 2 = a dsack round begins, persist is reset to 16. 2101 * 3 = a dsack round ends 2102 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2103 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2104 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
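 * (When decoding these RACK_DSACK_HANDLING records, the mod value
 * above is carried in flex8 and the persist count in flex7.)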
2105 */ 2106 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2107 union tcp_log_stackspecific log; 2108 struct timeval tv; 2109 2110 memset(&log, 0, sizeof(log)); 2111 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2112 log.u_bbr.flex1 <<= 1; 2113 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2114 log.u_bbr.flex1 <<= 1; 2115 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2116 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2117 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2118 log.u_bbr.flex4 = flex4; 2119 log.u_bbr.flex5 = flex5; 2120 log.u_bbr.flex6 = flex6; 2121 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2122 log.u_bbr.flex8 = mod; 2123 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2124 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2125 &rack->rc_inp->inp_socket->so_rcv, 2126 &rack->rc_inp->inp_socket->so_snd, 2127 RACK_DSACK_HANDLING, 0, 2128 0, &log, false, &tv); 2129 } 2130 } 2131 2132 static void 2133 rack_log_hdwr_pacing(struct tcp_rack *rack, 2134 uint64_t rate, uint64_t hw_rate, int line, 2135 int error, uint16_t mod) 2136 { 2137 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2138 union tcp_log_stackspecific log; 2139 struct timeval tv; 2140 const struct ifnet *ifp; 2141 2142 memset(&log, 0, sizeof(log)); 2143 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2144 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2145 if (rack->r_ctl.crte) { 2146 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2147 } else if (rack->rc_inp->inp_route.ro_nh && 2148 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2149 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2150 } else 2151 ifp = NULL; 2152 if (ifp) { 2153 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2154 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2155 } 2156 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2157 log.u_bbr.bw_inuse = rate; 2158 log.u_bbr.flex5 = line; 2159 log.u_bbr.flex6 = error; 2160 log.u_bbr.flex7 = mod; 2161 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2162 log.u_bbr.flex8 = rack->use_fixed_rate; 2163 log.u_bbr.flex8 <<= 1; 2164 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2165 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2166 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2167 if (rack->r_ctl.crte) 2168 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2169 else 2170 log.u_bbr.cur_del_rate = 0; 2171 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2172 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2173 &rack->rc_inp->inp_socket->so_rcv, 2174 &rack->rc_inp->inp_socket->so_snd, 2175 BBR_LOG_HDWR_PACE, 0, 2176 0, &log, false, &tv); 2177 } 2178 } 2179 2180 static uint64_t 2181 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2182 { 2183 /* 2184 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
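 * The gain is expressed as a percentage, so the estimate becomes
 * bw_est = (bw * gain) / 100; for example a 12 Mbps b/w estimate
 * with a gain of 150 paces at 18 Mbps.  The result is never
 * allowed to fall below RACK_MIN_BW.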
2185 */ 2186 uint64_t bw_est, high_rate; 2187 uint64_t gain; 2188 2189 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2190 bw_est = bw * gain; 2191 bw_est /= (uint64_t)100; 2192 /* Never fall below the minimum (def 64kbps) */ 2193 if (bw_est < RACK_MIN_BW) 2194 bw_est = RACK_MIN_BW; 2195 if (rack->r_rack_hw_rate_caps) { 2196 /* Rate caps are in place */ 2197 if (rack->r_ctl.crte != NULL) { 2198 /* We have a hdwr rate already */ 2199 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2200 if (bw_est >= high_rate) { 2201 /* We are capping bw at the highest rate table entry */ 2202 rack_log_hdwr_pacing(rack, 2203 bw_est, high_rate, __LINE__, 2204 0, 3); 2205 bw_est = high_rate; 2206 if (capped) 2207 *capped = 1; 2208 } 2209 } else if ((rack->rack_hdrw_pacing == 0) && 2210 (rack->rack_hdw_pace_ena) && 2211 (rack->rack_attempt_hdwr_pace == 0) && 2212 (rack->rc_inp->inp_route.ro_nh != NULL) && 2213 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2214 /* 2215 * Special case, we have not yet attempted hardware 2216 * pacing, and yet we may, when we do, find out if we are 2217 * above the highest rate. We need to know the maxbw for the interface 2218 * in question (if it supports ratelimiting). We get back 2219 * a 0, if the interface is not found in the RL lists. 2220 */ 2221 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2222 if (high_rate) { 2223 /* Yep, we have a rate is it above this rate? */ 2224 if (bw_est > high_rate) { 2225 bw_est = high_rate; 2226 if (capped) 2227 *capped = 1; 2228 } 2229 } 2230 } 2231 } 2232 return (bw_est); 2233 } 2234 2235 static void 2236 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2237 { 2238 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2239 union tcp_log_stackspecific log; 2240 struct timeval tv; 2241 2242 if ((mod != 1) && (rack_verbose_logging == 0)) { 2243 /* 2244 * We get 3 values currently for mod 2245 * 1 - We are retransmitting and this tells the reason. 2246 * 2 - We are clearing a dup-ack count. 2247 * 3 - We are incrementing a dup-ack count. 2248 * 2249 * The clear/increment are only logged 2250 * if you have BBverbose on. 
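 * (BBverbose here is the rack_verbose_logging knob exported
 * above as the misc.bb_verbose sysctl.)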
2251 */ 2252 return; 2253 } 2254 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2255 log.u_bbr.flex1 = tsused; 2256 log.u_bbr.flex2 = thresh; 2257 log.u_bbr.flex3 = rsm->r_flags; 2258 log.u_bbr.flex4 = rsm->r_dupack; 2259 log.u_bbr.flex5 = rsm->r_start; 2260 log.u_bbr.flex6 = rsm->r_end; 2261 log.u_bbr.flex8 = mod; 2262 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2263 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2264 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2265 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2266 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2267 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2268 log.u_bbr.pacing_gain = rack->r_must_retran; 2269 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2270 &rack->rc_inp->inp_socket->so_rcv, 2271 &rack->rc_inp->inp_socket->so_snd, 2272 BBR_LOG_SETTINGS_CHG, 0, 2273 0, &log, false, &tv); 2274 } 2275 } 2276 2277 static void 2278 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2279 { 2280 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2281 union tcp_log_stackspecific log; 2282 struct timeval tv; 2283 2284 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2285 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2286 log.u_bbr.flex2 = to; 2287 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2288 log.u_bbr.flex4 = slot; 2289 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2290 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2291 log.u_bbr.flex7 = rack->rc_in_persist; 2292 log.u_bbr.flex8 = which; 2293 if (rack->rack_no_prr) 2294 log.u_bbr.pkts_out = 0; 2295 else 2296 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2297 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2298 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2299 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2300 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2301 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2302 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2303 log.u_bbr.pacing_gain = rack->r_must_retran; 2304 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2305 log.u_bbr.lost = rack_rto_min; 2306 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2307 &rack->rc_inp->inp_socket->so_rcv, 2308 &rack->rc_inp->inp_socket->so_snd, 2309 BBR_LOG_TIMERSTAR, 0, 2310 0, &log, false, &tv); 2311 } 2312 } 2313 2314 static void 2315 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2316 { 2317 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2318 union tcp_log_stackspecific log; 2319 struct timeval tv; 2320 2321 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2322 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2323 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2324 log.u_bbr.flex8 = to_num; 2325 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2326 log.u_bbr.flex2 = rack->rc_rack_rtt; 2327 if (rsm == NULL) 2328 log.u_bbr.flex3 = 0; 2329 else 2330 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2331 if (rack->rack_no_prr) 2332 log.u_bbr.flex5 = 0; 2333 else 2334 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2335 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2336 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2337 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2338 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2339 log.u_bbr.pacing_gain = rack->r_must_retran; 2340 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2341 &rack->rc_inp->inp_socket->so_rcv, 2342 &rack->rc_inp->inp_socket->so_snd, 2343 BBR_LOG_RTO, 0, 2344 0, &log, false, &tv); 2345 } 2346 } 2347 2348 static void 2349 
rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2350 struct rack_sendmap *prev, 2351 struct rack_sendmap *rsm, 2352 struct rack_sendmap *next, 2353 int flag, uint32_t th_ack, int line) 2354 { 2355 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2356 union tcp_log_stackspecific log; 2357 struct timeval tv; 2358 2359 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2360 log.u_bbr.flex8 = flag; 2361 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2362 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2363 log.u_bbr.cur_del_rate = (uint64_t)prev; 2364 log.u_bbr.delRate = (uint64_t)rsm; 2365 log.u_bbr.rttProp = (uint64_t)next; 2366 log.u_bbr.flex7 = 0; 2367 if (prev) { 2368 log.u_bbr.flex1 = prev->r_start; 2369 log.u_bbr.flex2 = prev->r_end; 2370 log.u_bbr.flex7 |= 0x4; 2371 } 2372 if (rsm) { 2373 log.u_bbr.flex3 = rsm->r_start; 2374 log.u_bbr.flex4 = rsm->r_end; 2375 log.u_bbr.flex7 |= 0x2; 2376 } 2377 if (next) { 2378 log.u_bbr.flex5 = next->r_start; 2379 log.u_bbr.flex6 = next->r_end; 2380 log.u_bbr.flex7 |= 0x1; 2381 } 2382 log.u_bbr.applimited = line; 2383 log.u_bbr.pkts_out = th_ack; 2384 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2385 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2386 if (rack->rack_no_prr) 2387 log.u_bbr.lost = 0; 2388 else 2389 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2390 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2391 &rack->rc_inp->inp_socket->so_rcv, 2392 &rack->rc_inp->inp_socket->so_snd, 2393 TCP_LOG_MAPCHG, 0, 2394 0, &log, false, &tv); 2395 } 2396 } 2397 2398 static void 2399 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2400 struct rack_sendmap *rsm, int conf) 2401 { 2402 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2403 union tcp_log_stackspecific log; 2404 struct timeval tv; 2405 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2406 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2407 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2408 log.u_bbr.flex1 = t; 2409 log.u_bbr.flex2 = len; 2410 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2411 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2412 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2413 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2414 log.u_bbr.flex7 = conf; 2415 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2416 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2417 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2418 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2419 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2420 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2421 if (rsm) { 2422 log.u_bbr.pkt_epoch = rsm->r_start; 2423 log.u_bbr.lost = rsm->r_end; 2424 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2425 log.u_bbr.pacing_gain = rsm->r_flags; 2426 } else { 2427 /* Its a SYN */ 2428 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2429 log.u_bbr.lost = 0; 2430 log.u_bbr.cwnd_gain = 0; 2431 log.u_bbr.pacing_gain = 0; 2432 } 2433 /* Write out general bits of interest rrs here */ 2434 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2435 log.u_bbr.use_lt_bw <<= 1; 2436 log.u_bbr.use_lt_bw |= rack->forced_ack; 2437 log.u_bbr.use_lt_bw <<= 1; 2438 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2439 log.u_bbr.use_lt_bw <<= 1; 2440 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2441 log.u_bbr.use_lt_bw <<= 1; 2442 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2443 log.u_bbr.use_lt_bw <<= 1; 2444 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2445 log.u_bbr.use_lt_bw <<= 1; 2446 
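		/*
		 * Once the final two flags are folded in below, use_lt_bw
		 * carries eight state bits, MSB to LSB: rc_highly_buffered,
		 * forced_ack, rc_gp_dyn_mul, in_probe_rtt,
		 * measure_saw_probe_rtt, app_limited_needs_set,
		 * rc_gp_filled and rc_dragged_bottom.
		 */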
log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2447 log.u_bbr.use_lt_bw <<= 1; 2448 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2449 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2450 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2451 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2452 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2453 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2454 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2455 log.u_bbr.bw_inuse <<= 32; 2456 if (rsm) 2457 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2458 TCP_LOG_EVENTP(tp, NULL, 2459 &rack->rc_inp->inp_socket->so_rcv, 2460 &rack->rc_inp->inp_socket->so_snd, 2461 BBR_LOG_BBRRTT, 0, 2462 0, &log, false, &tv); 2463 2464 2465 } 2466 } 2467 2468 static void 2469 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2470 { 2471 /* 2472 * Log the rtt sample we are 2473 * applying to the srtt algorithm in 2474 * useconds. 2475 */ 2476 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2477 union tcp_log_stackspecific log; 2478 struct timeval tv; 2479 2480 /* Convert our ms to a microsecond */ 2481 memset(&log, 0, sizeof(log)); 2482 log.u_bbr.flex1 = rtt; 2483 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2484 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2485 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2486 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2487 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2488 log.u_bbr.flex7 = 1; 2489 log.u_bbr.flex8 = rack->sack_attack_disable; 2490 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2491 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2492 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2493 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2494 log.u_bbr.pacing_gain = rack->r_must_retran; 2495 /* 2496 * We capture in delRate the upper 32 bits as 2497 * the confidence level we had declared, and the 2498 * lower 32 bits as the actual RTT using the arrival 2499 * timestamp. 
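 * For example, a confidence of 1 with an rs_us_rtt of 25000
 * decodes as (delRate >> 32) == 1 and
 * (delRate & 0xffffffff) == 25000.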
2500 */ 2501 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2502 log.u_bbr.delRate <<= 32; 2503 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2504 /* Lets capture all the things that make up t_rtxcur */ 2505 log.u_bbr.applimited = rack_rto_min; 2506 log.u_bbr.epoch = rack_rto_max; 2507 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2508 log.u_bbr.lost = rack_rto_min; 2509 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2510 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2511 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2512 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2513 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2514 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2515 &rack->rc_inp->inp_socket->so_rcv, 2516 &rack->rc_inp->inp_socket->so_snd, 2517 TCP_LOG_RTT, 0, 2518 0, &log, false, &tv); 2519 } 2520 } 2521 2522 static void 2523 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2524 { 2525 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2526 union tcp_log_stackspecific log; 2527 struct timeval tv; 2528 2529 /* Convert our ms to a microsecond */ 2530 memset(&log, 0, sizeof(log)); 2531 log.u_bbr.flex1 = rtt; 2532 log.u_bbr.flex2 = send_time; 2533 log.u_bbr.flex3 = ack_time; 2534 log.u_bbr.flex4 = where; 2535 log.u_bbr.flex7 = 2; 2536 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2537 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2538 &rack->rc_inp->inp_socket->so_rcv, 2539 &rack->rc_inp->inp_socket->so_snd, 2540 TCP_LOG_RTT, 0, 2541 0, &log, false, &tv); 2542 } 2543 } 2544 2545 2546 2547 static inline void 2548 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2549 { 2550 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2551 union tcp_log_stackspecific log; 2552 struct timeval tv; 2553 2554 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2555 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2556 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2557 log.u_bbr.flex1 = line; 2558 log.u_bbr.flex2 = tick; 2559 log.u_bbr.flex3 = tp->t_maxunacktime; 2560 log.u_bbr.flex4 = tp->t_acktime; 2561 log.u_bbr.flex8 = event; 2562 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2563 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2564 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2565 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2566 log.u_bbr.pacing_gain = rack->r_must_retran; 2567 TCP_LOG_EVENTP(tp, NULL, 2568 &rack->rc_inp->inp_socket->so_rcv, 2569 &rack->rc_inp->inp_socket->so_snd, 2570 BBR_LOG_PROGRESS, 0, 2571 0, &log, false, &tv); 2572 } 2573 } 2574 2575 static void 2576 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2577 { 2578 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2579 union tcp_log_stackspecific log; 2580 2581 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2582 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2583 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2584 log.u_bbr.flex1 = slot; 2585 if (rack->rack_no_prr) 2586 log.u_bbr.flex2 = 0; 2587 else 2588 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2589 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2590 log.u_bbr.flex8 = rack->rc_in_persist; 2591 log.u_bbr.timeStamp = cts; 2592 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2593 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2594 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2595 
log.u_bbr.pacing_gain = rack->r_must_retran; 2596 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2597 &rack->rc_inp->inp_socket->so_rcv, 2598 &rack->rc_inp->inp_socket->so_snd, 2599 BBR_LOG_BBRSND, 0, 2600 0, &log, false, tv); 2601 } 2602 } 2603 2604 static void 2605 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2606 { 2607 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2608 union tcp_log_stackspecific log; 2609 struct timeval tv; 2610 2611 memset(&log, 0, sizeof(log)); 2612 log.u_bbr.flex1 = did_out; 2613 log.u_bbr.flex2 = nxt_pkt; 2614 log.u_bbr.flex3 = way_out; 2615 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2616 if (rack->rack_no_prr) 2617 log.u_bbr.flex5 = 0; 2618 else 2619 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2620 log.u_bbr.flex6 = nsegs; 2621 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2622 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2623 log.u_bbr.flex7 <<= 1; 2624 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2625 log.u_bbr.flex7 <<= 1; 2626 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2627 log.u_bbr.flex8 = rack->rc_in_persist; 2628 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2629 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2630 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2631 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2632 log.u_bbr.use_lt_bw <<= 1; 2633 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2634 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2635 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2636 log.u_bbr.pacing_gain = rack->r_must_retran; 2637 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2638 &rack->rc_inp->inp_socket->so_rcv, 2639 &rack->rc_inp->inp_socket->so_snd, 2640 BBR_LOG_DOSEG_DONE, 0, 2641 0, &log, false, &tv); 2642 } 2643 } 2644 2645 static void 2646 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2647 { 2648 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2649 union tcp_log_stackspecific log; 2650 struct timeval tv; 2651 uint32_t cts; 2652 2653 memset(&log, 0, sizeof(log)); 2654 cts = tcp_get_usecs(&tv); 2655 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2656 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2657 log.u_bbr.flex4 = arg1; 2658 log.u_bbr.flex5 = arg2; 2659 log.u_bbr.flex6 = arg3; 2660 log.u_bbr.flex8 = frm; 2661 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2662 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2663 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2664 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2665 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2666 log.u_bbr.pacing_gain = rack->r_must_retran; 2667 TCP_LOG_EVENTP(tp, NULL, 2668 &tp->t_inpcb->inp_socket->so_rcv, 2669 &tp->t_inpcb->inp_socket->so_snd, 2670 TCP_HDWR_PACE_SIZE, 0, 2671 0, &log, false, &tv); 2672 } 2673 } 2674 2675 static void 2676 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2677 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2678 { 2679 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2680 union tcp_log_stackspecific log; 2681 struct timeval tv; 2682 2683 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2684 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2685 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2686 log.u_bbr.flex1 = slot; 2687 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2688 log.u_bbr.flex4 = reason; 2689 if 
(rack->rack_no_prr) 2690 log.u_bbr.flex5 = 0; 2691 else 2692 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2693 log.u_bbr.flex7 = hpts_calling; 2694 log.u_bbr.flex8 = rack->rc_in_persist; 2695 log.u_bbr.lt_epoch = cwnd_to_use; 2696 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2697 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2698 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2699 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2700 log.u_bbr.pacing_gain = rack->r_must_retran; 2701 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2702 &rack->rc_inp->inp_socket->so_rcv, 2703 &rack->rc_inp->inp_socket->so_snd, 2704 BBR_LOG_JUSTRET, 0, 2705 tlen, &log, false, &tv); 2706 } 2707 } 2708 2709 static void 2710 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2711 struct timeval *tv, uint32_t flags_on_entry) 2712 { 2713 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2714 union tcp_log_stackspecific log; 2715 2716 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2717 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2718 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2719 log.u_bbr.flex1 = line; 2720 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2721 log.u_bbr.flex3 = flags_on_entry; 2722 log.u_bbr.flex4 = us_cts; 2723 if (rack->rack_no_prr) 2724 log.u_bbr.flex5 = 0; 2725 else 2726 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2727 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2728 log.u_bbr.flex7 = hpts_removed; 2729 log.u_bbr.flex8 = 1; 2730 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2731 log.u_bbr.timeStamp = us_cts; 2732 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2733 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2734 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2735 log.u_bbr.pacing_gain = rack->r_must_retran; 2736 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2737 &rack->rc_inp->inp_socket->so_rcv, 2738 &rack->rc_inp->inp_socket->so_snd, 2739 BBR_LOG_TIMERCANC, 0, 2740 0, &log, false, tv); 2741 } 2742 } 2743 2744 static void 2745 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2746 uint32_t flex1, uint32_t flex2, 2747 uint32_t flex3, uint32_t flex4, 2748 uint32_t flex5, uint32_t flex6, 2749 uint16_t flex7, uint8_t mod) 2750 { 2751 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2752 union tcp_log_stackspecific log; 2753 struct timeval tv; 2754 2755 if (mod == 1) { 2756 /* No you can't use 1, its for the real to cancel */ 2757 return; 2758 } 2759 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2760 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2761 log.u_bbr.flex1 = flex1; 2762 log.u_bbr.flex2 = flex2; 2763 log.u_bbr.flex3 = flex3; 2764 log.u_bbr.flex4 = flex4; 2765 log.u_bbr.flex5 = flex5; 2766 log.u_bbr.flex6 = flex6; 2767 log.u_bbr.flex7 = flex7; 2768 log.u_bbr.flex8 = mod; 2769 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2770 &rack->rc_inp->inp_socket->so_rcv, 2771 &rack->rc_inp->inp_socket->so_snd, 2772 BBR_LOG_TIMERCANC, 0, 2773 0, &log, false, &tv); 2774 } 2775 } 2776 2777 static void 2778 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2779 { 2780 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2781 union tcp_log_stackspecific log; 2782 struct timeval tv; 2783 2784 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2785 log.u_bbr.flex1 = timers; 2786 log.u_bbr.flex2 = ret; 2787 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2788 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2789 log.u_bbr.flex5 = cts; 2790 if (rack->rack_no_prr) 2791 log.u_bbr.flex6 = 0; 2792 else 2793 log.u_bbr.flex6 
= rack->r_ctl.rc_prr_sndcnt; 2794 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2795 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2796 log.u_bbr.pacing_gain = rack->r_must_retran; 2797 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2798 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2799 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2800 &rack->rc_inp->inp_socket->so_rcv, 2801 &rack->rc_inp->inp_socket->so_snd, 2802 BBR_LOG_TO_PROCESS, 0, 2803 0, &log, false, &tv); 2804 } 2805 } 2806 2807 static void 2808 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2809 { 2810 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2811 union tcp_log_stackspecific log; 2812 struct timeval tv; 2813 2814 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2815 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2816 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2817 if (rack->rack_no_prr) 2818 log.u_bbr.flex3 = 0; 2819 else 2820 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2821 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2822 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2823 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2824 log.u_bbr.flex8 = frm; 2825 log.u_bbr.pkts_out = orig_cwnd; 2826 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2827 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2828 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2829 log.u_bbr.use_lt_bw <<= 1; 2830 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2831 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2832 &rack->rc_inp->inp_socket->so_rcv, 2833 &rack->rc_inp->inp_socket->so_snd, 2834 BBR_LOG_BBRUPD, 0, 2835 0, &log, false, &tv); 2836 } 2837 } 2838 2839 #ifdef NETFLIX_EXP_DETECTION 2840 static void 2841 rack_log_sad(struct tcp_rack *rack, int event) 2842 { 2843 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2844 union tcp_log_stackspecific log; 2845 struct timeval tv; 2846 2847 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2848 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2849 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2850 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2851 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2852 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2853 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2854 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2855 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2856 log.u_bbr.lt_epoch |= rack->do_detection; 2857 log.u_bbr.applimited = tcp_map_minimum; 2858 log.u_bbr.flex7 = rack->sack_attack_disable; 2859 log.u_bbr.flex8 = event; 2860 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2861 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2862 log.u_bbr.delivered = tcp_sad_decay_val; 2863 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2864 &rack->rc_inp->inp_socket->so_rcv, 2865 &rack->rc_inp->inp_socket->so_snd, 2866 TCP_SAD_DETECTION, 0, 2867 0, &log, false, &tv); 2868 } 2869 } 2870 #endif 2871 2872 static void 2873 rack_counter_destroy(void) 2874 { 2875 int i; 2876 2877 counter_u64_free(rack_fto_send); 2878 counter_u64_free(rack_fto_rsm_send); 2879 counter_u64_free(rack_nfto_resend); 2880 counter_u64_free(rack_hw_pace_init_fail); 2881 counter_u64_free(rack_hw_pace_lost); 2882 counter_u64_free(rack_non_fto_send); 2883 counter_u64_free(rack_extended_rfo); 2884 counter_u64_free(rack_ack_total); 2885 counter_u64_free(rack_express_sack); 2886 counter_u64_free(rack_sack_total); 2887 counter_u64_free(rack_move_none); 2888 counter_u64_free(rack_move_some); 2889 counter_u64_free(rack_sack_attacks_detected); 2890 counter_u64_free(rack_sack_attacks_reversed); 2891 
counter_u64_free(rack_sack_used_next_merge); 2892 counter_u64_free(rack_sack_used_prev_merge); 2893 counter_u64_free(rack_badfr); 2894 counter_u64_free(rack_badfr_bytes); 2895 counter_u64_free(rack_rtm_prr_retran); 2896 counter_u64_free(rack_rtm_prr_newdata); 2897 counter_u64_free(rack_timestamp_mismatch); 2898 counter_u64_free(rack_find_high); 2899 counter_u64_free(rack_reorder_seen); 2900 counter_u64_free(rack_tlp_tot); 2901 counter_u64_free(rack_tlp_newdata); 2902 counter_u64_free(rack_tlp_retran); 2903 counter_u64_free(rack_tlp_retran_bytes); 2904 counter_u64_free(rack_tlp_retran_fail); 2905 counter_u64_free(rack_to_tot); 2906 counter_u64_free(rack_to_arm_rack); 2907 counter_u64_free(rack_to_arm_tlp); 2908 counter_u64_free(rack_calc_zero); 2909 counter_u64_free(rack_calc_nonzero); 2910 counter_u64_free(rack_paced_segments); 2911 counter_u64_free(rack_unpaced_segments); 2912 counter_u64_free(rack_saw_enobuf); 2913 counter_u64_free(rack_saw_enobuf_hw); 2914 counter_u64_free(rack_saw_enetunreach); 2915 counter_u64_free(rack_hot_alloc); 2916 counter_u64_free(rack_to_alloc); 2917 counter_u64_free(rack_to_alloc_hard); 2918 counter_u64_free(rack_to_alloc_emerg); 2919 counter_u64_free(rack_to_alloc_limited); 2920 counter_u64_free(rack_alloc_limited_conns); 2921 counter_u64_free(rack_split_limited); 2922 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 2923 counter_u64_free(rack_proc_comp_ack[i]); 2924 } 2925 counter_u64_free(rack_multi_single_eq); 2926 counter_u64_free(rack_proc_non_comp_ack); 2927 counter_u64_free(rack_sack_proc_all); 2928 counter_u64_free(rack_sack_proc_restart); 2929 counter_u64_free(rack_sack_proc_short); 2930 counter_u64_free(rack_enter_tlp_calc); 2931 counter_u64_free(rack_used_tlpmethod); 2932 counter_u64_free(rack_used_tlpmethod2); 2933 counter_u64_free(rack_sack_skipped_acked); 2934 counter_u64_free(rack_sack_splits); 2935 counter_u64_free(rack_progress_drops); 2936 counter_u64_free(rack_input_idle_reduces); 2937 counter_u64_free(rack_collapsed_win); 2938 counter_u64_free(rack_tlp_does_nada); 2939 counter_u64_free(rack_try_scwnd); 2940 counter_u64_free(rack_per_timer_hole); 2941 counter_u64_free(rack_large_ackcmp); 2942 counter_u64_free(rack_small_ackcmp); 2943 #ifdef INVARIANTS 2944 counter_u64_free(rack_adjust_map_bw); 2945 #endif 2946 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2947 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2948 } 2949 2950 static struct rack_sendmap * 2951 rack_alloc(struct tcp_rack *rack) 2952 { 2953 struct rack_sendmap *rsm; 2954 2955 /* 2956 * First get the top of the list it in 2957 * theory is the "hottest" rsm we have, 2958 * possibly just freed by ack processing. 2959 */ 2960 if (rack->rc_free_cnt > rack_free_cache) { 2961 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2962 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2963 counter_u64_add(rack_hot_alloc, 1); 2964 rack->rc_free_cnt--; 2965 return (rsm); 2966 } 2967 /* 2968 * Once we get under our free cache we probably 2969 * no longer have a "hot" one available. Lets 2970 * get one from UMA. 2971 */ 2972 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2973 if (rsm) { 2974 rack->r_ctl.rc_num_maps_alloced++; 2975 counter_u64_add(rack_to_alloc, 1); 2976 return (rsm); 2977 } 2978 /* 2979 * Dig in to our aux rsm's (the last two) since 2980 * UMA failed to get us one. 
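	 * These remaining entries are the reserve we normally hold back
	 * (rack_free_trim() below keeps the free list at rack_free_cache
	 * entries), so draining them is strictly an emergency measure
	 * for when the zone allocation fails.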
2981 */ 2982 if (rack->rc_free_cnt) { 2983 counter_u64_add(rack_to_alloc_emerg, 1); 2984 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2985 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2986 rack->rc_free_cnt--; 2987 return (rsm); 2988 } 2989 return (NULL); 2990 } 2991 2992 static struct rack_sendmap * 2993 rack_alloc_full_limit(struct tcp_rack *rack) 2994 { 2995 if ((V_tcp_map_entries_limit > 0) && 2996 (rack->do_detection == 0) && 2997 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2998 counter_u64_add(rack_to_alloc_limited, 1); 2999 if (!rack->alloc_limit_reported) { 3000 rack->alloc_limit_reported = 1; 3001 counter_u64_add(rack_alloc_limited_conns, 1); 3002 } 3003 return (NULL); 3004 } 3005 return (rack_alloc(rack)); 3006 } 3007 3008 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3009 static struct rack_sendmap * 3010 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 3011 { 3012 struct rack_sendmap *rsm; 3013 3014 if (limit_type) { 3015 /* currently there is only one limit type */ 3016 if (V_tcp_map_split_limit > 0 && 3017 (rack->do_detection == 0) && 3018 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 3019 counter_u64_add(rack_split_limited, 1); 3020 if (!rack->alloc_limit_reported) { 3021 rack->alloc_limit_reported = 1; 3022 counter_u64_add(rack_alloc_limited_conns, 1); 3023 } 3024 return (NULL); 3025 } 3026 } 3027 3028 /* allocate and mark in the limit type, if set */ 3029 rsm = rack_alloc(rack); 3030 if (rsm != NULL && limit_type) { 3031 rsm->r_limit_type = limit_type; 3032 rack->r_ctl.rc_num_split_allocs++; 3033 } 3034 return (rsm); 3035 } 3036 3037 static void 3038 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3039 { 3040 if (rsm->r_flags & RACK_APP_LIMITED) { 3041 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3042 rack->r_ctl.rc_app_limited_cnt--; 3043 } 3044 } 3045 if (rsm->r_limit_type) { 3046 /* currently there is only one limit type */ 3047 rack->r_ctl.rc_num_split_allocs--; 3048 } 3049 if (rsm == rack->r_ctl.rc_first_appl) { 3050 if (rack->r_ctl.rc_app_limited_cnt == 0) 3051 rack->r_ctl.rc_first_appl = NULL; 3052 else { 3053 /* Follow the next one out */ 3054 struct rack_sendmap fe; 3055 3056 fe.r_start = rsm->r_nseq_appl; 3057 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3058 } 3059 } 3060 if (rsm == rack->r_ctl.rc_resend) 3061 rack->r_ctl.rc_resend = NULL; 3062 if (rsm == rack->r_ctl.rc_rsm_at_retran) 3063 rack->r_ctl.rc_rsm_at_retran = NULL; 3064 if (rsm == rack->r_ctl.rc_end_appl) 3065 rack->r_ctl.rc_end_appl = NULL; 3066 if (rack->r_ctl.rc_tlpsend == rsm) 3067 rack->r_ctl.rc_tlpsend = NULL; 3068 if (rack->r_ctl.rc_sacklast == rsm) 3069 rack->r_ctl.rc_sacklast = NULL; 3070 memset(rsm, 0, sizeof(struct rack_sendmap)); 3071 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3072 rack->rc_free_cnt++; 3073 } 3074 3075 static void 3076 rack_free_trim(struct tcp_rack *rack) 3077 { 3078 struct rack_sendmap *rsm; 3079 3080 /* 3081 * Free up all the tail entries until 3082 * we get our list down to the limit. 
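	 * Entries are pulled from the tail and handed back to the UMA
	 * zone; the head of the list (the most recently freed, "hottest"
	 * entries) is kept for quick reuse by rack_alloc().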
3083 */ 3084 while (rack->rc_free_cnt > rack_free_cache) { 3085 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3086 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3087 rack->rc_free_cnt--; 3088 uma_zfree(rack_zone, rsm); 3089 } 3090 } 3091 3092 3093 static uint32_t 3094 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3095 { 3096 uint64_t srtt, bw, len, tim; 3097 uint32_t segsiz, def_len, minl; 3098 3099 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3100 def_len = rack_def_data_window * segsiz; 3101 if (rack->rc_gp_filled == 0) { 3102 /* 3103 * We have no measurement (IW is in flight?) so 3104 * we can only guess using our data_window sysctl 3105 * value (usually 20MSS). 3106 */ 3107 return (def_len); 3108 } 3109 /* 3110 * Now we have a number of factors to consider. 3111 * 3112 * 1) We have a desired BDP which is usually 3113 * at least 2. 3114 * 2) We have a minimum number of rtt's usually 1 SRTT 3115 * but we allow it too to be more. 3116 * 3) We want to make sure a measurement last N useconds (if 3117 * we have set rack_min_measure_usec. 3118 * 3119 * We handle the first concern here by trying to create a data 3120 * window of max(rack_def_data_window, DesiredBDP). The 3121 * second concern we handle in not letting the measurement 3122 * window end normally until at least the required SRTT's 3123 * have gone by which is done further below in 3124 * rack_enough_for_measurement(). Finally the third concern 3125 * we also handle here by calculating how long that time 3126 * would take at the current BW and then return the 3127 * max of our first calculation and that length. Note 3128 * that if rack_min_measure_usec is 0, we don't deal 3129 * with concern 3. Also for both Concern 1 and 3 an 3130 * application limited period could end the measurement 3131 * earlier. 3132 * 3133 * So lets calculate the BDP with the "known" b/w using 3134 * the SRTT has our rtt and then multiply it by the 3135 * goal. 3136 */ 3137 bw = rack_get_bw(rack); 3138 srtt = (uint64_t)tp->t_srtt; 3139 len = bw * srtt; 3140 len /= (uint64_t)HPTS_USEC_IN_SEC; 3141 len *= max(1, rack_goal_bdp); 3142 /* Now we need to round up to the nearest MSS */ 3143 len = roundup(len, segsiz); 3144 if (rack_min_measure_usec) { 3145 /* Now calculate our min length for this b/w */ 3146 tim = rack_min_measure_usec; 3147 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3148 if (minl == 0) 3149 minl = 1; 3150 minl = roundup(minl, segsiz); 3151 if (len < minl) 3152 len = minl; 3153 } 3154 /* 3155 * Now if we have a very small window we want 3156 * to attempt to get the window that is 3157 * as small as possible. This happens on 3158 * low b/w connections and we don't want to 3159 * span huge numbers of rtt's between measurements. 3160 * 3161 * We basically include 2 over our "MIN window" so 3162 * that the measurement can be shortened (possibly) by 3163 * an ack'ed packet. 3164 */ 3165 if (len < def_len) 3166 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3167 else 3168 return (max((uint32_t)len, def_len)); 3169 3170 } 3171 3172 static int 3173 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3174 { 3175 uint32_t tim, srtts, segsiz; 3176 3177 /* 3178 * Has enough time passed for the GP measurement to be valid? 
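	 * We accept early if everything outstanding has been acked or the
	 * cum-ack has reached the app-limited point; otherwise we require
	 * both a minimum byte count (roughly an initial window) and at
	 * least rack_min_srtts gp_srtt's worth of elapsed time.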
3179 */ 3180 if ((tp->snd_max == tp->snd_una) || 3181 (th_ack == tp->snd_max)){ 3182 /* All is acked */ 3183 *quality = RACK_QUALITY_ALLACKED; 3184 return (1); 3185 } 3186 if (SEQ_LT(th_ack, tp->gput_seq)) { 3187 /* Not enough bytes yet */ 3188 return (0); 3189 } 3190 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3191 if (SEQ_LT(th_ack, tp->gput_ack) && 3192 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3193 /* Not enough bytes yet */ 3194 return (0); 3195 } 3196 if (rack->r_ctl.rc_first_appl && 3197 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3198 /* 3199 * We are up to the app limited send point 3200 * we have to measure irrespective of the time.. 3201 */ 3202 *quality = RACK_QUALITY_APPLIMITED; 3203 return (1); 3204 } 3205 /* Now what about time? */ 3206 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3207 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3208 if (tim >= srtts) { 3209 *quality = RACK_QUALITY_HIGH; 3210 return (1); 3211 } 3212 /* Nope not even a full SRTT has passed */ 3213 return (0); 3214 } 3215 3216 static void 3217 rack_log_timely(struct tcp_rack *rack, 3218 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3219 uint64_t up_bnd, int line, uint8_t method) 3220 { 3221 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3222 union tcp_log_stackspecific log; 3223 struct timeval tv; 3224 3225 memset(&log, 0, sizeof(log)); 3226 log.u_bbr.flex1 = logged; 3227 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3228 log.u_bbr.flex2 <<= 4; 3229 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3230 log.u_bbr.flex2 <<= 4; 3231 log.u_bbr.flex2 |= rack->rc_gp_incr; 3232 log.u_bbr.flex2 <<= 4; 3233 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3234 log.u_bbr.flex3 = rack->rc_gp_incr; 3235 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3236 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3237 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3238 log.u_bbr.flex7 = rack->rc_gp_bwred; 3239 log.u_bbr.flex8 = method; 3240 log.u_bbr.cur_del_rate = cur_bw; 3241 log.u_bbr.delRate = low_bnd; 3242 log.u_bbr.bw_inuse = up_bnd; 3243 log.u_bbr.rttProp = rack_get_bw(rack); 3244 log.u_bbr.pkt_epoch = line; 3245 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3246 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3247 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3248 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3249 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3250 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3251 log.u_bbr.cwnd_gain <<= 1; 3252 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3253 log.u_bbr.cwnd_gain <<= 1; 3254 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3255 log.u_bbr.cwnd_gain <<= 1; 3256 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3257 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3258 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3259 &rack->rc_inp->inp_socket->so_rcv, 3260 &rack->rc_inp->inp_socket->so_snd, 3261 TCP_TIMELY_WORK, 0, 3262 0, &log, false, &tv); 3263 } 3264 } 3265 3266 static int 3267 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3268 { 3269 /* 3270 * Before we increase we need to know if 3271 * the estimate just made was less than 3272 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3273 * 3274 * If we already are pacing at a fast enough 3275 * rate to push us faster there is no sense of 3276 * increasing. 3277 * 3278 * We first caculate our actual pacing rate (ss or ca multipler 3279 * times our cur_bw). 
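	 * (For example, a cur_bw of 10 Mbps with a 120 percent ss/ca
	 * multiplier gives an actual pacing rate of 12 Mbps.)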
3280 * 3281 * Then we take the last measured rate and multipy by our 3282 * maximum pacing overage to give us a max allowable rate. 3283 * 3284 * If our act_rate is smaller than our max_allowable rate 3285 * then we should increase. Else we should hold steady. 3286 * 3287 */ 3288 uint64_t act_rate, max_allow_rate; 3289 3290 if (rack_timely_no_stopping) 3291 return (1); 3292 3293 if ((cur_bw == 0) || (last_bw_est == 0)) { 3294 /* 3295 * Initial startup case or 3296 * everything is acked case. 3297 */ 3298 rack_log_timely(rack, mult, cur_bw, 0, 0, 3299 __LINE__, 9); 3300 return (1); 3301 } 3302 if (mult <= 100) { 3303 /* 3304 * We can always pace at or slightly above our rate. 3305 */ 3306 rack_log_timely(rack, mult, cur_bw, 0, 0, 3307 __LINE__, 9); 3308 return (1); 3309 } 3310 act_rate = cur_bw * (uint64_t)mult; 3311 act_rate /= 100; 3312 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3313 max_allow_rate /= 100; 3314 if (act_rate < max_allow_rate) { 3315 /* 3316 * Here the rate we are actually pacing at 3317 * is smaller than 10% above our last measurement. 3318 * This means we are pacing below what we would 3319 * like to try to achieve (plus some wiggle room). 3320 */ 3321 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3322 __LINE__, 9); 3323 return (1); 3324 } else { 3325 /* 3326 * Here we are already pacing at least rack_max_per_above(10%) 3327 * what we are getting back. This indicates most likely 3328 * that we are being limited (cwnd/rwnd/app) and can't 3329 * get any more b/w. There is no sense of trying to 3330 * raise up the pacing rate its not speeding us up 3331 * and we already are pacing faster than we are getting. 3332 */ 3333 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3334 __LINE__, 8); 3335 return (0); 3336 } 3337 } 3338 3339 static void 3340 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3341 { 3342 /* 3343 * When we drag bottom, we want to assure 3344 * that no multiplier is below 1.0, if so 3345 * we want to restore it to at least that. 3346 */ 3347 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3348 /* This is unlikely we usually do not touch recovery */ 3349 rack->r_ctl.rack_per_of_gp_rec = 100; 3350 } 3351 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3352 rack->r_ctl.rack_per_of_gp_ca = 100; 3353 } 3354 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3355 rack->r_ctl.rack_per_of_gp_ss = 100; 3356 } 3357 } 3358 3359 static void 3360 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3361 { 3362 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3363 rack->r_ctl.rack_per_of_gp_ca = 100; 3364 } 3365 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3366 rack->r_ctl.rack_per_of_gp_ss = 100; 3367 } 3368 } 3369 3370 static void 3371 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3372 { 3373 int32_t calc, logged, plus; 3374 3375 logged = 0; 3376 3377 if (override) { 3378 /* 3379 * override is passed when we are 3380 * loosing b/w and making one last 3381 * gasp at trying to not loose out 3382 * to a new-reno flow. 3383 */ 3384 goto extra_boost; 3385 } 3386 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3387 if (rack->rc_gp_incr && 3388 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3389 /* 3390 * Reset and get 5 strokes more before the boost. Note 3391 * that the count is 0 based so we have to add one. 
3392 */ 3393 extra_boost: 3394 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3395 rack->rc_gp_timely_inc_cnt = 0; 3396 } else 3397 plus = (uint32_t)rack_gp_increase_per; 3398 /* Must be at least 1% increase for true timely increases */ 3399 if ((plus < 1) && 3400 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3401 plus = 1; 3402 if (rack->rc_gp_saw_rec && 3403 (rack->rc_gp_no_rec_chg == 0) && 3404 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3405 rack->r_ctl.rack_per_of_gp_rec)) { 3406 /* We have been in recovery ding it too */ 3407 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3408 if (calc > 0xffff) 3409 calc = 0xffff; 3410 logged |= 1; 3411 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3412 if (rack_per_upper_bound_ss && 3413 (rack->rc_dragged_bottom == 0) && 3414 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3415 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3416 } 3417 if (rack->rc_gp_saw_ca && 3418 (rack->rc_gp_saw_ss == 0) && 3419 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3420 rack->r_ctl.rack_per_of_gp_ca)) { 3421 /* In CA */ 3422 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3423 if (calc > 0xffff) 3424 calc = 0xffff; 3425 logged |= 2; 3426 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3427 if (rack_per_upper_bound_ca && 3428 (rack->rc_dragged_bottom == 0) && 3429 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3430 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3431 } 3432 if (rack->rc_gp_saw_ss && 3433 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3434 rack->r_ctl.rack_per_of_gp_ss)) { 3435 /* In SS */ 3436 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3437 if (calc > 0xffff) 3438 calc = 0xffff; 3439 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3440 if (rack_per_upper_bound_ss && 3441 (rack->rc_dragged_bottom == 0) && 3442 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3443 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3444 logged |= 4; 3445 } 3446 if (logged && 3447 (rack->rc_gp_incr == 0)){ 3448 /* Go into increment mode */ 3449 rack->rc_gp_incr = 1; 3450 rack->rc_gp_timely_inc_cnt = 0; 3451 } 3452 if (rack->rc_gp_incr && 3453 logged && 3454 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3455 rack->rc_gp_timely_inc_cnt++; 3456 } 3457 rack_log_timely(rack, logged, plus, 0, 0, 3458 __LINE__, 1); 3459 } 3460 3461 static uint32_t 3462 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3463 { 3464 /* 3465 * norm_grad = rtt_diff / minrtt; 3466 * new_per = curper * (1 - B * norm_grad) 3467 * 3468 * B = rack_gp_decrease_per (default 10%) 3469 * rtt_dif = input var current rtt-diff 3470 * curper = input var current percentage 3471 * minrtt = from rack filter 3472 * 3473 */ 3474 uint64_t perf; 3475 3476 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3477 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3478 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3479 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3480 (uint64_t)1000000)) / 3481 (uint64_t)1000000); 3482 if (perf > curper) { 3483 /* TSNH */ 3484 perf = curper - 1; 3485 } 3486 return ((uint32_t)perf); 3487 } 3488 3489 static uint32_t 3490 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3491 { 3492 /* 3493 * highrttthresh 3494 * result = curper * (1 - (B * ( 1 - ------ )) 3495 * gp_srtt 3496 * 3497 * B = rack_gp_decrease_per (default 10%) 3498 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3499 */ 3500 uint64_t perf; 3501 uint32_t 
highrttthresh; 3502 3503 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3504 3505 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3506 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3507 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3508 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3509 return (perf); 3510 } 3511 3512 static void 3513 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3514 { 3515 uint64_t logvar, logvar2, logvar3; 3516 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3517 3518 if (rack->rc_gp_incr) { 3519 /* Turn off increment counting */ 3520 rack->rc_gp_incr = 0; 3521 rack->rc_gp_timely_inc_cnt = 0; 3522 } 3523 ss_red = ca_red = rec_red = 0; 3524 logged = 0; 3525 /* Calculate the reduction value */ 3526 if (rtt_diff < 0) { 3527 rtt_diff *= -1; 3528 } 3529 /* Must be at least 1% reduction */ 3530 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3531 /* We have been in recovery ding it too */ 3532 if (timely_says == 2) { 3533 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3534 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3535 if (alt < new_per) 3536 val = alt; 3537 else 3538 val = new_per; 3539 } else 3540 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3541 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3542 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3543 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3544 } else { 3545 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3546 rec_red = 0; 3547 } 3548 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3549 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3550 logged |= 1; 3551 } 3552 if (rack->rc_gp_saw_ss) { 3553 /* Sent in SS */ 3554 if (timely_says == 2) { 3555 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3556 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3557 if (alt < new_per) 3558 val = alt; 3559 else 3560 val = new_per; 3561 } else 3562 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3563 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3564 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3565 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3566 } else { 3567 ss_red = new_per; 3568 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3569 logvar = new_per; 3570 logvar <<= 32; 3571 logvar |= alt; 3572 logvar2 = (uint32_t)rtt; 3573 logvar2 <<= 32; 3574 logvar2 |= (uint32_t)rtt_diff; 3575 logvar3 = rack_gp_rtt_maxmul; 3576 logvar3 <<= 32; 3577 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3578 rack_log_timely(rack, timely_says, 3579 logvar2, logvar3, 3580 logvar, __LINE__, 10); 3581 } 3582 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3583 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3584 logged |= 4; 3585 } else if (rack->rc_gp_saw_ca) { 3586 /* Sent in CA */ 3587 if (timely_says == 2) { 3588 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3589 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3590 if (alt < new_per) 3591 val = alt; 3592 else 3593 val = new_per; 3594 } else 3595 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3596 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3597 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3598 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3599 } else { 3600 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3601 ca_red = 0; 3602 logvar = new_per; 3603 logvar <<= 32; 3604 logvar |= alt; 3605 logvar2 = (uint32_t)rtt; 3606 logvar2 <<= 32; 3607 logvar2 |= (uint32_t)rtt_diff; 3608 logvar3 = rack_gp_rtt_maxmul; 3609 logvar3 <<= 32; 3610 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3611 rack_log_timely(rack, timely_says, 3612 logvar2, logvar3, 3613 logvar, __LINE__, 10); 3614 } 3615 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3616 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3617 logged |= 2; 3618 } 3619 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3620 rack->rc_gp_timely_dec_cnt++; 3621 if (rack_timely_dec_clear && 3622 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3623 rack->rc_gp_timely_dec_cnt = 0; 3624 } 3625 logvar = ss_red; 3626 logvar <<= 32; 3627 logvar |= ca_red; 3628 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3629 __LINE__, 2); 3630 } 3631 3632 static void 3633 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3634 uint32_t rtt, uint32_t line, uint8_t reas) 3635 { 3636 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3637 union tcp_log_stackspecific log; 3638 struct timeval tv; 3639 3640 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3641 log.u_bbr.flex1 = line; 3642 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3643 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3644 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3645 log.u_bbr.flex5 = rtt; 3646 log.u_bbr.flex6 = rack->rc_highly_buffered; 3647 log.u_bbr.flex6 <<= 1; 3648 log.u_bbr.flex6 |= rack->forced_ack; 3649 log.u_bbr.flex6 <<= 1; 3650 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3651 log.u_bbr.flex6 <<= 1; 3652 log.u_bbr.flex6 |= rack->in_probe_rtt; 3653 log.u_bbr.flex6 <<= 1; 3654 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3655 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3656 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3657 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3658 log.u_bbr.flex8 = reas; 3659 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3660 log.u_bbr.delRate = rack_get_bw(rack); 3661 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3662 log.u_bbr.cur_del_rate <<= 32; 3663 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3664 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3665 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3666 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3667 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3668 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3669 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3670 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3671 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3672 log.u_bbr.rttProp = us_cts; 3673 log.u_bbr.rttProp <<= 32; 3674 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3675 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3676 &rack->rc_inp->inp_socket->so_rcv, 3677 &rack->rc_inp->inp_socket->so_snd, 3678 BBR_LOG_RTT_SHRINKS, 0, 3679 0, &log, false, &rack->r_ctl.act_rcv_time); 3680 } 3681 } 3682 3683 static void 3684 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3685 { 3686 uint64_t bwdp; 3687 3688 bwdp = rack_get_bw(rack); 3689 bwdp *= (uint64_t)rtt; 3690 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3691 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3692 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3693 /* 3694 * A window protocol must be able to have 4 packets 3695 * outstanding as the floor in order to function 3696 * (especially considering delayed ack :D). 3697 */ 3698 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3699 } 3700 } 3701 3702 static void 3703 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3704 { 3705 /** 3706 * ProbeRTT is a bit different in rack_pacing than in 3707 * BBR. It is like BBR in that it uses the lowering of 3708 * the RTT as a signal that we saw something new and 3709 * counts from there for how long between. But it is 3710 * different in that its quite simple. It does not 3711 * play with the cwnd and wait until we get down 3712 * to N segments outstanding and hold that for 3713 * 200ms. Instead it just sets the pacing reduction 3714 * rate to a set percentage (70 by default) and hold 3715 * that for a number of recent GP Srtt's. 3716 */ 3717 uint32_t segsiz; 3718 3719 if (rack->rc_gp_dyn_mul == 0) 3720 return; 3721 3722 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3723 /* We are idle */ 3724 return; 3725 } 3726 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3727 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3728 /* 3729 * Stop the goodput now, the idea here is 3730 * that future measurements with in_probe_rtt 3731 * won't register if they are not greater so 3732 * we want to get what info (if any) is available 3733 * now. 3734 */ 3735 rack_do_goodput_measurement(rack->rc_tp, rack, 3736 rack->rc_tp->snd_una, __LINE__, 3737 RACK_QUALITY_PROBERTT); 3738 } 3739 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3740 rack->r_ctl.rc_time_probertt_entered = us_cts; 3741 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3742 rack->r_ctl.rc_pace_min_segs); 3743 rack->in_probe_rtt = 1; 3744 rack->measure_saw_probe_rtt = 1; 3745 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3746 rack->r_ctl.rc_time_probertt_starts = 0; 3747 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3748 if (rack_probertt_use_min_rtt_entry) 3749 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3750 else 3751 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3752 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3753 __LINE__, RACK_RTTS_ENTERPROBE); 3754 } 3755 3756 static void 3757 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3758 { 3759 struct rack_sendmap *rsm; 3760 uint32_t segsiz; 3761 3762 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3763 rack->r_ctl.rc_pace_min_segs); 3764 rack->in_probe_rtt = 0; 3765 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3766 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3767 /* 3768 * Stop the goodput now, the idea here is 3769 * that future measurements with in_probe_rtt 3770 * won't register if they are not greater so 3771 * we want to get what info (if any) is available 3772 * now. 3773 */ 3774 rack_do_goodput_measurement(rack->rc_tp, rack, 3775 rack->rc_tp->snd_una, __LINE__, 3776 RACK_QUALITY_PROBERTT); 3777 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3778 /* 3779 * We don't have enough data to make a measurement. 3780 * So lets just stop and start here after exiting 3781 * probe-rtt. We probably are not interested in 3782 * the results anyway. 3783 */ 3784 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3785 } 3786 /* 3787 * Measurements through the current snd_max are going 3788 * to be limited by the slower pacing rate. 
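	 * (Everything sent while in_probe_rtt was clocked out at the
	 * reduced rack_per_of_gp_probertt percentage, 70 by default as
	 * noted above, so a goodput sample spanning it would understate
	 * the path.)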
3789 * 3790 * We need to mark these as app-limited so we 3791 * don't collapse the b/w. 3792 */ 3793 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3794 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3795 if (rack->r_ctl.rc_app_limited_cnt == 0) 3796 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3797 else { 3798 /* 3799 * Go out to the end app limited and mark 3800 * this new one as next and move the end_appl up 3801 * to this guy. 3802 */ 3803 if (rack->r_ctl.rc_end_appl) 3804 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3805 rack->r_ctl.rc_end_appl = rsm; 3806 } 3807 rsm->r_flags |= RACK_APP_LIMITED; 3808 rack->r_ctl.rc_app_limited_cnt++; 3809 } 3810 /* 3811 * Now, we need to examine our pacing rate multipliers. 3812 * If its under 100%, we need to kick it back up to 3813 * 100%. We also don't let it be over our "max" above 3814 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3815 * Note setting clamp_atexit_prtt to 0 has the effect 3816 * of setting CA/SS to 100% always at exit (which is 3817 * the default behavior). 3818 */ 3819 if (rack_probertt_clear_is) { 3820 rack->rc_gp_incr = 0; 3821 rack->rc_gp_bwred = 0; 3822 rack->rc_gp_timely_inc_cnt = 0; 3823 rack->rc_gp_timely_dec_cnt = 0; 3824 } 3825 /* Do we do any clamping at exit? */ 3826 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3827 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3828 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3829 } 3830 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3831 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3832 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3833 } 3834 /* 3835 * Lets set rtt_diff to 0, so that we will get a "boost" 3836 * after exiting. 3837 */ 3838 rack->r_ctl.rc_rtt_diff = 0; 3839 3840 /* Clear all flags so we start fresh */ 3841 rack->rc_tp->t_bytes_acked = 0; 3842 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3843 /* 3844 * If configured to, set the cwnd and ssthresh to 3845 * our targets. 3846 */ 3847 if (rack_probe_rtt_sets_cwnd) { 3848 uint64_t ebdp; 3849 uint32_t setto; 3850 3851 /* Set ssthresh so we get into CA once we hit our target */ 3852 if (rack_probertt_use_min_rtt_exit == 1) { 3853 /* Set to min rtt */ 3854 rack_set_prtt_target(rack, segsiz, 3855 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3856 } else if (rack_probertt_use_min_rtt_exit == 2) { 3857 /* Set to current gp rtt */ 3858 rack_set_prtt_target(rack, segsiz, 3859 rack->r_ctl.rc_gp_srtt); 3860 } else if (rack_probertt_use_min_rtt_exit == 3) { 3861 /* Set to entry gp rtt */ 3862 rack_set_prtt_target(rack, segsiz, 3863 rack->r_ctl.rc_entry_gp_rtt); 3864 } else { 3865 uint64_t sum; 3866 uint32_t setval; 3867 3868 sum = rack->r_ctl.rc_entry_gp_rtt; 3869 sum *= 10; 3870 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3871 if (sum >= 20) { 3872 /* 3873 * A highly buffered path needs 3874 * cwnd space for timely to work. 3875 * Lets set things up as if 3876 * we are heading back here again. 3877 */ 3878 setval = rack->r_ctl.rc_entry_gp_rtt; 3879 } else if (sum >= 15) { 3880 /* 3881 * Lets take the smaller of the 3882 * two since we are just somewhat 3883 * buffered. 3884 */ 3885 setval = rack->r_ctl.rc_gp_srtt; 3886 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3887 setval = rack->r_ctl.rc_entry_gp_rtt; 3888 } else { 3889 /* 3890 * Here we are not highly buffered 3891 * and should pick the min we can to 3892 * keep from causing loss. 
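	 * The ratio driving this choice is rc_entry_gp_rtt / rc_gp_srtt
	 * scaled by ten: e.g. an entry rtt of 120ms against a 50ms
	 * gp_srtt gives 24 (>= 20, highly buffered), while 60ms against
	 * 50ms gives 12 and we fall through to the filtered minimum here.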
3893 */ 3894 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3895 } 3896 rack_set_prtt_target(rack, segsiz, 3897 setval); 3898 } 3899 if (rack_probe_rtt_sets_cwnd > 1) { 3900 /* There is a percentage here to boost */ 3901 ebdp = rack->r_ctl.rc_target_probertt_flight; 3902 ebdp *= rack_probe_rtt_sets_cwnd; 3903 ebdp /= 100; 3904 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3905 } else 3906 setto = rack->r_ctl.rc_target_probertt_flight; 3907 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3908 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3909 /* Enforce a min */ 3910 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3911 } 3912 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3913 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3914 } 3915 rack_log_rtt_shrinks(rack, us_cts, 3916 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3917 __LINE__, RACK_RTTS_EXITPROBE); 3918 /* Clear times last so log has all the info */ 3919 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3920 rack->r_ctl.rc_time_probertt_entered = us_cts; 3921 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3922 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3923 } 3924 3925 static void 3926 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3927 { 3928 /* Check in on probe-rtt */ 3929 if (rack->rc_gp_filled == 0) { 3930 /* We do not do p-rtt unless we have gp measurements */ 3931 return; 3932 } 3933 if (rack->in_probe_rtt) { 3934 uint64_t no_overflow; 3935 uint32_t endtime, must_stay; 3936 3937 if (rack->r_ctl.rc_went_idle_time && 3938 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3939 /* 3940 * We went idle during prtt, just exit now. 3941 */ 3942 rack_exit_probertt(rack, us_cts); 3943 } else if (rack_probe_rtt_safety_val && 3944 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3945 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3946 /* 3947 * Probe RTT safety value triggered! 3948 */ 3949 rack_log_rtt_shrinks(rack, us_cts, 3950 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3951 __LINE__, RACK_RTTS_SAFETY); 3952 rack_exit_probertt(rack, us_cts); 3953 } 3954 /* Calculate the max we will wait */ 3955 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3956 if (rack->rc_highly_buffered) 3957 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3958 /* Calculate the min we must wait */ 3959 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3960 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3961 TSTMP_LT(us_cts, endtime)) { 3962 uint32_t calc; 3963 /* Do we lower more? 
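	 * For every full gp_srtt spent in probe-rtt we knock another
	 * rack_per_of_gp_probertt_reduce percentage points off the base
	 * probe-rtt pacing percentage, floored at
	 * rack_per_of_gp_lowthresh, until we either reach the target
	 * flight size or hit the drain deadline.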
*/ 3964 no_exit: 3965 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3966 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3967 else 3968 calc = 0; 3969 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3970 if (calc) { 3971 /* Maybe */ 3972 calc *= rack_per_of_gp_probertt_reduce; 3973 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3974 /* Limit it too */ 3975 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3976 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3977 } 3978 /* We must reach target or the time set */ 3979 return; 3980 } 3981 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3982 if ((TSTMP_LT(us_cts, must_stay) && 3983 rack->rc_highly_buffered) || 3984 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3985 rack->r_ctl.rc_target_probertt_flight)) { 3986 /* We are not past the must_stay time */ 3987 goto no_exit; 3988 } 3989 rack_log_rtt_shrinks(rack, us_cts, 3990 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3991 __LINE__, RACK_RTTS_REACHTARGET); 3992 rack->r_ctl.rc_time_probertt_starts = us_cts; 3993 if (rack->r_ctl.rc_time_probertt_starts == 0) 3994 rack->r_ctl.rc_time_probertt_starts = 1; 3995 /* Restore back to our rate we want to pace at in prtt */ 3996 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3997 } 3998 /* 3999 * Setup our end time, some number of gp_srtts plus 200ms. 4000 */ 4001 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 4002 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 4003 if (rack_probertt_gpsrtt_cnt_div) 4004 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 4005 else 4006 endtime = 0; 4007 endtime += rack_min_probertt_hold; 4008 endtime += rack->r_ctl.rc_time_probertt_starts; 4009 if (TSTMP_GEQ(us_cts, endtime)) { 4010 /* yes, exit probertt */ 4011 rack_exit_probertt(rack, us_cts); 4012 } 4013 4014 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4015 /* Go into probertt, its been too long since we went lower */ 4016 rack_enter_probertt(rack, us_cts); 4017 } 4018 } 4019 4020 static void 4021 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4022 uint32_t rtt, int32_t rtt_diff) 4023 { 4024 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4025 uint32_t losses; 4026 4027 if ((rack->rc_gp_dyn_mul == 0) || 4028 (rack->use_fixed_rate) || 4029 (rack->in_probe_rtt) || 4030 (rack->rc_always_pace == 0)) { 4031 /* No dynamic GP multipler in play */ 4032 return; 4033 } 4034 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4035 cur_bw = rack_get_bw(rack); 4036 /* Calculate our up and down range */ 4037 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4038 up_bnd /= 100; 4039 up_bnd += rack->r_ctl.last_gp_comp_bw; 4040 4041 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4042 subfr /= 100; 4043 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4044 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4045 /* 4046 * This is the case where our RTT is above 4047 * the max target and we have been configured 4048 * to just do timely no bonus up stuff in that case. 4049 * 4050 * There are two configurations, set to 1, and we 4051 * just do timely if we are over our max. If its 4052 * set above 1 then we slam the multipliers down 4053 * to 100 and then decrement per timely. 
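	 * In other words a value of 1 only suppresses the bonus raise,
	 * while a value of 2 (or more) also caps the ss/ca multipliers
	 * at 100 percent via rack_validate_multipliers_at_or_below_100()
	 * before the timely decrease is applied.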
4054 */ 4055 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4056 __LINE__, 3); 4057 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4058 rack_validate_multipliers_at_or_below_100(rack); 4059 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4060 } else if ((last_bw_est < low_bnd) && !losses) { 4061 /* 4062 * We are decreasing this is a bit complicated this 4063 * means we are loosing ground. This could be 4064 * because another flow entered and we are competing 4065 * for b/w with it. This will push the RTT up which 4066 * makes timely unusable unless we want to get shoved 4067 * into a corner and just be backed off (the age 4068 * old problem with delay based CC). 4069 * 4070 * On the other hand if it was a route change we 4071 * would like to stay somewhat contained and not 4072 * blow out the buffers. 4073 */ 4074 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4075 __LINE__, 3); 4076 rack->r_ctl.last_gp_comp_bw = cur_bw; 4077 if (rack->rc_gp_bwred == 0) { 4078 /* Go into reduction counting */ 4079 rack->rc_gp_bwred = 1; 4080 rack->rc_gp_timely_dec_cnt = 0; 4081 } 4082 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 4083 (timely_says == 0)) { 4084 /* 4085 * Push another time with a faster pacing 4086 * to try to gain back (we include override to 4087 * get a full raise factor). 4088 */ 4089 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4090 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4091 (timely_says == 0) || 4092 (rack_down_raise_thresh == 0)) { 4093 /* 4094 * Do an override up in b/w if we were 4095 * below the threshold or if the threshold 4096 * is zero we always do the raise. 4097 */ 4098 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4099 } else { 4100 /* Log it stays the same */ 4101 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4102 __LINE__, 11); 4103 } 4104 rack->rc_gp_timely_dec_cnt++; 4105 /* We are not incrementing really no-count */ 4106 rack->rc_gp_incr = 0; 4107 rack->rc_gp_timely_inc_cnt = 0; 4108 } else { 4109 /* 4110 * Lets just use the RTT 4111 * information and give up 4112 * pushing. 4113 */ 4114 goto use_timely; 4115 } 4116 } else if ((timely_says != 2) && 4117 !losses && 4118 (last_bw_est > up_bnd)) { 4119 /* 4120 * We are increasing b/w lets keep going, updating 4121 * our b/w and ignoring any timely input, unless 4122 * of course we are at our max raise (if there is one). 4123 */ 4124 4125 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4126 __LINE__, 3); 4127 rack->r_ctl.last_gp_comp_bw = cur_bw; 4128 if (rack->rc_gp_saw_ss && 4129 rack_per_upper_bound_ss && 4130 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 4131 /* 4132 * In cases where we can't go higher 4133 * we should just use timely. 4134 */ 4135 goto use_timely; 4136 } 4137 if (rack->rc_gp_saw_ca && 4138 rack_per_upper_bound_ca && 4139 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4140 /* 4141 * In cases where we can't go higher 4142 * we should just use timely. 
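	 * (We are already pinned at rack_per_upper_bound_ca, so defer
	 * to the rtt-based timely decision rather than forcing another
	 * raise.)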
4143 */ 4144 goto use_timely; 4145 } 4146 rack->rc_gp_bwred = 0; 4147 rack->rc_gp_timely_dec_cnt = 0; 4148 /* You get a set number of pushes if timely is trying to reduce */ 4149 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4150 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4151 } else { 4152 /* Log it stays the same */ 4153 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4154 __LINE__, 12); 4155 } 4156 return; 4157 } else { 4158 /* 4159 * We are staying between the lower and upper range bounds 4160 * so use timely to decide. 4161 */ 4162 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4163 __LINE__, 3); 4164 use_timely: 4165 if (timely_says) { 4166 rack->rc_gp_incr = 0; 4167 rack->rc_gp_timely_inc_cnt = 0; 4168 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4169 !losses && 4170 (last_bw_est < low_bnd)) { 4171 /* We are loosing ground */ 4172 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4173 rack->rc_gp_timely_dec_cnt++; 4174 /* We are not incrementing really no-count */ 4175 rack->rc_gp_incr = 0; 4176 rack->rc_gp_timely_inc_cnt = 0; 4177 } else 4178 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4179 } else { 4180 rack->rc_gp_bwred = 0; 4181 rack->rc_gp_timely_dec_cnt = 0; 4182 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4183 } 4184 } 4185 } 4186 4187 static int32_t 4188 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4189 { 4190 int32_t timely_says; 4191 uint64_t log_mult, log_rtt_a_diff; 4192 4193 log_rtt_a_diff = rtt; 4194 log_rtt_a_diff <<= 32; 4195 log_rtt_a_diff |= (uint32_t)rtt_diff; 4196 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4197 rack_gp_rtt_maxmul)) { 4198 /* Reduce the b/w multipler */ 4199 timely_says = 2; 4200 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4201 log_mult <<= 32; 4202 log_mult |= prev_rtt; 4203 rack_log_timely(rack, timely_says, log_mult, 4204 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4205 log_rtt_a_diff, __LINE__, 4); 4206 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4207 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4208 max(rack_gp_rtt_mindiv , 1)))) { 4209 /* Increase the b/w multipler */ 4210 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4211 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4212 max(rack_gp_rtt_mindiv , 1)); 4213 log_mult <<= 32; 4214 log_mult |= prev_rtt; 4215 timely_says = 0; 4216 rack_log_timely(rack, timely_says, log_mult , 4217 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4218 log_rtt_a_diff, __LINE__, 5); 4219 } else { 4220 /* 4221 * Use a gradient to find it the timely gradient 4222 * is: 4223 * grad = rc_rtt_diff / min_rtt; 4224 * 4225 * anything below or equal to 0 will be 4226 * a increase indication. Anything above 4227 * zero is a decrease. Note we take care 4228 * of the actual gradient calculation 4229 * in the reduction (its not needed for 4230 * increase). 
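	 * For example, with a min_rtt of 20ms an rc_rtt_diff of +2ms is
	 * a 10 percent normalized gradient and selects the decrease
	 * path, while a diff of zero or below simply signals an
	 * increase.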
4231 */ 4232 log_mult = prev_rtt; 4233 if (rtt_diff <= 0) { 4234 /* 4235 * Rttdiff is less than zero, increase the 4236 * b/w multipler (its 0 or negative) 4237 */ 4238 timely_says = 0; 4239 rack_log_timely(rack, timely_says, log_mult, 4240 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4241 } else { 4242 /* Reduce the b/w multipler */ 4243 timely_says = 1; 4244 rack_log_timely(rack, timely_says, log_mult, 4245 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4246 } 4247 } 4248 return (timely_says); 4249 } 4250 4251 static void 4252 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4253 tcp_seq th_ack, int line, uint8_t quality) 4254 { 4255 uint64_t tim, bytes_ps, ltim, stim, utim; 4256 uint32_t segsiz, bytes, reqbytes, us_cts; 4257 int32_t gput, new_rtt_diff, timely_says; 4258 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4259 int did_add = 0; 4260 4261 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4262 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4263 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4264 tim = us_cts - tp->gput_ts; 4265 else 4266 tim = 0; 4267 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4268 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4269 else 4270 stim = 0; 4271 /* 4272 * Use the larger of the send time or ack time. This prevents us 4273 * from being influenced by ack artifacts to come up with too 4274 * high of measurement. Note that since we are spanning over many more 4275 * bytes in most of our measurements hopefully that is less likely to 4276 * occur. 4277 */ 4278 if (tim > stim) 4279 utim = max(tim, 1); 4280 else 4281 utim = max(stim, 1); 4282 /* Lets get a msec time ltim too for the old stuff */ 4283 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4284 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4285 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4286 if ((tim == 0) && (stim == 0)) { 4287 /* 4288 * Invalid measurement time, maybe 4289 * all on one ack/one send? 4290 */ 4291 bytes = 0; 4292 bytes_ps = 0; 4293 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4294 0, 0, 0, 10, __LINE__, NULL, quality); 4295 goto skip_measurement; 4296 } 4297 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4298 /* We never made a us_rtt measurement? */ 4299 bytes = 0; 4300 bytes_ps = 0; 4301 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4302 0, 0, 0, 10, __LINE__, NULL, quality); 4303 goto skip_measurement; 4304 } 4305 /* 4306 * Calculate the maximum possible b/w this connection 4307 * could have. We base our calculation on the lowest 4308 * rtt we have seen during the measurement and the 4309 * largest rwnd the client has given us in that time. This 4310 * forms a BDP that is the maximum that we could ever 4311 * get to the client. Anything larger is not valid. 4312 * 4313 * I originally had code here that rejected measurements 4314 * where the time was less than 1/2 the latest us_rtt. 4315 * But after thinking on that I realized its wrong since 4316 * say you had a 150Mbps or even 1Gbps link, and you 4317 * were a long way away.. example I am in Europe (100ms rtt) 4318 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4319 * bytes my time would be 1.2ms, and yet my rtt would say 4320 * the measurement was invalid the time was < 50ms. The 4321 * same thing is true for 150Mb (8ms of time). 
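	 * (150,000 bytes is 1.2 Mbit, which serializes in about 1.2ms at
	 * 1Gbps and 8ms at 150Mbps, both far below the 100ms rtt.)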
4322 * 4323 * A better way I realized is to look at what the maximum 4324 * the connection could possibly do. This is gated on 4325 * the lowest RTT we have seen and the highest rwnd. 4326 * We should in theory never exceed that, if we are 4327 * then something on the path is storing up packets 4328 * and then feeding them all at once to our endpoint 4329 * messing up our measurement. 4330 */ 4331 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4332 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4333 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4334 if (SEQ_LT(th_ack, tp->gput_seq)) { 4335 /* No measurement can be made */ 4336 bytes = 0; 4337 bytes_ps = 0; 4338 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4339 0, 0, 0, 10, __LINE__, NULL, quality); 4340 goto skip_measurement; 4341 } else 4342 bytes = (th_ack - tp->gput_seq); 4343 bytes_ps = (uint64_t)bytes; 4344 /* 4345 * Don't measure a b/w for pacing unless we have gotten at least 4346 * an initial windows worth of data in this measurement interval. 4347 * 4348 * Small numbers of bytes get badly influenced by delayed ack and 4349 * other artifacts. Note we take the initial window or our 4350 * defined minimum GP (defaulting to 10 which hopefully is the 4351 * IW). 4352 */ 4353 if (rack->rc_gp_filled == 0) { 4354 /* 4355 * The initial estimate is special. We 4356 * have blasted out an IW worth of packets 4357 * without a real valid ack ts results. We 4358 * then setup the app_limited_needs_set flag, 4359 * this should get the first ack in (probably 2 4360 * MSS worth) to be recorded as the timestamp. 4361 * We thus allow a smaller number of bytes i.e. 4362 * IW - 2MSS. 4363 */ 4364 reqbytes -= (2 * segsiz); 4365 /* Also lets fill previous for our first measurement to be neutral */ 4366 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4367 } 4368 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4369 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4370 rack->r_ctl.rc_app_limited_cnt, 4371 0, 0, 10, __LINE__, NULL, quality); 4372 goto skip_measurement; 4373 } 4374 /* 4375 * We now need to calculate the Timely like status so 4376 * we can update (possibly) the b/w multipliers. 4377 */ 4378 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4379 if (rack->rc_gp_filled == 0) { 4380 /* No previous reading */ 4381 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4382 } else { 4383 if (rack->measure_saw_probe_rtt == 0) { 4384 /* 4385 * We don't want a probertt to be counted 4386 * since it will be negative incorrectly. We 4387 * expect to be reducing the RTT when we 4388 * pace at a slower rate. 4389 */ 4390 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4391 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4392 } 4393 } 4394 timely_says = rack_make_timely_judgement(rack, 4395 rack->r_ctl.rc_gp_srtt, 4396 rack->r_ctl.rc_rtt_diff, 4397 rack->r_ctl.rc_prev_gp_srtt 4398 ); 4399 bytes_ps *= HPTS_USEC_IN_SEC; 4400 bytes_ps /= utim; 4401 if (bytes_ps > rack->r_ctl.last_max_bw) { 4402 /* 4403 * Something is on path playing 4404 * since this b/w is not possible based 4405 * on our BDP (highest rwnd and lowest rtt 4406 * we saw in the measurement window). 4407 * 4408 * Another option here would be to 4409 * instead skip the measurement. 
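	 * As an illustration, a peak rwnd of 1MB seen with a lowest rtt
	 * of 10ms caps last_max_bw at roughly 100MB/s; any computed
	 * bytes_ps above that is clamped to it below.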
		 */
		rack_log_pacing_delay_calc(rack, bytes, reqbytes,
					   bytes_ps, rack->r_ctl.last_max_bw, 0,
					   11, __LINE__, NULL, quality);
		bytes_ps = rack->r_ctl.last_max_bw;
	}
	/* We store gp for b/w in bytes per second */
	if (rack->rc_gp_filled == 0) {
		/* Initial measurement */
		if (bytes_ps) {
			rack->r_ctl.gp_bw = bytes_ps;
			rack->rc_gp_filled = 1;
			rack->r_ctl.num_measurements = 1;
			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
		} else {
			rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
						   rack->r_ctl.rc_app_limited_cnt,
						   0, 0, 10, __LINE__, NULL, quality);
		}
		if (rack->rc_inp->inp_in_hpts &&
		    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
			/*
			 * Ok we can't trust the pacer in this case
			 * where we transition from un-paced to paced.
			 * Or for that matter when the burst mitigation
			 * was making a wild guess and got it wrong.
			 * Stop the pacer and clear up all the aggregate
			 * delays etc.
			 */
			tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT);
			rack->r_ctl.rc_hpts_flags = 0;
			rack->r_ctl.rc_last_output_to = 0;
		}
		did_add = 2;
	} else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
		/* Still a small number of measurements; run an average */
		rack->r_ctl.gp_bw += bytes_ps;
		addpart = rack->r_ctl.num_measurements;
		rack->r_ctl.num_measurements++;
		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
			/* We have collected enough to move forward */
			rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
		}
		did_add = 3;
	} else {
		/*
		 * We want to take 1/wma of the goodput and add it in to 7/8ths
		 * of the old value, weighted by the srtt. So if your measurement
		 * period is say 2 SRTT's long you would get 1/4 as the
		 * value, if it was like 1/2 SRTT then you would get 1/16th.
		 *
		 * But we must be careful not to take too much i.e. if the
		 * srtt is say 20ms and the measurement is taken over
		 * 400ms our weight would be 400/20 i.e. 20. On the
		 * other hand if we get a measurement over 1ms with a
		 * 10ms rtt we only want to take a much smaller portion.
		 */
		if (rack->r_ctl.num_measurements < 0xff) {
			rack->r_ctl.num_measurements++;
		}
		srtt = (uint64_t)tp->t_srtt;
		if (srtt == 0) {
			/*
			 * Strange, why did t_srtt go back to zero?
			 */
			if (rack->r_ctl.rc_rack_min_rtt)
				srtt = rack->r_ctl.rc_rack_min_rtt;
			else
				srtt = HPTS_USEC_IN_MSEC;
		}
		/*
		 * XXXrrs: Note for reviewers, in playing with
		 * dynamic pacing I discovered this GP calculation
		 * as done originally leads to some undesired results.
		 * Basically you can get longer measurements contributing
		 * too much to the WMA. Thus I changed it so that, if you
		 * are doing dynamic adjustments, we only do the apportioned
		 * adjustment if we have a very small (time wise) measurement.
		 * Longer measurements just get their weight (defaulting to 1/8)
		 * added to the WMA. We may want to think about changing
		 * this to always do that for both sides i.e. dynamic
		 * and non-dynamic... but considering lots of folks
		 * were playing with this I did not want to change the
		 * calculation per se without your thoughts.. Lawrence?
		 * Peter??
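		 * As a concrete illustration of the dynamic branch below,
		 * using the default 1/8 weighting mentioned above: with
		 * srtt = 40ms a 40ms measurement scales both subpart and
		 * addpart to 1/8 (the classic 7/8 + 1/8 blend), while a
		 * 10ms measurement only displaces 1/32 of the running
		 * average.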
4495 */ 4496 if (rack->rc_gp_dyn_mul == 0) { 4497 subpart = rack->r_ctl.gp_bw * utim; 4498 subpart /= (srtt * 8); 4499 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4500 /* 4501 * The b/w update takes no more 4502 * away then 1/2 our running total 4503 * so factor it in. 4504 */ 4505 addpart = bytes_ps * utim; 4506 addpart /= (srtt * 8); 4507 } else { 4508 /* 4509 * Don't allow a single measurement 4510 * to account for more than 1/2 of the 4511 * WMA. This could happen on a retransmission 4512 * where utim becomes huge compared to 4513 * srtt (multiple retransmissions when using 4514 * the sending rate which factors in all the 4515 * transmissions from the first one). 4516 */ 4517 subpart = rack->r_ctl.gp_bw / 2; 4518 addpart = bytes_ps / 2; 4519 } 4520 resid_bw = rack->r_ctl.gp_bw - subpart; 4521 rack->r_ctl.gp_bw = resid_bw + addpart; 4522 did_add = 1; 4523 } else { 4524 if ((utim / srtt) <= 1) { 4525 /* 4526 * The b/w update was over a small period 4527 * of time. The idea here is to prevent a small 4528 * measurement time period from counting 4529 * too much. So we scale it based on the 4530 * time so it attributes less than 1/rack_wma_divisor 4531 * of its measurement. 4532 */ 4533 subpart = rack->r_ctl.gp_bw * utim; 4534 subpart /= (srtt * rack_wma_divisor); 4535 addpart = bytes_ps * utim; 4536 addpart /= (srtt * rack_wma_divisor); 4537 } else { 4538 /* 4539 * The scaled measurement was long 4540 * enough so lets just add in the 4541 * portion of the measurment i.e. 1/rack_wma_divisor 4542 */ 4543 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4544 addpart = bytes_ps / rack_wma_divisor; 4545 } 4546 if ((rack->measure_saw_probe_rtt == 0) || 4547 (bytes_ps > rack->r_ctl.gp_bw)) { 4548 /* 4549 * For probe-rtt we only add it in 4550 * if its larger, all others we just 4551 * add in. 4552 */ 4553 did_add = 1; 4554 resid_bw = rack->r_ctl.gp_bw - subpart; 4555 rack->r_ctl.gp_bw = resid_bw + addpart; 4556 } 4557 } 4558 } 4559 if ((rack->gp_ready == 0) && 4560 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4561 /* We have enough measurements now */ 4562 rack->gp_ready = 1; 4563 rack_set_cc_pacing(rack); 4564 if (rack->defer_options) 4565 rack_apply_deferred_options(rack); 4566 } 4567 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4568 rack_get_bw(rack), 22, did_add, NULL, quality); 4569 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4570 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4571 rack_update_multiplier(rack, timely_says, bytes_ps, 4572 rack->r_ctl.rc_gp_srtt, 4573 rack->r_ctl.rc_rtt_diff); 4574 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4575 rack_get_bw(rack), 3, line, NULL, quality); 4576 /* reset the gp srtt and setup the new prev */ 4577 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4578 /* Record the lost count for the next measurement */ 4579 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4580 /* 4581 * We restart our diffs based on the gpsrtt in the 4582 * measurement window. 4583 */ 4584 rack->rc_gp_rtt_set = 0; 4585 rack->rc_gp_saw_rec = 0; 4586 rack->rc_gp_saw_ca = 0; 4587 rack->rc_gp_saw_ss = 0; 4588 rack->rc_dragged_bottom = 0; 4589 skip_measurement: 4590 4591 #ifdef STATS 4592 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4593 gput); 4594 /* 4595 * XXXLAS: This is a temporary hack, and should be 4596 * chained off VOI_TCP_GPUT when stats(9) grows an 4597 * API to deal with chained VOIs. 
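 *
 * The update below simply records the goodput delta as a signed
 * percent change against the previous sample, i.e.
 * (gput - t_stats_gput_prev) * 100 / t_stats_gput_prev.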
4598 */ 4599 if (tp->t_stats_gput_prev > 0) 4600 stats_voi_update_abs_s32(tp->t_stats, 4601 VOI_TCP_GPUT_ND, 4602 ((gput - tp->t_stats_gput_prev) * 100) / 4603 tp->t_stats_gput_prev); 4604 #endif 4605 tp->t_flags &= ~TF_GPUTINPROG; 4606 tp->t_stats_gput_prev = gput; 4607 /* 4608 * Now are we app limited now and there is space from where we 4609 * were to where we want to go? 4610 * 4611 * We don't do the other case i.e. non-applimited here since 4612 * the next send will trigger us picking up the missing data. 4613 */ 4614 if (rack->r_ctl.rc_first_appl && 4615 TCPS_HAVEESTABLISHED(tp->t_state) && 4616 rack->r_ctl.rc_app_limited_cnt && 4617 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4618 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4619 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4620 /* 4621 * Yep there is enough outstanding to make a measurement here. 4622 */ 4623 struct rack_sendmap *rsm, fe; 4624 4625 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4626 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4627 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4628 rack->app_limited_needs_set = 0; 4629 tp->gput_seq = th_ack; 4630 if (rack->in_probe_rtt) 4631 rack->measure_saw_probe_rtt = 1; 4632 else if ((rack->measure_saw_probe_rtt) && 4633 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4634 rack->measure_saw_probe_rtt = 0; 4635 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4636 /* There is a full window to gain info from */ 4637 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4638 } else { 4639 /* We can only measure up to the applimited point */ 4640 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4641 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4642 /* 4643 * We don't have enough to make a measurement. 4644 */ 4645 tp->t_flags &= ~TF_GPUTINPROG; 4646 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4647 0, 0, 0, 6, __LINE__, NULL, quality); 4648 return; 4649 } 4650 } 4651 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4652 /* 4653 * We will get no more data into the SB 4654 * this means we need to have the data available 4655 * before we start a measurement. 4656 */ 4657 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4658 /* Nope not enough data. */ 4659 return; 4660 } 4661 } 4662 tp->t_flags |= TF_GPUTINPROG; 4663 /* 4664 * Now we need to find the timestamp of the send at tp->gput_seq 4665 * for the send based measurement. 4666 */ 4667 fe.r_start = tp->gput_seq; 4668 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4669 if (rsm) { 4670 /* Ok send-based limit is set */ 4671 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4672 /* 4673 * Move back to include the earlier part 4674 * so our ack time lines up right (this may 4675 * make an overlapping measurement but thats 4676 * ok). 4677 */ 4678 tp->gput_seq = rsm->r_start; 4679 } 4680 if (rsm->r_flags & RACK_ACKED) 4681 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4682 else 4683 rack->app_limited_needs_set = 1; 4684 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4685 } else { 4686 /* 4687 * If we don't find the rsm due to some 4688 * send-limit set the current time, which 4689 * basically disables the send-limit. 
4690 */ 4691 struct timeval tv; 4692 4693 microuptime(&tv); 4694 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4695 } 4696 rack_log_pacing_delay_calc(rack, 4697 tp->gput_seq, 4698 tp->gput_ack, 4699 (uint64_t)rsm, 4700 tp->gput_ts, 4701 rack->r_ctl.rc_app_limited_cnt, 4702 9, 4703 __LINE__, NULL, quality); 4704 } 4705 } 4706 4707 /* 4708 * CC wrapper hook functions 4709 */ 4710 static void 4711 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4712 uint16_t type, int32_t recovery) 4713 { 4714 uint32_t prior_cwnd, acked; 4715 struct tcp_log_buffer *lgb = NULL; 4716 uint8_t labc_to_use, quality; 4717 4718 INP_WLOCK_ASSERT(tp->t_inpcb); 4719 tp->ccv->nsegs = nsegs; 4720 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4721 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4722 uint32_t max; 4723 4724 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4725 if (tp->ccv->bytes_this_ack > max) { 4726 tp->ccv->bytes_this_ack = max; 4727 } 4728 } 4729 #ifdef STATS 4730 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4731 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4732 #endif 4733 quality = RACK_QUALITY_NONE; 4734 if ((tp->t_flags & TF_GPUTINPROG) && 4735 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4736 /* Measure the Goodput */ 4737 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4738 #ifdef NETFLIX_PEAKRATE 4739 if ((type == CC_ACK) && 4740 (tp->t_maxpeakrate)) { 4741 /* 4742 * We update t_peakrate_thr. This gives us roughly 4743 * one update per round trip time. Note 4744 * it will only be used if pace_always is off i.e 4745 * we don't do this for paced flows. 4746 */ 4747 rack_update_peakrate_thr(tp); 4748 } 4749 #endif 4750 } 4751 /* Which way our we limited, if not cwnd limited no advance in CA */ 4752 if (tp->snd_cwnd <= tp->snd_wnd) 4753 tp->ccv->flags |= CCF_CWND_LIMITED; 4754 else 4755 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4756 if (tp->snd_cwnd > tp->snd_ssthresh) { 4757 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4758 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4759 /* For the setting of a window past use the actual scwnd we are using */ 4760 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4761 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4762 tp->ccv->flags |= CCF_ABC_SENTAWND; 4763 } 4764 } else { 4765 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4766 tp->t_bytes_acked = 0; 4767 } 4768 prior_cwnd = tp->snd_cwnd; 4769 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4770 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4771 labc_to_use = rack->rc_labc; 4772 else 4773 labc_to_use = rack_max_abc_post_recovery; 4774 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4775 union tcp_log_stackspecific log; 4776 struct timeval tv; 4777 4778 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4779 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4780 log.u_bbr.flex1 = th_ack; 4781 log.u_bbr.flex2 = tp->ccv->flags; 4782 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4783 log.u_bbr.flex4 = tp->ccv->nsegs; 4784 log.u_bbr.flex5 = labc_to_use; 4785 log.u_bbr.flex6 = prior_cwnd; 4786 log.u_bbr.flex7 = V_tcp_do_newsack; 4787 log.u_bbr.flex8 = 1; 4788 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4789 0, &log, false, NULL, NULL, 0, &tv); 4790 } 4791 if (CC_ALGO(tp)->ack_received != NULL) { 4792 /* XXXLAS: Find a way to live without this */ 4793 tp->ccv->curack = th_ack; 4794 
tp->ccv->labc = labc_to_use;
4795 tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4796 CC_ALGO(tp)->ack_received(tp->ccv, type);
4797 }
4798 if (lgb) {
4799 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4800 }
4801 if (rack->r_must_retran) {
4802 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4803 /*
4804 * We are now beyond the rxt point so let's disable
4805 * the flag.
4806 */
4807 rack->r_ctl.rc_out_at_rto = 0;
4808 rack->r_must_retran = 0;
4809 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4810 /*
4811 * Only decrement rc_out_at_rto if the cwnd advances
4812 * by at least a whole segment. Otherwise the next time the
4813 * peer acks, we won't be able to send; this generally happens
4814 * when we are in Congestion Avoidance.
4815 */
4816 if (acked <= rack->r_ctl.rc_out_at_rto){
4817 rack->r_ctl.rc_out_at_rto -= acked;
4818 } else {
4819 rack->r_ctl.rc_out_at_rto = 0;
4820 }
4821 }
4822 }
4823 #ifdef STATS
4824 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4825 #endif
4826 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4827 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4828 }
4829 #ifdef NETFLIX_PEAKRATE
4830 /* we enforce the max peak rate if it is set and we are not pacing */
4831 if ((rack->rc_always_pace == 0) &&
4832 tp->t_peakrate_thr &&
4833 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4834 tp->snd_cwnd = tp->t_peakrate_thr;
4835 }
4836 #endif
4837 }
4838
4839 static void
4840 tcp_rack_partialack(struct tcpcb *tp)
4841 {
4842 struct tcp_rack *rack;
4843
4844 rack = (struct tcp_rack *)tp->t_fb_ptr;
4845 INP_WLOCK_ASSERT(tp->t_inpcb);
4846 /*
4847 * If we are doing PRR and have enough
4848 * room to send, <or> we are pacing and prr
4849 * is disabled, we will want to see if we
4850 * can send data (by setting r_wanted_output to
4851 * true).
4852 */
4853 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4854 rack->rack_no_prr)
4855 rack->r_wanted_output = 1;
4856 }
4857
4858 static void
4859 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4860 {
4861 struct tcp_rack *rack;
4862 uint32_t orig_cwnd;
4863
4864 orig_cwnd = tp->snd_cwnd;
4865 INP_WLOCK_ASSERT(tp->t_inpcb);
4866 rack = (struct tcp_rack *)tp->t_fb_ptr;
4867 /* only alert CC if we alerted when we entered */
4868 if (CC_ALGO(tp)->post_recovery != NULL) {
4869 tp->ccv->curack = th_ack;
4870 CC_ALGO(tp)->post_recovery(tp->ccv);
4871 if (tp->snd_cwnd < tp->snd_ssthresh) {
4872 /*
4873 * Rack has burst control and pacing
4874 * so let's not set this any lower than
4875 * snd_ssthresh per RFC-6582 (option 2).
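 * (Option 2 of the recovery exit in RFC 6582 sets cwnd to ssthresh
 * rather than deflating it toward the amount of data actually
 * outstanding; the pacer and burst control keep that from turning
 * into a line-rate burst.)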
4876 */ 4877 tp->snd_cwnd = tp->snd_ssthresh; 4878 } 4879 } 4880 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4881 union tcp_log_stackspecific log; 4882 struct timeval tv; 4883 4884 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4885 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4886 log.u_bbr.flex1 = th_ack; 4887 log.u_bbr.flex2 = tp->ccv->flags; 4888 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4889 log.u_bbr.flex4 = tp->ccv->nsegs; 4890 log.u_bbr.flex5 = V_tcp_abc_l_var; 4891 log.u_bbr.flex6 = orig_cwnd; 4892 log.u_bbr.flex7 = V_tcp_do_newsack; 4893 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4894 log.u_bbr.flex8 = 2; 4895 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4896 0, &log, false, NULL, NULL, 0, &tv); 4897 } 4898 if ((rack->rack_no_prr == 0) && 4899 (rack->no_prr_addback == 0) && 4900 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4901 /* 4902 * Suck the next prr cnt back into cwnd, but 4903 * only do that if we are not application limited. 4904 */ 4905 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4906 /* 4907 * We are allowed to add back to the cwnd the amount we did 4908 * not get out if: 4909 * a) no_prr_addback is off. 4910 * b) we are not app limited 4911 * c) we are doing prr 4912 * <and> 4913 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4914 */ 4915 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4916 rack->r_ctl.rc_prr_sndcnt); 4917 } 4918 rack->r_ctl.rc_prr_sndcnt = 0; 4919 rack_log_to_prr(rack, 1, 0); 4920 } 4921 rack_log_to_prr(rack, 14, orig_cwnd); 4922 tp->snd_recover = tp->snd_una; 4923 if (rack->r_ctl.dsack_persist) { 4924 rack->r_ctl.dsack_persist--; 4925 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4926 rack->r_ctl.num_dsack = 0; 4927 } 4928 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4929 } 4930 EXIT_RECOVERY(tp->t_flags); 4931 } 4932 4933 static void 4934 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4935 { 4936 struct tcp_rack *rack; 4937 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4938 4939 INP_WLOCK_ASSERT(tp->t_inpcb); 4940 #ifdef STATS 4941 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4942 #endif 4943 if (IN_RECOVERY(tp->t_flags) == 0) { 4944 in_rec_at_entry = 0; 4945 ssthresh_enter = tp->snd_ssthresh; 4946 cwnd_enter = tp->snd_cwnd; 4947 } else 4948 in_rec_at_entry = 1; 4949 rack = (struct tcp_rack *)tp->t_fb_ptr; 4950 switch (type) { 4951 case CC_NDUPACK: 4952 tp->t_flags &= ~TF_WASFRECOVERY; 4953 tp->t_flags &= ~TF_WASCRECOVERY; 4954 if (!IN_FASTRECOVERY(tp->t_flags)) { 4955 rack->r_ctl.rc_prr_delivered = 0; 4956 rack->r_ctl.rc_prr_out = 0; 4957 if (rack->rack_no_prr == 0) { 4958 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4959 rack_log_to_prr(rack, 2, in_rec_at_entry); 4960 } 4961 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4962 tp->snd_recover = tp->snd_max; 4963 if (tp->t_flags2 & TF2_ECN_PERMIT) 4964 tp->t_flags2 |= TF2_ECN_SND_CWR; 4965 } 4966 break; 4967 case CC_ECN: 4968 if (!IN_CONGRECOVERY(tp->t_flags) || 4969 /* 4970 * Allow ECN reaction on ACK to CWR, if 4971 * that data segment was also CE marked. 
4972 */ 4973 SEQ_GEQ(ack, tp->snd_recover)) { 4974 EXIT_CONGRECOVERY(tp->t_flags); 4975 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4976 tp->snd_recover = tp->snd_max + 1; 4977 if (tp->t_flags2 & TF2_ECN_PERMIT) 4978 tp->t_flags2 |= TF2_ECN_SND_CWR; 4979 } 4980 break; 4981 case CC_RTO: 4982 tp->t_dupacks = 0; 4983 tp->t_bytes_acked = 0; 4984 EXIT_RECOVERY(tp->t_flags); 4985 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4986 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4987 orig_cwnd = tp->snd_cwnd; 4988 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4989 rack_log_to_prr(rack, 16, orig_cwnd); 4990 if (tp->t_flags2 & TF2_ECN_PERMIT) 4991 tp->t_flags2 |= TF2_ECN_SND_CWR; 4992 break; 4993 case CC_RTO_ERR: 4994 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4995 /* RTO was unnecessary, so reset everything. */ 4996 tp->snd_cwnd = tp->snd_cwnd_prev; 4997 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4998 tp->snd_recover = tp->snd_recover_prev; 4999 if (tp->t_flags & TF_WASFRECOVERY) { 5000 ENTER_FASTRECOVERY(tp->t_flags); 5001 tp->t_flags &= ~TF_WASFRECOVERY; 5002 } 5003 if (tp->t_flags & TF_WASCRECOVERY) { 5004 ENTER_CONGRECOVERY(tp->t_flags); 5005 tp->t_flags &= ~TF_WASCRECOVERY; 5006 } 5007 tp->snd_nxt = tp->snd_max; 5008 tp->t_badrxtwin = 0; 5009 break; 5010 } 5011 if ((CC_ALGO(tp)->cong_signal != NULL) && 5012 (type != CC_RTO)){ 5013 tp->ccv->curack = ack; 5014 CC_ALGO(tp)->cong_signal(tp->ccv, type); 5015 } 5016 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5017 rack_log_to_prr(rack, 15, cwnd_enter); 5018 rack->r_ctl.dsack_byte_cnt = 0; 5019 rack->r_ctl.retran_during_recovery = 0; 5020 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5021 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5022 rack->r_ent_rec_ns = 1; 5023 } 5024 } 5025 5026 static inline void 5027 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5028 { 5029 uint32_t i_cwnd; 5030 5031 INP_WLOCK_ASSERT(tp->t_inpcb); 5032 5033 #ifdef NETFLIX_STATS 5034 KMOD_TCPSTAT_INC(tcps_idle_restarts); 5035 if (tp->t_state == TCPS_ESTABLISHED) 5036 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 5037 #endif 5038 if (CC_ALGO(tp)->after_idle != NULL) 5039 CC_ALGO(tp)->after_idle(tp->ccv); 5040 5041 if (tp->snd_cwnd == 1) 5042 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5043 else 5044 i_cwnd = rc_init_window(rack); 5045 5046 /* 5047 * Being idle is no differnt than the initial window. If the cc 5048 * clamps it down below the initial window raise it to the initial 5049 * window. 5050 */ 5051 if (tp->snd_cwnd < i_cwnd) { 5052 tp->snd_cwnd = i_cwnd; 5053 } 5054 } 5055 5056 /* 5057 * Indicate whether this ack should be delayed. We can delay the ack if 5058 * following conditions are met: 5059 * - There is no delayed ack timer in progress. 5060 * - Our last ack wasn't a 0-sized window. We never want to delay 5061 * the ack that opens up a 0-sized window. 5062 * - LRO wasn't used for this segment. We make sure by checking that the 5063 * segment size is not larger than the MSS. 5064 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5065 * connection. 5066 */ 5067 #define DELAY_ACK(tp, tlen) \ 5068 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5069 ((tp->t_flags & TF_DELACK) == 0) && \ 5070 (tlen <= tp->t_maxseg) && \ 5071 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5072 5073 static struct rack_sendmap * 5074 rack_find_lowest_rsm(struct tcp_rack *rack) 5075 { 5076 struct rack_sendmap *rsm; 5077 5078 /* 5079 * Walk the time-order transmitted list looking for an rsm that is 5080 * not acked. 
This will be the one that was sent the longest time
5081 * ago that is still outstanding.
5082 */
5083 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
5084 if (rsm->r_flags & RACK_ACKED) {
5085 continue;
5086 }
5087 goto finish;
5088 }
5089 finish:
5090 return (rsm);
5091 }
5092
5093 static struct rack_sendmap *
5094 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
5095 {
5096 struct rack_sendmap *prsm;
5097
5098 /*
5099 * Walk the sequence order list backward until we arrive at
5100 * the highest seq not acked. In theory when this is called it
5101 * should be the last segment (which it was not).
5102 */
5103 counter_u64_add(rack_find_high, 1);
5104 prsm = rsm;
5105 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
5106 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
5107 continue;
5108 }
5109 return (prsm);
5110 }
5111 return (NULL);
5112 }
5113
5114 static uint32_t
5115 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
5116 {
5117 int32_t lro;
5118 uint32_t thresh;
5119
5120 /*
5121 * lro is the flag we use to determine if we have seen reordering;
5122 * if it gets set we have seen reordering. The reorder logic
5123 * works in one of two ways:
5124 *
5125 * If reorder-fade is configured, then we track the last time we saw
5126 * re-ordering occur. If we reach the point where enough time has
5127 * passed we no longer consider reordering to be occurring.
5128 *
5129 * Or if reorder-fade is 0, then once we see reordering we consider
5130 * the connection to always be subject to reordering and just set lro
5131 * to 1.
5132 *
5133 * In the end if lro is non-zero we add the extra time for
5134 * reordering in.
5135 */
5136 if (srtt == 0)
5137 srtt = 1;
5138 if (rack->r_ctl.rc_reorder_ts) {
5139 if (rack->r_ctl.rc_reorder_fade) {
5140 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
5141 lro = cts - rack->r_ctl.rc_reorder_ts;
5142 if (lro == 0) {
5143 /*
5144 * No time has passed since the last
5145 * reorder, mark it as reordering.
5146 */
5147 lro = 1;
5148 }
5149 } else {
5150 /* Negative time? */
5151 lro = 0;
5152 }
5153 if (lro > rack->r_ctl.rc_reorder_fade) {
5154 /* Turn off reordering seen too */
5155 rack->r_ctl.rc_reorder_ts = 0;
5156 lro = 0;
5157 }
5158 } else {
5159 /* Reordering does not fade */
5160 lro = 1;
5161 }
5162 } else {
5163 lro = 0;
5164 }
5165 if (rack->rc_rack_tmr_std_based == 0) {
5166 thresh = srtt + rack->r_ctl.rc_pkt_delay;
5167 } else {
5168 /* Standards-based pkt-delay is 1/4 srtt */
5169 thresh = srtt + (srtt >> 2);
5170 }
5171 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
5172 /* It must be set, if not you get 1/4 rtt */
5173 if (rack->r_ctl.rc_reorder_shift)
5174 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
5175 else
5176 thresh += (srtt >> 2);
5177 }
5178 if (rack->rc_rack_use_dsack &&
5179 lro &&
5180 (rack->r_ctl.num_dsack > 0)) {
5181 /*
5182 * We only increase the reordering window if we
5183 * have seen reordering <and> we have a DSACK count.
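 *
 * Illustrative (hypothetical) numbers: with num_dsack = 2 and
 * srtt = 40,000 usecs, the addition below widens the threshold by
 * 2 * (40,000 >> 2) = 20,000 usecs on top of whatever reordering
 * pad was applied above.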
5184 */ 5185 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5186 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5187 } 5188 /* SRTT * 2 is the ceiling */ 5189 if (thresh > (srtt * 2)) { 5190 thresh = srtt * 2; 5191 } 5192 /* And we don't want it above the RTO max either */ 5193 if (thresh > rack_rto_max) { 5194 thresh = rack_rto_max; 5195 } 5196 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5197 return (thresh); 5198 } 5199 5200 static uint32_t 5201 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5202 struct rack_sendmap *rsm, uint32_t srtt) 5203 { 5204 struct rack_sendmap *prsm; 5205 uint32_t thresh, len; 5206 int segsiz; 5207 5208 if (srtt == 0) 5209 srtt = 1; 5210 if (rack->r_ctl.rc_tlp_threshold) 5211 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5212 else 5213 thresh = (srtt * 2); 5214 5215 /* Get the previous sent packet, if any */ 5216 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5217 counter_u64_add(rack_enter_tlp_calc, 1); 5218 len = rsm->r_end - rsm->r_start; 5219 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5220 /* Exactly like the ID */ 5221 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5222 uint32_t alt_thresh; 5223 /* 5224 * Compensate for delayed-ack with the d-ack time. 5225 */ 5226 counter_u64_add(rack_used_tlpmethod, 1); 5227 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5228 if (alt_thresh > thresh) 5229 thresh = alt_thresh; 5230 } 5231 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5232 /* 2.1 behavior */ 5233 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5234 if (prsm && (len <= segsiz)) { 5235 /* 5236 * Two packets outstanding, thresh should be (2*srtt) + 5237 * possible inter-packet delay (if any). 5238 */ 5239 uint32_t inter_gap = 0; 5240 int idx, nidx; 5241 5242 counter_u64_add(rack_used_tlpmethod, 1); 5243 idx = rsm->r_rtr_cnt - 1; 5244 nidx = prsm->r_rtr_cnt - 1; 5245 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5246 /* Yes it was sent later (or at the same time) */ 5247 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5248 } 5249 thresh += inter_gap; 5250 } else if (len <= segsiz) { 5251 /* 5252 * Possibly compensate for delayed-ack. 5253 */ 5254 uint32_t alt_thresh; 5255 5256 counter_u64_add(rack_used_tlpmethod2, 1); 5257 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5258 if (alt_thresh > thresh) 5259 thresh = alt_thresh; 5260 } 5261 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5262 /* 2.2 behavior */ 5263 if (len <= segsiz) { 5264 uint32_t alt_thresh; 5265 /* 5266 * Compensate for delayed-ack with the d-ack time. 5267 */ 5268 counter_u64_add(rack_used_tlpmethod, 1); 5269 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5270 if (alt_thresh > thresh) 5271 thresh = alt_thresh; 5272 } 5273 } 5274 /* Not above an RTO */ 5275 if (thresh > tp->t_rxtcur) { 5276 thresh = tp->t_rxtcur; 5277 } 5278 /* Not above a RTO max */ 5279 if (thresh > rack_rto_max) { 5280 thresh = rack_rto_max; 5281 } 5282 /* Apply user supplied min TLP */ 5283 if (thresh < rack_tlp_min) { 5284 thresh = rack_tlp_min; 5285 } 5286 return (thresh); 5287 } 5288 5289 static uint32_t 5290 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5291 { 5292 /* 5293 * We want the rack_rtt which is the 5294 * last rtt we measured. 
However if that 5295 * does not exist we fallback to the srtt (which 5296 * we probably will never do) and then as a last 5297 * resort we use RACK_INITIAL_RTO if no srtt is 5298 * yet set. 5299 */ 5300 if (rack->rc_rack_rtt) 5301 return (rack->rc_rack_rtt); 5302 else if (tp->t_srtt == 0) 5303 return (RACK_INITIAL_RTO); 5304 return (tp->t_srtt); 5305 } 5306 5307 static struct rack_sendmap * 5308 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5309 { 5310 /* 5311 * Check to see that we don't need to fall into recovery. We will 5312 * need to do so if our oldest transmit is past the time we should 5313 * have had an ack. 5314 */ 5315 struct tcp_rack *rack; 5316 struct rack_sendmap *rsm; 5317 int32_t idx; 5318 uint32_t srtt, thresh; 5319 5320 rack = (struct tcp_rack *)tp->t_fb_ptr; 5321 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5322 return (NULL); 5323 } 5324 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5325 if (rsm == NULL) 5326 return (NULL); 5327 5328 if (rsm->r_flags & RACK_ACKED) { 5329 rsm = rack_find_lowest_rsm(rack); 5330 if (rsm == NULL) 5331 return (NULL); 5332 } 5333 idx = rsm->r_rtr_cnt - 1; 5334 srtt = rack_grab_rtt(tp, rack); 5335 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5336 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5337 return (NULL); 5338 } 5339 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5340 return (NULL); 5341 } 5342 /* Ok if we reach here we are over-due and this guy can be sent */ 5343 if (IN_RECOVERY(tp->t_flags) == 0) { 5344 /* 5345 * For the one that enters us into recovery record undo 5346 * info. 5347 */ 5348 rack->r_ctl.rc_rsm_start = rsm->r_start; 5349 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5350 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5351 } 5352 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5353 return (rsm); 5354 } 5355 5356 static uint32_t 5357 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5358 { 5359 int32_t t; 5360 int32_t tt; 5361 uint32_t ret_val; 5362 5363 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5364 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5365 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5366 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 5367 tp->t_rxtshift++; 5368 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5369 ret_val = (uint32_t)tt; 5370 return (ret_val); 5371 } 5372 5373 static uint32_t 5374 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5375 { 5376 /* 5377 * Start the FR timer, we do this based on getting the first one in 5378 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5379 * events we need to stop the running timer (if its running) before 5380 * starting the new one. 
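 *
 * A rough sketch of the RXT arming done in the activate_rxt path
 * below (hypothetical numbers): with t_rxtcur = 400,000 usecs and
 * 150,000 usecs already elapsed since the last (re)transmission,
 * the timer is armed for ~250,000 usecs; if the elapsed time has
 * already consumed t_rxtcur we fall back to rc_min_to.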
5381 */ 5382 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5383 uint32_t srtt_cur; 5384 int32_t idx; 5385 int32_t is_tlp_timer = 0; 5386 struct rack_sendmap *rsm; 5387 5388 if (rack->t_timers_stopped) { 5389 /* All timers have been stopped none are to run */ 5390 return (0); 5391 } 5392 if (rack->rc_in_persist) { 5393 /* We can't start any timer in persists */ 5394 return (rack_get_persists_timer_val(tp, rack)); 5395 } 5396 rack->rc_on_min_to = 0; 5397 if ((tp->t_state < TCPS_ESTABLISHED) || 5398 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5399 goto activate_rxt; 5400 } 5401 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5402 if ((rsm == NULL) || sup_rack) { 5403 /* Nothing on the send map or no rack */ 5404 activate_rxt: 5405 time_since_sent = 0; 5406 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5407 if (rsm) { 5408 /* 5409 * Should we discount the RTX timer any? 5410 * 5411 * We want to discount it the smallest amount. 5412 * If a timer (Rack/TLP or RXT) has gone off more 5413 * recently thats the discount we want to use (now - timer time). 5414 * If the retransmit of the oldest packet was more recent then 5415 * we want to use that (now - oldest-packet-last_transmit_time). 5416 * 5417 */ 5418 idx = rsm->r_rtr_cnt - 1; 5419 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5420 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5421 else 5422 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5423 if (TSTMP_GT(cts, tstmp_touse)) 5424 time_since_sent = cts - tstmp_touse; 5425 } 5426 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5427 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5428 to = tp->t_rxtcur; 5429 if (to > time_since_sent) 5430 to -= time_since_sent; 5431 else 5432 to = rack->r_ctl.rc_min_to; 5433 if (to == 0) 5434 to = 1; 5435 /* Special case for KEEPINIT */ 5436 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5437 (TP_KEEPINIT(tp) != 0) && 5438 rsm) { 5439 /* 5440 * We have to put a ceiling on the rxt timer 5441 * of the keep-init timeout. 5442 */ 5443 uint32_t max_time, red; 5444 5445 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5446 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5447 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5448 if (red < max_time) 5449 max_time -= red; 5450 else 5451 max_time = 1; 5452 } 5453 /* Reduce timeout to the keep value if needed */ 5454 if (max_time < to) 5455 to = max_time; 5456 } 5457 return (to); 5458 } 5459 return (0); 5460 } 5461 if (rsm->r_flags & RACK_ACKED) { 5462 rsm = rack_find_lowest_rsm(rack); 5463 if (rsm == NULL) { 5464 /* No lowest? */ 5465 goto activate_rxt; 5466 } 5467 } 5468 if (rack->sack_attack_disable) { 5469 /* 5470 * We don't want to do 5471 * any TLP's if you are an attacker. 5472 * Though if you are doing what 5473 * is expected you may still have 5474 * SACK-PASSED marks. 5475 */ 5476 goto activate_rxt; 5477 } 5478 /* Convert from ms to usecs */ 5479 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5480 if ((tp->t_flags & TF_SENTFIN) && 5481 ((tp->snd_max - tp->snd_una) == 1) && 5482 (rsm->r_flags & RACK_HAS_FIN)) { 5483 /* 5484 * We don't start a rack timer if all we have is a 5485 * FIN outstanding. 
5486 */ 5487 goto activate_rxt; 5488 } 5489 if ((rack->use_rack_rr == 0) && 5490 (IN_FASTRECOVERY(tp->t_flags)) && 5491 (rack->rack_no_prr == 0) && 5492 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5493 /* 5494 * We are not cheating, in recovery and 5495 * not enough ack's to yet get our next 5496 * retransmission out. 5497 * 5498 * Note that classified attackers do not 5499 * get to use the rack-cheat. 5500 */ 5501 goto activate_tlp; 5502 } 5503 srtt = rack_grab_rtt(tp, rack); 5504 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5505 idx = rsm->r_rtr_cnt - 1; 5506 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5507 if (SEQ_GEQ(exp, cts)) { 5508 to = exp - cts; 5509 if (to < rack->r_ctl.rc_min_to) { 5510 to = rack->r_ctl.rc_min_to; 5511 if (rack->r_rr_config == 3) 5512 rack->rc_on_min_to = 1; 5513 } 5514 } else { 5515 to = rack->r_ctl.rc_min_to; 5516 if (rack->r_rr_config == 3) 5517 rack->rc_on_min_to = 1; 5518 } 5519 } else { 5520 /* Ok we need to do a TLP not RACK */ 5521 activate_tlp: 5522 if ((rack->rc_tlp_in_progress != 0) && 5523 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5524 /* 5525 * The previous send was a TLP and we have sent 5526 * N TLP's without sending new data. 5527 */ 5528 goto activate_rxt; 5529 } 5530 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5531 if (rsm == NULL) { 5532 /* We found no rsm to TLP with. */ 5533 goto activate_rxt; 5534 } 5535 if (rsm->r_flags & RACK_HAS_FIN) { 5536 /* If its a FIN we dont do TLP */ 5537 rsm = NULL; 5538 goto activate_rxt; 5539 } 5540 idx = rsm->r_rtr_cnt - 1; 5541 time_since_sent = 0; 5542 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5543 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5544 else 5545 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5546 if (TSTMP_GT(cts, tstmp_touse)) 5547 time_since_sent = cts - tstmp_touse; 5548 is_tlp_timer = 1; 5549 if (tp->t_srtt) { 5550 if ((rack->rc_srtt_measure_made == 0) && 5551 (tp->t_srtt == 1)) { 5552 /* 5553 * If another stack as run and set srtt to 1, 5554 * then the srtt was 0, so lets use the initial. 5555 */ 5556 srtt = RACK_INITIAL_RTO; 5557 } else { 5558 srtt_cur = tp->t_srtt; 5559 srtt = srtt_cur; 5560 } 5561 } else 5562 srtt = RACK_INITIAL_RTO; 5563 /* 5564 * If the SRTT is not keeping up and the 5565 * rack RTT has spiked we want to use 5566 * the last RTT not the smoothed one. 5567 */ 5568 if (rack_tlp_use_greater && 5569 tp->t_srtt && 5570 (srtt < rack_grab_rtt(tp, rack))) { 5571 srtt = rack_grab_rtt(tp, rack); 5572 } 5573 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5574 if (thresh > time_since_sent) { 5575 to = thresh - time_since_sent; 5576 } else { 5577 to = rack->r_ctl.rc_min_to; 5578 rack_log_alt_to_to_cancel(rack, 5579 thresh, /* flex1 */ 5580 time_since_sent, /* flex2 */ 5581 tstmp_touse, /* flex3 */ 5582 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5583 (uint32_t)rsm->r_tim_lastsent[idx], 5584 srtt, 5585 idx, 99); 5586 } 5587 if (to < rack_tlp_min) { 5588 to = rack_tlp_min; 5589 } 5590 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5591 /* 5592 * If the TLP time works out to larger than the max 5593 * RTO lets not do TLP.. just RTO. 
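 *
 * For example (hypothetical numbers): with srtt = 200,000 usecs the
 * armed TLP timeout is on the order of 2 * srtt, far below the RTO
 * ceiling, so the TLP timer is kept; only a pathologically large
 * value falls back to the plain RXT timer here.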
5594 */ 5595 goto activate_rxt; 5596 } 5597 } 5598 if (is_tlp_timer == 0) { 5599 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5600 } else { 5601 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5602 } 5603 if (to == 0) 5604 to = 1; 5605 return (to); 5606 } 5607 5608 static void 5609 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5610 { 5611 if (rack->rc_in_persist == 0) { 5612 if (tp->t_flags & TF_GPUTINPROG) { 5613 /* 5614 * Stop the goodput now, the calling of the 5615 * measurement function clears the flag. 5616 */ 5617 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5618 RACK_QUALITY_PERSIST); 5619 } 5620 #ifdef NETFLIX_SHARED_CWND 5621 if (rack->r_ctl.rc_scw) { 5622 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5623 rack->rack_scwnd_is_idle = 1; 5624 } 5625 #endif 5626 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5627 if (rack->r_ctl.rc_went_idle_time == 0) 5628 rack->r_ctl.rc_went_idle_time = 1; 5629 rack_timer_cancel(tp, rack, cts, __LINE__); 5630 tp->t_rxtshift = 0; 5631 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5632 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5633 rack->rc_in_persist = 1; 5634 } 5635 } 5636 5637 static void 5638 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5639 { 5640 if (rack->rc_inp->inp_in_hpts) { 5641 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 5642 rack->r_ctl.rc_hpts_flags = 0; 5643 } 5644 #ifdef NETFLIX_SHARED_CWND 5645 if (rack->r_ctl.rc_scw) { 5646 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5647 rack->rack_scwnd_is_idle = 0; 5648 } 5649 #endif 5650 if (rack->rc_gp_dyn_mul && 5651 (rack->use_fixed_rate == 0) && 5652 (rack->rc_always_pace)) { 5653 /* 5654 * Do we count this as if a probe-rtt just 5655 * finished? 5656 */ 5657 uint32_t time_idle, idle_min; 5658 5659 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5660 idle_min = rack_min_probertt_hold; 5661 if (rack_probertt_gpsrtt_cnt_div) { 5662 uint64_t extra; 5663 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5664 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5665 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5666 idle_min += (uint32_t)extra; 5667 } 5668 if (time_idle >= idle_min) { 5669 /* Yes, we count it as a probe-rtt. 
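 * The idle_min threshold above is rack_min_probertt_hold plus a
 * configurable fraction of the gp srtt; e.g. (hypothetical numbers)
 * with rc_gp_srtt = 50,000 usecs, a multiplier of 3 and a divisor
 * of 4 the extra term is 37,500 usecs on top of the minimum hold.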
*/ 5670 uint32_t us_cts; 5671 5672 us_cts = tcp_get_usecs(NULL); 5673 if (rack->in_probe_rtt == 0) { 5674 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5675 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5676 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5677 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5678 } else { 5679 rack_exit_probertt(rack, us_cts); 5680 } 5681 } 5682 } 5683 rack->rc_in_persist = 0; 5684 rack->r_ctl.rc_went_idle_time = 0; 5685 tp->t_rxtshift = 0; 5686 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5687 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5688 rack->r_ctl.rc_agg_delayed = 0; 5689 rack->r_early = 0; 5690 rack->r_late = 0; 5691 rack->r_ctl.rc_agg_early = 0; 5692 } 5693 5694 static void 5695 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5696 struct hpts_diag *diag, struct timeval *tv) 5697 { 5698 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5699 union tcp_log_stackspecific log; 5700 5701 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5702 log.u_bbr.flex1 = diag->p_nxt_slot; 5703 log.u_bbr.flex2 = diag->p_cur_slot; 5704 log.u_bbr.flex3 = diag->slot_req; 5705 log.u_bbr.flex4 = diag->inp_hptsslot; 5706 log.u_bbr.flex5 = diag->slot_remaining; 5707 log.u_bbr.flex6 = diag->need_new_to; 5708 log.u_bbr.flex7 = diag->p_hpts_active; 5709 log.u_bbr.flex8 = diag->p_on_min_sleep; 5710 /* Hijack other fields as needed */ 5711 log.u_bbr.epoch = diag->have_slept; 5712 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5713 log.u_bbr.pkts_out = diag->co_ret; 5714 log.u_bbr.applimited = diag->hpts_sleep_time; 5715 log.u_bbr.delivered = diag->p_prev_slot; 5716 log.u_bbr.inflight = diag->p_runningslot; 5717 log.u_bbr.bw_inuse = diag->wheel_slot; 5718 log.u_bbr.rttProp = diag->wheel_cts; 5719 log.u_bbr.timeStamp = cts; 5720 log.u_bbr.delRate = diag->maxslots; 5721 log.u_bbr.cur_del_rate = diag->p_curtick; 5722 log.u_bbr.cur_del_rate <<= 32; 5723 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5724 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5725 &rack->rc_inp->inp_socket->so_rcv, 5726 &rack->rc_inp->inp_socket->so_snd, 5727 BBR_LOG_HPTSDIAG, 0, 5728 0, &log, false, tv); 5729 } 5730 5731 } 5732 5733 static void 5734 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5735 { 5736 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5737 union tcp_log_stackspecific log; 5738 struct timeval tv; 5739 5740 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5741 log.u_bbr.flex1 = sb->sb_flags; 5742 log.u_bbr.flex2 = len; 5743 log.u_bbr.flex3 = sb->sb_state; 5744 log.u_bbr.flex8 = type; 5745 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5746 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5747 &rack->rc_inp->inp_socket->so_rcv, 5748 &rack->rc_inp->inp_socket->so_snd, 5749 TCP_LOG_SB_WAKE, 0, 5750 len, &log, false, &tv); 5751 } 5752 } 5753 5754 static void 5755 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5756 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5757 { 5758 struct hpts_diag diag; 5759 struct inpcb *inp; 5760 struct timeval tv; 5761 uint32_t delayed_ack = 0; 5762 uint32_t hpts_timeout; 5763 uint32_t entry_slot = slot; 5764 uint8_t stopped; 5765 uint32_t left = 0; 5766 uint32_t us_cts; 5767 5768 inp = tp->t_inpcb; 5769 if ((tp->t_state == TCPS_CLOSED) || 5770 (tp->t_state == TCPS_LISTEN)) { 5771 return; 5772 } 5773 if (inp->inp_in_hpts) { 5774 /* Already on the pacer */ 5775 return; 5776 } 5777 stopped 
= rack->rc_tmr_stopped; 5778 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5779 left = rack->r_ctl.rc_timer_exp - cts; 5780 } 5781 rack->r_ctl.rc_timer_exp = 0; 5782 rack->r_ctl.rc_hpts_flags = 0; 5783 us_cts = tcp_get_usecs(&tv); 5784 /* Now early/late accounting */ 5785 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5786 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5787 /* 5788 * We have a early carry over set, 5789 * we can always add more time so we 5790 * can always make this compensation. 5791 * 5792 * Note if ack's are allowed to wake us do not 5793 * penalize the next timer for being awoke 5794 * by an ack aka the rc_agg_early (non-paced mode). 5795 */ 5796 slot += rack->r_ctl.rc_agg_early; 5797 rack->r_early = 0; 5798 rack->r_ctl.rc_agg_early = 0; 5799 } 5800 if (rack->r_late) { 5801 /* 5802 * This is harder, we can 5803 * compensate some but it 5804 * really depends on what 5805 * the current pacing time is. 5806 */ 5807 if (rack->r_ctl.rc_agg_delayed >= slot) { 5808 /* 5809 * We can't compensate for it all. 5810 * And we have to have some time 5811 * on the clock. We always have a min 5812 * 10 slots (10 x 10 i.e. 100 usecs). 5813 */ 5814 if (slot <= HPTS_TICKS_PER_SLOT) { 5815 /* We gain delay */ 5816 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5817 slot = HPTS_TICKS_PER_SLOT; 5818 } else { 5819 /* We take off some */ 5820 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5821 slot = HPTS_TICKS_PER_SLOT; 5822 } 5823 } else { 5824 slot -= rack->r_ctl.rc_agg_delayed; 5825 rack->r_ctl.rc_agg_delayed = 0; 5826 /* Make sure we have 100 useconds at minimum */ 5827 if (slot < HPTS_TICKS_PER_SLOT) { 5828 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5829 slot = HPTS_TICKS_PER_SLOT; 5830 } 5831 if (rack->r_ctl.rc_agg_delayed == 0) 5832 rack->r_late = 0; 5833 } 5834 } 5835 if (slot) { 5836 /* We are pacing too */ 5837 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5838 } 5839 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5840 #ifdef NETFLIX_EXP_DETECTION 5841 if (rack->sack_attack_disable && 5842 (slot < tcp_sad_pacing_interval)) { 5843 /* 5844 * We have a potential attacker on 5845 * the line. We have possibly some 5846 * (or now) pacing time set. We want to 5847 * slow down the processing of sacks by some 5848 * amount (if it is an attacker). Set the default 5849 * slot for attackers in place (unless the orginal 5850 * interval is longer). Its stored in 5851 * micro-seconds, so lets convert to msecs. 5852 */ 5853 slot = tcp_sad_pacing_interval; 5854 } 5855 #endif 5856 if (tp->t_flags & TF_DELACK) { 5857 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5858 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5859 } 5860 if (delayed_ack && ((hpts_timeout == 0) || 5861 (delayed_ack < hpts_timeout))) 5862 hpts_timeout = delayed_ack; 5863 else 5864 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5865 /* 5866 * If no timers are going to run and we will fall off the hptsi 5867 * wheel, we resort to a keep-alive timer if its configured. 5868 */ 5869 if ((hpts_timeout == 0) && 5870 (slot == 0)) { 5871 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5872 (tp->t_state <= TCPS_CLOSING)) { 5873 /* 5874 * Ok we have no timer (persists, rack, tlp, rxt or 5875 * del-ack), we don't have segments being paced. So 5876 * all that is left is the keepalive timer. 
5877 */ 5878 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5879 /* Get the established keep-alive time */ 5880 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5881 } else { 5882 /* 5883 * Get the initial setup keep-alive time, 5884 * note that this is probably not going to 5885 * happen, since rack will be running a rxt timer 5886 * if a SYN of some sort is outstanding. It is 5887 * actually handled in rack_timeout_rxt(). 5888 */ 5889 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5890 } 5891 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5892 if (rack->in_probe_rtt) { 5893 /* 5894 * We want to instead not wake up a long time from 5895 * now but to wake up about the time we would 5896 * exit probe-rtt and initiate a keep-alive ack. 5897 * This will get us out of probe-rtt and update 5898 * our min-rtt. 5899 */ 5900 hpts_timeout = rack_min_probertt_hold; 5901 } 5902 } 5903 } 5904 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5905 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5906 /* 5907 * RACK, TLP, persists and RXT timers all are restartable 5908 * based on actions input .. i.e we received a packet (ack 5909 * or sack) and that changes things (rw, or snd_una etc). 5910 * Thus we can restart them with a new value. For 5911 * keep-alive, delayed_ack we keep track of what was left 5912 * and restart the timer with a smaller value. 5913 */ 5914 if (left < hpts_timeout) 5915 hpts_timeout = left; 5916 } 5917 if (hpts_timeout) { 5918 /* 5919 * Hack alert for now we can't time-out over 2,147,483 5920 * seconds (a bit more than 596 hours), which is probably ok 5921 * :). 5922 */ 5923 if (hpts_timeout > 0x7ffffffe) 5924 hpts_timeout = 0x7ffffffe; 5925 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5926 } 5927 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5928 if ((rack->gp_ready == 0) && 5929 (rack->use_fixed_rate == 0) && 5930 (hpts_timeout < slot) && 5931 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5932 /* 5933 * We have no good estimate yet for the 5934 * old clunky burst mitigation or the 5935 * real pacing. And the tlp or rxt is smaller 5936 * than the pacing calculation. Lets not 5937 * pace that long since we know the calculation 5938 * so far is not accurate. 5939 */ 5940 slot = hpts_timeout; 5941 } 5942 rack->r_ctl.last_pacing_time = slot; 5943 /** 5944 * Turn off all the flags for queuing by default. The 5945 * flags have important meanings to what happens when 5946 * LRO interacts with the transport. Most likely (by default now) 5947 * mbuf_queueing and ack compression are on. So the transport 5948 * has a couple of flags that control what happens (if those 5949 * are not on then these flags won't have any effect since it 5950 * won't go through the queuing LRO path). 5951 * 5952 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5953 * pacing output, so don't disturb. But 5954 * it also means LRO can wake me if there 5955 * is a SACK arrival. 5956 * 5957 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5958 * with the above flag (QUEUE_READY) and 5959 * when present it says don't even wake me 5960 * if a SACK arrives. 5961 * 5962 * The idea behind these flags is that if we are pacing we 5963 * set the MBUF_QUEUE_READY and only get woken up if 5964 * a SACK arrives (which could change things) or if 5965 * our pacing timer expires. If, however, we have a rack 5966 * timer running, then we don't even want a sack to wake 5967 * us since the rack timer has to expire before we can send. 
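 *
 * Summarizing what the code below actually sets (see the branches
 * that follow):
 *   pacing timer only          -> INP_MBUF_QUEUE_READY
 *   pacing timer + rack timer  -> INP_MBUF_QUEUE_READY |
 *                                 INP_DONT_SACK_QUEUE
 *                                 (unless r_rr_config == 3)
 *   ack-can-send-data override -> neither flag
 *   hpts timeout only          -> neither flag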
5968 * 5969 * Other cases should usually have none of the flags set 5970 * so LRO can call into us. 5971 */ 5972 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5973 if (slot) { 5974 rack->r_ctl.rc_last_output_to = us_cts + slot; 5975 /* 5976 * A pacing timer (slot) is being set, in 5977 * such a case we cannot send (we are blocked by 5978 * the timer). So lets tell LRO that it should not 5979 * wake us unless there is a SACK. Note this only 5980 * will be effective if mbuf queueing is on or 5981 * compressed acks are being processed. 5982 */ 5983 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5984 /* 5985 * But wait if we have a Rack timer running 5986 * even a SACK should not disturb us (with 5987 * the exception of r_rr_config 3). 5988 */ 5989 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5990 (rack->r_rr_config != 3)) 5991 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5992 if (rack->rc_ack_can_sendout_data) { 5993 /* 5994 * Ahh but wait, this is that special case 5995 * where the pacing timer can be disturbed 5996 * backout the changes (used for non-paced 5997 * burst limiting). 5998 */ 5999 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 6000 } 6001 if ((rack->use_rack_rr) && 6002 (rack->r_rr_config < 2) && 6003 ((hpts_timeout) && (hpts_timeout < slot))) { 6004 /* 6005 * Arrange for the hpts to kick back in after the 6006 * t-o if the t-o does not cause a send. 6007 */ 6008 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6009 __LINE__, &diag); 6010 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6011 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6012 } else { 6013 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 6014 __LINE__, &diag); 6015 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6016 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6017 } 6018 } else if (hpts_timeout) { 6019 /* 6020 * With respect to inp_flags2 here, lets let any new acks wake 6021 * us up here. Since we are not pacing (no pacing timer), output 6022 * can happen so we should let it. If its a Rack timer, then any inbound 6023 * packet probably won't change the sending (we will be blocked) 6024 * but it may change the prr stats so letting it in (the set defaults 6025 * at the start of this block) are good enough. 6026 */ 6027 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6028 __LINE__, &diag); 6029 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6030 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6031 } else { 6032 /* No timer starting */ 6033 #ifdef INVARIANTS 6034 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6035 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6036 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6037 } 6038 #endif 6039 } 6040 rack->rc_tmr_stopped = 0; 6041 if (slot) 6042 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 6043 } 6044 6045 /* 6046 * RACK Timer, here we simply do logging and house keeping. 6047 * the normal rack_output() function will call the 6048 * appropriate thing to check if we need to do a RACK retransmit. 6049 * We return 1, saying don't proceed with rack_output only 6050 * when all timers have been stopped (destroyed PCB?). 6051 */ 6052 static int 6053 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6054 { 6055 /* 6056 * This timer simply provides an internal trigger to send out data. 
6057 * The check_recovery_mode call will see if there are needed 6058 * retransmissions, if so we will enter fast-recovery. The output 6059 * call may or may not do the same thing depending on sysctl 6060 * settings. 6061 */ 6062 struct rack_sendmap *rsm; 6063 6064 if (tp->t_timers->tt_flags & TT_STOPPED) { 6065 return (1); 6066 } 6067 counter_u64_add(rack_to_tot, 1); 6068 if (rack->r_state && (rack->r_state != tp->t_state)) 6069 rack_set_state(tp, rack); 6070 rack->rc_on_min_to = 0; 6071 rsm = rack_check_recovery_mode(tp, cts); 6072 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6073 if (rsm) { 6074 rack->r_ctl.rc_resend = rsm; 6075 rack->r_timer_override = 1; 6076 if (rack->use_rack_rr) { 6077 /* 6078 * Don't accumulate extra pacing delay 6079 * we are allowing the rack timer to 6080 * over-ride pacing i.e. rrr takes precedence 6081 * if the pacing interval is longer than the rrr 6082 * time (in other words we get the min pacing 6083 * time versus rrr pacing time). 6084 */ 6085 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6086 } 6087 } 6088 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6089 if (rsm == NULL) { 6090 /* restart a timer and return 1 */ 6091 rack_start_hpts_timer(rack, tp, cts, 6092 0, 0, 0); 6093 return (1); 6094 } 6095 return (0); 6096 } 6097 6098 static void 6099 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6100 { 6101 if (rsm->m->m_len > rsm->orig_m_len) { 6102 /* 6103 * Mbuf grew, caused by sbcompress, our offset does 6104 * not change. 6105 */ 6106 rsm->orig_m_len = rsm->m->m_len; 6107 } else if (rsm->m->m_len < rsm->orig_m_len) { 6108 /* 6109 * Mbuf shrank, trimmed off the top by an ack, our 6110 * offset changes. 6111 */ 6112 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6113 rsm->orig_m_len = rsm->m->m_len; 6114 } 6115 } 6116 6117 static void 6118 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6119 { 6120 struct mbuf *m; 6121 uint32_t soff; 6122 6123 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 6124 /* Fix up the orig_m_len and possibly the mbuf offset */ 6125 rack_adjust_orig_mlen(src_rsm); 6126 } 6127 m = src_rsm->m; 6128 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6129 while (soff >= m->m_len) { 6130 /* Move out past this mbuf */ 6131 soff -= m->m_len; 6132 m = m->m_next; 6133 KASSERT((m != NULL), 6134 ("rsm:%p nrsm:%p hit at soff:%u null m", 6135 src_rsm, rsm, soff)); 6136 } 6137 rsm->m = m; 6138 rsm->soff = soff; 6139 rsm->orig_m_len = m->m_len; 6140 } 6141 6142 static __inline void 6143 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 6144 struct rack_sendmap *rsm, uint32_t start) 6145 { 6146 int idx; 6147 6148 nrsm->r_start = start; 6149 nrsm->r_end = rsm->r_end; 6150 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6151 nrsm->r_flags = rsm->r_flags; 6152 nrsm->r_dupack = rsm->r_dupack; 6153 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6154 nrsm->r_rtr_bytes = 0; 6155 nrsm->r_fas = rsm->r_fas; 6156 rsm->r_end = nrsm->r_start; 6157 nrsm->r_just_ret = rsm->r_just_ret; 6158 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6159 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6160 } 6161 /* Now if we have SYN flag we keep it on the left edge */ 6162 if (nrsm->r_flags & RACK_HAS_SYN) 6163 nrsm->r_flags &= ~RACK_HAS_SYN; 6164 /* Now if we have a FIN flag we keep it on the right edge */ 6165 if (rsm->r_flags & RACK_HAS_FIN) 6166 rsm->r_flags &= ~RACK_HAS_FIN; 6167 /* Push bit must go to the right edge as well */ 6168 if (rsm->r_flags & RACK_HAD_PUSH) 6169 rsm->r_flags &= 
~RACK_HAD_PUSH; 6170 /* Clone over the state of the hw_tls flag */ 6171 nrsm->r_hw_tls = rsm->r_hw_tls; 6172 /* 6173 * Now we need to find nrsm's new location in the mbuf chain 6174 * we basically calculate a new offset, which is soff + 6175 * how much is left in original rsm. Then we walk out the mbuf 6176 * chain to find the righ postion, it may be the same mbuf 6177 * or maybe not. 6178 */ 6179 KASSERT(((rsm->m != NULL) || 6180 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6181 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6182 if (rsm->m) 6183 rack_setup_offset_for_rsm(rsm, nrsm); 6184 } 6185 6186 static struct rack_sendmap * 6187 rack_merge_rsm(struct tcp_rack *rack, 6188 struct rack_sendmap *l_rsm, 6189 struct rack_sendmap *r_rsm) 6190 { 6191 /* 6192 * We are merging two ack'd RSM's, 6193 * the l_rsm is on the left (lower seq 6194 * values) and the r_rsm is on the right 6195 * (higher seq value). The simplest way 6196 * to merge these is to move the right 6197 * one into the left. I don't think there 6198 * is any reason we need to try to find 6199 * the oldest (or last oldest retransmitted). 6200 */ 6201 struct rack_sendmap *rm; 6202 6203 rack_log_map_chg(rack->rc_tp, rack, NULL, 6204 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6205 l_rsm->r_end = r_rsm->r_end; 6206 if (l_rsm->r_dupack < r_rsm->r_dupack) 6207 l_rsm->r_dupack = r_rsm->r_dupack; 6208 if (r_rsm->r_rtr_bytes) 6209 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6210 if (r_rsm->r_in_tmap) { 6211 /* This really should not happen */ 6212 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6213 r_rsm->r_in_tmap = 0; 6214 } 6215 6216 /* Now the flags */ 6217 if (r_rsm->r_flags & RACK_HAS_FIN) 6218 l_rsm->r_flags |= RACK_HAS_FIN; 6219 if (r_rsm->r_flags & RACK_TLP) 6220 l_rsm->r_flags |= RACK_TLP; 6221 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6222 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6223 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6224 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6225 /* 6226 * If both are app-limited then let the 6227 * free lower the count. If right is app 6228 * limited and left is not, transfer. 6229 */ 6230 l_rsm->r_flags |= RACK_APP_LIMITED; 6231 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6232 if (r_rsm == rack->r_ctl.rc_first_appl) 6233 rack->r_ctl.rc_first_appl = l_rsm; 6234 } 6235 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6236 #ifdef INVARIANTS 6237 if (rm != r_rsm) { 6238 panic("removing head in rack:%p rsm:%p rm:%p", 6239 rack, r_rsm, rm); 6240 } 6241 #endif 6242 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6243 /* Transfer the split limit to the map we free */ 6244 r_rsm->r_limit_type = l_rsm->r_limit_type; 6245 l_rsm->r_limit_type = 0; 6246 } 6247 rack_free(rack, r_rsm); 6248 return (l_rsm); 6249 } 6250 6251 /* 6252 * TLP Timer, here we simply setup what segment we want to 6253 * have the TLP expire on, the normal rack_output() will then 6254 * send it out. 6255 * 6256 * We return 1, saying don't proceed with rack_output only 6257 * when all timers have been stopped (destroyed PCB?). 6258 */ 6259 static int 6260 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6261 { 6262 /* 6263 * Tail Loss Probe. 
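 * In short: after roughly two rtts with no further acks we arrange
 * to send one probe segment, preferring new data when the window
 * allows it and otherwise (re)sending the highest outstanding
 * segment, so a dropped tail elicits SACK/ACK feedback instead of
 * waiting out a full RTO.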
6264 */ 6265 struct rack_sendmap *rsm = NULL; 6266 struct rack_sendmap *insret; 6267 struct socket *so; 6268 uint32_t amm; 6269 uint32_t out, avail; 6270 int collapsed_win = 0; 6271 6272 if (tp->t_timers->tt_flags & TT_STOPPED) { 6273 return (1); 6274 } 6275 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6276 /* Its not time yet */ 6277 return (0); 6278 } 6279 if (ctf_progress_timeout_check(tp, true)) { 6280 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6281 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6282 return (1); 6283 } 6284 /* 6285 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6286 * need to figure out how to force a full MSS segment out. 6287 */ 6288 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6289 rack->r_ctl.retran_during_recovery = 0; 6290 rack->r_ctl.dsack_byte_cnt = 0; 6291 counter_u64_add(rack_tlp_tot, 1); 6292 if (rack->r_state && (rack->r_state != tp->t_state)) 6293 rack_set_state(tp, rack); 6294 so = tp->t_inpcb->inp_socket; 6295 avail = sbavail(&so->so_snd); 6296 out = tp->snd_max - tp->snd_una; 6297 if (out > tp->snd_wnd) { 6298 /* special case, we need a retransmission */ 6299 collapsed_win = 1; 6300 goto need_retran; 6301 } 6302 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6303 rack->r_ctl.dsack_persist--; 6304 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6305 rack->r_ctl.num_dsack = 0; 6306 } 6307 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6308 } 6309 if ((tp->t_flags & TF_GPUTINPROG) && 6310 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6311 /* 6312 * If this is the second in a row 6313 * TLP and we are doing a measurement 6314 * its time to abandon the measurement. 6315 * Something is likely broken on 6316 * the clients network and measuring a 6317 * broken network does us no good. 6318 */ 6319 tp->t_flags &= ~TF_GPUTINPROG; 6320 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6321 rack->r_ctl.rc_gp_srtt /*flex1*/, 6322 tp->gput_seq, 6323 0, 0, 18, __LINE__, NULL, 0); 6324 } 6325 /* 6326 * Check our send oldest always settings, and if 6327 * there is an oldest to send jump to the need_retran. 6328 */ 6329 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6330 goto need_retran; 6331 6332 if (avail > out) { 6333 /* New data is available */ 6334 amm = avail - out; 6335 if (amm > ctf_fixed_maxseg(tp)) { 6336 amm = ctf_fixed_maxseg(tp); 6337 if ((amm + out) > tp->snd_wnd) { 6338 /* We are rwnd limited */ 6339 goto need_retran; 6340 } 6341 } else if (amm < ctf_fixed_maxseg(tp)) { 6342 /* not enough to fill a MTU */ 6343 goto need_retran; 6344 } 6345 if (IN_FASTRECOVERY(tp->t_flags)) { 6346 /* Unlikely */ 6347 if (rack->rack_no_prr == 0) { 6348 if (out + amm <= tp->snd_wnd) { 6349 rack->r_ctl.rc_prr_sndcnt = amm; 6350 rack->r_ctl.rc_tlp_new_data = amm; 6351 rack_log_to_prr(rack, 4, 0); 6352 } 6353 } else 6354 goto need_retran; 6355 } else { 6356 /* Set the send-new override */ 6357 if (out + amm <= tp->snd_wnd) 6358 rack->r_ctl.rc_tlp_new_data = amm; 6359 else 6360 goto need_retran; 6361 } 6362 rack->r_ctl.rc_tlpsend = NULL; 6363 counter_u64_add(rack_tlp_newdata, 1); 6364 goto send; 6365 } 6366 need_retran: 6367 /* 6368 * Ok we need to arrange the last un-acked segment to be re-sent, or 6369 * optionally the first un-acked segment. 
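 *
 * For example, with rack_always_send_oldest off and no collapsed
 * window, we take the highest-sequence entry in the map (RB_MAX); if
 * that entry is already ACKED or carries the FIN,
 * rack_find_high_nonack() walks back to one we can actually probe
 * with.  If the chosen entry covers, say, [1000, 4000) and the fixed
 * maxseg is 1460, it is cloned at 4000 - 1460 = 2540 so the probe
 * retransmits only the trailing MSS [2540, 4000), while the front
 * [1000, 2540) is left for the normal RACK/RXT machinery.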
6370 */ 6371 if (collapsed_win == 0) { 6372 if (rack_always_send_oldest) 6373 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6374 else { 6375 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6376 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6377 rsm = rack_find_high_nonack(rack, rsm); 6378 } 6379 } 6380 if (rsm == NULL) { 6381 counter_u64_add(rack_tlp_does_nada, 1); 6382 #ifdef TCP_BLACKBOX 6383 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6384 #endif 6385 goto out; 6386 } 6387 } else { 6388 /* 6389 * We must find the last segment 6390 * that was acceptable by the client. 6391 */ 6392 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6393 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6394 /* Found one */ 6395 break; 6396 } 6397 } 6398 if (rsm == NULL) { 6399 /* None? if so send the first */ 6400 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6401 if (rsm == NULL) { 6402 counter_u64_add(rack_tlp_does_nada, 1); 6403 #ifdef TCP_BLACKBOX 6404 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6405 #endif 6406 goto out; 6407 } 6408 } 6409 } 6410 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6411 /* 6412 * We need to split this the last segment in two. 6413 */ 6414 struct rack_sendmap *nrsm; 6415 6416 nrsm = rack_alloc_full_limit(rack); 6417 if (nrsm == NULL) { 6418 /* 6419 * No memory to split, we will just exit and punt 6420 * off to the RXT timer. 6421 */ 6422 counter_u64_add(rack_tlp_does_nada, 1); 6423 goto out; 6424 } 6425 rack_clone_rsm(rack, nrsm, rsm, 6426 (rsm->r_end - ctf_fixed_maxseg(tp))); 6427 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6428 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6429 #ifdef INVARIANTS 6430 if (insret != NULL) { 6431 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6432 nrsm, insret, rack, rsm); 6433 } 6434 #endif 6435 if (rsm->r_in_tmap) { 6436 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6437 nrsm->r_in_tmap = 1; 6438 } 6439 rsm = nrsm; 6440 } 6441 rack->r_ctl.rc_tlpsend = rsm; 6442 send: 6443 /* Make sure output path knows we are doing a TLP */ 6444 *doing_tlp = 1; 6445 rack->r_timer_override = 1; 6446 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6447 return (0); 6448 out: 6449 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6450 return (0); 6451 } 6452 6453 /* 6454 * Delayed ack Timer, here we simply need to setup the 6455 * ACK_NOW flag and remove the DELACK flag. From there 6456 * the output routine will send the ack out. 6457 * 6458 * We only return 1, saying don't proceed, if all timers 6459 * are stopped (destroyed PCB?). 6460 */ 6461 static int 6462 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6463 { 6464 if (tp->t_timers->tt_flags & TT_STOPPED) { 6465 return (1); 6466 } 6467 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6468 tp->t_flags &= ~TF_DELACK; 6469 tp->t_flags |= TF_ACKNOW; 6470 KMOD_TCPSTAT_INC(tcps_delack); 6471 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6472 return (0); 6473 } 6474 6475 /* 6476 * Persists timer, here we simply send the 6477 * same thing as a keepalive will. 6478 * the one byte send. 6479 * 6480 * We only return 1, saying don't proceed, if all timers 6481 * are stopped (destroyed PCB?). 
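 *
 * The probe is built with tcpip_maketemplate() and sent with
 * tcp_respond() using sequence number snd_una - 1, a zero-length
 * segment that lies outside the peer's receive window and therefore
 * must be answered with an ACK carrying the current advertised window.
 * Every persist timeout also bumps t_rxtshift (capped at
 * TCP_MAXRXTSHIFT) so the probe interval backs off, and the connection
 * is dropped once the unanswered idle time reaches the maximum
 * backoff, or TCPTV_PERSMAX after the user has already closed the
 * socket.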
6482 */ 6483 static int 6484 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6485 { 6486 struct tcptemp *t_template; 6487 struct inpcb *inp; 6488 int32_t retval = 1; 6489 6490 inp = tp->t_inpcb; 6491 6492 if (tp->t_timers->tt_flags & TT_STOPPED) { 6493 return (1); 6494 } 6495 if (rack->rc_in_persist == 0) 6496 return (0); 6497 if (ctf_progress_timeout_check(tp, false)) { 6498 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6499 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6500 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6501 return (1); 6502 } 6503 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6504 /* 6505 * Persistence timer into zero window. Force a byte to be output, if 6506 * possible. 6507 */ 6508 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6509 /* 6510 * Hack: if the peer is dead/unreachable, we do not time out if the 6511 * window is closed. After a full backoff, drop the connection if 6512 * the idle time (no responses to probes) reaches the maximum 6513 * backoff that we would use if retransmitting. 6514 */ 6515 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6516 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6517 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6518 KMOD_TCPSTAT_INC(tcps_persistdrop); 6519 retval = 1; 6520 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6521 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6522 goto out; 6523 } 6524 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6525 tp->snd_una == tp->snd_max) 6526 rack_exit_persist(tp, rack, cts); 6527 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6528 /* 6529 * If the user has closed the socket then drop a persisting 6530 * connection after a much reduced timeout. 6531 */ 6532 if (tp->t_state > TCPS_CLOSE_WAIT && 6533 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6534 retval = 1; 6535 KMOD_TCPSTAT_INC(tcps_persistdrop); 6536 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6537 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6538 goto out; 6539 } 6540 t_template = tcpip_maketemplate(rack->rc_inp); 6541 if (t_template) { 6542 /* only set it if we were answered */ 6543 if (rack->forced_ack == 0) { 6544 rack->forced_ack = 1; 6545 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6546 } 6547 tcp_respond(tp, t_template->tt_ipgen, 6548 &t_template->tt_t, (struct mbuf *)NULL, 6549 tp->rcv_nxt, tp->snd_una - 1, 0); 6550 /* This sends an ack */ 6551 if (tp->t_flags & TF_DELACK) 6552 tp->t_flags &= ~TF_DELACK; 6553 free(t_template, M_TEMP); 6554 } 6555 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6556 tp->t_rxtshift++; 6557 out: 6558 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6559 rack_start_hpts_timer(rack, tp, cts, 6560 0, 0, 0); 6561 return (retval); 6562 } 6563 6564 /* 6565 * If a keepalive goes off, we had no other timers 6566 * happening. We always return 1 here since this 6567 * routine either drops the connection or sends 6568 * out a segment with respond. 6569 */ 6570 static int 6571 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6572 { 6573 struct tcptemp *t_template; 6574 struct inpcb *inp; 6575 6576 if (tp->t_timers->tt_flags & TT_STOPPED) { 6577 return (1); 6578 } 6579 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6580 inp = tp->t_inpcb; 6581 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6582 /* 6583 * Keep-alive timer went off; send something or drop connection if 6584 * idle for too long. 
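 *
 * "Too long" means no segment has been received for at least
 * TP_KEEPIDLE(tp) + TP_MAXIDLE(tp) ticks.  With the stock FreeBSD
 * settings (roughly a 2 hour keep-idle plus 8 probes at 75 second
 * intervals) that is about 2 hours and 10 minutes before the drop;
 * the exact figures follow the net.inet.tcp.keep* sysctls and any
 * TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT socket options.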
6585 */ 6586 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6587 if (tp->t_state < TCPS_ESTABLISHED) 6588 goto dropit; 6589 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6590 tp->t_state <= TCPS_CLOSING) { 6591 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6592 goto dropit; 6593 /* 6594 * Send a packet designed to force a response if the peer is 6595 * up and reachable: either an ACK if the connection is 6596 * still alive, or an RST if the peer has closed the 6597 * connection due to timeout or reboot. Using sequence 6598 * number tp->snd_una-1 causes the transmitted zero-length 6599 * segment to lie outside the receive window; by the 6600 * protocol spec, this requires the correspondent TCP to 6601 * respond. 6602 */ 6603 KMOD_TCPSTAT_INC(tcps_keepprobe); 6604 t_template = tcpip_maketemplate(inp); 6605 if (t_template) { 6606 if (rack->forced_ack == 0) { 6607 rack->forced_ack = 1; 6608 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6609 } 6610 tcp_respond(tp, t_template->tt_ipgen, 6611 &t_template->tt_t, (struct mbuf *)NULL, 6612 tp->rcv_nxt, tp->snd_una - 1, 0); 6613 free(t_template, M_TEMP); 6614 } 6615 } 6616 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6617 return (1); 6618 dropit: 6619 KMOD_TCPSTAT_INC(tcps_keepdrops); 6620 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6621 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6622 return (1); 6623 } 6624 6625 /* 6626 * Retransmit helper function, clear up all the ack 6627 * flags and take care of important book keeping. 6628 */ 6629 static void 6630 rack_remxt_tmr(struct tcpcb *tp) 6631 { 6632 /* 6633 * The retransmit timer went off, all sack'd blocks must be 6634 * un-acked. 6635 */ 6636 struct rack_sendmap *rsm, *trsm = NULL; 6637 struct tcp_rack *rack; 6638 6639 rack = (struct tcp_rack *)tp->t_fb_ptr; 6640 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6641 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6642 if (rack->r_state && (rack->r_state != tp->t_state)) 6643 rack_set_state(tp, rack); 6644 /* 6645 * Ideally we would like to be able to 6646 * mark SACK-PASS on anything not acked here. 6647 * 6648 * However, if we do that we would burst out 6649 * all that data 1ms apart. This would be unwise, 6650 * so for now we will just let the normal rxt timer 6651 * and tlp timer take care of it. 6652 * 6653 * Also we really need to stick them back in sequence 6654 * order. This way we send in the proper order and any 6655 * sacks that come floating in will "re-ack" the data. 6656 * To do this we zap the tmap with an INIT and then 6657 * walk through and place every rsm in the RB tree 6658 * back in its seq ordered place. 
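 *
 * For example, if the map held [1, 11), [11, 21) (SACK'd) and
 * [21, 31) when the RXT timer fired, all three are re-queued on the
 * tmap in that sequence order, the middle block loses RACK_ACKED
 * (remembering it via RACK_WAS_ACKED) and rc_sacked drops back to
 * zero, so the whole range is treated as outstanding again.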
6659 */ 6660 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6661 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6662 rsm->r_dupack = 0; 6663 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6664 /* We must re-add it back to the tlist */ 6665 if (trsm == NULL) { 6666 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6667 } else { 6668 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6669 } 6670 rsm->r_in_tmap = 1; 6671 trsm = rsm; 6672 if (rsm->r_flags & RACK_ACKED) 6673 rsm->r_flags |= RACK_WAS_ACKED; 6674 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6675 } 6676 /* Clear the count (we just un-acked them) */ 6677 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6678 rack->r_ctl.rc_sacked = 0; 6679 rack->r_ctl.rc_sacklast = NULL; 6680 rack->r_ctl.rc_agg_delayed = 0; 6681 rack->r_early = 0; 6682 rack->r_ctl.rc_agg_early = 0; 6683 rack->r_late = 0; 6684 /* Clear the tlp rtx mark */ 6685 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6686 if (rack->r_ctl.rc_resend != NULL) 6687 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6688 rack->r_ctl.rc_prr_sndcnt = 0; 6689 rack_log_to_prr(rack, 6, 0); 6690 rack->r_timer_override = 1; 6691 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6692 #ifdef NETFLIX_EXP_DETECTION 6693 || (rack->sack_attack_disable != 0) 6694 #endif 6695 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6696 /* 6697 * For non-sack customers new data 6698 * needs to go out as retransmits until 6699 * we retransmit up to snd_max. 6700 */ 6701 rack->r_must_retran = 1; 6702 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6703 rack->r_ctl.rc_sacked); 6704 } 6705 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6706 } 6707 6708 static void 6709 rack_convert_rtts(struct tcpcb *tp) 6710 { 6711 if (tp->t_srtt > 1) { 6712 uint32_t val, frac; 6713 6714 val = tp->t_srtt >> TCP_RTT_SHIFT; 6715 frac = tp->t_srtt & 0x1f; 6716 tp->t_srtt = TICKS_2_USEC(val); 6717 /* 6718 * frac is the fractional part of the srtt (if any) 6719 * but its in ticks and every bit represents 6720 * 1/32nd of a hz. 6721 */ 6722 if (frac) { 6723 if (hz == 1000) { 6724 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6725 } else { 6726 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6727 } 6728 tp->t_srtt += frac; 6729 } 6730 } 6731 if (tp->t_rttvar) { 6732 uint32_t val, frac; 6733 6734 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6735 frac = tp->t_rttvar & 0x1f; 6736 tp->t_rttvar = TICKS_2_USEC(val); 6737 /* 6738 * frac is the fractional part of the srtt (if any) 6739 * but its in ticks and every bit represents 6740 * 1/32nd of a hz. 6741 */ 6742 if (frac) { 6743 if (hz == 1000) { 6744 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6745 } else { 6746 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6747 } 6748 tp->t_rttvar += frac; 6749 } 6750 } 6751 tp->t_rxtcur = RACK_REXMTVAL(tp); 6752 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6753 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6754 } 6755 if (tp->t_rxtcur > rack_rto_max) { 6756 tp->t_rxtcur = rack_rto_max; 6757 } 6758 } 6759 6760 static void 6761 rack_cc_conn_init(struct tcpcb *tp) 6762 { 6763 struct tcp_rack *rack; 6764 uint32_t srtt; 6765 6766 rack = (struct tcp_rack *)tp->t_fb_ptr; 6767 srtt = tp->t_srtt; 6768 cc_conn_init(tp); 6769 /* 6770 * Now convert to rack's internal format, 6771 * if required. 
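 *
 * rack_convert_rtts() turns the base stack's fixed-point tick values
 * into plain microseconds.  For example, with hz = 1000 (one tick per
 * millisecond) and TCP_RTT_SHIFT = 5, a t_srtt of 3216 decodes to
 * 3216 >> 5 = 100 whole ticks plus a fraction of 3216 & 0x1f = 16/32
 * of a tick, i.e. 100 * 1000 + (16 * 1000) / 32 = 100500 usec.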
6772 */ 6773 if ((srtt == 0) && (tp->t_srtt != 0)) 6774 rack_convert_rtts(tp); 6775 /* 6776 * We want a chance to stay in slowstart as 6777 * we create a connection. TCP spec says that 6778 * initially ssthresh is infinite. For our 6779 * purposes that is the snd_wnd. 6780 */ 6781 if (tp->snd_ssthresh < tp->snd_wnd) { 6782 tp->snd_ssthresh = tp->snd_wnd; 6783 } 6784 /* 6785 * We also want to assure a IW worth of 6786 * data can get inflight. 6787 */ 6788 if (rc_init_window(rack) < tp->snd_cwnd) 6789 tp->snd_cwnd = rc_init_window(rack); 6790 } 6791 6792 /* 6793 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6794 * we will setup to retransmit the lowest seq number outstanding. 6795 */ 6796 static int 6797 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6798 { 6799 int32_t rexmt; 6800 struct inpcb *inp; 6801 int32_t retval = 0; 6802 bool isipv6; 6803 6804 inp = tp->t_inpcb; 6805 if (tp->t_timers->tt_flags & TT_STOPPED) { 6806 return (1); 6807 } 6808 if ((tp->t_flags & TF_GPUTINPROG) && 6809 (tp->t_rxtshift)) { 6810 /* 6811 * We have had a second timeout 6812 * measurements on successive rxt's are not profitable. 6813 * It is unlikely to be of any use (the network is 6814 * broken or the client went away). 6815 */ 6816 tp->t_flags &= ~TF_GPUTINPROG; 6817 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6818 rack->r_ctl.rc_gp_srtt /*flex1*/, 6819 tp->gput_seq, 6820 0, 0, 18, __LINE__, NULL, 0); 6821 } 6822 if (ctf_progress_timeout_check(tp, false)) { 6823 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6824 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6825 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6826 return (1); 6827 } 6828 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6829 rack->r_ctl.retran_during_recovery = 0; 6830 rack->r_ctl.dsack_byte_cnt = 0; 6831 if (IN_FASTRECOVERY(tp->t_flags)) 6832 tp->t_flags |= TF_WASFRECOVERY; 6833 else 6834 tp->t_flags &= ~TF_WASFRECOVERY; 6835 if (IN_CONGRECOVERY(tp->t_flags)) 6836 tp->t_flags |= TF_WASCRECOVERY; 6837 else 6838 tp->t_flags &= ~TF_WASCRECOVERY; 6839 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6840 (tp->snd_una == tp->snd_max)) { 6841 /* Nothing outstanding .. nothing to do */ 6842 return (0); 6843 } 6844 if (rack->r_ctl.dsack_persist) { 6845 rack->r_ctl.dsack_persist--; 6846 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6847 rack->r_ctl.num_dsack = 0; 6848 } 6849 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6850 } 6851 /* 6852 * Rack can only run one timer at a time, so we cannot 6853 * run a KEEPINIT (gating SYN sending) and a retransmit 6854 * timer for the SYN. So if we are in a front state and 6855 * have a KEEPINIT timer we need to check the first transmit 6856 * against now to see if we have exceeded the KEEPINIT time 6857 * (if one is set). 6858 */ 6859 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6860 (TP_KEEPINIT(tp) != 0)) { 6861 struct rack_sendmap *rsm; 6862 6863 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6864 if (rsm) { 6865 /* Ok we have something outstanding to test keepinit with */ 6866 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6867 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6868 /* We have exceeded the KEEPINIT time */ 6869 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6870 goto drop_it; 6871 } 6872 } 6873 } 6874 /* 6875 * Retransmission timer went off. Message has not been acked within 6876 * retransmit interval. 
Back off to a longer retransmit interval 6877 * and retransmit one segment. 6878 */ 6879 rack_remxt_tmr(tp); 6880 if ((rack->r_ctl.rc_resend == NULL) || 6881 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6882 /* 6883 * If the rwnd collapsed on 6884 * the one we are retransmitting 6885 * it does not count against the 6886 * rxt count. 6887 */ 6888 tp->t_rxtshift++; 6889 } 6890 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6891 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6892 drop_it: 6893 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6894 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6895 retval = 1; 6896 tcp_set_inp_to_drop(rack->rc_inp, 6897 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 6898 goto out; 6899 } 6900 if (tp->t_state == TCPS_SYN_SENT) { 6901 /* 6902 * If the SYN was retransmitted, indicate CWND to be limited 6903 * to 1 segment in cc_conn_init(). 6904 */ 6905 tp->snd_cwnd = 1; 6906 } else if (tp->t_rxtshift == 1) { 6907 /* 6908 * first retransmit; record ssthresh and cwnd so they can be 6909 * recovered if this turns out to be a "bad" retransmit. A 6910 * retransmit is considered "bad" if an ACK for this segment 6911 * is received within RTT/2 interval; the assumption here is 6912 * that the ACK was already in flight. See "On Estimating 6913 * End-to-End Network Path Properties" by Allman and Paxson 6914 * for more details. 6915 */ 6916 tp->snd_cwnd_prev = tp->snd_cwnd; 6917 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6918 tp->snd_recover_prev = tp->snd_recover; 6919 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6920 tp->t_flags |= TF_PREVVALID; 6921 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6922 tp->t_flags &= ~TF_PREVVALID; 6923 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6924 if ((tp->t_state == TCPS_SYN_SENT) || 6925 (tp->t_state == TCPS_SYN_RECEIVED)) 6926 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6927 else 6928 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6929 6930 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6931 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6932 /* 6933 * We enter the path for PLMTUD if connection is established or, if 6934 * connection is FIN_WAIT_1 status, reason for the last is that if 6935 * amount of data we send is very small, we could send it in couple 6936 * of packets and process straight to FIN. In that case we won't 6937 * catch ESTABLISHED state. 6938 */ 6939 #ifdef INET6 6940 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; 6941 #else 6942 isipv6 = false; 6943 #endif 6944 if (((V_tcp_pmtud_blackhole_detect == 1) || 6945 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6946 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6947 ((tp->t_state == TCPS_ESTABLISHED) || 6948 (tp->t_state == TCPS_FIN_WAIT_1))) { 6949 /* 6950 * Idea here is that at each stage of mtu probe (usually, 6951 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6952 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6953 * should take care of that. 6954 */ 6955 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6956 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6957 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6958 tp->t_rxtshift % 2 == 0)) { 6959 /* 6960 * Enter Path MTU Black-hole Detection mechanism: - 6961 * Disable Path MTU Discovery (IP "DF" bit). - 6962 * Reduce MTU to lower value than what we negotiated 6963 * with peer. 
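 *
 * The resulting clamping schedule: t_rxtshift values 2 and 4 each give
 * one step down (two RTO chances at every size), so an initial MSS of,
 * say, 1448 is first cut to the net.inet.tcp.pmtud_blackhole_mss value
 * (typically 1200, or its IPv6 counterpart) and then to the protocol
 * default MSS with PMTUD disabled.  If we reach t_rxtshift >= 6 with
 * no progress, the saved MSS is restored and blackhole detection gives
 * up (the else branch further below).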
6964 */ 6965 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6966 /* Record that we may have found a black hole. */ 6967 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6968 /* Keep track of previous MSS. */ 6969 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6970 } 6971 6972 /* 6973 * Reduce the MSS to blackhole value or to the 6974 * default in an attempt to retransmit. 6975 */ 6976 #ifdef INET6 6977 if (isipv6 && 6978 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6979 /* Use the sysctl tuneable blackhole MSS. */ 6980 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6981 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6982 } else if (isipv6) { 6983 /* Use the default MSS. */ 6984 tp->t_maxseg = V_tcp_v6mssdflt; 6985 /* 6986 * Disable Path MTU Discovery when we switch 6987 * to minmss. 6988 */ 6989 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6990 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6991 } 6992 #endif 6993 #if defined(INET6) && defined(INET) 6994 else 6995 #endif 6996 #ifdef INET 6997 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6998 /* Use the sysctl tuneable blackhole MSS. */ 6999 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 7000 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 7001 } else { 7002 /* Use the default MSS. */ 7003 tp->t_maxseg = V_tcp_mssdflt; 7004 /* 7005 * Disable Path MTU Discovery when we switch 7006 * to minmss. 7007 */ 7008 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 7009 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 7010 } 7011 #endif 7012 } else { 7013 /* 7014 * If further retransmissions are still unsuccessful 7015 * with a lowered MTU, maybe this isn't a blackhole 7016 * and we restore the previous MSS and blackhole 7017 * detection flags. The limit '6' is determined by 7018 * giving each probe stage (1448, 1188, 524) 2 7019 * chances to recover. 7020 */ 7021 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7022 (tp->t_rxtshift >= 6)) { 7023 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7024 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7025 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7026 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7027 } 7028 } 7029 } 7030 /* 7031 * Disable RFC1323 and SACK if we haven't got any response to 7032 * our third SYN to work-around some broken terminal servers 7033 * (most of which have hopefully been retired) that have bad VJ 7034 * header compression code which trashes TCP segments containing 7035 * unknown-to-them TCP options. 7036 */ 7037 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7038 (tp->t_rxtshift == 3)) 7039 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7040 /* 7041 * If we backed off this far, our srtt estimate is probably bogus. 7042 * Clobber it so we'll take the next rtt measurement as our srtt; 7043 * move the current srtt into rttvar to keep the current retransmit 7044 * times until then. 
7045 */ 7046 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7047 #ifdef INET6 7048 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7049 in6_losing(tp->t_inpcb); 7050 else 7051 #endif 7052 in_losing(tp->t_inpcb); 7053 tp->t_rttvar += tp->t_srtt; 7054 tp->t_srtt = 0; 7055 } 7056 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7057 tp->snd_recover = tp->snd_max; 7058 tp->t_flags |= TF_ACKNOW; 7059 tp->t_rtttime = 0; 7060 rack_cong_signal(tp, CC_RTO, tp->snd_una); 7061 out: 7062 return (retval); 7063 } 7064 7065 static int 7066 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7067 { 7068 int32_t ret = 0; 7069 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7070 7071 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7072 (tp->t_flags & TF_GPUTINPROG)) { 7073 /* 7074 * We have a goodput in progress 7075 * and we have entered a late state. 7076 * Do we have enough data in the sb 7077 * to handle the GPUT request? 7078 */ 7079 uint32_t bytes; 7080 7081 bytes = tp->gput_ack - tp->gput_seq; 7082 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7083 bytes += tp->gput_seq - tp->snd_una; 7084 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 7085 /* 7086 * There are not enough bytes in the socket 7087 * buffer that have been sent to cover this 7088 * measurement. Cancel it. 7089 */ 7090 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7091 rack->r_ctl.rc_gp_srtt /*flex1*/, 7092 tp->gput_seq, 7093 0, 0, 18, __LINE__, NULL, 0); 7094 tp->t_flags &= ~TF_GPUTINPROG; 7095 } 7096 } 7097 if (timers == 0) { 7098 return (0); 7099 } 7100 if (tp->t_state == TCPS_LISTEN) { 7101 /* no timers on listen sockets */ 7102 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7103 return (0); 7104 return (1); 7105 } 7106 if ((timers & PACE_TMR_RACK) && 7107 rack->rc_on_min_to) { 7108 /* 7109 * For the rack timer when we 7110 * are on a min-timeout (which means rrr_conf = 3) 7111 * we don't want to check the timer. It may 7112 * be going off for a pace and thats ok we 7113 * want to send the retransmit (if its ready). 7114 * 7115 * If its on a normal rack timer (non-min) then 7116 * we will check if its expired. 7117 */ 7118 goto skip_time_check; 7119 } 7120 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7121 uint32_t left; 7122 7123 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7124 ret = -1; 7125 rack_log_to_processing(rack, cts, ret, 0); 7126 return (0); 7127 } 7128 if (hpts_calling == 0) { 7129 /* 7130 * A user send or queued mbuf (sack) has called us? We 7131 * return 0 and let the pacing guards 7132 * deal with it if they should or 7133 * should not cause a send. 7134 */ 7135 ret = -2; 7136 rack_log_to_processing(rack, cts, ret, 0); 7137 return (0); 7138 } 7139 /* 7140 * Ok our timer went off early and we are not paced false 7141 * alarm, go back to sleep. 
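 *
 * Summarizing the early-exit codes logged here: -1 means the timer
 * "fired" while a pacer-scheduled output was still pending, -2 means
 * we were called from a user send or a queued mbuf rather than from
 * hpts, and -3 (just below) is a genuine early wakeup, in which case
 * we re-insert ourselves into hpts for the time remaining until
 * rc_timer_exp and return 1 with the timer flags left intact.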
7142 */ 7143 ret = -3; 7144 left = rack->r_ctl.rc_timer_exp - cts; 7145 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 7146 rack_log_to_processing(rack, cts, ret, left); 7147 return (1); 7148 } 7149 skip_time_check: 7150 rack->rc_tmr_stopped = 0; 7151 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7152 if (timers & PACE_TMR_DELACK) { 7153 ret = rack_timeout_delack(tp, rack, cts); 7154 } else if (timers & PACE_TMR_RACK) { 7155 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7156 rack->r_fast_output = 0; 7157 ret = rack_timeout_rack(tp, rack, cts); 7158 } else if (timers & PACE_TMR_TLP) { 7159 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7160 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7161 } else if (timers & PACE_TMR_RXT) { 7162 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7163 rack->r_fast_output = 0; 7164 ret = rack_timeout_rxt(tp, rack, cts); 7165 } else if (timers & PACE_TMR_PERSIT) { 7166 ret = rack_timeout_persist(tp, rack, cts); 7167 } else if (timers & PACE_TMR_KEEP) { 7168 ret = rack_timeout_keepalive(tp, rack, cts); 7169 } 7170 rack_log_to_processing(rack, cts, ret, timers); 7171 return (ret); 7172 } 7173 7174 static void 7175 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7176 { 7177 struct timeval tv; 7178 uint32_t us_cts, flags_on_entry; 7179 uint8_t hpts_removed = 0; 7180 7181 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7182 us_cts = tcp_get_usecs(&tv); 7183 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7184 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7185 ((tp->snd_max - tp->snd_una) == 0))) { 7186 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7187 hpts_removed = 1; 7188 /* If we were not delayed cancel out the flag. */ 7189 if ((tp->snd_max - tp->snd_una) == 0) 7190 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7191 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7192 } 7193 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7194 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7195 if (rack->rc_inp->inp_in_hpts && 7196 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7197 /* 7198 * Canceling timer's when we have no output being 7199 * paced. We also must remove ourselves from the 7200 * hpts. 7201 */ 7202 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7203 hpts_removed = 1; 7204 } 7205 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7206 } 7207 if (hpts_removed == 0) 7208 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7209 } 7210 7211 static void 7212 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7213 { 7214 return; 7215 } 7216 7217 static int 7218 rack_stopall(struct tcpcb *tp) 7219 { 7220 struct tcp_rack *rack; 7221 rack = (struct tcp_rack *)tp->t_fb_ptr; 7222 rack->t_timers_stopped = 1; 7223 return (0); 7224 } 7225 7226 static void 7227 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7228 { 7229 return; 7230 } 7231 7232 static int 7233 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7234 { 7235 return (0); 7236 } 7237 7238 static void 7239 rack_stop_all_timers(struct tcpcb *tp) 7240 { 7241 struct tcp_rack *rack; 7242 7243 /* 7244 * Assure no timers are running. 
7245 */ 7246 if (tcp_timer_active(tp, TT_PERSIST)) { 7247 /* We enter in persists, set the flag appropriately */ 7248 rack = (struct tcp_rack *)tp->t_fb_ptr; 7249 rack->rc_in_persist = 1; 7250 } 7251 tcp_timer_suspend(tp, TT_PERSIST); 7252 tcp_timer_suspend(tp, TT_REXMT); 7253 tcp_timer_suspend(tp, TT_KEEP); 7254 tcp_timer_suspend(tp, TT_DELACK); 7255 } 7256 7257 static void 7258 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7259 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7260 { 7261 int32_t idx; 7262 uint16_t stripped_flags; 7263 7264 rsm->r_rtr_cnt++; 7265 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7266 rsm->r_dupack = 0; 7267 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7268 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7269 rsm->r_flags |= RACK_OVERMAX; 7270 } 7271 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7272 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7273 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7274 } 7275 idx = rsm->r_rtr_cnt - 1; 7276 rsm->r_tim_lastsent[idx] = ts; 7277 /* 7278 * Here we don't add in the len of send, since its already 7279 * in snduna <->snd_max. 7280 */ 7281 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7282 rack->r_ctl.rc_sacked); 7283 stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP); 7284 if (rsm->r_flags & RACK_ACKED) { 7285 /* Problably MTU discovery messing with us */ 7286 rsm->r_flags &= ~RACK_ACKED; 7287 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7288 } 7289 if (rsm->r_in_tmap) { 7290 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7291 rsm->r_in_tmap = 0; 7292 } 7293 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7294 rsm->r_in_tmap = 1; 7295 if (rsm->r_flags & RACK_SACK_PASSED) { 7296 /* We have retransmitted due to the SACK pass */ 7297 rsm->r_flags &= ~RACK_SACK_PASSED; 7298 rsm->r_flags |= RACK_WAS_SACKPASS; 7299 } 7300 } 7301 7302 static uint32_t 7303 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7304 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7305 { 7306 /* 7307 * We (re-)transmitted starting at rsm->r_start for some length 7308 * (possibly less than r_end. 7309 */ 7310 struct rack_sendmap *nrsm, *insret; 7311 uint32_t c_end; 7312 int32_t len; 7313 7314 len = *lenp; 7315 c_end = rsm->r_start + len; 7316 if (SEQ_GEQ(c_end, rsm->r_end)) { 7317 /* 7318 * We retransmitted the whole piece or more than the whole 7319 * slopping into the next rsm. 7320 */ 7321 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7322 if (c_end == rsm->r_end) { 7323 *lenp = 0; 7324 return (0); 7325 } else { 7326 int32_t act_len; 7327 7328 /* Hangs over the end return whats left */ 7329 act_len = rsm->r_end - rsm->r_start; 7330 *lenp = (len - act_len); 7331 return (rsm->r_end); 7332 } 7333 /* We don't get out of this block. */ 7334 } 7335 /* 7336 * Here we retransmitted less than the whole thing which means we 7337 * have to split this into what was transmitted and what was not. 7338 */ 7339 nrsm = rack_alloc_full_limit(rack); 7340 if (nrsm == NULL) { 7341 /* 7342 * We can't get memory, so lets not proceed. 7343 */ 7344 *lenp = 0; 7345 return (0); 7346 } 7347 /* 7348 * So here we are going to take the original rsm and make it what we 7349 * retransmitted. nrsm will be the tail portion we did not 7350 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7351 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7352 * 1, 6 and the new piece will be 6, 11. 
7353 */ 7354 rack_clone_rsm(rack, nrsm, rsm, c_end); 7355 nrsm->r_dupack = 0; 7356 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7357 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7358 #ifdef INVARIANTS 7359 if (insret != NULL) { 7360 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7361 nrsm, insret, rack, rsm); 7362 } 7363 #endif 7364 if (rsm->r_in_tmap) { 7365 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7366 nrsm->r_in_tmap = 1; 7367 } 7368 rsm->r_flags &= (~RACK_HAS_FIN); 7369 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7370 /* Log a split of rsm into rsm and nrsm */ 7371 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7372 *lenp = 0; 7373 return (0); 7374 } 7375 7376 static void 7377 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7378 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts, 7379 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7380 { 7381 struct tcp_rack *rack; 7382 struct rack_sendmap *rsm, *nrsm, *insret, fe; 7383 register uint32_t snd_max, snd_una; 7384 7385 /* 7386 * Add to the RACK log of packets in flight or retransmitted. If 7387 * there is a TS option we will use the TS echoed, if not we will 7388 * grab a TS. 7389 * 7390 * Retransmissions will increment the count and move the ts to its 7391 * proper place. Note that if options do not include TS's then we 7392 * won't be able to effectively use the ACK for an RTT on a retran. 7393 * 7394 * Notes about r_start and r_end. Lets consider a send starting at 7395 * sequence 1 for 10 bytes. In such an example the r_start would be 7396 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7397 * This means that r_end is actually the first sequence for the next 7398 * slot (11). 7399 * 7400 */ 7401 /* 7402 * If err is set what do we do XXXrrs? should we not add the thing? 7403 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7404 * i.e. proceed with add ** do this for now. 7405 */ 7406 INP_WLOCK_ASSERT(tp->t_inpcb); 7407 if (err) 7408 /* 7409 * We don't log errors -- we could but snd_max does not 7410 * advance in this case either. 7411 */ 7412 return; 7413 7414 if (th_flags & TH_RST) { 7415 /* 7416 * We don't log resets and we return immediately from 7417 * sending 7418 */ 7419 return; 7420 } 7421 rack = (struct tcp_rack *)tp->t_fb_ptr; 7422 snd_una = tp->snd_una; 7423 snd_max = tp->snd_max; 7424 if (th_flags & (TH_SYN | TH_FIN)) { 7425 /* 7426 * The call to rack_log_output is made before bumping 7427 * snd_max. This means we can record one extra byte on a SYN 7428 * or FIN if seq_out is adding more on and a FIN is present 7429 * (and we are not resending). 7430 */ 7431 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7432 len++; 7433 if (th_flags & TH_FIN) 7434 len++; 7435 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7436 /* 7437 * The add/update as not been done for the FIN/SYN 7438 * yet. 7439 */ 7440 snd_max = tp->snd_nxt; 7441 } 7442 } 7443 if (SEQ_LEQ((seq_out + len), snd_una)) { 7444 /* Are sending an old segment to induce an ack (keep-alive)? */ 7445 return; 7446 } 7447 if (SEQ_LT(seq_out, snd_una)) { 7448 /* huh? should we panic? 
*/ 7449 uint32_t end; 7450 7451 end = seq_out + len; 7452 seq_out = snd_una; 7453 if (SEQ_GEQ(end, seq_out)) 7454 len = end - seq_out; 7455 else 7456 len = 0; 7457 } 7458 if (len == 0) { 7459 /* We don't log zero window probes */ 7460 return; 7461 } 7462 rack->r_ctl.rc_time_last_sent = cts; 7463 if (IN_FASTRECOVERY(tp->t_flags)) { 7464 rack->r_ctl.rc_prr_out += len; 7465 } 7466 /* First question is it a retransmission or new? */ 7467 if (seq_out == snd_max) { 7468 /* Its new */ 7469 again: 7470 rsm = rack_alloc(rack); 7471 if (rsm == NULL) { 7472 /* 7473 * Hmm out of memory and the tcb got destroyed while 7474 * we tried to wait. 7475 */ 7476 return; 7477 } 7478 if (th_flags & TH_FIN) { 7479 rsm->r_flags = RACK_HAS_FIN|add_flag; 7480 } else { 7481 rsm->r_flags = add_flag; 7482 } 7483 if (hw_tls) 7484 rsm->r_hw_tls = 1; 7485 rsm->r_tim_lastsent[0] = cts; 7486 rsm->r_rtr_cnt = 1; 7487 rsm->r_rtr_bytes = 0; 7488 if (th_flags & TH_SYN) { 7489 /* The data space is one beyond snd_una */ 7490 rsm->r_flags |= RACK_HAS_SYN; 7491 } 7492 rsm->r_start = seq_out; 7493 rsm->r_end = rsm->r_start + len; 7494 rsm->r_dupack = 0; 7495 /* 7496 * save off the mbuf location that 7497 * sndmbuf_noadv returned (which is 7498 * where we started copying from).. 7499 */ 7500 rsm->m = s_mb; 7501 rsm->soff = s_moff; 7502 /* 7503 * Here we do add in the len of send, since its not yet 7504 * reflected in in snduna <->snd_max 7505 */ 7506 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7507 rack->r_ctl.rc_sacked) + 7508 (rsm->r_end - rsm->r_start)); 7509 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7510 if (rsm->m) { 7511 if (rsm->m->m_len <= rsm->soff) { 7512 /* 7513 * XXXrrs Question, will this happen? 7514 * 7515 * If sbsndptr is set at the correct place 7516 * then s_moff should always be somewhere 7517 * within rsm->m. But if the sbsndptr was 7518 * off then that won't be true. If it occurs 7519 * we need to walkout to the correct location. 7520 */ 7521 struct mbuf *lm; 7522 7523 lm = rsm->m; 7524 while (lm->m_len <= rsm->soff) { 7525 rsm->soff -= lm->m_len; 7526 lm = lm->m_next; 7527 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7528 __func__, rack, s_moff, s_mb, rsm->soff)); 7529 } 7530 rsm->m = lm; 7531 counter_u64_add(rack_sbsndptr_wrong, 1); 7532 } else 7533 counter_u64_add(rack_sbsndptr_right, 1); 7534 rsm->orig_m_len = rsm->m->m_len; 7535 } else 7536 rsm->orig_m_len = 0; 7537 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7538 /* Log a new rsm */ 7539 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7540 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7541 #ifdef INVARIANTS 7542 if (insret != NULL) { 7543 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7544 nrsm, insret, rack, rsm); 7545 } 7546 #endif 7547 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7548 rsm->r_in_tmap = 1; 7549 /* 7550 * Special case detection, is there just a single 7551 * packet outstanding when we are not in recovery? 7552 * 7553 * If this is true mark it so. 7554 */ 7555 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7556 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7557 struct rack_sendmap *prsm; 7558 7559 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7560 if (prsm) 7561 prsm->r_one_out_nr = 1; 7562 } 7563 return; 7564 } 7565 /* 7566 * If we reach here its a retransmission and we need to find it. 
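 *
 * The lookup goes from cheap to expensive: first the caller's hint
 * (hintrsm), then an exact RB_FIND keyed on r_start, and finally the
 * containment case where this send began in the middle of an existing
 * entry.  In that last case the entry is split, e.g. a send starting
 * at 150 that lands inside [100, 200) clones the map entry into
 * [100, 150) and [150, 200) and then updates the new right-hand piece.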
7567 */ 7568 memset(&fe, 0, sizeof(fe)); 7569 more: 7570 if (hintrsm && (hintrsm->r_start == seq_out)) { 7571 rsm = hintrsm; 7572 hintrsm = NULL; 7573 } else { 7574 /* No hints sorry */ 7575 rsm = NULL; 7576 } 7577 if ((rsm) && (rsm->r_start == seq_out)) { 7578 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7579 if (len == 0) { 7580 return; 7581 } else { 7582 goto more; 7583 } 7584 } 7585 /* Ok it was not the last pointer go through it the hard way. */ 7586 refind: 7587 fe.r_start = seq_out; 7588 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7589 if (rsm) { 7590 if (rsm->r_start == seq_out) { 7591 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7592 if (len == 0) { 7593 return; 7594 } else { 7595 goto refind; 7596 } 7597 } 7598 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7599 /* Transmitted within this piece */ 7600 /* 7601 * Ok we must split off the front and then let the 7602 * update do the rest 7603 */ 7604 nrsm = rack_alloc_full_limit(rack); 7605 if (nrsm == NULL) { 7606 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7607 return; 7608 } 7609 /* 7610 * copy rsm to nrsm and then trim the front of rsm 7611 * to not include this part. 7612 */ 7613 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7614 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7615 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7616 #ifdef INVARIANTS 7617 if (insret != NULL) { 7618 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7619 nrsm, insret, rack, rsm); 7620 } 7621 #endif 7622 if (rsm->r_in_tmap) { 7623 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7624 nrsm->r_in_tmap = 1; 7625 } 7626 rsm->r_flags &= (~RACK_HAS_FIN); 7627 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7628 if (len == 0) { 7629 return; 7630 } else if (len > 0) 7631 goto refind; 7632 } 7633 } 7634 /* 7635 * Hmm not found in map did they retransmit both old and on into the 7636 * new? 7637 */ 7638 if (seq_out == tp->snd_max) { 7639 goto again; 7640 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7641 #ifdef INVARIANTS 7642 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7643 seq_out, len, tp->snd_una, tp->snd_max); 7644 printf("Starting Dump of all rack entries\n"); 7645 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7646 printf("rsm:%p start:%u end:%u\n", 7647 rsm, rsm->r_start, rsm->r_end); 7648 } 7649 printf("Dump complete\n"); 7650 panic("seq_out not found rack:%p tp:%p", 7651 rack, tp); 7652 #endif 7653 } else { 7654 #ifdef INVARIANTS 7655 /* 7656 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7657 * flag) 7658 */ 7659 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7660 seq_out, len, tp->snd_max, tp); 7661 #endif 7662 } 7663 } 7664 7665 /* 7666 * Record one of the RTT updates from an ack into 7667 * our sample structure. 
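 *
 * One ack can cover several sendmap entries, so rack_rs accumulates
 * per-ack state: the lowest and highest RTT seen, a running total and
 * count (for the averaging method), and the best microsecond RTT
 * together with a confidence value.  Confidence is 2 for a SACK'd
 * sample, 1 for a cum-acked sample we trust, and 0 when the entry was
 * a just-returned-from-idle send or a lone small send, where the
 * measurement says more about our own pacing than about the path.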
7668 */ 7669 7670 static void 7671 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7672 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7673 { 7674 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7675 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7676 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7677 } 7678 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7679 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7680 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7681 } 7682 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7683 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7684 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7685 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7686 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7687 } 7688 if ((confidence == 1) && 7689 ((rsm == NULL) || 7690 (rsm->r_just_ret) || 7691 (rsm->r_one_out_nr && 7692 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7693 /* 7694 * If the rsm had a just return 7695 * hit it then we can't trust the 7696 * rtt measurement for buffer deterimination 7697 * Note that a confidence of 2, indicates 7698 * SACK'd which overrides the r_just_ret or 7699 * the r_one_out_nr. If it was a CUM-ACK and 7700 * we had only two outstanding, but get an 7701 * ack for only 1. Then that also lowers our 7702 * confidence. 7703 */ 7704 confidence = 0; 7705 } 7706 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7707 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7708 if (rack->r_ctl.rack_rs.confidence == 0) { 7709 /* 7710 * We take anything with no current confidence 7711 * saved. 7712 */ 7713 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7714 rack->r_ctl.rack_rs.confidence = confidence; 7715 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7716 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7717 /* 7718 * Once we have a confident number, 7719 * we can update it with a smaller 7720 * value since this confident number 7721 * may include the DSACK time until 7722 * the next segment (the second one) arrived. 7723 */ 7724 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7725 rack->r_ctl.rack_rs.confidence = confidence; 7726 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7727 } 7728 } 7729 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7730 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7731 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7732 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7733 } 7734 7735 /* 7736 * Collect new round-trip time estimate 7737 * and update averages and current timeout. 
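 *
 * The commit step first reduces the per-ack samples to one value
 * according to rc_rate_sample_method (the lowest, highest or average
 * RTT seen in the ack) and then feeds the microsecond rs_us_rtt into
 * the goodput srtt: rc_gp_srtt is an EWMA with a 1/8 gain that only
 * confident samples may advance (an unconfident sample is accepted
 * only while we have no RTT at all).  The classical t_srtt/t_rttvar
 * update follows below.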
7738 */ 7739 static void 7740 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7741 { 7742 int32_t delta; 7743 uint32_t o_srtt, o_var; 7744 int32_t hrtt_up = 0; 7745 int32_t rtt; 7746 7747 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7748 /* No valid sample */ 7749 return; 7750 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7751 /* We are to use the lowest RTT seen in a single ack */ 7752 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7753 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7754 /* We are to use the highest RTT seen in a single ack */ 7755 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7756 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7757 /* We are to use the average RTT seen in a single ack */ 7758 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7759 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7760 } else { 7761 #ifdef INVARIANTS 7762 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7763 #endif 7764 return; 7765 } 7766 if (rtt == 0) 7767 rtt = 1; 7768 if (rack->rc_gp_rtt_set == 0) { 7769 /* 7770 * With no RTT we have to accept 7771 * even one we are not confident of. 7772 */ 7773 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7774 rack->rc_gp_rtt_set = 1; 7775 } else if (rack->r_ctl.rack_rs.confidence) { 7776 /* update the running gp srtt */ 7777 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7778 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7779 } 7780 if (rack->r_ctl.rack_rs.confidence) { 7781 /* 7782 * record the low and high for highly buffered path computation, 7783 * we only do this if we are confident (not a retransmission). 7784 */ 7785 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7786 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7787 hrtt_up = 1; 7788 } 7789 if (rack->rc_highly_buffered == 0) { 7790 /* 7791 * Currently once we declare a path has 7792 * highly buffered there is no going 7793 * back, which may be a problem... 7794 */ 7795 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7796 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7797 rack->r_ctl.rc_highest_us_rtt, 7798 rack->r_ctl.rc_lowest_us_rtt, 7799 RACK_RTTS_SEEHBP); 7800 rack->rc_highly_buffered = 1; 7801 } 7802 } 7803 } 7804 if ((rack->r_ctl.rack_rs.confidence) || 7805 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7806 /* 7807 * If we are highly confident of it <or> it was 7808 * never retransmitted we accept it as the last us_rtt. 7809 */ 7810 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7811 /* The lowest rtt can be set if its was not retransmited */ 7812 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7813 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7814 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7815 rack->r_ctl.rc_lowest_us_rtt = 1; 7816 } 7817 } 7818 o_srtt = tp->t_srtt; 7819 o_var = tp->t_rttvar; 7820 rack = (struct tcp_rack *)tp->t_fb_ptr; 7821 if (tp->t_srtt != 0) { 7822 /* 7823 * We keep a simple srtt in microseconds, like our rtt 7824 * measurement. We don't need to do any tricks with shifting 7825 * etc. Instead we just add in 1/8th of the new measurement 7826 * and subtract out 1/8 of the old srtt. We do the same with 7827 * the variance after finding the absolute value of the 7828 * difference between this sample and the current srtt. 
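 *
 * For example, with a current srtt of 40000 usec and rttvar of
 * 8000 usec, a new 48000 usec sample gives
 *	delta  = 40000 - 48000       = -8000  (|delta| = 8000)
 *	srtt   = 40000 - 5000 + 6000 = 41000 usec
 *	rttvar = 8000 - 1000 + 1000  = 8000 usec
 * and the retransmit timer is then re-armed from srtt plus four times
 * rttvar, clamped by RACK_TCPT_RANGESET() between (roughly)
 * rack_rto_min and rack_rto_max.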
7829 */ 7830 delta = tp->t_srtt - rtt; 7831 /* Take off 1/8th of the current sRTT */ 7832 tp->t_srtt -= (tp->t_srtt >> 3); 7833 /* Add in 1/8th of the new RTT just measured */ 7834 tp->t_srtt += (rtt >> 3); 7835 if (tp->t_srtt <= 0) 7836 tp->t_srtt = 1; 7837 /* Now lets make the absolute value of the variance */ 7838 if (delta < 0) 7839 delta = -delta; 7840 /* Subtract out 1/8th */ 7841 tp->t_rttvar -= (tp->t_rttvar >> 3); 7842 /* Add in 1/8th of the new variance we just saw */ 7843 tp->t_rttvar += (delta >> 3); 7844 if (tp->t_rttvar <= 0) 7845 tp->t_rttvar = 1; 7846 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7847 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7848 } else { 7849 /* 7850 * No rtt measurement yet - use the unsmoothed rtt. Set the 7851 * variance to half the rtt (so our first retransmit happens 7852 * at 3*rtt). 7853 */ 7854 tp->t_srtt = rtt; 7855 tp->t_rttvar = rtt >> 1; 7856 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7857 } 7858 rack->rc_srtt_measure_made = 1; 7859 KMOD_TCPSTAT_INC(tcps_rttupdated); 7860 tp->t_rttupdated++; 7861 #ifdef STATS 7862 if (rack_stats_gets_ms_rtt == 0) { 7863 /* Send in the microsecond rtt used for rxt timeout purposes */ 7864 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7865 } else if (rack_stats_gets_ms_rtt == 1) { 7866 /* Send in the millisecond rtt used for rxt timeout purposes */ 7867 int32_t ms_rtt; 7868 7869 /* Round up */ 7870 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7871 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7872 } else if (rack_stats_gets_ms_rtt == 2) { 7873 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7874 int32_t ms_rtt; 7875 7876 /* Round up */ 7877 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7878 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7879 } else { 7880 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7881 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7882 } 7883 7884 #endif 7885 /* 7886 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7887 * way we do the smoothing, srtt and rttvar will each average +1/2 7888 * tick of bias. When we compute the retransmit timer, we want 1/2 7889 * tick of rounding and 1 extra tick because of +-1/2 tick 7890 * uncertainty in the firing of the timer. The bias will give us 7891 * exactly the 1.5 tick we need. But, because the bias is 7892 * statistical, we have to test that we don't drop below the minimum 7893 * feasible timer (which is 2 ticks). 7894 */ 7895 tp->t_rxtshift = 0; 7896 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7897 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7898 rack_log_rtt_sample(rack, rtt); 7899 tp->t_softerror = 0; 7900 } 7901 7902 7903 static void 7904 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7905 { 7906 /* 7907 * Apply to filter the inbound us-rtt at us_cts. 
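 *
 * rc_gp_min_rtt is a time-windowed running minimum, so stale lows age
 * out and a genuinely lower RTT shows up below as old_rtt > us_rtt.
 * When the drop exceeds rack_min_rtt_movement and we are pacing
 * dynamically, we may also enter probe-rtt early: if, for instance,
 * the gap between probe-rtt sessions were 10 seconds and
 * rack_probertt_lower_within were 10 (percent), then val would be
 * 1 second and seeing the new low within the last second of the gap
 * pulls probe-rtt entry forward, on the theory that a competing flow
 * has just entered its own probe-rtt.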
7908 */ 7909 uint32_t old_rtt; 7910 7911 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7912 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7913 us_rtt, us_cts); 7914 if (rack->r_ctl.last_pacing_time && 7915 rack->rc_gp_dyn_mul && 7916 (rack->r_ctl.last_pacing_time > us_rtt)) 7917 rack->pacing_longer_than_rtt = 1; 7918 else 7919 rack->pacing_longer_than_rtt = 0; 7920 if (old_rtt > us_rtt) { 7921 /* We just hit a new lower rtt time */ 7922 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7923 __LINE__, RACK_RTTS_NEWRTT); 7924 /* 7925 * Only count it if its lower than what we saw within our 7926 * calculated range. 7927 */ 7928 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7929 if (rack_probertt_lower_within && 7930 rack->rc_gp_dyn_mul && 7931 (rack->use_fixed_rate == 0) && 7932 (rack->rc_always_pace)) { 7933 /* 7934 * We are seeing a new lower rtt very close 7935 * to the time that we would have entered probe-rtt. 7936 * This is probably due to the fact that a peer flow 7937 * has entered probe-rtt. Lets go in now too. 7938 */ 7939 uint32_t val; 7940 7941 val = rack_probertt_lower_within * rack_time_between_probertt; 7942 val /= 100; 7943 if ((rack->in_probe_rtt == 0) && 7944 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7945 rack_enter_probertt(rack, us_cts); 7946 } 7947 } 7948 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7949 } 7950 } 7951 } 7952 7953 static int 7954 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7955 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7956 { 7957 uint32_t us_rtt; 7958 int32_t i, all; 7959 uint32_t t, len_acked; 7960 7961 if ((rsm->r_flags & RACK_ACKED) || 7962 (rsm->r_flags & RACK_WAS_ACKED)) 7963 /* Already done */ 7964 return (0); 7965 if (rsm->r_no_rtt_allowed) { 7966 /* Not allowed */ 7967 return (0); 7968 } 7969 if (ack_type == CUM_ACKED) { 7970 if (SEQ_GT(th_ack, rsm->r_end)) { 7971 len_acked = rsm->r_end - rsm->r_start; 7972 all = 1; 7973 } else { 7974 len_acked = th_ack - rsm->r_start; 7975 all = 0; 7976 } 7977 } else { 7978 len_acked = rsm->r_end - rsm->r_start; 7979 all = 0; 7980 } 7981 if (rsm->r_rtr_cnt == 1) { 7982 7983 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7984 if ((int)t <= 0) 7985 t = 1; 7986 if (!tp->t_rttlow || tp->t_rttlow > t) 7987 tp->t_rttlow = t; 7988 if (!rack->r_ctl.rc_rack_min_rtt || 7989 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7990 rack->r_ctl.rc_rack_min_rtt = t; 7991 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7992 rack->r_ctl.rc_rack_min_rtt = 1; 7993 } 7994 } 7995 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7996 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7997 else 7998 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7999 if (us_rtt == 0) 8000 us_rtt = 1; 8001 if (CC_ALGO(tp)->rttsample != NULL) { 8002 /* Kick the RTT to the CC */ 8003 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 8004 } 8005 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 8006 if (ack_type == SACKED) { 8007 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 8008 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 8009 } else { 8010 /* 8011 * We need to setup what our confidence 8012 * is in this ack. 
8013 * 8014 * If the rsm was app limited and it is 8015 * less than a mss in length (the end 8016 * of the send) then we have a gap. If we 8017 * were app limited but say we were sending 8018 * multiple MSS's then we are more confident 8019 * int it. 8020 * 8021 * When we are not app-limited then we see if 8022 * the rsm is being included in the current 8023 * measurement, we tell this by the app_limited_needs_set 8024 * flag. 8025 * 8026 * Note that being cwnd blocked is not applimited 8027 * as well as the pacing delay between packets which 8028 * are sending only 1 or 2 MSS's also will show up 8029 * in the RTT. We probably need to examine this algorithm 8030 * a bit more and enhance it to account for the delay 8031 * between rsm's. We could do that by saving off the 8032 * pacing delay of each rsm (in an rsm) and then 8033 * factoring that in somehow though for now I am 8034 * not sure how :) 8035 */ 8036 int calc_conf = 0; 8037 8038 if (rsm->r_flags & RACK_APP_LIMITED) { 8039 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8040 calc_conf = 0; 8041 else 8042 calc_conf = 1; 8043 } else if (rack->app_limited_needs_set == 0) { 8044 calc_conf = 1; 8045 } else { 8046 calc_conf = 0; 8047 } 8048 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8049 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8050 calc_conf, rsm, rsm->r_rtr_cnt); 8051 } 8052 if ((rsm->r_flags & RACK_TLP) && 8053 (!IN_FASTRECOVERY(tp->t_flags))) { 8054 /* Segment was a TLP and our retrans matched */ 8055 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8056 rack->r_ctl.rc_rsm_start = tp->snd_max; 8057 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8058 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8059 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 8060 } 8061 } 8062 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8063 /* New more recent rack_tmit_time */ 8064 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8065 rack->rc_rack_rtt = t; 8066 } 8067 return (1); 8068 } 8069 /* 8070 * We clear the soft/rxtshift since we got an ack. 8071 * There is no assurance we will call the commit() function 8072 * so we need to clear these to avoid incorrect handling. 8073 */ 8074 tp->t_rxtshift = 0; 8075 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8076 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8077 tp->t_softerror = 0; 8078 if (to && (to->to_flags & TOF_TS) && 8079 (ack_type == CUM_ACKED) && 8080 (to->to_tsecr) && 8081 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8082 /* 8083 * Now which timestamp does it match? In this block the ACK 8084 * must be coming from a previous transmission. 8085 */ 8086 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8087 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8088 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8089 if ((int)t <= 0) 8090 t = 1; 8091 if (CC_ALGO(tp)->rttsample != NULL) { 8092 /* 8093 * Kick the RTT to the CC, here 8094 * we lie a bit in that we know the 8095 * retransmission is correct even though 8096 * we retransmitted. This is because 8097 * we match the timestamps. 
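 *
 * Concretely: if the segment was first sent at t=1000us carrying
 * TSval A and retransmitted at t=31000us carrying TSval B, and the
 * ACK arrives at t=32000us, the sample is ambiguous without
 * timestamps (31000us vs 1000us).  Because to->to_tsecr tells us
 * which TSval the peer echoed, we can attribute the ACK to the
 * matching entry in r_tim_lastsent[] and hand the CC module an RTT
 * we actually believe.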
8098 */ 8099 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 8100 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 8101 else 8102 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 8103 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 8104 } 8105 if ((i + 1) < rsm->r_rtr_cnt) { 8106 /* 8107 * The peer ack'd from our previous 8108 * transmission. We have a spurious 8109 * retransmission and thus we dont 8110 * want to update our rack_rtt. 8111 * 8112 * Hmm should there be a CC revert here? 8113 * 8114 */ 8115 return (0); 8116 } 8117 if (!tp->t_rttlow || tp->t_rttlow > t) 8118 tp->t_rttlow = t; 8119 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8120 rack->r_ctl.rc_rack_min_rtt = t; 8121 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8122 rack->r_ctl.rc_rack_min_rtt = 1; 8123 } 8124 } 8125 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8126 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8127 /* New more recent rack_tmit_time */ 8128 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8129 rack->rc_rack_rtt = t; 8130 } 8131 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 8132 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 8133 rsm->r_rtr_cnt); 8134 return (1); 8135 } 8136 } 8137 goto ts_not_found; 8138 } else { 8139 /* 8140 * Ok its a SACK block that we retransmitted. or a windows 8141 * machine without timestamps. We can tell nothing from the 8142 * time-stamp since its not there or the time the peer last 8143 * recieved a segment that moved forward its cum-ack point. 8144 */ 8145 ts_not_found: 8146 i = rsm->r_rtr_cnt - 1; 8147 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8148 if ((int)t <= 0) 8149 t = 1; 8150 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8151 /* 8152 * We retransmitted and the ack came back in less 8153 * than the smallest rtt we have observed. We most 8154 * likely did an improper retransmit as outlined in 8155 * 6.2 Step 2 point 2 in the rack-draft so we 8156 * don't want to update our rack_rtt. We in 8157 * theory (in future) might want to think about reverting our 8158 * cwnd state but we won't for now. 8159 */ 8160 return (0); 8161 } else if (rack->r_ctl.rc_rack_min_rtt) { 8162 /* 8163 * We retransmitted it and the retransmit did the 8164 * job. 8165 */ 8166 if (!rack->r_ctl.rc_rack_min_rtt || 8167 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8168 rack->r_ctl.rc_rack_min_rtt = t; 8169 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8170 rack->r_ctl.rc_rack_min_rtt = 1; 8171 } 8172 } 8173 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8174 /* New more recent rack_tmit_time */ 8175 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8176 rack->rc_rack_rtt = t; 8177 } 8178 return (1); 8179 } 8180 } 8181 return (0); 8182 } 8183 8184 /* 8185 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
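 *
 * Example: if the time-ordered transmit map holds { S1, S2, S3, S4 }
 * and a SACK just covered S4, the walk below visits S3, S2 and S1,
 * setting RACK_SACK_PASSED on each, and stops early if it reaches an
 * entry that is already marked (anything older was handled by an
 * earlier call).  That flag is what later lets the RACK logic treat
 * S1..S3 as lost once the reordering window expires.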
8186 */ 8187 static void 8188 rack_log_sack_passed(struct tcpcb *tp, 8189 struct tcp_rack *rack, struct rack_sendmap *rsm) 8190 { 8191 struct rack_sendmap *nrsm; 8192 8193 nrsm = rsm; 8194 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8195 rack_head, r_tnext) { 8196 if (nrsm == rsm) { 8197 /* Skip orginal segment he is acked */ 8198 continue; 8199 } 8200 if (nrsm->r_flags & RACK_ACKED) { 8201 /* 8202 * Skip ack'd segments, though we 8203 * should not see these, since tmap 8204 * should not have ack'd segments. 8205 */ 8206 continue; 8207 } 8208 if (nrsm->r_flags & RACK_SACK_PASSED) { 8209 /* 8210 * We found one that is already marked 8211 * passed, we have been here before and 8212 * so all others below this are marked. 8213 */ 8214 break; 8215 } 8216 nrsm->r_flags |= RACK_SACK_PASSED; 8217 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8218 } 8219 } 8220 8221 static void 8222 rack_need_set_test(struct tcpcb *tp, 8223 struct tcp_rack *rack, 8224 struct rack_sendmap *rsm, 8225 tcp_seq th_ack, 8226 int line, 8227 int use_which) 8228 { 8229 8230 if ((tp->t_flags & TF_GPUTINPROG) && 8231 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8232 /* 8233 * We were app limited, and this ack 8234 * butts up or goes beyond the point where we want 8235 * to start our next measurement. We need 8236 * to record the new gput_ts as here and 8237 * possibly update the start sequence. 8238 */ 8239 uint32_t seq, ts; 8240 8241 if (rsm->r_rtr_cnt > 1) { 8242 /* 8243 * This is a retransmit, can we 8244 * really make any assessment at this 8245 * point? We are not really sure of 8246 * the timestamp, is it this or the 8247 * previous transmission? 8248 * 8249 * Lets wait for something better that 8250 * is not retransmitted. 8251 */ 8252 return; 8253 } 8254 seq = tp->gput_seq; 8255 ts = tp->gput_ts; 8256 rack->app_limited_needs_set = 0; 8257 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8258 /* Do we start at a new end? */ 8259 if ((use_which == RACK_USE_BEG) && 8260 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8261 /* 8262 * When we get an ACK that just eats 8263 * up some of the rsm, we set RACK_USE_BEG 8264 * since whats at r_start (i.e. th_ack) 8265 * is left unacked and thats where the 8266 * measurement not starts. 8267 */ 8268 tp->gput_seq = rsm->r_start; 8269 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8270 } 8271 if ((use_which == RACK_USE_END) && 8272 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8273 /* 8274 * We use the end when the cumack 8275 * is moving forward and completely 8276 * deleting the rsm passed so basically 8277 * r_end holds th_ack. 8278 * 8279 * For SACK's we also want to use the end 8280 * since this piece just got sacked and 8281 * we want to target anything after that 8282 * in our measurement. 8283 */ 8284 tp->gput_seq = rsm->r_end; 8285 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8286 } 8287 if (use_which == RACK_USE_END_OR_THACK) { 8288 /* 8289 * special case for ack moving forward, 8290 * not a sack, we need to move all the 8291 * way up to where this ack cum-ack moves 8292 * to. 8293 */ 8294 if (SEQ_GT(th_ack, rsm->r_end)) 8295 tp->gput_seq = th_ack; 8296 else 8297 tp->gput_seq = rsm->r_end; 8298 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8299 } 8300 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8301 /* 8302 * We moved beyond this guy's range, re-calculate 8303 * the new end point. 
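 *
 * For orientation, the measurement this bookkeeping feeds is, in
 * rough terms (the real code elsewhere in the stack also handles
 * app-limited periods, minimum window sizes and the like):
 *
 *	bytes = tp->gput_ack - tp->gput_seq;	// data covered
 *	usecs = ack_time - tp->gput_ts;		// elapsed time
 *	bw    = (bytes * 1000000) / usecs;	// bytes per second
 *
 * so keeping gput_seq/gput_ts honest when the ack point jumps around
 * is what keeps the bandwidth samples from being wildly off.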
8304 */ 8305 if (rack->rc_gp_filled == 0) { 8306 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8307 } else { 8308 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8309 } 8310 } 8311 /* 8312 * We are moving the goal post, we may be able to clear the 8313 * measure_saw_probe_rtt flag. 8314 */ 8315 if ((rack->in_probe_rtt == 0) && 8316 (rack->measure_saw_probe_rtt) && 8317 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8318 rack->measure_saw_probe_rtt = 0; 8319 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8320 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8321 if (rack->rc_gp_filled && 8322 ((tp->gput_ack - tp->gput_seq) < 8323 max(rc_init_window(rack), (MIN_GP_WIN * 8324 ctf_fixed_maxseg(tp))))) { 8325 uint32_t ideal_amount; 8326 8327 ideal_amount = rack_get_measure_window(tp, rack); 8328 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8329 /* 8330 * There is no sense of continuing this measurement 8331 * because its too small to gain us anything we 8332 * trust. Skip it and that way we can start a new 8333 * measurement quicker. 8334 */ 8335 tp->t_flags &= ~TF_GPUTINPROG; 8336 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8337 0, 0, 0, 6, __LINE__, NULL, 0); 8338 } else { 8339 /* 8340 * Reset the window further out. 8341 */ 8342 tp->gput_ack = tp->gput_seq + ideal_amount; 8343 } 8344 } 8345 } 8346 } 8347 8348 static inline int 8349 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8350 { 8351 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8352 /* Behind our TLP definition or right at */ 8353 return (0); 8354 } 8355 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8356 /* The start is beyond or right at our end of TLP definition */ 8357 return (0); 8358 } 8359 /* It has to be a sub-part of the original TLP recorded */ 8360 return (1); 8361 } 8362 8363 8364 static uint32_t 8365 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8366 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8367 { 8368 uint32_t start, end, changed = 0; 8369 struct rack_sendmap stack_map; 8370 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 8371 int32_t used_ref = 1; 8372 int moved = 0; 8373 8374 start = sack->start; 8375 end = sack->end; 8376 rsm = *prsm; 8377 memset(&fe, 0, sizeof(fe)); 8378 do_rest_ofb: 8379 if ((rsm == NULL) || 8380 (SEQ_LT(end, rsm->r_start)) || 8381 (SEQ_GEQ(start, rsm->r_end)) || 8382 (SEQ_LT(start, rsm->r_start))) { 8383 /* 8384 * We are not in the right spot, 8385 * find the correct spot in the tree. 8386 */ 8387 used_ref = 0; 8388 fe.r_start = start; 8389 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8390 moved++; 8391 } 8392 if (rsm == NULL) { 8393 /* TSNH */ 8394 goto out; 8395 } 8396 /* Ok we have an ACK for some piece of this rsm */ 8397 if (rsm->r_start != start) { 8398 if ((rsm->r_flags & RACK_ACKED) == 0) { 8399 /* 8400 * Before any splitting or hookery is 8401 * done is it a TLP of interest i.e. rxt? 8402 */ 8403 if ((rsm->r_flags & RACK_TLP) && 8404 (rsm->r_rtr_cnt > 1)) { 8405 /* 8406 * We are splitting a rxt TLP, check 8407 * if we need to save off the start/end 8408 */ 8409 if (rack->rc_last_tlp_acked_set && 8410 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8411 /* 8412 * We already turned this on since we are inside 8413 * the previous one was a partially sack now we 8414 * are getting another one (maybe all of it). 
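 *
 * Example: the TLP retransmission covered bytes 1000-1460.  An
 * earlier SACK reported 1000-1200 (which armed last_tlp_acked_*) and
 * this one reports 1200-1460; the checks below simply widen the
 * remembered range to the union, 1000-1460, so a later DSACK for any
 * piece of it is still attributed to the TLP rather than to
 * reordering.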
8415 * 8416 */ 8417 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8418 /* 8419 * Lets make sure we have all of it though. 8420 */ 8421 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8422 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8423 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8424 rack->r_ctl.last_tlp_acked_end); 8425 } 8426 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8427 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8428 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8429 rack->r_ctl.last_tlp_acked_end); 8430 } 8431 } else { 8432 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8433 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8434 rack->rc_last_tlp_past_cumack = 0; 8435 rack->rc_last_tlp_acked_set = 1; 8436 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8437 } 8438 } 8439 /** 8440 * Need to split this in two pieces the before and after, 8441 * the before remains in the map, the after must be 8442 * added. In other words we have: 8443 * rsm |--------------| 8444 * sackblk |-------> 8445 * rsm will become 8446 * rsm |---| 8447 * and nrsm will be the sacked piece 8448 * nrsm |----------| 8449 * 8450 * But before we start down that path lets 8451 * see if the sack spans over on top of 8452 * the next guy and it is already sacked. 8453 * 8454 */ 8455 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8456 if (next && (next->r_flags & RACK_ACKED) && 8457 SEQ_GEQ(end, next->r_start)) { 8458 /** 8459 * So the next one is already acked, and 8460 * we can thus by hookery use our stack_map 8461 * to reflect the piece being sacked and 8462 * then adjust the two tree entries moving 8463 * the start and ends around. So we start like: 8464 * rsm |------------| (not-acked) 8465 * next |-----------| (acked) 8466 * sackblk |--------> 8467 * We want to end like so: 8468 * rsm |------| (not-acked) 8469 * next |-----------------| (acked) 8470 * nrsm |-----| 8471 * Where nrsm is a temporary stack piece we 8472 * use to update all the gizmos. 8473 */ 8474 /* Copy up our fudge block */ 8475 nrsm = &stack_map; 8476 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8477 /* Now adjust our tree blocks */ 8478 rsm->r_end = start; 8479 next->r_start = start; 8480 /* Now we must adjust back where next->m is */ 8481 rack_setup_offset_for_rsm(rsm, next); 8482 8483 /* We don't need to adjust rsm, it did not change */ 8484 /* Clear out the dup ack count of the remainder */ 8485 rsm->r_dupack = 0; 8486 rsm->r_just_ret = 0; 8487 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8488 /* Now lets make sure our fudge block is right */ 8489 nrsm->r_start = start; 8490 /* Now lets update all the stats and such */ 8491 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8492 if (rack->app_limited_needs_set) 8493 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8494 changed += (nrsm->r_end - nrsm->r_start); 8495 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8496 if (nrsm->r_flags & RACK_SACK_PASSED) { 8497 counter_u64_add(rack_reorder_seen, 1); 8498 rack->r_ctl.rc_reorder_ts = cts; 8499 } 8500 /* 8501 * Now we want to go up from rsm (the 8502 * one left un-acked) to the next one 8503 * in the tmap. We do this so when 8504 * we walk backwards we include marking 8505 * sack-passed on rsm (The one passed in 8506 * is skipped since it is generally called 8507 * on something sacked before removing it 8508 * from the tmap). 
8509 */ 8510 if (rsm->r_in_tmap) { 8511 nrsm = TAILQ_NEXT(rsm, r_tnext); 8512 /* 8513 * Now that we have the next 8514 * one walk backwards from there. 8515 */ 8516 if (nrsm && nrsm->r_in_tmap) 8517 rack_log_sack_passed(tp, rack, nrsm); 8518 } 8519 /* Now are we done? */ 8520 if (SEQ_LT(end, next->r_end) || 8521 (end == next->r_end)) { 8522 /* Done with block */ 8523 goto out; 8524 } 8525 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8526 counter_u64_add(rack_sack_used_next_merge, 1); 8527 /* Postion for the next block */ 8528 start = next->r_end; 8529 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8530 if (rsm == NULL) 8531 goto out; 8532 } else { 8533 /** 8534 * We can't use any hookery here, so we 8535 * need to split the map. We enter like 8536 * so: 8537 * rsm |--------| 8538 * sackblk |-----> 8539 * We will add the new block nrsm and 8540 * that will be the new portion, and then 8541 * fall through after reseting rsm. So we 8542 * split and look like this: 8543 * rsm |----| 8544 * sackblk |-----> 8545 * nrsm |---| 8546 * We then fall through reseting 8547 * rsm to nrsm, so the next block 8548 * picks it up. 8549 */ 8550 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8551 if (nrsm == NULL) { 8552 /* 8553 * failed XXXrrs what can we do but loose the sack 8554 * info? 8555 */ 8556 goto out; 8557 } 8558 counter_u64_add(rack_sack_splits, 1); 8559 rack_clone_rsm(rack, nrsm, rsm, start); 8560 rsm->r_just_ret = 0; 8561 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8562 #ifdef INVARIANTS 8563 if (insret != NULL) { 8564 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8565 nrsm, insret, rack, rsm); 8566 } 8567 #endif 8568 if (rsm->r_in_tmap) { 8569 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8570 nrsm->r_in_tmap = 1; 8571 } 8572 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8573 rsm->r_flags &= (~RACK_HAS_FIN); 8574 /* Position us to point to the new nrsm that starts the sack blk */ 8575 rsm = nrsm; 8576 } 8577 } else { 8578 /* Already sacked this piece */ 8579 counter_u64_add(rack_sack_skipped_acked, 1); 8580 moved++; 8581 if (end == rsm->r_end) { 8582 /* Done with block */ 8583 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8584 goto out; 8585 } else if (SEQ_LT(end, rsm->r_end)) { 8586 /* A partial sack to a already sacked block */ 8587 moved++; 8588 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8589 goto out; 8590 } else { 8591 /* 8592 * The end goes beyond this guy 8593 * repostion the start to the 8594 * next block. 8595 */ 8596 start = rsm->r_end; 8597 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8598 if (rsm == NULL) 8599 goto out; 8600 } 8601 } 8602 } 8603 if (SEQ_GEQ(end, rsm->r_end)) { 8604 /** 8605 * The end of this block is either beyond this guy or right 8606 * at this guy. I.e.: 8607 * rsm --- |-----| 8608 * end |-----| 8609 * <or> 8610 * end |---------| 8611 */ 8612 if ((rsm->r_flags & RACK_ACKED) == 0) { 8613 /* 8614 * Is it a TLP of interest? 8615 */ 8616 if ((rsm->r_flags & RACK_TLP) && 8617 (rsm->r_rtr_cnt > 1)) { 8618 /* 8619 * We are splitting a rxt TLP, check 8620 * if we need to save off the start/end 8621 */ 8622 if (rack->rc_last_tlp_acked_set && 8623 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8624 /* 8625 * We already turned this on since we are inside 8626 * the previous one was a partially sack now we 8627 * are getting another one (maybe all of it). 
8628 */ 8629 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8630 /* 8631 * Lets make sure we have all of it though. 8632 */ 8633 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8634 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8635 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8636 rack->r_ctl.last_tlp_acked_end); 8637 } 8638 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8639 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8640 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8641 rack->r_ctl.last_tlp_acked_end); 8642 } 8643 } else { 8644 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8645 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8646 rack->rc_last_tlp_past_cumack = 0; 8647 rack->rc_last_tlp_acked_set = 1; 8648 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8649 } 8650 } 8651 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8652 changed += (rsm->r_end - rsm->r_start); 8653 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8654 if (rsm->r_in_tmap) /* should be true */ 8655 rack_log_sack_passed(tp, rack, rsm); 8656 /* Is Reordering occuring? */ 8657 if (rsm->r_flags & RACK_SACK_PASSED) { 8658 rsm->r_flags &= ~RACK_SACK_PASSED; 8659 counter_u64_add(rack_reorder_seen, 1); 8660 rack->r_ctl.rc_reorder_ts = cts; 8661 } 8662 if (rack->app_limited_needs_set) 8663 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8664 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8665 rsm->r_flags |= RACK_ACKED; 8666 if (rsm->r_in_tmap) { 8667 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8668 rsm->r_in_tmap = 0; 8669 } 8670 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8671 } else { 8672 counter_u64_add(rack_sack_skipped_acked, 1); 8673 moved++; 8674 } 8675 if (end == rsm->r_end) { 8676 /* This block only - done, setup for next */ 8677 goto out; 8678 } 8679 /* 8680 * There is more not coverend by this rsm move on 8681 * to the next block in the RB tree. 8682 */ 8683 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8684 start = rsm->r_end; 8685 rsm = nrsm; 8686 if (rsm == NULL) 8687 goto out; 8688 goto do_rest_ofb; 8689 } 8690 /** 8691 * The end of this sack block is smaller than 8692 * our rsm i.e.: 8693 * rsm --- |-----| 8694 * end |--| 8695 */ 8696 if ((rsm->r_flags & RACK_ACKED) == 0) { 8697 /* 8698 * Is it a TLP of interest? 8699 */ 8700 if ((rsm->r_flags & RACK_TLP) && 8701 (rsm->r_rtr_cnt > 1)) { 8702 /* 8703 * We are splitting a rxt TLP, check 8704 * if we need to save off the start/end 8705 */ 8706 if (rack->rc_last_tlp_acked_set && 8707 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8708 /* 8709 * We already turned this on since we are inside 8710 * the previous one was a partially sack now we 8711 * are getting another one (maybe all of it). 8712 */ 8713 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8714 /* 8715 * Lets make sure we have all of it though. 
8716 */ 8717 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8718 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8719 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8720 rack->r_ctl.last_tlp_acked_end); 8721 } 8722 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8723 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8724 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8725 rack->r_ctl.last_tlp_acked_end); 8726 } 8727 } else { 8728 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8729 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8730 rack->rc_last_tlp_past_cumack = 0; 8731 rack->rc_last_tlp_acked_set = 1; 8732 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8733 } 8734 } 8735 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8736 if (prev && 8737 (prev->r_flags & RACK_ACKED)) { 8738 /** 8739 * Goal, we want the right remainder of rsm to shrink 8740 * in place and span from (rsm->r_start = end) to rsm->r_end. 8741 * We want to expand prev to go all the way 8742 * to prev->r_end <- end. 8743 * so in the tree we have before: 8744 * prev |--------| (acked) 8745 * rsm |-------| (non-acked) 8746 * sackblk |-| 8747 * We churn it so we end up with 8748 * prev |----------| (acked) 8749 * rsm |-----| (non-acked) 8750 * nrsm |-| (temporary) 8751 * 8752 * Note if either prev/rsm is a TLP we don't 8753 * do this. 8754 */ 8755 nrsm = &stack_map; 8756 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8757 prev->r_end = end; 8758 rsm->r_start = end; 8759 /* Now adjust nrsm (stack copy) to be 8760 * the one that is the small 8761 * piece that was "sacked". 8762 */ 8763 nrsm->r_end = end; 8764 rsm->r_dupack = 0; 8765 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8766 /* 8767 * Now that the rsm has had its start moved forward 8768 * lets go ahead and get its new place in the world. 8769 */ 8770 rack_setup_offset_for_rsm(prev, rsm); 8771 /* 8772 * Now nrsm is our new little piece 8773 * that is acked (which was merged 8774 * to prev). Update the rtt and changed 8775 * based on that. Also check for reordering. 8776 */ 8777 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8778 if (rack->app_limited_needs_set) 8779 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8780 changed += (nrsm->r_end - nrsm->r_start); 8781 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8782 if (nrsm->r_flags & RACK_SACK_PASSED) { 8783 counter_u64_add(rack_reorder_seen, 1); 8784 rack->r_ctl.rc_reorder_ts = cts; 8785 } 8786 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8787 rsm = prev; 8788 counter_u64_add(rack_sack_used_prev_merge, 1); 8789 } else { 8790 /** 8791 * This is the case where our previous 8792 * block is not acked either, so we must 8793 * split the block in two. 8794 */ 8795 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8796 if (nrsm == NULL) { 8797 /* failed rrs what can we do but loose the sack info? */ 8798 goto out; 8799 } 8800 if ((rsm->r_flags & RACK_TLP) && 8801 (rsm->r_rtr_cnt > 1)) { 8802 /* 8803 * We are splitting a rxt TLP, check 8804 * if we need to save off the start/end 8805 */ 8806 if (rack->rc_last_tlp_acked_set && 8807 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8808 /* 8809 * We already turned this on since this block is inside 8810 * the previous one was a partially sack now we 8811 * are getting another one (maybe all of it). 
8812 */ 8813 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8814 /* 8815 * Lets make sure we have all of it though. 8816 */ 8817 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8818 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8819 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8820 rack->r_ctl.last_tlp_acked_end); 8821 } 8822 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8823 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8824 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8825 rack->r_ctl.last_tlp_acked_end); 8826 } 8827 } else { 8828 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8829 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8830 rack->rc_last_tlp_acked_set = 1; 8831 rack->rc_last_tlp_past_cumack = 0; 8832 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8833 } 8834 } 8835 /** 8836 * In this case nrsm becomes 8837 * nrsm->r_start = end; 8838 * nrsm->r_end = rsm->r_end; 8839 * which is un-acked. 8840 * <and> 8841 * rsm->r_end = nrsm->r_start; 8842 * i.e. the remaining un-acked 8843 * piece is left on the left 8844 * hand side. 8845 * 8846 * So we start like this 8847 * rsm |----------| (not acked) 8848 * sackblk |---| 8849 * build it so we have 8850 * rsm |---| (acked) 8851 * nrsm |------| (not acked) 8852 */ 8853 counter_u64_add(rack_sack_splits, 1); 8854 rack_clone_rsm(rack, nrsm, rsm, end); 8855 rsm->r_flags &= (~RACK_HAS_FIN); 8856 rsm->r_just_ret = 0; 8857 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8858 #ifdef INVARIANTS 8859 if (insret != NULL) { 8860 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8861 nrsm, insret, rack, rsm); 8862 } 8863 #endif 8864 if (rsm->r_in_tmap) { 8865 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8866 nrsm->r_in_tmap = 1; 8867 } 8868 nrsm->r_dupack = 0; 8869 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8870 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8871 changed += (rsm->r_end - rsm->r_start); 8872 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8873 if (rsm->r_in_tmap) /* should be true */ 8874 rack_log_sack_passed(tp, rack, rsm); 8875 /* Is Reordering occuring? */ 8876 if (rsm->r_flags & RACK_SACK_PASSED) { 8877 rsm->r_flags &= ~RACK_SACK_PASSED; 8878 counter_u64_add(rack_reorder_seen, 1); 8879 rack->r_ctl.rc_reorder_ts = cts; 8880 } 8881 if (rack->app_limited_needs_set) 8882 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8883 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8884 rsm->r_flags |= RACK_ACKED; 8885 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8886 if (rsm->r_in_tmap) { 8887 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8888 rsm->r_in_tmap = 0; 8889 } 8890 } 8891 } else if (start != end){ 8892 /* 8893 * The block was already acked. 8894 */ 8895 counter_u64_add(rack_sack_skipped_acked, 1); 8896 moved++; 8897 } 8898 out: 8899 if (rsm && 8900 ((rsm->r_flags & RACK_TLP) == 0) && 8901 (rsm->r_flags & RACK_ACKED)) { 8902 /* 8903 * Now can we merge where we worked 8904 * with either the previous or 8905 * next block? 
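 *
 * Example: if the tree now holds [1000,2000) acked, [2000,3000)
 * acked and [3000,4000) still outstanding, the two acked neighbors
 * collapse into a single [1000,3000) entry.  Keeping one map entry
 * per contiguous sacked range bounds the size of the tree and the
 * cost of later walks; TLP-marked entries are deliberately left
 * alone so their DSACK bookkeeping survives.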
8906 */ 8907 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8908 while (next) { 8909 if (next->r_flags & RACK_TLP) 8910 break; 8911 if (next->r_flags & RACK_ACKED) { 8912 /* yep this and next can be merged */ 8913 rsm = rack_merge_rsm(rack, rsm, next); 8914 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8915 } else 8916 break; 8917 } 8918 /* Now what about the previous? */ 8919 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8920 while (prev) { 8921 if (prev->r_flags & RACK_TLP) 8922 break; 8923 if (prev->r_flags & RACK_ACKED) { 8924 /* yep the previous and this can be merged */ 8925 rsm = rack_merge_rsm(rack, prev, rsm); 8926 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8927 } else 8928 break; 8929 } 8930 } 8931 if (used_ref == 0) { 8932 counter_u64_add(rack_sack_proc_all, 1); 8933 } else { 8934 counter_u64_add(rack_sack_proc_short, 1); 8935 } 8936 /* Save off the next one for quick reference. */ 8937 if (rsm) 8938 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8939 else 8940 nrsm = NULL; 8941 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8942 /* Pass back the moved. */ 8943 *moved_two = moved; 8944 return (changed); 8945 } 8946 8947 static void inline 8948 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8949 { 8950 struct rack_sendmap *tmap; 8951 8952 tmap = NULL; 8953 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8954 /* Its no longer sacked, mark it so */ 8955 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8956 #ifdef INVARIANTS 8957 if (rsm->r_in_tmap) { 8958 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8959 rack, rsm, rsm->r_flags); 8960 } 8961 #endif 8962 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8963 /* Rebuild it into our tmap */ 8964 if (tmap == NULL) { 8965 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8966 tmap = rsm; 8967 } else { 8968 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8969 tmap = rsm; 8970 } 8971 tmap->r_in_tmap = 1; 8972 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8973 } 8974 /* 8975 * Now lets possibly clear the sack filter so we start 8976 * recognizing sacks that cover this area. 8977 */ 8978 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8979 8980 } 8981 8982 static void 8983 rack_do_decay(struct tcp_rack *rack) 8984 { 8985 struct timeval res; 8986 8987 #define timersub(tvp, uvp, vvp) \ 8988 do { \ 8989 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8990 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8991 if ((vvp)->tv_usec < 0) { \ 8992 (vvp)->tv_sec--; \ 8993 (vvp)->tv_usec += 1000000; \ 8994 } \ 8995 } while (0) 8996 8997 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8998 #undef timersub 8999 9000 rack->r_ctl.input_pkt++; 9001 if ((rack->rc_in_persist) || 9002 (res.tv_sec >= 1) || 9003 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 9004 /* 9005 * Check for decay of non-SAD, 9006 * we want all SAD detection metrics to 9007 * decay 1/4 per second (or more) passed. 9008 */ 9009 uint32_t pkt_delta; 9010 9011 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 9012 /* Update our saved tracking values */ 9013 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 9014 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 9015 /* Now do we escape without decay? 
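 * (idle connections and very low packet-per-second flows are skipped
 * just below).  For everything else the counters feeding the
 * SACK-attack heuristics (ack_count, sack_count and the two move
 * counters) are scaled down by ctf_decay_count() at most once a
 * second, so suspicious-looking bursts of SACKs from long ago slowly
 * stop influencing the ratios that rack_do_detection() compares
 * against its thresholds.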
*/ 9016 #ifdef NETFLIX_EXP_DETECTION 9017 if (rack->rc_in_persist || 9018 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 9019 (pkt_delta < tcp_sad_low_pps)){ 9020 /* 9021 * We don't decay idle connections 9022 * or ones that have a low input pps. 9023 */ 9024 return; 9025 } 9026 /* Decay the counters */ 9027 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 9028 tcp_sad_decay_val); 9029 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 9030 tcp_sad_decay_val); 9031 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 9032 tcp_sad_decay_val); 9033 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 9034 tcp_sad_decay_val); 9035 #endif 9036 } 9037 } 9038 9039 static void 9040 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 9041 { 9042 struct rack_sendmap *rsm, *rm; 9043 9044 /* 9045 * The ACK point is advancing to th_ack, we must drop off 9046 * the packets in the rack log and calculate any eligble 9047 * RTT's. 9048 */ 9049 rack->r_wanted_output = 1; 9050 9051 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 9052 if ((rack->rc_last_tlp_acked_set == 1)&& 9053 (rack->rc_last_tlp_past_cumack == 1) && 9054 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 9055 /* 9056 * We have reached the point where our last rack 9057 * tlp retransmit sequence is ahead of the cum-ack. 9058 * This can only happen when the cum-ack moves all 9059 * the way around (its been a full 2^^31+1 bytes 9060 * or more since we sent a retransmitted TLP). Lets 9061 * turn off the valid flag since its not really valid. 9062 * 9063 * Note since sack's also turn on this event we have 9064 * a complication, we have to wait to age it out until 9065 * the cum-ack is by the TLP before checking which is 9066 * what the next else clause does. 9067 */ 9068 rack_log_dsack_event(rack, 9, __LINE__, 9069 rack->r_ctl.last_tlp_acked_start, 9070 rack->r_ctl.last_tlp_acked_end); 9071 rack->rc_last_tlp_acked_set = 0; 9072 rack->rc_last_tlp_past_cumack = 0; 9073 } else if ((rack->rc_last_tlp_acked_set == 1) && 9074 (rack->rc_last_tlp_past_cumack == 0) && 9075 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 9076 /* 9077 * It is safe to start aging TLP's out. 9078 */ 9079 rack->rc_last_tlp_past_cumack = 1; 9080 } 9081 /* We do the same for the tlp send seq as well */ 9082 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9083 (rack->rc_last_sent_tlp_past_cumack == 1) && 9084 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 9085 rack_log_dsack_event(rack, 9, __LINE__, 9086 rack->r_ctl.last_sent_tlp_seq, 9087 (rack->r_ctl.last_sent_tlp_seq + 9088 rack->r_ctl.last_sent_tlp_len)); 9089 rack->rc_last_sent_tlp_seq_valid = 0; 9090 rack->rc_last_sent_tlp_past_cumack = 0; 9091 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9092 (rack->rc_last_sent_tlp_past_cumack == 0) && 9093 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 9094 /* 9095 * It is safe to start aging TLP's send. 9096 */ 9097 rack->rc_last_sent_tlp_past_cumack = 1; 9098 } 9099 more: 9100 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9101 if (rsm == NULL) { 9102 if ((th_ack - 1) == tp->iss) { 9103 /* 9104 * For the SYN incoming case we will not 9105 * have called tcp_output for the sending of 9106 * the SYN, so there will be no map. All 9107 * other cases should probably be a panic. 
9108 */ 9109 return; 9110 } 9111 if (tp->t_flags & TF_SENTFIN) { 9112 /* if we sent a FIN we often will not have map */ 9113 return; 9114 } 9115 #ifdef INVARIANTS 9116 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 9117 tp, 9118 tp->t_state, th_ack, rack, 9119 tp->snd_una, tp->snd_max, tp->snd_nxt); 9120 #endif 9121 return; 9122 } 9123 if (SEQ_LT(th_ack, rsm->r_start)) { 9124 /* Huh map is missing this */ 9125 #ifdef INVARIANTS 9126 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9127 rsm->r_start, 9128 th_ack, tp->t_state, rack->r_state); 9129 #endif 9130 return; 9131 } 9132 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9133 9134 /* Now was it a retransmitted TLP? */ 9135 if ((rsm->r_flags & RACK_TLP) && 9136 (rsm->r_rtr_cnt > 1)) { 9137 /* 9138 * Yes, this rsm was a TLP and retransmitted, remember that 9139 * since if a DSACK comes back on this we don't want 9140 * to think of it as a reordered segment. This may 9141 * get updated again with possibly even other TLPs 9142 * in flight, but thats ok. Only when we don't send 9143 * a retransmitted TLP for 1/2 the sequences space 9144 * will it get turned off (above). 9145 */ 9146 if (rack->rc_last_tlp_acked_set && 9147 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9148 /* 9149 * We already turned this on since the end matches, 9150 * the previous one was a partially ack now we 9151 * are getting another one (maybe all of it). 9152 */ 9153 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9154 /* 9155 * Lets make sure we have all of it though. 9156 */ 9157 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9158 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9159 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9160 rack->r_ctl.last_tlp_acked_end); 9161 } 9162 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9163 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9164 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9165 rack->r_ctl.last_tlp_acked_end); 9166 } 9167 } else { 9168 rack->rc_last_tlp_past_cumack = 1; 9169 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9170 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9171 rack->rc_last_tlp_acked_set = 1; 9172 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9173 } 9174 } 9175 /* Now do we consume the whole thing? */ 9176 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9177 /* Its all consumed. */ 9178 uint32_t left; 9179 uint8_t newly_acked; 9180 9181 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9182 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9183 rsm->r_rtr_bytes = 0; 9184 /* Record the time of highest cumack sent */ 9185 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9186 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9187 #ifdef INVARIANTS 9188 if (rm != rsm) { 9189 panic("removing head in rack:%p rsm:%p rm:%p", 9190 rack, rsm, rm); 9191 } 9192 #endif 9193 if (rsm->r_in_tmap) { 9194 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9195 rsm->r_in_tmap = 0; 9196 } 9197 newly_acked = 1; 9198 if (rsm->r_flags & RACK_ACKED) { 9199 /* 9200 * It was acked on the scoreboard -- remove 9201 * it from total 9202 */ 9203 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9204 newly_acked = 0; 9205 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9206 /* 9207 * There are segments ACKED on the 9208 * scoreboard further up. We are seeing 9209 * reordering. 
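 *
 * Example: S1, S2 and S3 were sent in that order, SACKs for S2 and
 * S3 arrived first (marking S1 SACK_PASSED), and only now does the
 * cumulative ACK covering S1 show up.  S1 was never lost, merely
 * delayed, so we note a reordering event, remember when it happened
 * in rc_reorder_ts and, if nothing has been sent since recovery was
 * entered, flag r_might_revert so a spurious congestion reaction can
 * be undone.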
9210 */ 9211 rsm->r_flags &= ~RACK_SACK_PASSED; 9212 counter_u64_add(rack_reorder_seen, 1); 9213 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9214 rsm->r_flags |= RACK_ACKED; 9215 rack->r_ctl.rc_reorder_ts = cts; 9216 if (rack->r_ent_rec_ns) { 9217 /* 9218 * We have sent no more, and we saw an sack 9219 * then ack arrive. 9220 */ 9221 rack->r_might_revert = 1; 9222 } 9223 } 9224 if ((rsm->r_flags & RACK_TO_REXT) && 9225 (tp->t_flags & TF_RCVD_TSTMP) && 9226 (to->to_flags & TOF_TS) && 9227 (to->to_tsecr != 0) && 9228 (tp->t_flags & TF_PREVVALID)) { 9229 /* 9230 * We can use the timestamp to see 9231 * if this retransmission was from the 9232 * first transmit. If so we made a mistake. 9233 */ 9234 tp->t_flags &= ~TF_PREVVALID; 9235 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9236 /* The first transmit is what this ack is for */ 9237 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 9238 } 9239 } 9240 left = th_ack - rsm->r_end; 9241 if (rack->app_limited_needs_set && newly_acked) 9242 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9243 /* Free back to zone */ 9244 rack_free(rack, rsm); 9245 if (left) { 9246 goto more; 9247 } 9248 /* Check for reneging */ 9249 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9250 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9251 /* 9252 * The peer has moved snd_una up to 9253 * the edge of this send, i.e. one 9254 * that it had previously acked. The only 9255 * way that can be true if the peer threw 9256 * away data (space issues) that it had 9257 * previously sacked (else it would have 9258 * given us snd_una up to (rsm->r_end). 9259 * We need to undo the acked markings here. 9260 * 9261 * Note we have to look to make sure th_ack is 9262 * our rsm->r_start in case we get an old ack 9263 * where th_ack is behind snd_una. 9264 */ 9265 rack_peer_reneges(rack, rsm, th_ack); 9266 } 9267 return; 9268 } 9269 if (rsm->r_flags & RACK_ACKED) { 9270 /* 9271 * It was acked on the scoreboard -- remove it from 9272 * total for the part being cum-acked. 9273 */ 9274 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9275 } 9276 /* 9277 * Clear the dup ack count for 9278 * the piece that remains. 9279 */ 9280 rsm->r_dupack = 0; 9281 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9282 if (rsm->r_rtr_bytes) { 9283 /* 9284 * It was retransmitted adjust the 9285 * sack holes for what was acked. 9286 */ 9287 int ack_am; 9288 9289 ack_am = (th_ack - rsm->r_start); 9290 if (ack_am >= rsm->r_rtr_bytes) { 9291 rack->r_ctl.rc_holes_rxt -= ack_am; 9292 rsm->r_rtr_bytes -= ack_am; 9293 } 9294 } 9295 /* 9296 * Update where the piece starts and record 9297 * the time of send of highest cumack sent. 9298 */ 9299 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9300 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9301 /* Now we need to move our offset forward too */ 9302 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9303 /* Fix up the orig_m_len and possibly the mbuf offset */ 9304 rack_adjust_orig_mlen(rsm); 9305 } 9306 rsm->soff += (th_ack - rsm->r_start); 9307 rsm->r_start = th_ack; 9308 /* Now do we need to move the mbuf fwd too? 
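 * Example: after the trim above soff might be 3000 while the current
 * mbuf only holds m_len = 2048 bytes; the loop below hops to m_next,
 * leaves soff = 952 within it and re-snapshots orig_m_len so a later
 * change to the mbuf (e.g. sbcompress() appending data) can be
 * detected.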
*/ 9309 if (rsm->m) { 9310 while (rsm->soff >= rsm->m->m_len) { 9311 rsm->soff -= rsm->m->m_len; 9312 rsm->m = rsm->m->m_next; 9313 KASSERT((rsm->m != NULL), 9314 (" nrsm:%p hit at soff:%u null m", 9315 rsm, rsm->soff)); 9316 } 9317 rsm->orig_m_len = rsm->m->m_len; 9318 } 9319 if (rack->app_limited_needs_set) 9320 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9321 } 9322 9323 static void 9324 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9325 { 9326 struct rack_sendmap *rsm; 9327 int sack_pass_fnd = 0; 9328 9329 if (rack->r_might_revert) { 9330 /* 9331 * Ok we have reordering, have not sent anything, we 9332 * might want to revert the congestion state if nothing 9333 * further has SACK_PASSED on it. Lets check. 9334 * 9335 * We also get here when we have DSACKs come in for 9336 * all the data that we FR'd. Note that a rxt or tlp 9337 * timer clears this from happening. 9338 */ 9339 9340 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9341 if (rsm->r_flags & RACK_SACK_PASSED) { 9342 sack_pass_fnd = 1; 9343 break; 9344 } 9345 } 9346 if (sack_pass_fnd == 0) { 9347 /* 9348 * We went into recovery 9349 * incorrectly due to reordering! 9350 */ 9351 int orig_cwnd; 9352 9353 rack->r_ent_rec_ns = 0; 9354 orig_cwnd = tp->snd_cwnd; 9355 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec; 9356 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9357 tp->snd_recover = tp->snd_una; 9358 rack_log_to_prr(rack, 14, orig_cwnd); 9359 EXIT_RECOVERY(tp->t_flags); 9360 } 9361 rack->r_might_revert = 0; 9362 } 9363 } 9364 9365 #ifdef NETFLIX_EXP_DETECTION 9366 static void 9367 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9368 { 9369 if ((rack->do_detection || tcp_force_detection) && 9370 tcp_sack_to_ack_thresh && 9371 tcp_sack_to_move_thresh && 9372 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9373 /* 9374 * We have thresholds set to find 9375 * possible attackers and disable sack. 9376 * Check them. 9377 */ 9378 uint64_t ackratio, moveratio, movetotal; 9379 9380 /* Log detecting */ 9381 rack_log_sad(rack, 1); 9382 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9383 ackratio *= (uint64_t)(1000); 9384 if (rack->r_ctl.ack_count) 9385 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9386 else { 9387 /* We really should not hit here */ 9388 ackratio = 1000; 9389 } 9390 if ((rack->sack_attack_disable == 0) && 9391 (ackratio > rack_highest_sack_thresh_seen)) 9392 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9393 movetotal = rack->r_ctl.sack_moved_extra; 9394 movetotal += rack->r_ctl.sack_noextra_move; 9395 moveratio = rack->r_ctl.sack_moved_extra; 9396 moveratio *= (uint64_t)1000; 9397 if (movetotal) 9398 moveratio /= movetotal; 9399 else { 9400 /* No moves, thats pretty good */ 9401 moveratio = 0; 9402 } 9403 if ((rack->sack_attack_disable == 0) && 9404 (moveratio > rack_highest_move_thresh_seen)) 9405 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9406 if (rack->sack_attack_disable == 0) { 9407 if ((ackratio > tcp_sack_to_ack_thresh) && 9408 (moveratio > tcp_sack_to_move_thresh)) { 9409 /* Disable sack processing */ 9410 rack->sack_attack_disable = 1; 9411 if (rack->r_rep_attack == 0) { 9412 rack->r_rep_attack = 1; 9413 counter_u64_add(rack_sack_attacks_detected, 1); 9414 } 9415 if (tcp_attack_on_turns_on_logging) { 9416 /* 9417 * Turn on logging, used for debugging 9418 * false positives. 
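 *
 * For scale: the ratios tested above are in tenths of a percent, so
 * 900 SACK blocks against 300 counted ACKs gives an ackratio of
 * 900 * 1000 / 300 = 3000, and 450 "extra move" events out of 500
 * total moves gives a moveratio of 900; only when both exceed their
 * sysctl thresholds does a connection land here, with SACK
 * processing disabled and the cwnd clamped to the current flight.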
9419 */ 9420 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9421 } 9422 /* Clamp the cwnd at flight size */ 9423 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9424 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9425 rack_log_sad(rack, 2); 9426 } 9427 } else { 9428 /* We are sack-disabled check for false positives */ 9429 if ((ackratio <= tcp_restoral_thresh) || 9430 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9431 rack->sack_attack_disable = 0; 9432 rack_log_sad(rack, 3); 9433 /* Restart counting */ 9434 rack->r_ctl.sack_count = 0; 9435 rack->r_ctl.sack_moved_extra = 0; 9436 rack->r_ctl.sack_noextra_move = 1; 9437 rack->r_ctl.ack_count = max(1, 9438 (bytes_this_ack / segsiz)); 9439 9440 if (rack->r_rep_reverse == 0) { 9441 rack->r_rep_reverse = 1; 9442 counter_u64_add(rack_sack_attacks_reversed, 1); 9443 } 9444 /* Restore the cwnd */ 9445 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9446 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9447 } 9448 } 9449 } 9450 } 9451 #endif 9452 9453 static int 9454 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9455 { 9456 9457 uint32_t am, l_end; 9458 int was_tlp = 0; 9459 9460 if (SEQ_GT(end, start)) 9461 am = end - start; 9462 else 9463 am = 0; 9464 if ((rack->rc_last_tlp_acked_set ) && 9465 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9466 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9467 /* 9468 * The DSACK is because of a TLP which we don't 9469 * do anything with the reordering window over since 9470 * it was not reordering that caused the DSACK but 9471 * our previous retransmit TLP. 9472 */ 9473 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9474 was_tlp = 1; 9475 goto skip_dsack_round; 9476 } 9477 if (rack->rc_last_sent_tlp_seq_valid) { 9478 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9479 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9480 (SEQ_LEQ(end, l_end))) { 9481 /* 9482 * This dsack is from the last sent TLP, ignore it 9483 * for reordering purposes. 9484 */ 9485 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9486 was_tlp = 1; 9487 goto skip_dsack_round; 9488 } 9489 } 9490 if (rack->rc_dsack_round_seen == 0) { 9491 rack->rc_dsack_round_seen = 1; 9492 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9493 rack->r_ctl.num_dsack++; 9494 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9495 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9496 } 9497 skip_dsack_round: 9498 /* 9499 * We keep track of how many DSACK blocks we get 9500 * after a recovery incident. 9501 */ 9502 rack->r_ctl.dsack_byte_cnt += am; 9503 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9504 rack->r_ctl.retran_during_recovery && 9505 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9506 /* 9507 * False recovery most likely culprit is reordering. If 9508 * nothing else is missing we need to revert. 
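 *
 * Example: recovery retransmitted 3000 bytes and DSACKs have since
 * reported at least 3000 duplicate bytes; everything we resent had
 * already reached the receiver, so the recovery was almost certainly
 * spurious.  rack_handle_might_revert() will then restore the cwnd
 * and ssthresh saved when recovery was entered, provided nothing
 * outstanding is still marked SACK_PASSED.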
9509 */ 9510 rack->r_might_revert = 1; 9511 rack_handle_might_revert(rack->rc_tp, rack); 9512 rack->r_might_revert = 0; 9513 rack->r_ctl.retran_during_recovery = 0; 9514 rack->r_ctl.dsack_byte_cnt = 0; 9515 } 9516 return (was_tlp); 9517 } 9518 9519 static void 9520 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9521 { 9522 /* Deal with changed and PRR here (in recovery only) */ 9523 uint32_t pipe, snd_una; 9524 9525 rack->r_ctl.rc_prr_delivered += changed; 9526 9527 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9528 /* 9529 * It is all outstanding, we are application limited 9530 * and thus we don't need more room to send anything. 9531 * Note we use tp->snd_una here and not th_ack because 9532 * the data as yet not been cut from the sb. 9533 */ 9534 rack->r_ctl.rc_prr_sndcnt = 0; 9535 return; 9536 } 9537 /* Compute prr_sndcnt */ 9538 if (SEQ_GT(tp->snd_una, th_ack)) { 9539 snd_una = tp->snd_una; 9540 } else { 9541 snd_una = th_ack; 9542 } 9543 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9544 if (pipe > tp->snd_ssthresh) { 9545 long sndcnt; 9546 9547 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9548 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9549 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9550 else { 9551 rack->r_ctl.rc_prr_sndcnt = 0; 9552 rack_log_to_prr(rack, 9, 0); 9553 sndcnt = 0; 9554 } 9555 sndcnt++; 9556 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9557 sndcnt -= rack->r_ctl.rc_prr_out; 9558 else 9559 sndcnt = 0; 9560 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9561 rack_log_to_prr(rack, 10, 0); 9562 } else { 9563 uint32_t limit; 9564 9565 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9566 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9567 else 9568 limit = 0; 9569 if (changed > limit) 9570 limit = changed; 9571 limit += ctf_fixed_maxseg(tp); 9572 if (tp->snd_ssthresh > pipe) { 9573 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9574 rack_log_to_prr(rack, 11, 0); 9575 } else { 9576 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9577 rack_log_to_prr(rack, 12, 0); 9578 } 9579 } 9580 } 9581 9582 static void 9583 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9584 { 9585 uint32_t changed; 9586 struct tcp_rack *rack; 9587 struct rack_sendmap *rsm; 9588 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9589 register uint32_t th_ack; 9590 int32_t i, j, k, num_sack_blks = 0; 9591 uint32_t cts, acked, ack_point, sack_changed = 0; 9592 int loop_start = 0, moved_two = 0; 9593 uint32_t tsused; 9594 9595 9596 INP_WLOCK_ASSERT(tp->t_inpcb); 9597 if (th->th_flags & TH_RST) { 9598 /* We don't log resets */ 9599 return; 9600 } 9601 rack = (struct tcp_rack *)tp->t_fb_ptr; 9602 cts = tcp_get_usecs(NULL); 9603 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9604 changed = 0; 9605 th_ack = th->th_ack; 9606 if (rack->sack_attack_disable == 0) 9607 rack_do_decay(rack); 9608 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9609 /* 9610 * You only get credit for 9611 * MSS and greater (and you get extra 9612 * credit for larger cum-ack moves). 9613 */ 9614 int ac; 9615 9616 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9617 rack->r_ctl.ack_count += ac; 9618 counter_u64_add(rack_ack_total, ac); 9619 } 9620 if (rack->r_ctl.ack_count > 0xfff00000) { 9621 /* 9622 * reduce the number to keep us under 9623 * a uint32_t. 
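 *
 * Halving ack_count and sack_count together preserves their ratio,
 * which is what the SACK-attack detection actually compares, while
 * keeping the raw counters comfortably inside a uint32_t.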
9624 */ 9625 rack->r_ctl.ack_count /= 2; 9626 rack->r_ctl.sack_count /= 2; 9627 } 9628 if (SEQ_GT(th_ack, tp->snd_una)) { 9629 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9630 tp->t_acktime = ticks; 9631 } 9632 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9633 changed = th_ack - rsm->r_start; 9634 if (changed) { 9635 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9636 } 9637 if ((to->to_flags & TOF_SACK) == 0) { 9638 /* We are done nothing left and no sack. */ 9639 rack_handle_might_revert(tp, rack); 9640 /* 9641 * For cases where we struck a dup-ack 9642 * with no SACK, add to the changes so 9643 * PRR will work right. 9644 */ 9645 if (dup_ack_struck && (changed == 0)) { 9646 changed += ctf_fixed_maxseg(rack->rc_tp); 9647 } 9648 goto out; 9649 } 9650 /* Sack block processing */ 9651 if (SEQ_GT(th_ack, tp->snd_una)) 9652 ack_point = th_ack; 9653 else 9654 ack_point = tp->snd_una; 9655 for (i = 0; i < to->to_nsacks; i++) { 9656 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9657 &sack, sizeof(sack)); 9658 sack.start = ntohl(sack.start); 9659 sack.end = ntohl(sack.end); 9660 if (SEQ_GT(sack.end, sack.start) && 9661 SEQ_GT(sack.start, ack_point) && 9662 SEQ_LT(sack.start, tp->snd_max) && 9663 SEQ_GT(sack.end, ack_point) && 9664 SEQ_LEQ(sack.end, tp->snd_max)) { 9665 sack_blocks[num_sack_blks] = sack; 9666 num_sack_blks++; 9667 } else if (SEQ_LEQ(sack.start, th_ack) && 9668 SEQ_LEQ(sack.end, th_ack)) { 9669 int was_tlp; 9670 9671 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9672 /* 9673 * Its a D-SACK block. 9674 */ 9675 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9676 } 9677 } 9678 if (rack->rc_dsack_round_seen) { 9679 /* Is the dsack roound over? */ 9680 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9681 /* Yes it is */ 9682 rack->rc_dsack_round_seen = 0; 9683 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9684 } 9685 } 9686 /* 9687 * Sort the SACK blocks so we can update the rack scoreboard with 9688 * just one pass. 9689 */ 9690 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9691 num_sack_blks, th->th_ack); 9692 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9693 if (num_sack_blks == 0) { 9694 /* Nothing to sack (DSACKs?) */ 9695 goto out_with_totals; 9696 } 9697 if (num_sack_blks < 2) { 9698 /* Only one, we don't need to sort */ 9699 goto do_sack_work; 9700 } 9701 /* Sort the sacks */ 9702 for (i = 0; i < num_sack_blks; i++) { 9703 for (j = i + 1; j < num_sack_blks; j++) { 9704 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9705 sack = sack_blocks[i]; 9706 sack_blocks[i] = sack_blocks[j]; 9707 sack_blocks[j] = sack; 9708 } 9709 } 9710 } 9711 /* 9712 * Now are any of the sack block ends the same (yes some 9713 * implementations send these)? 9714 */ 9715 again: 9716 if (num_sack_blks == 0) 9717 goto out_with_totals; 9718 if (num_sack_blks > 1) { 9719 for (i = 0; i < num_sack_blks; i++) { 9720 for (j = i + 1; j < num_sack_blks; j++) { 9721 if (sack_blocks[i].end == sack_blocks[j].end) { 9722 /* 9723 * Ok these two have the same end we 9724 * want the smallest end and then 9725 * throw away the larger and start 9726 * again. 
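 *
 * Example: blocks [100,300) and [200,300) share an end; keep the
 * wider start (100) in the surviving block, slide the remaining
 * blocks down over the duplicate, drop num_sack_blks by one and
 * rescan from the top.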
9727 */ 9728 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9729 /* 9730 * The second block covers 9731 * more area use that 9732 */ 9733 sack_blocks[i].start = sack_blocks[j].start; 9734 } 9735 /* 9736 * Now collapse out the dup-sack and 9737 * lower the count 9738 */ 9739 for (k = (j + 1); k < num_sack_blks; k++) { 9740 sack_blocks[j].start = sack_blocks[k].start; 9741 sack_blocks[j].end = sack_blocks[k].end; 9742 j++; 9743 } 9744 num_sack_blks--; 9745 goto again; 9746 } 9747 } 9748 } 9749 } 9750 do_sack_work: 9751 /* 9752 * First lets look to see if 9753 * we have retransmitted and 9754 * can use the transmit next? 9755 */ 9756 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9757 if (rsm && 9758 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9759 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9760 /* 9761 * We probably did the FR and the next 9762 * SACK in continues as we would expect. 9763 */ 9764 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9765 if (acked) { 9766 rack->r_wanted_output = 1; 9767 changed += acked; 9768 sack_changed += acked; 9769 } 9770 if (num_sack_blks == 1) { 9771 /* 9772 * This is what we would expect from 9773 * a normal implementation to happen 9774 * after we have retransmitted the FR, 9775 * i.e the sack-filter pushes down 9776 * to 1 block and the next to be retransmitted 9777 * is the sequence in the sack block (has more 9778 * are acked). Count this as ACK'd data to boost 9779 * up the chances of recovering any false positives. 9780 */ 9781 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9782 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9783 counter_u64_add(rack_express_sack, 1); 9784 if (rack->r_ctl.ack_count > 0xfff00000) { 9785 /* 9786 * reduce the number to keep us under 9787 * a uint32_t. 9788 */ 9789 rack->r_ctl.ack_count /= 2; 9790 rack->r_ctl.sack_count /= 2; 9791 } 9792 goto out_with_totals; 9793 } else { 9794 /* 9795 * Start the loop through the 9796 * rest of blocks, past the first block. 9797 */ 9798 moved_two = 0; 9799 loop_start = 1; 9800 } 9801 } 9802 /* Its a sack of some sort */ 9803 rack->r_ctl.sack_count++; 9804 if (rack->r_ctl.sack_count > 0xfff00000) { 9805 /* 9806 * reduce the number to keep us under 9807 * a uint32_t. 9808 */ 9809 rack->r_ctl.ack_count /= 2; 9810 rack->r_ctl.sack_count /= 2; 9811 } 9812 counter_u64_add(rack_sack_total, 1); 9813 if (rack->sack_attack_disable) { 9814 /* An attacker disablement is in place */ 9815 if (num_sack_blks > 1) { 9816 rack->r_ctl.sack_count += (num_sack_blks - 1); 9817 rack->r_ctl.sack_moved_extra++; 9818 counter_u64_add(rack_move_some, 1); 9819 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9820 rack->r_ctl.sack_moved_extra /= 2; 9821 rack->r_ctl.sack_noextra_move /= 2; 9822 } 9823 } 9824 goto out; 9825 } 9826 rsm = rack->r_ctl.rc_sacklast; 9827 for (i = loop_start; i < num_sack_blks; i++) { 9828 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9829 if (acked) { 9830 rack->r_wanted_output = 1; 9831 changed += acked; 9832 sack_changed += acked; 9833 } 9834 if (moved_two) { 9835 /* 9836 * If we did not get a SACK for at least a MSS and 9837 * had to move at all, or if we moved more than our 9838 * threshold, it counts against the "extra" move. 9839 */ 9840 rack->r_ctl.sack_moved_extra += moved_two; 9841 counter_u64_add(rack_move_some, 1); 9842 } else { 9843 /* 9844 * else we did not have to move 9845 * any more than we would expect. 
9846 */ 9847 rack->r_ctl.sack_noextra_move++; 9848 counter_u64_add(rack_move_none, 1); 9849 } 9850 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9851 /* 9852 * If the SACK was not a full MSS then 9853 * we add to sack_count the number of 9854 * MSS's (or possibly more than 9855 * a MSS if its a TSO send) we had to skip by. 9856 */ 9857 rack->r_ctl.sack_count += moved_two; 9858 counter_u64_add(rack_sack_total, moved_two); 9859 } 9860 /* 9861 * Now we need to setup for the next 9862 * round. First we make sure we won't 9863 * exceed the size of our uint32_t on 9864 * the various counts, and then clear out 9865 * moved_two. 9866 */ 9867 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9868 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9869 rack->r_ctl.sack_moved_extra /= 2; 9870 rack->r_ctl.sack_noextra_move /= 2; 9871 } 9872 if (rack->r_ctl.sack_count > 0xfff00000) { 9873 rack->r_ctl.ack_count /= 2; 9874 rack->r_ctl.sack_count /= 2; 9875 } 9876 moved_two = 0; 9877 } 9878 out_with_totals: 9879 if (num_sack_blks > 1) { 9880 /* 9881 * You get an extra stroke if 9882 * you have more than one sack-blk, this 9883 * could be where we are skipping forward 9884 * and the sack-filter is still working, or 9885 * it could be an attacker constantly 9886 * moving us. 9887 */ 9888 rack->r_ctl.sack_moved_extra++; 9889 counter_u64_add(rack_move_some, 1); 9890 } 9891 out: 9892 #ifdef NETFLIX_EXP_DETECTION 9893 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9894 #endif 9895 if (changed) { 9896 /* Something changed cancel the rack timer */ 9897 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9898 } 9899 tsused = tcp_get_usecs(NULL); 9900 rsm = tcp_rack_output(tp, rack, tsused); 9901 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9902 rsm) { 9903 /* Enter recovery */ 9904 rack->r_ctl.rc_rsm_start = rsm->r_start; 9905 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 9906 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 9907 entered_recovery = 1; 9908 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9909 /* 9910 * When we enter recovery we need to assure we send 9911 * one packet. 9912 */ 9913 if (rack->rack_no_prr == 0) { 9914 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9915 rack_log_to_prr(rack, 8, 0); 9916 } 9917 rack->r_timer_override = 1; 9918 rack->r_early = 0; 9919 rack->r_ctl.rc_agg_early = 0; 9920 } else if (IN_FASTRECOVERY(tp->t_flags) && 9921 rsm && 9922 (rack->r_rr_config == 3)) { 9923 /* 9924 * Assure we can output and we get no 9925 * remembered pace time except the retransmit. 9926 */ 9927 rack->r_timer_override = 1; 9928 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9929 rack->r_ctl.rc_resend = rsm; 9930 } 9931 if (IN_FASTRECOVERY(tp->t_flags) && 9932 (rack->rack_no_prr == 0) && 9933 (entered_recovery == 0)) { 9934 rack_update_prr(tp, rack, changed, th_ack); 9935 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9936 ((rack->rc_inp->inp_in_hpts == 0) && 9937 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9938 /* 9939 * If you are pacing output you don't want 9940 * to override. 
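 * (i.e. r_timer_override is only set below when the connection is neither queued on the hpts pacer nor has PACE_PKT_OUTPUT pending.)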
9941 */ 9942 rack->r_early = 0; 9943 rack->r_ctl.rc_agg_early = 0; 9944 rack->r_timer_override = 1; 9945 } 9946 } 9947 } 9948 9949 static void 9950 rack_strike_dupack(struct tcp_rack *rack) 9951 { 9952 struct rack_sendmap *rsm; 9953 9954 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9955 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9956 rsm = TAILQ_NEXT(rsm, r_tnext); 9957 } 9958 if (rsm && (rsm->r_dupack < 0xff)) { 9959 rsm->r_dupack++; 9960 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9961 struct timeval tv; 9962 uint32_t cts; 9963 /* 9964 * Here we see if we need to retransmit. For 9965 * a SACK type connection if enough time has passed 9966 * we will get a return of the rsm. For a non-sack 9967 * connection we will get the rsm returned if the 9968 * dupack value is 3 or more. 9969 */ 9970 cts = tcp_get_usecs(&tv); 9971 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9972 if (rack->r_ctl.rc_resend != NULL) { 9973 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9974 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9975 rack->rc_tp->snd_una); 9976 } 9977 rack->r_wanted_output = 1; 9978 rack->r_timer_override = 1; 9979 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9980 } 9981 } else { 9982 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9983 } 9984 } 9985 } 9986 9987 static void 9988 rack_check_bottom_drag(struct tcpcb *tp, 9989 struct tcp_rack *rack, 9990 struct socket *so, int32_t acked) 9991 { 9992 uint32_t segsiz, minseg; 9993 9994 segsiz = ctf_fixed_maxseg(tp); 9995 minseg = segsiz; 9996 9997 if (tp->snd_max == tp->snd_una) { 9998 /* 9999 * We are doing dynamic pacing and we are way 10000 * under. Basically everything got acked while 10001 * we were still waiting on the pacer to expire. 10002 * 10003 * This means we need to boost the b/w in 10004 * addition to any earlier boosting of 10005 * the multipler. 10006 */ 10007 rack->rc_dragged_bottom = 1; 10008 rack_validate_multipliers_at_or_above100(rack); 10009 /* 10010 * Lets use the segment bytes acked plus 10011 * the lowest RTT seen as the basis to 10012 * form a b/w estimate. This will be off 10013 * due to the fact that the true estimate 10014 * should be around 1/2 the time of the RTT 10015 * but we can settle for that. 10016 */ 10017 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 10018 acked) { 10019 uint64_t bw, calc_bw, rtt; 10020 10021 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 10022 if (rtt == 0) { 10023 /* no us sample is there a ms one? */ 10024 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 10025 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 10026 } else { 10027 goto no_measurement; 10028 } 10029 } 10030 bw = acked; 10031 calc_bw = bw * 1000000; 10032 calc_bw /= rtt; 10033 if (rack->r_ctl.last_max_bw && 10034 (rack->r_ctl.last_max_bw < calc_bw)) { 10035 /* 10036 * If we have a last calculated max bw 10037 * enforce it. 10038 */ 10039 calc_bw = rack->r_ctl.last_max_bw; 10040 } 10041 /* now plop it in */ 10042 if (rack->rc_gp_filled == 0) { 10043 if (calc_bw > ONE_POINT_TWO_MEG) { 10044 /* 10045 * If we have no measurement 10046 * don't let us set in more than 10047 * 1.2Mbps. If we are still too 10048 * low after pacing with this we 10049 * will hopefully have a max b/w 10050 * available to sanity check things. 
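 * (For reference, the estimate above is calc_bw = acked * 1000000 / rtt; with made-up numbers, 1448 bytes acked over a 10000 usec lowest RTT gives 144,800 bytes per second.)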
10051 */ 10052 calc_bw = ONE_POINT_TWO_MEG; 10053 } 10054 rack->r_ctl.rc_rtt_diff = 0; 10055 rack->r_ctl.gp_bw = calc_bw; 10056 rack->rc_gp_filled = 1; 10057 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10058 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10059 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10060 } else if (calc_bw > rack->r_ctl.gp_bw) { 10061 rack->r_ctl.rc_rtt_diff = 0; 10062 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10063 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10064 rack->r_ctl.gp_bw = calc_bw; 10065 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10066 } else 10067 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10068 if ((rack->gp_ready == 0) && 10069 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 10070 /* We have enough measurements now */ 10071 rack->gp_ready = 1; 10072 rack_set_cc_pacing(rack); 10073 if (rack->defer_options) 10074 rack_apply_deferred_options(rack); 10075 } 10076 /* 10077 * For acks over 1mss we do a extra boost to simulate 10078 * where we would get 2 acks (we want 110 for the mul). 10079 */ 10080 if (acked > segsiz) 10081 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10082 } else { 10083 /* 10084 * zero rtt possibly?, settle for just an old increase. 10085 */ 10086 no_measurement: 10087 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10088 } 10089 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 10090 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 10091 minseg)) && 10092 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 10093 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 10094 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 10095 (segsiz * rack_req_segs))) { 10096 /* 10097 * We are doing dynamic GP pacing and 10098 * we have everything except 1MSS or less 10099 * bytes left out. We are still pacing away. 10100 * And there is data that could be sent, This 10101 * means we are inserting delayed ack time in 10102 * our measurements because we are pacing too slow. 10103 */ 10104 rack_validate_multipliers_at_or_above100(rack); 10105 rack->rc_dragged_bottom = 1; 10106 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10107 } 10108 } 10109 10110 10111 10112 static void 10113 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 10114 { 10115 /* 10116 * The fast output path is enabled and we 10117 * have moved the cumack forward. Lets see if 10118 * we can expand forward the fast path length by 10119 * that amount. What we would ideally like to 10120 * do is increase the number of bytes in the 10121 * fast path block (left_to_send) by the 10122 * acked amount. However we have to gate that 10123 * by two factors: 10124 * 1) The amount outstanding and the rwnd of the peer 10125 * (i.e. we don't want to exceed the rwnd of the peer). 10126 * <and> 10127 * 2) The amount of data left in the socket buffer (i.e. 10128 * we can't send beyond what is in the buffer). 10129 * 10130 * Note that this does not take into account any increase 10131 * in the cwnd. We will only extend the fast path by 10132 * what was acked. 
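 * In other words, left_to_send + acked_amount is only accepted below if it stays within min(sbavail - outstanding, snd_wnd - outstanding).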
10133 */ 10134 uint32_t new_total, gating_val; 10135 10136 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 10137 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 10138 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 10139 if (new_total <= gating_val) { 10140 /* We can increase left_to_send by the acked amount */ 10141 counter_u64_add(rack_extended_rfo, 1); 10142 rack->r_ctl.fsb.left_to_send = new_total; 10143 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 10144 ("rack:%p left_to_send:%u sbavail:%u out:%u", 10145 rack, rack->r_ctl.fsb.left_to_send, 10146 sbavail(&rack->rc_inp->inp_socket->so_snd), 10147 (tp->snd_max - tp->snd_una))); 10148 10149 } 10150 } 10151 10152 static void 10153 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 10154 { 10155 /* 10156 * Here any sendmap entry that points to the 10157 * beginning mbuf must be adjusted to the correct 10158 * offset. This must be called with: 10159 * 1) The socket buffer locked 10160 * 2) snd_una adjusted to its new postion. 10161 * 10162 * Note that (2) implies rack_ack_received has also 10163 * been called. 10164 * 10165 * We grab the first mbuf in the socket buffer and 10166 * then go through the front of the sendmap, recalculating 10167 * the stored offset for any sendmap entry that has 10168 * that mbuf. We must use the sb functions to do this 10169 * since its possible an add was done has well as 10170 * the subtraction we may have just completed. This should 10171 * not be a penalty though, since we just referenced the sb 10172 * to go in and trim off the mbufs that we freed (of course 10173 * there will be a penalty for the sendmap references though). 10174 */ 10175 struct mbuf *m; 10176 struct rack_sendmap *rsm; 10177 10178 SOCKBUF_LOCK_ASSERT(sb); 10179 m = sb->sb_mb; 10180 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 10181 if ((rsm == NULL) || (m == NULL)) { 10182 /* Nothing outstanding */ 10183 return; 10184 } 10185 while (rsm->m && (rsm->m == m)) { 10186 /* one to adjust */ 10187 #ifdef INVARIANTS 10188 struct mbuf *tm; 10189 uint32_t soff; 10190 10191 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 10192 if (rsm->orig_m_len != m->m_len) { 10193 rack_adjust_orig_mlen(rsm); 10194 } 10195 if (rsm->soff != soff) { 10196 /* 10197 * This is not a fatal error, we anticipate it 10198 * might happen (the else code), so we count it here 10199 * so that under invariant we can see that it really 10200 * does happen. 10201 */ 10202 counter_u64_add(rack_adjust_map_bw, 1); 10203 } 10204 rsm->m = tm; 10205 rsm->soff = soff; 10206 if (tm) 10207 rsm->orig_m_len = rsm->m->m_len; 10208 else 10209 rsm->orig_m_len = 0; 10210 #else 10211 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10212 if (rsm->m) 10213 rsm->orig_m_len = rsm->m->m_len; 10214 else 10215 rsm->orig_m_len = 0; 10216 #endif 10217 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10218 rsm); 10219 if (rsm == NULL) 10220 break; 10221 } 10222 } 10223 10224 /* 10225 * Return value of 1, we do not need to call rack_process_data(). 10226 * return value of 0, rack_process_data can be called. 10227 * For ret_val if its 0 the TCP is locked, if its non-zero 10228 * its unlocked and probably unsafe to touch the TCB. 
10229 */ 10230 static int 10231 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10232 struct tcpcb *tp, struct tcpopt *to, 10233 uint32_t tiwin, int32_t tlen, 10234 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10235 { 10236 int32_t ourfinisacked = 0; 10237 int32_t nsegs, acked_amount; 10238 int32_t acked; 10239 struct mbuf *mfree; 10240 struct tcp_rack *rack; 10241 int32_t under_pacing = 0; 10242 int32_t recovery = 0; 10243 10244 rack = (struct tcp_rack *)tp->t_fb_ptr; 10245 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10246 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10247 &rack->r_ctl.challenge_ack_ts, 10248 &rack->r_ctl.challenge_ack_cnt); 10249 rack->r_wanted_output = 1; 10250 return (1); 10251 } 10252 if (rack->gp_ready && 10253 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10254 under_pacing = 1; 10255 } 10256 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10257 int in_rec, dup_ack_struck = 0; 10258 10259 in_rec = IN_FASTRECOVERY(tp->t_flags); 10260 if (rack->rc_in_persist) { 10261 tp->t_rxtshift = 0; 10262 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10263 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10264 } 10265 if ((th->th_ack == tp->snd_una) && 10266 (tiwin == tp->snd_wnd) && 10267 ((to->to_flags & TOF_SACK) == 0)) { 10268 rack_strike_dupack(rack); 10269 dup_ack_struck = 1; 10270 } 10271 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10272 } 10273 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10274 /* 10275 * Old ack, behind (or duplicate to) the last one rcv'd 10276 * Note: We mark reordering is occuring if its 10277 * less than and we have not closed our window. 10278 */ 10279 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10280 counter_u64_add(rack_reorder_seen, 1); 10281 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10282 } 10283 return (0); 10284 } 10285 /* 10286 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10287 * something we sent. 10288 */ 10289 if (tp->t_flags & TF_NEEDSYN) { 10290 /* 10291 * T/TCP: Connection was half-synchronized, and our SYN has 10292 * been ACK'd (so connection is now fully synchronized). Go 10293 * to non-starred state, increment snd_una for ACK of SYN, 10294 * and check if we can do window scaling. 10295 */ 10296 tp->t_flags &= ~TF_NEEDSYN; 10297 tp->snd_una++; 10298 /* Do window scaling? */ 10299 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10300 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10301 tp->rcv_scale = tp->request_r_scale; 10302 /* Send window already scaled. */ 10303 } 10304 } 10305 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10306 INP_WLOCK_ASSERT(tp->t_inpcb); 10307 10308 acked = BYTES_THIS_ACK(tp, th); 10309 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10310 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10311 /* 10312 * If we just performed our first retransmit, and the ACK arrives 10313 * within our recovery window, then it was a mistake to do the 10314 * retransmit in the first place. Recover our original cwnd and 10315 * ssthresh, and proceed to transmit where we left off. 
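 * (The check below applies only when timestamps are not in use and we are still inside the t_badrxtwin window after the first retransmit.)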
10316 */ 10317 if ((tp->t_flags & TF_PREVVALID) && 10318 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10319 tp->t_flags &= ~TF_PREVVALID; 10320 if (tp->t_rxtshift == 1 && 10321 (int)(ticks - tp->t_badrxtwin) < 0) 10322 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10323 } 10324 if (acked) { 10325 /* assure we are not backed off */ 10326 tp->t_rxtshift = 0; 10327 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10328 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10329 rack->rc_tlp_in_progress = 0; 10330 rack->r_ctl.rc_tlp_cnt_out = 0; 10331 /* 10332 * If it is the RXT timer we want to 10333 * stop it, so we can restart a TLP. 10334 */ 10335 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10336 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10337 #ifdef NETFLIX_HTTP_LOGGING 10338 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10339 #endif 10340 } 10341 /* 10342 * If we have a timestamp reply, update smoothed round trip time. If 10343 * no timestamp is present but transmit timer is running and timed 10344 * sequence number was acked, update smoothed round trip time. Since 10345 * we now have an rtt measurement, cancel the timer backoff (cf., 10346 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10347 * timer. 10348 * 10349 * Some boxes send broken timestamp replies during the SYN+ACK 10350 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10351 * and blow up the retransmit timer. 10352 */ 10353 /* 10354 * If all outstanding data is acked, stop retransmit timer and 10355 * remember to restart (more output or persist). If there is more 10356 * data to be acked, restart retransmit timer, using current 10357 * (possibly backed-off) value. 10358 */ 10359 if (acked == 0) { 10360 if (ofia) 10361 *ofia = ourfinisacked; 10362 return (0); 10363 } 10364 if (IN_RECOVERY(tp->t_flags)) { 10365 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10366 (SEQ_LT(th->th_ack, tp->snd_max))) { 10367 tcp_rack_partialack(tp); 10368 } else { 10369 rack_post_recovery(tp, th->th_ack); 10370 recovery = 1; 10371 } 10372 } 10373 /* 10374 * Let the congestion control algorithm update congestion control 10375 * related information. This typically means increasing the 10376 * congestion window. 10377 */ 10378 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10379 SOCKBUF_LOCK(&so->so_snd); 10380 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10381 tp->snd_wnd -= acked_amount; 10382 mfree = sbcut_locked(&so->so_snd, acked_amount); 10383 if ((sbused(&so->so_snd) == 0) && 10384 (acked > acked_amount) && 10385 (tp->t_state >= TCPS_FIN_WAIT_1) && 10386 (tp->t_flags & TF_SENTFIN)) { 10387 /* 10388 * We must be sure our fin 10389 * was sent and acked (we can be 10390 * in FIN_WAIT_1 without having 10391 * sent the fin). 10392 */ 10393 ourfinisacked = 1; 10394 } 10395 tp->snd_una = th->th_ack; 10396 if (acked_amount && sbavail(&so->so_snd)) 10397 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10398 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10399 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10400 sowwakeup_locked(so); 10401 m_freem(mfree); 10402 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10403 tp->snd_recover = tp->snd_una; 10404 10405 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10406 tp->snd_nxt = tp->snd_una; 10407 } 10408 if (under_pacing && 10409 (rack->use_fixed_rate == 0) && 10410 (rack->in_probe_rtt == 0) && 10411 rack->rc_gp_dyn_mul && 10412 rack->rc_always_pace) { 10413 /* Check if we are dragging bottom */ 10414 rack_check_bottom_drag(tp, rack, so, acked); 10415 } 10416 if (tp->snd_una == tp->snd_max) { 10417 /* Nothing left outstanding */ 10418 tp->t_flags &= ~TF_PREVVALID; 10419 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10420 rack->r_ctl.retran_during_recovery = 0; 10421 rack->r_ctl.dsack_byte_cnt = 0; 10422 if (rack->r_ctl.rc_went_idle_time == 0) 10423 rack->r_ctl.rc_went_idle_time = 1; 10424 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10425 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10426 tp->t_acktime = 0; 10427 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10428 /* Set need output so persist might get set */ 10429 rack->r_wanted_output = 1; 10430 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10431 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10432 (sbavail(&so->so_snd) == 0) && 10433 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10434 /* 10435 * The socket was gone and the 10436 * peer sent data (now or in the past), time to 10437 * reset him. 10438 */ 10439 *ret_val = 1; 10440 /* tcp_close will kill the inp, pre-log the Reset */ 10441 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10442 tp = tcp_close(tp); 10443 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10444 return (1); 10445 } 10446 } 10447 if (ofia) 10448 *ofia = ourfinisacked; 10449 return (0); 10450 } 10451 10452 static void 10453 rack_collapsed_window(struct tcp_rack *rack) 10454 { 10455 /* 10456 * Now we must walk the send map 10457 * and mark the entries left stranded 10458 * beyond the collapsed window. These 10459 * cannot cause us to abort the 10460 * connection and are really 10461 * "unsent". However, if a buggy 10462 * client actually did keep some 10463 * of that data (i.e. it collapsed the 10464 * window, refused to ack, and then later 10465 * opened the window and acked that data), 10466 * we would get into an ack war, so 10467 * the simpler method of just 10468 * pretending we never sent those 10469 * segments won't work. 10470 */ 10471 struct rack_sendmap *rsm, *nrsm, fe, *insret; 10472 tcp_seq max_seq; 10473 10474 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10475 memset(&fe, 0, sizeof(fe)); 10476 fe.r_start = max_seq; 10477 /* Find the first seq past or at maxseq */ 10478 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10479 if (rsm == NULL) { 10480 /* Nothing to do, strange */ 10481 rack->rc_has_collapsed = 0; 10482 return; 10483 } 10484 /* 10485 * Now do we need to split at 10486 * the collapse point? 10487 */ 10488 if (SEQ_GT(max_seq, rsm->r_start)) { 10489 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10490 if (nrsm == NULL) { 10491 /* We can't get a rsm, mark all?
*/ 10492 nrsm = rsm; 10493 goto no_split; 10494 } 10495 /* Clone it */ 10496 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10497 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10498 #ifdef INVARIANTS 10499 if (insret != NULL) { 10500 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10501 nrsm, insret, rack, rsm); 10502 } 10503 #endif 10504 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10505 if (rsm->r_in_tmap) { 10506 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10507 nrsm->r_in_tmap = 1; 10508 } 10509 /* 10510 * Set in the new RSM as the 10511 * collapsed starting point 10512 */ 10513 rsm = nrsm; 10514 } 10515 no_split: 10516 counter_u64_add(rack_collapsed_win, 1); 10517 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10518 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10519 } 10520 rack->rc_has_collapsed = 1; 10521 } 10522 10523 static void 10524 rack_un_collapse_window(struct tcp_rack *rack) 10525 { 10526 struct rack_sendmap *rsm; 10527 10528 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10529 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10530 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10531 else 10532 break; 10533 } 10534 rack->rc_has_collapsed = 0; 10535 } 10536 10537 static void 10538 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10539 int32_t tlen, int32_t tfo_syn) 10540 { 10541 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10542 if (rack->rc_dack_mode && 10543 (tlen > 500) && 10544 (rack->rc_dack_toggle == 1)) { 10545 goto no_delayed_ack; 10546 } 10547 rack_timer_cancel(tp, rack, 10548 rack->r_ctl.rc_rcvtime, __LINE__); 10549 tp->t_flags |= TF_DELACK; 10550 } else { 10551 no_delayed_ack: 10552 rack->r_wanted_output = 1; 10553 tp->t_flags |= TF_ACKNOW; 10554 if (rack->rc_dack_mode) { 10555 if (tp->t_flags & TF_DELACK) 10556 rack->rc_dack_toggle = 1; 10557 else 10558 rack->rc_dack_toggle = 0; 10559 } 10560 } 10561 } 10562 10563 static void 10564 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10565 { 10566 /* 10567 * If fast output is in progress, lets validate that 10568 * the new window did not shrink on us and make it 10569 * so fast output should end. 10570 */ 10571 if (rack->r_fast_output) { 10572 uint32_t out; 10573 10574 /* 10575 * Calculate what we will send if left as is 10576 * and compare that to our send window. 10577 */ 10578 out = ctf_outstanding(tp); 10579 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10580 /* ok we have an issue */ 10581 if (out >= tp->snd_wnd) { 10582 /* Turn off fast output the window is met or collapsed */ 10583 rack->r_fast_output = 0; 10584 } else { 10585 /* we have some room left */ 10586 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10587 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10588 /* If not at least 1 full segment never mind */ 10589 rack->r_fast_output = 0; 10590 } 10591 } 10592 } 10593 } 10594 } 10595 10596 10597 /* 10598 * Return value of 1, the TCB is unlocked and most 10599 * likely gone, return value of 0, the TCP is still 10600 * locked. 10601 */ 10602 static int 10603 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10604 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10605 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10606 { 10607 /* 10608 * Update window information. Don't look at window if no ACK: TAC's 10609 * send garbage on first SYN. 
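 * Below, the window is only taken from segments that are at least as new as the last window update we accepted (tracked via snd_wl1/snd_wl2).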
10610 */ 10611 int32_t nsegs; 10612 int32_t tfo_syn; 10613 struct tcp_rack *rack; 10614 10615 rack = (struct tcp_rack *)tp->t_fb_ptr; 10616 INP_WLOCK_ASSERT(tp->t_inpcb); 10617 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10618 if ((thflags & TH_ACK) && 10619 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10620 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10621 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10622 /* keep track of pure window updates */ 10623 if (tlen == 0 && 10624 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10625 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10626 tp->snd_wnd = tiwin; 10627 rack_validate_fo_sendwin_up(tp, rack); 10628 tp->snd_wl1 = th->th_seq; 10629 tp->snd_wl2 = th->th_ack; 10630 if (tp->snd_wnd > tp->max_sndwnd) 10631 tp->max_sndwnd = tp->snd_wnd; 10632 rack->r_wanted_output = 1; 10633 } else if (thflags & TH_ACK) { 10634 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10635 tp->snd_wnd = tiwin; 10636 rack_validate_fo_sendwin_up(tp, rack); 10637 tp->snd_wl1 = th->th_seq; 10638 tp->snd_wl2 = th->th_ack; 10639 } 10640 } 10641 if (tp->snd_wnd < ctf_outstanding(tp)) 10642 /* The peer collapsed the window */ 10643 rack_collapsed_window(rack); 10644 else if (rack->rc_has_collapsed) 10645 rack_un_collapse_window(rack); 10646 /* Was persist timer active and now we have window space? */ 10647 if ((rack->rc_in_persist != 0) && 10648 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10649 rack->r_ctl.rc_pace_min_segs))) { 10650 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10651 tp->snd_nxt = tp->snd_max; 10652 /* Make sure we output to start the timer */ 10653 rack->r_wanted_output = 1; 10654 } 10655 /* Do we enter persists? */ 10656 if ((rack->rc_in_persist == 0) && 10657 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10658 TCPS_HAVEESTABLISHED(tp->t_state) && 10659 (tp->snd_max == tp->snd_una) && 10660 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10661 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10662 /* 10663 * Here the rwnd is less than 10664 * the pacing size, we are established, 10665 * nothing is outstanding, and there is 10666 * data to send. Enter persists. 10667 */ 10668 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10669 } 10670 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10671 m_freem(m); 10672 return (0); 10673 } 10674 /* 10675 * don't process the URG bit, ignore them drag 10676 * along the up. 10677 */ 10678 tp->rcv_up = tp->rcv_nxt; 10679 INP_WLOCK_ASSERT(tp->t_inpcb); 10680 10681 /* 10682 * Process the segment text, merging it into the TCP sequencing 10683 * queue, and arranging for acknowledgment of receipt if necessary. 10684 * This process logically involves adjusting tp->rcv_wnd as data is 10685 * presented to the user (this happens in tcp_usrreq.c, case 10686 * PRU_RCVD). If a FIN has already been received on this connection 10687 * then we just ignore the text. 10688 */ 10689 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10690 IS_FASTOPEN(tp->t_flags)); 10691 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10692 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10693 tcp_seq save_start = th->th_seq; 10694 tcp_seq save_rnxt = tp->rcv_nxt; 10695 int save_tlen = tlen; 10696 10697 m_adj(m, drop_hdrlen); /* delayed header drop */ 10698 /* 10699 * Insert segment which includes th into TCP reassembly 10700 * queue with control block tp. Set thflags to whether 10701 * reassembly now includes a segment with FIN. 
This handles 10702 * the common case inline (segment is the next to be 10703 * received on an established connection, and the queue is 10704 * empty), avoiding linkage into and removal from the queue 10705 * and repetition of various conversions. Set DELACK for 10706 * segments received in order, but ack immediately when 10707 * segments are out of order (so fast retransmit can work). 10708 */ 10709 if (th->th_seq == tp->rcv_nxt && 10710 SEGQ_EMPTY(tp) && 10711 (TCPS_HAVEESTABLISHED(tp->t_state) || 10712 tfo_syn)) { 10713 #ifdef NETFLIX_SB_LIMITS 10714 u_int mcnt, appended; 10715 10716 if (so->so_rcv.sb_shlim) { 10717 mcnt = m_memcnt(m); 10718 appended = 0; 10719 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10720 CFO_NOSLEEP, NULL) == false) { 10721 counter_u64_add(tcp_sb_shlim_fails, 1); 10722 m_freem(m); 10723 return (0); 10724 } 10725 } 10726 #endif 10727 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10728 tp->rcv_nxt += tlen; 10729 if (tlen && 10730 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10731 (tp->t_fbyte_in == 0)) { 10732 tp->t_fbyte_in = ticks; 10733 if (tp->t_fbyte_in == 0) 10734 tp->t_fbyte_in = 1; 10735 if (tp->t_fbyte_out && tp->t_fbyte_in) 10736 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10737 } 10738 thflags = th->th_flags & TH_FIN; 10739 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10740 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10741 SOCKBUF_LOCK(&so->so_rcv); 10742 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10743 m_freem(m); 10744 } else 10745 #ifdef NETFLIX_SB_LIMITS 10746 appended = 10747 #endif 10748 sbappendstream_locked(&so->so_rcv, m, 0); 10749 10750 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10751 /* NB: sorwakeup_locked() does an implicit unlock. */ 10752 sorwakeup_locked(so); 10753 #ifdef NETFLIX_SB_LIMITS 10754 if (so->so_rcv.sb_shlim && appended != mcnt) 10755 counter_fo_release(so->so_rcv.sb_shlim, 10756 mcnt - appended); 10757 #endif 10758 } else { 10759 /* 10760 * XXX: Due to the header drop above "th" is 10761 * theoretically invalid by now. Fortunately 10762 * m_adj() doesn't actually frees any mbufs when 10763 * trimming from the head. 10764 */ 10765 tcp_seq temp = save_start; 10766 10767 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10768 tp->t_flags |= TF_ACKNOW; 10769 if (tp->t_flags & TF_WAKESOR) { 10770 tp->t_flags &= ~TF_WAKESOR; 10771 /* NB: sorwakeup_locked() does an implicit unlock. */ 10772 sorwakeup_locked(so); 10773 } 10774 } 10775 if ((tp->t_flags & TF_SACK_PERMIT) && 10776 (save_tlen > 0) && 10777 TCPS_HAVEESTABLISHED(tp->t_state)) { 10778 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10779 /* 10780 * DSACK actually handled in the fastpath 10781 * above. 10782 */ 10783 RACK_OPTS_INC(tcp_sack_path_1); 10784 tcp_update_sack_list(tp, save_start, 10785 save_start + save_tlen); 10786 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10787 if ((tp->rcv_numsacks >= 1) && 10788 (tp->sackblks[0].end == save_start)) { 10789 /* 10790 * Partial overlap, recorded at todrop 10791 * above. 10792 */ 10793 RACK_OPTS_INC(tcp_sack_path_2a); 10794 tcp_update_sack_list(tp, 10795 tp->sackblks[0].start, 10796 tp->sackblks[0].end); 10797 } else { 10798 RACK_OPTS_INC(tcp_sack_path_2b); 10799 tcp_update_dsack_list(tp, save_start, 10800 save_start + save_tlen); 10801 } 10802 } else if (tlen >= save_tlen) { 10803 /* Update of sackblks. 
*/ 10804 RACK_OPTS_INC(tcp_sack_path_3); 10805 tcp_update_dsack_list(tp, save_start, 10806 save_start + save_tlen); 10807 } else if (tlen > 0) { 10808 RACK_OPTS_INC(tcp_sack_path_4); 10809 tcp_update_dsack_list(tp, save_start, 10810 save_start + tlen); 10811 } 10812 } 10813 } else { 10814 m_freem(m); 10815 thflags &= ~TH_FIN; 10816 } 10817 10818 /* 10819 * If FIN is received ACK the FIN and let the user know that the 10820 * connection is closing. 10821 */ 10822 if (thflags & TH_FIN) { 10823 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10824 /* The socket upcall is handled by socantrcvmore. */ 10825 socantrcvmore(so); 10826 /* 10827 * If connection is half-synchronized (ie NEEDSYN 10828 * flag on) then delay ACK, so it may be piggybacked 10829 * when SYN is sent. Otherwise, since we received a 10830 * FIN then no more input can be expected, send ACK 10831 * now. 10832 */ 10833 if (tp->t_flags & TF_NEEDSYN) { 10834 rack_timer_cancel(tp, rack, 10835 rack->r_ctl.rc_rcvtime, __LINE__); 10836 tp->t_flags |= TF_DELACK; 10837 } else { 10838 tp->t_flags |= TF_ACKNOW; 10839 } 10840 tp->rcv_nxt++; 10841 } 10842 switch (tp->t_state) { 10843 /* 10844 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10845 * CLOSE_WAIT state. 10846 */ 10847 case TCPS_SYN_RECEIVED: 10848 tp->t_starttime = ticks; 10849 /* FALLTHROUGH */ 10850 case TCPS_ESTABLISHED: 10851 rack_timer_cancel(tp, rack, 10852 rack->r_ctl.rc_rcvtime, __LINE__); 10853 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10854 break; 10855 10856 /* 10857 * If still in FIN_WAIT_1 STATE FIN has not been 10858 * acked so enter the CLOSING state. 10859 */ 10860 case TCPS_FIN_WAIT_1: 10861 rack_timer_cancel(tp, rack, 10862 rack->r_ctl.rc_rcvtime, __LINE__); 10863 tcp_state_change(tp, TCPS_CLOSING); 10864 break; 10865 10866 /* 10867 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10868 * starting the time-wait timer, turning off the 10869 * other standard timers. 10870 */ 10871 case TCPS_FIN_WAIT_2: 10872 rack_timer_cancel(tp, rack, 10873 rack->r_ctl.rc_rcvtime, __LINE__); 10874 tcp_twstart(tp); 10875 return (1); 10876 } 10877 } 10878 /* 10879 * Return any desired output. 10880 */ 10881 if ((tp->t_flags & TF_ACKNOW) || 10882 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10883 rack->r_wanted_output = 1; 10884 } 10885 INP_WLOCK_ASSERT(tp->t_inpcb); 10886 return (0); 10887 } 10888 10889 /* 10890 * Here nothing is really faster, its just that we 10891 * have broken out the fast-data path also just like 10892 * the fast-ack. 10893 */ 10894 static int 10895 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10896 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10897 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10898 { 10899 int32_t nsegs; 10900 int32_t newsize = 0; /* automatic sockbuf scaling */ 10901 struct tcp_rack *rack; 10902 #ifdef NETFLIX_SB_LIMITS 10903 u_int mcnt, appended; 10904 #endif 10905 #ifdef TCPDEBUG 10906 /* 10907 * The size of tcp_saveipgen must be the size of the max ip header, 10908 * now IPv6. 10909 */ 10910 u_char tcp_saveipgen[IP6_HDR_LEN]; 10911 struct tcphdr tcp_savetcp; 10912 short ostate = 0; 10913 10914 #endif 10915 /* 10916 * If last ACK falls within this segment's sequence numbers, record 10917 * the timestamp. NOTE that the test is modified according to the 10918 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
10919 */ 10920 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10921 return (0); 10922 } 10923 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10924 return (0); 10925 } 10926 if (tiwin && tiwin != tp->snd_wnd) { 10927 return (0); 10928 } 10929 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10930 return (0); 10931 } 10932 if (__predict_false((to->to_flags & TOF_TS) && 10933 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10934 return (0); 10935 } 10936 if (__predict_false((th->th_ack != tp->snd_una))) { 10937 return (0); 10938 } 10939 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10940 return (0); 10941 } 10942 if ((to->to_flags & TOF_TS) != 0 && 10943 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10944 tp->ts_recent_age = tcp_ts_getticks(); 10945 tp->ts_recent = to->to_tsval; 10946 } 10947 rack = (struct tcp_rack *)tp->t_fb_ptr; 10948 /* 10949 * This is a pure, in-sequence data packet with nothing on the 10950 * reassembly queue and we have enough buffer space to take it. 10951 */ 10952 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10953 10954 #ifdef NETFLIX_SB_LIMITS 10955 if (so->so_rcv.sb_shlim) { 10956 mcnt = m_memcnt(m); 10957 appended = 0; 10958 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10959 CFO_NOSLEEP, NULL) == false) { 10960 counter_u64_add(tcp_sb_shlim_fails, 1); 10961 m_freem(m); 10962 return (1); 10963 } 10964 } 10965 #endif 10966 /* Clean receiver SACK report if present */ 10967 if (tp->rcv_numsacks) 10968 tcp_clean_sackreport(tp); 10969 KMOD_TCPSTAT_INC(tcps_preddat); 10970 tp->rcv_nxt += tlen; 10971 if (tlen && 10972 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10973 (tp->t_fbyte_in == 0)) { 10974 tp->t_fbyte_in = ticks; 10975 if (tp->t_fbyte_in == 0) 10976 tp->t_fbyte_in = 1; 10977 if (tp->t_fbyte_out && tp->t_fbyte_in) 10978 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10979 } 10980 /* 10981 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10982 */ 10983 tp->snd_wl1 = th->th_seq; 10984 /* 10985 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10986 */ 10987 tp->rcv_up = tp->rcv_nxt; 10988 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10989 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10990 #ifdef TCPDEBUG 10991 if (so->so_options & SO_DEBUG) 10992 tcp_trace(TA_INPUT, ostate, tp, 10993 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10994 #endif 10995 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10996 10997 /* Add data to socket buffer. */ 10998 SOCKBUF_LOCK(&so->so_rcv); 10999 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11000 m_freem(m); 11001 } else { 11002 /* 11003 * Set new socket buffer size. Give up when limit is 11004 * reached. 11005 */ 11006 if (newsize) 11007 if (!sbreserve_locked(&so->so_rcv, 11008 newsize, so, NULL)) 11009 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 11010 m_adj(m, drop_hdrlen); /* delayed header drop */ 11011 #ifdef NETFLIX_SB_LIMITS 11012 appended = 11013 #endif 11014 sbappendstream_locked(&so->so_rcv, m, 0); 11015 ctf_calc_rwin(so, tp); 11016 } 11017 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 11018 /* NB: sorwakeup_locked() does an implicit unlock. */ 11019 sorwakeup_locked(so); 11020 #ifdef NETFLIX_SB_LIMITS 11021 if (so->so_rcv.sb_shlim && mcnt != appended) 11022 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 11023 #endif 11024 rack_handle_delayed_ack(tp, rack, tlen, 0); 11025 if (tp->snd_una == tp->snd_max) 11026 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 11027 return (1); 11028 } 11029 11030 /* 11031 * This subfunction is used to try to highly optimize the 11032 * fast path. 
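 * A pure ack only qualifies when it is in window, we are not retransmitting or in recovery, the scoreboard holds no sacked data, and no deferred SYN/FIN processing is pending.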
We again allow window updates that are 11033 * in sequence to remain in the fast-path. We also add 11034 * in the __predict's to attempt to help the compiler. 11035 * Note that if we return a 0, then we can *not* process 11036 * it and the caller should push the packet into the 11037 * slow-path. 11038 */ 11039 static int 11040 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11041 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11042 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 11043 { 11044 int32_t acked; 11045 int32_t nsegs; 11046 #ifdef TCPDEBUG 11047 /* 11048 * The size of tcp_saveipgen must be the size of the max ip header, 11049 * now IPv6. 11050 */ 11051 u_char tcp_saveipgen[IP6_HDR_LEN]; 11052 struct tcphdr tcp_savetcp; 11053 short ostate = 0; 11054 #endif 11055 int32_t under_pacing = 0; 11056 struct tcp_rack *rack; 11057 11058 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11059 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11060 return (0); 11061 } 11062 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11063 /* Above what we have sent? */ 11064 return (0); 11065 } 11066 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11067 /* We are retransmitting */ 11068 return (0); 11069 } 11070 if (__predict_false(tiwin == 0)) { 11071 /* zero window */ 11072 return (0); 11073 } 11074 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11075 /* We need a SYN or a FIN, unlikely.. */ 11076 return (0); 11077 } 11078 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11079 /* Timestamp is behind .. old ack with seq wrap? */ 11080 return (0); 11081 } 11082 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11083 /* Still recovering */ 11084 return (0); 11085 } 11086 rack = (struct tcp_rack *)tp->t_fb_ptr; 11087 if (rack->r_ctl.rc_sacked) { 11088 /* We have sack holes on our scoreboard */ 11089 return (0); 11090 } 11091 /* Ok if we reach here, we can process a fast-ack */ 11092 if (rack->gp_ready && 11093 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11094 under_pacing = 1; 11095 } 11096 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11097 rack_log_ack(tp, to, th, 0, 0); 11098 /* Did the window get updated? */ 11099 if (tiwin != tp->snd_wnd) { 11100 tp->snd_wnd = tiwin; 11101 rack_validate_fo_sendwin_up(tp, rack); 11102 tp->snd_wl1 = th->th_seq; 11103 if (tp->snd_wnd > tp->max_sndwnd) 11104 tp->max_sndwnd = tp->snd_wnd; 11105 } 11106 /* Do we exit persists? */ 11107 if ((rack->rc_in_persist != 0) && 11108 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11109 rack->r_ctl.rc_pace_min_segs))) { 11110 rack_exit_persist(tp, rack, cts); 11111 } 11112 /* Do we enter persists? */ 11113 if ((rack->rc_in_persist == 0) && 11114 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11115 TCPS_HAVEESTABLISHED(tp->t_state) && 11116 (tp->snd_max == tp->snd_una) && 11117 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 11118 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 11119 /* 11120 * Here the rwnd is less than 11121 * the pacing size, we are established, 11122 * nothing is outstanding, and there is 11123 * data to send. Enter persists. 11124 */ 11125 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11126 } 11127 /* 11128 * If last ACK falls within this segment's sequence numbers, record 11129 * the timestamp. NOTE that the test is modified according to the 11130 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
11131 */ 11132 if ((to->to_flags & TOF_TS) != 0 && 11133 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11134 tp->ts_recent_age = tcp_ts_getticks(); 11135 tp->ts_recent = to->to_tsval; 11136 } 11137 /* 11138 * This is a pure ack for outstanding data. 11139 */ 11140 KMOD_TCPSTAT_INC(tcps_predack); 11141 11142 /* 11143 * "bad retransmit" recovery. 11144 */ 11145 if ((tp->t_flags & TF_PREVVALID) && 11146 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11147 tp->t_flags &= ~TF_PREVVALID; 11148 if (tp->t_rxtshift == 1 && 11149 (int)(ticks - tp->t_badrxtwin) < 0) 11150 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 11151 } 11152 /* 11153 * Recalculate the transmit timer / rtt. 11154 * 11155 * Some boxes send broken timestamp replies during the SYN+ACK 11156 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11157 * and blow up the retransmit timer. 11158 */ 11159 acked = BYTES_THIS_ACK(tp, th); 11160 11161 #ifdef TCP_HHOOK 11162 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11163 hhook_run_tcp_est_in(tp, th, to); 11164 #endif 11165 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11166 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11167 if (acked) { 11168 struct mbuf *mfree; 11169 11170 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11171 SOCKBUF_LOCK(&so->so_snd); 11172 mfree = sbcut_locked(&so->so_snd, acked); 11173 tp->snd_una = th->th_ack; 11174 /* Note we want to hold the sb lock through the sendmap adjust */ 11175 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11176 /* Wake up the socket if we have room to write more */ 11177 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11178 sowwakeup_locked(so); 11179 m_freem(mfree); 11180 tp->t_rxtshift = 0; 11181 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11182 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11183 rack->rc_tlp_in_progress = 0; 11184 rack->r_ctl.rc_tlp_cnt_out = 0; 11185 /* 11186 * If it is the RXT timer we want to 11187 * stop it, so we can restart a TLP. 11188 */ 11189 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11190 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11191 #ifdef NETFLIX_HTTP_LOGGING 11192 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11193 #endif 11194 } 11195 /* 11196 * Let the congestion control algorithm update congestion control 11197 * related information. This typically means increasing the 11198 * congestion window. 11199 */ 11200 if (tp->snd_wnd < ctf_outstanding(tp)) { 11201 /* The peer collapsed the window */ 11202 rack_collapsed_window(rack); 11203 } else if (rack->rc_has_collapsed) 11204 rack_un_collapse_window(rack); 11205 11206 /* 11207 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11208 */ 11209 tp->snd_wl2 = th->th_ack; 11210 tp->t_dupacks = 0; 11211 m_freem(m); 11212 /* ND6_HINT(tp); *//* Some progress has been made. */ 11213 11214 /* 11215 * If all outstanding data are acked, stop retransmit timer, 11216 * otherwise restart timer using current (possibly backed-off) 11217 * value. If process is waiting for space, wakeup/selwakeup/signal. 11218 * If data are ready to send, let tcp_output decide between more 11219 * output or persist. 
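 * (Below, the rack timer is cancelled once snd_una == snd_max, and r_wanted_output is set whenever the send socket buffer is non-empty.)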
11220 */ 11221 #ifdef TCPDEBUG 11222 if (so->so_options & SO_DEBUG) 11223 tcp_trace(TA_INPUT, ostate, tp, 11224 (void *)tcp_saveipgen, 11225 &tcp_savetcp, 0); 11226 #endif 11227 if (under_pacing && 11228 (rack->use_fixed_rate == 0) && 11229 (rack->in_probe_rtt == 0) && 11230 rack->rc_gp_dyn_mul && 11231 rack->rc_always_pace) { 11232 /* Check if we are dragging bottom */ 11233 rack_check_bottom_drag(tp, rack, so, acked); 11234 } 11235 if (tp->snd_una == tp->snd_max) { 11236 tp->t_flags &= ~TF_PREVVALID; 11237 rack->r_ctl.retran_during_recovery = 0; 11238 rack->r_ctl.dsack_byte_cnt = 0; 11239 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11240 if (rack->r_ctl.rc_went_idle_time == 0) 11241 rack->r_ctl.rc_went_idle_time = 1; 11242 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11243 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11244 tp->t_acktime = 0; 11245 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11246 } 11247 if (acked && rack->r_fast_output) 11248 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11249 if (sbavail(&so->so_snd)) { 11250 rack->r_wanted_output = 1; 11251 } 11252 return (1); 11253 } 11254 11255 /* 11256 * Return value of 1, the TCB is unlocked and most 11257 * likely gone, return value of 0, the TCP is still 11258 * locked. 11259 */ 11260 static int 11261 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11262 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11263 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11264 { 11265 int32_t ret_val = 0; 11266 int32_t todrop; 11267 int32_t ourfinisacked = 0; 11268 struct tcp_rack *rack; 11269 11270 ctf_calc_rwin(so, tp); 11271 /* 11272 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11273 * SYN, drop the input. if seg contains a RST, then drop the 11274 * connection. if seg does not contain SYN, then drop it. Otherwise 11275 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11276 * tp->irs if seg contains ack then advance tp->snd_una if seg 11277 * contains an ECE and ECN support is enabled, the stream is ECN 11278 * capable. if SYN has been acked change to ESTABLISHED else 11279 * SYN_RCVD state arrange for segment to be acked (eventually) 11280 * continue processing rest of data/controls. 11281 */ 11282 if ((thflags & TH_ACK) && 11283 (SEQ_LEQ(th->th_ack, tp->iss) || 11284 SEQ_GT(th->th_ack, tp->snd_max))) { 11285 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11286 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11287 return (1); 11288 } 11289 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11290 TCP_PROBE5(connect__refused, NULL, tp, 11291 mtod(m, const char *), tp, th); 11292 tp = tcp_drop(tp, ECONNREFUSED); 11293 ctf_do_drop(m, tp); 11294 return (1); 11295 } 11296 if (thflags & TH_RST) { 11297 ctf_do_drop(m, tp); 11298 return (1); 11299 } 11300 if (!(thflags & TH_SYN)) { 11301 ctf_do_drop(m, tp); 11302 return (1); 11303 } 11304 tp->irs = th->th_seq; 11305 tcp_rcvseqinit(tp); 11306 rack = (struct tcp_rack *)tp->t_fb_ptr; 11307 if (thflags & TH_ACK) { 11308 int tfo_partial = 0; 11309 11310 KMOD_TCPSTAT_INC(tcps_connects); 11311 soisconnected(so); 11312 #ifdef MAC 11313 mac_socketpeer_set_from_mbuf(m, so); 11314 #endif 11315 /* Do window scaling on this connection? 
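 * (only when both TF_RCVD_SCALE and TF_REQ_SCALE are set, i.e. both sides agreed to it)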
*/ 11316 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11317 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11318 tp->rcv_scale = tp->request_r_scale; 11319 } 11320 tp->rcv_adv += min(tp->rcv_wnd, 11321 TCP_MAXWIN << tp->rcv_scale); 11322 /* 11323 * If not all the data that was sent in the TFO SYN 11324 * has been acked, resend the remainder right away. 11325 */ 11326 if (IS_FASTOPEN(tp->t_flags) && 11327 (tp->snd_una != tp->snd_max)) { 11328 tp->snd_nxt = th->th_ack; 11329 tfo_partial = 1; 11330 } 11331 /* 11332 * If there's data, delay ACK; if there's also a FIN ACKNOW 11333 * will be turned on later. 11334 */ 11335 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11336 rack_timer_cancel(tp, rack, 11337 rack->r_ctl.rc_rcvtime, __LINE__); 11338 tp->t_flags |= TF_DELACK; 11339 } else { 11340 rack->r_wanted_output = 1; 11341 tp->t_flags |= TF_ACKNOW; 11342 rack->rc_dack_toggle = 0; 11343 } 11344 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 11345 (V_tcp_do_ecn == 1)) { 11346 tp->t_flags2 |= TF2_ECN_PERMIT; 11347 KMOD_TCPSTAT_INC(tcps_ecn_shs); 11348 } 11349 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11350 /* 11351 * We advance snd_una for the 11352 * fast open case. If th_ack is 11353 * acknowledging data beyond 11354 * snd_una we can't just call 11355 * ack-processing since the 11356 * data stream in our send-map 11357 * will start at snd_una + 1 (one 11358 * beyond the SYN). If its just 11359 * equal we don't need to do that 11360 * and there is no send_map. 11361 */ 11362 tp->snd_una++; 11363 } 11364 /* 11365 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11366 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11367 */ 11368 tp->t_starttime = ticks; 11369 if (tp->t_flags & TF_NEEDFIN) { 11370 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11371 tp->t_flags &= ~TF_NEEDFIN; 11372 thflags &= ~TH_SYN; 11373 } else { 11374 tcp_state_change(tp, TCPS_ESTABLISHED); 11375 TCP_PROBE5(connect__established, NULL, tp, 11376 mtod(m, const char *), tp, th); 11377 rack_cc_conn_init(tp); 11378 } 11379 } else { 11380 /* 11381 * Received initial SYN in SYN-SENT[*] state => simultaneous 11382 * open. If segment contains CC option and there is a 11383 * cached CC, apply TAO test. If it succeeds, connection is * 11384 * half-synchronized. Otherwise, do 3-way handshake: 11385 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11386 * there was no CC option, clear cached CC value. 11387 */ 11388 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11389 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11390 } 11391 INP_WLOCK_ASSERT(tp->t_inpcb); 11392 /* 11393 * Advance th->th_seq to correspond to first data byte. If data, 11394 * trim to stay within window, dropping FIN if necessary. 11395 */ 11396 th->th_seq++; 11397 if (tlen > tp->rcv_wnd) { 11398 todrop = tlen - tp->rcv_wnd; 11399 m_adj(m, -todrop); 11400 tlen = tp->rcv_wnd; 11401 thflags &= ~TH_FIN; 11402 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11403 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11404 } 11405 tp->snd_wl1 = th->th_seq - 1; 11406 tp->rcv_up = th->th_seq; 11407 /* 11408 * Client side of transaction: already sent SYN and data. If the 11409 * remote host used T/TCP to validate the SYN, our data will be 11410 * ACK'd; if so, enter normal data segment processing in the middle 11411 * of step 5, ack processing. Otherwise, goto step 6. 
11412 */ 11413 if (thflags & TH_ACK) { 11414 /* For syn-sent we need to possibly update the rtt */ 11415 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11416 uint32_t t, mcts; 11417 11418 mcts = tcp_ts_getticks(); 11419 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11420 if (!tp->t_rttlow || tp->t_rttlow > t) 11421 tp->t_rttlow = t; 11422 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11423 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11424 tcp_rack_xmit_timer_commit(rack, tp); 11425 } 11426 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11427 return (ret_val); 11428 /* We may have changed to FIN_WAIT_1 above */ 11429 if (tp->t_state == TCPS_FIN_WAIT_1) { 11430 /* 11431 * In FIN_WAIT_1 STATE in addition to the processing 11432 * for the ESTABLISHED state if our FIN is now 11433 * acknowledged then enter FIN_WAIT_2. 11434 */ 11435 if (ourfinisacked) { 11436 /* 11437 * If we can't receive any more data, then 11438 * closing user can proceed. Starting the 11439 * timer is contrary to the specification, 11440 * but if we don't get a FIN we'll hang 11441 * forever. 11442 * 11443 * XXXjl: we should release the tp also, and 11444 * use a compressed state. 11445 */ 11446 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11447 soisdisconnected(so); 11448 tcp_timer_activate(tp, TT_2MSL, 11449 (tcp_fast_finwait2_recycle ? 11450 tcp_finwait2_timeout : 11451 TP_MAXIDLE(tp))); 11452 } 11453 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11454 } 11455 } 11456 } 11457 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11458 tiwin, thflags, nxt_pkt)); 11459 } 11460 11461 /* 11462 * Return value of 1, the TCB is unlocked and most 11463 * likely gone, return value of 0, the TCP is still 11464 * locked. 11465 */ 11466 static int 11467 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11468 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11469 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11470 { 11471 struct tcp_rack *rack; 11472 int32_t ret_val = 0; 11473 int32_t ourfinisacked = 0; 11474 11475 ctf_calc_rwin(so, tp); 11476 if ((thflags & TH_ACK) && 11477 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11478 SEQ_GT(th->th_ack, tp->snd_max))) { 11479 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11480 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11481 return (1); 11482 } 11483 rack = (struct tcp_rack *)tp->t_fb_ptr; 11484 if (IS_FASTOPEN(tp->t_flags)) { 11485 /* 11486 * When a TFO connection is in SYN_RECEIVED, the 11487 * only valid packets are the initial SYN, a 11488 * retransmit/copy of the initial SYN (possibly with 11489 * a subset of the original data), a valid ACK, a 11490 * FIN, or a RST. 
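 * Anything else is handled just below: a SYN|ACK draws a reset, a bare retransmitted SYN is ignored while an RXT/TLP/RACK timer is pending, and segments carrying none of ACK/FIN/RST are dropped.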
11491 */ 11492 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11493 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11494 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11495 return (1); 11496 } else if (thflags & TH_SYN) { 11497 /* non-initial SYN is ignored */ 11498 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11499 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11500 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11501 ctf_do_drop(m, NULL); 11502 return (0); 11503 } 11504 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11505 ctf_do_drop(m, NULL); 11506 return (0); 11507 } 11508 } 11509 if ((thflags & TH_RST) || 11510 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11511 return (ctf_process_rst(m, th, so, tp)); 11512 /* 11513 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11514 * it's less than ts_recent, drop it. 11515 */ 11516 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11517 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11518 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11519 return (ret_val); 11520 } 11521 /* 11522 * In the SYN-RECEIVED state, validate that the packet belongs to 11523 * this connection before trimming the data to fit the receive 11524 * window. Check the sequence number versus IRS since we know the 11525 * sequence numbers haven't wrapped. This is a partial fix for the 11526 * "LAND" DoS attack. 11527 */ 11528 if (SEQ_LT(th->th_seq, tp->irs)) { 11529 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11530 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11531 return (1); 11532 } 11533 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11534 &rack->r_ctl.challenge_ack_ts, 11535 &rack->r_ctl.challenge_ack_cnt)) { 11536 return (ret_val); 11537 } 11538 /* 11539 * If last ACK falls within this segment's sequence numbers, record 11540 * its timestamp. NOTE: 1) That the test incorporates suggestions 11541 * from the latest proposal of the tcplw@cray.com list (Braden 11542 * 1993/04/26). 2) That updating only on newer timestamps interferes 11543 * with our earlier PAWS tests, so this check should be solely 11544 * predicated on the sequence space of this segment. 3) That we 11545 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11546 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11547 * SEG.Len, This modified check allows us to overcome RFC1323's 11548 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11549 * p.869. In such cases, we can still calculate the RTT correctly 11550 * when RCV.NXT == Last.ACK.Sent. 11551 */ 11552 if ((to->to_flags & TOF_TS) != 0 && 11553 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11554 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11555 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11556 tp->ts_recent_age = tcp_ts_getticks(); 11557 tp->ts_recent = to->to_tsval; 11558 } 11559 tp->snd_wnd = tiwin; 11560 rack_validate_fo_sendwin_up(tp, rack); 11561 /* 11562 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11563 * is on (half-synchronized state), then queue data for later 11564 * processing; else drop segment and return. 11565 */ 11566 if ((thflags & TH_ACK) == 0) { 11567 if (IS_FASTOPEN(tp->t_flags)) { 11568 rack_cc_conn_init(tp); 11569 } 11570 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11571 tiwin, thflags, nxt_pkt)); 11572 } 11573 KMOD_TCPSTAT_INC(tcps_connects); 11574 soisconnected(so); 11575 /* Do window scaling? 
*/ 11576 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11577 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11578 tp->rcv_scale = tp->request_r_scale; 11579 } 11580 /* 11581 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11582 * FIN-WAIT-1 11583 */ 11584 tp->t_starttime = ticks; 11585 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11586 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11587 tp->t_tfo_pending = NULL; 11588 } 11589 if (tp->t_flags & TF_NEEDFIN) { 11590 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11591 tp->t_flags &= ~TF_NEEDFIN; 11592 } else { 11593 tcp_state_change(tp, TCPS_ESTABLISHED); 11594 TCP_PROBE5(accept__established, NULL, tp, 11595 mtod(m, const char *), tp, th); 11596 /* 11597 * TFO connections call cc_conn_init() during SYN 11598 * processing. Calling it again here for such connections 11599 * is not harmless as it would undo the snd_cwnd reduction 11600 * that occurs when a TFO SYN|ACK is retransmitted. 11601 */ 11602 if (!IS_FASTOPEN(tp->t_flags)) 11603 rack_cc_conn_init(tp); 11604 } 11605 /* 11606 * Account for the ACK of our SYN prior to 11607 * regular ACK processing below, except for 11608 * simultaneous SYN, which is handled later. 11609 */ 11610 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11611 tp->snd_una++; 11612 /* 11613 * If segment contains data or ACK, will call tcp_reass() later; if 11614 * not, do so now to pass queued data to user. 11615 */ 11616 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11617 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11618 (struct mbuf *)0); 11619 if (tp->t_flags & TF_WAKESOR) { 11620 tp->t_flags &= ~TF_WAKESOR; 11621 /* NB: sorwakeup_locked() does an implicit unlock. */ 11622 sorwakeup_locked(so); 11623 } 11624 } 11625 tp->snd_wl1 = th->th_seq - 1; 11626 /* For syn-recv we need to possibly update the rtt */ 11627 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11628 uint32_t t, mcts; 11629 11630 mcts = tcp_ts_getticks(); 11631 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11632 if (!tp->t_rttlow || tp->t_rttlow > t) 11633 tp->t_rttlow = t; 11634 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11635 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11636 tcp_rack_xmit_timer_commit(rack, tp); 11637 } 11638 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11639 return (ret_val); 11640 } 11641 if (tp->t_state == TCPS_FIN_WAIT_1) { 11642 /* We could have went to FIN_WAIT_1 (or EST) above */ 11643 /* 11644 * In FIN_WAIT_1 STATE in addition to the processing for the 11645 * ESTABLISHED state if our FIN is now acknowledged then 11646 * enter FIN_WAIT_2. 11647 */ 11648 if (ourfinisacked) { 11649 /* 11650 * If we can't receive any more data, then closing 11651 * user can proceed. Starting the timer is contrary 11652 * to the specification, but if we don't get a FIN 11653 * we'll hang forever. 11654 * 11655 * XXXjl: we should release the tp also, and use a 11656 * compressed state. 11657 */ 11658 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11659 soisdisconnected(so); 11660 tcp_timer_activate(tp, TT_2MSL, 11661 (tcp_fast_finwait2_recycle ? 11662 tcp_finwait2_timeout : 11663 TP_MAXIDLE(tp))); 11664 } 11665 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11666 } 11667 } 11668 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11669 tiwin, thflags, nxt_pkt)); 11670 } 11671 11672 /* 11673 * Return value of 1, the TCB is unlocked and most 11674 * likely gone, return value of 0, the TCP is still 11675 * locked. 
11676 */ 11677 static int 11678 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11679 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11680 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11681 { 11682 int32_t ret_val = 0; 11683 struct tcp_rack *rack; 11684 11685 /* 11686 * Header prediction: check for the two common cases of a 11687 * uni-directional data xfer. If the packet has no control flags, 11688 * is in-sequence, the window didn't change and we're not 11689 * retransmitting, it's a candidate. If the length is zero and the 11690 * ack moved forward, we're the sender side of the xfer. Just free 11691 * the data acked & wake any higher level process that was blocked 11692 * waiting for space. If the length is non-zero and the ack didn't 11693 * move, we're the receiver side. If we're getting packets in-order 11694 * (the reassembly queue is empty), add the data toc The socket 11695 * buffer and note that we need a delayed ack. Make sure that the 11696 * hidden state-flags are also off. Since we check for 11697 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11698 */ 11699 rack = (struct tcp_rack *)tp->t_fb_ptr; 11700 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11701 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11702 __predict_true(SEGQ_EMPTY(tp)) && 11703 __predict_true(th->th_seq == tp->rcv_nxt)) { 11704 if (tlen == 0) { 11705 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11706 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11707 return (0); 11708 } 11709 } else { 11710 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11711 tiwin, nxt_pkt, iptos)) { 11712 return (0); 11713 } 11714 } 11715 } 11716 ctf_calc_rwin(so, tp); 11717 11718 if ((thflags & TH_RST) || 11719 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11720 return (ctf_process_rst(m, th, so, tp)); 11721 11722 /* 11723 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11724 * synchronized state. 11725 */ 11726 if (thflags & TH_SYN) { 11727 ctf_challenge_ack(m, th, tp, &ret_val); 11728 return (ret_val); 11729 } 11730 /* 11731 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11732 * it's less than ts_recent, drop it. 11733 */ 11734 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11735 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11736 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11737 return (ret_val); 11738 } 11739 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11740 &rack->r_ctl.challenge_ack_ts, 11741 &rack->r_ctl.challenge_ack_cnt)) { 11742 return (ret_val); 11743 } 11744 /* 11745 * If last ACK falls within this segment's sequence numbers, record 11746 * its timestamp. NOTE: 1) That the test incorporates suggestions 11747 * from the latest proposal of the tcplw@cray.com list (Braden 11748 * 1993/04/26). 2) That updating only on newer timestamps interferes 11749 * with our earlier PAWS tests, so this check should be solely 11750 * predicated on the sequence space of this segment. 3) That we 11751 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11752 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11753 * SEG.Len, This modified check allows us to overcome RFC1323's 11754 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11755 * p.869. In such cases, we can still calculate the RTT correctly 11756 * when RCV.NXT == Last.ACK.Sent. 
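 * Concretely: with last_ack_sent == 1000, a bare ACK carrying a
 * timestamp and arriving with seq == 1000 and tlen == 0 passes the
 * modified test (1000 <= 1000 + 0) and refreshes ts_recent, whereas
 * the strict RFC 1323 "<" form would have rejected it.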
11757 */ 11758 if ((to->to_flags & TOF_TS) != 0 && 11759 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11760 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11761 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11762 tp->ts_recent_age = tcp_ts_getticks(); 11763 tp->ts_recent = to->to_tsval; 11764 } 11765 /* 11766 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11767 * is on (half-synchronized state), then queue data for later 11768 * processing; else drop segment and return. 11769 */ 11770 if ((thflags & TH_ACK) == 0) { 11771 if (tp->t_flags & TF_NEEDSYN) { 11772 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11773 tiwin, thflags, nxt_pkt)); 11774 11775 } else if (tp->t_flags & TF_ACKNOW) { 11776 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11777 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11778 return (ret_val); 11779 } else { 11780 ctf_do_drop(m, NULL); 11781 return (0); 11782 } 11783 } 11784 /* 11785 * Ack processing. 11786 */ 11787 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11788 return (ret_val); 11789 } 11790 if (sbavail(&so->so_snd)) { 11791 if (ctf_progress_timeout_check(tp, true)) { 11792 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11793 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11794 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11795 return (1); 11796 } 11797 } 11798 /* State changes only happen in rack_process_data() */ 11799 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11800 tiwin, thflags, nxt_pkt)); 11801 } 11802 11803 /* 11804 * Return value of 1, the TCB is unlocked and most 11805 * likely gone, return value of 0, the TCP is still 11806 * locked. 11807 */ 11808 static int 11809 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11810 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11811 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11812 { 11813 int32_t ret_val = 0; 11814 struct tcp_rack *rack; 11815 11816 rack = (struct tcp_rack *)tp->t_fb_ptr; 11817 ctf_calc_rwin(so, tp); 11818 if ((thflags & TH_RST) || 11819 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11820 return (ctf_process_rst(m, th, so, tp)); 11821 /* 11822 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11823 * synchronized state. 11824 */ 11825 if (thflags & TH_SYN) { 11826 ctf_challenge_ack(m, th, tp, &ret_val); 11827 return (ret_val); 11828 } 11829 /* 11830 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11831 * it's less than ts_recent, drop it. 11832 */ 11833 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11834 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11835 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11836 return (ret_val); 11837 } 11838 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11839 &rack->r_ctl.challenge_ack_ts, 11840 &rack->r_ctl.challenge_ack_cnt)) { 11841 return (ret_val); 11842 } 11843 /* 11844 * If last ACK falls within this segment's sequence numbers, record 11845 * its timestamp. NOTE: 1) That the test incorporates suggestions 11846 * from the latest proposal of the tcplw@cray.com list (Braden 11847 * 1993/04/26). 2) That updating only on newer timestamps interferes 11848 * with our earlier PAWS tests, so this check should be solely 11849 * predicated on the sequence space of this segment. 
3) That we 11850 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11851 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11852 * SEG.Len, This modified check allows us to overcome RFC1323's 11853 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11854 * p.869. In such cases, we can still calculate the RTT correctly 11855 * when RCV.NXT == Last.ACK.Sent. 11856 */ 11857 if ((to->to_flags & TOF_TS) != 0 && 11858 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11859 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11860 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11861 tp->ts_recent_age = tcp_ts_getticks(); 11862 tp->ts_recent = to->to_tsval; 11863 } 11864 /* 11865 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11866 * is on (half-synchronized state), then queue data for later 11867 * processing; else drop segment and return. 11868 */ 11869 if ((thflags & TH_ACK) == 0) { 11870 if (tp->t_flags & TF_NEEDSYN) { 11871 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11872 tiwin, thflags, nxt_pkt)); 11873 11874 } else if (tp->t_flags & TF_ACKNOW) { 11875 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11876 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11877 return (ret_val); 11878 } else { 11879 ctf_do_drop(m, NULL); 11880 return (0); 11881 } 11882 } 11883 /* 11884 * Ack processing. 11885 */ 11886 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11887 return (ret_val); 11888 } 11889 if (sbavail(&so->so_snd)) { 11890 if (ctf_progress_timeout_check(tp, true)) { 11891 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11892 tp, tick, PROGRESS_DROP, __LINE__); 11893 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11894 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11895 return (1); 11896 } 11897 } 11898 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11899 tiwin, thflags, nxt_pkt)); 11900 } 11901 11902 static int 11903 rack_check_data_after_close(struct mbuf *m, 11904 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11905 { 11906 struct tcp_rack *rack; 11907 11908 rack = (struct tcp_rack *)tp->t_fb_ptr; 11909 if (rack->rc_allow_data_af_clo == 0) { 11910 close_now: 11911 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11912 /* tcp_close will kill the inp pre-log the Reset */ 11913 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11914 tp = tcp_close(tp); 11915 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11916 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11917 return (1); 11918 } 11919 if (sbavail(&so->so_snd) == 0) 11920 goto close_now; 11921 /* Ok we allow data that is ignored and a followup reset */ 11922 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11923 tp->rcv_nxt = th->th_seq + *tlen; 11924 tp->t_flags2 |= TF2_DROP_AF_DATA; 11925 rack->r_wanted_output = 1; 11926 *tlen = 0; 11927 return (0); 11928 } 11929 11930 /* 11931 * Return value of 1, the TCB is unlocked and most 11932 * likely gone, return value of 0, the TCP is still 11933 * locked. 
11934 */ 11935 static int 11936 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11937 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11938 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11939 { 11940 int32_t ret_val = 0; 11941 int32_t ourfinisacked = 0; 11942 struct tcp_rack *rack; 11943 11944 rack = (struct tcp_rack *)tp->t_fb_ptr; 11945 ctf_calc_rwin(so, tp); 11946 11947 if ((thflags & TH_RST) || 11948 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11949 return (ctf_process_rst(m, th, so, tp)); 11950 /* 11951 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11952 * synchronized state. 11953 */ 11954 if (thflags & TH_SYN) { 11955 ctf_challenge_ack(m, th, tp, &ret_val); 11956 return (ret_val); 11957 } 11958 /* 11959 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11960 * it's less than ts_recent, drop it. 11961 */ 11962 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11963 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11964 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11965 return (ret_val); 11966 } 11967 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11968 &rack->r_ctl.challenge_ack_ts, 11969 &rack->r_ctl.challenge_ack_cnt)) { 11970 return (ret_val); 11971 } 11972 /* 11973 * If new data are received on a connection after the user processes 11974 * are gone, then RST the other end. 11975 */ 11976 if ((so->so_state & SS_NOFDREF) && tlen) { 11977 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11978 return (1); 11979 } 11980 /* 11981 * If last ACK falls within this segment's sequence numbers, record 11982 * its timestamp. NOTE: 1) That the test incorporates suggestions 11983 * from the latest proposal of the tcplw@cray.com list (Braden 11984 * 1993/04/26). 2) That updating only on newer timestamps interferes 11985 * with our earlier PAWS tests, so this check should be solely 11986 * predicated on the sequence space of this segment. 3) That we 11987 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11988 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11989 * SEG.Len, This modified check allows us to overcome RFC1323's 11990 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11991 * p.869. In such cases, we can still calculate the RTT correctly 11992 * when RCV.NXT == Last.ACK.Sent. 11993 */ 11994 if ((to->to_flags & TOF_TS) != 0 && 11995 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11996 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11997 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11998 tp->ts_recent_age = tcp_ts_getticks(); 11999 tp->ts_recent = to->to_tsval; 12000 } 12001 /* 12002 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12003 * is on (half-synchronized state), then queue data for later 12004 * processing; else drop segment and return. 12005 */ 12006 if ((thflags & TH_ACK) == 0) { 12007 if (tp->t_flags & TF_NEEDSYN) { 12008 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12009 tiwin, thflags, nxt_pkt)); 12010 } else if (tp->t_flags & TF_ACKNOW) { 12011 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12012 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12013 return (ret_val); 12014 } else { 12015 ctf_do_drop(m, NULL); 12016 return (0); 12017 } 12018 } 12019 /* 12020 * Ack processing. 
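 * rack_process_ack() does the heavy lifting; it sets ourfinisacked
 * once the peer has acknowledged our FIN (driving the FIN_WAIT_2
 * transition below) and returns non-zero when the segment was
 * dropped and the TCB may already be gone.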
12021 */ 12022 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12023 return (ret_val); 12024 } 12025 if (ourfinisacked) { 12026 /* 12027 * If we can't receive any more data, then closing user can 12028 * proceed. Starting the timer is contrary to the 12029 * specification, but if we don't get a FIN we'll hang 12030 * forever. 12031 * 12032 * XXXjl: we should release the tp also, and use a 12033 * compressed state. 12034 */ 12035 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 12036 soisdisconnected(so); 12037 tcp_timer_activate(tp, TT_2MSL, 12038 (tcp_fast_finwait2_recycle ? 12039 tcp_finwait2_timeout : 12040 TP_MAXIDLE(tp))); 12041 } 12042 tcp_state_change(tp, TCPS_FIN_WAIT_2); 12043 } 12044 if (sbavail(&so->so_snd)) { 12045 if (ctf_progress_timeout_check(tp, true)) { 12046 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12047 tp, tick, PROGRESS_DROP, __LINE__); 12048 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12049 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12050 return (1); 12051 } 12052 } 12053 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12054 tiwin, thflags, nxt_pkt)); 12055 } 12056 12057 /* 12058 * Return value of 1, the TCB is unlocked and most 12059 * likely gone, return value of 0, the TCP is still 12060 * locked. 12061 */ 12062 static int 12063 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12064 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12065 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12066 { 12067 int32_t ret_val = 0; 12068 int32_t ourfinisacked = 0; 12069 struct tcp_rack *rack; 12070 12071 rack = (struct tcp_rack *)tp->t_fb_ptr; 12072 ctf_calc_rwin(so, tp); 12073 12074 if ((thflags & TH_RST) || 12075 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12076 return (ctf_process_rst(m, th, so, tp)); 12077 /* 12078 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12079 * synchronized state. 12080 */ 12081 if (thflags & TH_SYN) { 12082 ctf_challenge_ack(m, th, tp, &ret_val); 12083 return (ret_val); 12084 } 12085 /* 12086 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12087 * it's less than ts_recent, drop it. 12088 */ 12089 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12090 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12091 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12092 return (ret_val); 12093 } 12094 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12095 &rack->r_ctl.challenge_ack_ts, 12096 &rack->r_ctl.challenge_ack_cnt)) { 12097 return (ret_val); 12098 } 12099 /* 12100 * If new data are received on a connection after the user processes 12101 * are gone, then RST the other end. 12102 */ 12103 if ((so->so_state & SS_NOFDREF) && tlen) { 12104 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12105 return (1); 12106 } 12107 /* 12108 * If last ACK falls within this segment's sequence numbers, record 12109 * its timestamp. NOTE: 1) That the test incorporates suggestions 12110 * from the latest proposal of the tcplw@cray.com list (Braden 12111 * 1993/04/26). 2) That updating only on newer timestamps interferes 12112 * with our earlier PAWS tests, so this check should be solely 12113 * predicated on the sequence space of this segment. 
3) That we 12114 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12115 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12116 * SEG.Len, This modified check allows us to overcome RFC1323's 12117 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12118 * p.869. In such cases, we can still calculate the RTT correctly 12119 * when RCV.NXT == Last.ACK.Sent. 12120 */ 12121 if ((to->to_flags & TOF_TS) != 0 && 12122 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12123 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12124 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12125 tp->ts_recent_age = tcp_ts_getticks(); 12126 tp->ts_recent = to->to_tsval; 12127 } 12128 /* 12129 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12130 * is on (half-synchronized state), then queue data for later 12131 * processing; else drop segment and return. 12132 */ 12133 if ((thflags & TH_ACK) == 0) { 12134 if (tp->t_flags & TF_NEEDSYN) { 12135 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12136 tiwin, thflags, nxt_pkt)); 12137 } else if (tp->t_flags & TF_ACKNOW) { 12138 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12139 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12140 return (ret_val); 12141 } else { 12142 ctf_do_drop(m, NULL); 12143 return (0); 12144 } 12145 } 12146 /* 12147 * Ack processing. 12148 */ 12149 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12150 return (ret_val); 12151 } 12152 if (ourfinisacked) { 12153 tcp_twstart(tp); 12154 m_freem(m); 12155 return (1); 12156 } 12157 if (sbavail(&so->so_snd)) { 12158 if (ctf_progress_timeout_check(tp, true)) { 12159 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12160 tp, tick, PROGRESS_DROP, __LINE__); 12161 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12162 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12163 return (1); 12164 } 12165 } 12166 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12167 tiwin, thflags, nxt_pkt)); 12168 } 12169 12170 /* 12171 * Return value of 1, the TCB is unlocked and most 12172 * likely gone, return value of 0, the TCP is still 12173 * locked. 12174 */ 12175 static int 12176 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12177 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12178 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12179 { 12180 int32_t ret_val = 0; 12181 int32_t ourfinisacked = 0; 12182 struct tcp_rack *rack; 12183 12184 rack = (struct tcp_rack *)tp->t_fb_ptr; 12185 ctf_calc_rwin(so, tp); 12186 12187 if ((thflags & TH_RST) || 12188 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12189 return (ctf_process_rst(m, th, so, tp)); 12190 /* 12191 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12192 * synchronized state. 12193 */ 12194 if (thflags & TH_SYN) { 12195 ctf_challenge_ack(m, th, tp, &ret_val); 12196 return (ret_val); 12197 } 12198 /* 12199 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12200 * it's less than ts_recent, drop it. 
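 * For example, with ts_recent == 5000 an arriving tsval of 4990
 * marks the segment as an old duplicate; ctf_ts_check() then either
 * drops it (after ACKing) or lets it through if ts_recent itself has
 * gone stale.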
12201 */ 12202 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12203 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12204 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12205 return (ret_val); 12206 } 12207 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12208 &rack->r_ctl.challenge_ack_ts, 12209 &rack->r_ctl.challenge_ack_cnt)) { 12210 return (ret_val); 12211 } 12212 /* 12213 * If new data are received on a connection after the user processes 12214 * are gone, then RST the other end. 12215 */ 12216 if ((so->so_state & SS_NOFDREF) && tlen) { 12217 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12218 return (1); 12219 } 12220 /* 12221 * If last ACK falls within this segment's sequence numbers, record 12222 * its timestamp. NOTE: 1) That the test incorporates suggestions 12223 * from the latest proposal of the tcplw@cray.com list (Braden 12224 * 1993/04/26). 2) That updating only on newer timestamps interferes 12225 * with our earlier PAWS tests, so this check should be solely 12226 * predicated on the sequence space of this segment. 3) That we 12227 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12228 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12229 * SEG.Len, This modified check allows us to overcome RFC1323's 12230 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12231 * p.869. In such cases, we can still calculate the RTT correctly 12232 * when RCV.NXT == Last.ACK.Sent. 12233 */ 12234 if ((to->to_flags & TOF_TS) != 0 && 12235 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12236 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12237 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12238 tp->ts_recent_age = tcp_ts_getticks(); 12239 tp->ts_recent = to->to_tsval; 12240 } 12241 /* 12242 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12243 * is on (half-synchronized state), then queue data for later 12244 * processing; else drop segment and return. 12245 */ 12246 if ((thflags & TH_ACK) == 0) { 12247 if (tp->t_flags & TF_NEEDSYN) { 12248 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12249 tiwin, thflags, nxt_pkt)); 12250 } else if (tp->t_flags & TF_ACKNOW) { 12251 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12252 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12253 return (ret_val); 12254 } else { 12255 ctf_do_drop(m, NULL); 12256 return (0); 12257 } 12258 } 12259 /* 12260 * case TCPS_LAST_ACK: Ack processing. 12261 */ 12262 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12263 return (ret_val); 12264 } 12265 if (ourfinisacked) { 12266 tp = tcp_close(tp); 12267 ctf_do_drop(m, tp); 12268 return (1); 12269 } 12270 if (sbavail(&so->so_snd)) { 12271 if (ctf_progress_timeout_check(tp, true)) { 12272 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12273 tp, tick, PROGRESS_DROP, __LINE__); 12274 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12275 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12276 return (1); 12277 } 12278 } 12279 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12280 tiwin, thflags, nxt_pkt)); 12281 } 12282 12283 /* 12284 * Return value of 1, the TCB is unlocked and most 12285 * likely gone, return value of 0, the TCP is still 12286 * locked. 
12287 */ 12288 static int 12289 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12290 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12291 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12292 { 12293 int32_t ret_val = 0; 12294 int32_t ourfinisacked = 0; 12295 struct tcp_rack *rack; 12296 12297 rack = (struct tcp_rack *)tp->t_fb_ptr; 12298 ctf_calc_rwin(so, tp); 12299 12300 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12301 if ((thflags & TH_RST) || 12302 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12303 return (ctf_process_rst(m, th, so, tp)); 12304 /* 12305 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12306 * synchronized state. 12307 */ 12308 if (thflags & TH_SYN) { 12309 ctf_challenge_ack(m, th, tp, &ret_val); 12310 return (ret_val); 12311 } 12312 /* 12313 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12314 * it's less than ts_recent, drop it. 12315 */ 12316 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12317 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12318 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12319 return (ret_val); 12320 } 12321 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12322 &rack->r_ctl.challenge_ack_ts, 12323 &rack->r_ctl.challenge_ack_cnt)) { 12324 return (ret_val); 12325 } 12326 /* 12327 * If new data are received on a connection after the user processes 12328 * are gone, then RST the other end. 12329 */ 12330 if ((so->so_state & SS_NOFDREF) && 12331 tlen) { 12332 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12333 return (1); 12334 } 12335 /* 12336 * If last ACK falls within this segment's sequence numbers, record 12337 * its timestamp. NOTE: 1) That the test incorporates suggestions 12338 * from the latest proposal of the tcplw@cray.com list (Braden 12339 * 1993/04/26). 2) That updating only on newer timestamps interferes 12340 * with our earlier PAWS tests, so this check should be solely 12341 * predicated on the sequence space of this segment. 3) That we 12342 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12343 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12344 * SEG.Len, This modified check allows us to overcome RFC1323's 12345 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12346 * p.869. In such cases, we can still calculate the RTT correctly 12347 * when RCV.NXT == Last.ACK.Sent. 12348 */ 12349 if ((to->to_flags & TOF_TS) != 0 && 12350 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12351 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12352 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12353 tp->ts_recent_age = tcp_ts_getticks(); 12354 tp->ts_recent = to->to_tsval; 12355 } 12356 /* 12357 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12358 * is on (half-synchronized state), then queue data for later 12359 * processing; else drop segment and return. 12360 */ 12361 if ((thflags & TH_ACK) == 0) { 12362 if (tp->t_flags & TF_NEEDSYN) { 12363 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12364 tiwin, thflags, nxt_pkt)); 12365 } else if (tp->t_flags & TF_ACKNOW) { 12366 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12367 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12368 return (ret_val); 12369 } else { 12370 ctf_do_drop(m, NULL); 12371 return (0); 12372 } 12373 } 12374 /* 12375 * Ack processing. 
12376 */ 12377 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12378 return (ret_val); 12379 } 12380 if (sbavail(&so->so_snd)) { 12381 if (ctf_progress_timeout_check(tp, true)) { 12382 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12383 tp, tick, PROGRESS_DROP, __LINE__); 12384 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12385 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12386 return (1); 12387 } 12388 } 12389 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12390 tiwin, thflags, nxt_pkt)); 12391 } 12392 12393 static void inline 12394 rack_clear_rate_sample(struct tcp_rack *rack) 12395 { 12396 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12397 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12398 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12399 } 12400 12401 static void 12402 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12403 { 12404 uint64_t bw_est, rate_wanted; 12405 int chged = 0; 12406 uint32_t user_max, orig_min, orig_max; 12407 12408 orig_min = rack->r_ctl.rc_pace_min_segs; 12409 orig_max = rack->r_ctl.rc_pace_max_segs; 12410 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12411 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12412 chged = 1; 12413 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12414 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12415 if (user_max != rack->r_ctl.rc_pace_max_segs) 12416 chged = 1; 12417 } 12418 if (rack->rc_force_max_seg) { 12419 rack->r_ctl.rc_pace_max_segs = user_max; 12420 } else if (rack->use_fixed_rate) { 12421 bw_est = rack_get_bw(rack); 12422 if ((rack->r_ctl.crte == NULL) || 12423 (bw_est != rack->r_ctl.crte->rate)) { 12424 rack->r_ctl.rc_pace_max_segs = user_max; 12425 } else { 12426 /* We are pacing right at the hardware rate */ 12427 uint32_t segsiz; 12428 12429 segsiz = min(ctf_fixed_maxseg(tp), 12430 rack->r_ctl.rc_pace_min_segs); 12431 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12432 tp, bw_est, segsiz, 0, 12433 rack->r_ctl.crte, NULL); 12434 } 12435 } else if (rack->rc_always_pace) { 12436 if (rack->r_ctl.gp_bw || 12437 #ifdef NETFLIX_PEAKRATE 12438 rack->rc_tp->t_maxpeakrate || 12439 #endif 12440 rack->r_ctl.init_rate) { 12441 /* We have a rate of some sort set */ 12442 uint32_t orig; 12443 12444 bw_est = rack_get_bw(rack); 12445 orig = rack->r_ctl.rc_pace_max_segs; 12446 if (fill_override) 12447 rate_wanted = *fill_override; 12448 else 12449 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12450 if (rate_wanted) { 12451 /* We have something */ 12452 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12453 rate_wanted, 12454 ctf_fixed_maxseg(rack->rc_tp)); 12455 } else 12456 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12457 if (orig != rack->r_ctl.rc_pace_max_segs) 12458 chged = 1; 12459 } else if ((rack->r_ctl.gp_bw == 0) && 12460 (rack->r_ctl.rc_pace_max_segs == 0)) { 12461 /* 12462 * If we have nothing limit us to bursting 12463 * out IW sized pieces. 
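 * rc_init_window() returns the initial window in bytes, so until we
 * have a goodput or configured rate estimate a single pacing burst
 * is capped at roughly one IW.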
12464 */ 12465 chged = 1; 12466 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12467 } 12468 } 12469 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12470 chged = 1; 12471 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12472 } 12473 if (chged) 12474 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12475 } 12476 12477 12478 static void 12479 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12480 { 12481 #ifdef INET6 12482 struct ip6_hdr *ip6 = NULL; 12483 #endif 12484 #ifdef INET 12485 struct ip *ip = NULL; 12486 #endif 12487 struct udphdr *udp = NULL; 12488 12489 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12490 #ifdef INET6 12491 if (rack->r_is_v6) { 12492 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12493 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12494 if (tp->t_port) { 12495 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12496 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12497 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12498 udp->uh_dport = tp->t_port; 12499 rack->r_ctl.fsb.udp = udp; 12500 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12501 } else 12502 { 12503 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12504 rack->r_ctl.fsb.udp = NULL; 12505 } 12506 tcpip_fillheaders(rack->rc_inp, 12507 tp->t_port, 12508 ip6, rack->r_ctl.fsb.th); 12509 } else 12510 #endif /* INET6 */ 12511 { 12512 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12513 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12514 if (tp->t_port) { 12515 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12516 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12517 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12518 udp->uh_dport = tp->t_port; 12519 rack->r_ctl.fsb.udp = udp; 12520 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12521 } else 12522 { 12523 rack->r_ctl.fsb.udp = NULL; 12524 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12525 } 12526 tcpip_fillheaders(rack->rc_inp, 12527 tp->t_port, 12528 ip, rack->r_ctl.fsb.th); 12529 } 12530 rack->r_fsb_inited = 1; 12531 } 12532 12533 static int 12534 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12535 { 12536 /* 12537 * Allocate the larger of spaces V6 if available else just 12538 * V4 and include udphdr (overbook) 12539 */ 12540 #ifdef INET6 12541 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12542 #else 12543 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12544 #endif 12545 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12546 M_TCPFSB, M_NOWAIT|M_ZERO); 12547 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12548 return (ENOMEM); 12549 } 12550 rack->r_fsb_inited = 0; 12551 return (0); 12552 } 12553 12554 static int 12555 rack_init(struct tcpcb *tp) 12556 { 12557 struct tcp_rack *rack = NULL; 12558 struct rack_sendmap *insret; 12559 uint32_t iwin, snt, us_cts; 12560 int err; 12561 12562 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12563 if (tp->t_fb_ptr == NULL) { 12564 /* 12565 * We need to allocate memory but cant. The INP and INP_INFO 12566 * locks and they are recusive (happens during setup. 
So a 12567 * scheme to drop the locks fails :( 12568 * 12569 */ 12570 return (ENOMEM); 12571 } 12572 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12573 12574 rack = (struct tcp_rack *)tp->t_fb_ptr; 12575 RB_INIT(&rack->r_ctl.rc_mtree); 12576 TAILQ_INIT(&rack->r_ctl.rc_free); 12577 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12578 rack->rc_tp = tp; 12579 rack->rc_inp = tp->t_inpcb; 12580 /* Set the flag */ 12581 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12582 /* Probably not needed but lets be sure */ 12583 rack_clear_rate_sample(rack); 12584 /* 12585 * Save off the default values, socket options will poke 12586 * at these if pacing is not on or we have not yet 12587 * reached where pacing is on (gp_ready/fixed enabled). 12588 * When they get set into the CC module (when gp_ready 12589 * is enabled or we enable fixed) then we will set these 12590 * values into the CC and place in here the old values 12591 * so we have a restoral. Then we will set the flag 12592 * rc_pacing_cc_set. That way whenever we turn off pacing 12593 * or switch off this stack, we will know to go restore 12594 * the saved values. 12595 */ 12596 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12597 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12598 /* We want abe like behavior as well */ 12599 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12600 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12601 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12602 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12603 rack->r_ctl.roundends = tp->snd_max; 12604 if (use_rack_rr) 12605 rack->use_rack_rr = 1; 12606 if (V_tcp_delack_enabled) 12607 tp->t_delayed_ack = 1; 12608 else 12609 tp->t_delayed_ack = 0; 12610 #ifdef TCP_ACCOUNTING 12611 if (rack_tcp_accounting) { 12612 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12613 } 12614 #endif 12615 if (rack_enable_shared_cwnd) 12616 rack->rack_enable_scwnd = 1; 12617 rack->rc_user_set_max_segs = rack_hptsi_segments; 12618 rack->rc_force_max_seg = 0; 12619 if (rack_use_imac_dack) 12620 rack->rc_dack_mode = 1; 12621 TAILQ_INIT(&rack->r_ctl.opt_list); 12622 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12623 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12624 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12625 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12626 rack->r_ctl.rc_highest_us_rtt = 0; 12627 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12628 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12629 if (rack_use_cmp_acks) 12630 rack->r_use_cmp_ack = 1; 12631 if (rack_disable_prr) 12632 rack->rack_no_prr = 1; 12633 if (rack_gp_no_rec_chg) 12634 rack->rc_gp_no_rec_chg = 1; 12635 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12636 rack->rc_always_pace = 1; 12637 if (rack->use_fixed_rate || rack->gp_ready) 12638 rack_set_cc_pacing(rack); 12639 } else 12640 rack->rc_always_pace = 0; 12641 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12642 rack->r_mbuf_queue = 1; 12643 else 12644 rack->r_mbuf_queue = 0; 12645 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12646 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12647 else 12648 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12649 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12650 if (rack_limits_scwnd) 12651 rack->r_limit_scw = 1; 12652 else 12653 rack->r_limit_scw = 0; 12654 rack->rc_labc = V_tcp_abc_l_var; 12655 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12656 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12657 
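/*
 * The block below seeds this connection's control block from the
 * global sysctl defaults (rate sample method, TLP threshold use, PRR
 * send amount, minimum timeout and so on); per-socket options may
 * still override these later.
 */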
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12658 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12659 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12660 rack->r_ctl.rc_min_to = rack_min_to; 12661 microuptime(&rack->r_ctl.act_rcv_time); 12662 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12663 rack->r_running_late = 0; 12664 rack->r_running_early = 0; 12665 rack->rc_init_win = rack_default_init_window; 12666 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12667 if (rack_hw_up_only) 12668 rack->r_up_only = 1; 12669 if (rack_do_dyn_mul) { 12670 /* When dynamic adjustment is on CA needs to start at 100% */ 12671 rack->rc_gp_dyn_mul = 1; 12672 if (rack_do_dyn_mul >= 100) 12673 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12674 } else 12675 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12676 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12677 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12678 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12679 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12680 rack_probertt_filter_life); 12681 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12682 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12683 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12684 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12685 rack->r_ctl.rc_time_probertt_starts = 0; 12686 if (rack_dsack_std_based & 0x1) { 12687 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12688 rack->rc_rack_tmr_std_based = 1; 12689 } 12690 if (rack_dsack_std_based & 0x2) { 12691 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12692 rack->rc_rack_use_dsack = 1; 12693 } 12694 /* We require at least one measurement, even if the sysctl is 0 */ 12695 if (rack_req_measurements) 12696 rack->r_ctl.req_measurements = rack_req_measurements; 12697 else 12698 rack->r_ctl.req_measurements = 1; 12699 if (rack_enable_hw_pacing) 12700 rack->rack_hdw_pace_ena = 1; 12701 if (rack_hw_rate_caps) 12702 rack->r_rack_hw_rate_caps = 1; 12703 /* Do we force on detection? 
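 * (i.e. should SACK attack detection be unconditionally enabled; the
 * knob only exists when NETFLIX_EXP_DETECTION is compiled in,
 * otherwise do_detection stays off).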
*/ 12704 #ifdef NETFLIX_EXP_DETECTION 12705 if (tcp_force_detection) 12706 rack->do_detection = 1; 12707 else 12708 #endif 12709 rack->do_detection = 0; 12710 if (rack_non_rxt_use_cr) 12711 rack->rack_rec_nonrxt_use_cr = 1; 12712 err = rack_init_fsb(tp, rack); 12713 if (err) { 12714 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12715 tp->t_fb_ptr = NULL; 12716 return (err); 12717 } 12718 if (tp->snd_una != tp->snd_max) { 12719 /* Create a send map for the current outstanding data */ 12720 struct rack_sendmap *rsm; 12721 12722 rsm = rack_alloc(rack); 12723 if (rsm == NULL) { 12724 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12725 tp->t_fb_ptr = NULL; 12726 return (ENOMEM); 12727 } 12728 rsm->r_no_rtt_allowed = 1; 12729 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12730 rsm->r_rtr_cnt = 1; 12731 rsm->r_rtr_bytes = 0; 12732 if (tp->t_flags & TF_SENTFIN) { 12733 rsm->r_end = tp->snd_max - 1; 12734 rsm->r_flags |= RACK_HAS_FIN; 12735 } else { 12736 rsm->r_end = tp->snd_max; 12737 } 12738 if (tp->snd_una == tp->iss) { 12739 /* The data space is one beyond snd_una */ 12740 rsm->r_flags |= RACK_HAS_SYN; 12741 rsm->r_start = tp->iss; 12742 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12743 } else 12744 rsm->r_start = tp->snd_una; 12745 rsm->r_dupack = 0; 12746 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12747 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12748 if (rsm->m) 12749 rsm->orig_m_len = rsm->m->m_len; 12750 else 12751 rsm->orig_m_len = 0; 12752 } else { 12753 /* 12754 * This can happen if we have a stand-alone FIN or 12755 * SYN. 12756 */ 12757 rsm->m = NULL; 12758 rsm->orig_m_len = 0; 12759 rsm->soff = 0; 12760 } 12761 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12762 #ifdef INVARIANTS 12763 if (insret != NULL) { 12764 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12765 insret, rack, rsm); 12766 } 12767 #endif 12768 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12769 rsm->r_in_tmap = 1; 12770 } 12771 /* 12772 * Timers in Rack are kept in microseconds so lets 12773 * convert any initial incoming variables 12774 * from ticks into usecs. Note that we 12775 * also change the values of t_srtt and t_rttvar, if 12776 * they are non-zero. They are kept with a 5 12777 * bit decimal so we have to carefully convert 12778 * these to get the full precision. 12779 */ 12780 rack_convert_rtts(tp); 12781 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12782 if (rack_do_hystart) { 12783 struct sockopt sopt; 12784 struct cc_newreno_opts opt; 12785 12786 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 12787 sopt.sopt_dir = SOPT_SET; 12788 opt.name = CC_NEWRENO_ENABLE_HYSTART; 12789 opt.val = rack_do_hystart; 12790 if (CC_ALGO(tp)->ctl_output != NULL) 12791 (void)CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 12792 } 12793 if (rack_def_profile) 12794 rack_set_profile(rack, rack_def_profile); 12795 /* Cancel the GP measurement in progress */ 12796 tp->t_flags &= ~TF_GPUTINPROG; 12797 if (SEQ_GT(tp->snd_max, tp->iss)) 12798 snt = tp->snd_max - tp->iss; 12799 else 12800 snt = 0; 12801 iwin = rc_init_window(rack); 12802 if (snt < iwin) { 12803 /* We are not past the initial window 12804 * so we need to make sure cwnd is 12805 * correct. 12806 */ 12807 if (tp->snd_cwnd < iwin) 12808 tp->snd_cwnd = iwin; 12809 /* 12810 * If we are within the initial window 12811 * we want ssthresh to be unlimited. 
Setting 12812 * it to the rwnd (which the default stack does 12813 * and older racks) is not really a good idea 12814 * since we want to be in SS and grow both the 12815 * cwnd and the rwnd (via dynamic rwnd growth). If 12816 * we set it to the rwnd then as the peer grows its 12817 * rwnd we will be stuck in CA and never hit SS. 12818 * 12819 * Its far better to raise it up high (this takes the 12820 * risk that there as been a loss already, probably 12821 * we should have an indicator in all stacks of loss 12822 * but we don't), but considering the normal use this 12823 * is a risk worth taking. The consequences of not 12824 * hitting SS are far worse than going one more time 12825 * into it early on (before we have sent even a IW). 12826 * It is highly unlikely that we will have had a loss 12827 * before getting the IW out. 12828 */ 12829 tp->snd_ssthresh = 0xffffffff; 12830 } 12831 rack_stop_all_timers(tp); 12832 /* Lets setup the fsb block */ 12833 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12834 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12835 __LINE__, RACK_RTTS_INIT); 12836 return (0); 12837 } 12838 12839 static int 12840 rack_handoff_ok(struct tcpcb *tp) 12841 { 12842 if ((tp->t_state == TCPS_CLOSED) || 12843 (tp->t_state == TCPS_LISTEN)) { 12844 /* Sure no problem though it may not stick */ 12845 return (0); 12846 } 12847 if ((tp->t_state == TCPS_SYN_SENT) || 12848 (tp->t_state == TCPS_SYN_RECEIVED)) { 12849 /* 12850 * We really don't know if you support sack, 12851 * you have to get to ESTAB or beyond to tell. 12852 */ 12853 return (EAGAIN); 12854 } 12855 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12856 /* 12857 * Rack will only send a FIN after all data is acknowledged. 12858 * So in this case we have more data outstanding. We can't 12859 * switch stacks until either all data and only the FIN 12860 * is left (in which case rack_init() now knows how 12861 * to deal with that) <or> all is acknowledged and we 12862 * are only left with incoming data, though why you 12863 * would want to switch to rack after all data is acknowledged 12864 * I have no idea (rrs)! 12865 */ 12866 return (EAGAIN); 12867 } 12868 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12869 return (0); 12870 } 12871 /* 12872 * If we reach here we don't do SACK on this connection so we can 12873 * never do rack. 12874 */ 12875 return (EINVAL); 12876 } 12877 12878 12879 static void 12880 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12881 { 12882 int ack_cmp = 0; 12883 12884 if (tp->t_fb_ptr) { 12885 struct tcp_rack *rack; 12886 struct rack_sendmap *rsm, *nrsm, *rm; 12887 12888 rack = (struct tcp_rack *)tp->t_fb_ptr; 12889 if (tp->t_in_pkt) { 12890 /* 12891 * It is unsafe to process the packets since a 12892 * reset may be lurking in them (its rare but it 12893 * can occur). If we were to find a RST, then we 12894 * would end up dropping the connection and the 12895 * INP lock, so when we return the caller (tcp_usrreq) 12896 * will blow up when it trys to unlock the inp. 12897 */ 12898 struct mbuf *save, *m; 12899 12900 m = tp->t_in_pkt; 12901 tp->t_in_pkt = NULL; 12902 tp->t_tail_pkt = NULL; 12903 while (m) { 12904 save = m->m_nextpkt; 12905 m->m_nextpkt = NULL; 12906 m_freem(m); 12907 m = save; 12908 } 12909 if ((tp->t_inpcb) && 12910 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP)) 12911 ack_cmp = 1; 12912 if (ack_cmp) { 12913 /* Total if we used large or small (if ack-cmp was used). 
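 * (INP_MBUF_L_ACKS means this connection had been moved to the
 * larger mbufs used for compressed acks.)
 */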
if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS)
12915 counter_u64_add(rack_large_ackcmp, 1);
12916 else
12917 counter_u64_add(rack_small_ackcmp, 1);
12918 }
12919 }
12920 tp->t_flags &= ~TF_FORCEDATA;
12921 #ifdef NETFLIX_SHARED_CWND
12922 if (rack->r_ctl.rc_scw) {
12923 uint32_t limit;
12924
12925 if (rack->r_limit_scw)
12926 limit = max(1, rack->r_ctl.rc_lowest_us_rtt);
12927 else
12928 limit = 0;
12929 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw,
12930 rack->r_ctl.rc_scw_index,
12931 limit);
12932 rack->r_ctl.rc_scw = NULL;
12933 }
12934 #endif
12935 if (rack->r_ctl.fsb.tcp_ip_hdr) {
12936 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB);
12937 rack->r_ctl.fsb.tcp_ip_hdr = NULL;
12938 rack->r_ctl.fsb.th = NULL;
12939 }
12940 /* Convert srtt and rttvar back to ticks, with the fractional remainder folded back in below. */
12941 if (tp->t_srtt > 1) {
12942 uint32_t val, frac;
12943
12944 val = USEC_2_TICKS(tp->t_srtt);
12945 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz);
12946 tp->t_srtt = val << TCP_RTT_SHIFT;
12947 /*
12948 * frac is the fractional part that is left
12949 * over from converting to hz and shifting.
12950 * We need to convert this to the 5 bit
12951 * remainder.
12952 */
12953 if (frac) {
12954 if (hz == 1000) {
12955 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC);
12956 } else {
12957 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC);
12958 }
12959 tp->t_srtt += frac;
12960 }
12961 }
12962 if (tp->t_rttvar) {
12963 uint32_t val, frac;
12964
12965 val = USEC_2_TICKS(tp->t_rttvar);
12966 frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz);
12967 tp->t_rttvar = val << TCP_RTTVAR_SHIFT;
12968 /*
12969 * frac is the fractional part that is left
12970 * over from converting to hz and shifting.
12971 * We need to convert this to the 5 bit
12972 * remainder.
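 * (Same math as for t_srtt above: with hz == 1000 an input of 30250
 * usec gives val == 30 ticks and frac == 250 usec, and the scaled
 * remainder added back is (250 * TCP_RTT_SCALE) / 1000 == 8.)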
12973 */ 12974 if (frac) { 12975 if (hz == 1000) { 12976 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12977 } else { 12978 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12979 } 12980 tp->t_rttvar += frac; 12981 } 12982 } 12983 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12984 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12985 if (rack->rc_always_pace) { 12986 tcp_decrement_paced_conn(); 12987 rack_undo_cc_pacing(rack); 12988 rack->rc_always_pace = 0; 12989 } 12990 /* Clean up any options if they were not applied */ 12991 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12992 struct deferred_opt_list *dol; 12993 12994 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12995 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12996 free(dol, M_TCPDO); 12997 } 12998 /* rack does not use force data but other stacks may clear it */ 12999 if (rack->r_ctl.crte != NULL) { 13000 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 13001 rack->rack_hdrw_pacing = 0; 13002 rack->r_ctl.crte = NULL; 13003 } 13004 #ifdef TCP_BLACKBOX 13005 tcp_log_flowend(tp); 13006 #endif 13007 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 13008 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 13009 #ifdef INVARIANTS 13010 if (rm != rsm) { 13011 panic("At fini, rack:%p rsm:%p rm:%p", 13012 rack, rsm, rm); 13013 } 13014 #endif 13015 uma_zfree(rack_zone, rsm); 13016 } 13017 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13018 while (rsm) { 13019 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 13020 uma_zfree(rack_zone, rsm); 13021 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 13022 } 13023 rack->rc_free_cnt = 0; 13024 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 13025 tp->t_fb_ptr = NULL; 13026 } 13027 if (tp->t_inpcb) { 13028 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 13029 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 13030 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 13031 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 13032 /* Cancel the GP measurement in progress */ 13033 tp->t_flags &= ~TF_GPUTINPROG; 13034 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 13035 } 13036 /* Make sure snd_nxt is correctly set */ 13037 tp->snd_nxt = tp->snd_max; 13038 } 13039 13040 static void 13041 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 13042 { 13043 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 13044 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 13045 } 13046 switch (tp->t_state) { 13047 case TCPS_SYN_SENT: 13048 rack->r_state = TCPS_SYN_SENT; 13049 rack->r_substate = rack_do_syn_sent; 13050 break; 13051 case TCPS_SYN_RECEIVED: 13052 rack->r_state = TCPS_SYN_RECEIVED; 13053 rack->r_substate = rack_do_syn_recv; 13054 break; 13055 case TCPS_ESTABLISHED: 13056 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13057 rack->r_state = TCPS_ESTABLISHED; 13058 rack->r_substate = rack_do_established; 13059 break; 13060 case TCPS_CLOSE_WAIT: 13061 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13062 rack->r_state = TCPS_CLOSE_WAIT; 13063 rack->r_substate = rack_do_close_wait; 13064 break; 13065 case TCPS_FIN_WAIT_1: 13066 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13067 rack->r_state = TCPS_FIN_WAIT_1; 13068 rack->r_substate = rack_do_fin_wait_1; 13069 break; 13070 case TCPS_CLOSING: 13071 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13072 rack->r_state = TCPS_CLOSING; 13073 rack->r_substate = rack_do_closing; 13074 break; 13075 case TCPS_LAST_ACK: 13076 rack_set_pace_segments(tp, rack, 
__LINE__, NULL); 13077 rack->r_state = TCPS_LAST_ACK; 13078 rack->r_substate = rack_do_lastack; 13079 break; 13080 case TCPS_FIN_WAIT_2: 13081 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13082 rack->r_state = TCPS_FIN_WAIT_2; 13083 rack->r_substate = rack_do_fin_wait_2; 13084 break; 13085 case TCPS_LISTEN: 13086 case TCPS_CLOSED: 13087 case TCPS_TIME_WAIT: 13088 default: 13089 break; 13090 }; 13091 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13092 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13093 13094 } 13095 13096 static void 13097 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13098 { 13099 /* 13100 * We received an ack, and then did not 13101 * call send or were bounced out due to the 13102 * hpts was running. Now a timer is up as well, is 13103 * it the right timer? 13104 */ 13105 struct rack_sendmap *rsm; 13106 int tmr_up; 13107 13108 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13109 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13110 return; 13111 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13112 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13113 (tmr_up == PACE_TMR_RXT)) { 13114 /* Should be an RXT */ 13115 return; 13116 } 13117 if (rsm == NULL) { 13118 /* Nothing outstanding? */ 13119 if (tp->t_flags & TF_DELACK) { 13120 if (tmr_up == PACE_TMR_DELACK) 13121 /* We are supposed to have delayed ack up and we do */ 13122 return; 13123 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13124 /* 13125 * if we hit enobufs then we would expect the possiblity 13126 * of nothing outstanding and the RXT up (and the hptsi timer). 13127 */ 13128 return; 13129 } else if (((V_tcp_always_keepalive || 13130 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13131 (tp->t_state <= TCPS_CLOSING)) && 13132 (tmr_up == PACE_TMR_KEEP) && 13133 (tp->snd_max == tp->snd_una)) { 13134 /* We should have keep alive up and we do */ 13135 return; 13136 } 13137 } 13138 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13139 ((tmr_up == PACE_TMR_TLP) || 13140 (tmr_up == PACE_TMR_RACK) || 13141 (tmr_up == PACE_TMR_RXT))) { 13142 /* 13143 * Either a Rack, TLP or RXT is fine if we 13144 * have outstanding data. 13145 */ 13146 return; 13147 } else if (tmr_up == PACE_TMR_DELACK) { 13148 /* 13149 * If the delayed ack was going to go off 13150 * before the rtx/tlp/rack timer were going to 13151 * expire, then that would be the timer in control. 13152 * Note we don't check the time here trusting the 13153 * code is correct. 13154 */ 13155 return; 13156 } 13157 /* 13158 * Ok the timer originally started is not what we want now. 13159 * We will force the hpts to be stopped if any, and restart 13160 * with the slot set to what was in the saved slot. 
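 * If a pacing timer (PACE_PKT_OUTPUT) had not yet expired, note how
 * early we are acting (r_early / rc_agg_early) before pulling the
 * connection off the hpts wheel.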
13161 */ 13162 if (rack->rc_inp->inp_in_hpts) { 13163 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13164 uint32_t us_cts; 13165 13166 us_cts = tcp_get_usecs(NULL); 13167 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13168 rack->r_early = 1; 13169 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13170 } 13171 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13172 } 13173 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 13174 } 13175 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13176 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13177 } 13178 13179 13180 static void 13181 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13182 { 13183 if ((SEQ_LT(tp->snd_wl1, seq) || 13184 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13185 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13186 /* keep track of pure window updates */ 13187 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13188 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13189 tp->snd_wnd = tiwin; 13190 rack_validate_fo_sendwin_up(tp, rack); 13191 tp->snd_wl1 = seq; 13192 tp->snd_wl2 = ack; 13193 if (tp->snd_wnd > tp->max_sndwnd) 13194 tp->max_sndwnd = tp->snd_wnd; 13195 rack->r_wanted_output = 1; 13196 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13197 tp->snd_wnd = tiwin; 13198 rack_validate_fo_sendwin_up(tp, rack); 13199 tp->snd_wl1 = seq; 13200 tp->snd_wl2 = ack; 13201 } else { 13202 /* Not a valid win update */ 13203 return; 13204 } 13205 if (tp->snd_wnd > tp->max_sndwnd) 13206 tp->max_sndwnd = tp->snd_wnd; 13207 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 13208 /* The peer collapsed the window */ 13209 rack_collapsed_window(rack); 13210 } else if (rack->rc_has_collapsed) 13211 rack_un_collapse_window(rack); 13212 /* Do we exit persists? */ 13213 if ((rack->rc_in_persist != 0) && 13214 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13215 rack->r_ctl.rc_pace_min_segs))) { 13216 rack_exit_persist(tp, rack, cts); 13217 } 13218 /* Do we enter persists? */ 13219 if ((rack->rc_in_persist == 0) && 13220 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13221 TCPS_HAVEESTABLISHED(tp->t_state) && 13222 (tp->snd_max == tp->snd_una) && 13223 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13224 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13225 /* 13226 * Here the rwnd is less than 13227 * the pacing size, we are established, 13228 * nothing is outstanding, and there is 13229 * data to send. Enter persists. 
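 * For example (illustrative numbers only, not from a trace): with
 * rc_pace_min_segs at 2 * 1448 bytes and rc_high_rwnd at 64KB, a
 * peer-advertised window of 1000 bytes is below min(32768, 2896), so
 * with snd_max == snd_una and unsent data queued we start the persist
 * timer below rather than dribbling out a runt segment.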
*/ 13230 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13231 } 13232 } 13233 13234 13235 static void 13236 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13237 { 13238 13239 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13240 union tcp_log_stackspecific log; 13241 struct timeval ltv; 13242 char tcp_hdr_buf[60]; 13243 struct tcphdr *th; 13244 struct timespec ts; 13245 uint32_t orig_snd_una; 13246 uint8_t xx = 0; 13247 13248 #ifdef NETFLIX_HTTP_LOGGING 13249 struct http_sendfile_track *http_req; 13250 13251 if (SEQ_GT(ae->ack, tp->snd_una)) { 13252 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13253 } else { 13254 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13255 } 13256 #endif 13257 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13258 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 13259 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 13260 if (rack->rack_no_prr == 0) 13261 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13262 else 13263 log.u_bbr.flex1 = 0; 13264 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13265 log.u_bbr.use_lt_bw <<= 1; 13266 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13267 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13268 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13269 log.u_bbr.pkts_out = tp->t_maxseg; 13270 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13271 log.u_bbr.flex7 = 1; 13272 log.u_bbr.lost = ae->flags; 13273 log.u_bbr.cwnd_gain = ackval; 13274 log.u_bbr.pacing_gain = 0x2; 13275 if (ae->flags & TSTMP_HDWR) { 13276 /* Record the hardware timestamp if present */ 13277 log.u_bbr.flex3 = M_TSTMP; 13278 ts.tv_sec = ae->timestamp / 1000000000; 13279 ts.tv_nsec = ae->timestamp % 1000000000; 13280 ltv.tv_sec = ts.tv_sec; 13281 ltv.tv_usec = ts.tv_nsec / 1000; 13282 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 13283 } else if (ae->flags & TSTMP_LRO) { 13284 /* Record the LRO arrival timestamp */ 13285 log.u_bbr.flex3 = M_TSTMP_LRO; 13286 ts.tv_sec = ae->timestamp / 1000000000; 13287 ts.tv_nsec = ae->timestamp % 1000000000; 13288 ltv.tv_sec = ts.tv_sec; 13289 ltv.tv_usec = ts.tv_nsec / 1000; 13290 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 13291 } 13292 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 13293 /* Log the rcv time */ 13294 log.u_bbr.delRate = ae->timestamp; 13295 #ifdef NETFLIX_HTTP_LOGGING 13296 log.u_bbr.applimited = tp->t_http_closed; 13297 log.u_bbr.applimited <<= 8; 13298 log.u_bbr.applimited |= tp->t_http_open; 13299 log.u_bbr.applimited <<= 8; 13300 log.u_bbr.applimited |= tp->t_http_req; 13301 if (http_req) { 13302 /* Copy out any client req info */ 13303 /* seconds */ 13304 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13305 /* useconds */ 13306 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13307 log.u_bbr.rttProp = http_req->timestamp; 13308 log.u_bbr.cur_del_rate = http_req->start; 13309 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13310 log.u_bbr.flex8 |= 1; 13311 } else { 13312 log.u_bbr.flex8 |= 2; 13313 log.u_bbr.bw_inuse = http_req->end; 13314 } 13315 log.u_bbr.flex6 = http_req->start_seq; 13316 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13317 log.u_bbr.flex8 |= 4; 13318 log.u_bbr.epoch = http_req->end_seq; 13319 } 13320 } 13321 #endif 13322 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13323 th = (struct tcphdr *)tcp_hdr_buf; 13324 th->th_seq = ae->seq; 13325 th->th_ack = ae->ack; 13326 th->th_win = ae->win; 13327 /* Now fill in the ports */ 13328 th->th_sport = tp->t_inpcb->inp_fport; 13329
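/*
 * Note: the header being built here is synthesized purely for the
 * blackbox logger so a compressed-ack entry is recorded as if it were
 * a normal inbound segment; when the entry carries a timestamp the
 * standard 12-byte option block (NOP, NOP, TIMESTAMP, len 10, TSval,
 * TSecr) is rebuilt below as well. Nothing assembled here feeds back
 * into protocol processing.
 */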
th->th_dport = tp->t_inpcb->inp_lport; 13330 th->th_flags = ae->flags & 0xff; 13331 /* Now do we have a timestamp option? */ 13332 if (ae->flags & HAS_TSTMP) { 13333 u_char *cp; 13334 uint32_t val; 13335 13336 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13337 cp = (u_char *)(th + 1); 13338 *cp = TCPOPT_NOP; 13339 cp++; 13340 *cp = TCPOPT_NOP; 13341 cp++; 13342 *cp = TCPOPT_TIMESTAMP; 13343 cp++; 13344 *cp = TCPOLEN_TIMESTAMP; 13345 cp++; 13346 val = htonl(ae->ts_value); 13347 bcopy((char *)&val, 13348 (char *)cp, sizeof(uint32_t)); 13349 val = htonl(ae->ts_echo); 13350 bcopy((char *)&val, 13351 (char *)(cp + 4), sizeof(uint32_t)); 13352 } else 13353 th->th_off = (sizeof(struct tcphdr) >> 2); 13354 13355 /* 13356 * For sane logging we need to play a little trick. 13357 * If the ack were fully processed we would have moved 13358 * snd_una to high_seq, but since compressed acks are 13359 * processed in two phases, at this point (logging) snd_una 13360 * won't be advanced. So we would see multiple acks showing 13361 * the advancement. We can prevent that by "pretending" that 13362 * snd_una was advanced and then un-advancing it so that the 13363 * logging code has the right value for tlb_snd_una. 13364 */ 13365 if (tp->snd_una != high_seq) { 13366 orig_snd_una = tp->snd_una; 13367 tp->snd_una = high_seq; 13368 xx = 1; 13369 } else 13370 xx = 0; 13371 TCP_LOG_EVENTP(tp, th, 13372 &tp->t_inpcb->inp_socket->so_rcv, 13373 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13374 0, &log, true, <v); 13375 if (xx) { 13376 tp->snd_una = orig_snd_una; 13377 } 13378 } 13379 13380 } 13381 13382 static int 13383 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13384 { 13385 /* 13386 * Handle a "special" compressed ack mbuf. Each incoming 13387 * ack has only four possible dispositions: 13388 * 13389 * A) It moves the cum-ack forward 13390 * B) It is behind the cum-ack. 13391 * C) It is a window-update ack. 13392 * D) It is a dup-ack. 13393 * 13394 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13395 * in the incoming mbuf. We also need to still pay attention 13396 * to nxt_pkt since there may be another packet after this 13397 * one. 13398 */ 13399 #ifdef TCP_ACCOUNTING 13400 uint64_t ts_val; 13401 uint64_t rdstc; 13402 #endif 13403 int segsiz; 13404 struct timespec ts; 13405 struct tcp_rack *rack; 13406 struct tcp_ackent *ae; 13407 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13408 int cnt, i, did_out, ourfinisacked = 0; 13409 struct tcpopt to_holder, *to = NULL; 13410 int win_up_req = 0; 13411 int nsegs = 0; 13412 int under_pacing = 1; 13413 int recovery = 0; 13414 int idx; 13415 #ifdef TCP_ACCOUNTING 13416 sched_pin(); 13417 #endif 13418 rack = (struct tcp_rack *)tp->t_fb_ptr; 13419 if (rack->gp_ready && 13420 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13421 under_pacing = 0; 13422 else 13423 under_pacing = 1; 13424 13425 if (rack->r_state != tp->t_state) 13426 rack_set_state(tp, rack); 13427 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13428 (tp->t_flags & TF_GPUTINPROG)) { 13429 /* 13430 * We have a goodput in progress 13431 * and we have entered a late state. 13432 * Do we have enough data in the sb 13433 * to handle the GPUT request? 
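 * Illustrative check (made-up sequence numbers): with snd_una = 1000,
 * gput_seq = 3000 and gput_ack = 11000 we need 10000 bytes still
 * sitting in the send buffer, the 8000-byte measurement window plus
 * the 2000 bytes in front of it that are not yet cum-acked; if
 * sbavail() reports less than that, the measurement can never
 * complete and is cancelled below.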
13434 */ 13435 uint32_t bytes; 13436 13437 bytes = tp->gput_ack - tp->gput_seq; 13438 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13439 bytes += tp->gput_seq - tp->snd_una; 13440 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13441 /* 13442 * There are not enough bytes in the socket 13443 * buffer that have been sent to cover this 13444 * measurement. Cancel it. 13445 */ 13446 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13447 rack->r_ctl.rc_gp_srtt /*flex1*/, 13448 tp->gput_seq, 13449 0, 0, 18, __LINE__, NULL, 0); 13450 tp->t_flags &= ~TF_GPUTINPROG; 13451 } 13452 } 13453 to = &to_holder; 13454 to->to_flags = 0; 13455 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13456 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13457 cnt = m->m_len / sizeof(struct tcp_ackent); 13458 idx = cnt / 5; 13459 if (idx >= MAX_NUM_OF_CNTS) 13460 idx = MAX_NUM_OF_CNTS - 1; 13461 counter_u64_add(rack_proc_comp_ack[idx], 1); 13462 counter_u64_add(rack_multi_single_eq, cnt); 13463 high_seq = tp->snd_una; 13464 the_win = tp->snd_wnd; 13465 win_seq = tp->snd_wl1; 13466 win_upd_ack = tp->snd_wl2; 13467 cts = tcp_tv_to_usectick(tv); 13468 ms_cts = tcp_tv_to_mssectick(tv); 13469 segsiz = ctf_fixed_maxseg(tp); 13470 if ((rack->rc_gp_dyn_mul) && 13471 (rack->use_fixed_rate == 0) && 13472 (rack->rc_always_pace)) { 13473 /* Check in on probertt */ 13474 rack_check_probe_rtt(rack, cts); 13475 } 13476 for (i = 0; i < cnt; i++) { 13477 #ifdef TCP_ACCOUNTING 13478 ts_val = get_cyclecount(); 13479 #endif 13480 rack_clear_rate_sample(rack); 13481 ae = ((mtod(m, struct tcp_ackent *)) + i); 13482 /* Setup the window */ 13483 tiwin = ae->win << tp->snd_scale; 13484 /* figure out the type of ack */ 13485 if (SEQ_LT(ae->ack, high_seq)) { 13486 /* Case B*/ 13487 ae->ack_val_set = ACK_BEHIND; 13488 } else if (SEQ_GT(ae->ack, high_seq)) { 13489 /* Case A */ 13490 ae->ack_val_set = ACK_CUMACK; 13491 } else if (tiwin == the_win) { 13492 /* Case D */ 13493 ae->ack_val_set = ACK_DUPACK; 13494 } else { 13495 /* Case C */ 13496 ae->ack_val_set = ACK_RWND; 13497 } 13498 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13499 /* Validate timestamp */ 13500 if (ae->flags & HAS_TSTMP) { 13501 /* Setup for a timestamp */ 13502 to->to_flags = TOF_TS; 13503 ae->ts_echo -= tp->ts_offset; 13504 to->to_tsecr = ae->ts_echo; 13505 to->to_tsval = ae->ts_value; 13506 /* 13507 * If echoed timestamp is later than the current time, fall back to 13508 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13509 * were used when this connection was established. 
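 * (ae->ts_echo already had ts_offset removed above, so the check below
 * simply refuses to take an RTT sample from an echo that claims to be
 * from the future; the ts_recent/PAWS-style test on ts_value follows
 * it.)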
13510 */ 13511 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13512 to->to_tsecr = 0; 13513 if (tp->ts_recent && 13514 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13515 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13516 #ifdef TCP_ACCOUNTING 13517 rdstc = get_cyclecount(); 13518 if (rdstc > ts_val) { 13519 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13520 (rdstc - ts_val)); 13521 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13522 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13523 } 13524 } 13525 #endif 13526 continue; 13527 } 13528 } 13529 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13530 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13531 tp->ts_recent_age = tcp_ts_getticks(); 13532 tp->ts_recent = ae->ts_value; 13533 } 13534 } else { 13535 /* Setup for a no options */ 13536 to->to_flags = 0; 13537 } 13538 /* Update the rcv time and perform idle reduction possibly */ 13539 if (tp->t_idle_reduce && 13540 (tp->snd_max == tp->snd_una) && 13541 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13542 counter_u64_add(rack_input_idle_reduces, 1); 13543 rack_cc_after_idle(rack, tp); 13544 } 13545 tp->t_rcvtime = ticks; 13546 /* Now what about ECN? */ 13547 if (tp->t_flags2 & TF2_ECN_PERMIT) { 13548 if (ae->flags & TH_CWR) { 13549 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 13550 tp->t_flags |= TF_ACKNOW; 13551 } 13552 switch (ae->codepoint & IPTOS_ECN_MASK) { 13553 case IPTOS_ECN_CE: 13554 tp->t_flags2 |= TF2_ECN_SND_ECE; 13555 KMOD_TCPSTAT_INC(tcps_ecn_ce); 13556 break; 13557 case IPTOS_ECN_ECT0: 13558 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 13559 break; 13560 case IPTOS_ECN_ECT1: 13561 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 13562 break; 13563 } 13564 13565 /* Process a packet differently from RFC3168. */ 13566 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint); 13567 /* Congestion experienced. */ 13568 if (ae->flags & TH_ECE) { 13569 rack_cong_signal(tp, CC_ECN, ae->ack); 13570 } 13571 } 13572 #ifdef TCP_ACCOUNTING 13573 /* Count for the specific type of ack in */ 13574 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13575 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13576 tp->tcp_cnt_counters[ae->ack_val_set]++; 13577 } 13578 #endif 13579 /* 13580 * Note how we could move up these in the determination 13581 * above, but we don't so that way the timestamp checks (and ECN) 13582 * is done first before we do any processing on the ACK. 13583 * The non-compressed path through the code has this 13584 * weakness (noted by @jtl) that it actually does some 13585 * processing before verifying the timestamp information. 13586 * We don't take that path here which is why we set 13587 * the ack_val_set first, do the timestamp and ecn 13588 * processing, and then look at what we have setup. 
*/ 13590 if (ae->ack_val_set == ACK_BEHIND) { 13591 /* 13592 * Case B: an ack behind the cum-ack suggests reordering, but only count it 13593 * while our receive window is open; otherwise it could be a keep-alive or persist probe. 13594 */ 13595 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13596 counter_u64_add(rack_reorder_seen, 1); 13597 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13598 } 13599 } else if (ae->ack_val_set == ACK_DUPACK) { 13600 /* Case D */ 13601 rack_strike_dupack(rack); 13602 } else if (ae->ack_val_set == ACK_RWND) { 13603 /* Case C */ 13604 win_up_req = 1; 13605 win_upd_ack = ae->ack; 13606 win_seq = ae->seq; 13607 the_win = tiwin; 13608 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13609 } else { 13610 /* Case A */ 13611 if (SEQ_GT(ae->ack, tp->snd_max)) { 13612 /* 13613 * We just send an ack since the incoming 13614 * ack is beyond the largest seq we sent. 13615 */ 13616 if ((tp->t_flags & TF_ACKNOW) == 0) { 13617 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13618 if (tp->t_flags & TF_ACKNOW) 13619 rack->r_wanted_output = 1; 13620 } 13621 } else { 13622 nsegs++; 13623 /* If the window changed, set up to update */ 13624 if (tiwin != tp->snd_wnd) { 13625 win_upd_ack = ae->ack; 13626 win_seq = ae->seq; 13627 the_win = tiwin; 13628 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13629 } 13630 #ifdef TCP_ACCOUNTING 13631 /* Account for the acks */ 13632 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13633 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13634 } 13635 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13636 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13637 #endif 13638 high_seq = ae->ack; 13639 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends)) { 13640 rack->r_ctl.current_round++; 13641 rack->r_ctl.roundends = tp->snd_max; 13642 if (CC_ALGO(tp)->newround != NULL) { 13643 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13644 } 13645 } 13646 /* Setup our act_rcv_time */ 13647 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13648 ts.tv_sec = ae->timestamp / 1000000000; 13649 ts.tv_nsec = ae->timestamp % 1000000000; 13650 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13651 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13652 } else { 13653 rack->r_ctl.act_rcv_time = *tv; 13654 } 13655 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13656 if (rack->rc_dsack_round_seen) { 13657 /* Is the dsack round over?
*/ 13658 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13659 /* Yes it is */ 13660 rack->rc_dsack_round_seen = 0; 13661 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13662 } 13663 } 13664 } 13665 } 13666 /* And lets be sure to commit the rtt measurements for this ack */ 13667 tcp_rack_xmit_timer_commit(rack, tp); 13668 #ifdef TCP_ACCOUNTING 13669 rdstc = get_cyclecount(); 13670 if (rdstc > ts_val) { 13671 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13672 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13673 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13674 if (ae->ack_val_set == ACK_CUMACK) 13675 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13676 } 13677 } 13678 #endif 13679 } 13680 #ifdef TCP_ACCOUNTING 13681 ts_val = get_cyclecount(); 13682 #endif 13683 acked_amount = acked = (high_seq - tp->snd_una); 13684 if (acked) { 13685 if (rack->sack_attack_disable == 0) 13686 rack_do_decay(rack); 13687 if (acked >= segsiz) { 13688 /* 13689 * You only get credit for 13690 * MSS and greater (and you get extra 13691 * credit for larger cum-ack moves). 13692 */ 13693 int ac; 13694 13695 ac = acked / segsiz; 13696 rack->r_ctl.ack_count += ac; 13697 counter_u64_add(rack_ack_total, ac); 13698 } 13699 if (rack->r_ctl.ack_count > 0xfff00000) { 13700 /* 13701 * reduce the number to keep us under 13702 * a uint32_t. 13703 */ 13704 rack->r_ctl.ack_count /= 2; 13705 rack->r_ctl.sack_count /= 2; 13706 } 13707 if (tp->t_flags & TF_NEEDSYN) { 13708 /* 13709 * T/TCP: Connection was half-synchronized, and our SYN has 13710 * been ACK'd (so connection is now fully synchronized). Go 13711 * to non-starred state, increment snd_una for ACK of SYN, 13712 * and check if we can do window scaling. 13713 */ 13714 tp->t_flags &= ~TF_NEEDSYN; 13715 tp->snd_una++; 13716 acked_amount = acked = (high_seq - tp->snd_una); 13717 } 13718 if (acked > sbavail(&so->so_snd)) 13719 acked_amount = sbavail(&so->so_snd); 13720 #ifdef NETFLIX_EXP_DETECTION 13721 /* 13722 * We only care on a cum-ack move if we are in a sack-disabled 13723 * state. We have already added in to the ack_count, and we never 13724 * would disable on a cum-ack move, so we only care to do the 13725 * detection if it may "undo" it, i.e. we were in disabled already. 13726 */ 13727 if (rack->sack_attack_disable) 13728 rack_do_detection(tp, rack, acked_amount, segsiz); 13729 #endif 13730 if (IN_FASTRECOVERY(tp->t_flags) && 13731 (rack->rack_no_prr == 0)) 13732 rack_update_prr(tp, rack, acked_amount, high_seq); 13733 if (IN_RECOVERY(tp->t_flags)) { 13734 if (SEQ_LT(high_seq, tp->snd_recover) && 13735 (SEQ_LT(high_seq, tp->snd_max))) { 13736 tcp_rack_partialack(tp); 13737 } else { 13738 rack_post_recovery(tp, high_seq); 13739 recovery = 1; 13740 } 13741 } 13742 /* Handle the rack-log-ack part (sendmap) */ 13743 if ((sbused(&so->so_snd) == 0) && 13744 (acked > acked_amount) && 13745 (tp->t_state >= TCPS_FIN_WAIT_1) && 13746 (tp->t_flags & TF_SENTFIN)) { 13747 /* 13748 * We must be sure our fin 13749 * was sent and acked (we can be 13750 * in FIN_WAIT_1 without having 13751 * sent the fin). 13752 */ 13753 ourfinisacked = 1; 13754 /* 13755 * Lets make sure snd_una is updated 13756 * since most likely acked_amount = 0 (it 13757 * should be). 13758 */ 13759 tp->snd_una = high_seq; 13760 } 13761 /* Did we make a RTO error? 
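 * That is: TF_PREVVALID is still set, timestamps were not in use to
 * tell us otherwise, and this ack arrived while we are still inside
 * the t_badrxtwin window after the first retransmission, so the
 * timeout was most likely spurious and CC_RTO_ERR lets congestion
 * control undo the rate reduction.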
*/ 13762 if ((tp->t_flags & TF_PREVVALID) && 13763 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13764 tp->t_flags &= ~TF_PREVVALID; 13765 if (tp->t_rxtshift == 1 && 13766 (int)(ticks - tp->t_badrxtwin) < 0) 13767 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13768 } 13769 /* Handle the data in the socket buffer */ 13770 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13771 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13772 if (acked_amount > 0) { 13773 struct mbuf *mfree; 13774 13775 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13776 SOCKBUF_LOCK(&so->so_snd); 13777 mfree = sbcut_locked(&so->so_snd, acked_amount); 13778 tp->snd_una = high_seq; 13779 /* Note we want to hold the sb lock through the sendmap adjust */ 13780 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13781 /* Wake up the socket if we have room to write more */ 13782 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13783 sowwakeup_locked(so); 13784 m_freem(mfree); 13785 } 13786 /* update progress */ 13787 tp->t_acktime = ticks; 13788 rack_log_progress_event(rack, tp, tp->t_acktime, 13789 PROGRESS_UPDATE, __LINE__); 13790 /* Clear out shifts and such */ 13791 tp->t_rxtshift = 0; 13792 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13793 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13794 rack->rc_tlp_in_progress = 0; 13795 rack->r_ctl.rc_tlp_cnt_out = 0; 13796 /* Send recover and snd_nxt must be dragged along */ 13797 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13798 tp->snd_recover = tp->snd_una; 13799 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13800 tp->snd_nxt = tp->snd_una; 13801 /* 13802 * If the RXT timer is running we want to 13803 * stop it, so we can restart a TLP (or new RXT). 13804 */ 13805 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13806 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13807 #ifdef NETFLIX_HTTP_LOGGING 13808 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13809 #endif 13810 tp->snd_wl2 = high_seq; 13811 tp->t_dupacks = 0; 13812 if (under_pacing && 13813 (rack->use_fixed_rate == 0) && 13814 (rack->in_probe_rtt == 0) && 13815 rack->rc_gp_dyn_mul && 13816 rack->rc_always_pace) { 13817 /* Check if we are dragging bottom */ 13818 rack_check_bottom_drag(tp, rack, so, acked); 13819 } 13820 if (tp->snd_una == tp->snd_max) { 13821 tp->t_flags &= ~TF_PREVVALID; 13822 rack->r_ctl.retran_during_recovery = 0; 13823 rack->r_ctl.dsack_byte_cnt = 0; 13824 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13825 if (rack->r_ctl.rc_went_idle_time == 0) 13826 rack->r_ctl.rc_went_idle_time = 1; 13827 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13828 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13829 tp->t_acktime = 0; 13830 /* Set so we might enter persists... */ 13831 rack->r_wanted_output = 1; 13832 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13833 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13834 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13835 (sbavail(&so->so_snd) == 0) && 13836 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13837 /* 13838 * The socket was gone and the 13839 * peer sent data (not now in the past), time to 13840 * reset him. 
13841 */ 13842 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13843 /* tcp_close will kill the inp pre-log the Reset */ 13844 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13845 #ifdef TCP_ACCOUNTING 13846 rdstc = get_cyclecount(); 13847 if (rdstc > ts_val) { 13848 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13849 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13850 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13851 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13852 } 13853 } 13854 #endif 13855 m_freem(m); 13856 tp = tcp_close(tp); 13857 if (tp == NULL) { 13858 #ifdef TCP_ACCOUNTING 13859 sched_unpin(); 13860 #endif 13861 return (1); 13862 } 13863 /* 13864 * We would normally do drop-with-reset which would 13865 * send back a reset. We can't since we don't have 13866 * all the needed bits. Instead lets arrange for 13867 * a call to tcp_output(). That way since we 13868 * are in the closed state we will generate a reset. 13869 * 13870 * Note if tcp_accounting is on we don't unpin since 13871 * we do that after the goto label. 13872 */ 13873 goto send_out_a_rst; 13874 } 13875 if ((sbused(&so->so_snd) == 0) && 13876 (tp->t_state >= TCPS_FIN_WAIT_1) && 13877 (tp->t_flags & TF_SENTFIN)) { 13878 /* 13879 * If we can't receive any more data, then closing user can 13880 * proceed. Starting the timer is contrary to the 13881 * specification, but if we don't get a FIN we'll hang 13882 * forever. 13883 * 13884 */ 13885 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13886 soisdisconnected(so); 13887 tcp_timer_activate(tp, TT_2MSL, 13888 (tcp_fast_finwait2_recycle ? 13889 tcp_finwait2_timeout : 13890 TP_MAXIDLE(tp))); 13891 } 13892 if (ourfinisacked == 0) { 13893 /* 13894 * We don't change to fin-wait-2 if we have our fin acked 13895 * which means we are probably in TCPS_CLOSING. 13896 */ 13897 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13898 } 13899 } 13900 } 13901 /* Wake up the socket if we have room to write more */ 13902 if (sbavail(&so->so_snd)) { 13903 rack->r_wanted_output = 1; 13904 if (ctf_progress_timeout_check(tp, true)) { 13905 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13906 tp, tick, PROGRESS_DROP, __LINE__); 13907 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 13908 /* 13909 * We cheat here and don't send a RST, we should send one 13910 * when the pacer drops the connection. 
13911 */ 13912 #ifdef TCP_ACCOUNTING 13913 rdstc = get_cyclecount(); 13914 if (rdstc > ts_val) { 13915 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13916 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13917 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13918 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13919 } 13920 } 13921 sched_unpin(); 13922 #endif 13923 INP_WUNLOCK(rack->rc_inp); 13924 m_freem(m); 13925 return (1); 13926 } 13927 } 13928 if (ourfinisacked) { 13929 switch(tp->t_state) { 13930 case TCPS_CLOSING: 13931 #ifdef TCP_ACCOUNTING 13932 rdstc = get_cyclecount(); 13933 if (rdstc > ts_val) { 13934 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13935 (rdstc - ts_val)); 13936 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13937 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13938 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13939 } 13940 } 13941 sched_unpin(); 13942 #endif 13943 tcp_twstart(tp); 13944 m_freem(m); 13945 return (1); 13946 break; 13947 case TCPS_LAST_ACK: 13948 #ifdef TCP_ACCOUNTING 13949 rdstc = get_cyclecount(); 13950 if (rdstc > ts_val) { 13951 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13952 (rdstc - ts_val)); 13953 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13954 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13955 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13956 } 13957 } 13958 sched_unpin(); 13959 #endif 13960 tp = tcp_close(tp); 13961 ctf_do_drop(m, tp); 13962 return (1); 13963 break; 13964 case TCPS_FIN_WAIT_1: 13965 #ifdef TCP_ACCOUNTING 13966 rdstc = get_cyclecount(); 13967 if (rdstc > ts_val) { 13968 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13969 (rdstc - ts_val)); 13970 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13971 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13972 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13973 } 13974 } 13975 #endif 13976 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13977 soisdisconnected(so); 13978 tcp_timer_activate(tp, TT_2MSL, 13979 (tcp_fast_finwait2_recycle ? 13980 tcp_finwait2_timeout : 13981 TP_MAXIDLE(tp))); 13982 } 13983 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13984 break; 13985 default: 13986 break; 13987 } 13988 } 13989 if (rack->r_fast_output) { 13990 /* 13991 * We re doing fast output.. can we expand that? 
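 * Newly acked data frees cwnd/rwnd space, so rack_gain_for_fastoutput()
 * below may enlarge what the prebuilt fast-send block is allowed to
 * cover rather than forcing a full rebuild of the send state.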
13992 */ 13993 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13994 } 13995 #ifdef TCP_ACCOUNTING 13996 rdstc = get_cyclecount(); 13997 if (rdstc > ts_val) { 13998 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13999 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14000 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14001 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14002 } 14003 } 14004 14005 } else if (win_up_req) { 14006 rdstc = get_cyclecount(); 14007 if (rdstc > ts_val) { 14008 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 14009 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14010 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 14011 } 14012 } 14013 #endif 14014 } 14015 /* Now is there a next packet, if so we are done */ 14016 m_freem(m); 14017 did_out = 0; 14018 if (nxt_pkt) { 14019 #ifdef TCP_ACCOUNTING 14020 sched_unpin(); 14021 #endif 14022 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 14023 return (0); 14024 } 14025 rack_handle_might_revert(tp, rack); 14026 ctf_calc_rwin(so, tp); 14027 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14028 send_out_a_rst: 14029 (void)tp->t_fb->tfb_tcp_output(tp); 14030 did_out = 1; 14031 } 14032 rack_free_trim(rack); 14033 #ifdef TCP_ACCOUNTING 14034 sched_unpin(); 14035 #endif 14036 rack_timer_audit(tp, rack, &so->so_snd); 14037 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 14038 return (0); 14039 } 14040 14041 14042 static int 14043 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 14044 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 14045 int32_t nxt_pkt, struct timeval *tv) 14046 { 14047 #ifdef TCP_ACCOUNTING 14048 uint64_t ts_val; 14049 #endif 14050 int32_t thflags, retval, did_out = 0; 14051 int32_t way_out = 0; 14052 /* 14053 * cts - is the current time from tv (caller gets ts) in microseconds. 14054 * ms_cts - is the current time from tv in milliseconds. 14055 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 14056 */ 14057 uint32_t cts, us_cts, ms_cts; 14058 uint32_t tiwin; 14059 struct timespec ts; 14060 struct tcpopt to; 14061 struct tcp_rack *rack; 14062 struct rack_sendmap *rsm; 14063 int32_t prev_state = 0; 14064 #ifdef TCP_ACCOUNTING 14065 int ack_val_set = 0xf; 14066 #endif 14067 int nsegs; 14068 /* 14069 * tv passed from common code is from either M_TSTMP_LRO or 14070 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14071 */ 14072 rack = (struct tcp_rack *)tp->t_fb_ptr; 14073 if (m->m_flags & M_ACKCMP) { 14074 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14075 } 14076 if (m->m_flags & M_ACKCMP) { 14077 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 14078 } 14079 cts = tcp_tv_to_usectick(tv); 14080 ms_cts = tcp_tv_to_mssectick(tv); 14081 nsegs = m->m_pkthdr.lro_nsegs; 14082 counter_u64_add(rack_proc_non_comp_ack, 1); 14083 thflags = th->th_flags; 14084 #ifdef TCP_ACCOUNTING 14085 sched_pin(); 14086 if (thflags & TH_ACK) 14087 ts_val = get_cyclecount(); 14088 #endif 14089 if ((m->m_flags & M_TSTMP) || 14090 (m->m_flags & M_TSTMP_LRO)) { 14091 mbuf_tstmp2timespec(m, &ts); 14092 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14093 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14094 } else 14095 rack->r_ctl.act_rcv_time = *tv; 14096 kern_prefetch(rack, &prev_state); 14097 prev_state = 0; 14098 /* 14099 * Unscale the window into a 32-bit value. For the SYN_SENT state 14100 * the scale is zero. 
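 * Illustrative: a raw th_win of 1026 with snd_scale 7 unscales to
 * 1026 << 7 = 131328 bytes, while a connection that never negotiated
 * window scaling keeps the raw 16-bit value as-is.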
14101 */ 14102 tiwin = th->th_win << tp->snd_scale; 14103 #ifdef TCP_ACCOUNTING 14104 if (thflags & TH_ACK) { 14105 /* 14106 * We have a tradeoff here. We can either do what we are 14107 * doing i.e. pinning to this CPU and then doing the accounting 14108 * <or> we could do a critical enter, setup the rdtsc and cpu 14109 * as in below, and then validate we are on the same CPU on 14110 * exit. I have choosen to not do the critical enter since 14111 * that often will gain you a context switch, and instead lock 14112 * us (line above this if) to the same CPU with sched_pin(). This 14113 * means we may be context switched out for a higher priority 14114 * interupt but we won't be moved to another CPU. 14115 * 14116 * If this occurs (which it won't very often since we most likely 14117 * are running this code in interupt context and only a higher 14118 * priority will bump us ... clock?) we will falsely add in 14119 * to the time the interupt processing time plus the ack processing 14120 * time. This is ok since its a rare event. 14121 */ 14122 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14123 ctf_fixed_maxseg(tp)); 14124 } 14125 #endif 14126 /* 14127 * Parse options on any incoming segment. 14128 */ 14129 memset(&to, 0, sizeof(to)); 14130 tcp_dooptions(&to, (u_char *)(th + 1), 14131 (th->th_off << 2) - sizeof(struct tcphdr), 14132 (thflags & TH_SYN) ? TO_SYN : 0); 14133 NET_EPOCH_ASSERT(); 14134 INP_WLOCK_ASSERT(tp->t_inpcb); 14135 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14136 __func__)); 14137 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14138 __func__)); 14139 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14140 (tp->t_flags & TF_GPUTINPROG)) { 14141 /* 14142 * We have a goodput in progress 14143 * and we have entered a late state. 14144 * Do we have enough data in the sb 14145 * to handle the GPUT request? 14146 */ 14147 uint32_t bytes; 14148 14149 bytes = tp->gput_ack - tp->gput_seq; 14150 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14151 bytes += tp->gput_seq - tp->snd_una; 14152 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14153 /* 14154 * There are not enough bytes in the socket 14155 * buffer that have been sent to cover this 14156 * measurement. Cancel it. 
14157 */ 14158 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14159 rack->r_ctl.rc_gp_srtt /*flex1*/, 14160 tp->gput_seq, 14161 0, 0, 18, __LINE__, NULL, 0); 14162 tp->t_flags &= ~TF_GPUTINPROG; 14163 } 14164 } 14165 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14166 union tcp_log_stackspecific log; 14167 struct timeval ltv; 14168 #ifdef NETFLIX_HTTP_LOGGING 14169 struct http_sendfile_track *http_req; 14170 14171 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14172 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14173 } else { 14174 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14175 } 14176 #endif 14177 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14178 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 14179 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 14180 if (rack->rack_no_prr == 0) 14181 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14182 else 14183 log.u_bbr.flex1 = 0; 14184 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14185 log.u_bbr.use_lt_bw <<= 1; 14186 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14187 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14188 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14189 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14190 log.u_bbr.flex3 = m->m_flags; 14191 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14192 log.u_bbr.lost = thflags; 14193 log.u_bbr.pacing_gain = 0x1; 14194 #ifdef TCP_ACCOUNTING 14195 log.u_bbr.cwnd_gain = ack_val_set; 14196 #endif 14197 log.u_bbr.flex7 = 2; 14198 if (m->m_flags & M_TSTMP) { 14199 /* Record the hardware timestamp if present */ 14200 mbuf_tstmp2timespec(m, &ts); 14201 ltv.tv_sec = ts.tv_sec; 14202 ltv.tv_usec = ts.tv_nsec / 1000; 14203 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 14204 } else if (m->m_flags & M_TSTMP_LRO) { 14205 /* Record the LRO the arrival timestamp */ 14206 mbuf_tstmp2timespec(m, &ts); 14207 ltv.tv_sec = ts.tv_sec; 14208 ltv.tv_usec = ts.tv_nsec / 1000; 14209 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 14210 } 14211 log.u_bbr.timeStamp = tcp_get_usecs(<v); 14212 /* Log the rcv time */ 14213 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14214 #ifdef NETFLIX_HTTP_LOGGING 14215 log.u_bbr.applimited = tp->t_http_closed; 14216 log.u_bbr.applimited <<= 8; 14217 log.u_bbr.applimited |= tp->t_http_open; 14218 log.u_bbr.applimited <<= 8; 14219 log.u_bbr.applimited |= tp->t_http_req; 14220 if (http_req) { 14221 /* Copy out any client req info */ 14222 /* seconds */ 14223 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14224 /* useconds */ 14225 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14226 log.u_bbr.rttProp = http_req->timestamp; 14227 log.u_bbr.cur_del_rate = http_req->start; 14228 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14229 log.u_bbr.flex8 |= 1; 14230 } else { 14231 log.u_bbr.flex8 |= 2; 14232 log.u_bbr.bw_inuse = http_req->end; 14233 } 14234 log.u_bbr.flex6 = http_req->start_seq; 14235 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14236 log.u_bbr.flex8 |= 4; 14237 log.u_bbr.epoch = http_req->end_seq; 14238 } 14239 } 14240 #endif 14241 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14242 tlen, &log, true, <v); 14243 } 14244 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14245 way_out = 4; 14246 retval = 0; 14247 m_freem(m); 14248 goto done_with_input; 14249 } 14250 /* 14251 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14252 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 
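 * In other words the only acceptable range is ISS < SEG.ACK <= SND.MAX
 * (snd_max here); an ack at or below our ISS, or for data we never
 * sent, is answered with a reset below.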
14253 */ 14254 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14255 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14256 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14257 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14258 #ifdef TCP_ACCOUNTING 14259 sched_unpin(); 14260 #endif 14261 return (1); 14262 } 14263 /* 14264 * If timestamps were negotiated during SYN/ACK and a 14265 * segment without a timestamp is received, silently drop 14266 * the segment, unless it is a RST segment or missing timestamps are 14267 * tolerated. 14268 * See section 3.2 of RFC 7323. 14269 */ 14270 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14271 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14272 way_out = 5; 14273 retval = 0; 14274 m_freem(m); 14275 goto done_with_input; 14276 } 14277 14278 /* 14279 * Segment received on connection. Reset idle time and keep-alive 14280 * timer. XXX: This should be done after segment validation to 14281 * ignore broken/spoofed segs. 14282 */ 14283 if (tp->t_idle_reduce && 14284 (tp->snd_max == tp->snd_una) && 14285 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14286 counter_u64_add(rack_input_idle_reduces, 1); 14287 rack_cc_after_idle(rack, tp); 14288 } 14289 tp->t_rcvtime = ticks; 14290 #ifdef STATS 14291 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14292 #endif 14293 if (tiwin > rack->r_ctl.rc_high_rwnd) 14294 rack->r_ctl.rc_high_rwnd = tiwin; 14295 /* 14296 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14297 * this to occur after we've validated the segment. 14298 */ 14299 if (tp->t_flags2 & TF2_ECN_PERMIT) { 14300 if (thflags & TH_CWR) { 14301 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 14302 tp->t_flags |= TF_ACKNOW; 14303 } 14304 switch (iptos & IPTOS_ECN_MASK) { 14305 case IPTOS_ECN_CE: 14306 tp->t_flags2 |= TF2_ECN_SND_ECE; 14307 KMOD_TCPSTAT_INC(tcps_ecn_ce); 14308 break; 14309 case IPTOS_ECN_ECT0: 14310 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 14311 break; 14312 case IPTOS_ECN_ECT1: 14313 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 14314 break; 14315 } 14316 14317 /* Process a packet differently from RFC3168. */ 14318 cc_ecnpkt_handler(tp, th, iptos); 14319 14320 /* Congestion experienced. */ 14321 if (thflags & TH_ECE) { 14322 rack_cong_signal(tp, CC_ECN, th->th_ack); 14323 } 14324 } 14325 14326 /* 14327 * If echoed timestamp is later than the current time, fall back to 14328 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14329 * were used when this connection was established. 14330 */ 14331 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14332 to.to_tsecr -= tp->ts_offset; 14333 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14334 to.to_tsecr = 0; 14335 } 14336 14337 /* 14338 * If its the first time in we need to take care of options and 14339 * verify we can do SACK for rack! 14340 */ 14341 if (rack->r_state == 0) { 14342 /* Should be init'd by rack_init() */ 14343 KASSERT(rack->rc_inp != NULL, 14344 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14345 if (rack->rc_inp == NULL) { 14346 rack->rc_inp = tp->t_inpcb; 14347 } 14348 14349 /* 14350 * Process options only when we get SYN/ACK back. The SYN 14351 * case for incoming connections is handled in tcp_syncache. 14352 * According to RFC1323 the window field in a SYN (i.e., a 14353 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14354 * this is traditional behavior, may need to be cleaned up. 
14355 */ 14356 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14357 /* Handle parallel SYN for ECN */ 14358 if (!(thflags & TH_ACK) && 14359 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) && 14360 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) { 14361 tp->t_flags2 |= TF2_ECN_PERMIT; 14362 tp->t_flags2 |= TF2_ECN_SND_ECE; 14363 TCPSTAT_INC(tcps_ecn_shs); 14364 } 14365 if ((to.to_flags & TOF_SCALE) && 14366 (tp->t_flags & TF_REQ_SCALE)) { 14367 tp->t_flags |= TF_RCVD_SCALE; 14368 tp->snd_scale = to.to_wscale; 14369 } else 14370 tp->t_flags &= ~TF_REQ_SCALE; 14371 /* 14372 * Initial send window. It will be updated with the 14373 * next incoming segment to the scaled value. 14374 */ 14375 tp->snd_wnd = th->th_win; 14376 rack_validate_fo_sendwin_up(tp, rack); 14377 if ((to.to_flags & TOF_TS) && 14378 (tp->t_flags & TF_REQ_TSTMP)) { 14379 tp->t_flags |= TF_RCVD_TSTMP; 14380 tp->ts_recent = to.to_tsval; 14381 tp->ts_recent_age = cts; 14382 } else 14383 tp->t_flags &= ~TF_REQ_TSTMP; 14384 if (to.to_flags & TOF_MSS) { 14385 tcp_mss(tp, to.to_mss); 14386 } 14387 if ((tp->t_flags & TF_SACK_PERMIT) && 14388 (to.to_flags & TOF_SACKPERM) == 0) 14389 tp->t_flags &= ~TF_SACK_PERMIT; 14390 if (IS_FASTOPEN(tp->t_flags)) { 14391 if (to.to_flags & TOF_FASTOPEN) { 14392 uint16_t mss; 14393 14394 if (to.to_flags & TOF_MSS) 14395 mss = to.to_mss; 14396 else 14397 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14398 mss = TCP6_MSS; 14399 else 14400 mss = TCP_MSS; 14401 tcp_fastopen_update_cache(tp, mss, 14402 to.to_tfo_len, to.to_tfo_cookie); 14403 } else 14404 tcp_fastopen_disable_path(tp); 14405 } 14406 } 14407 /* 14408 * At this point we are at the initial call. Here we decide 14409 * if we are doing RACK or not. We do this by seeing if 14410 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14411 * The code now does do dup-ack counting so if you don't 14412 * switch back you won't get rack & TLP, but you will still 14413 * get this stack. 14414 */ 14415 14416 if ((rack_sack_not_required == 0) && 14417 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14418 tcp_switch_back_to_default(tp); 14419 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14420 tlen, iptos); 14421 #ifdef TCP_ACCOUNTING 14422 sched_unpin(); 14423 #endif 14424 return (1); 14425 } 14426 tcp_set_hpts(tp->t_inpcb); 14427 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14428 } 14429 if (thflags & TH_FIN) 14430 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14431 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14432 if ((rack->rc_gp_dyn_mul) && 14433 (rack->use_fixed_rate == 0) && 14434 (rack->rc_always_pace)) { 14435 /* Check in on probertt */ 14436 rack_check_probe_rtt(rack, us_cts); 14437 } 14438 rack_clear_rate_sample(rack); 14439 if (rack->forced_ack) { 14440 uint32_t us_rtt; 14441 14442 /* 14443 * A persist or keep-alive was forced out, update our 14444 * min rtt time. Note we do not worry about lost 14445 * retransmissions since KEEP-ALIVES and persists 14446 * are usually way long on times of sending (though 14447 * if we were really paranoid or worried we could 14448 * at least use timestamps if available to validate). 14449 */ 14450 rack->forced_ack = 0; 14451 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 14452 if (us_rtt == 0) 14453 us_rtt = 1; 14454 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 14455 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 14456 } 14457 /* 14458 * This is the one exception case where we set the rack state 14459 * always. 
All other times (timers etc) we must have a rack-state 14460 * set (so we assure we have done the checks above for SACK). 14461 */ 14462 rack->r_ctl.rc_rcvtime = cts; 14463 if (rack->r_state != tp->t_state) 14464 rack_set_state(tp, rack); 14465 if (SEQ_GT(th->th_ack, tp->snd_una) && 14466 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14467 kern_prefetch(rsm, &prev_state); 14468 prev_state = rack->r_state; 14469 retval = (*rack->r_substate) (m, th, so, 14470 tp, &to, drop_hdrlen, 14471 tlen, tiwin, thflags, nxt_pkt, iptos); 14472 #ifdef INVARIANTS 14473 if ((retval == 0) && 14474 (tp->t_inpcb == NULL)) { 14475 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14476 retval, tp, prev_state); 14477 } 14478 #endif 14479 if (retval == 0) { 14480 /* 14481 * If retval is 1 the tcb is unlocked and most likely the tp 14482 * is gone. 14483 */ 14484 INP_WLOCK_ASSERT(tp->t_inpcb); 14485 if ((rack->rc_gp_dyn_mul) && 14486 (rack->rc_always_pace) && 14487 (rack->use_fixed_rate == 0) && 14488 rack->in_probe_rtt && 14489 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14490 /* 14491 * If we are going for target, lets recheck before 14492 * we output. 14493 */ 14494 rack_check_probe_rtt(rack, us_cts); 14495 } 14496 if (rack->set_pacing_done_a_iw == 0) { 14497 /* How much has been acked? */ 14498 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14499 /* We have enough to set in the pacing segment size */ 14500 rack->set_pacing_done_a_iw = 1; 14501 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14502 } 14503 } 14504 tcp_rack_xmit_timer_commit(rack, tp); 14505 #ifdef TCP_ACCOUNTING 14506 /* 14507 * If we set the ack_val_se to what ack processing we are doing 14508 * we also want to track how many cycles we burned. Note 14509 * the bits after tcp_output we let be "free". This is because 14510 * we are also tracking the tcp_output times as well. Note the 14511 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14512 * 0xf cannot be returned and is what we initialize it too to 14513 * indicate we are not doing the tabulations. 14514 */ 14515 if (ack_val_set != 0xf) { 14516 uint64_t crtsc; 14517 14518 crtsc = get_cyclecount(); 14519 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14520 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14521 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14522 } 14523 } 14524 #endif 14525 if (nxt_pkt == 0) { 14526 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14527 do_output_now: 14528 did_out = 1; 14529 (void)tp->t_fb->tfb_tcp_output(tp); 14530 } 14531 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14532 rack_free_trim(rack); 14533 } 14534 /* Update any rounds needed */ 14535 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends)) { 14536 rack->r_ctl.current_round++; 14537 rack->r_ctl.roundends = tp->snd_max; 14538 if (CC_ALGO(tp)->newround != NULL) { 14539 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14540 } 14541 } 14542 if ((nxt_pkt == 0) && 14543 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14544 (SEQ_GT(tp->snd_max, tp->snd_una) || 14545 (tp->t_flags & TF_DELACK) || 14546 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14547 (tp->t_state <= TCPS_CLOSING)))) { 14548 /* We could not send (probably in the hpts but stopped the timer earlier)? 
*/ 14549 if ((tp->snd_max == tp->snd_una) && 14550 ((tp->t_flags & TF_DELACK) == 0) && 14551 (rack->rc_inp->inp_in_hpts) && 14552 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14553 /* keep alive not needed if we are hptsi output yet */ 14554 ; 14555 } else { 14556 int late = 0; 14557 if (rack->rc_inp->inp_in_hpts) { 14558 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14559 us_cts = tcp_get_usecs(NULL); 14560 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14561 rack->r_early = 1; 14562 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14563 } else 14564 late = 1; 14565 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14566 } 14567 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 14568 } 14569 if (late && (did_out == 0)) { 14570 /* 14571 * We are late in the sending 14572 * and we did not call the output 14573 * (this probably should not happen). 14574 */ 14575 goto do_output_now; 14576 } 14577 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14578 } 14579 way_out = 1; 14580 } else if (nxt_pkt == 0) { 14581 /* Do we have the correct timer running? */ 14582 rack_timer_audit(tp, rack, &so->so_snd); 14583 way_out = 2; 14584 } 14585 done_with_input: 14586 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14587 if (did_out) 14588 rack->r_wanted_output = 0; 14589 #ifdef INVARIANTS 14590 if (tp->t_inpcb == NULL) { 14591 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14592 did_out, 14593 retval, tp, prev_state); 14594 } 14595 #endif 14596 #ifdef TCP_ACCOUNTING 14597 } else { 14598 /* 14599 * Track the time (see above). 14600 */ 14601 if (ack_val_set != 0xf) { 14602 uint64_t crtsc; 14603 14604 crtsc = get_cyclecount(); 14605 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14606 /* 14607 * Note we *DO NOT* increment the per-tcb counters since 14608 * in the else the TP may be gone!! 14609 */ 14610 } 14611 #endif 14612 } 14613 #ifdef TCP_ACCOUNTING 14614 sched_unpin(); 14615 #endif 14616 return (retval); 14617 } 14618 14619 void 14620 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14621 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14622 { 14623 struct timeval tv; 14624 14625 /* First lets see if we have old packets */ 14626 if (tp->t_in_pkt) { 14627 if (ctf_do_queued_segments(so, tp, 1)) { 14628 m_freem(m); 14629 return; 14630 } 14631 } 14632 if (m->m_flags & M_TSTMP_LRO) { 14633 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14634 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14635 } else { 14636 /* Should not be should we kassert instead? */ 14637 tcp_get_usecs(&tv); 14638 } 14639 if (rack_do_segment_nounlock(m, th, so, tp, 14640 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14641 INP_WUNLOCK(tp->t_inpcb); 14642 } 14643 } 14644 14645 struct rack_sendmap * 14646 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14647 { 14648 struct rack_sendmap *rsm = NULL; 14649 int32_t idx; 14650 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14651 14652 /* Return the next guy to be re-transmitted */ 14653 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14654 return (NULL); 14655 } 14656 if (tp->t_flags & TF_SENTFIN) { 14657 /* retran the end FIN? 
*/ 14658 return (NULL); 14659 } 14660 /* ok lets look at this one */ 14661 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14662 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14663 goto check_it; 14664 } 14665 rsm = rack_find_lowest_rsm(rack); 14666 if (rsm == NULL) { 14667 return (NULL); 14668 } 14669 check_it: 14670 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14671 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14672 /* 14673 * No sack so we automatically do the 3 strikes and 14674 * retransmit (no rack timer would be started). 14675 */ 14676 14677 return (rsm); 14678 } 14679 if (rsm->r_flags & RACK_ACKED) { 14680 return (NULL); 14681 } 14682 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14683 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14684 /* Its not yet ready */ 14685 return (NULL); 14686 } 14687 srtt = rack_grab_rtt(tp, rack); 14688 idx = rsm->r_rtr_cnt - 1; 14689 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14690 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14691 if ((tsused == ts_low) || 14692 (TSTMP_LT(tsused, ts_low))) { 14693 /* No time since sending */ 14694 return (NULL); 14695 } 14696 if ((tsused - ts_low) < thresh) { 14697 /* It has not been long enough yet */ 14698 return (NULL); 14699 } 14700 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14701 ((rsm->r_flags & RACK_SACK_PASSED) && 14702 (rack->sack_attack_disable == 0))) { 14703 /* 14704 * We have passed the dup-ack threshold <or> 14705 * a SACK has indicated this is missing. 14706 * Note that if you are a declared attacker 14707 * it is only the dup-ack threshold that 14708 * will cause retransmits. 14709 */ 14710 /* log retransmit reason */ 14711 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14712 rack->r_fast_output = 0; 14713 return (rsm); 14714 } 14715 return (NULL); 14716 } 14717 14718 static void 14719 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14720 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14721 int line, struct rack_sendmap *rsm, uint8_t quality) 14722 { 14723 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14724 union tcp_log_stackspecific log; 14725 struct timeval tv; 14726 14727 memset(&log, 0, sizeof(log)); 14728 log.u_bbr.flex1 = slot; 14729 log.u_bbr.flex2 = len; 14730 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14731 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14732 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14733 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14734 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14735 log.u_bbr.use_lt_bw <<= 1; 14736 log.u_bbr.use_lt_bw |= rack->r_late; 14737 log.u_bbr.use_lt_bw <<= 1; 14738 log.u_bbr.use_lt_bw |= rack->r_early; 14739 log.u_bbr.use_lt_bw <<= 1; 14740 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14741 log.u_bbr.use_lt_bw <<= 1; 14742 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14743 log.u_bbr.use_lt_bw <<= 1; 14744 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14745 log.u_bbr.use_lt_bw <<= 1; 14746 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14747 log.u_bbr.use_lt_bw <<= 1; 14748 log.u_bbr.use_lt_bw |= rack->gp_ready; 14749 log.u_bbr.pkt_epoch = line; 14750 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14751 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14752 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14753 log.u_bbr.bw_inuse = bw_est; 14754 log.u_bbr.delRate = bw; 14755 if (rack->r_ctl.gp_bw == 0) 14756 log.u_bbr.cur_del_rate = 0; 14757 else 14758 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14759 log.u_bbr.rttProp = len_time; 14760 
log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14761 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14762 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14763 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14764 /* We are in slow start */ 14765 log.u_bbr.flex7 = 1; 14766 } else { 14767 /* we are on congestion avoidance */ 14768 log.u_bbr.flex7 = 0; 14769 } 14770 log.u_bbr.flex8 = method; 14771 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14772 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14773 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14774 log.u_bbr.cwnd_gain <<= 1; 14775 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14776 log.u_bbr.cwnd_gain <<= 1; 14777 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14778 log.u_bbr.bbr_substate = quality; 14779 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14780 &rack->rc_inp->inp_socket->so_rcv, 14781 &rack->rc_inp->inp_socket->so_snd, 14782 BBR_LOG_HPTSI_CALC, 0, 14783 0, &log, false, &tv); 14784 } 14785 } 14786 14787 static uint32_t 14788 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14789 { 14790 uint32_t new_tso, user_max; 14791 14792 user_max = rack->rc_user_set_max_segs * mss; 14793 if (rack->rc_force_max_seg) { 14794 return (user_max); 14795 } 14796 if (rack->use_fixed_rate && 14797 ((rack->r_ctl.crte == NULL) || 14798 (bw != rack->r_ctl.crte->rate))) { 14799 /* Use the user mss since we are not exactly matched */ 14800 return (user_max); 14801 } 14802 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14803 if (new_tso > user_max) 14804 new_tso = user_max; 14805 return (new_tso); 14806 } 14807 14808 static int32_t 14809 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14810 { 14811 uint64_t lentim, fill_bw; 14812 14813 /* Lets first see if we are full, if so continue with normal rate */ 14814 rack->r_via_fill_cw = 0; 14815 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14816 return (slot); 14817 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14818 return (slot); 14819 if (rack->r_ctl.rc_last_us_rtt == 0) 14820 return (slot); 14821 if (rack->rc_pace_fill_if_rttin_range && 14822 (rack->r_ctl.rc_last_us_rtt >= 14823 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14824 /* The rtt is huge, N * smallest, lets not fill */ 14825 return (slot); 14826 } 14827 /* 14828 * first lets calculate the b/w based on the last us-rtt 14829 * and the sndwnd. 14830 */ 14831 fill_bw = rack->r_ctl.cwnd_to_use; 14832 /* Take the rwnd if its smaller */ 14833 if (fill_bw > rack->rc_tp->snd_wnd) 14834 fill_bw = rack->rc_tp->snd_wnd; 14835 if (rack->r_fill_less_agg) { 14836 /* 14837 * Now take away the inflight (this will reduce our 14838 * aggressiveness and yeah, if we get that much out in 1RTT 14839 * we will have had acks come back and still be behind). 
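 * Illustrative numbers (not from a trace): cwnd_to_use = 120000 bytes
 * with a larger rwnd, 40000 bytes already in flight and
 * rc_last_us_rtt = 20000 usecs gives, with r_fill_less_agg set,
 * fill_bw = (120000 - 40000) * 1000000 / 20000 = 4000000 bytes/sec,
 * i.e. a rate that would drain the remaining window in roughly one RTT.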
14840 */ 14841 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14842 } 14843 /* Now lets make it into a b/w */ 14844 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14845 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14846 /* We are below the min b/w */ 14847 if (non_paced) 14848 *rate_wanted = fill_bw; 14849 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14850 return (slot); 14851 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14852 fill_bw = rack->r_ctl.bw_rate_cap; 14853 rack->r_via_fill_cw = 1; 14854 if (rack->r_rack_hw_rate_caps && 14855 (rack->r_ctl.crte != NULL)) { 14856 uint64_t high_rate; 14857 14858 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14859 if (fill_bw > high_rate) { 14860 /* We are capping bw at the highest rate table entry */ 14861 if (*rate_wanted > high_rate) { 14862 /* The original rate was also capped */ 14863 rack->r_via_fill_cw = 0; 14864 } 14865 rack_log_hdwr_pacing(rack, 14866 fill_bw, high_rate, __LINE__, 14867 0, 3); 14868 fill_bw = high_rate; 14869 if (capped) 14870 *capped = 1; 14871 } 14872 } else if ((rack->r_ctl.crte == NULL) && 14873 (rack->rack_hdrw_pacing == 0) && 14874 (rack->rack_hdw_pace_ena) && 14875 rack->r_rack_hw_rate_caps && 14876 (rack->rack_attempt_hdwr_pace == 0) && 14877 (rack->rc_inp->inp_route.ro_nh != NULL) && 14878 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14879 /* 14880 * Ok we may have a first attempt that is greater than our top rate 14881 * lets check. 14882 */ 14883 uint64_t high_rate; 14884 14885 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14886 if (high_rate) { 14887 if (fill_bw > high_rate) { 14888 fill_bw = high_rate; 14889 if (capped) 14890 *capped = 1; 14891 } 14892 } 14893 } 14894 /* 14895 * Ok fill_bw holds our mythical b/w to fill the cwnd 14896 * in a rtt, what does that time wise equate too? 14897 */ 14898 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14899 lentim /= fill_bw; 14900 *rate_wanted = fill_bw; 14901 if (non_paced || (lentim < slot)) { 14902 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14903 0, lentim, 12, __LINE__, NULL, 0); 14904 return ((int32_t)lentim); 14905 } else 14906 return (slot); 14907 } 14908 14909 static int32_t 14910 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14911 { 14912 uint64_t srtt; 14913 int32_t slot = 0; 14914 int can_start_hw_pacing = 1; 14915 int err; 14916 14917 if (rack->rc_always_pace == 0) { 14918 /* 14919 * We use the most optimistic possible cwnd/srtt for 14920 * sending calculations. This will make our 14921 * calculation anticipate getting more through 14922 * quicker then possible. But thats ok we don't want 14923 * the peer to have a gap in data sending. 14924 */ 14925 uint64_t cwnd, tr_perms = 0; 14926 int32_t reduce = 0; 14927 14928 old_method: 14929 /* 14930 * We keep no precise pacing with the old method 14931 * instead we use the pacer to mitigate bursts. 
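	 * As a rough worked example of the math below (values assumed for
	 * illustration): with cwnd = 64000 bytes and an srtt of 20000 usec,
	 * tr_perms = (64000 * 1000) / 20000 = 3200 bytes per msec, so a
	 * 16000 byte send yields slot = 16000 / 3200 = 5 msec, i.e. 5000
	 * usec once scaled by HPTS_USEC_IN_MSEC, less any
	 * rack_slot_reduction.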
14932 */ 14933 if (rack->r_ctl.rc_rack_min_rtt) 14934 srtt = rack->r_ctl.rc_rack_min_rtt; 14935 else 14936 srtt = max(tp->t_srtt, 1); 14937 if (rack->r_ctl.rc_rack_largest_cwnd) 14938 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14939 else 14940 cwnd = rack->r_ctl.cwnd_to_use; 14941 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14942 tr_perms = (cwnd * 1000) / srtt; 14943 if (tr_perms == 0) { 14944 tr_perms = ctf_fixed_maxseg(tp); 14945 } 14946 /* 14947 * Calculate how long this will take to drain, if 14948 * the calculation comes out to zero, thats ok we 14949 * will use send_a_lot to possibly spin around for 14950 * more increasing tot_len_this_send to the point 14951 * that its going to require a pace, or we hit the 14952 * cwnd. Which in that case we are just waiting for 14953 * a ACK. 14954 */ 14955 slot = len / tr_perms; 14956 /* Now do we reduce the time so we don't run dry? */ 14957 if (slot && rack_slot_reduction) { 14958 reduce = (slot / rack_slot_reduction); 14959 if (reduce < slot) { 14960 slot -= reduce; 14961 } else 14962 slot = 0; 14963 } 14964 slot *= HPTS_USEC_IN_MSEC; 14965 if (rack->rc_pace_to_cwnd) { 14966 uint64_t rate_wanted = 0; 14967 14968 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14969 rack->rc_ack_can_sendout_data = 1; 14970 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14971 } else 14972 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14973 } else { 14974 uint64_t bw_est, res, lentim, rate_wanted; 14975 uint32_t orig_val, segs, oh; 14976 int capped = 0; 14977 int prev_fill; 14978 14979 if ((rack->r_rr_config == 1) && rsm) { 14980 return (rack->r_ctl.rc_min_to); 14981 } 14982 if (rack->use_fixed_rate) { 14983 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14984 } else if ((rack->r_ctl.init_rate == 0) && 14985 #ifdef NETFLIX_PEAKRATE 14986 (rack->rc_tp->t_maxpeakrate == 0) && 14987 #endif 14988 (rack->r_ctl.gp_bw == 0)) { 14989 /* no way to yet do an estimate */ 14990 bw_est = rate_wanted = 0; 14991 } else { 14992 bw_est = rack_get_bw(rack); 14993 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14994 } 14995 if ((bw_est == 0) || (rate_wanted == 0) || 14996 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14997 /* 14998 * No way yet to make a b/w estimate or 14999 * our raise is set incorrectly. 15000 */ 15001 goto old_method; 15002 } 15003 /* We need to account for all the overheads */ 15004 segs = (len + segsiz - 1) / segsiz; 15005 /* 15006 * We need the diff between 1514 bytes (e-mtu with e-hdr) 15007 * and how much data we put in each packet. Yes this 15008 * means we may be off if we are larger than 1500 bytes 15009 * or smaller. But this just makes us more conservative. 15010 */ 15011 if (rack_hw_rate_min && 15012 (bw_est < rack_hw_rate_min)) 15013 can_start_hw_pacing = 0; 15014 if (ETHERNET_SEGMENT_SIZE > segsiz) 15015 oh = ETHERNET_SEGMENT_SIZE - segsiz; 15016 else 15017 oh = 0; 15018 segs *= oh; 15019 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 15020 res = lentim / rate_wanted; 15021 slot = (uint32_t)res; 15022 orig_val = rack->r_ctl.rc_pace_max_segs; 15023 if (rack->r_ctl.crte == NULL) { 15024 /* 15025 * Only do this if we are not hardware pacing 15026 * since if we are doing hw-pacing below we will 15027 * set make a call after setting up or changing 15028 * the rate. 
15029 */ 15030 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 15031 } else if (rack->rc_inp->inp_snd_tag == NULL) { 15032 /* 15033 * We lost our rate somehow, this can happen 15034 * if the interface changed underneath us. 15035 */ 15036 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15037 rack->r_ctl.crte = NULL; 15038 /* Lets re-allow attempting to setup pacing */ 15039 rack->rack_hdrw_pacing = 0; 15040 rack->rack_attempt_hdwr_pace = 0; 15041 rack_log_hdwr_pacing(rack, 15042 rate_wanted, bw_est, __LINE__, 15043 0, 6); 15044 } 15045 /* Did we change the TSO size, if so log it */ 15046 if (rack->r_ctl.rc_pace_max_segs != orig_val) 15047 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 15048 prev_fill = rack->r_via_fill_cw; 15049 if ((rack->rc_pace_to_cwnd) && 15050 (capped == 0) && 15051 (rack->use_fixed_rate == 0) && 15052 (rack->in_probe_rtt == 0) && 15053 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 15054 /* 15055 * We want to pace at our rate *or* faster to 15056 * fill the cwnd to the max if its not full. 15057 */ 15058 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 15059 } 15060 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 15061 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15062 if ((rack->rack_hdw_pace_ena) && 15063 (can_start_hw_pacing > 0) && 15064 (rack->rack_hdrw_pacing == 0) && 15065 (rack->rack_attempt_hdwr_pace == 0)) { 15066 /* 15067 * Lets attempt to turn on hardware pacing 15068 * if we can. 15069 */ 15070 rack->rack_attempt_hdwr_pace = 1; 15071 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 15072 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15073 rate_wanted, 15074 RS_PACING_GEQ, 15075 &err, &rack->r_ctl.crte_prev_rate); 15076 if (rack->r_ctl.crte) { 15077 rack->rack_hdrw_pacing = 1; 15078 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15079 0, rack->r_ctl.crte, 15080 NULL); 15081 rack_log_hdwr_pacing(rack, 15082 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15083 err, 0); 15084 rack->r_ctl.last_hw_bw_req = rate_wanted; 15085 } else { 15086 counter_u64_add(rack_hw_pace_init_fail, 1); 15087 } 15088 } else if (rack->rack_hdrw_pacing && 15089 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15090 /* Do we need to adjust our rate? */ 15091 const struct tcp_hwrate_limit_table *nrte; 15092 15093 if (rack->r_up_only && 15094 (rate_wanted < rack->r_ctl.crte->rate)) { 15095 /** 15096 * We have four possible states here 15097 * having to do with the previous time 15098 * and this time. 15099 * previous | this-time 15100 * A) 0 | 0 -- fill_cw not in the picture 15101 * B) 1 | 0 -- we were doing a fill-cw but now are not 15102 * C) 1 | 1 -- all rates from fill_cw 15103 * D) 0 | 1 -- we were doing non-fill and now we are filling 15104 * 15105 * For case A, C and D we don't allow a drop. But for 15106 * case B where we now our on our steady rate we do 15107 * allow a drop. 15108 * 15109 */ 15110 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15111 goto done_w_hdwr; 15112 } 15113 if ((rate_wanted > rack->r_ctl.crte->rate) || 15114 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15115 if (rack_hw_rate_to_low && 15116 (bw_est < rack_hw_rate_to_low)) { 15117 /* 15118 * The pacing rate is too low for hardware, but 15119 * do allow hardware pacing to be restarted. 
15120 */ 15121 rack_log_hdwr_pacing(rack, 15122 bw_est, rack->r_ctl.crte->rate, __LINE__, 15123 0, 5); 15124 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15125 rack->r_ctl.crte = NULL; 15126 rack->rack_attempt_hdwr_pace = 0; 15127 rack->rack_hdrw_pacing = 0; 15128 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15129 goto done_w_hdwr; 15130 } 15131 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15132 rack->rc_tp, 15133 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15134 rate_wanted, 15135 RS_PACING_GEQ, 15136 &err, &rack->r_ctl.crte_prev_rate); 15137 if (nrte == NULL) { 15138 /* Lost the rate */ 15139 rack->rack_hdrw_pacing = 0; 15140 rack->r_ctl.crte = NULL; 15141 rack_log_hdwr_pacing(rack, 15142 rate_wanted, 0, __LINE__, 15143 err, 1); 15144 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15145 counter_u64_add(rack_hw_pace_lost, 1); 15146 } else if (nrte != rack->r_ctl.crte) { 15147 rack->r_ctl.crte = nrte; 15148 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15149 segsiz, 0, 15150 rack->r_ctl.crte, 15151 NULL); 15152 rack_log_hdwr_pacing(rack, 15153 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15154 err, 2); 15155 rack->r_ctl.last_hw_bw_req = rate_wanted; 15156 } 15157 } else { 15158 /* We just need to adjust the segment size */ 15159 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15160 rack_log_hdwr_pacing(rack, 15161 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15162 0, 4); 15163 rack->r_ctl.last_hw_bw_req = rate_wanted; 15164 } 15165 } 15166 } 15167 if ((rack->r_ctl.crte != NULL) && 15168 (rack->r_ctl.crte->rate == rate_wanted)) { 15169 /* 15170 * We need to add a extra if the rates 15171 * are exactly matched. The idea is 15172 * we want the software to make sure the 15173 * queue is empty before adding more, this 15174 * gives us N MSS extra pace times where 15175 * N is our sysctl 15176 */ 15177 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15178 } 15179 done_w_hdwr: 15180 if (rack_limit_time_with_srtt && 15181 (rack->use_fixed_rate == 0) && 15182 #ifdef NETFLIX_PEAKRATE 15183 (rack->rc_tp->t_maxpeakrate == 0) && 15184 #endif 15185 (rack->rack_hdrw_pacing == 0)) { 15186 /* 15187 * Sanity check, we do not allow the pacing delay 15188 * to be longer than the SRTT of the path. If it is 15189 * a slow path, then adding a packet should increase 15190 * the RTT and compensate for this i.e. the srtt will 15191 * be greater so the allowed pacing time will be greater. 15192 * 15193 * Note this restriction is not for where a peak rate 15194 * is set, we are doing fixed pacing or hardware pacing. 15195 */ 15196 if (rack->rc_tp->t_srtt) 15197 srtt = rack->rc_tp->t_srtt; 15198 else 15199 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15200 if (srtt < (uint64_t)slot) { 15201 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15202 slot = srtt; 15203 } 15204 } 15205 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15206 } 15207 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15208 /* 15209 * If this rate is seeing enobufs when it 15210 * goes to send then either the nic is out 15211 * of gas or we are mis-estimating the time 15212 * somehow and not letting the queue empty 15213 * completely. Lets add to the pacing time. 
15214 */ 15215 int hw_boost_delay; 15216 15217 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15218 if (hw_boost_delay > rack_enobuf_hw_max) 15219 hw_boost_delay = rack_enobuf_hw_max; 15220 else if (hw_boost_delay < rack_enobuf_hw_min) 15221 hw_boost_delay = rack_enobuf_hw_min; 15222 slot += hw_boost_delay; 15223 } 15224 if (slot) 15225 counter_u64_add(rack_calc_nonzero, 1); 15226 else 15227 counter_u64_add(rack_calc_zero, 1); 15228 return (slot); 15229 } 15230 15231 static void 15232 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15233 tcp_seq startseq, uint32_t sb_offset) 15234 { 15235 struct rack_sendmap *my_rsm = NULL; 15236 struct rack_sendmap fe; 15237 15238 if (tp->t_state < TCPS_ESTABLISHED) { 15239 /* 15240 * We don't start any measurements if we are 15241 * not at least established. 15242 */ 15243 return; 15244 } 15245 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15246 /* 15247 * We will get no more data into the SB 15248 * this means we need to have the data available 15249 * before we start a measurement. 15250 */ 15251 15252 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15253 max(rc_init_window(rack), 15254 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15255 /* Nope not enough data */ 15256 return; 15257 } 15258 } 15259 tp->t_flags |= TF_GPUTINPROG; 15260 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15261 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15262 tp->gput_seq = startseq; 15263 rack->app_limited_needs_set = 0; 15264 if (rack->in_probe_rtt) 15265 rack->measure_saw_probe_rtt = 1; 15266 else if ((rack->measure_saw_probe_rtt) && 15267 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15268 rack->measure_saw_probe_rtt = 0; 15269 if (rack->rc_gp_filled) 15270 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15271 else { 15272 /* Special case initial measurement */ 15273 struct timeval tv; 15274 15275 tp->gput_ts = tcp_get_usecs(&tv); 15276 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15277 } 15278 /* 15279 * We take a guess out into the future, 15280 * if we have no measurement and no 15281 * initial rate, we measure the first 15282 * initial-windows worth of data to 15283 * speed up getting some GP measurement and 15284 * thus start pacing. 15285 */ 15286 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15287 rack->app_limited_needs_set = 1; 15288 tp->gput_ack = startseq + max(rc_init_window(rack), 15289 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15290 rack_log_pacing_delay_calc(rack, 15291 tp->gput_seq, 15292 tp->gput_ack, 15293 0, 15294 tp->gput_ts, 15295 rack->r_ctl.rc_app_limited_cnt, 15296 9, 15297 __LINE__, NULL, 0); 15298 return; 15299 } 15300 if (sb_offset) { 15301 /* 15302 * We are out somewhere in the sb 15303 * can we use the already outstanding data? 15304 */ 15305 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15306 /* 15307 * Yes first one is good and in this case 15308 * the tp->gput_ts is correctly set based on 15309 * the last ack that arrived (no need to 15310 * set things up when an ack comes in). 15311 */ 15312 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15313 if ((my_rsm == NULL) || 15314 (my_rsm->r_rtr_cnt != 1)) { 15315 /* retransmission? */ 15316 goto use_latest; 15317 } 15318 } else { 15319 if (rack->r_ctl.rc_first_appl == NULL) { 15320 /* 15321 * If rc_first_appl is NULL 15322 * then the cnt should be 0. 15323 * This is probably an error, maybe 15324 * a KASSERT would be approprate. 
15325 */ 15326 goto use_latest; 15327 } 15328 /* 15329 * If we have a marker pointer to the last one that is 15330 * app limited we can use that, but we need to set 15331 * things up so that when it gets ack'ed we record 15332 * the ack time (if its not already acked). 15333 */ 15334 rack->app_limited_needs_set = 1; 15335 /* 15336 * We want to get to the rsm that is either 15337 * next with space i.e. over 1 MSS or the one 15338 * after that (after the app-limited). 15339 */ 15340 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15341 rack->r_ctl.rc_first_appl); 15342 if (my_rsm) { 15343 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15344 /* Have to use the next one */ 15345 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15346 my_rsm); 15347 else { 15348 /* Use after the first MSS of it is acked */ 15349 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15350 goto start_set; 15351 } 15352 } 15353 if ((my_rsm == NULL) || 15354 (my_rsm->r_rtr_cnt != 1)) { 15355 /* 15356 * Either its a retransmit or 15357 * the last is the app-limited one. 15358 */ 15359 goto use_latest; 15360 } 15361 } 15362 tp->gput_seq = my_rsm->r_start; 15363 start_set: 15364 if (my_rsm->r_flags & RACK_ACKED) { 15365 /* 15366 * This one has been acked use the arrival ack time 15367 */ 15368 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15369 rack->app_limited_needs_set = 0; 15370 } 15371 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15372 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15373 rack_log_pacing_delay_calc(rack, 15374 tp->gput_seq, 15375 tp->gput_ack, 15376 (uint64_t)my_rsm, 15377 tp->gput_ts, 15378 rack->r_ctl.rc_app_limited_cnt, 15379 9, 15380 __LINE__, NULL, 0); 15381 return; 15382 } 15383 15384 use_latest: 15385 /* 15386 * We don't know how long we may have been 15387 * idle or if this is the first-send. Lets 15388 * setup the flag so we will trim off 15389 * the first ack'd data so we get a true 15390 * measurement. 15391 */ 15392 rack->app_limited_needs_set = 1; 15393 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15394 /* Find this guy so we can pull the send time */ 15395 fe.r_start = startseq; 15396 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15397 if (my_rsm) { 15398 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15399 if (my_rsm->r_flags & RACK_ACKED) { 15400 /* 15401 * Unlikely since its probably what was 15402 * just transmitted (but I am paranoid). 15403 */ 15404 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15405 rack->app_limited_needs_set = 0; 15406 } 15407 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15408 /* This also is unlikely */ 15409 tp->gput_seq = my_rsm->r_start; 15410 } 15411 } else { 15412 /* 15413 * TSNH unless we have some send-map limit, 15414 * and even at that it should not be hitting 15415 * that limit (we should have stopped sending). 
15416 */ 15417 struct timeval tv; 15418 15419 microuptime(&tv); 15420 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15421 } 15422 rack_log_pacing_delay_calc(rack, 15423 tp->gput_seq, 15424 tp->gput_ack, 15425 (uint64_t)my_rsm, 15426 tp->gput_ts, 15427 rack->r_ctl.rc_app_limited_cnt, 15428 9, __LINE__, NULL, 0); 15429 } 15430 15431 static inline uint32_t 15432 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15433 uint32_t avail, int32_t sb_offset) 15434 { 15435 uint32_t len; 15436 uint32_t sendwin; 15437 15438 if (tp->snd_wnd > cwnd_to_use) 15439 sendwin = cwnd_to_use; 15440 else 15441 sendwin = tp->snd_wnd; 15442 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15443 /* We never want to go over our peers rcv-window */ 15444 len = 0; 15445 } else { 15446 uint32_t flight; 15447 15448 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15449 if (flight >= sendwin) { 15450 /* 15451 * We have in flight what we are allowed by cwnd (if 15452 * it was rwnd blocking it would have hit above out 15453 * >= tp->snd_wnd). 15454 */ 15455 return (0); 15456 } 15457 len = sendwin - flight; 15458 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15459 /* We would send too much (beyond the rwnd) */ 15460 len = tp->snd_wnd - ctf_outstanding(tp); 15461 } 15462 if ((len + sb_offset) > avail) { 15463 /* 15464 * We don't have that much in the SB, how much is 15465 * there? 15466 */ 15467 len = avail - sb_offset; 15468 } 15469 } 15470 return (len); 15471 } 15472 15473 static void 15474 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15475 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15476 int rsm_is_null, int optlen, int line, uint16_t mode) 15477 { 15478 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15479 union tcp_log_stackspecific log; 15480 struct timeval tv; 15481 15482 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15483 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15484 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15485 log.u_bbr.flex1 = error; 15486 log.u_bbr.flex2 = flags; 15487 log.u_bbr.flex3 = rsm_is_null; 15488 log.u_bbr.flex4 = ipoptlen; 15489 log.u_bbr.flex5 = tp->rcv_numsacks; 15490 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15491 log.u_bbr.flex7 = optlen; 15492 log.u_bbr.flex8 = rack->r_fsb_inited; 15493 log.u_bbr.applimited = rack->r_fast_output; 15494 log.u_bbr.bw_inuse = rack_get_bw(rack); 15495 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15496 log.u_bbr.cwnd_gain = mode; 15497 log.u_bbr.pkts_out = orig_len; 15498 log.u_bbr.lt_epoch = len; 15499 log.u_bbr.delivered = line; 15500 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15501 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15502 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15503 len, &log, false, NULL, NULL, 0, &tv); 15504 } 15505 } 15506 15507 15508 static struct mbuf * 15509 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15510 struct rack_fast_send_blk *fsb, 15511 int32_t seglimit, int32_t segsize, int hw_tls) 15512 { 15513 #ifdef KERN_TLS 15514 struct ktls_session *tls, *ntls; 15515 struct mbuf *start; 15516 #endif 15517 struct mbuf *m, *n, **np, *smb; 15518 struct mbuf *top; 15519 int32_t off, soff; 15520 int32_t len = *plen; 15521 int32_t fragsize; 15522 int32_t len_cp = 0; 15523 uint32_t mlen, frags; 15524 15525 soff = off = the_off; 15526 smb = m = the_m; 15527 np = ⊤ 15528 top = NULL; 15529 #ifdef KERN_TLS 15530 if (hw_tls && (m->m_flags & M_EXTPG)) 15531 tls = 
m->m_epg_tls; 15532 else 15533 tls = NULL; 15534 start = m; 15535 #endif 15536 while (len > 0) { 15537 if (m == NULL) { 15538 *plen = len_cp; 15539 break; 15540 } 15541 #ifdef KERN_TLS 15542 if (hw_tls) { 15543 if (m->m_flags & M_EXTPG) 15544 ntls = m->m_epg_tls; 15545 else 15546 ntls = NULL; 15547 15548 /* 15549 * Avoid mixing TLS records with handshake 15550 * data or TLS records from different 15551 * sessions. 15552 */ 15553 if (tls != ntls) { 15554 MPASS(m != start); 15555 *plen = len_cp; 15556 break; 15557 } 15558 } 15559 #endif 15560 mlen = min(len, m->m_len - off); 15561 if (seglimit) { 15562 /* 15563 * For M_EXTPG mbufs, add 3 segments 15564 * + 1 in case we are crossing page boundaries 15565 * + 2 in case the TLS hdr/trailer are used 15566 * It is cheaper to just add the segments 15567 * than it is to take the cache miss to look 15568 * at the mbuf ext_pgs state in detail. 15569 */ 15570 if (m->m_flags & M_EXTPG) { 15571 fragsize = min(segsize, PAGE_SIZE); 15572 frags = 3; 15573 } else { 15574 fragsize = segsize; 15575 frags = 0; 15576 } 15577 15578 /* Break if we really can't fit anymore. */ 15579 if ((frags + 1) >= seglimit) { 15580 *plen = len_cp; 15581 break; 15582 } 15583 15584 /* 15585 * Reduce size if you can't copy the whole 15586 * mbuf. If we can't copy the whole mbuf, also 15587 * adjust len so the loop will end after this 15588 * mbuf. 15589 */ 15590 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15591 mlen = (seglimit - frags - 1) * fragsize; 15592 len = mlen; 15593 *plen = len_cp + len; 15594 } 15595 frags += howmany(mlen, fragsize); 15596 if (frags == 0) 15597 frags++; 15598 seglimit -= frags; 15599 KASSERT(seglimit > 0, 15600 ("%s: seglimit went too low", __func__)); 15601 } 15602 n = m_get(M_NOWAIT, m->m_type); 15603 *np = n; 15604 if (n == NULL) 15605 goto nospace; 15606 n->m_len = mlen; 15607 soff += mlen; 15608 len_cp += n->m_len; 15609 if (m->m_flags & (M_EXT|M_EXTPG)) { 15610 n->m_data = m->m_data + off; 15611 mb_dupcl(n, m); 15612 } else { 15613 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15614 (u_int)n->m_len); 15615 } 15616 len -= n->m_len; 15617 off = 0; 15618 m = m->m_next; 15619 np = &n->m_next; 15620 if (len || (soff == smb->m_len)) { 15621 /* 15622 * We have more so we move forward or 15623 * we have consumed the entire mbuf and 15624 * len has fell to 0. 15625 */ 15626 soff = 0; 15627 smb = m; 15628 } 15629 15630 } 15631 if (fsb != NULL) { 15632 fsb->m = smb; 15633 fsb->off = soff; 15634 if (smb) { 15635 /* 15636 * Save off the size of the mbuf. We do 15637 * this so that we can recognize when it 15638 * has been trimmed by sbcut() as acks 15639 * come in. 15640 */ 15641 fsb->o_m_len = smb->m_len; 15642 } else { 15643 /* 15644 * This is the case where the next mbuf went to NULL. This 15645 * means with this copy we have sent everything in the sb. 15646 * In theory we could clear the fast_output flag, but lets 15647 * not since its possible that we could get more added 15648 * and acks that call the extend function which would let 15649 * us send more. 15650 */ 15651 fsb->o_m_len = 0; 15652 } 15653 } 15654 return (top); 15655 nospace: 15656 if (top) 15657 m_freem(top); 15658 return (NULL); 15659 15660 } 15661 15662 /* 15663 * This is a copy of m_copym(), taking the TSO segment size/limit 15664 * constraints into account, and advancing the sndptr as it goes. 
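 * The fast-send block tracks its place in the socket buffer with the
 * mbuf it stopped in (fsb.m), the offset within it (fsb.off) and a
 * snapshot of that mbuf's length (fsb.o_m_len).  The snapshot is what
 * lets the copy notice that acked data has since been trimmed from the
 * front by sbcut(): for example, if o_m_len was saved as 4096 and the
 * mbuf now reports m_len == 1024, the 3072 byte delta is subtracted
 * from the saved offset before the copy resumes.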
15665 */ 15666 static struct mbuf * 15667 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15668 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15669 { 15670 struct mbuf *m, *n; 15671 int32_t soff; 15672 15673 soff = rack->r_ctl.fsb.off; 15674 m = rack->r_ctl.fsb.m; 15675 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15676 /* 15677 * The mbuf had the front of it chopped off by an ack 15678 * we need to adjust the soff/off by that difference. 15679 */ 15680 uint32_t delta; 15681 15682 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15683 soff -= delta; 15684 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15685 /* 15686 * The mbuf was expanded probably by 15687 * a m_compress. Just update o_m_len. 15688 */ 15689 rack->r_ctl.fsb.o_m_len = m->m_len; 15690 } 15691 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15692 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15693 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15694 __FUNCTION__, 15695 rack, *plen, m, m->m_len)); 15696 /* Save off the right location before we copy and advance */ 15697 *s_soff = soff; 15698 *s_mb = rack->r_ctl.fsb.m; 15699 n = rack_fo_base_copym(m, soff, plen, 15700 &rack->r_ctl.fsb, 15701 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15702 return (n); 15703 } 15704 15705 static int 15706 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15707 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15708 { 15709 /* 15710 * Enter the fast retransmit path. We are given that a sched_pin is 15711 * in place (if accounting is compliled in) and the cycle count taken 15712 * at the entry is in the ts_val. The concept her is that the rsm 15713 * now holds the mbuf offsets and such so we can directly transmit 15714 * without a lot of overhead, the len field is already set for 15715 * us to prohibit us from sending too much (usually its 1MSS). 
15716 */ 15717 struct ip *ip = NULL; 15718 struct udphdr *udp = NULL; 15719 struct tcphdr *th = NULL; 15720 struct mbuf *m = NULL; 15721 struct inpcb *inp; 15722 uint8_t *cpto; 15723 struct tcp_log_buffer *lgb; 15724 #ifdef TCP_ACCOUNTING 15725 uint64_t crtsc; 15726 int cnt_thru = 1; 15727 #endif 15728 struct tcpopt to; 15729 u_char opt[TCP_MAXOLEN]; 15730 uint32_t hdrlen, optlen; 15731 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0; 15732 uint32_t us_cts; 15733 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15734 uint32_t if_hw_tsomaxsegsize; 15735 15736 #ifdef INET6 15737 struct ip6_hdr *ip6 = NULL; 15738 15739 if (rack->r_is_v6) { 15740 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15741 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15742 } else 15743 #endif /* INET6 */ 15744 { 15745 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15746 hdrlen = sizeof(struct tcpiphdr); 15747 } 15748 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15749 goto failed; 15750 } 15751 if (doing_tlp) { 15752 /* Its a TLP add the flag, it may already be there but be sure */ 15753 rsm->r_flags |= RACK_TLP; 15754 } else { 15755 /* If it was a TLP it is not not on this retransmit */ 15756 rsm->r_flags &= ~RACK_TLP; 15757 } 15758 startseq = rsm->r_start; 15759 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15760 inp = rack->rc_inp; 15761 to.to_flags = 0; 15762 flags = tcp_outflags[tp->t_state]; 15763 if (flags & (TH_SYN|TH_RST)) { 15764 goto failed; 15765 } 15766 if (rsm->r_flags & RACK_HAS_FIN) { 15767 /* We can't send a FIN here */ 15768 goto failed; 15769 } 15770 if (flags & TH_FIN) { 15771 /* We never send a FIN */ 15772 flags &= ~TH_FIN; 15773 } 15774 if (tp->t_flags & TF_RCVD_TSTMP) { 15775 to.to_tsval = ms_cts + tp->ts_offset; 15776 to.to_tsecr = tp->ts_recent; 15777 to.to_flags = TOF_TS; 15778 } 15779 optlen = tcp_addoptions(&to, opt); 15780 hdrlen += optlen; 15781 udp = rack->r_ctl.fsb.udp; 15782 if (udp) 15783 hdrlen += sizeof(struct udphdr); 15784 if (rack->r_ctl.rc_pace_max_segs) 15785 max_val = rack->r_ctl.rc_pace_max_segs; 15786 else if (rack->rc_user_set_max_segs) 15787 max_val = rack->rc_user_set_max_segs * segsiz; 15788 else 15789 max_val = len; 15790 if ((tp->t_flags & TF_TSO) && 15791 V_tcp_do_tso && 15792 (len > segsiz) && 15793 (tp->t_port == 0)) 15794 tso = 1; 15795 #ifdef INET6 15796 if (MHLEN < hdrlen + max_linkhdr) 15797 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15798 else 15799 #endif 15800 m = m_gethdr(M_NOWAIT, MT_DATA); 15801 if (m == NULL) 15802 goto failed; 15803 m->m_data += max_linkhdr; 15804 m->m_len = hdrlen; 15805 th = rack->r_ctl.fsb.th; 15806 /* Establish the len to send */ 15807 if (len > max_val) 15808 len = max_val; 15809 if ((tso) && (len + optlen > tp->t_maxseg)) { 15810 uint32_t if_hw_tsomax; 15811 int32_t max_len; 15812 15813 /* extract TSO information */ 15814 if_hw_tsomax = tp->t_tsomax; 15815 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15816 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15817 /* 15818 * Check if we should limit by maximum payload 15819 * length: 15820 */ 15821 if (if_hw_tsomax != 0) { 15822 /* compute maximum TSO length */ 15823 max_len = (if_hw_tsomax - hdrlen - 15824 max_linkhdr); 15825 if (max_len <= 0) { 15826 goto failed; 15827 } else if (len > max_len) { 15828 len = max_len; 15829 } 15830 } 15831 if (len <= segsiz) { 15832 /* 15833 * In case there are too many small fragments don't 15834 * use TSO: 15835 */ 15836 tso = 0; 15837 } 15838 } else { 15839 tso = 0; 15840 } 15841 if ((tso == 0) && 
(len > segsiz)) 15842 len = segsiz; 15843 us_cts = tcp_get_usecs(tv); 15844 if ((len == 0) || 15845 (len <= MHLEN - hdrlen - max_linkhdr)) { 15846 goto failed; 15847 } 15848 th->th_seq = htonl(rsm->r_start); 15849 th->th_ack = htonl(tp->rcv_nxt); 15850 /* 15851 * The PUSH bit should only be applied 15852 * if the full retransmission is made. If 15853 * we are sending less than this is the 15854 * left hand edge and should not have 15855 * the PUSH bit. 15856 */ 15857 if ((rsm->r_flags & RACK_HAD_PUSH) && 15858 (len == (rsm->r_end - rsm->r_start))) 15859 flags |= TH_PUSH; 15860 th->th_flags = flags; 15861 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15862 if (th->th_win == 0) { 15863 tp->t_sndzerowin++; 15864 tp->t_flags |= TF_RXWIN0SENT; 15865 } else 15866 tp->t_flags &= ~TF_RXWIN0SENT; 15867 if (rsm->r_flags & RACK_TLP) { 15868 /* 15869 * TLP should not count in retran count, but 15870 * in its own bin 15871 */ 15872 counter_u64_add(rack_tlp_retran, 1); 15873 counter_u64_add(rack_tlp_retran_bytes, len); 15874 } else { 15875 tp->t_sndrexmitpack++; 15876 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15877 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15878 } 15879 #ifdef STATS 15880 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15881 len); 15882 #endif 15883 if (rsm->m == NULL) 15884 goto failed; 15885 if (rsm->orig_m_len != rsm->m->m_len) { 15886 /* Fix up the orig_m_len and possibly the mbuf offset */ 15887 rack_adjust_orig_mlen(rsm); 15888 } 15889 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15890 if (len <= segsiz) { 15891 /* 15892 * Must have ran out of mbufs for the copy 15893 * shorten it to no longer need tso. Lets 15894 * not put on sendalot since we are low on 15895 * mbufs. 
15896 */ 15897 tso = 0; 15898 } 15899 if ((m->m_next == NULL) || (len <= 0)){ 15900 goto failed; 15901 } 15902 if (udp) { 15903 if (rack->r_is_v6) 15904 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15905 else 15906 ulen = hdrlen + len - sizeof(struct ip); 15907 udp->uh_ulen = htons(ulen); 15908 } 15909 m->m_pkthdr.rcvif = (struct ifnet *)0; 15910 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15911 #ifdef INET6 15912 if (rack->r_is_v6) { 15913 if (tp->t_port) { 15914 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15915 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15916 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15917 th->th_sum = htons(0); 15918 UDPSTAT_INC(udps_opackets); 15919 } else { 15920 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15921 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15922 th->th_sum = in6_cksum_pseudo(ip6, 15923 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15924 0); 15925 } 15926 } 15927 #endif 15928 #if defined(INET6) && defined(INET) 15929 else 15930 #endif 15931 #ifdef INET 15932 { 15933 if (tp->t_port) { 15934 m->m_pkthdr.csum_flags = CSUM_UDP; 15935 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15936 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 15937 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15938 th->th_sum = htons(0); 15939 UDPSTAT_INC(udps_opackets); 15940 } else { 15941 m->m_pkthdr.csum_flags = CSUM_TCP; 15942 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15943 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15944 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15945 IPPROTO_TCP + len + optlen)); 15946 } 15947 /* IP version must be set here for ipv4/ipv6 checking later */ 15948 KASSERT(ip->ip_v == IPVERSION, 15949 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15950 } 15951 #endif 15952 if (tso) { 15953 KASSERT(len > tp->t_maxseg - optlen, 15954 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15955 m->m_pkthdr.csum_flags |= CSUM_TSO; 15956 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15957 } 15958 #ifdef INET6 15959 if (rack->r_is_v6) { 15960 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15961 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15962 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15963 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15964 else 15965 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15966 } 15967 #endif 15968 #if defined(INET) && defined(INET6) 15969 else 15970 #endif 15971 #ifdef INET 15972 { 15973 ip->ip_len = htons(m->m_pkthdr.len); 15974 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15975 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15976 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15977 if (tp->t_port == 0 || len < V_tcp_minmss) { 15978 ip->ip_off |= htons(IP_DF); 15979 } 15980 } else { 15981 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15982 } 15983 } 15984 #endif 15985 /* Time to copy in our header */ 15986 cpto = mtod(m, uint8_t *); 15987 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15988 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15989 if (optlen) { 15990 bcopy(opt, th + 1, optlen); 15991 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15992 } else { 15993 th->th_off = sizeof(struct tcphdr) >> 2; 15994 } 15995 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15996 union tcp_log_stackspecific log; 15997 15998 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15999 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 16000 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 16001 if (rack->rack_no_prr) 16002 
log.u_bbr.flex1 = 0; 16003 else 16004 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16005 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16006 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16007 log.u_bbr.flex4 = max_val; 16008 log.u_bbr.flex5 = 0; 16009 /* Save off the early/late values */ 16010 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16011 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16012 log.u_bbr.bw_inuse = rack_get_bw(rack); 16013 if (doing_tlp == 0) 16014 log.u_bbr.flex8 = 1; 16015 else 16016 log.u_bbr.flex8 = 2; 16017 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16018 log.u_bbr.flex7 = 55; 16019 log.u_bbr.pkts_out = tp->t_maxseg; 16020 log.u_bbr.timeStamp = cts; 16021 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16022 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16023 log.u_bbr.delivered = 0; 16024 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16025 len, &log, false, NULL, NULL, 0, tv); 16026 } else 16027 lgb = NULL; 16028 #ifdef INET6 16029 if (rack->r_is_v6) { 16030 error = ip6_output(m, NULL, 16031 &inp->inp_route6, 16032 0, NULL, NULL, inp); 16033 } 16034 #endif 16035 #if defined(INET) && defined(INET6) 16036 else 16037 #endif 16038 #ifdef INET 16039 { 16040 error = ip_output(m, NULL, 16041 &inp->inp_route, 16042 0, 0, inp); 16043 } 16044 #endif 16045 m = NULL; 16046 if (lgb) { 16047 lgb->tlb_errno = error; 16048 lgb = NULL; 16049 } 16050 if (error) { 16051 goto failed; 16052 } 16053 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 16054 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 16055 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 16056 rack->rc_tlp_in_progress = 1; 16057 rack->r_ctl.rc_tlp_cnt_out++; 16058 } 16059 if (error == 0) { 16060 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 16061 if (doing_tlp) { 16062 rack->rc_last_sent_tlp_past_cumack = 0; 16063 rack->rc_last_sent_tlp_seq_valid = 1; 16064 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 16065 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 16066 } 16067 } 16068 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16069 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16070 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 16071 rack->r_ctl.retran_during_recovery += len; 16072 { 16073 int idx; 16074 16075 idx = (len / segsiz) + 3; 16076 if (idx >= TCP_MSS_ACCT_ATIMER) 16077 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16078 else 16079 counter_u64_add(rack_out_size[idx], 1); 16080 } 16081 if (tp->t_rtttime == 0) { 16082 tp->t_rtttime = ticks; 16083 tp->t_rtseq = startseq; 16084 KMOD_TCPSTAT_INC(tcps_segstimed); 16085 } 16086 counter_u64_add(rack_fto_rsm_send, 1); 16087 if (error && (error == ENOBUFS)) { 16088 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16089 if (rack->rc_enobuf < 0x7f) 16090 rack->rc_enobuf++; 16091 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16092 slot = 10 * HPTS_USEC_IN_MSEC; 16093 } else 16094 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16095 if ((slot == 0) || 16096 (rack->rc_always_pace == 0) || 16097 (rack->r_rr_config == 1)) { 16098 /* 16099 * We have no pacing set or we 16100 * are using old-style rack or 16101 * we are overriden to use the old 1ms pacing. 
16102 */ 16103 slot = rack->r_ctl.rc_min_to; 16104 } 16105 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16106 if (rack->r_must_retran) { 16107 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 16108 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 16109 /* 16110 * We have retransmitted all we need. 16111 */ 16112 rack->r_must_retran = 0; 16113 rack->r_ctl.rc_out_at_rto = 0; 16114 } 16115 } 16116 #ifdef TCP_ACCOUNTING 16117 crtsc = get_cyclecount(); 16118 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16119 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16120 } 16121 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16122 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16123 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16124 } 16125 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16126 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16127 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16128 } 16129 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16130 sched_unpin(); 16131 #endif 16132 return (0); 16133 failed: 16134 if (m) 16135 m_free(m); 16136 return (-1); 16137 } 16138 16139 static void 16140 rack_sndbuf_autoscale(struct tcp_rack *rack) 16141 { 16142 /* 16143 * Automatic sizing of send socket buffer. Often the send buffer 16144 * size is not optimally adjusted to the actual network conditions 16145 * at hand (delay bandwidth product). Setting the buffer size too 16146 * small limits throughput on links with high bandwidth and high 16147 * delay (e.g. trans-continental/oceanic links). Setting the 16148 * buffer size too big consumes too much real kernel memory, 16149 * especially with many connections on busy servers. 16150 * 16151 * The criteria to step up the send buffer one notch are: 16152 * 1. receive window of remote host is larger than send buffer 16153 * (with a fudge factor of 5/4th); 16154 * 2. send buffer is filled to 7/8th with data (so we actually 16155 * have data to make use of it); 16156 * 3. send buffer fill has not hit maximal automatic size; 16157 * 4. our send window (slow start and congestion controlled) is 16158 * larger than sent but unacknowledged data in send buffer. 16159 * 16160 * Note that the rack version moves things much faster since 16161 * we want to avoid hitting cache lines in the rack_fast_output() 16162 * path so this is called much less often and thus moves 16163 * the SB forward by a percentage.
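 * For example (values assumed for illustration): with sb_hiwat at
 * 65536 bytes and rack_autosndbuf_inc set to 20 (percent), the step
 * below is scaleup = (20 * 65536) / 100 = 13107 bytes, for a new
 * target of 78643 bytes, subject to the V_tcp_autosndbuf_inc floor
 * and the V_tcp_autosndbuf_max cap.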
16164 */ 16165 struct socket *so; 16166 struct tcpcb *tp; 16167 uint32_t sendwin, scaleup; 16168 16169 tp = rack->rc_tp; 16170 so = rack->rc_inp->inp_socket; 16171 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16172 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16173 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16174 sbused(&so->so_snd) >= 16175 (so->so_snd.sb_hiwat / 8 * 7) && 16176 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16177 sendwin >= (sbused(&so->so_snd) - 16178 (tp->snd_nxt - tp->snd_una))) { 16179 if (rack_autosndbuf_inc) 16180 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16181 else 16182 scaleup = V_tcp_autosndbuf_inc; 16183 if (scaleup < V_tcp_autosndbuf_inc) 16184 scaleup = V_tcp_autosndbuf_inc; 16185 scaleup += so->so_snd.sb_hiwat; 16186 if (scaleup > V_tcp_autosndbuf_max) 16187 scaleup = V_tcp_autosndbuf_max; 16188 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 16189 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16190 } 16191 } 16192 } 16193 16194 static int 16195 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16196 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16197 { 16198 /* 16199 * Enter to do fast output. We are given that the sched_pin is 16200 * in place (if accounting is compiled in) and the cycle count taken 16201 * at entry is in place in ts_val. The idea here is that 16202 * we know how many more bytes needs to be sent (presumably either 16203 * during pacing or to fill the cwnd and that was greater than 16204 * the max-burst). We have how much to send and all the info we 16205 * need to just send. 16206 */ 16207 struct ip *ip = NULL; 16208 struct udphdr *udp = NULL; 16209 struct tcphdr *th = NULL; 16210 struct mbuf *m, *s_mb; 16211 struct inpcb *inp; 16212 uint8_t *cpto; 16213 struct tcp_log_buffer *lgb; 16214 #ifdef TCP_ACCOUNTING 16215 uint64_t crtsc; 16216 #endif 16217 struct tcpopt to; 16218 u_char opt[TCP_MAXOLEN]; 16219 uint32_t hdrlen, optlen; 16220 int cnt_thru = 1; 16221 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0; 16222 uint32_t us_cts, s_soff; 16223 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16224 uint32_t if_hw_tsomaxsegsize; 16225 uint16_t add_flag = RACK_SENT_FP; 16226 #ifdef INET6 16227 struct ip6_hdr *ip6 = NULL; 16228 16229 if (rack->r_is_v6) { 16230 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16231 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16232 } else 16233 #endif /* INET6 */ 16234 { 16235 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16236 hdrlen = sizeof(struct tcpiphdr); 16237 } 16238 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16239 m = NULL; 16240 goto failed; 16241 } 16242 startseq = tp->snd_max; 16243 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16244 inp = rack->rc_inp; 16245 len = rack->r_ctl.fsb.left_to_send; 16246 to.to_flags = 0; 16247 flags = rack->r_ctl.fsb.tcp_flags; 16248 if (tp->t_flags & TF_RCVD_TSTMP) { 16249 to.to_tsval = ms_cts + tp->ts_offset; 16250 to.to_tsecr = tp->ts_recent; 16251 to.to_flags = TOF_TS; 16252 } 16253 optlen = tcp_addoptions(&to, opt); 16254 hdrlen += optlen; 16255 udp = rack->r_ctl.fsb.udp; 16256 if (udp) 16257 hdrlen += sizeof(struct udphdr); 16258 if (rack->r_ctl.rc_pace_max_segs) 16259 max_val = rack->r_ctl.rc_pace_max_segs; 16260 else if (rack->rc_user_set_max_segs) 16261 max_val = rack->rc_user_set_max_segs * segsiz; 16262 else 16263 max_val = len; 16264 if ((tp->t_flags & TF_TSO) && 16265 
V_tcp_do_tso && 16266 (len > segsiz) && 16267 (tp->t_port == 0)) 16268 tso = 1; 16269 again: 16270 #ifdef INET6 16271 if (MHLEN < hdrlen + max_linkhdr) 16272 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16273 else 16274 #endif 16275 m = m_gethdr(M_NOWAIT, MT_DATA); 16276 if (m == NULL) 16277 goto failed; 16278 m->m_data += max_linkhdr; 16279 m->m_len = hdrlen; 16280 th = rack->r_ctl.fsb.th; 16281 /* Establish the len to send */ 16282 if (len > max_val) 16283 len = max_val; 16284 if ((tso) && (len + optlen > tp->t_maxseg)) { 16285 uint32_t if_hw_tsomax; 16286 int32_t max_len; 16287 16288 /* extract TSO information */ 16289 if_hw_tsomax = tp->t_tsomax; 16290 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16291 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16292 /* 16293 * Check if we should limit by maximum payload 16294 * length: 16295 */ 16296 if (if_hw_tsomax != 0) { 16297 /* compute maximum TSO length */ 16298 max_len = (if_hw_tsomax - hdrlen - 16299 max_linkhdr); 16300 if (max_len <= 0) { 16301 goto failed; 16302 } else if (len > max_len) { 16303 len = max_len; 16304 } 16305 } 16306 if (len <= segsiz) { 16307 /* 16308 * In case there are too many small fragments don't 16309 * use TSO: 16310 */ 16311 tso = 0; 16312 } 16313 } else { 16314 tso = 0; 16315 } 16316 if ((tso == 0) && (len > segsiz)) 16317 len = segsiz; 16318 us_cts = tcp_get_usecs(tv); 16319 if ((len == 0) || 16320 (len <= MHLEN - hdrlen - max_linkhdr)) { 16321 goto failed; 16322 } 16323 sb_offset = tp->snd_max - tp->snd_una; 16324 th->th_seq = htonl(tp->snd_max); 16325 th->th_ack = htonl(tp->rcv_nxt); 16326 th->th_flags = flags; 16327 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16328 if (th->th_win == 0) { 16329 tp->t_sndzerowin++; 16330 tp->t_flags |= TF_RXWIN0SENT; 16331 } else 16332 tp->t_flags &= ~TF_RXWIN0SENT; 16333 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16334 KMOD_TCPSTAT_INC(tcps_sndpack); 16335 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16336 #ifdef STATS 16337 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16338 len); 16339 #endif 16340 if (rack->r_ctl.fsb.m == NULL) 16341 goto failed; 16342 16343 /* s_mb and s_soff are saved for rack_log_output */ 16344 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16345 &s_mb, &s_soff); 16346 if (len <= segsiz) { 16347 /* 16348 * Must have ran out of mbufs for the copy 16349 * shorten it to no longer need tso. Lets 16350 * not put on sendalot since we are low on 16351 * mbufs. 16352 */ 16353 tso = 0; 16354 } 16355 if (rack->r_ctl.fsb.rfo_apply_push && 16356 (len == rack->r_ctl.fsb.left_to_send)) { 16357 th->th_flags |= TH_PUSH; 16358 add_flag |= RACK_HAD_PUSH; 16359 } 16360 if ((m->m_next == NULL) || (len <= 0)){ 16361 goto failed; 16362 } 16363 if (udp) { 16364 if (rack->r_is_v6) 16365 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16366 else 16367 ulen = hdrlen + len - sizeof(struct ip); 16368 udp->uh_ulen = htons(ulen); 16369 } 16370 m->m_pkthdr.rcvif = (struct ifnet *)0; 16371 if (tp->t_state == TCPS_ESTABLISHED && 16372 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16373 /* 16374 * If the peer has ECN, mark data packets with ECN capable 16375 * transmission (ECT). Ignore pure ack packets, 16376 * retransmissions. 
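	 * Here pure acks are excluded by the len > 0 check and
	 * retransmissions by the SEQ_GEQ(snd_nxt, snd_max) test just
	 * below, so only segments carrying new data get the ECT(0)
	 * codepoint.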
16377 */ 16378 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) { 16379 #ifdef INET6 16380 if (rack->r_is_v6) 16381 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 16382 else 16383 #endif 16384 ip->ip_tos |= IPTOS_ECN_ECT0; 16385 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 16386 /* 16387 * Reply with proper ECN notifications. 16388 * Only set CWR on new data segments. 16389 */ 16390 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 16391 flags |= TH_CWR; 16392 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 16393 } 16394 } 16395 if (tp->t_flags2 & TF2_ECN_SND_ECE) 16396 flags |= TH_ECE; 16397 } 16398 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16399 #ifdef INET6 16400 if (rack->r_is_v6) { 16401 if (tp->t_port) { 16402 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16403 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16404 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16405 th->th_sum = htons(0); 16406 UDPSTAT_INC(udps_opackets); 16407 } else { 16408 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16409 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16410 th->th_sum = in6_cksum_pseudo(ip6, 16411 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16412 0); 16413 } 16414 } 16415 #endif 16416 #if defined(INET6) && defined(INET) 16417 else 16418 #endif 16419 #ifdef INET 16420 { 16421 if (tp->t_port) { 16422 m->m_pkthdr.csum_flags = CSUM_UDP; 16423 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16424 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16425 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16426 th->th_sum = htons(0); 16427 UDPSTAT_INC(udps_opackets); 16428 } else { 16429 m->m_pkthdr.csum_flags = CSUM_TCP; 16430 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16431 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16432 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16433 IPPROTO_TCP + len + optlen)); 16434 } 16435 /* IP version must be set here for ipv4/ipv6 checking later */ 16436 KASSERT(ip->ip_v == IPVERSION, 16437 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16438 } 16439 #endif 16440 if (tso) { 16441 KASSERT(len > tp->t_maxseg - optlen, 16442 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16443 m->m_pkthdr.csum_flags |= CSUM_TSO; 16444 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16445 } 16446 #ifdef INET6 16447 if (rack->r_is_v6) { 16448 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16449 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16450 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16451 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16452 else 16453 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16454 } 16455 #endif 16456 #if defined(INET) && defined(INET6) 16457 else 16458 #endif 16459 #ifdef INET 16460 { 16461 ip->ip_len = htons(m->m_pkthdr.len); 16462 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16463 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16464 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16465 if (tp->t_port == 0 || len < V_tcp_minmss) { 16466 ip->ip_off |= htons(IP_DF); 16467 } 16468 } else { 16469 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16470 } 16471 } 16472 #endif 16473 /* Time to copy in our header */ 16474 cpto = mtod(m, uint8_t *); 16475 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16476 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16477 if (optlen) { 16478 bcopy(opt, th + 1, optlen); 16479 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16480 } else { 16481 th->th_off = sizeof(struct tcphdr) >> 2; 16482 } 16483 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16484 union 
tcp_log_stackspecific log; 16485 16486 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16487 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 16488 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 16489 if (rack->rack_no_prr) 16490 log.u_bbr.flex1 = 0; 16491 else 16492 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16493 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16494 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16495 log.u_bbr.flex4 = max_val; 16496 log.u_bbr.flex5 = 0; 16497 /* Save off the early/late values */ 16498 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16499 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16500 log.u_bbr.bw_inuse = rack_get_bw(rack); 16501 log.u_bbr.flex8 = 0; 16502 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16503 log.u_bbr.flex7 = 44; 16504 log.u_bbr.pkts_out = tp->t_maxseg; 16505 log.u_bbr.timeStamp = cts; 16506 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16507 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16508 log.u_bbr.delivered = 0; 16509 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16510 len, &log, false, NULL, NULL, 0, tv); 16511 } else 16512 lgb = NULL; 16513 #ifdef INET6 16514 if (rack->r_is_v6) { 16515 error = ip6_output(m, NULL, 16516 &inp->inp_route6, 16517 0, NULL, NULL, inp); 16518 } 16519 #endif 16520 #if defined(INET) && defined(INET6) 16521 else 16522 #endif 16523 #ifdef INET 16524 { 16525 error = ip_output(m, NULL, 16526 &inp->inp_route, 16527 0, 0, inp); 16528 } 16529 #endif 16530 if (lgb) { 16531 lgb->tlb_errno = error; 16532 lgb = NULL; 16533 } 16534 if (error) { 16535 *send_err = error; 16536 m = NULL; 16537 goto failed; 16538 } 16539 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16540 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16541 m = NULL; 16542 if (tp->snd_una == tp->snd_max) { 16543 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16544 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16545 tp->t_acktime = ticks; 16546 } 16547 if (error == 0) 16548 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16549 16550 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16551 tot_len += len; 16552 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16553 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16554 tp->snd_max += len; 16555 tp->snd_nxt = tp->snd_max; 16556 { 16557 int idx; 16558 16559 idx = (len / segsiz) + 3; 16560 if (idx >= TCP_MSS_ACCT_ATIMER) 16561 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16562 else 16563 counter_u64_add(rack_out_size[idx], 1); 16564 } 16565 if (len <= rack->r_ctl.fsb.left_to_send) 16566 rack->r_ctl.fsb.left_to_send -= len; 16567 else 16568 rack->r_ctl.fsb.left_to_send = 0; 16569 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16570 rack->r_fast_output = 0; 16571 rack->r_ctl.fsb.left_to_send = 0; 16572 /* At the end of fast_output scale up the sb */ 16573 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16574 rack_sndbuf_autoscale(rack); 16575 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16576 } 16577 if (tp->t_rtttime == 0) { 16578 tp->t_rtttime = ticks; 16579 tp->t_rtseq = startseq; 16580 KMOD_TCPSTAT_INC(tcps_segstimed); 16581 } 16582 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16583 (max_val > len) && 16584 (tso == 0)) { 16585 max_val -= len; 16586 len = segsiz; 16587 th = rack->r_ctl.fsb.th; 16588 cnt_thru++; 16589 goto again; 16590 } 16591 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16592 counter_u64_add(rack_fto_send, 1); 16593 slot = 
rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16594 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16595 #ifdef TCP_ACCOUNTING 16596 crtsc = get_cyclecount(); 16597 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16598 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16599 } 16600 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16601 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16602 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16603 } 16604 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16605 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16606 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16607 } 16608 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16609 sched_unpin(); 16610 #endif 16611 return (0); 16612 failed: 16613 if (m) 16614 m_free(m); 16615 rack->r_fast_output = 0; 16616 return (-1); 16617 } 16618 16619 static int 16620 rack_output(struct tcpcb *tp) 16621 { 16622 struct socket *so; 16623 uint32_t recwin; 16624 uint32_t sb_offset, s_moff = 0; 16625 int32_t len, flags, error = 0; 16626 struct mbuf *m, *s_mb = NULL; 16627 struct mbuf *mb; 16628 uint32_t if_hw_tsomaxsegcount = 0; 16629 uint32_t if_hw_tsomaxsegsize; 16630 int32_t segsiz, minseg; 16631 long tot_len_this_send = 0; 16632 #ifdef INET 16633 struct ip *ip = NULL; 16634 #endif 16635 #ifdef TCPDEBUG 16636 struct ipovly *ipov = NULL; 16637 #endif 16638 struct udphdr *udp = NULL; 16639 struct tcp_rack *rack; 16640 struct tcphdr *th; 16641 uint8_t pass = 0; 16642 uint8_t mark = 0; 16643 uint8_t wanted_cookie = 0; 16644 u_char opt[TCP_MAXOLEN]; 16645 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16646 uint32_t rack_seq; 16647 16648 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16649 unsigned ipsec_optlen = 0; 16650 16651 #endif 16652 int32_t idle, sendalot; 16653 int32_t sub_from_prr = 0; 16654 volatile int32_t sack_rxmit; 16655 struct rack_sendmap *rsm = NULL; 16656 int32_t tso, mtu; 16657 struct tcpopt to; 16658 int32_t slot = 0; 16659 int32_t sup_rack = 0; 16660 uint32_t cts, ms_cts, delayed, early; 16661 uint16_t add_flag = RACK_SENT_SP; 16662 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16663 uint8_t hpts_calling, doing_tlp = 0; 16664 uint32_t cwnd_to_use, pace_max_seg; 16665 int32_t do_a_prefetch = 0; 16666 int32_t prefetch_rsm = 0; 16667 int32_t orig_len = 0; 16668 struct timeval tv; 16669 int32_t prefetch_so_done = 0; 16670 struct tcp_log_buffer *lgb; 16671 struct inpcb *inp; 16672 struct sockbuf *sb; 16673 uint64_t ts_val = 0; 16674 #ifdef TCP_ACCOUNTING 16675 uint64_t crtsc; 16676 #endif 16677 #ifdef INET6 16678 struct ip6_hdr *ip6 = NULL; 16679 int32_t isipv6; 16680 #endif 16681 uint8_t filled_all = 0; 16682 bool hw_tls = false; 16683 16684 /* setup and take the cache hits here */ 16685 rack = (struct tcp_rack *)tp->t_fb_ptr; 16686 #ifdef TCP_ACCOUNTING 16687 sched_pin(); 16688 ts_val = get_cyclecount(); 16689 #endif 16690 hpts_calling = rack->rc_inp->inp_hpts_calls; 16691 NET_EPOCH_ASSERT(); 16692 INP_WLOCK_ASSERT(rack->rc_inp); 16693 #ifdef TCP_OFFLOAD 16694 if (tp->t_flags & TF_TOE) { 16695 #ifdef TCP_ACCOUNTING 16696 sched_unpin(); 16697 #endif 16698 return (tcp_offload_output(tp)); 16699 } 16700 #endif 16701 /* 16702 * For TFO connections in SYN_RECEIVED, only allow the initial 16703 * SYN|ACK and those sent by the retransmit timer. 
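 * Illustrative restatement of the test below (descriptive only, no
 * extra logic): output is suppressed when
 *   IS_FASTOPEN && state == SYN_RECEIVED &&
 *   SEQ_GT(snd_max, snd_una) && rc_resend == NULL
 * i.e. the SYN|ACK has already been sent (snd_max has advanced past
 * snd_una) and the retransmit timer has not queued anything.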
16704 */ 16705 if (IS_FASTOPEN(tp->t_flags) && 16706 (tp->t_state == TCPS_SYN_RECEIVED) && 16707 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16708 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16709 #ifdef TCP_ACCOUNTING 16710 sched_unpin(); 16711 #endif 16712 return (0); 16713 } 16714 #ifdef INET6 16715 if (rack->r_state) { 16716 /* Use the cache line loaded if possible */ 16717 isipv6 = rack->r_is_v6; 16718 } else { 16719 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16720 } 16721 #endif 16722 early = 0; 16723 cts = tcp_get_usecs(&tv); 16724 ms_cts = tcp_tv_to_mssectick(&tv); 16725 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16726 rack->rc_inp->inp_in_hpts) { 16727 /* 16728 * We are on the hpts for some timer but not hptsi output. 16729 * Remove from the hpts unconditionally. 16730 */ 16731 rack_timer_cancel(tp, rack, cts, __LINE__); 16732 } 16733 /* Are we pacing and late? */ 16734 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16735 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16736 /* We are delayed */ 16737 delayed = cts - rack->r_ctl.rc_last_output_to; 16738 } else { 16739 delayed = 0; 16740 } 16741 /* Do the timers, which may override the pacer */ 16742 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16743 if (rack_process_timers(tp, rack, cts, hpts_calling, &doing_tlp)) { 16744 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16745 #ifdef TCP_ACCOUNTING 16746 sched_unpin(); 16747 #endif 16748 return (0); 16749 } 16750 } 16751 if (rack->rc_in_persist) { 16752 if (rack->rc_inp->inp_in_hpts == 0) { 16753 /* Timer is not running */ 16754 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16755 } 16756 #ifdef TCP_ACCOUNTING 16757 sched_unpin(); 16758 #endif 16759 return (0); 16760 } 16761 if ((rack->r_timer_override) || 16762 (rack->rc_ack_can_sendout_data) || 16763 (delayed) || 16764 (tp->t_state < TCPS_ESTABLISHED)) { 16765 rack->rc_ack_can_sendout_data = 0; 16766 if (rack->rc_inp->inp_in_hpts) 16767 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 16768 } else if (rack->rc_inp->inp_in_hpts) { 16769 /* 16770 * On the hpts you can't pass even if ACKNOW is on, we will 16771 * when the hpts fires. 
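 * In other words, while the connection sits on the HPTS wheel for a
 * paced transmit this call does nothing but accounting (SND_BLOCKED /
 * TCP_MSS_ACCT_INPACE) and returns 0; the queued send happens when
 * the pacer timer fires and re-enters the output path.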
16772 */ 16773 #ifdef TCP_ACCOUNTING 16774 crtsc = get_cyclecount(); 16775 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16776 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16777 } 16778 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16779 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16780 tp->tcp_cnt_counters[SND_BLOCKED]++; 16781 } 16782 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16783 sched_unpin(); 16784 #endif 16785 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16786 return (0); 16787 } 16788 rack->rc_inp->inp_hpts_calls = 0; 16789 /* Finish out both pacing early and late accounting */ 16790 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16791 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16792 early = rack->r_ctl.rc_last_output_to - cts; 16793 } else 16794 early = 0; 16795 if (delayed) { 16796 rack->r_ctl.rc_agg_delayed += delayed; 16797 rack->r_late = 1; 16798 } else if (early) { 16799 rack->r_ctl.rc_agg_early += early; 16800 rack->r_early = 1; 16801 } 16802 /* Now that early/late accounting is done turn off the flag */ 16803 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16804 rack->r_wanted_output = 0; 16805 rack->r_timer_override = 0; 16806 if ((tp->t_state != rack->r_state) && 16807 TCPS_HAVEESTABLISHED(tp->t_state)) { 16808 rack_set_state(tp, rack); 16809 } 16810 if ((rack->r_fast_output) && 16811 (doing_tlp == 0) && 16812 (tp->rcv_numsacks == 0)) { 16813 int ret; 16814 16815 error = 0; 16816 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16817 if (ret >= 0) 16818 return(ret); 16819 else if (error) { 16820 inp = rack->rc_inp; 16821 so = inp->inp_socket; 16822 sb = &so->so_snd; 16823 goto nomore; 16824 } 16825 } 16826 inp = rack->rc_inp; 16827 /* 16828 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16829 * only allow the initial SYN or SYN|ACK and those sent 16830 * by the retransmit timer. 16831 */ 16832 if (IS_FASTOPEN(tp->t_flags) && 16833 ((tp->t_state == TCPS_SYN_RECEIVED) || 16834 (tp->t_state == TCPS_SYN_SENT)) && 16835 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16836 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16837 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16838 so = inp->inp_socket; 16839 sb = &so->so_snd; 16840 goto just_return_nolock; 16841 } 16842 /* 16843 * Determine length of data that should be transmitted, and flags 16844 * that will be used. If there is some data or critical controls 16845 * (SYN, RST) to send, then transmit; otherwise, investigate 16846 * further. 
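 * Note (descriptive): besides the classic idle/TF_MORETOCOME handling,
 * the block below also treats a long quiet period (snd_una == snd_max
 * for longer than rack_min_probertt_hold since rc_went_idle_time) as
 * an implicit probe-RTT boundary, either priming the probe-RTT
 * timestamps or calling rack_exit_probertt().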
16847 */ 16848 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16849 if (tp->t_idle_reduce) { 16850 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16851 rack_cc_after_idle(rack, tp); 16852 } 16853 tp->t_flags &= ~TF_LASTIDLE; 16854 if (idle) { 16855 if (tp->t_flags & TF_MORETOCOME) { 16856 tp->t_flags |= TF_LASTIDLE; 16857 idle = 0; 16858 } 16859 } 16860 if ((tp->snd_una == tp->snd_max) && 16861 rack->r_ctl.rc_went_idle_time && 16862 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16863 idle = cts - rack->r_ctl.rc_went_idle_time; 16864 if (idle > rack_min_probertt_hold) { 16865 /* Count as a probe rtt */ 16866 if (rack->in_probe_rtt == 0) { 16867 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16868 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16869 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16870 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16871 } else { 16872 rack_exit_probertt(rack, cts); 16873 } 16874 } 16875 idle = 0; 16876 } 16877 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16878 rack_init_fsb_block(tp, rack); 16879 again: 16880 /* 16881 * If we've recently taken a timeout, snd_max will be greater than 16882 * snd_nxt. There may be SACK information that allows us to avoid 16883 * resending already delivered data. Adjust snd_nxt accordingly. 16884 */ 16885 sendalot = 0; 16886 cts = tcp_get_usecs(&tv); 16887 ms_cts = tcp_tv_to_mssectick(&tv); 16888 tso = 0; 16889 mtu = 0; 16890 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16891 minseg = segsiz; 16892 if (rack->r_ctl.rc_pace_max_segs == 0) 16893 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16894 else 16895 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16896 sb_offset = tp->snd_max - tp->snd_una; 16897 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16898 flags = tcp_outflags[tp->t_state]; 16899 while (rack->rc_free_cnt < rack_free_cache) { 16900 rsm = rack_alloc(rack); 16901 if (rsm == NULL) { 16902 if (inp->inp_hpts_calls) 16903 /* Retry in a ms */ 16904 slot = (1 * HPTS_USEC_IN_MSEC); 16905 so = inp->inp_socket; 16906 sb = &so->so_snd; 16907 goto just_return_nolock; 16908 } 16909 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16910 rack->rc_free_cnt++; 16911 rsm = NULL; 16912 } 16913 if (inp->inp_hpts_calls) 16914 inp->inp_hpts_calls = 0; 16915 sack_rxmit = 0; 16916 len = 0; 16917 rsm = NULL; 16918 if (flags & TH_RST) { 16919 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16920 so = inp->inp_socket; 16921 sb = &so->so_snd; 16922 goto send; 16923 } 16924 if (rack->r_ctl.rc_resend) { 16925 /* Retransmit timer */ 16926 rsm = rack->r_ctl.rc_resend; 16927 rack->r_ctl.rc_resend = NULL; 16928 len = rsm->r_end - rsm->r_start; 16929 sack_rxmit = 1; 16930 sendalot = 0; 16931 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16932 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16933 __func__, __LINE__, 16934 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16935 sb_offset = rsm->r_start - tp->snd_una; 16936 if (len >= segsiz) 16937 len = segsiz; 16938 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16939 /* We have a retransmit that takes precedence */ 16940 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16941 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16942 /* Enter recovery if not induced by a time-out */ 16943 rack->r_ctl.rc_rsm_start = rsm->r_start; 16944 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 16945 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 16946 
rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 16947 } 16948 #ifdef INVARIANTS 16949 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16950 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16951 tp, rack, rsm, rsm->r_start, tp->snd_una); 16952 } 16953 #endif 16954 len = rsm->r_end - rsm->r_start; 16955 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16956 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16957 __func__, __LINE__, 16958 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16959 sb_offset = rsm->r_start - tp->snd_una; 16960 sendalot = 0; 16961 if (len >= segsiz) 16962 len = segsiz; 16963 if (len > 0) { 16964 sack_rxmit = 1; 16965 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16966 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16967 min(len, segsiz)); 16968 counter_u64_add(rack_rtm_prr_retran, 1); 16969 } 16970 } else if (rack->r_ctl.rc_tlpsend) { 16971 /* Tail loss probe */ 16972 long cwin; 16973 long tlen; 16974 16975 /* 16976 * Check if we can do a TLP with a RACK'd packet 16977 * this can happen if we are not doing the rack 16978 * cheat and we skipped to a TLP and it 16979 * went off. 16980 */ 16981 rsm = rack->r_ctl.rc_tlpsend; 16982 /* We are doing a TLP make sure the flag is preent */ 16983 rsm->r_flags |= RACK_TLP; 16984 rack->r_ctl.rc_tlpsend = NULL; 16985 sack_rxmit = 1; 16986 tlen = rsm->r_end - rsm->r_start; 16987 if (tlen > segsiz) 16988 tlen = segsiz; 16989 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16990 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16991 __func__, __LINE__, 16992 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16993 sb_offset = rsm->r_start - tp->snd_una; 16994 cwin = min(tp->snd_wnd, tlen); 16995 len = cwin; 16996 } 16997 if (rack->r_must_retran && 16998 (rsm == NULL)) { 16999 /* 17000 * Non-Sack and we had a RTO or MTU change, we 17001 * need to retransmit until we reach 17002 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto). 17003 */ 17004 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 17005 int sendwin, flight; 17006 17007 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 17008 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 17009 if (flight >= sendwin) { 17010 so = inp->inp_socket; 17011 sb = &so->so_snd; 17012 goto just_return_nolock; 17013 } 17014 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17015 if (rsm == NULL) { 17016 /* TSNH */ 17017 rack->r_must_retran = 0; 17018 rack->r_ctl.rc_out_at_rto = 0; 17019 rack->r_must_retran = 0; 17020 so = inp->inp_socket; 17021 sb = &so->so_snd; 17022 goto just_return_nolock; 17023 } 17024 sack_rxmit = 1; 17025 len = rsm->r_end - rsm->r_start; 17026 sendalot = 0; 17027 sb_offset = rsm->r_start - tp->snd_una; 17028 if (len >= segsiz) 17029 len = segsiz; 17030 } else { 17031 /* We must be done if there is nothing outstanding */ 17032 rack->r_must_retran = 0; 17033 rack->r_ctl.rc_out_at_rto = 0; 17034 } 17035 } 17036 /* 17037 * Enforce a connection sendmap count limit if set 17038 * as long as we are not retransmiting. 
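 * Descriptive note: when V_tcp_map_entries_limit is non-zero and this
 * connection already holds that many sendmap entries, new-data sends
 * are skipped (goto just_return_nolock) until ACKs free up entries;
 * retransmissions (rsm != NULL) and connections with do_detection set
 * bypass the check.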
17039 */ 17040 if ((rsm == NULL) && 17041 (rack->do_detection == 0) && 17042 (V_tcp_map_entries_limit > 0) && 17043 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17044 counter_u64_add(rack_to_alloc_limited, 1); 17045 if (!rack->alloc_limit_reported) { 17046 rack->alloc_limit_reported = 1; 17047 counter_u64_add(rack_alloc_limited_conns, 1); 17048 } 17049 so = inp->inp_socket; 17050 sb = &so->so_snd; 17051 goto just_return_nolock; 17052 } 17053 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17054 /* we are retransmitting the fin */ 17055 len--; 17056 if (len) { 17057 /* 17058 * When retransmitting data do *not* include the 17059 * FIN. This could happen from a TLP probe. 17060 */ 17061 flags &= ~TH_FIN; 17062 } 17063 } 17064 #ifdef INVARIANTS 17065 /* For debugging */ 17066 rack->r_ctl.rc_rsm_at_retran = rsm; 17067 #endif 17068 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17069 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17070 int ret; 17071 17072 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17073 if (ret == 0) 17074 return (0); 17075 } 17076 so = inp->inp_socket; 17077 sb = &so->so_snd; 17078 if (do_a_prefetch == 0) { 17079 kern_prefetch(sb, &do_a_prefetch); 17080 do_a_prefetch = 1; 17081 } 17082 #ifdef NETFLIX_SHARED_CWND 17083 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17084 rack->rack_enable_scwnd) { 17085 /* We are doing cwnd sharing */ 17086 if (rack->gp_ready && 17087 (rack->rack_attempted_scwnd == 0) && 17088 (rack->r_ctl.rc_scw == NULL) && 17089 tp->t_lib) { 17090 /* The pcbid is in, lets make an attempt */ 17091 counter_u64_add(rack_try_scwnd, 1); 17092 rack->rack_attempted_scwnd = 1; 17093 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17094 &rack->r_ctl.rc_scw_index, 17095 segsiz); 17096 } 17097 if (rack->r_ctl.rc_scw && 17098 (rack->rack_scwnd_is_idle == 1) && 17099 sbavail(&so->so_snd)) { 17100 /* we are no longer out of data */ 17101 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17102 rack->rack_scwnd_is_idle = 0; 17103 } 17104 if (rack->r_ctl.rc_scw) { 17105 /* First lets update and get the cwnd */ 17106 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17107 rack->r_ctl.rc_scw_index, 17108 tp->snd_cwnd, tp->snd_wnd, segsiz); 17109 } 17110 } 17111 #endif 17112 /* 17113 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17114 * state flags. 17115 */ 17116 if (tp->t_flags & TF_NEEDFIN) 17117 flags |= TH_FIN; 17118 if (tp->t_flags & TF_NEEDSYN) 17119 flags |= TH_SYN; 17120 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17121 void *end_rsm; 17122 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17123 if (end_rsm) 17124 kern_prefetch(end_rsm, &prefetch_rsm); 17125 prefetch_rsm = 1; 17126 } 17127 SOCKBUF_LOCK(sb); 17128 /* 17129 * If snd_nxt == snd_max and we have transmitted a FIN, the 17130 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17131 * negative length. This can also occur when TCP opens up its 17132 * congestion window while receiving additional duplicate acks after 17133 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17134 * the fast-retransmit. 17135 * 17136 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17137 * set to snd_una, the sb_offset will be 0, and the length may wind 17138 * up 0. 17139 * 17140 * If sack_rxmit is true we are retransmitting from the scoreboard 17141 * in which case len is already set. 
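 * Summary of how len/sb_offset were chosen above (descriptive only):
 *   - retransmit timer, RACK-marked loss, RTO recovery or TLP:
 *     rsm != NULL, sack_rxmit = 1, sb_offset = rsm->r_start - snd_una
 *     and len is clamped to at most one segsiz (further clamped to
 *     snd_wnd for a TLP);
 *   - otherwise len is still 0 and is derived below from sbavail(),
 *     cwnd_to_use, snd_wnd and, inside recovery, the PRR send count.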
17142 */ 17143 if ((sack_rxmit == 0) && 17144 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17145 uint32_t avail; 17146 17147 avail = sbavail(sb); 17148 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17149 sb_offset = tp->snd_nxt - tp->snd_una; 17150 else 17151 sb_offset = 0; 17152 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17153 if (rack->r_ctl.rc_tlp_new_data) { 17154 /* TLP is forcing out new data */ 17155 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17156 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17157 } 17158 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17159 if (tp->snd_wnd > sb_offset) 17160 len = tp->snd_wnd - sb_offset; 17161 else 17162 len = 0; 17163 } else { 17164 len = rack->r_ctl.rc_tlp_new_data; 17165 } 17166 } else { 17167 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17168 } 17169 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17170 /* 17171 * For prr=off, we need to send only 1 MSS 17172 * at a time. We do this because another sack could 17173 * be arriving that causes us to send retransmits and 17174 * we don't want to be on a long pace due to a larger send 17175 * that keeps us from sending out the retransmit. 17176 */ 17177 len = segsiz; 17178 } 17179 } else { 17180 uint32_t outstanding; 17181 /* 17182 * We are inside of a Fast recovery episode, this 17183 * is caused by a SACK or 3 dup acks. At this point 17184 * we have sent all the retransmissions and we rely 17185 * on PRR to dictate what we will send in the form of 17186 * new data. 17187 */ 17188 17189 outstanding = tp->snd_max - tp->snd_una; 17190 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17191 if (tp->snd_wnd > outstanding) { 17192 len = tp->snd_wnd - outstanding; 17193 /* Check to see if we have the data */ 17194 if ((sb_offset + len) > avail) { 17195 /* It does not all fit */ 17196 if (avail > sb_offset) 17197 len = avail - sb_offset; 17198 else 17199 len = 0; 17200 } 17201 } else { 17202 len = 0; 17203 } 17204 } else if (avail > sb_offset) { 17205 len = avail - sb_offset; 17206 } else { 17207 len = 0; 17208 } 17209 if (len > 0) { 17210 if (len > rack->r_ctl.rc_prr_sndcnt) { 17211 len = rack->r_ctl.rc_prr_sndcnt; 17212 } 17213 if (len > 0) { 17214 sub_from_prr = 1; 17215 counter_u64_add(rack_rtm_prr_newdata, 1); 17216 } 17217 } 17218 if (len > segsiz) { 17219 /* 17220 * We should never send more than a MSS when 17221 * retransmitting or sending new data in prr 17222 * mode unless the override flag is on. Most 17223 * likely the PRR algorithm is not going to 17224 * let us send a lot as well :-) 17225 */ 17226 if (rack->r_ctl.rc_prr_sendalot == 0) { 17227 len = segsiz; 17228 } 17229 } else if (len < segsiz) { 17230 /* 17231 * Do we send any? The idea here is if the 17232 * send empty's the socket buffer we want to 17233 * do it. However if not then lets just wait 17234 * for our prr_sndcnt to get bigger. 17235 */ 17236 long leftinsb; 17237 17238 leftinsb = sbavail(sb) - sb_offset; 17239 if (leftinsb > len) { 17240 /* This send does not empty the sb */ 17241 len = 0; 17242 } 17243 } 17244 } 17245 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17246 /* 17247 * If you have not established 17248 * and are not doing FAST OPEN 17249 * no data please. 
17250 */ 17251 if ((sack_rxmit == 0) && 17252 (!IS_FASTOPEN(tp->t_flags))){ 17253 len = 0; 17254 sb_offset = 0; 17255 } 17256 } 17257 if (prefetch_so_done == 0) { 17258 kern_prefetch(so, &prefetch_so_done); 17259 prefetch_so_done = 1; 17260 } 17261 /* 17262 * Lop off SYN bit if it has already been sent. However, if this is 17263 * SYN-SENT state and if segment contains data and if we don't know 17264 * that foreign host supports TAO, suppress sending segment. 17265 */ 17266 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17267 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17268 /* 17269 * When sending additional segments following a TFO SYN|ACK, 17270 * do not include the SYN bit. 17271 */ 17272 if (IS_FASTOPEN(tp->t_flags) && 17273 (tp->t_state == TCPS_SYN_RECEIVED)) 17274 flags &= ~TH_SYN; 17275 } 17276 /* 17277 * Be careful not to send data and/or FIN on SYN segments. This 17278 * measure is needed to prevent interoperability problems with not 17279 * fully conformant TCP implementations. 17280 */ 17281 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17282 len = 0; 17283 flags &= ~TH_FIN; 17284 } 17285 /* 17286 * On TFO sockets, ensure no data is sent in the following cases: 17287 * 17288 * - When retransmitting SYN|ACK on a passively-created socket 17289 * 17290 * - When retransmitting SYN on an actively created socket 17291 * 17292 * - When sending a zero-length cookie (cookie request) on an 17293 * actively created socket 17294 * 17295 * - When the socket is in the CLOSED state (RST is being sent) 17296 */ 17297 if (IS_FASTOPEN(tp->t_flags) && 17298 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17299 ((tp->t_state == TCPS_SYN_SENT) && 17300 (tp->t_tfo_client_cookie_len == 0)) || 17301 (flags & TH_RST))) { 17302 sack_rxmit = 0; 17303 len = 0; 17304 } 17305 /* Without fast-open there should never be data sent on a SYN */ 17306 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17307 tp->snd_nxt = tp->iss; 17308 len = 0; 17309 } 17310 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17311 /* We only send 1 MSS if we have a DSACK block */ 17312 add_flag |= RACK_SENT_W_DSACK; 17313 len = segsiz; 17314 } 17315 orig_len = len; 17316 if (len <= 0) { 17317 /* 17318 * If FIN has been sent but not acked, but we haven't been 17319 * called to retransmit, len will be < 0. Otherwise, window 17320 * shrank after we sent into it. If window shrank to 0, 17321 * cancel pending retransmit, pull snd_nxt back to (closed) 17322 * window, and set the persist timer if it isn't already 17323 * going. If the window didn't close completely, just wait 17324 * for an ACK. 17325 * 17326 * We also do a general check here to ensure that we will 17327 * set the persist timer when we have data to send, but a 17328 * 0-byte window. This makes sure the persist timer is set 17329 * even if the packet hits one of the "goto send" lines 17330 * below. 17331 */ 17332 len = 0; 17333 if ((tp->snd_wnd == 0) && 17334 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17335 (tp->snd_una == tp->snd_max) && 17336 (sb_offset < (int)sbavail(sb))) { 17337 rack_enter_persist(tp, rack, cts); 17338 } 17339 } else if ((rsm == NULL) && 17340 (doing_tlp == 0) && 17341 (len < pace_max_seg)) { 17342 /* 17343 * We are not sending a maximum sized segment for 17344 * some reason. Should we not send anything (think 17345 * sws or persists)? 
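 * Roadmap of the four checks below (descriptive only):
 *   (1) the peer's rwnd is below the minimum pacing size and this is
 *       not the tail of the socket buffer -> send nothing, possibly
 *       entering persists;
 *   (2) the cwnd blocks a minimum pacing segment while two or more
 *       segments are already in flight;
 *   (3) the send window is filled to within a pacing segment;
 *   (4) hardware pacing prefers to wait for ACKs so a full
 *       pace_max_seg can go out in one burst.
 * In all four cases len is forced back to 0.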
17346 */ 17347 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17348 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17349 (len < minseg) && 17350 (len < (int)(sbavail(sb) - sb_offset))) { 17351 /* 17352 * Here the rwnd is less than 17353 * the minimum pacing size, this is not a retransmit, 17354 * we are established and 17355 * the send is not the last in the socket buffer 17356 * we send nothing, and we may enter persists 17357 * if nothing is outstanding. 17358 */ 17359 len = 0; 17360 if (tp->snd_max == tp->snd_una) { 17361 /* 17362 * Nothing out we can 17363 * go into persists. 17364 */ 17365 rack_enter_persist(tp, rack, cts); 17366 } 17367 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17368 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17369 (len < (int)(sbavail(sb) - sb_offset)) && 17370 (len < minseg)) { 17371 /* 17372 * Here we are not retransmitting, and 17373 * the cwnd is not so small that we could 17374 * not send at least a min size (rxt timer 17375 * not having gone off), We have 2 segments or 17376 * more already in flight, its not the tail end 17377 * of the socket buffer and the cwnd is blocking 17378 * us from sending out a minimum pacing segment size. 17379 * Lets not send anything. 17380 */ 17381 len = 0; 17382 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17383 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17384 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17385 (len < (int)(sbavail(sb) - sb_offset)) && 17386 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17387 /* 17388 * Here we have a send window but we have 17389 * filled it up and we can't send another pacing segment. 17390 * We also have in flight more than 2 segments 17391 * and we are not completing the sb i.e. we allow 17392 * the last bytes of the sb to go out even if 17393 * its not a full pacing segment. 17394 */ 17395 len = 0; 17396 } else if ((rack->r_ctl.crte != NULL) && 17397 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17398 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17399 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17400 (len < (int)(sbavail(sb) - sb_offset))) { 17401 /* 17402 * Here we are doing hardware pacing, this is not a TLP, 17403 * we are not sending a pace max segment size, there is rwnd 17404 * room to send at least N pace_max_seg, the cwnd is greater 17405 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17406 * more segments in flight and its not the tail of the socket buffer. 17407 * 17408 * We don't want to send instead we need to get more ack's in to 17409 * allow us to send a full pacing segment. Normally, if we are pacing 17410 * about the right speed, we should have finished our pacing 17411 * send as most of the acks have come back if we are at the 17412 * right rate. This is a bit fuzzy since return path delay 17413 * can delay the acks, which is why we want to make sure we 17414 * have cwnd space to have a bit more than a max pace segments in flight. 17415 * 17416 * If we have not gotten our acks back we are pacing at too high a 17417 * rate delaying will not hurt and will bring our GP estimate down by 17418 * injecting the delay. If we don't do this we will send 17419 * 2 MSS out in response to the acks being clocked in which 17420 * defeats the point of hw-pacing (i.e. to help us get 17421 * larger TSO's out). 17422 */ 17423 len = 0; 17424 17425 } 17426 17427 } 17428 /* len will be >= 0 after this point. 
*/ 17429 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17430 rack_sndbuf_autoscale(rack); 17431 /* 17432 * Decide if we can use TCP Segmentation Offloading (if supported by 17433 * hardware). 17434 * 17435 * TSO may only be used if we are in a pure bulk sending state. The 17436 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17437 * options prevent using TSO. With TSO the TCP header is the same 17438 * (except for the sequence number) for all generated packets. This 17439 * makes it impossible to transmit any options which vary per 17440 * generated segment or packet. 17441 * 17442 * IPv4 handling has a clear separation of ip options and ip header 17443 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17444 * the right thing below to provide length of just ip options and thus 17445 * checking for ipoptlen is enough to decide if ip options are present. 17446 */ 17447 ipoptlen = 0; 17448 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17449 /* 17450 * Pre-calculate here as we save another lookup into the darknesses 17451 * of IPsec that way and can actually decide if TSO is ok. 17452 */ 17453 #ifdef INET6 17454 if (isipv6 && IPSEC_ENABLED(ipv6)) 17455 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17456 #ifdef INET 17457 else 17458 #endif 17459 #endif /* INET6 */ 17460 #ifdef INET 17461 if (IPSEC_ENABLED(ipv4)) 17462 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17463 #endif /* INET */ 17464 #endif 17465 17466 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17467 ipoptlen += ipsec_optlen; 17468 #endif 17469 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17470 (tp->t_port == 0) && 17471 ((tp->t_flags & TF_SIGNATURE) == 0) && 17472 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17473 ipoptlen == 0) 17474 tso = 1; 17475 { 17476 uint32_t outstanding; 17477 17478 outstanding = tp->snd_max - tp->snd_una; 17479 if (tp->t_flags & TF_SENTFIN) { 17480 /* 17481 * If we sent a fin, snd_max is 1 higher than 17482 * snd_una 17483 */ 17484 outstanding--; 17485 } 17486 if (sack_rxmit) { 17487 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17488 flags &= ~TH_FIN; 17489 } else { 17490 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17491 sbused(sb))) 17492 flags &= ~TH_FIN; 17493 } 17494 } 17495 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17496 (long)TCP_MAXWIN << tp->rcv_scale); 17497 17498 /* 17499 * Sender silly window avoidance. We transmit under the following 17500 * conditions when len is non-zero: 17501 * 17502 * - We have a full segment (or more with TSO) - This is the last 17503 * buffer in a write()/send() and we are either idle or running 17504 * NODELAY - we've timed out (e.g. persist timer) - we have more 17505 * then 1/2 the maximum send window's worth of data (receiver may be 17506 * limited the window size) - we need to retransmit 17507 */ 17508 if (len) { 17509 if (len >= segsiz) { 17510 goto send; 17511 } 17512 /* 17513 * NOTE! on localhost connections an 'ack' from the remote 17514 * end may occur synchronously with the output and cause us 17515 * to flush a buffer queued with moretocome. 
XXX 17516 * 17517 */ 17518 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17519 (idle || (tp->t_flags & TF_NODELAY)) && 17520 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17521 (tp->t_flags & TF_NOPUSH) == 0) { 17522 pass = 2; 17523 goto send; 17524 } 17525 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17526 pass = 22; 17527 goto send; 17528 } 17529 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17530 pass = 4; 17531 goto send; 17532 } 17533 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17534 pass = 5; 17535 goto send; 17536 } 17537 if (sack_rxmit) { 17538 pass = 6; 17539 goto send; 17540 } 17541 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17542 (ctf_outstanding(tp) < (segsiz * 2))) { 17543 /* 17544 * We have less than two MSS outstanding (delayed ack) 17545 * and our rwnd will not let us send a full sized 17546 * MSS. Lets go ahead and let this small segment 17547 * out because we want to try to have at least two 17548 * packets inflight to not be caught by delayed ack. 17549 */ 17550 pass = 12; 17551 goto send; 17552 } 17553 } 17554 /* 17555 * Sending of standalone window updates. 17556 * 17557 * Window updates are important when we close our window due to a 17558 * full socket buffer and are opening it again after the application 17559 * reads data from it. Once the window has opened again and the 17560 * remote end starts to send again the ACK clock takes over and 17561 * provides the most current window information. 17562 * 17563 * We must avoid the silly window syndrome whereas every read from 17564 * the receive buffer, no matter how small, causes a window update 17565 * to be sent. We also should avoid sending a flurry of window 17566 * updates when the socket buffer had queued a lot of data and the 17567 * application is doing small reads. 17568 * 17569 * Prevent a flurry of pointless window updates by only sending an 17570 * update when we can increase the advertized window by more than 17571 * 1/4th of the socket buffer capacity. When the buffer is getting 17572 * full or is very small be more aggressive and send an update 17573 * whenever we can increase by two mss sized segments. In all other 17574 * situations the ACK's to new incoming data will carry further 17575 * window increases. 17576 * 17577 * Don't send an independent window update if a delayed ACK is 17578 * pending (it will get piggy-backed on it) or the remote side 17579 * already has done a half-close and won't send more data. Skip 17580 * this if the connection is in T/TCP half-open state. 17581 */ 17582 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17583 !(tp->t_flags & TF_DELACK) && 17584 !TCPS_HAVERCVDFIN(tp->t_state)) { 17585 /* 17586 * "adv" is the amount we could increase the window, taking 17587 * into account that we are limited by TCP_MAXWIN << 17588 * tp->rcv_scale. 17589 */ 17590 int32_t adv; 17591 int oldwin; 17592 17593 adv = recwin; 17594 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17595 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17596 if (adv > oldwin) 17597 adv -= oldwin; 17598 else { 17599 /* We can't increase the window */ 17600 adv = 0; 17601 } 17602 } else 17603 oldwin = 0; 17604 17605 /* 17606 * If the new window size ends up being the same as or less 17607 * than the old size when it is scaled, then don't force 17608 * a window update. 
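 * Worked example for the threshold checks just below (illustrative
 * numbers only): with segsiz = 1460 and so_rcv.sb_hiwat = 65536, a
 * standalone update is sent once the window can grow by at least
 * 2 * 1460 = 2920 bytes AND either the growth is >= 65536 / 4 = 16384
 * bytes, the advertisable window has shrunk to <= 65536 / 8 = 8192
 * bytes, or the whole buffer is tiny (<= 8 * 1460 bytes); it is also
 * sent whenever the increase covers at least half the buffer
 * (2 * adv >= sb_hiwat).  Smaller increases wait to be piggy-backed
 * on the next ACK of incoming data.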
17609 */ 17610 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17611 goto dontupdate; 17612 17613 if (adv >= (int32_t)(2 * segsiz) && 17614 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17615 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17616 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17617 pass = 7; 17618 goto send; 17619 } 17620 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17621 pass = 23; 17622 goto send; 17623 } 17624 } 17625 dontupdate: 17626 17627 /* 17628 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17629 * is also a catch-all for the retransmit timer timeout case. 17630 */ 17631 if (tp->t_flags & TF_ACKNOW) { 17632 pass = 8; 17633 goto send; 17634 } 17635 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17636 pass = 9; 17637 goto send; 17638 } 17639 /* 17640 * If our state indicates that FIN should be sent and we have not 17641 * yet done so, then we need to send. 17642 */ 17643 if ((flags & TH_FIN) && 17644 (tp->snd_nxt == tp->snd_una)) { 17645 pass = 11; 17646 goto send; 17647 } 17648 /* 17649 * No reason to send a segment, just return. 17650 */ 17651 just_return: 17652 SOCKBUF_UNLOCK(sb); 17653 just_return_nolock: 17654 { 17655 int app_limited = CTF_JR_SENT_DATA; 17656 17657 if (tot_len_this_send > 0) { 17658 /* Make sure snd_nxt is up to max */ 17659 rack->r_ctl.fsb.recwin = recwin; 17660 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17661 if ((error == 0) && 17662 rack_use_rfo && 17663 ((flags & (TH_SYN|TH_FIN)) == 0) && 17664 (ipoptlen == 0) && 17665 (tp->snd_nxt == tp->snd_max) && 17666 (tp->rcv_numsacks == 0) && 17667 rack->r_fsb_inited && 17668 TCPS_HAVEESTABLISHED(tp->t_state) && 17669 (rack->r_must_retran == 0) && 17670 ((tp->t_flags & TF_NEEDFIN) == 0) && 17671 (len > 0) && (orig_len > 0) && 17672 (orig_len > len) && 17673 ((orig_len - len) >= segsiz) && 17674 ((optlen == 0) || 17675 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17676 /* We can send at least one more MSS using our fsb */ 17677 17678 rack->r_fast_output = 1; 17679 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17680 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17681 rack->r_ctl.fsb.tcp_flags = flags; 17682 rack->r_ctl.fsb.left_to_send = orig_len - len; 17683 if (hw_tls) 17684 rack->r_ctl.fsb.hw_tls = 1; 17685 else 17686 rack->r_ctl.fsb.hw_tls = 0; 17687 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17688 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17689 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17690 (tp->snd_max - tp->snd_una))); 17691 if (rack->r_ctl.fsb.left_to_send < segsiz) 17692 rack->r_fast_output = 0; 17693 else { 17694 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17695 rack->r_ctl.fsb.rfo_apply_push = 1; 17696 else 17697 rack->r_ctl.fsb.rfo_apply_push = 0; 17698 } 17699 } else 17700 rack->r_fast_output = 0; 17701 17702 17703 rack_log_fsb(rack, tp, so, flags, 17704 ipoptlen, orig_len, len, 0, 17705 1, optlen, __LINE__, 1); 17706 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17707 tp->snd_nxt = tp->snd_max; 17708 } else { 17709 int end_window = 0; 17710 uint32_t seq = tp->gput_ack; 17711 17712 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17713 if (rsm) { 17714 /* 17715 * Mark the last sent that we just-returned (hinting 17716 * that delayed ack may play a role in any rtt measurement). 
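 * Descriptive note: the remainder of this just-return path classifies
 * why nothing was sent into one of the CTF_JR_* buckets (rwnd-, app-,
 * cwnd- or PRR-limited, or "assessing" when no reason matched).  That
 * classification feeds rack_log_type_just_return() and, depending on
 * the rack_*_block_ends_measure knobs, may also close the in-progress
 * goodput measurement window.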
17717 */ 17718 rsm->r_just_ret = 1; 17719 } 17720 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17721 rack->r_ctl.rc_agg_delayed = 0; 17722 rack->r_early = 0; 17723 rack->r_late = 0; 17724 rack->r_ctl.rc_agg_early = 0; 17725 if ((ctf_outstanding(tp) + 17726 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17727 minseg)) >= tp->snd_wnd) { 17728 /* We are limited by the rwnd */ 17729 app_limited = CTF_JR_RWND_LIMITED; 17730 if (IN_FASTRECOVERY(tp->t_flags)) 17731 rack->r_ctl.rc_prr_sndcnt = 0; 17732 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17733 /* We are limited by whats available -- app limited */ 17734 app_limited = CTF_JR_APP_LIMITED; 17735 if (IN_FASTRECOVERY(tp->t_flags)) 17736 rack->r_ctl.rc_prr_sndcnt = 0; 17737 } else if ((idle == 0) && 17738 ((tp->t_flags & TF_NODELAY) == 0) && 17739 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17740 (len < segsiz)) { 17741 /* 17742 * No delay is not on and the 17743 * user is sending less than 1MSS. This 17744 * brings out SWS avoidance so we 17745 * don't send. Another app-limited case. 17746 */ 17747 app_limited = CTF_JR_APP_LIMITED; 17748 } else if (tp->t_flags & TF_NOPUSH) { 17749 /* 17750 * The user has requested no push of 17751 * the last segment and we are 17752 * at the last segment. Another app 17753 * limited case. 17754 */ 17755 app_limited = CTF_JR_APP_LIMITED; 17756 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17757 /* Its the cwnd */ 17758 app_limited = CTF_JR_CWND_LIMITED; 17759 } else if (IN_FASTRECOVERY(tp->t_flags) && 17760 (rack->rack_no_prr == 0) && 17761 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17762 app_limited = CTF_JR_PRR; 17763 } else { 17764 /* Now why here are we not sending? */ 17765 #ifdef NOW 17766 #ifdef INVARIANTS 17767 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17768 #endif 17769 #endif 17770 app_limited = CTF_JR_ASSESSING; 17771 } 17772 /* 17773 * App limited in some fashion, for our pacing GP 17774 * measurements we don't want any gap (even cwnd). 17775 * Close down the measurement window. 17776 */ 17777 if (rack_cwnd_block_ends_measure && 17778 ((app_limited == CTF_JR_CWND_LIMITED) || 17779 (app_limited == CTF_JR_PRR))) { 17780 /* 17781 * The reason we are not sending is 17782 * the cwnd (or prr). We have been configured 17783 * to end the measurement window in 17784 * this case. 17785 */ 17786 end_window = 1; 17787 } else if (rack_rwnd_block_ends_measure && 17788 (app_limited == CTF_JR_RWND_LIMITED)) { 17789 /* 17790 * We are rwnd limited and have been 17791 * configured to end the measurement 17792 * window in this case. 17793 */ 17794 end_window = 1; 17795 } else if (app_limited == CTF_JR_APP_LIMITED) { 17796 /* 17797 * A true application limited period, we have 17798 * ran out of data. 17799 */ 17800 end_window = 1; 17801 } else if (app_limited == CTF_JR_ASSESSING) { 17802 /* 17803 * In the assessing case we hit the end of 17804 * the if/else and had no known reason 17805 * This will panic us under invariants.. 17806 * 17807 * If we get this out in logs we need to 17808 * investagate which reason we missed. 17809 */ 17810 end_window = 1; 17811 } 17812 if (end_window) { 17813 uint8_t log = 0; 17814 17815 /* Adjust the Gput measurement */ 17816 if ((tp->t_flags & TF_GPUTINPROG) && 17817 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17818 tp->gput_ack = tp->snd_max; 17819 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17820 /* 17821 * There is not enough to measure. 
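 * i.e. once clipped to snd_max the goodput window would cover fewer
 * than MIN_GP_WIN segments, so the measurement is abandoned (the
 * TF_GPUTINPROG flag is cleared and the event is only logged) rather
 * than completed on too little data.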
17822 */ 17823 tp->t_flags &= ~TF_GPUTINPROG; 17824 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17825 rack->r_ctl.rc_gp_srtt /*flex1*/, 17826 tp->gput_seq, 17827 0, 0, 18, __LINE__, NULL, 0); 17828 } else 17829 log = 1; 17830 } 17831 /* Mark the last packet has app limited */ 17832 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17833 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17834 if (rack->r_ctl.rc_app_limited_cnt == 0) 17835 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17836 else { 17837 /* 17838 * Go out to the end app limited and mark 17839 * this new one as next and move the end_appl up 17840 * to this guy. 17841 */ 17842 if (rack->r_ctl.rc_end_appl) 17843 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17844 rack->r_ctl.rc_end_appl = rsm; 17845 } 17846 rsm->r_flags |= RACK_APP_LIMITED; 17847 rack->r_ctl.rc_app_limited_cnt++; 17848 } 17849 if (log) 17850 rack_log_pacing_delay_calc(rack, 17851 rack->r_ctl.rc_app_limited_cnt, seq, 17852 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17853 } 17854 } 17855 if (slot) { 17856 /* set the rack tcb into the slot N */ 17857 counter_u64_add(rack_paced_segments, 1); 17858 } else if (tot_len_this_send) { 17859 counter_u64_add(rack_unpaced_segments, 1); 17860 } 17861 /* Check if we need to go into persists or not */ 17862 if ((tp->snd_max == tp->snd_una) && 17863 TCPS_HAVEESTABLISHED(tp->t_state) && 17864 sbavail(sb) && 17865 (sbavail(sb) > tp->snd_wnd) && 17866 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17867 /* Yes lets make sure to move to persist before timer-start */ 17868 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17869 } 17870 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17871 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17872 } 17873 #ifdef NETFLIX_SHARED_CWND 17874 if ((sbavail(sb) == 0) && 17875 rack->r_ctl.rc_scw) { 17876 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17877 rack->rack_scwnd_is_idle = 1; 17878 } 17879 #endif 17880 #ifdef TCP_ACCOUNTING 17881 if (tot_len_this_send > 0) { 17882 crtsc = get_cyclecount(); 17883 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17884 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17885 } 17886 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17887 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17888 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17889 } 17890 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17891 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17892 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17893 } 17894 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17895 } else { 17896 crtsc = get_cyclecount(); 17897 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17898 tp->tcp_cnt_counters[SND_LIMITED]++; 17899 } 17900 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17901 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17902 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17903 } 17904 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17905 } 17906 sched_unpin(); 17907 #endif 17908 return (0); 17909 17910 send: 17911 if (rsm || sack_rxmit) 17912 counter_u64_add(rack_nfto_resend, 1); 17913 else 17914 counter_u64_add(rack_non_fto_send, 1); 17915 if ((flags & TH_FIN) && 17916 sbavail(sb)) { 17917 /* 17918 * We do not transmit a FIN 17919 * with data outstanding. 
We 17920 * need to make it so all data 17921 * is acked first. 17922 */ 17923 flags &= ~TH_FIN; 17924 } 17925 /* Enforce stack imposed max seg size if we have one */ 17926 if (rack->r_ctl.rc_pace_max_segs && 17927 (len > rack->r_ctl.rc_pace_max_segs)) { 17928 mark = 1; 17929 len = rack->r_ctl.rc_pace_max_segs; 17930 } 17931 SOCKBUF_LOCK_ASSERT(sb); 17932 if (len > 0) { 17933 if (len >= segsiz) 17934 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17935 else 17936 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17937 } 17938 /* 17939 * Before ESTABLISHED, force sending of initial options unless TCP 17940 * set not to do any options. NOTE: we assume that the IP/TCP header 17941 * plus TCP options always fit in a single mbuf, leaving room for a 17942 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17943 * + optlen <= MCLBYTES 17944 */ 17945 optlen = 0; 17946 #ifdef INET6 17947 if (isipv6) 17948 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17949 else 17950 #endif 17951 hdrlen = sizeof(struct tcpiphdr); 17952 17953 /* 17954 * Compute options for segment. We only have to care about SYN and 17955 * established connection segments. Options for SYN-ACK segments 17956 * are handled in TCP syncache. 17957 */ 17958 to.to_flags = 0; 17959 if ((tp->t_flags & TF_NOOPT) == 0) { 17960 /* Maximum segment size. */ 17961 if (flags & TH_SYN) { 17962 tp->snd_nxt = tp->iss; 17963 to.to_mss = tcp_mssopt(&inp->inp_inc); 17964 if (tp->t_port) 17965 to.to_mss -= V_tcp_udp_tunneling_overhead; 17966 to.to_flags |= TOF_MSS; 17967 17968 /* 17969 * On SYN or SYN|ACK transmits on TFO connections, 17970 * only include the TFO option if it is not a 17971 * retransmit, as the presence of the TFO option may 17972 * have caused the original SYN or SYN|ACK to have 17973 * been dropped by a middlebox. 17974 */ 17975 if (IS_FASTOPEN(tp->t_flags) && 17976 (tp->t_rxtshift == 0)) { 17977 if (tp->t_state == TCPS_SYN_RECEIVED) { 17978 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17979 to.to_tfo_cookie = 17980 (u_int8_t *)&tp->t_tfo_cookie.server; 17981 to.to_flags |= TOF_FASTOPEN; 17982 wanted_cookie = 1; 17983 } else if (tp->t_state == TCPS_SYN_SENT) { 17984 to.to_tfo_len = 17985 tp->t_tfo_client_cookie_len; 17986 to.to_tfo_cookie = 17987 tp->t_tfo_cookie.client; 17988 to.to_flags |= TOF_FASTOPEN; 17989 wanted_cookie = 1; 17990 /* 17991 * If we wind up having more data to 17992 * send with the SYN than can fit in 17993 * one segment, don't send any more 17994 * until the SYN|ACK comes back from 17995 * the other end. 17996 */ 17997 sendalot = 0; 17998 } 17999 } 18000 } 18001 /* Window scaling. */ 18002 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18003 to.to_wscale = tp->request_r_scale; 18004 to.to_flags |= TOF_SCALE; 18005 } 18006 /* Timestamps. */ 18007 if ((tp->t_flags & TF_RCVD_TSTMP) || 18008 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18009 to.to_tsval = ms_cts + tp->ts_offset; 18010 to.to_tsecr = tp->ts_recent; 18011 to.to_flags |= TOF_TS; 18012 } 18013 /* Set receive buffer autosizing timestamp. */ 18014 if (tp->rfbuf_ts == 0 && 18015 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18016 tp->rfbuf_ts = tcp_ts_getticks(); 18017 /* Selective ACK's. 
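 * Descriptive note: on a SYN only the SACK-permitted option is
 * offered; once established, any SACK blocks we are holding for the
 * peer (tp->rcv_numsacks) are echoed back via to.to_sacks below.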
*/ 18018 if (tp->t_flags & TF_SACK_PERMIT) { 18019 if (flags & TH_SYN) 18020 to.to_flags |= TOF_SACKPERM; 18021 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18022 tp->rcv_numsacks > 0) { 18023 to.to_flags |= TOF_SACK; 18024 to.to_nsacks = tp->rcv_numsacks; 18025 to.to_sacks = (u_char *)tp->sackblks; 18026 } 18027 } 18028 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18029 /* TCP-MD5 (RFC2385). */ 18030 if (tp->t_flags & TF_SIGNATURE) 18031 to.to_flags |= TOF_SIGNATURE; 18032 #endif /* TCP_SIGNATURE */ 18033 18034 /* Processing the options. */ 18035 hdrlen += optlen = tcp_addoptions(&to, opt); 18036 /* 18037 * If we wanted a TFO option to be added, but it was unable 18038 * to fit, ensure no data is sent. 18039 */ 18040 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18041 !(to.to_flags & TOF_FASTOPEN)) 18042 len = 0; 18043 } 18044 if (tp->t_port) { 18045 if (V_tcp_udp_tunneling_port == 0) { 18046 /* The port was removed?? */ 18047 SOCKBUF_UNLOCK(&so->so_snd); 18048 #ifdef TCP_ACCOUNTING 18049 crtsc = get_cyclecount(); 18050 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18051 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18052 } 18053 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18054 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18055 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18056 } 18057 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18058 sched_unpin(); 18059 #endif 18060 return (EHOSTUNREACH); 18061 } 18062 hdrlen += sizeof(struct udphdr); 18063 } 18064 #ifdef INET6 18065 if (isipv6) 18066 ipoptlen = ip6_optlen(tp->t_inpcb); 18067 else 18068 #endif 18069 if (tp->t_inpcb->inp_options) 18070 ipoptlen = tp->t_inpcb->inp_options->m_len - 18071 offsetof(struct ipoption, ipopt_list); 18072 else 18073 ipoptlen = 0; 18074 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18075 ipoptlen += ipsec_optlen; 18076 #endif 18077 18078 /* 18079 * Adjust data length if insertion of options will bump the packet 18080 * length beyond the t_maxseg length. Clear the FIN bit because we 18081 * cut off the tail of the segment. 18082 */ 18083 if (len + optlen + ipoptlen > tp->t_maxseg) { 18084 if (tso) { 18085 uint32_t if_hw_tsomax; 18086 uint32_t moff; 18087 int32_t max_len; 18088 18089 /* extract TSO information */ 18090 if_hw_tsomax = tp->t_tsomax; 18091 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18092 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18093 KASSERT(ipoptlen == 0, 18094 ("%s: TSO can't do IP options", __func__)); 18095 18096 /* 18097 * Check if we should limit by maximum payload 18098 * length: 18099 */ 18100 if (if_hw_tsomax != 0) { 18101 /* compute maximum TSO length */ 18102 max_len = (if_hw_tsomax - hdrlen - 18103 max_linkhdr); 18104 if (max_len <= 0) { 18105 len = 0; 18106 } else if (len > max_len) { 18107 sendalot = 1; 18108 len = max_len; 18109 mark = 2; 18110 } 18111 } 18112 /* 18113 * Prevent the last segment from being fractional 18114 * unless the send sockbuf can be emptied: 18115 */ 18116 max_len = (tp->t_maxseg - optlen); 18117 if ((sb_offset + len) < sbavail(sb)) { 18118 moff = len % (u_int)max_len; 18119 if (moff != 0) { 18120 mark = 3; 18121 len -= moff; 18122 } 18123 } 18124 /* 18125 * In case there are too many small fragments don't 18126 * use TSO: 18127 */ 18128 if (len <= segsiz) { 18129 mark = 4; 18130 tso = 0; 18131 } 18132 /* 18133 * Send the FIN in a separate segment after the bulk 18134 * sending is done. We don't trust the TSO 18135 * implementations to clear the FIN flag on all but 18136 * the last segment. 
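 * Descriptive note: a non-zero sendalot below simply forces another
 * pass through the output loop so the FIN can go out in its own,
 * non-TSO segment; the particular value (4) appears to act only as a
 * marker of which path requested the extra pass.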
18137 */ 18138 if (tp->t_flags & TF_NEEDFIN) { 18139 sendalot = 4; 18140 } 18141 } else { 18142 mark = 5; 18143 if (optlen + ipoptlen >= tp->t_maxseg) { 18144 /* 18145 * Since we don't have enough space to put 18146 * the IP header chain and the TCP header in 18147 * one packet as required by RFC 7112, don't 18148 * send it. Also ensure that at least one 18149 * byte of the payload can be put into the 18150 * TCP segment. 18151 */ 18152 SOCKBUF_UNLOCK(&so->so_snd); 18153 error = EMSGSIZE; 18154 sack_rxmit = 0; 18155 goto out; 18156 } 18157 len = tp->t_maxseg - optlen - ipoptlen; 18158 sendalot = 5; 18159 } 18160 } else { 18161 tso = 0; 18162 mark = 6; 18163 } 18164 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18165 ("%s: len > IP_MAXPACKET", __func__)); 18166 #ifdef DIAGNOSTIC 18167 #ifdef INET6 18168 if (max_linkhdr + hdrlen > MCLBYTES) 18169 #else 18170 if (max_linkhdr + hdrlen > MHLEN) 18171 #endif 18172 panic("tcphdr too big"); 18173 #endif 18174 18175 /* 18176 * This KASSERT is here to catch edge cases at a well defined place. 18177 * Before, those had triggered (random) panic conditions further 18178 * down. 18179 */ 18180 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18181 if ((len == 0) && 18182 (flags & TH_FIN) && 18183 (sbused(sb))) { 18184 /* 18185 * We have outstanding data, don't send a fin by itself!. 18186 */ 18187 goto just_return; 18188 } 18189 /* 18190 * Grab a header mbuf, attaching a copy of data to be transmitted, 18191 * and initialize the header from the template for sends on this 18192 * connection. 18193 */ 18194 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18195 if (len) { 18196 uint32_t max_val; 18197 uint32_t moff; 18198 18199 if (rack->r_ctl.rc_pace_max_segs) 18200 max_val = rack->r_ctl.rc_pace_max_segs; 18201 else if (rack->rc_user_set_max_segs) 18202 max_val = rack->rc_user_set_max_segs * segsiz; 18203 else 18204 max_val = len; 18205 /* 18206 * We allow a limit on sending with hptsi. 18207 */ 18208 if (len > max_val) { 18209 mark = 7; 18210 len = max_val; 18211 } 18212 #ifdef INET6 18213 if (MHLEN < hdrlen + max_linkhdr) 18214 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18215 else 18216 #endif 18217 m = m_gethdr(M_NOWAIT, MT_DATA); 18218 18219 if (m == NULL) { 18220 SOCKBUF_UNLOCK(sb); 18221 error = ENOBUFS; 18222 sack_rxmit = 0; 18223 goto out; 18224 } 18225 m->m_data += max_linkhdr; 18226 m->m_len = hdrlen; 18227 18228 /* 18229 * Start the m_copy functions from the closest mbuf to the 18230 * sb_offset in the socket buffer chain. 18231 */ 18232 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18233 s_mb = mb; 18234 s_moff = moff; 18235 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18236 m_copydata(mb, moff, (int)len, 18237 mtod(m, caddr_t)+hdrlen); 18238 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18239 sbsndptr_adv(sb, mb, len); 18240 m->m_len += len; 18241 } else { 18242 struct sockbuf *msb; 18243 18244 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18245 msb = NULL; 18246 else 18247 msb = sb; 18248 m->m_next = tcp_m_copym( 18249 mb, moff, &len, 18250 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18251 ((rsm == NULL) ? hw_tls : 0) 18252 #ifdef NETFLIX_COPY_ARGS 18253 , &filled_all 18254 #endif 18255 ); 18256 if (len <= (tp->t_maxseg - optlen)) { 18257 /* 18258 * Must have ran out of mbufs for the copy 18259 * shorten it to no longer need tso. Lets 18260 * not put on sendalot since we are low on 18261 * mbufs. 
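 * Descriptive note: tcp_m_copym() may have trimmed len down to what
 * it could actually copy, so if the remainder now fits in a single
 * segment (len <= t_maxseg - optlen) there is nothing left for TSO to
 * split and tso is cleared instead of scheduling another pass while
 * mbufs are scarce.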
18262 */ 18263 tso = 0; 18264 } 18265 if (m->m_next == NULL) { 18266 SOCKBUF_UNLOCK(sb); 18267 (void)m_free(m); 18268 error = ENOBUFS; 18269 sack_rxmit = 0; 18270 goto out; 18271 } 18272 } 18273 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18274 if (rsm && (rsm->r_flags & RACK_TLP)) { 18275 /* 18276 * TLP should not count in retran count, but 18277 * in its own bin 18278 */ 18279 counter_u64_add(rack_tlp_retran, 1); 18280 counter_u64_add(rack_tlp_retran_bytes, len); 18281 } else { 18282 tp->t_sndrexmitpack++; 18283 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18284 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18285 } 18286 #ifdef STATS 18287 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18288 len); 18289 #endif 18290 } else { 18291 KMOD_TCPSTAT_INC(tcps_sndpack); 18292 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18293 #ifdef STATS 18294 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18295 len); 18296 #endif 18297 } 18298 /* 18299 * If we're sending everything we've got, set PUSH. (This 18300 * will keep happy those implementations which only give 18301 * data to the user when a buffer fills or a PUSH comes in.) 18302 */ 18303 if (sb_offset + len == sbused(sb) && 18304 sbused(sb) && 18305 !(flags & TH_SYN)) { 18306 flags |= TH_PUSH; 18307 add_flag |= RACK_HAD_PUSH; 18308 } 18309 18310 SOCKBUF_UNLOCK(sb); 18311 } else { 18312 SOCKBUF_UNLOCK(sb); 18313 if (tp->t_flags & TF_ACKNOW) 18314 KMOD_TCPSTAT_INC(tcps_sndacks); 18315 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18316 KMOD_TCPSTAT_INC(tcps_sndctrl); 18317 else 18318 KMOD_TCPSTAT_INC(tcps_sndwinup); 18319 18320 m = m_gethdr(M_NOWAIT, MT_DATA); 18321 if (m == NULL) { 18322 error = ENOBUFS; 18323 sack_rxmit = 0; 18324 goto out; 18325 } 18326 #ifdef INET6 18327 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18328 MHLEN >= hdrlen) { 18329 M_ALIGN(m, hdrlen); 18330 } else 18331 #endif 18332 m->m_data += max_linkhdr; 18333 m->m_len = hdrlen; 18334 } 18335 SOCKBUF_UNLOCK_ASSERT(sb); 18336 m->m_pkthdr.rcvif = (struct ifnet *)0; 18337 #ifdef MAC 18338 mac_inpcb_create_mbuf(inp, m); 18339 #endif 18340 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18341 #ifdef INET6 18342 if (isipv6) 18343 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18344 else 18345 #endif /* INET6 */ 18346 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18347 th = rack->r_ctl.fsb.th; 18348 udp = rack->r_ctl.fsb.udp; 18349 if (udp) { 18350 #ifdef INET6 18351 if (isipv6) 18352 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18353 else 18354 #endif /* INET6 */ 18355 ulen = hdrlen + len - sizeof(struct ip); 18356 udp->uh_ulen = htons(ulen); 18357 } 18358 } else { 18359 #ifdef INET6 18360 if (isipv6) { 18361 ip6 = mtod(m, struct ip6_hdr *); 18362 if (tp->t_port) { 18363 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18364 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18365 udp->uh_dport = tp->t_port; 18366 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18367 udp->uh_ulen = htons(ulen); 18368 th = (struct tcphdr *)(udp + 1); 18369 } else 18370 th = (struct tcphdr *)(ip6 + 1); 18371 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18372 } else 18373 #endif /* INET6 */ 18374 { 18375 ip = mtod(m, struct ip *); 18376 #ifdef TCPDEBUG 18377 ipov = (struct ipovly *)ip; 18378 #endif 18379 if (tp->t_port) { 18380 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18381 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18382 udp->uh_dport = tp->t_port; 18383 ulen = hdrlen + len - sizeof(struct ip); 18384 udp->uh_ulen = htons(ulen); 
18385 th = (struct tcphdr *)(udp + 1); 18386 } else 18387 th = (struct tcphdr *)(ip + 1); 18388 tcpip_fillheaders(inp, tp->t_port, ip, th); 18389 } 18390 } 18391 /* 18392 * Fill in fields, remembering maximum advertised window for use in 18393 * delaying messages about window sizes. If resending a FIN, be sure 18394 * not to use a new sequence number. 18395 */ 18396 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18397 tp->snd_nxt == tp->snd_max) 18398 tp->snd_nxt--; 18399 /* 18400 * If we are starting a connection, send ECN setup SYN packet. If we 18401 * are on a retransmit, we may resend those bits a number of times 18402 * as per RFC 3168. 18403 */ 18404 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 18405 if (tp->t_rxtshift >= 1) { 18406 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 18407 flags |= TH_ECE | TH_CWR; 18408 } else 18409 flags |= TH_ECE | TH_CWR; 18410 } 18411 /* Handle parallel SYN for ECN */ 18412 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18413 (tp->t_flags2 & TF2_ECN_SND_ECE)) { 18414 flags |= TH_ECE; 18415 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18416 } 18417 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18418 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18419 /* 18420 * If the peer has ECN, mark data packets with ECN capable 18421 * transmission (ECT). Ignore pure ack packets, 18422 * retransmissions. 18423 */ 18424 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 18425 (sack_rxmit == 0)) { 18426 #ifdef INET6 18427 if (isipv6) 18428 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 18429 else 18430 #endif 18431 ip->ip_tos |= IPTOS_ECN_ECT0; 18432 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 18433 /* 18434 * Reply with proper ECN notifications. 18435 * Only set CWR on new data segments. 18436 */ 18437 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 18438 flags |= TH_CWR; 18439 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 18440 } 18441 } 18442 if (tp->t_flags2 & TF2_ECN_SND_ECE) 18443 flags |= TH_ECE; 18444 } 18445 /* 18446 * If we are doing retransmissions, then snd_nxt will not reflect 18447 * the first unsent octet. For ACK only packets, we do not want the 18448 * sequence number of the retransmitted packet, we want the sequence 18449 * number of the next unsent octet. So, if there is no data (and no 18450 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18451 * ti_seq. But if we are in persist state, snd_max might reflect 18452 * one byte beyond the right edge of the window, so use snd_nxt in 18453 * that case, since we know we aren't doing a retransmission. 18454 * (retransmit and persist are mutually exclusive...) 18455 */ 18456 if (sack_rxmit == 0) { 18457 if (len || (flags & (TH_SYN | TH_FIN))) { 18458 th->th_seq = htonl(tp->snd_nxt); 18459 rack_seq = tp->snd_nxt; 18460 } else { 18461 th->th_seq = htonl(tp->snd_max); 18462 rack_seq = tp->snd_max; 18463 } 18464 } else { 18465 th->th_seq = htonl(rsm->r_start); 18466 rack_seq = rsm->r_start; 18467 } 18468 th->th_ack = htonl(tp->rcv_nxt); 18469 th->th_flags = flags; 18470 /* 18471 * Calculate receive window. Don't shrink window, but avoid silly 18472 * window syndrome. 18473 * If a RST segment is sent, advertise a window of zero. 
18474 */ 18475 if (flags & TH_RST) { 18476 recwin = 0; 18477 } else { 18478 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18479 recwin < (long)segsiz) { 18480 recwin = 0; 18481 } 18482 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18483 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18484 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18485 } 18486 18487 /* 18488 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18489 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18490 * handled in syncache. 18491 */ 18492 if (flags & TH_SYN) 18493 th->th_win = htons((u_short) 18494 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18495 else { 18496 /* Avoid shrinking window with window scaling. */ 18497 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18498 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18499 } 18500 /* 18501 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18502 * window. This may cause the remote transmitter to stall. This 18503 * flag tells soreceive() to disable delayed acknowledgements when 18504 * draining the buffer. This can occur if the receiver is 18505 * attempting to read more data than can be buffered prior to 18506 * transmitting on the connection. 18507 */ 18508 if (th->th_win == 0) { 18509 tp->t_sndzerowin++; 18510 tp->t_flags |= TF_RXWIN0SENT; 18511 } else 18512 tp->t_flags &= ~TF_RXWIN0SENT; 18513 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18514 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18515 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18516 uint8_t *cpto; 18517 18518 cpto = mtod(m, uint8_t *); 18519 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18520 /* 18521 * We have just copied in: 18522 * IP/IP6 18523 * <optional udphdr> 18524 * tcphdr (no options) 18525 * 18526 * We need to grab the correct pointers into the mbuf 18527 * for both the tcp header, and possibly the udp header (if tunneling). 18528 * We do this by using the offset in the copy buffer and adding it 18529 * to the mbuf base pointer (cpto). 18530 */ 18531 #ifdef INET6 18532 if (isipv6) 18533 ip6 = mtod(m, struct ip6_hdr *); 18534 else 18535 #endif /* INET6 */ 18536 ip = mtod(m, struct ip *); 18537 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18538 /* If we have a udp header lets set it into the mbuf as well */ 18539 if (udp) 18540 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18541 } 18542 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18543 if (to.to_flags & TOF_SIGNATURE) { 18544 /* 18545 * Calculate MD5 signature and put it into the place 18546 * determined before. 18547 * NOTE: since TCP options buffer doesn't point into 18548 * mbuf's data, calculate offset and use it. 18549 */ 18550 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18551 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18552 /* 18553 * Do not send segment if the calculation of MD5 18554 * digest has failed. 18555 */ 18556 goto out; 18557 } 18558 } 18559 #endif 18560 if (optlen) { 18561 bcopy(opt, th + 1, optlen); 18562 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18563 } 18564 /* 18565 * Put TCP length in extended header, and then checksum extended 18566 * header and data. 
18567 */ 18568 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18569 #ifdef INET6 18570 if (isipv6) { 18571 /* 18572 * ip6_plen is not need to be filled now, and will be filled 18573 * in ip6_output. 18574 */ 18575 if (tp->t_port) { 18576 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18577 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18578 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18579 th->th_sum = htons(0); 18580 UDPSTAT_INC(udps_opackets); 18581 } else { 18582 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18583 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18584 th->th_sum = in6_cksum_pseudo(ip6, 18585 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18586 0); 18587 } 18588 } 18589 #endif 18590 #if defined(INET6) && defined(INET) 18591 else 18592 #endif 18593 #ifdef INET 18594 { 18595 if (tp->t_port) { 18596 m->m_pkthdr.csum_flags = CSUM_UDP; 18597 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18598 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18599 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18600 th->th_sum = htons(0); 18601 UDPSTAT_INC(udps_opackets); 18602 } else { 18603 m->m_pkthdr.csum_flags = CSUM_TCP; 18604 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18605 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18606 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18607 IPPROTO_TCP + len + optlen)); 18608 } 18609 /* IP version must be set here for ipv4/ipv6 checking later */ 18610 KASSERT(ip->ip_v == IPVERSION, 18611 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18612 } 18613 #endif 18614 /* 18615 * Enable TSO and specify the size of the segments. The TCP pseudo 18616 * header checksum is always provided. XXX: Fixme: This is currently 18617 * not the case for IPv6. 18618 */ 18619 if (tso) { 18620 KASSERT(len > tp->t_maxseg - optlen, 18621 ("%s: len <= tso_segsz", __func__)); 18622 m->m_pkthdr.csum_flags |= CSUM_TSO; 18623 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18624 } 18625 KASSERT(len + hdrlen == m_length(m, NULL), 18626 ("%s: mbuf chain different than expected: %d + %u != %u", 18627 __func__, len, hdrlen, m_length(m, NULL))); 18628 18629 #ifdef TCP_HHOOK 18630 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18631 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18632 #endif 18633 /* We're getting ready to send; log now. 
*/ 18634 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18635 union tcp_log_stackspecific log; 18636 18637 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18638 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 18639 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 18640 if (rack->rack_no_prr) 18641 log.u_bbr.flex1 = 0; 18642 else 18643 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18644 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18645 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18646 log.u_bbr.flex4 = orig_len; 18647 if (filled_all) 18648 log.u_bbr.flex5 = 0x80000000; 18649 else 18650 log.u_bbr.flex5 = 0; 18651 /* Save off the early/late values */ 18652 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18653 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18654 log.u_bbr.bw_inuse = rack_get_bw(rack); 18655 if (rsm || sack_rxmit) { 18656 if (doing_tlp) 18657 log.u_bbr.flex8 = 2; 18658 else 18659 log.u_bbr.flex8 = 1; 18660 } else { 18661 if (doing_tlp) 18662 log.u_bbr.flex8 = 3; 18663 else 18664 log.u_bbr.flex8 = 0; 18665 } 18666 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18667 log.u_bbr.flex7 = mark; 18668 log.u_bbr.flex7 <<= 8; 18669 log.u_bbr.flex7 |= pass; 18670 log.u_bbr.pkts_out = tp->t_maxseg; 18671 log.u_bbr.timeStamp = cts; 18672 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18673 log.u_bbr.lt_epoch = cwnd_to_use; 18674 log.u_bbr.delivered = sendalot; 18675 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18676 len, &log, false, NULL, NULL, 0, &tv); 18677 } else 18678 lgb = NULL; 18679 18680 /* 18681 * Fill in IP length and desired time to live and send to IP level. 18682 * There should be a better way to handle ttl and tos; we could keep 18683 * them in the template, but need a way to checksum without them. 18684 */ 18685 /* 18686 * m->m_pkthdr.len should have been set before cksum calcuration, 18687 * because in6_cksum() need it. 18688 */ 18689 #ifdef INET6 18690 if (isipv6) { 18691 /* 18692 * we separately set hoplimit for every segment, since the 18693 * user might want to change the value via setsockopt. Also, 18694 * desired default hop limit might be changed via Neighbor 18695 * Discovery. 18696 */ 18697 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18698 18699 /* 18700 * Set the packet size here for the benefit of DTrace 18701 * probes. ip6_output() will set it properly; it's supposed 18702 * to include the option header lengths as well. 18703 */ 18704 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18705 18706 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18707 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18708 else 18709 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18710 18711 if (tp->t_state == TCPS_SYN_SENT) 18712 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18713 18714 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18715 /* TODO: IPv6 IP6TOS_ECT bit on */ 18716 error = ip6_output(m, 18717 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18718 inp->in6p_outputopts, 18719 #else 18720 NULL, 18721 #endif 18722 &inp->inp_route6, 18723 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18724 NULL, NULL, inp); 18725 18726 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18727 mtu = inp->inp_route6.ro_nh->nh_mtu; 18728 } 18729 #endif /* INET6 */ 18730 #if defined(INET) && defined(INET6) 18731 else 18732 #endif 18733 #ifdef INET 18734 { 18735 ip->ip_len = htons(m->m_pkthdr.len); 18736 #ifdef INET6 18737 if (inp->inp_vflag & INP_IPV6PROTO) 18738 ip->ip_ttl = in6_selecthlim(inp, NULL); 18739 #endif /* INET6 */ 18740 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18741 /* 18742 * If we do path MTU discovery, then we set DF on every 18743 * packet. This might not be the best thing to do according 18744 * to RFC3390 Section 2. However, the tcp hostcache mitigates 18745 * the problem, so it affects only the first tcp connection 18746 * with a host. 18747 * 18748 * NB: Don't set DF on small MTU/MSS to have a safe 18749 * fallback. 18750 */ 18751 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18752 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18753 if (tp->t_port == 0 || len < V_tcp_minmss) { 18754 ip->ip_off |= htons(IP_DF); 18755 } 18756 } else { 18757 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18758 } 18759 18760 if (tp->t_state == TCPS_SYN_SENT) 18761 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18762 18763 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18764 18765 error = ip_output(m, 18766 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18767 inp->inp_options, 18768 #else 18769 NULL, 18770 #endif 18771 &inp->inp_route, 18772 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18773 inp); 18774 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18775 mtu = inp->inp_route.ro_nh->nh_mtu; 18776 } 18777 #endif /* INET */ 18778 18779 out: 18780 if (lgb) { 18781 lgb->tlb_errno = error; 18782 lgb = NULL; 18783 } 18784 /* 18785 * In transmit state, time the transmission and arrange for the 18786 * retransmit. In persist state, just set snd_max.
18787 */ 18788 if (error == 0) { 18789 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18790 if (rsm && doing_tlp) { 18791 rack->rc_last_sent_tlp_past_cumack = 0; 18792 rack->rc_last_sent_tlp_seq_valid = 1; 18793 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18794 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18795 } 18796 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18797 if (rsm && (doing_tlp == 0)) { 18798 /* Set we retransmitted */ 18799 rack->rc_gp_saw_rec = 1; 18800 } else { 18801 if (cwnd_to_use > tp->snd_ssthresh) { 18802 /* Set we sent in CA */ 18803 rack->rc_gp_saw_ca = 1; 18804 } else { 18805 /* Set we sent in SS */ 18806 rack->rc_gp_saw_ss = 1; 18807 } 18808 } 18809 if (doing_tlp && (rsm == NULL)) { 18810 /* Make sure new data TLP cnt is clear */ 18811 rack->r_ctl.rc_tlp_new_data = 0; 18812 } 18813 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18814 (tp->t_flags & TF_SACK_PERMIT) && 18815 tp->rcv_numsacks > 0) 18816 tcp_clean_dsack_blocks(tp); 18817 tot_len_this_send += len; 18818 if (len == 0) 18819 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18820 else if (len == 1) { 18821 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18822 } else if (len > 1) { 18823 int idx; 18824 18825 idx = (len / segsiz) + 3; 18826 if (idx >= TCP_MSS_ACCT_ATIMER) 18827 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18828 else 18829 counter_u64_add(rack_out_size[idx], 1); 18830 } 18831 } 18832 if ((rack->rack_no_prr == 0) && 18833 sub_from_prr && 18834 (error == 0)) { 18835 if (rack->r_ctl.rc_prr_sndcnt >= len) 18836 rack->r_ctl.rc_prr_sndcnt -= len; 18837 else 18838 rack->r_ctl.rc_prr_sndcnt = 0; 18839 } 18840 sub_from_prr = 0; 18841 if (doing_tlp) { 18842 /* Make sure the TLP is added */ 18843 add_flag |= RACK_TLP; 18844 } else if (rsm) { 18845 /* If its a resend without TLP then it must not have the flag */ 18846 rsm->r_flags &= ~RACK_TLP; 18847 } 18848 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18849 rack_to_usec_ts(&tv), 18850 rsm, add_flag, s_mb, s_moff, hw_tls); 18851 18852 18853 if ((error == 0) && 18854 (len > 0) && 18855 (tp->snd_una == tp->snd_max)) 18856 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18857 { 18858 tcp_seq startseq = tp->snd_nxt; 18859 18860 /* Track our lost count */ 18861 if (rsm && (doing_tlp == 0)) 18862 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18863 /* 18864 * Advance snd_nxt over sequence space of this segment. 18865 */ 18866 if (error) 18867 /* We don't log or do anything with errors */ 18868 goto nomore; 18869 if (doing_tlp == 0) { 18870 if (rsm == NULL) { 18871 /* 18872 * Not a retransmission of some 18873 * sort, new data is going out so 18874 * clear our TLP count and flag. 18875 */ 18876 rack->rc_tlp_in_progress = 0; 18877 rack->r_ctl.rc_tlp_cnt_out = 0; 18878 } 18879 } else { 18880 /* 18881 * We have just sent a TLP, mark that it is true 18882 * and make sure our in progress is set so we 18883 * continue to check the count. 
18884 */ 18885 rack->rc_tlp_in_progress = 1; 18886 rack->r_ctl.rc_tlp_cnt_out++; 18887 } 18888 if (flags & (TH_SYN | TH_FIN)) { 18889 if (flags & TH_SYN) 18890 tp->snd_nxt++; 18891 if (flags & TH_FIN) { 18892 tp->snd_nxt++; 18893 tp->t_flags |= TF_SENTFIN; 18894 } 18895 } 18896 /* In the ENOBUFS case we do *not* update snd_max */ 18897 if (sack_rxmit) 18898 goto nomore; 18899 18900 tp->snd_nxt += len; 18901 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18902 if (tp->snd_una == tp->snd_max) { 18903 /* 18904 * Update the time we just added data since 18905 * none was outstanding. 18906 */ 18907 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18908 tp->t_acktime = ticks; 18909 } 18910 tp->snd_max = tp->snd_nxt; 18911 /* 18912 * Time this transmission if not a retransmission and 18913 * not currently timing anything. 18914 * This is only relevant in case of switching back to 18915 * the base stack. 18916 */ 18917 if (tp->t_rtttime == 0) { 18918 tp->t_rtttime = ticks; 18919 tp->t_rtseq = startseq; 18920 KMOD_TCPSTAT_INC(tcps_segstimed); 18921 } 18922 if (len && 18923 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18924 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18925 } 18926 /* 18927 * If we are doing FO we need to update the mbuf position and subtract 18928 * this happens when the peer sends us duplicate information and 18929 * we thus want to send a DSACK. 18930 * 18931 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18932 * turned off? If not then we are going to echo multiple DSACK blocks 18933 * out (with the TSO), which we should not be doing. 18934 */ 18935 if (rack->r_fast_output && len) { 18936 if (rack->r_ctl.fsb.left_to_send > len) 18937 rack->r_ctl.fsb.left_to_send -= len; 18938 else 18939 rack->r_ctl.fsb.left_to_send = 0; 18940 if (rack->r_ctl.fsb.left_to_send < segsiz) 18941 rack->r_fast_output = 0; 18942 if (rack->r_fast_output) { 18943 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18944 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18945 } 18946 } 18947 } 18948 nomore: 18949 if (error) { 18950 rack->r_ctl.rc_agg_delayed = 0; 18951 rack->r_early = 0; 18952 rack->r_late = 0; 18953 rack->r_ctl.rc_agg_early = 0; 18954 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18955 /* 18956 * Failures do not advance the seq counter above. For the 18957 * case of ENOBUFS we will fall out and retry in 1ms with 18958 * the hpts. Everything else will just have to retransmit 18959 * with the timer. 18960 * 18961 * In any case, we do not want to loop around for another 18962 * send without a good reason. 
18963 */ 18964 sendalot = 0; 18965 switch (error) { 18966 case EPERM: 18967 tp->t_softerror = error; 18968 #ifdef TCP_ACCOUNTING 18969 crtsc = get_cyclecount(); 18970 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18971 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18972 } 18973 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18974 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18975 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18976 } 18977 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18978 sched_unpin(); 18979 #endif 18980 return (error); 18981 case ENOBUFS: 18982 /* 18983 * Pace us right away to retry in a some 18984 * time 18985 */ 18986 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18987 if (rack->rc_enobuf < 0x7f) 18988 rack->rc_enobuf++; 18989 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18990 slot = 10 * HPTS_USEC_IN_MSEC; 18991 if (rack->r_ctl.crte != NULL) { 18992 counter_u64_add(rack_saw_enobuf_hw, 1); 18993 tcp_rl_log_enobuf(rack->r_ctl.crte); 18994 } 18995 counter_u64_add(rack_saw_enobuf, 1); 18996 goto enobufs; 18997 case EMSGSIZE: 18998 /* 18999 * For some reason the interface we used initially 19000 * to send segments changed to another or lowered 19001 * its MTU. If TSO was active we either got an 19002 * interface without TSO capabilits or TSO was 19003 * turned off. If we obtained mtu from ip_output() 19004 * then update it and try again. 19005 */ 19006 if (tso) 19007 tp->t_flags &= ~TF_TSO; 19008 if (mtu != 0) { 19009 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19010 goto again; 19011 } 19012 slot = 10 * HPTS_USEC_IN_MSEC; 19013 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19014 #ifdef TCP_ACCOUNTING 19015 crtsc = get_cyclecount(); 19016 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19017 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19018 } 19019 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19020 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19021 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19022 } 19023 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19024 sched_unpin(); 19025 #endif 19026 return (error); 19027 case ENETUNREACH: 19028 counter_u64_add(rack_saw_enetunreach, 1); 19029 case EHOSTDOWN: 19030 case EHOSTUNREACH: 19031 case ENETDOWN: 19032 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19033 tp->t_softerror = error; 19034 } 19035 /* FALLTHROUGH */ 19036 default: 19037 slot = 10 * HPTS_USEC_IN_MSEC; 19038 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19039 #ifdef TCP_ACCOUNTING 19040 crtsc = get_cyclecount(); 19041 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19042 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19043 } 19044 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19045 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19046 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19047 } 19048 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19049 sched_unpin(); 19050 #endif 19051 return (error); 19052 } 19053 } else { 19054 rack->rc_enobuf = 0; 19055 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19056 rack->r_ctl.retran_during_recovery += len; 19057 } 19058 KMOD_TCPSTAT_INC(tcps_sndtotal); 19059 19060 /* 19061 * Data sent (as far as we can tell). If this advertises a larger 19062 * window than any other segment, then remember the size of the 19063 * advertised window. Any pending ACK has now been sent. 
19064 */ 19065 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19066 tp->rcv_adv = tp->rcv_nxt + recwin; 19067 19068 tp->last_ack_sent = tp->rcv_nxt; 19069 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19070 enobufs: 19071 if (sendalot) { 19072 /* Do we need to turn off sendalot? */ 19073 if (rack->r_ctl.rc_pace_max_segs && 19074 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19075 /* We hit our max. */ 19076 sendalot = 0; 19077 } else if ((rack->rc_user_set_max_segs) && 19078 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19079 /* We hit the user defined max */ 19080 sendalot = 0; 19081 } 19082 } 19083 if ((error == 0) && (flags & TH_FIN)) 19084 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19085 if (flags & TH_RST) { 19086 /* 19087 * We don't send again after sending a RST. 19088 */ 19089 slot = 0; 19090 sendalot = 0; 19091 if (error == 0) 19092 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19093 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19094 /* 19095 * Get our pacing rate, if an error 19096 * occurred in sending (ENOBUF) we would 19097 * hit the else if with slot preset. Other 19098 * errors return. 19099 */ 19100 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19101 } 19102 if (rsm && 19103 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19104 rack->use_rack_rr) { 19105 /* Its a retransmit and we use the rack cheat? */ 19106 if ((slot == 0) || 19107 (rack->rc_always_pace == 0) || 19108 (rack->r_rr_config == 1)) { 19109 /* 19110 * We have no pacing set or we 19111 * are using old-style rack or 19112 * we are overriden to use the old 1ms pacing. 19113 */ 19114 slot = rack->r_ctl.rc_min_to; 19115 } 19116 } 19117 /* We have sent clear the flag */ 19118 rack->r_ent_rec_ns = 0; 19119 if (rack->r_must_retran) { 19120 if (rsm) { 19121 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19122 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19123 /* 19124 * We have retransmitted all. 19125 */ 19126 rack->r_must_retran = 0; 19127 rack->r_ctl.rc_out_at_rto = 0; 19128 } 19129 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19130 /* 19131 * Sending new data will also kill 19132 * the loop. 19133 */ 19134 rack->r_must_retran = 0; 19135 rack->r_ctl.rc_out_at_rto = 0; 19136 } 19137 } 19138 rack->r_ctl.fsb.recwin = recwin; 19139 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19140 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19141 /* 19142 * We hit an RTO and now have past snd_max at the RTO 19143 * clear all the WAS flags. 
19144 */ 19145 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19146 } 19147 if (slot) { 19148 /* set the rack tcb into the slot N */ 19149 counter_u64_add(rack_paced_segments, 1); 19150 if ((error == 0) && 19151 rack_use_rfo && 19152 ((flags & (TH_SYN|TH_FIN)) == 0) && 19153 (rsm == NULL) && 19154 (tp->snd_nxt == tp->snd_max) && 19155 (ipoptlen == 0) && 19156 (tp->rcv_numsacks == 0) && 19157 rack->r_fsb_inited && 19158 TCPS_HAVEESTABLISHED(tp->t_state) && 19159 (rack->r_must_retran == 0) && 19160 ((tp->t_flags & TF_NEEDFIN) == 0) && 19161 (len > 0) && (orig_len > 0) && 19162 (orig_len > len) && 19163 ((orig_len - len) >= segsiz) && 19164 ((optlen == 0) || 19165 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19166 /* We can send at least one more MSS using our fsb */ 19167 19168 rack->r_fast_output = 1; 19169 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19170 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19171 rack->r_ctl.fsb.tcp_flags = flags; 19172 rack->r_ctl.fsb.left_to_send = orig_len - len; 19173 if (hw_tls) 19174 rack->r_ctl.fsb.hw_tls = 1; 19175 else 19176 rack->r_ctl.fsb.hw_tls = 0; 19177 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19178 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19179 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19180 (tp->snd_max - tp->snd_una))); 19181 if (rack->r_ctl.fsb.left_to_send < segsiz) 19182 rack->r_fast_output = 0; 19183 else { 19184 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19185 rack->r_ctl.fsb.rfo_apply_push = 1; 19186 else 19187 rack->r_ctl.fsb.rfo_apply_push = 0; 19188 } 19189 } else 19190 rack->r_fast_output = 0; 19191 rack_log_fsb(rack, tp, so, flags, 19192 ipoptlen, orig_len, len, error, 19193 (rsm == NULL), optlen, __LINE__, 2); 19194 } else if (sendalot) { 19195 int ret; 19196 19197 if (len) 19198 counter_u64_add(rack_unpaced_segments, 1); 19199 sack_rxmit = 0; 19200 if ((error == 0) && 19201 rack_use_rfo && 19202 ((flags & (TH_SYN|TH_FIN)) == 0) && 19203 (rsm == NULL) && 19204 (ipoptlen == 0) && 19205 (tp->rcv_numsacks == 0) && 19206 (tp->snd_nxt == tp->snd_max) && 19207 (rack->r_must_retran == 0) && 19208 rack->r_fsb_inited && 19209 TCPS_HAVEESTABLISHED(tp->t_state) && 19210 ((tp->t_flags & TF_NEEDFIN) == 0) && 19211 (len > 0) && (orig_len > 0) && 19212 (orig_len > len) && 19213 ((orig_len - len) >= segsiz) && 19214 ((optlen == 0) || 19215 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19216 /* we can use fast_output for more */ 19217 19218 rack->r_fast_output = 1; 19219 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19220 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19221 rack->r_ctl.fsb.tcp_flags = flags; 19222 rack->r_ctl.fsb.left_to_send = orig_len - len; 19223 if (hw_tls) 19224 rack->r_ctl.fsb.hw_tls = 1; 19225 else 19226 rack->r_ctl.fsb.hw_tls = 0; 19227 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19228 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19229 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19230 (tp->snd_max - tp->snd_una))); 19231 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19232 rack->r_fast_output = 0; 19233 } 19234 if (rack->r_fast_output) { 19235 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19236 rack->r_ctl.fsb.rfo_apply_push = 1; 19237 else 19238 rack->r_ctl.fsb.rfo_apply_push = 0; 19239 rack_log_fsb(rack, tp, so, flags, 
19240 ipoptlen, orig_len, len, error, 19241 (rsm == NULL), optlen, __LINE__, 3); 19242 error = 0; 19243 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19244 if (ret >= 0) 19245 return (ret); 19246 else if (error) 19247 goto nomore; 19248 19249 } 19250 } 19251 goto again; 19252 } else if (len) { 19253 counter_u64_add(rack_unpaced_segments, 1); 19254 } 19255 /* Assure when we leave that snd_nxt will point to top */ 19256 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19257 tp->snd_nxt = tp->snd_max; 19258 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19259 #ifdef TCP_ACCOUNTING 19260 crtsc = get_cyclecount() - ts_val; 19261 if (tot_len_this_send) { 19262 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19263 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19264 } 19265 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19266 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19267 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19268 } 19269 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19270 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19271 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19272 } 19273 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19274 } else { 19275 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19276 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19277 } 19278 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19279 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19280 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19281 } 19282 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19283 } 19284 sched_unpin(); 19285 #endif 19286 if (error == ENOBUFS) 19287 error = 0; 19288 return (error); 19289 } 19290 19291 static void 19292 rack_update_seg(struct tcp_rack *rack) 19293 { 19294 uint32_t orig_val; 19295 19296 orig_val = rack->r_ctl.rc_pace_max_segs; 19297 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19298 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19299 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19300 } 19301 19302 static void 19303 rack_mtu_change(struct tcpcb *tp) 19304 { 19305 /* 19306 * The MSS may have changed 19307 */ 19308 struct tcp_rack *rack; 19309 19310 rack = (struct tcp_rack *)tp->t_fb_ptr; 19311 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19312 /* 19313 * The MTU has changed we need to resend everything 19314 * since all we have sent is lost. We first fix 19315 * up the mtu though. 
19316 */ 19317 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19318 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19319 rack_remxt_tmr(tp); 19320 rack->r_fast_output = 0; 19321 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19322 rack->r_ctl.rc_sacked); 19323 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19324 rack->r_must_retran = 1; 19325 19326 } 19327 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19328 /* We don't use snd_nxt to retransmit */ 19329 tp->snd_nxt = tp->snd_max; 19330 } 19331 19332 static int 19333 rack_set_profile(struct tcp_rack *rack, int prof) 19334 { 19335 int err = EINVAL; 19336 if (prof == 1) { 19337 /* pace_always=1 */ 19338 if (rack->rc_always_pace == 0) { 19339 if (tcp_can_enable_pacing() == 0) 19340 return (EBUSY); 19341 } 19342 rack->rc_always_pace = 1; 19343 if (rack->use_fixed_rate || rack->gp_ready) 19344 rack_set_cc_pacing(rack); 19345 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19346 rack->rack_attempt_hdwr_pace = 0; 19347 /* cmpack=1 */ 19348 if (rack_use_cmp_acks) 19349 rack->r_use_cmp_ack = 1; 19350 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19351 rack->r_use_cmp_ack) 19352 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19353 /* scwnd=1 */ 19354 rack->rack_enable_scwnd = 1; 19355 /* dynamic=100 */ 19356 rack->rc_gp_dyn_mul = 1; 19357 /* gp_inc_ca */ 19358 rack->r_ctl.rack_per_of_gp_ca = 100; 19359 /* rrr_conf=3 */ 19360 rack->r_rr_config = 3; 19361 /* npush=2 */ 19362 rack->r_ctl.rc_no_push_at_mrtt = 2; 19363 /* fillcw=1 */ 19364 rack->rc_pace_to_cwnd = 1; 19365 rack->rc_pace_fill_if_rttin_range = 0; 19366 rack->rtt_limit_mul = 0; 19367 /* noprr=1 */ 19368 rack->rack_no_prr = 1; 19369 /* lscwnd=1 */ 19370 rack->r_limit_scw = 1; 19371 /* gp_inc_rec */ 19372 rack->r_ctl.rack_per_of_gp_rec = 90; 19373 err = 0; 19374 19375 } else if (prof == 3) { 19376 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19377 /* pace_always=1 */ 19378 if (rack->rc_always_pace == 0) { 19379 if (tcp_can_enable_pacing() == 0) 19380 return (EBUSY); 19381 } 19382 rack->rc_always_pace = 1; 19383 if (rack->use_fixed_rate || rack->gp_ready) 19384 rack_set_cc_pacing(rack); 19385 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19386 rack->rack_attempt_hdwr_pace = 0; 19387 /* cmpack=1 */ 19388 if (rack_use_cmp_acks) 19389 rack->r_use_cmp_ack = 1; 19390 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19391 rack->r_use_cmp_ack) 19392 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19393 /* scwnd=1 */ 19394 rack->rack_enable_scwnd = 1; 19395 /* dynamic=100 */ 19396 rack->rc_gp_dyn_mul = 1; 19397 /* gp_inc_ca */ 19398 rack->r_ctl.rack_per_of_gp_ca = 100; 19399 /* rrr_conf=3 */ 19400 rack->r_rr_config = 3; 19401 /* npush=2 */ 19402 rack->r_ctl.rc_no_push_at_mrtt = 2; 19403 /* fillcw=2 */ 19404 rack->rc_pace_to_cwnd = 1; 19405 rack->r_fill_less_agg = 1; 19406 rack->rc_pace_fill_if_rttin_range = 0; 19407 rack->rtt_limit_mul = 0; 19408 /* noprr=1 */ 19409 rack->rack_no_prr = 1; 19410 /* lscwnd=1 */ 19411 rack->r_limit_scw = 1; 19412 /* gp_inc_rec */ 19413 rack->r_ctl.rack_per_of_gp_rec = 90; 19414 err = 0; 19415 19416 19417 } else if (prof == 2) { 19418 /* cmpack=1 */ 19419 if (rack->rc_always_pace == 0) { 19420 if (tcp_can_enable_pacing() == 0) 19421 return (EBUSY); 19422 } 19423 rack->rc_always_pace = 1; 19424 if (rack->use_fixed_rate || rack->gp_ready) 19425 rack_set_cc_pacing(rack); 19426 rack->r_use_cmp_ack = 1; 19427 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19428 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 
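/*
 * Profile 2 tracks profile 1 closely: the difference (judging from the
 * blocks above and below) is that compressed acks are switched on
 * unconditionally rather than gated on rack_use_cmp_acks, the recovery
 * goodput multiplier is left at its default instead of being set to 90,
 * and the shared cwnd is not time limited (lscwnd=0 instead of 1).
 */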
19429 /* pace_always=1 */ 19430 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19431 /* scwnd=1 */ 19432 rack->rack_enable_scwnd = 1; 19433 /* dynamic=100 */ 19434 rack->rc_gp_dyn_mul = 1; 19435 rack->r_ctl.rack_per_of_gp_ca = 100; 19436 /* rrr_conf=3 */ 19437 rack->r_rr_config = 3; 19438 /* npush=2 */ 19439 rack->r_ctl.rc_no_push_at_mrtt = 2; 19440 /* fillcw=1 */ 19441 rack->rc_pace_to_cwnd = 1; 19442 rack->rc_pace_fill_if_rttin_range = 0; 19443 rack->rtt_limit_mul = 0; 19444 /* noprr=1 */ 19445 rack->rack_no_prr = 1; 19446 /* lscwnd=0 */ 19447 rack->r_limit_scw = 0; 19448 err = 0; 19449 } else if (prof == 0) { 19450 /* This changes things back to the default settings */ 19451 err = 0; 19452 if (rack->rc_always_pace) { 19453 tcp_decrement_paced_conn(); 19454 rack_undo_cc_pacing(rack); 19455 rack->rc_always_pace = 0; 19456 } 19457 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19458 rack->rc_always_pace = 1; 19459 if (rack->use_fixed_rate || rack->gp_ready) 19460 rack_set_cc_pacing(rack); 19461 } else 19462 rack->rc_always_pace = 0; 19463 if (rack_dsack_std_based & 0x1) { 19464 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19465 rack->rc_rack_tmr_std_based = 1; 19466 } 19467 if (rack_dsack_std_based & 0x2) { 19468 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19469 rack->rc_rack_use_dsack = 1; 19470 } 19471 if (rack_use_cmp_acks) 19472 rack->r_use_cmp_ack = 1; 19473 else 19474 rack->r_use_cmp_ack = 0; 19475 if (rack_disable_prr) 19476 rack->rack_no_prr = 1; 19477 else 19478 rack->rack_no_prr = 0; 19479 if (rack_gp_no_rec_chg) 19480 rack->rc_gp_no_rec_chg = 1; 19481 else 19482 rack->rc_gp_no_rec_chg = 0; 19483 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19484 rack->r_mbuf_queue = 1; 19485 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19486 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19487 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19488 } else { 19489 rack->r_mbuf_queue = 0; 19490 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19491 } 19492 if (rack_enable_shared_cwnd) 19493 rack->rack_enable_scwnd = 1; 19494 else 19495 rack->rack_enable_scwnd = 0; 19496 if (rack_do_dyn_mul) { 19497 /* When dynamic adjustment is on CA needs to start at 100% */ 19498 rack->rc_gp_dyn_mul = 1; 19499 if (rack_do_dyn_mul >= 100) 19500 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19501 } else { 19502 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19503 rack->rc_gp_dyn_mul = 0; 19504 } 19505 rack->r_rr_config = 0; 19506 rack->r_ctl.rc_no_push_at_mrtt = 0; 19507 rack->rc_pace_to_cwnd = 0; 19508 rack->rc_pace_fill_if_rttin_range = 0; 19509 rack->rtt_limit_mul = 0; 19510 19511 if (rack_enable_hw_pacing) 19512 rack->rack_hdw_pace_ena = 1; 19513 else 19514 rack->rack_hdw_pace_ena = 0; 19515 if (rack_disable_prr) 19516 rack->rack_no_prr = 1; 19517 else 19518 rack->rack_no_prr = 0; 19519 if (rack_limits_scwnd) 19520 rack->r_limit_scw = 1; 19521 else 19522 rack->r_limit_scw = 0; 19523 err = 0; 19524 } 19525 return (err); 19526 } 19527 19528 static int 19529 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19530 { 19531 struct deferred_opt_list *dol; 19532 19533 dol = malloc(sizeof(struct deferred_opt_list), 19534 M_TCPFSB, M_NOWAIT|M_ZERO); 19535 if (dol == NULL) { 19536 /* 19537 * No space yikes -- fail out.. 
19538 */ 19539 return (0); 19540 } 19541 dol->optname = sopt_name; 19542 dol->optval = loptval; 19543 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19544 return (1); 19545 } 19546 19547 static int 19548 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19549 uint32_t optval, uint64_t loptval) 19550 { 19551 struct epoch_tracker et; 19552 struct sockopt sopt; 19553 struct cc_newreno_opts opt; 19554 uint64_t val; 19555 int error = 0; 19556 uint16_t ca, ss; 19557 19558 switch (sopt_name) { 19559 19560 case TCP_RACK_DSACK_OPT: 19561 RACK_OPTS_INC(tcp_rack_dsack_opt); 19562 if (optval & 0x1) { 19563 rack->rc_rack_tmr_std_based = 1; 19564 } else { 19565 rack->rc_rack_tmr_std_based = 0; 19566 } 19567 if (optval & 0x2) { 19568 rack->rc_rack_use_dsack = 1; 19569 } else { 19570 rack->rc_rack_use_dsack = 0; 19571 } 19572 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19573 break; 19574 case TCP_RACK_PACING_BETA: 19575 RACK_OPTS_INC(tcp_rack_beta); 19576 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19577 /* This only works for newreno. */ 19578 error = EINVAL; 19579 break; 19580 } 19581 if (rack->rc_pacing_cc_set) { 19582 /* 19583 * Set them into the real CC module 19584 * whats in the rack pcb is the old values 19585 * to be used on restoral/ 19586 */ 19587 sopt.sopt_dir = SOPT_SET; 19588 opt.name = CC_NEWRENO_BETA; 19589 opt.val = optval; 19590 if (CC_ALGO(tp)->ctl_output != NULL) 19591 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19592 else { 19593 error = ENOENT; 19594 break; 19595 } 19596 } else { 19597 /* 19598 * Not pacing yet so set it into our local 19599 * rack pcb storage. 19600 */ 19601 rack->r_ctl.rc_saved_beta.beta = optval; 19602 } 19603 break; 19604 case TCP_RACK_TIMER_SLOP: 19605 RACK_OPTS_INC(tcp_rack_timer_slop); 19606 rack->r_ctl.timer_slop = optval; 19607 if (rack->rc_tp->t_srtt) { 19608 /* 19609 * If we have an SRTT lets update t_rxtcur 19610 * to have the new slop. 19611 */ 19612 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19613 rack_rto_min, rack_rto_max, 19614 rack->r_ctl.timer_slop); 19615 } 19616 break; 19617 case TCP_RACK_PACING_BETA_ECN: 19618 RACK_OPTS_INC(tcp_rack_beta_ecn); 19619 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19620 /* This only works for newreno. */ 19621 error = EINVAL; 19622 break; 19623 } 19624 if (rack->rc_pacing_cc_set) { 19625 /* 19626 * Set them into the real CC module 19627 * whats in the rack pcb is the old values 19628 * to be used on restoral/ 19629 */ 19630 sopt.sopt_dir = SOPT_SET; 19631 opt.name = CC_NEWRENO_BETA_ECN; 19632 opt.val = optval; 19633 if (CC_ALGO(tp)->ctl_output != NULL) 19634 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19635 else 19636 error = ENOENT; 19637 } else { 19638 /* 19639 * Not pacing yet so set it into our local 19640 * rack pcb storage. 
19641 */ 19642 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19643 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19644 } 19645 break; 19646 case TCP_DEFER_OPTIONS: 19647 RACK_OPTS_INC(tcp_defer_opt); 19648 if (optval) { 19649 if (rack->gp_ready) { 19650 /* Too late */ 19651 error = EINVAL; 19652 break; 19653 } 19654 rack->defer_options = 1; 19655 } else 19656 rack->defer_options = 0; 19657 break; 19658 case TCP_RACK_MEASURE_CNT: 19659 RACK_OPTS_INC(tcp_rack_measure_cnt); 19660 if (optval && (optval <= 0xff)) { 19661 rack->r_ctl.req_measurements = optval; 19662 } else 19663 error = EINVAL; 19664 break; 19665 case TCP_REC_ABC_VAL: 19666 RACK_OPTS_INC(tcp_rec_abc_val); 19667 if (optval > 0) 19668 rack->r_use_labc_for_rec = 1; 19669 else 19670 rack->r_use_labc_for_rec = 0; 19671 break; 19672 case TCP_RACK_ABC_VAL: 19673 RACK_OPTS_INC(tcp_rack_abc_val); 19674 if ((optval > 0) && (optval < 255)) 19675 rack->rc_labc = optval; 19676 else 19677 error = EINVAL; 19678 break; 19679 case TCP_HDWR_UP_ONLY: 19680 RACK_OPTS_INC(tcp_pacing_up_only); 19681 if (optval) 19682 rack->r_up_only = 1; 19683 else 19684 rack->r_up_only = 0; 19685 break; 19686 case TCP_PACING_RATE_CAP: 19687 RACK_OPTS_INC(tcp_pacing_rate_cap); 19688 rack->r_ctl.bw_rate_cap = loptval; 19689 break; 19690 case TCP_RACK_PROFILE: 19691 RACK_OPTS_INC(tcp_profile); 19692 error = rack_set_profile(rack, optval); 19693 break; 19694 case TCP_USE_CMP_ACKS: 19695 RACK_OPTS_INC(tcp_use_cmp_acks); 19696 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19697 /* You can't turn it off once its on! */ 19698 error = EINVAL; 19699 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19700 rack->r_use_cmp_ack = 1; 19701 rack->r_mbuf_queue = 1; 19702 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19703 } 19704 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19705 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19706 break; 19707 case TCP_SHARED_CWND_TIME_LIMIT: 19708 RACK_OPTS_INC(tcp_lscwnd); 19709 if (optval) 19710 rack->r_limit_scw = 1; 19711 else 19712 rack->r_limit_scw = 0; 19713 break; 19714 case TCP_RACK_PACE_TO_FILL: 19715 RACK_OPTS_INC(tcp_fillcw); 19716 if (optval == 0) 19717 rack->rc_pace_to_cwnd = 0; 19718 else { 19719 rack->rc_pace_to_cwnd = 1; 19720 if (optval > 1) 19721 rack->r_fill_less_agg = 1; 19722 } 19723 if ((optval >= rack_gp_rtt_maxmul) && 19724 rack_gp_rtt_maxmul && 19725 (optval < 0xf)) { 19726 rack->rc_pace_fill_if_rttin_range = 1; 19727 rack->rtt_limit_mul = optval; 19728 } else { 19729 rack->rc_pace_fill_if_rttin_range = 0; 19730 rack->rtt_limit_mul = 0; 19731 } 19732 break; 19733 case TCP_RACK_NO_PUSH_AT_MAX: 19734 RACK_OPTS_INC(tcp_npush); 19735 if (optval == 0) 19736 rack->r_ctl.rc_no_push_at_mrtt = 0; 19737 else if (optval < 0xff) 19738 rack->r_ctl.rc_no_push_at_mrtt = optval; 19739 else 19740 error = EINVAL; 19741 break; 19742 case TCP_SHARED_CWND_ENABLE: 19743 RACK_OPTS_INC(tcp_rack_scwnd); 19744 if (optval == 0) 19745 rack->rack_enable_scwnd = 0; 19746 else 19747 rack->rack_enable_scwnd = 1; 19748 break; 19749 case TCP_RACK_MBUF_QUEUE: 19750 /* Now do we use the LRO mbuf-queue feature */ 19751 RACK_OPTS_INC(tcp_rack_mbufq); 19752 if (optval || rack->r_use_cmp_ack) 19753 rack->r_mbuf_queue = 1; 19754 else 19755 rack->r_mbuf_queue = 0; 19756 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19757 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19758 else 19759 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19760 break; 19761 case 
TCP_RACK_NONRXT_CFG_RATE: 19762 RACK_OPTS_INC(tcp_rack_cfg_rate); 19763 if (optval == 0) 19764 rack->rack_rec_nonrxt_use_cr = 0; 19765 else 19766 rack->rack_rec_nonrxt_use_cr = 1; 19767 break; 19768 case TCP_NO_PRR: 19769 RACK_OPTS_INC(tcp_rack_noprr); 19770 if (optval == 0) 19771 rack->rack_no_prr = 0; 19772 else if (optval == 1) 19773 rack->rack_no_prr = 1; 19774 else if (optval == 2) 19775 rack->no_prr_addback = 1; 19776 else 19777 error = EINVAL; 19778 break; 19779 case TCP_TIMELY_DYN_ADJ: 19780 RACK_OPTS_INC(tcp_timely_dyn); 19781 if (optval == 0) 19782 rack->rc_gp_dyn_mul = 0; 19783 else { 19784 rack->rc_gp_dyn_mul = 1; 19785 if (optval >= 100) { 19786 /* 19787 * If the user sets something 100 or more 19788 * its the gp_ca value. 19789 */ 19790 rack->r_ctl.rack_per_of_gp_ca = optval; 19791 } 19792 } 19793 break; 19794 case TCP_RACK_DO_DETECTION: 19795 RACK_OPTS_INC(tcp_rack_do_detection); 19796 if (optval == 0) 19797 rack->do_detection = 0; 19798 else 19799 rack->do_detection = 1; 19800 break; 19801 case TCP_RACK_TLP_USE: 19802 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19803 error = EINVAL; 19804 break; 19805 } 19806 RACK_OPTS_INC(tcp_tlp_use); 19807 rack->rack_tlp_threshold_use = optval; 19808 break; 19809 case TCP_RACK_TLP_REDUCE: 19810 /* RACK TLP cwnd reduction (bool) */ 19811 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19812 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19813 break; 19814 /* Pacing related ones */ 19815 case TCP_RACK_PACE_ALWAYS: 19816 /* 19817 * zero is old rack method, 1 is new 19818 * method using a pacing rate. 19819 */ 19820 RACK_OPTS_INC(tcp_rack_pace_always); 19821 if (optval > 0) { 19822 if (rack->rc_always_pace) { 19823 error = EALREADY; 19824 break; 19825 } else if (tcp_can_enable_pacing()) { 19826 rack->rc_always_pace = 1; 19827 if (rack->use_fixed_rate || rack->gp_ready) 19828 rack_set_cc_pacing(rack); 19829 } 19830 else { 19831 error = ENOSPC; 19832 break; 19833 } 19834 } else { 19835 if (rack->rc_always_pace) { 19836 tcp_decrement_paced_conn(); 19837 rack->rc_always_pace = 0; 19838 rack_undo_cc_pacing(rack); 19839 } 19840 } 19841 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19842 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19843 else 19844 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19845 /* A rate may be set irate or other, if so set seg size */ 19846 rack_update_seg(rack); 19847 break; 19848 case TCP_BBR_RACK_INIT_RATE: 19849 RACK_OPTS_INC(tcp_initial_rate); 19850 val = optval; 19851 /* Change from kbits per second to bytes per second */ 19852 val *= 1000; 19853 val /= 8; 19854 rack->r_ctl.init_rate = val; 19855 if (rack->rc_init_win != rack_default_init_window) { 19856 uint32_t win, snt; 19857 19858 /* 19859 * Options don't always get applied 19860 * in the order you think. So in order 19861 * to assure we update a cwnd we need 19862 * to check and see if we are still 19863 * where we should raise the cwnd. 
19864 */ 19865 win = rc_init_window(rack); 19866 if (SEQ_GT(tp->snd_max, tp->iss)) 19867 snt = tp->snd_max - tp->iss; 19868 else 19869 snt = 0; 19870 if ((snt < win) && 19871 (tp->snd_cwnd < win)) 19872 tp->snd_cwnd = win; 19873 } 19874 if (rack->rc_always_pace) 19875 rack_update_seg(rack); 19876 break; 19877 case TCP_BBR_IWINTSO: 19878 RACK_OPTS_INC(tcp_initial_win); 19879 if (optval && (optval <= 0xff)) { 19880 uint32_t win, snt; 19881 19882 rack->rc_init_win = optval; 19883 win = rc_init_window(rack); 19884 if (SEQ_GT(tp->snd_max, tp->iss)) 19885 snt = tp->snd_max - tp->iss; 19886 else 19887 snt = 0; 19888 if ((snt < win) && 19889 (tp->t_srtt | 19890 #ifdef NETFLIX_PEAKRATE 19891 tp->t_maxpeakrate | 19892 #endif 19893 rack->r_ctl.init_rate)) { 19894 /* 19895 * We are not past the initial window 19896 * and we have some bases for pacing, 19897 * so we need to possibly adjust up 19898 * the cwnd. Note even if we don't set 19899 * the cwnd, its still ok to raise the rc_init_win 19900 * which can be used coming out of idle when we 19901 * would have a rate. 19902 */ 19903 if (tp->snd_cwnd < win) 19904 tp->snd_cwnd = win; 19905 } 19906 if (rack->rc_always_pace) 19907 rack_update_seg(rack); 19908 } else 19909 error = EINVAL; 19910 break; 19911 case TCP_RACK_FORCE_MSEG: 19912 RACK_OPTS_INC(tcp_rack_force_max_seg); 19913 if (optval) 19914 rack->rc_force_max_seg = 1; 19915 else 19916 rack->rc_force_max_seg = 0; 19917 break; 19918 case TCP_RACK_PACE_MAX_SEG: 19919 /* Max segments size in a pace in bytes */ 19920 RACK_OPTS_INC(tcp_rack_max_seg); 19921 rack->rc_user_set_max_segs = optval; 19922 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19923 break; 19924 case TCP_RACK_PACE_RATE_REC: 19925 /* Set the fixed pacing rate in Bytes per second ca */ 19926 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19927 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19928 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19929 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19930 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19931 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19932 rack->use_fixed_rate = 1; 19933 if (rack->rc_always_pace) 19934 rack_set_cc_pacing(rack); 19935 rack_log_pacing_delay_calc(rack, 19936 rack->r_ctl.rc_fixed_pacing_rate_ss, 19937 rack->r_ctl.rc_fixed_pacing_rate_ca, 19938 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19939 __LINE__, NULL,0); 19940 break; 19941 19942 case TCP_RACK_PACE_RATE_SS: 19943 /* Set the fixed pacing rate in Bytes per second ca */ 19944 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19945 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19946 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19947 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19948 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19949 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19950 rack->use_fixed_rate = 1; 19951 if (rack->rc_always_pace) 19952 rack_set_cc_pacing(rack); 19953 rack_log_pacing_delay_calc(rack, 19954 rack->r_ctl.rc_fixed_pacing_rate_ss, 19955 rack->r_ctl.rc_fixed_pacing_rate_ca, 19956 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19957 __LINE__, NULL, 0); 19958 break; 19959 19960 case TCP_RACK_PACE_RATE_CA: 19961 /* Set the fixed pacing rate in Bytes per second ca */ 19962 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19963 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19964 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19965 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19966 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19967 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19968 rack->use_fixed_rate = 1; 19969 
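/*
 * As with the _SS and _REC cases above, setting the CA rate seeds the
 * other two fixed rates when they are still zero, so a single
 * setsockopt() pins slow start, congestion avoidance and recovery to
 * the same pacing rate unless they are configured individually. A
 * rough user space sketch (illustrative only, error handling omitted,
 * and it assumes the rack stack has already been selected for the
 * socket, e.g. via TCP_FUNCTION_BLK):
 *
 *	uint32_t rate = 12500000;	// bytes/sec, roughly 100 Mbit/s
 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_RATE_CA,
 *	    &rate, sizeof(rate));
 */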
if (rack->rc_always_pace) 19970 rack_set_cc_pacing(rack); 19971 rack_log_pacing_delay_calc(rack, 19972 rack->r_ctl.rc_fixed_pacing_rate_ss, 19973 rack->r_ctl.rc_fixed_pacing_rate_ca, 19974 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19975 __LINE__, NULL, 0); 19976 break; 19977 case TCP_RACK_GP_INCREASE_REC: 19978 RACK_OPTS_INC(tcp_gp_inc_rec); 19979 rack->r_ctl.rack_per_of_gp_rec = optval; 19980 rack_log_pacing_delay_calc(rack, 19981 rack->r_ctl.rack_per_of_gp_ss, 19982 rack->r_ctl.rack_per_of_gp_ca, 19983 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19984 __LINE__, NULL, 0); 19985 break; 19986 case TCP_RACK_GP_INCREASE_CA: 19987 RACK_OPTS_INC(tcp_gp_inc_ca); 19988 ca = optval; 19989 if (ca < 100) { 19990 /* 19991 * We don't allow any reduction 19992 * over the GP b/w. 19993 */ 19994 error = EINVAL; 19995 break; 19996 } 19997 rack->r_ctl.rack_per_of_gp_ca = ca; 19998 rack_log_pacing_delay_calc(rack, 19999 rack->r_ctl.rack_per_of_gp_ss, 20000 rack->r_ctl.rack_per_of_gp_ca, 20001 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20002 __LINE__, NULL, 0); 20003 break; 20004 case TCP_RACK_GP_INCREASE_SS: 20005 RACK_OPTS_INC(tcp_gp_inc_ss); 20006 ss = optval; 20007 if (ss < 100) { 20008 /* 20009 * We don't allow any reduction 20010 * over the GP b/w. 20011 */ 20012 error = EINVAL; 20013 break; 20014 } 20015 rack->r_ctl.rack_per_of_gp_ss = ss; 20016 rack_log_pacing_delay_calc(rack, 20017 rack->r_ctl.rack_per_of_gp_ss, 20018 rack->r_ctl.rack_per_of_gp_ca, 20019 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20020 __LINE__, NULL, 0); 20021 break; 20022 case TCP_RACK_RR_CONF: 20023 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20024 if (optval && optval <= 3) 20025 rack->r_rr_config = optval; 20026 else 20027 rack->r_rr_config = 0; 20028 break; 20029 case TCP_HDWR_RATE_CAP: 20030 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20031 if (optval) { 20032 if (rack->r_rack_hw_rate_caps == 0) 20033 rack->r_rack_hw_rate_caps = 1; 20034 else 20035 error = EALREADY; 20036 } else { 20037 rack->r_rack_hw_rate_caps = 0; 20038 } 20039 break; 20040 case TCP_BBR_HDWR_PACE: 20041 RACK_OPTS_INC(tcp_hdwr_pacing); 20042 if (optval){ 20043 if (rack->rack_hdrw_pacing == 0) { 20044 rack->rack_hdw_pace_ena = 1; 20045 rack->rack_attempt_hdwr_pace = 0; 20046 } else 20047 error = EALREADY; 20048 } else { 20049 rack->rack_hdw_pace_ena = 0; 20050 #ifdef RATELIMIT 20051 if (rack->r_ctl.crte != NULL) { 20052 rack->rack_hdrw_pacing = 0; 20053 rack->rack_attempt_hdwr_pace = 0; 20054 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20055 rack->r_ctl.crte = NULL; 20056 } 20057 #endif 20058 } 20059 break; 20060 /* End Pacing related ones */ 20061 case TCP_RACK_PRR_SENDALOT: 20062 /* Allow PRR to send more than one seg */ 20063 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20064 rack->r_ctl.rc_prr_sendalot = optval; 20065 break; 20066 case TCP_RACK_MIN_TO: 20067 /* Minimum time between rack t-o's in ms */ 20068 RACK_OPTS_INC(tcp_rack_min_to); 20069 rack->r_ctl.rc_min_to = optval; 20070 break; 20071 case TCP_RACK_EARLY_SEG: 20072 /* If early recovery max segments */ 20073 RACK_OPTS_INC(tcp_rack_early_seg); 20074 rack->r_ctl.rc_early_recovery_segs = optval; 20075 break; 20076 case TCP_RACK_ENABLE_HYSTART: 20077 { 20078 struct sockopt sopt; 20079 struct cc_newreno_opts opt; 20080 20081 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 20082 sopt.sopt_dir = SOPT_SET; 20083 opt.name = CC_NEWRENO_ENABLE_HYSTART; 20084 opt.val = optval; 20085 if (CC_ALGO(tp)->ctl_output != NULL) 20086 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 20087 else 20088 error = EINVAL; 20089 } 20090 
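/*
 * Note that hystart enablement is not cached in the rack pcb the way
 * the beta values are; it is handed straight to the congestion control
 * module through its ctl_output hook, so in practice it only takes
 * effect when newreno (which implements CC_NEWRENO_ENABLE_HYSTART) is
 * the CC algorithm in use.
 */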
break; 20091 case TCP_RACK_REORD_THRESH: 20092 /* RACK reorder threshold (shift amount) */ 20093 RACK_OPTS_INC(tcp_rack_reord_thresh); 20094 if ((optval > 0) && (optval < 31)) 20095 rack->r_ctl.rc_reorder_shift = optval; 20096 else 20097 error = EINVAL; 20098 break; 20099 case TCP_RACK_REORD_FADE: 20100 /* Does reordering fade after ms time */ 20101 RACK_OPTS_INC(tcp_rack_reord_fade); 20102 rack->r_ctl.rc_reorder_fade = optval; 20103 break; 20104 case TCP_RACK_TLP_THRESH: 20105 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20106 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20107 if (optval) 20108 rack->r_ctl.rc_tlp_threshold = optval; 20109 else 20110 error = EINVAL; 20111 break; 20112 case TCP_BBR_USE_RACK_RR: 20113 RACK_OPTS_INC(tcp_rack_rr); 20114 if (optval) 20115 rack->use_rack_rr = 1; 20116 else 20117 rack->use_rack_rr = 0; 20118 break; 20119 case TCP_FAST_RSM_HACK: 20120 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20121 if (optval) 20122 rack->fast_rsm_hack = 1; 20123 else 20124 rack->fast_rsm_hack = 0; 20125 break; 20126 case TCP_RACK_PKT_DELAY: 20127 /* RACK added ms i.e. rack-rtt + reord + N */ 20128 RACK_OPTS_INC(tcp_rack_pkt_delay); 20129 rack->r_ctl.rc_pkt_delay = optval; 20130 break; 20131 case TCP_DELACK: 20132 RACK_OPTS_INC(tcp_rack_delayed_ack); 20133 if (optval == 0) 20134 tp->t_delayed_ack = 0; 20135 else 20136 tp->t_delayed_ack = 1; 20137 if (tp->t_flags & TF_DELACK) { 20138 tp->t_flags &= ~TF_DELACK; 20139 tp->t_flags |= TF_ACKNOW; 20140 NET_EPOCH_ENTER(et); 20141 rack_output(tp); 20142 NET_EPOCH_EXIT(et); 20143 } 20144 break; 20145 20146 case TCP_BBR_RACK_RTT_USE: 20147 RACK_OPTS_INC(tcp_rack_rtt_use); 20148 if ((optval != USE_RTT_HIGH) && 20149 (optval != USE_RTT_LOW) && 20150 (optval != USE_RTT_AVG)) 20151 error = EINVAL; 20152 else 20153 rack->r_ctl.rc_rate_sample_method = optval; 20154 break; 20155 case TCP_DATA_AFTER_CLOSE: 20156 RACK_OPTS_INC(tcp_data_after_close); 20157 if (optval) 20158 rack->rc_allow_data_af_clo = 1; 20159 else 20160 rack->rc_allow_data_af_clo = 0; 20161 break; 20162 default: 20163 break; 20164 } 20165 #ifdef NETFLIX_STATS 20166 tcp_log_socket_option(tp, sopt_name, optval, error); 20167 #endif 20168 return (error); 20169 } 20170 20171 20172 static void 20173 rack_apply_deferred_options(struct tcp_rack *rack) 20174 { 20175 struct deferred_opt_list *dol, *sdol; 20176 uint32_t s_optval; 20177 20178 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20179 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20180 /* Disadvantage of deferal is you loose the error return */ 20181 s_optval = (uint32_t)dol->optval; 20182 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20183 free(dol, M_TCPDO); 20184 } 20185 } 20186 20187 static void 20188 rack_hw_tls_change(struct tcpcb *tp, int chg) 20189 { 20190 /* 20191 * HW tls state has changed.. fix all 20192 * rsm's in flight. 
static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/*
	 * HW TLS state has changed; fix all
	 * rsm's in flight.
	 */
	struct tcp_rack *rack;
	struct rack_sendmap *rsm;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
		if (chg)
			rsm->r_hw_tls = 1;
		else
			rsm->r_hw_tls = 0;
	}
	if (chg)
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
}

static int
rack_pru_options(struct tcpcb *tp, int flags)
{
	if (flags & PRUS_OOB)
		return (EOPNOTSUPP);
	return (0);
}

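/*
 * Registration glue: this table is what register_tcp_functions_as_names()
 * publishes under MOD_LOAD below.  A sketch of how a connection ends up on
 * it (assuming the module is loaded and using the generic TCP_FUNCTION_BLK
 * interface from <netinet/tcp.h>):
 *
 *	struct tcp_function_set fs;
 *
 *	strlcpy(fs.function_set_name, "rack", sizeof(fs.function_set_name));
 *	fs.pcbcnt = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &fs, sizeof(fs));
 *
 * After that the handlers below replace the default stack's entry points
 * for the connection.
 */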
static struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_do_queued_segments = ctf_do_queued_segments,
	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_timer_activate = rack_timer_activate,
	.tfb_tcp_timer_active = rack_timer_active,
	.tfb_tcp_timer_stop = rack_timer_stop,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok,
	.tfb_tcp_mtu_chg = rack_mtu_change,
	.tfb_pru_options = rack_pru_options,
	.tfb_hwtls_change = rack_hw_tls_change,
};

/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments.  When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
	uint64_t loptval;
	int32_t error = 0, optval;

	switch (sopt->sopt_name) {
	case TCP_RACK_TLP_REDUCE:	/* URL:tlp_reduce */
	/* Pacing related ones */
	case TCP_RACK_PACE_ALWAYS:	/* URL:pace_always */
	case TCP_BBR_RACK_INIT_RATE:	/* URL:irate */
	case TCP_BBR_IWINTSO:		/* URL:tso_iwin */
	case TCP_RACK_PACE_MAX_SEG:	/* URL:pace_max_seg */
	case TCP_RACK_FORCE_MSEG:	/* URL:force_max_seg */
	case TCP_RACK_PACE_RATE_CA:	/* URL:pr_ca */
	case TCP_RACK_PACE_RATE_SS:	/* URL:pr_ss */
	case TCP_RACK_PACE_RATE_REC:	/* URL:pr_rec */
	case TCP_RACK_GP_INCREASE_CA:	/* URL:gp_inc_ca */
	case TCP_RACK_GP_INCREASE_SS:	/* URL:gp_inc_ss */
	case TCP_RACK_GP_INCREASE_REC:	/* URL:gp_inc_rec */
	case TCP_RACK_RR_CONF:		/* URL:rrr_conf */
	case TCP_BBR_HDWR_PACE:		/* URL:hdwrpace */
	case TCP_HDWR_RATE_CAP:		/* URL:hdwrcap boolean */
	case TCP_PACING_RATE_CAP:	/* URL:cap -- used by side-channel */
	case TCP_HDWR_UP_ONLY:		/* URL:uponly -- hardware pacing boolean */
	/* End pacing related */
	case TCP_FAST_RSM_HACK:		/* URL:frsm_hack */
	case TCP_DELACK:		/* URL:delack (in base TCP, i.e. tcp_hints along with cc etc.) */
	case TCP_RACK_PRR_SENDALOT:	/* URL:prr_sendalot */
	case TCP_RACK_MIN_TO:		/* URL:min_to */
	case TCP_RACK_EARLY_SEG:	/* URL:early_seg */
	case TCP_RACK_REORD_THRESH:	/* URL:reord_thresh */
	case TCP_RACK_REORD_FADE:	/* URL:reord_fade */
	case TCP_RACK_TLP_THRESH:	/* URL:tlp_thresh */
	case TCP_RACK_PKT_DELAY:	/* URL:pkt_delay */
	case TCP_RACK_TLP_USE:		/* URL:tlp_use */
	case TCP_BBR_RACK_RTT_USE:	/* URL:rttuse */
	case TCP_BBR_USE_RACK_RR:	/* URL:rackrr */
	case TCP_RACK_DO_DETECTION:	/* URL:detect */
	case TCP_NO_PRR:		/* URL:noprr */
	case TCP_TIMELY_DYN_ADJ:	/* URL:dynamic */
	case TCP_DATA_AFTER_CLOSE:	/* no URL */
	case TCP_RACK_NONRXT_CFG_RATE:	/* URL:nonrxtcr */
	case TCP_SHARED_CWND_ENABLE:	/* URL:scwnd */
	case TCP_RACK_MBUF_QUEUE:	/* URL:mqueue */
	case TCP_RACK_NO_PUSH_AT_MAX:	/* URL:npush */
	case TCP_RACK_PACE_TO_FILL:	/* URL:fillcw */
	case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */
	case TCP_RACK_PROFILE:		/* URL:profile */
	case TCP_USE_CMP_ACKS:		/* URL:cmpack */
	case TCP_RACK_ABC_VAL:		/* URL:labc */
	case TCP_REC_ABC_VAL:		/* URL:reclabc */
	case TCP_RACK_MEASURE_CNT:	/* URL:measurecnt */
	case TCP_DEFER_OPTIONS:		/* URL:defer */
	case TCP_RACK_DSACK_OPT:	/* URL:dsack */
	case TCP_RACK_PACING_BETA:	/* URL:pacing_beta */
	case TCP_RACK_PACING_BETA_ECN:	/* URL:pacing_beta_ecn */
	case TCP_RACK_TIMER_SLOP:	/* URL:timer_slop */
	case TCP_RACK_ENABLE_HYSTART:	/* URL:hystart */
		break;
	default:
		/* Filter off all unknown options to the base stack */
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
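	/*
	 * Copy in with the lock dropped.  TCP_PACING_RATE_CAP is the one
	 * option here that takes a 64-bit argument (a rate in bytes per
	 * second); everything else is a plain int, so a caller would pass a
	 * uint64_t for the cap and an int for the rest.
	 */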
	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option
		 * trace; this means rates above ~34Gbps (2^32 bytes per
		 * second) won't show right, but that's probably OK.
		 */
		optval = (uint32_t)loptval;
	} else {
		error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
		/* Save it in 64 bit form too */
		loptval = optval;
	}
	if (error)
		return (error);
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (tp->t_fb != &__tcp_rack) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}
	if (rack->defer_options && (rack->gp_ready == 0) &&
	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* Options are being deferred */
		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
			INP_WUNLOCK(inp);
			return (0);
		} else {
			/* No memory to defer, fail */
			INP_WUNLOCK(inp);
			return (ENOMEM);
		}
	}
	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval);
	INP_WUNLOCK(inp);
	return (error);
}

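/*
 * Fill in a tcp_info snapshot for the TCP_INFO getsockopt below.  Rack keeps
 * its RTT state in microseconds, so the srtt/rttvar/rto fields can be copied
 * out directly.
 */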
static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & TF2_ECN_PERMIT)
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy.  If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected.  It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set.  Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected.  It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery.  Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this "less" value, but we
	 * do too with pacing :)
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set.  Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		struct sockopt sopt;
		struct cc_newreno_opts opt;

		sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
		sopt.sopt_dir = SOPT_GET;
		opt.name = CC_NEWRENO_ENABLE_HYSTART;
		if (CC_ALGO(tp)->ctl_output != NULL)
			error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
		else
			error = EINVAL;
		optval = opt.val;
	}
		break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		/* Huh? */
		goto out;
	}
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(so, sopt, inp, tp, rack));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(so, sopt, inp, tp, rack));
	}
out:
	INP_WUNLOCK(inp);
	return (error);
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n",
			    err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
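
/*
 * Usage sketch (outside this file): once the module is loaded with
 * "kldload tcp_rack" (the kernel must provide tcphpts, per the dependency
 * above), the stack can be selected per connection with the TCP_FUNCTION_BLK
 * socket option sketched earlier, or made the system-wide default with the
 * net.inet.tcp.functions_default sysctl, e.g.:
 *
 *	sysctl net.inet.tcp.functions_default=rack
 */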