1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 #include "opt_inet6.h" 32 #include "opt_ipsec.h" 33 #include "opt_tcpdebug.h" 34 #include "opt_ratelimit.h" 35 #include "opt_kern_tls.h" 36 #include <sys/param.h> 37 #include <sys/arb.h> 38 #include <sys/module.h> 39 #include <sys/kernel.h> 40 #ifdef TCP_HHOOK 41 #include <sys/hhook.h> 42 #endif 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/mbuf.h> 48 #include <sys/proc.h> /* for proc0 declaration */ 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sysctl.h> 52 #include <sys/systm.h> 53 #ifdef STATS 54 #include <sys/qmath.h> 55 #include <sys/tree.h> 56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 57 #else 58 #include <sys/tree.h> 59 #endif 60 #include <sys/refcount.h> 61 #include <sys/queue.h> 62 #include <sys/tim_filter.h> 63 #include <sys/smp.h> 64 #include <sys/kthread.h> 65 #include <sys/kern_prefetch.h> 66 #include <sys/protosw.h> 67 #ifdef TCP_ACCOUNTING 68 #include <sys/sched.h> 69 #include <machine/cpu.h> 70 #endif 71 #include <vm/uma.h> 72 73 #include <net/route.h> 74 #include <net/route/nhop.h> 75 #include <net/vnet.h> 76 77 #define TCPSTATES /* for logging */ 78 79 #include <netinet/in.h> 80 #include <netinet/in_kdtrace.h> 81 #include <netinet/in_pcb.h> 82 #include <netinet/ip.h> 83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 85 #include <netinet/ip_var.h> 86 #include <netinet/ip6.h> 87 #include <netinet6/in6_pcb.h> 88 #include <netinet6/ip6_var.h> 89 #include <netinet/tcp.h> 90 #define TCPOUTFLAGS 91 #include <netinet/tcp_fsm.h> 92 #include <netinet/tcp_log_buf.h> 93 #include <netinet/tcp_seq.h> 94 #include <netinet/tcp_timer.h> 95 #include <netinet/tcp_var.h> 96 #include <netinet/tcp_hpts.h> 97 #include <netinet/tcp_ratelimit.h> 98 #include <netinet/tcp_accounting.h> 99 #include <netinet/tcpip.h> 100 #include <netinet/cc/cc.h> 101 #include <netinet/cc/cc_newreno.h> 102 #include <netinet/tcp_fastopen.h> 103 #include <netinet/tcp_lro.h> 104 #ifdef 
NETFLIX_SHARED_CWND 105 #include <netinet/tcp_shared_cwnd.h> 106 #endif 107 #ifdef TCPDEBUG 108 #include <netinet/tcp_debug.h> 109 #endif /* TCPDEBUG */ 110 #ifdef TCP_OFFLOAD 111 #include <netinet/tcp_offload.h> 112 #endif 113 #ifdef INET6 114 #include <netinet6/tcp6_var.h> 115 #endif 116 117 #include <netipsec/ipsec_support.h> 118 119 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 120 #include <netipsec/ipsec.h> 121 #include <netipsec/ipsec6.h> 122 #endif /* IPSEC */ 123 124 #include <netinet/udp.h> 125 #include <netinet/udp_var.h> 126 #include <machine/in_cksum.h> 127 128 #ifdef MAC 129 #include <security/mac/mac_framework.h> 130 #endif 131 #include "sack_filter.h" 132 #include "tcp_rack.h" 133 #include "rack_bbr_common.h" 134 135 uma_zone_t rack_zone; 136 uma_zone_t rack_pcb_zone; 137 138 #ifndef TICKS2SBT 139 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 140 #endif 141 142 VNET_DECLARE(uint32_t, newreno_beta); 143 VNET_DECLARE(uint32_t, newreno_beta_ecn); 144 #define V_newreno_beta VNET(newreno_beta) 145 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 146 147 148 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block"); 149 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options"); 150 151 struct sysctl_ctx_list rack_sysctl_ctx; 152 struct sysctl_oid *rack_sysctl_root; 153 154 #define CUM_ACKED 1 155 #define SACKED 2 156 157 /* 158 * The RACK module incorporates a number of 159 * TCP ideas that have been put out into the IETF 160 * over the last few years: 161 * - Matt Mathis's Rate Halving which slowly drops 162 * the congestion window so that the ack clock can 163 * be maintained during a recovery. 164 * - Yuchung Cheng's RACK TCP (for which its named) that 165 * will stop us using the number of dup acks and instead 166 * use time as the gage of when we retransmit. 167 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 168 * of Dukkipati et.al. 169 * RACK depends on SACK, so if an endpoint arrives that 170 * cannot do SACK the state machine below will shuttle the 171 * connection back to using the "default" TCP stack that is 172 * in FreeBSD. 173 * 174 * To implement RACK the original TCP stack was first decomposed 175 * into a functional state machine with individual states 176 * for each of the possible TCP connection states. The do_segement 177 * functions role in life is to mandate the connection supports SACK 178 * initially and then assure that the RACK state matches the conenction 179 * state before calling the states do_segment function. Each 180 * state is simplified due to the fact that the original do_segment 181 * has been decomposed and we *know* what state we are in (no 182 * switches on the state) and all tests for SACK are gone. This 183 * greatly simplifies what each state does. 184 * 185 * TCP output is also over-written with a new version since it 186 * must maintain the new rack scoreboard. 
187 * 188 */ 189 static int32_t rack_tlp_thresh = 1; 190 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 191 static int32_t rack_tlp_use_greater = 1; 192 static int32_t rack_reorder_thresh = 2; 193 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 194 * - 60 seconds */ 195 static uint8_t rack_req_measurements = 1; 196 /* Attack threshold detections */ 197 static uint32_t rack_highest_sack_thresh_seen = 0; 198 static uint32_t rack_highest_move_thresh_seen = 0; 199 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 200 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */ 201 static int32_t rack_hw_rate_caps = 1; /* 1; */ 202 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 203 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 204 static int32_t rack_hw_up_only = 1; 205 static int32_t rack_stats_gets_ms_rtt = 1; 206 static int32_t rack_prr_addbackmax = 2; 207 208 static int32_t rack_pkt_delay = 1000; 209 static int32_t rack_send_a_lot_in_prr = 1; 210 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 211 static int32_t rack_verbose_logging = 0; 212 static int32_t rack_ignore_data_after_close = 1; 213 static int32_t rack_enable_shared_cwnd = 1; 214 static int32_t rack_use_cmp_acks = 1; 215 static int32_t rack_use_fsb = 1; 216 static int32_t rack_use_rfo = 1; 217 static int32_t rack_use_rsm_rfo = 1; 218 static int32_t rack_max_abc_post_recovery = 2; 219 static int32_t rack_client_low_buf = 0; 220 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 221 #ifdef TCP_ACCOUNTING 222 static int32_t rack_tcp_accounting = 0; 223 #endif 224 static int32_t rack_limits_scwnd = 1; 225 static int32_t rack_enable_mqueue_for_nonpaced = 0; 226 static int32_t rack_disable_prr = 0; 227 static int32_t use_rack_rr = 1; 228 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 229 static int32_t rack_persist_min = 250000; /* 250usec */ 230 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 231 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 232 static int32_t rack_default_init_window = 0; /* Use system default */ 233 static int32_t rack_limit_time_with_srtt = 0; 234 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 235 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */ 236 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 237 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 238 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 239 /* 240 * Currently regular tcp has a rto_min of 30ms 241 * the backoff goes 12 times so that ends up 242 * being a total of 122.850 seconds before a 243 * connection is killed. 
244 */ 245 static uint32_t rack_def_data_window = 20; 246 static uint32_t rack_goal_bdp = 2; 247 static uint32_t rack_min_srtts = 1; 248 static uint32_t rack_min_measure_usec = 0; 249 static int32_t rack_tlp_min = 10000; /* 10ms */ 250 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 251 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 252 static const int32_t rack_free_cache = 2; 253 static int32_t rack_hptsi_segments = 40; 254 static int32_t rack_rate_sample_method = USE_RTT_LOW; 255 static int32_t rack_pace_every_seg = 0; 256 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 257 static int32_t rack_slot_reduction = 4; 258 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 259 static int32_t rack_cwnd_block_ends_measure = 0; 260 static int32_t rack_rwnd_block_ends_measure = 0; 261 static int32_t rack_def_profile = 0; 262 263 static int32_t rack_lower_cwnd_at_tlp = 0; 264 static int32_t rack_limited_retran = 0; 265 static int32_t rack_always_send_oldest = 0; 266 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 267 268 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 269 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 270 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 271 272 /* Probertt */ 273 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 274 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 275 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 276 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 277 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 278 279 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 280 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 281 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 282 static uint32_t rack_probertt_use_min_rtt_exit = 0; 283 static uint32_t rack_probe_rtt_sets_cwnd = 0; 284 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 285 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 286 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 287 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 288 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 289 static uint32_t rack_probertt_filter_life = 10000000; 290 static uint32_t rack_probertt_lower_within = 10; 291 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 292 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 293 static int32_t rack_probertt_clear_is = 1; 294 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 295 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 296 297 /* Part of pacing */ 298 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 299 300 /* Timely information */ 301 /* Combine these two gives the range of 'no change' to bw */ 302 /* ie the up/down provide the upper and lower bound */ 303 static int32_t 
rack_gp_per_bw_mul_up = 2; /* 2% */ 304 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 305 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 306 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 307 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 308 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multipler */ 309 static int32_t rack_gp_increase_per = 2; /* 2% increase in multipler */ 310 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 311 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 312 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 313 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 314 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 315 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 316 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 317 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 318 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 319 static int32_t rack_use_max_for_nobackoff = 0; 320 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 321 static int32_t rack_timely_no_stopping = 0; 322 static int32_t rack_down_raise_thresh = 100; 323 static int32_t rack_req_segs = 1; 324 static uint64_t rack_bw_rate_cap = 0; 325 326 /* Weird delayed ack mode */ 327 static int32_t rack_use_imac_dack = 0; 328 /* Rack specific counters */ 329 counter_u64_t rack_badfr; 330 counter_u64_t rack_badfr_bytes; 331 counter_u64_t rack_rtm_prr_retran; 332 counter_u64_t rack_rtm_prr_newdata; 333 counter_u64_t rack_timestamp_mismatch; 334 counter_u64_t rack_reorder_seen; 335 counter_u64_t rack_paced_segments; 336 counter_u64_t rack_unpaced_segments; 337 counter_u64_t rack_calc_zero; 338 counter_u64_t rack_calc_nonzero; 339 counter_u64_t rack_saw_enobuf; 340 counter_u64_t rack_saw_enobuf_hw; 341 counter_u64_t rack_saw_enetunreach; 342 counter_u64_t rack_per_timer_hole; 343 counter_u64_t rack_large_ackcmp; 344 counter_u64_t rack_small_ackcmp; 345 #ifdef INVARIANTS 346 counter_u64_t rack_adjust_map_bw; 347 #endif 348 /* Tail loss probe counters */ 349 counter_u64_t rack_tlp_tot; 350 counter_u64_t rack_tlp_newdata; 351 counter_u64_t rack_tlp_retran; 352 counter_u64_t rack_tlp_retran_bytes; 353 counter_u64_t rack_tlp_retran_fail; 354 counter_u64_t rack_to_tot; 355 counter_u64_t rack_to_arm_rack; 356 counter_u64_t rack_to_arm_tlp; 357 counter_u64_t rack_hot_alloc; 358 counter_u64_t rack_to_alloc; 359 counter_u64_t rack_to_alloc_hard; 360 counter_u64_t rack_to_alloc_emerg; 361 counter_u64_t rack_to_alloc_limited; 362 counter_u64_t rack_alloc_limited_conns; 363 counter_u64_t rack_split_limited; 364 365 #define MAX_NUM_OF_CNTS 13 366 counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS]; 367 counter_u64_t rack_multi_single_eq; 368 counter_u64_t rack_proc_non_comp_ack; 369 370 counter_u64_t rack_fto_send; 371 counter_u64_t rack_fto_rsm_send; 372 counter_u64_t rack_nfto_resend; 373 counter_u64_t rack_non_fto_send; 374 counter_u64_t rack_extended_rfo; 375 376 counter_u64_t rack_sack_proc_all; 377 counter_u64_t rack_sack_proc_short; 378 counter_u64_t rack_sack_proc_restart; 379 counter_u64_t rack_sack_attacks_detected; 380 counter_u64_t rack_sack_attacks_reversed; 
381 counter_u64_t rack_sack_used_next_merge; 382 counter_u64_t rack_sack_splits; 383 counter_u64_t rack_sack_used_prev_merge; 384 counter_u64_t rack_sack_skipped_acked; 385 counter_u64_t rack_ack_total; 386 counter_u64_t rack_express_sack; 387 counter_u64_t rack_sack_total; 388 counter_u64_t rack_move_none; 389 counter_u64_t rack_move_some; 390 391 counter_u64_t rack_used_tlpmethod; 392 counter_u64_t rack_used_tlpmethod2; 393 counter_u64_t rack_enter_tlp_calc; 394 counter_u64_t rack_input_idle_reduces; 395 counter_u64_t rack_collapsed_win; 396 counter_u64_t rack_tlp_does_nada; 397 counter_u64_t rack_try_scwnd; 398 counter_u64_t rack_hw_pace_init_fail; 399 counter_u64_t rack_hw_pace_lost; 400 counter_u64_t rack_sbsndptr_right; 401 counter_u64_t rack_sbsndptr_wrong; 402 403 /* Temp CPU counters */ 404 counter_u64_t rack_find_high; 405 406 counter_u64_t rack_progress_drops; 407 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 408 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 409 410 411 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 412 413 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 414 (tv) = (value) + slop; \ 415 if ((u_long)(tv) < (u_long)(tvmin)) \ 416 (tv) = (tvmin); \ 417 if ((u_long)(tv) > (u_long)(tvmax)) \ 418 (tv) = (tvmax); \ 419 } while (0) 420 421 static void 422 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 423 424 static int 425 rack_process_ack(struct mbuf *m, struct tcphdr *th, 426 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 427 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 428 static int 429 rack_process_data(struct mbuf *m, struct tcphdr *th, 430 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 431 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 432 static void 433 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 434 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 435 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 436 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 437 uint8_t limit_type); 438 static struct rack_sendmap * 439 rack_check_recovery_mode(struct tcpcb *tp, 440 uint32_t tsused); 441 static void 442 rack_cong_signal(struct tcpcb *tp, 443 uint32_t type, uint32_t ack); 444 static void rack_counter_destroy(void); 445 static int 446 rack_ctloutput(struct socket *so, struct sockopt *sopt, 447 struct inpcb *inp, struct tcpcb *tp); 448 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 449 static void 450 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 451 static void 452 rack_do_segment(struct mbuf *m, struct tcphdr *th, 453 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 454 uint8_t iptos); 455 static void rack_dtor(void *mem, int32_t size, void *arg); 456 static void 457 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 458 uint32_t flex1, uint32_t flex2, 459 uint32_t flex3, uint32_t flex4, 460 uint32_t flex5, uint32_t flex6, 461 uint16_t flex7, uint8_t mod); 462 463 static void 464 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 465 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 466 struct rack_sendmap *rsm, uint8_t quality); 467 static struct rack_sendmap * 468 rack_find_high_nonack(struct tcp_rack *rack, 469 struct rack_sendmap *rsm); 470 static struct 
rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 471 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 472 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 473 static int 474 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 475 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 476 static void 477 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 478 tcp_seq th_ack, int line, uint8_t quality); 479 static uint32_t 480 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 481 static int32_t rack_handoff_ok(struct tcpcb *tp); 482 static int32_t rack_init(struct tcpcb *tp); 483 static void rack_init_sysctls(void); 484 static void 485 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 486 struct tcphdr *th, int entered_rec, int dup_ack_struck); 487 static void 488 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 489 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts, 490 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 491 492 static void 493 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 494 struct rack_sendmap *rsm); 495 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 496 static int32_t rack_output(struct tcpcb *tp); 497 498 static uint32_t 499 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 500 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 501 uint32_t cts, int *moved_two); 502 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 503 static void rack_remxt_tmr(struct tcpcb *tp); 504 static int 505 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 506 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 507 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 508 static int32_t rack_stopall(struct tcpcb *tp); 509 static void 510 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 511 uint32_t delta); 512 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 513 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 514 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 515 static uint32_t 516 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 517 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 518 static void 519 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 520 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 521 static int 522 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 523 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 524 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 525 static int 526 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 527 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 528 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 529 static int 530 rack_do_closing(struct mbuf *m, struct tcphdr *th, 531 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 532 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 533 static int 534 rack_do_established(struct mbuf *m, struct tcphdr *th, 535 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 536 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, 
uint8_t iptos); 537 static int 538 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 539 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 540 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 541 static int 542 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 543 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 544 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 545 static int 546 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 547 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 548 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 549 static int 550 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 551 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 552 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 553 static int 554 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 555 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 556 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 557 static int 558 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 559 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 560 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 561 struct rack_sendmap * 562 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 563 uint32_t tsused); 564 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 565 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 566 static void 567 tcp_rack_partialack(struct tcpcb *tp); 568 static int 569 rack_set_profile(struct tcp_rack *rack, int prof); 570 static void 571 rack_apply_deferred_options(struct tcp_rack *rack); 572 573 int32_t rack_clear_counter=0; 574 575 static void 576 rack_set_cc_pacing(struct tcp_rack *rack) 577 { 578 struct sockopt sopt; 579 struct cc_newreno_opts opt; 580 struct newreno old, *ptr; 581 struct tcpcb *tp; 582 int error; 583 584 if (rack->rc_pacing_cc_set) 585 return; 586 587 tp = rack->rc_tp; 588 if (tp->cc_algo == NULL) { 589 /* Tcb is leaving */ 590 printf("No cc algorithm?\n"); 591 return; 592 } 593 rack->rc_pacing_cc_set = 1; 594 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 595 /* Not new-reno we can't play games with beta! */ 596 goto out; 597 } 598 ptr = ((struct newreno *)tp->ccv->cc_data); 599 if (CC_ALGO(tp)->ctl_output == NULL) { 600 /* Huh, why does new_reno no longer have a set function? */ 601 printf("no ctl_output for algo:%s\n", tp->cc_algo->name); 602 goto out; 603 } 604 if (ptr == NULL) { 605 /* Just the default values */ 606 old.beta = V_newreno_beta_ecn; 607 old.beta_ecn = V_newreno_beta_ecn; 608 old.newreno_flags = 0; 609 } else { 610 old.beta = ptr->beta; 611 old.beta_ecn = ptr->beta_ecn; 612 old.newreno_flags = ptr->newreno_flags; 613 } 614 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 615 sopt.sopt_dir = SOPT_SET; 616 opt.name = CC_NEWRENO_BETA; 617 opt.val = rack->r_ctl.rc_saved_beta.beta; 618 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 619 if (error) { 620 printf("Error returned by ctl_output %d\n", error); 621 goto out; 622 } 623 /* 624 * Hack alert we need to set in our newreno_flags 625 * so that Abe behavior is also applied. 
626 */ 627 ((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN; 628 opt.name = CC_NEWRENO_BETA_ECN; 629 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 630 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 631 if (error) { 632 printf("Error returned by ctl_output %d\n", error); 633 goto out; 634 } 635 /* Save off the original values for restoral */ 636 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 637 out: 638 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 639 union tcp_log_stackspecific log; 640 struct timeval tv; 641 642 ptr = ((struct newreno *)tp->ccv->cc_data); 643 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 644 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 645 if (ptr) { 646 log.u_bbr.flex1 = ptr->beta; 647 log.u_bbr.flex2 = ptr->beta_ecn; 648 log.u_bbr.flex3 = ptr->newreno_flags; 649 } 650 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 651 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 652 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 653 log.u_bbr.flex7 = rack->gp_ready; 654 log.u_bbr.flex7 <<= 1; 655 log.u_bbr.flex7 |= rack->use_fixed_rate; 656 log.u_bbr.flex7 <<= 1; 657 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 658 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 659 log.u_bbr.flex8 = 3; 660 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 661 0, &log, false, NULL, NULL, 0, &tv); 662 } 663 } 664 665 static void 666 rack_undo_cc_pacing(struct tcp_rack *rack) 667 { 668 struct newreno old, *ptr; 669 struct tcpcb *tp; 670 671 if (rack->rc_pacing_cc_set == 0) 672 return; 673 tp = rack->rc_tp; 674 rack->rc_pacing_cc_set = 0; 675 if (tp->cc_algo == NULL) 676 /* Tcb is leaving */ 677 return; 678 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 679 /* Not new-reno nothing to do! */ 680 return; 681 } 682 ptr = ((struct newreno *)tp->ccv->cc_data); 683 if (ptr == NULL) { 684 /* 685 * This happens at rack_fini() if the 686 * cc module gets freed on us. In that 687 * case we loose our "new" settings but 688 * thats ok, since the tcb is going away anyway. 
689 */ 690 return; 691 } 692 /* Grab out our set values */ 693 memcpy(&old, ptr, sizeof(struct newreno)); 694 /* Copy back in the original values */ 695 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 696 /* Now save back the values we had set in (for when pacing is restored) */ 697 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 698 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 699 union tcp_log_stackspecific log; 700 struct timeval tv; 701 702 ptr = ((struct newreno *)tp->ccv->cc_data); 703 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 704 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 705 log.u_bbr.flex1 = ptr->beta; 706 log.u_bbr.flex2 = ptr->beta_ecn; 707 log.u_bbr.flex3 = ptr->newreno_flags; 708 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 709 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 710 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 711 log.u_bbr.flex7 = rack->gp_ready; 712 log.u_bbr.flex7 <<= 1; 713 log.u_bbr.flex7 |= rack->use_fixed_rate; 714 log.u_bbr.flex7 <<= 1; 715 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 716 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 717 log.u_bbr.flex8 = 4; 718 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 719 0, &log, false, NULL, NULL, 0, &tv); 720 } 721 } 722 723 #ifdef NETFLIX_PEAKRATE 724 static inline void 725 rack_update_peakrate_thr(struct tcpcb *tp) 726 { 727 /* Keep in mind that t_maxpeakrate is in B/s. */ 728 uint64_t peak; 729 peak = uqmax((tp->t_maxseg * 2), 730 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 731 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 732 } 733 #endif 734 735 static int 736 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 737 { 738 uint32_t stat; 739 int32_t error; 740 int i; 741 742 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 743 if (error || req->newptr == NULL) 744 return error; 745 746 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 747 if (error) 748 return (error); 749 if (stat == 1) { 750 #ifdef INVARIANTS 751 printf("Clearing RACK counters\n"); 752 #endif 753 counter_u64_zero(rack_badfr); 754 counter_u64_zero(rack_badfr_bytes); 755 counter_u64_zero(rack_rtm_prr_retran); 756 counter_u64_zero(rack_rtm_prr_newdata); 757 counter_u64_zero(rack_timestamp_mismatch); 758 counter_u64_zero(rack_reorder_seen); 759 counter_u64_zero(rack_tlp_tot); 760 counter_u64_zero(rack_tlp_newdata); 761 counter_u64_zero(rack_tlp_retran); 762 counter_u64_zero(rack_tlp_retran_bytes); 763 counter_u64_zero(rack_tlp_retran_fail); 764 counter_u64_zero(rack_to_tot); 765 counter_u64_zero(rack_to_arm_rack); 766 counter_u64_zero(rack_to_arm_tlp); 767 counter_u64_zero(rack_paced_segments); 768 counter_u64_zero(rack_calc_zero); 769 counter_u64_zero(rack_calc_nonzero); 770 counter_u64_zero(rack_unpaced_segments); 771 counter_u64_zero(rack_saw_enobuf); 772 counter_u64_zero(rack_saw_enobuf_hw); 773 counter_u64_zero(rack_saw_enetunreach); 774 counter_u64_zero(rack_per_timer_hole); 775 counter_u64_zero(rack_large_ackcmp); 776 counter_u64_zero(rack_small_ackcmp); 777 #ifdef INVARIANTS 778 counter_u64_zero(rack_adjust_map_bw); 779 #endif 780 counter_u64_zero(rack_to_alloc_hard); 781 counter_u64_zero(rack_to_alloc_emerg); 782 counter_u64_zero(rack_sack_proc_all); 783 counter_u64_zero(rack_fto_send); 784 counter_u64_zero(rack_fto_rsm_send); 785 counter_u64_zero(rack_extended_rfo); 786 counter_u64_zero(rack_hw_pace_init_fail); 787 counter_u64_zero(rack_hw_pace_lost); 788 
counter_u64_zero(rack_sbsndptr_wrong); 789 counter_u64_zero(rack_sbsndptr_right); 790 counter_u64_zero(rack_non_fto_send); 791 counter_u64_zero(rack_nfto_resend); 792 counter_u64_zero(rack_sack_proc_short); 793 counter_u64_zero(rack_sack_proc_restart); 794 counter_u64_zero(rack_to_alloc); 795 counter_u64_zero(rack_to_alloc_limited); 796 counter_u64_zero(rack_alloc_limited_conns); 797 counter_u64_zero(rack_split_limited); 798 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 799 counter_u64_zero(rack_proc_comp_ack[i]); 800 } 801 counter_u64_zero(rack_multi_single_eq); 802 counter_u64_zero(rack_proc_non_comp_ack); 803 counter_u64_zero(rack_find_high); 804 counter_u64_zero(rack_sack_attacks_detected); 805 counter_u64_zero(rack_sack_attacks_reversed); 806 counter_u64_zero(rack_sack_used_next_merge); 807 counter_u64_zero(rack_sack_used_prev_merge); 808 counter_u64_zero(rack_sack_splits); 809 counter_u64_zero(rack_sack_skipped_acked); 810 counter_u64_zero(rack_ack_total); 811 counter_u64_zero(rack_express_sack); 812 counter_u64_zero(rack_sack_total); 813 counter_u64_zero(rack_move_none); 814 counter_u64_zero(rack_move_some); 815 counter_u64_zero(rack_used_tlpmethod); 816 counter_u64_zero(rack_used_tlpmethod2); 817 counter_u64_zero(rack_enter_tlp_calc); 818 counter_u64_zero(rack_progress_drops); 819 counter_u64_zero(rack_tlp_does_nada); 820 counter_u64_zero(rack_try_scwnd); 821 counter_u64_zero(rack_collapsed_win); 822 } 823 rack_clear_counter = 0; 824 return (0); 825 } 826 827 static void 828 rack_init_sysctls(void) 829 { 830 int i; 831 struct sysctl_oid *rack_counters; 832 struct sysctl_oid *rack_attack; 833 struct sysctl_oid *rack_pacing; 834 struct sysctl_oid *rack_timely; 835 struct sysctl_oid *rack_timers; 836 struct sysctl_oid *rack_tlp; 837 struct sysctl_oid *rack_misc; 838 struct sysctl_oid *rack_measure; 839 struct sysctl_oid *rack_probertt; 840 struct sysctl_oid *rack_hw_pacing; 841 842 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 843 SYSCTL_CHILDREN(rack_sysctl_root), 844 OID_AUTO, 845 "sack_attack", 846 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 847 "Rack Sack Attack Counters and Controls"); 848 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 849 SYSCTL_CHILDREN(rack_sysctl_root), 850 OID_AUTO, 851 "stats", 852 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 853 "Rack Counters"); 854 SYSCTL_ADD_S32(&rack_sysctl_ctx, 855 SYSCTL_CHILDREN(rack_sysctl_root), 856 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 857 &rack_rate_sample_method , USE_RTT_LOW, 858 "What method should we use for rate sampling 0=high, 1=low "); 859 /* Probe rtt related controls */ 860 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 861 SYSCTL_CHILDREN(rack_sysctl_root), 862 OID_AUTO, 863 "probertt", 864 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 865 "ProbeRTT related Controls"); 866 SYSCTL_ADD_U16(&rack_sysctl_ctx, 867 SYSCTL_CHILDREN(rack_probertt), 868 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 869 &rack_atexit_prtt_hbp, 130, 870 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 871 SYSCTL_ADD_U16(&rack_sysctl_ctx, 872 SYSCTL_CHILDREN(rack_probertt), 873 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 874 &rack_atexit_prtt, 130, 875 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 876 SYSCTL_ADD_U16(&rack_sysctl_ctx, 877 SYSCTL_CHILDREN(rack_probertt), 878 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 879 &rack_per_of_gp_probertt, 60, 880 "What percentage of goodput do we pace at in probertt"); 881 SYSCTL_ADD_U16(&rack_sysctl_ctx, 882 SYSCTL_CHILDREN(rack_probertt), 883 OID_AUTO, "gp_per_reduce", 
CTLFLAG_RW, 884 &rack_per_of_gp_probertt_reduce, 10, 885 "What percentage of goodput do we reduce every gp_srtt"); 886 SYSCTL_ADD_U16(&rack_sysctl_ctx, 887 SYSCTL_CHILDREN(rack_probertt), 888 OID_AUTO, "gp_per_low", CTLFLAG_RW, 889 &rack_per_of_gp_lowthresh, 40, 890 "What percentage of goodput do we allow the multiplier to fall to"); 891 SYSCTL_ADD_U32(&rack_sysctl_ctx, 892 SYSCTL_CHILDREN(rack_probertt), 893 OID_AUTO, "time_between", CTLFLAG_RW, 894 & rack_time_between_probertt, 96000000, 895 "How many useconds between the lowest rtt falling must past before we enter probertt"); 896 SYSCTL_ADD_U32(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_probertt), 898 OID_AUTO, "safety", CTLFLAG_RW, 899 &rack_probe_rtt_safety_val, 2000000, 900 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 901 SYSCTL_ADD_U32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_probertt), 903 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 904 &rack_probe_rtt_sets_cwnd, 0, 905 "Do we set the cwnd too (if always_lower is on)"); 906 SYSCTL_ADD_U32(&rack_sysctl_ctx, 907 SYSCTL_CHILDREN(rack_probertt), 908 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 909 &rack_max_drain_wait, 2, 910 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 911 SYSCTL_ADD_U32(&rack_sysctl_ctx, 912 SYSCTL_CHILDREN(rack_probertt), 913 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 914 &rack_must_drain, 1, 915 "We must drain this many gp_srtt's waiting for flight to reach goal"); 916 SYSCTL_ADD_U32(&rack_sysctl_ctx, 917 SYSCTL_CHILDREN(rack_probertt), 918 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 919 &rack_probertt_use_min_rtt_entry, 1, 920 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 921 SYSCTL_ADD_U32(&rack_sysctl_ctx, 922 SYSCTL_CHILDREN(rack_probertt), 923 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 924 &rack_probertt_use_min_rtt_exit, 0, 925 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 926 SYSCTL_ADD_U32(&rack_sysctl_ctx, 927 SYSCTL_CHILDREN(rack_probertt), 928 OID_AUTO, "length_div", CTLFLAG_RW, 929 &rack_probertt_gpsrtt_cnt_div, 0, 930 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 931 SYSCTL_ADD_U32(&rack_sysctl_ctx, 932 SYSCTL_CHILDREN(rack_probertt), 933 OID_AUTO, "length_mul", CTLFLAG_RW, 934 &rack_probertt_gpsrtt_cnt_mul, 0, 935 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 936 SYSCTL_ADD_U32(&rack_sysctl_ctx, 937 SYSCTL_CHILDREN(rack_probertt), 938 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 939 &rack_min_probertt_hold, 200000, 940 "What is the minimum time we hold probertt at target"); 941 SYSCTL_ADD_U32(&rack_sysctl_ctx, 942 SYSCTL_CHILDREN(rack_probertt), 943 OID_AUTO, "filter_life", CTLFLAG_RW, 944 &rack_probertt_filter_life, 10000000, 945 "What is the time for the filters life in useconds"); 946 SYSCTL_ADD_U32(&rack_sysctl_ctx, 947 SYSCTL_CHILDREN(rack_probertt), 948 OID_AUTO, "lower_within", CTLFLAG_RW, 949 &rack_probertt_lower_within, 10, 950 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 951 SYSCTL_ADD_U32(&rack_sysctl_ctx, 952 SYSCTL_CHILDREN(rack_probertt), 953 OID_AUTO, "must_move", CTLFLAG_RW, 954 &rack_min_rtt_movement, 250, 955 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 956 SYSCTL_ADD_U32(&rack_sysctl_ctx, 957 SYSCTL_CHILDREN(rack_probertt), 958 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 959 &rack_probertt_clear_is, 1, 960 "Do we clear I/S 
counts on exiting probe-rtt"); 961 SYSCTL_ADD_S32(&rack_sysctl_ctx, 962 SYSCTL_CHILDREN(rack_probertt), 963 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 964 &rack_max_drain_hbp, 1, 965 "How many extra drain gpsrtt's do we get in highly buffered paths"); 966 SYSCTL_ADD_S32(&rack_sysctl_ctx, 967 SYSCTL_CHILDREN(rack_probertt), 968 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 969 &rack_hbp_thresh, 3, 970 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 971 /* Pacing related sysctls */ 972 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_sysctl_root), 974 OID_AUTO, 975 "pacing", 976 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 977 "Pacing related Controls"); 978 SYSCTL_ADD_S32(&rack_sysctl_ctx, 979 SYSCTL_CHILDREN(rack_pacing), 980 OID_AUTO, "max_pace_over", CTLFLAG_RW, 981 &rack_max_per_above, 30, 982 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 983 SYSCTL_ADD_S32(&rack_sysctl_ctx, 984 SYSCTL_CHILDREN(rack_pacing), 985 OID_AUTO, "pace_to_one", CTLFLAG_RW, 986 &rack_pace_one_seg, 0, 987 "Do we allow low b/w pacing of 1MSS instead of two"); 988 SYSCTL_ADD_S32(&rack_sysctl_ctx, 989 SYSCTL_CHILDREN(rack_pacing), 990 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 991 &rack_limit_time_with_srtt, 0, 992 "Do we limit pacing time based on srtt"); 993 SYSCTL_ADD_S32(&rack_sysctl_ctx, 994 SYSCTL_CHILDREN(rack_pacing), 995 OID_AUTO, "init_win", CTLFLAG_RW, 996 &rack_default_init_window, 0, 997 "Do we have a rack initial window 0 = system default"); 998 SYSCTL_ADD_U16(&rack_sysctl_ctx, 999 SYSCTL_CHILDREN(rack_pacing), 1000 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1001 &rack_per_of_gp_ss, 250, 1002 "If non zero, what percentage of goodput to pace at in slow start"); 1003 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1004 SYSCTL_CHILDREN(rack_pacing), 1005 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1006 &rack_per_of_gp_ca, 150, 1007 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1008 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1009 SYSCTL_CHILDREN(rack_pacing), 1010 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1011 &rack_per_of_gp_rec, 200, 1012 "If non zero, what percentage of goodput to pace at in recovery"); 1013 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_pacing), 1015 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1016 &rack_hptsi_segments, 40, 1017 "What size is the max for TSO segments in pacing and burst mitigation"); 1018 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1019 SYSCTL_CHILDREN(rack_pacing), 1020 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1021 &rack_slot_reduction, 4, 1022 "When doing only burst mitigation what is the reduce divisor"); 1023 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1024 SYSCTL_CHILDREN(rack_sysctl_root), 1025 OID_AUTO, "use_pacing", CTLFLAG_RW, 1026 &rack_pace_every_seg, 0, 1027 "If set we use pacing, if clear we use only the original burst mitigation"); 1028 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1029 SYSCTL_CHILDREN(rack_pacing), 1030 OID_AUTO, "rate_cap", CTLFLAG_RW, 1031 &rack_bw_rate_cap, 0, 1032 "If set we apply this value to the absolute rate cap used by pacing"); 1033 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1034 SYSCTL_CHILDREN(rack_sysctl_root), 1035 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1036 &rack_req_measurements, 1, 1037 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1038 /* Hardware pacing */ 1039 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_sysctl_root), 1041 OID_AUTO, 1042 "hdwr_pacing", 1043 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1044 "Pacing related Controls"); 1045 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1046 SYSCTL_CHILDREN(rack_hw_pacing), 1047 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1048 &rack_hw_rwnd_factor, 2, 1049 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1050 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1051 SYSCTL_CHILDREN(rack_hw_pacing), 1052 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1053 &rack_enobuf_hw_boost_mult, 2, 1054 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1055 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1056 SYSCTL_CHILDREN(rack_hw_pacing), 1057 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1058 &rack_enobuf_hw_max, 2, 1059 "What is the max boost the pacing time if we see a ENOBUFS?"); 1060 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1061 SYSCTL_CHILDREN(rack_hw_pacing), 1062 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1063 &rack_enobuf_hw_min, 2, 1064 "What is the min boost the pacing time if we see a ENOBUFS?"); 1065 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1066 SYSCTL_CHILDREN(rack_hw_pacing), 1067 OID_AUTO, "enable", CTLFLAG_RW, 1068 &rack_enable_hw_pacing, 0, 1069 "Should RACK attempt to use hw pacing?"); 1070 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1071 SYSCTL_CHILDREN(rack_hw_pacing), 1072 OID_AUTO, "rate_cap", CTLFLAG_RW, 1073 &rack_hw_rate_caps, 1, 1074 "Does the highest hardware pacing rate cap the rate we will send at??"); 1075 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1076 SYSCTL_CHILDREN(rack_hw_pacing), 1077 OID_AUTO, "rate_min", CTLFLAG_RW, 1078 &rack_hw_rate_min, 0, 1079 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1080 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1081 SYSCTL_CHILDREN(rack_hw_pacing), 1082 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1083 &rack_hw_rate_to_low, 0, 1084 "If we fall below this rate, dis-engage hw pacing?"); 1085 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1086 SYSCTL_CHILDREN(rack_hw_pacing), 1087 OID_AUTO, "up_only", CTLFLAG_RW, 1088 &rack_hw_up_only, 1, 1089 "Do we allow hw pacing to lower the rate selected?"); 1090 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1091 SYSCTL_CHILDREN(rack_hw_pacing), 1092 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1093 &rack_hw_pace_extra_slots, 2, 1094 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1095 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1096 SYSCTL_CHILDREN(rack_sysctl_root), 1097 OID_AUTO, 1098 "timely", 1099 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1100 "Rack Timely RTT Controls"); 1101 /* Timely based GP dynmics */ 1102 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1103 SYSCTL_CHILDREN(rack_timely), 1104 OID_AUTO, "upper", CTLFLAG_RW, 1105 &rack_gp_per_bw_mul_up, 2, 1106 "Rack timely upper range for equal b/w (in percentage)"); 1107 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1108 SYSCTL_CHILDREN(rack_timely), 1109 OID_AUTO, "lower", CTLFLAG_RW, 1110 &rack_gp_per_bw_mul_down, 4, 1111 "Rack timely lower range for equal b/w (in percentage)"); 1112 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1113 SYSCTL_CHILDREN(rack_timely), 1114 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1115 &rack_gp_rtt_maxmul, 3, 1116 "Rack timely multipler of lowest rtt for rtt_max"); 1117 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1118 SYSCTL_CHILDREN(rack_timely), 1119 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1120 &rack_gp_rtt_mindiv, 4, 1121 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1122 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1123 SYSCTL_CHILDREN(rack_timely), 1124 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1125 &rack_gp_rtt_minmul, 1, 1126 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for 
check for lower rtt"); 1127 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1128 SYSCTL_CHILDREN(rack_timely), 1129 OID_AUTO, "decrease", CTLFLAG_RW, 1130 &rack_gp_decrease_per, 20, 1131 "Rack timely decrease percentage of our GP multiplication factor"); 1132 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1133 SYSCTL_CHILDREN(rack_timely), 1134 OID_AUTO, "increase", CTLFLAG_RW, 1135 &rack_gp_increase_per, 2, 1136 "Rack timely increase perentage of our GP multiplication factor"); 1137 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1138 SYSCTL_CHILDREN(rack_timely), 1139 OID_AUTO, "lowerbound", CTLFLAG_RW, 1140 &rack_per_lower_bound, 50, 1141 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1142 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1143 SYSCTL_CHILDREN(rack_timely), 1144 OID_AUTO, "upperboundss", CTLFLAG_RW, 1145 &rack_per_upper_bound_ss, 0, 1146 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1147 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1148 SYSCTL_CHILDREN(rack_timely), 1149 OID_AUTO, "upperboundca", CTLFLAG_RW, 1150 &rack_per_upper_bound_ca, 0, 1151 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1152 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1153 SYSCTL_CHILDREN(rack_timely), 1154 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1155 &rack_do_dyn_mul, 0, 1156 "Rack timely do we enable dynmaic timely goodput by default"); 1157 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1158 SYSCTL_CHILDREN(rack_timely), 1159 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1160 &rack_gp_no_rec_chg, 1, 1161 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1162 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1163 SYSCTL_CHILDREN(rack_timely), 1164 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1165 &rack_timely_dec_clear, 6, 1166 "Rack timely what threshold do we count to before another boost during b/w decent"); 1167 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1168 SYSCTL_CHILDREN(rack_timely), 1169 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1170 &rack_timely_max_push_rise, 3, 1171 "Rack timely how many times do we push up with b/w increase"); 1172 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1173 SYSCTL_CHILDREN(rack_timely), 1174 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1175 &rack_timely_max_push_drop, 3, 1176 "Rack timely how many times do we push back on b/w decent"); 1177 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1178 SYSCTL_CHILDREN(rack_timely), 1179 OID_AUTO, "min_segs", CTLFLAG_RW, 1180 &rack_timely_min_segs, 4, 1181 "Rack timely when setting the cwnd what is the min num segments"); 1182 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1183 SYSCTL_CHILDREN(rack_timely), 1184 OID_AUTO, "noback_max", CTLFLAG_RW, 1185 &rack_use_max_for_nobackoff, 0, 1186 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1187 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1188 SYSCTL_CHILDREN(rack_timely), 1189 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1190 &rack_timely_int_timely_only, 0, 1191 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1192 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1193 SYSCTL_CHILDREN(rack_timely), 1194 OID_AUTO, "nonstop", CTLFLAG_RW, 1195 &rack_timely_no_stopping, 0, 1196 "Rack timely don't stop increase"); 1197 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1198 SYSCTL_CHILDREN(rack_timely), 1199 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1200 &rack_down_raise_thresh, 100, 1201 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1202 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1203 SYSCTL_CHILDREN(rack_timely), 1204 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1205 
&rack_req_segs, 1, 1206 "Bottom dragging if not these many segments outstanding and room"); 1207 1208 /* TLP and Rack related parameters */ 1209 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1210 SYSCTL_CHILDREN(rack_sysctl_root), 1211 OID_AUTO, 1212 "tlp", 1213 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1214 "TLP and Rack related Controls"); 1215 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1216 SYSCTL_CHILDREN(rack_tlp), 1217 OID_AUTO, "use_rrr", CTLFLAG_RW, 1218 &use_rack_rr, 1, 1219 "Do we use Rack Rapid Recovery"); 1220 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1221 SYSCTL_CHILDREN(rack_tlp), 1222 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1223 &rack_max_abc_post_recovery, 2, 1224 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1225 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1226 SYSCTL_CHILDREN(rack_tlp), 1227 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1228 &rack_non_rxt_use_cr, 0, 1229 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1230 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1231 SYSCTL_CHILDREN(rack_tlp), 1232 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1233 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1234 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1235 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1236 SYSCTL_CHILDREN(rack_tlp), 1237 OID_AUTO, "limit", CTLFLAG_RW, 1238 &rack_tlp_limit, 2, 1239 "How many TLP's can be sent without sending new data"); 1240 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1241 SYSCTL_CHILDREN(rack_tlp), 1242 OID_AUTO, "use_greater", CTLFLAG_RW, 1243 &rack_tlp_use_greater, 1, 1244 "Should we use the rack_rtt time if its greater than srtt"); 1245 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1246 SYSCTL_CHILDREN(rack_tlp), 1247 OID_AUTO, "tlpminto", CTLFLAG_RW, 1248 &rack_tlp_min, 10000, 1249 "TLP minimum timeout per the specification (in microseconds)"); 1250 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1251 SYSCTL_CHILDREN(rack_tlp), 1252 OID_AUTO, "send_oldest", CTLFLAG_RW, 1253 &rack_always_send_oldest, 0, 1254 "Should we always send the oldest TLP and RACK-TLP"); 1255 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_tlp), 1257 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1258 &rack_limited_retran, 0, 1259 "How many times can a rack timeout drive out sends"); 1260 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1261 SYSCTL_CHILDREN(rack_tlp), 1262 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1263 &rack_lower_cwnd_at_tlp, 0, 1264 "When a TLP completes a retran should we enter recovery"); 1265 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1266 SYSCTL_CHILDREN(rack_tlp), 1267 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1268 &rack_reorder_thresh, 2, 1269 "What factor for rack will be added when seeing reordering (shift right)"); 1270 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1271 SYSCTL_CHILDREN(rack_tlp), 1272 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1273 &rack_tlp_thresh, 1, 1274 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1275 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1276 SYSCTL_CHILDREN(rack_tlp), 1277 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1278 &rack_reorder_fade, 60000000, 1279 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1280 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1281 SYSCTL_CHILDREN(rack_tlp), 1282 OID_AUTO, "pktdelay", CTLFLAG_RW, 1283 &rack_pkt_delay, 1000, 1284 "Extra RACK time (in microseconds) besides reordering thresh"); 1285 1286 /* Timer related controls */ 1287 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_sysctl_root), 1289 OID_AUTO, 1290 "timers", 1291 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1292 "Timer related controls"); 1293 
SYSCTL_ADD_U32(&rack_sysctl_ctx, 1294 SYSCTL_CHILDREN(rack_timers), 1295 OID_AUTO, "persmin", CTLFLAG_RW, 1296 &rack_persist_min, 250000, 1297 "What is the minimum time in microseconds between persists"); 1298 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1299 SYSCTL_CHILDREN(rack_timers), 1300 OID_AUTO, "persmax", CTLFLAG_RW, 1301 &rack_persist_max, 2000000, 1302 "What is the largest delay in microseconds between persists"); 1303 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1304 SYSCTL_CHILDREN(rack_timers), 1305 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1306 &rack_delayed_ack_time, 40000, 1307 "Delayed ack time (40ms in microseconds)"); 1308 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1309 SYSCTL_CHILDREN(rack_timers), 1310 OID_AUTO, "minrto", CTLFLAG_RW, 1311 &rack_rto_min, 30000, 1312 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1313 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1314 SYSCTL_CHILDREN(rack_timers), 1315 OID_AUTO, "maxrto", CTLFLAG_RW, 1316 &rack_rto_max, 4000000, 1317 "Maxiumum RTO in microseconds -- should be at least as large as min_rto"); 1318 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1319 SYSCTL_CHILDREN(rack_timers), 1320 OID_AUTO, "minto", CTLFLAG_RW, 1321 &rack_min_to, 1000, 1322 "Minimum rack timeout in microseconds"); 1323 /* Measure controls */ 1324 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1325 SYSCTL_CHILDREN(rack_sysctl_root), 1326 OID_AUTO, 1327 "measure", 1328 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1329 "Measure related controls"); 1330 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1331 SYSCTL_CHILDREN(rack_measure), 1332 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1333 &rack_wma_divisor, 8, 1334 "When doing b/w calculation what is the divisor for the WMA"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_measure), 1337 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1338 &rack_cwnd_block_ends_measure, 0, 1339 "Does a cwnd just-return end the measurement window (app limited)"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_measure), 1342 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1343 &rack_rwnd_block_ends_measure, 0, 1344 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1345 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_measure), 1347 OID_AUTO, "min_target", CTLFLAG_RW, 1348 &rack_def_data_window, 20, 1349 "What is the minimum target window (in mss) for a GP measurements"); 1350 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_measure), 1352 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1353 &rack_goal_bdp, 2, 1354 "What is the goal BDP to measure"); 1355 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1356 SYSCTL_CHILDREN(rack_measure), 1357 OID_AUTO, "min_srtts", CTLFLAG_RW, 1358 &rack_min_srtts, 1, 1359 "What is the goal BDP to measure"); 1360 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_measure), 1362 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1363 &rack_min_measure_usec, 0, 1364 "What is the Minimum time time for a measurement if 0, this is off"); 1365 /* Misc rack controls */ 1366 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1367 SYSCTL_CHILDREN(rack_sysctl_root), 1368 OID_AUTO, 1369 "misc", 1370 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1371 "Misc related controls"); 1372 #ifdef TCP_ACCOUNTING 1373 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1374 SYSCTL_CHILDREN(rack_misc), 1375 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1376 &rack_tcp_accounting, 0, 1377 "Should we turn on TCP accounting for all rack sessions?"); 1378 #endif 1379 1380 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1381 SYSCTL_CHILDREN(rack_misc), 1382 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1383 &rack_dsack_std_based, 3, 
1384 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1385 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1386 SYSCTL_CHILDREN(rack_misc), 1387 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1388 &rack_prr_addbackmax, 2, 1389 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1390 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1391 SYSCTL_CHILDREN(rack_misc), 1392 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1393 &rack_stats_gets_ms_rtt, 1, 1394 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1395 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1396 SYSCTL_CHILDREN(rack_misc), 1397 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1398 &rack_client_low_buf, 0, 1399 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1400 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1401 SYSCTL_CHILDREN(rack_misc), 1402 OID_AUTO, "defprofile", CTLFLAG_RW, 1403 &rack_def_profile, 0, 1404 "Should RACK use a default profile (0=no, num == profile num)?"); 1405 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1406 SYSCTL_CHILDREN(rack_misc), 1407 OID_AUTO, "cmpack", CTLFLAG_RW, 1408 &rack_use_cmp_acks, 1, 1409 "Should RACK have LRO send compressed acks"); 1410 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1411 SYSCTL_CHILDREN(rack_misc), 1412 OID_AUTO, "fsb", CTLFLAG_RW, 1413 &rack_use_fsb, 1, 1414 "Should RACK use the fast send block?"); 1415 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1416 SYSCTL_CHILDREN(rack_misc), 1417 OID_AUTO, "rfo", CTLFLAG_RW, 1418 &rack_use_rfo, 1, 1419 "Should RACK use rack_fast_output()?"); 1420 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1421 SYSCTL_CHILDREN(rack_misc), 1422 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1423 &rack_use_rsm_rfo, 1, 1424 "Should RACK use rack_fast_rsm_output()?"); 1425 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1426 SYSCTL_CHILDREN(rack_misc), 1427 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1428 &rack_enable_shared_cwnd, 1, 1429 "Should RACK try to use the shared cwnd on connections where allowed"); 1430 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1431 SYSCTL_CHILDREN(rack_misc), 1432 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1433 &rack_limits_scwnd, 1, 1434 "Should RACK place low end time limits on the shared cwnd feature"); 1435 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1436 SYSCTL_CHILDREN(rack_misc), 1437 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1438 &rack_enable_mqueue_for_nonpaced, 0, 1439 "Should RACK use mbuf queuing for non-paced connections"); 1440 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_misc), 1442 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1443 &rack_use_imac_dack, 0, 1444 "Should RACK try to emulate iMac delayed ack"); 1445 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_misc), 1447 OID_AUTO, "no_prr", CTLFLAG_RW, 1448 &rack_disable_prr, 0, 1449 "Should RACK not use prr and only pace (must have pacing on)"); 1450 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_misc), 1452 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1453 &rack_verbose_logging, 0, 1454 "Should RACK black box logging be verbose"); 1455 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_misc), 1457 OID_AUTO, "data_after_close", CTLFLAG_RW, 1458 &rack_ignore_data_after_close, 1, 1459 "Do we hold off sending a RST until all pending data is ack'd"); 1460 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_misc), 1462 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1463 &rack_sack_not_required, 1, 1464 "Do we allow rack to run on connections not supporting SACK"); 1465 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1466 
SYSCTL_CHILDREN(rack_misc), 1467 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1468 &rack_send_a_lot_in_prr, 1, 1469 "Send a lot in prr"); 1470 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1471 SYSCTL_CHILDREN(rack_misc), 1472 OID_AUTO, "autoscale", CTLFLAG_RW, 1473 &rack_autosndbuf_inc, 20, 1474 "What percentage should rack scale up its snd buffer by?"); 1475 /* Sack Attacker detection stuff */ 1476 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_attack), 1478 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1479 &rack_highest_sack_thresh_seen, 0, 1480 "Highest sack to ack ratio seen"); 1481 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1482 SYSCTL_CHILDREN(rack_attack), 1483 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1484 &rack_highest_move_thresh_seen, 0, 1485 "Highest move to non-move ratio seen"); 1486 rack_ack_total = counter_u64_alloc(M_WAITOK); 1487 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1488 SYSCTL_CHILDREN(rack_attack), 1489 OID_AUTO, "acktotal", CTLFLAG_RD, 1490 &rack_ack_total, 1491 "Total number of ACKs"); 1492 rack_express_sack = counter_u64_alloc(M_WAITOK); 1493 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1494 SYSCTL_CHILDREN(rack_attack), 1495 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1496 &rack_express_sack, 1497 "Total number of express SACKs"); 1498 rack_sack_total = counter_u64_alloc(M_WAITOK); 1499 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1500 SYSCTL_CHILDREN(rack_attack), 1501 OID_AUTO, "sacktotal", CTLFLAG_RD, 1502 &rack_sack_total, 1503 "Total number of SACKs"); 1504 rack_move_none = counter_u64_alloc(M_WAITOK); 1505 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1506 SYSCTL_CHILDREN(rack_attack), 1507 OID_AUTO, "move_none", CTLFLAG_RD, 1508 &rack_move_none, 1509 "Total number of SACK index reuse of positions under threshold"); 1510 rack_move_some = counter_u64_alloc(M_WAITOK); 1511 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1512 SYSCTL_CHILDREN(rack_attack), 1513 OID_AUTO, "move_some", CTLFLAG_RD, 1514 &rack_move_some, 1515 "Total number of SACK index reuse of positions over threshold"); 1516 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1517 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1518 SYSCTL_CHILDREN(rack_attack), 1519 OID_AUTO, "attacks", CTLFLAG_RD, 1520 &rack_sack_attacks_detected, 1521 "Total number of SACK attackers that had sack disabled"); 1522 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1523 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1524 SYSCTL_CHILDREN(rack_attack), 1525 OID_AUTO, "reversed", CTLFLAG_RD, 1526 &rack_sack_attacks_reversed, 1527 "Total number of SACK attackers that were later determined to be false positives"); 1528 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1529 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1530 SYSCTL_CHILDREN(rack_attack), 1531 OID_AUTO, "nextmerge", CTLFLAG_RD, 1532 &rack_sack_used_next_merge, 1533 "Total number of times we used the next merge"); 1534 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1535 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1536 SYSCTL_CHILDREN(rack_attack), 1537 OID_AUTO, "prevmerge", CTLFLAG_RD, 1538 &rack_sack_used_prev_merge, 1539 "Total number of times we used the prev merge"); 1540 /* Counters */ 1541 rack_fto_send = counter_u64_alloc(M_WAITOK); 1542 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1543 SYSCTL_CHILDREN(rack_counters), 1544 OID_AUTO, "fto_send", CTLFLAG_RD, 1545 &rack_fto_send, "Total number of rack_fast_output sends"); 1546 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1547 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1548 SYSCTL_CHILDREN(rack_counters), 1549 OID_AUTO,
"fto_rsm_send", CTLFLAG_RD, 1550 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1551 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1552 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1553 SYSCTL_CHILDREN(rack_counters), 1554 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1555 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1556 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1557 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1558 SYSCTL_CHILDREN(rack_counters), 1559 OID_AUTO, "nfto_send", CTLFLAG_RD, 1560 &rack_non_fto_send, "Total number of rack_output first sends"); 1561 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1562 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1563 SYSCTL_CHILDREN(rack_counters), 1564 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1565 &rack_extended_rfo, "Total number of times we extended rfo"); 1566 1567 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1568 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_counters), 1570 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1571 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1572 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1573 1574 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1575 SYSCTL_CHILDREN(rack_counters), 1576 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1577 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1578 rack_badfr = counter_u64_alloc(M_WAITOK); 1579 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1580 SYSCTL_CHILDREN(rack_counters), 1581 OID_AUTO, "badfr", CTLFLAG_RD, 1582 &rack_badfr, "Total number of bad FRs"); 1583 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_counters), 1586 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 1587 &rack_badfr_bytes, "Total number of bad FRs"); 1588 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 1589 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1590 SYSCTL_CHILDREN(rack_counters), 1591 OID_AUTO, "prrsndret", CTLFLAG_RD, 1592 &rack_rtm_prr_retran, 1593 "Total number of prr based retransmits"); 1594 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_counters), 1597 OID_AUTO, "prrsndnew", CTLFLAG_RD, 1598 &rack_rtm_prr_newdata, 1599 "Total number of prr based new transmits"); 1600 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_counters), 1603 OID_AUTO, "tsnf", CTLFLAG_RD, 1604 &rack_timestamp_mismatch, 1605 "Total number of timestamps that we could not find the reported ts"); 1606 rack_find_high = counter_u64_alloc(M_WAITOK); 1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1608 SYSCTL_CHILDREN(rack_counters), 1609 OID_AUTO, "findhigh", CTLFLAG_RD, 1610 &rack_find_high, 1611 "Total number of FIN causing find-high"); 1612 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_counters), 1615 OID_AUTO, "reordering", CTLFLAG_RD, 1616 &rack_reorder_seen, 1617 "Total number of times we added delay due to reordering"); 1618 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1620 SYSCTL_CHILDREN(rack_counters), 1621 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1622 &rack_tlp_tot, 1623 "Total number of tail loss probe expirations"); 1624 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_counters), 1627 OID_AUTO, "tlp_new", 
CTLFLAG_RD, 1628 &rack_tlp_newdata, 1629 "Total number of tail loss probe sending new data"); 1630 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1632 SYSCTL_CHILDREN(rack_counters), 1633 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1634 &rack_tlp_retran, 1635 "Total number of tail loss probe sending retransmitted data"); 1636 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1638 SYSCTL_CHILDREN(rack_counters), 1639 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1640 &rack_tlp_retran_bytes, 1641 "Total bytes of tail loss probe sending retransmitted data"); 1642 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1644 SYSCTL_CHILDREN(rack_counters), 1645 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 1646 &rack_tlp_retran_fail, 1647 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 1648 rack_to_tot = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_counters), 1651 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1652 &rack_to_tot, 1653 "Total number of times the rack to expired"); 1654 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_counters), 1657 OID_AUTO, "arm_rack", CTLFLAG_RD, 1658 &rack_to_arm_rack, 1659 "Total number of times the rack timer armed"); 1660 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1662 SYSCTL_CHILDREN(rack_counters), 1663 OID_AUTO, "arm_tlp", CTLFLAG_RD, 1664 &rack_to_arm_tlp, 1665 "Total number of times the tlp timer armed"); 1666 rack_calc_zero = counter_u64_alloc(M_WAITOK); 1667 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 1668 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1669 SYSCTL_CHILDREN(rack_counters), 1670 OID_AUTO, "calc_zero", CTLFLAG_RD, 1671 &rack_calc_zero, 1672 "Total number of times pacing time worked out to zero"); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_counters), 1675 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 1676 &rack_calc_nonzero, 1677 "Total number of times pacing time worked out to non-zero"); 1678 rack_paced_segments = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "paced", CTLFLAG_RD, 1682 &rack_paced_segments, 1683 "Total number of times a segment send caused hptsi"); 1684 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "unpaced", CTLFLAG_RD, 1688 &rack_unpaced_segments, 1689 "Total number of times a segment did not cause hptsi"); 1690 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1692 SYSCTL_CHILDREN(rack_counters), 1693 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1694 &rack_saw_enobuf, 1695 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1696 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_counters), 1699 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1700 &rack_saw_enobuf_hw, 1701 "Total number of times a send returned enobuf for hdwr paced connections"); 1702 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_counters), 1705 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1706 &rack_saw_enetunreach, 1707 "Total 
number of times a send received a enetunreachable"); 1708 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1710 SYSCTL_CHILDREN(rack_counters), 1711 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1712 &rack_hot_alloc, 1713 "Total allocations from the top of our list"); 1714 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1716 SYSCTL_CHILDREN(rack_counters), 1717 OID_AUTO, "allocs", CTLFLAG_RD, 1718 &rack_to_alloc, 1719 "Total allocations of tracking structures"); 1720 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1722 SYSCTL_CHILDREN(rack_counters), 1723 OID_AUTO, "allochard", CTLFLAG_RD, 1724 &rack_to_alloc_hard, 1725 "Total allocations done with sleeping the hard way"); 1726 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1728 SYSCTL_CHILDREN(rack_counters), 1729 OID_AUTO, "allocemerg", CTLFLAG_RD, 1730 &rack_to_alloc_emerg, 1731 "Total allocations done from emergency cache"); 1732 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1733 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1734 SYSCTL_CHILDREN(rack_counters), 1735 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1736 &rack_to_alloc_limited, 1737 "Total allocations dropped due to limit"); 1738 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1740 SYSCTL_CHILDREN(rack_counters), 1741 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1742 &rack_alloc_limited_conns, 1743 "Connections with allocations dropped due to limit"); 1744 rack_split_limited = counter_u64_alloc(M_WAITOK); 1745 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1746 SYSCTL_CHILDREN(rack_counters), 1747 OID_AUTO, "split_limited", CTLFLAG_RD, 1748 &rack_split_limited, 1749 "Split allocations dropped due to limit"); 1750 1751 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 1752 char name[32]; 1753 sprintf(name, "cmp_ack_cnt_%d", i); 1754 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_counters), 1757 OID_AUTO, name, CTLFLAG_RD, 1758 &rack_proc_comp_ack[i], 1759 "Number of compressed acks we processed"); 1760 } 1761 rack_large_ackcmp = counter_u64_alloc(M_WAITOK); 1762 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1763 SYSCTL_CHILDREN(rack_counters), 1764 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD, 1765 &rack_large_ackcmp, 1766 "Number of TCP connections with large mbuf's for compressed acks"); 1767 rack_small_ackcmp = counter_u64_alloc(M_WAITOK); 1768 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1769 SYSCTL_CHILDREN(rack_counters), 1770 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD, 1771 &rack_small_ackcmp, 1772 "Number of TCP connections with small mbuf's for compressed acks"); 1773 #ifdef INVARIANTS 1774 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1775 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1776 SYSCTL_CHILDREN(rack_counters), 1777 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1778 &rack_adjust_map_bw, 1779 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1780 #endif 1781 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1783 SYSCTL_CHILDREN(rack_counters), 1784 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1785 &rack_multi_single_eq, 1786 "Number of compressed acks total represented"); 1787 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1788 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1789 SYSCTL_CHILDREN(rack_counters), 1790 OID_AUTO, 
"cmp_ack_not", CTLFLAG_RD, 1791 &rack_proc_non_comp_ack, 1792 "Number of non compresseds acks that we processed"); 1793 1794 1795 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1796 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1797 SYSCTL_CHILDREN(rack_counters), 1798 OID_AUTO, "sack_long", CTLFLAG_RD, 1799 &rack_sack_proc_all, 1800 "Total times we had to walk whole list for sack processing"); 1801 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1802 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1803 SYSCTL_CHILDREN(rack_counters), 1804 OID_AUTO, "sack_restart", CTLFLAG_RD, 1805 &rack_sack_proc_restart, 1806 "Total times we had to walk whole list due to a restart"); 1807 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1809 SYSCTL_CHILDREN(rack_counters), 1810 OID_AUTO, "sack_short", CTLFLAG_RD, 1811 &rack_sack_proc_short, 1812 "Total times we took shortcut for sack processing"); 1813 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 1814 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1815 SYSCTL_CHILDREN(rack_counters), 1816 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD, 1817 &rack_enter_tlp_calc, 1818 "Total times we called calc-tlp"); 1819 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1821 SYSCTL_CHILDREN(rack_counters), 1822 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 1823 &rack_used_tlpmethod, 1824 "Total number of runt sacks"); 1825 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 1826 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1827 SYSCTL_CHILDREN(rack_counters), 1828 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 1829 &rack_used_tlpmethod2, 1830 "Total number of times we hit TLP method 2"); 1831 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1832 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1833 SYSCTL_CHILDREN(rack_attack), 1834 OID_AUTO, "skipacked", CTLFLAG_RD, 1835 &rack_sack_skipped_acked, 1836 "Total number of times we skipped previously sacked"); 1837 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1838 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1839 SYSCTL_CHILDREN(rack_attack), 1840 OID_AUTO, "ofsplit", CTLFLAG_RD, 1841 &rack_sack_splits, 1842 "Total number of times we did the old fashion tree split"); 1843 rack_progress_drops = counter_u64_alloc(M_WAITOK); 1844 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1845 SYSCTL_CHILDREN(rack_counters), 1846 OID_AUTO, "prog_drops", CTLFLAG_RD, 1847 &rack_progress_drops, 1848 "Total number of progress drops"); 1849 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1850 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1851 SYSCTL_CHILDREN(rack_counters), 1852 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1853 &rack_input_idle_reduces, 1854 "Total number of idle reductions on input"); 1855 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1856 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1857 SYSCTL_CHILDREN(rack_counters), 1858 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1859 &rack_collapsed_win, 1860 "Total number of collapsed windows"); 1861 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 1862 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1863 SYSCTL_CHILDREN(rack_counters), 1864 OID_AUTO, "tlp_nada", CTLFLAG_RD, 1865 &rack_tlp_does_nada, 1866 "Total number of nada tlp calls"); 1867 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1868 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1869 SYSCTL_CHILDREN(rack_counters), 1870 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1871 &rack_try_scwnd, 1872 "Total number of scwnd attempts"); 1873 1874 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 
1875 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1876 SYSCTL_CHILDREN(rack_counters), 1877 OID_AUTO, "timer_hole", CTLFLAG_RD, 1878 &rack_per_timer_hole, 1879 "Total number of persists started in a timer hole"); 1880 1881 rack_sbsndptr_wrong = counter_u64_alloc(M_WAITOK); 1882 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1883 SYSCTL_CHILDREN(rack_counters), 1884 OID_AUTO, "sndptr_wrong", CTLFLAG_RD, 1885 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect"); 1886 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK); 1887 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1888 SYSCTL_CHILDREN(rack_counters), 1889 OID_AUTO, "sndptr_right", CTLFLAG_RD, 1890 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct"); 1891 1892 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1893 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1894 OID_AUTO, "outsize", CTLFLAG_RD, 1895 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1896 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1897 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1898 OID_AUTO, "opts", CTLFLAG_RD, 1899 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1900 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1901 SYSCTL_CHILDREN(rack_sysctl_root), 1902 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1903 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1904 } 1905 1906 static __inline int 1907 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1908 { 1909 if (SEQ_GEQ(b->r_start, a->r_start) && 1910 SEQ_LT(b->r_start, a->r_end)) { 1911 /* 1912 * The entry b is within the 1913 * block a. i.e.: 1914 * a -- |-------------| 1915 * b -- |----| 1916 * <or> 1917 * b -- |------| 1918 * <or> 1919 * b -- |-----------| 1920 */ 1921 return (0); 1922 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1923 /* 1924 * b starts at or after the end 1925 * of a, so a is said to be 1926 * smaller than b. 1927 * i.e.: 1928 * a -- |------| 1929 * b -- |--------| 1930 * or 1931 * b -- |-----| 1932 */ 1933 return (1); 1934 } 1935 /* 1936 * What's left is where a is 1937 * larger than b. i.e.: 1938 * a -- |-------| 1939 * b -- |---| 1940 * or even possibly 1941 * b -- |--------------| 1942 */ 1943 return (-1); 1944 } 1945 1946 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1947 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1948 1949 static uint32_t 1950 rc_init_window(struct tcp_rack *rack) 1951 { 1952 uint32_t win; 1953 1954 if (rack->rc_init_win == 0) { 1955 /* 1956 * Nothing set by the user, use the system stack 1957 * default.
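 * (A hedged example: if the user instead sets rc_init_win to 20, the
 * code just below returns 20 * ctf_fixed_maxseg(rack->rc_tp), i.e.
 * roughly 29000 bytes with a typical 1448-byte segment size.)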
1958 */ 1959 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1960 } 1961 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1962 return (win); 1963 } 1964 1965 static uint64_t 1966 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1967 { 1968 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1969 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1970 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1971 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1972 else 1973 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1974 } 1975 1976 static uint64_t 1977 rack_get_bw(struct tcp_rack *rack) 1978 { 1979 if (rack->use_fixed_rate) { 1980 /* Return the fixed pacing rate */ 1981 return (rack_get_fixed_pacing_bw(rack)); 1982 } 1983 if (rack->r_ctl.gp_bw == 0) { 1984 /* 1985 * We have yet no b/w measurement, 1986 * if we have a user set initial bw 1987 * return it. If we don't have that and 1988 * we have an srtt, use the tcp IW (10) to 1989 * calculate a fictional b/w over the SRTT 1990 * which is more or less a guess. Note 1991 * we don't use our IW from rack on purpose 1992 * so if we have like IW=30, we are not 1993 * calculating a "huge" b/w. 1994 */ 1995 uint64_t bw, srtt; 1996 if (rack->r_ctl.init_rate) 1997 return (rack->r_ctl.init_rate); 1998 1999 /* Has the user set a max peak rate? */ 2000 #ifdef NETFLIX_PEAKRATE 2001 if (rack->rc_tp->t_maxpeakrate) 2002 return (rack->rc_tp->t_maxpeakrate); 2003 #endif 2004 /* Ok lets come up with the IW guess, if we have a srtt */ 2005 if (rack->rc_tp->t_srtt == 0) { 2006 /* 2007 * Go with old pacing method 2008 * i.e. burst mitigation only. 2009 */ 2010 return (0); 2011 } 2012 /* Ok lets get the initial TCP win (not racks) */ 2013 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2014 srtt = (uint64_t)rack->rc_tp->t_srtt; 2015 bw *= (uint64_t)USECS_IN_SECOND; 2016 bw /= srtt; 2017 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2018 bw = rack->r_ctl.bw_rate_cap; 2019 return (bw); 2020 } else { 2021 uint64_t bw; 2022 2023 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2024 /* Averaging is done, we can return the value */ 2025 bw = rack->r_ctl.gp_bw; 2026 } else { 2027 /* Still doing initial average must calculate */ 2028 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 2029 } 2030 #ifdef NETFLIX_PEAKRATE 2031 if ((rack->rc_tp->t_maxpeakrate) && 2032 (bw > rack->rc_tp->t_maxpeakrate)) { 2033 /* The user has set a peak rate to pace at 2034 * don't allow us to pace faster than that. 
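 * (Note: the no-measurement branch above also honors t_maxpeakrate;
 * here we only clamp when the averaged goodput estimate exceeds the
 * user's cap.)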
2035 */ 2036 return (rack->rc_tp->t_maxpeakrate); 2037 } 2038 #endif 2039 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2040 bw = rack->r_ctl.bw_rate_cap; 2041 return (bw); 2042 } 2043 } 2044 2045 static uint16_t 2046 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2047 { 2048 if (rack->use_fixed_rate) { 2049 return (100); 2050 } else if (rack->in_probe_rtt && (rsm == NULL)) 2051 return (rack->r_ctl.rack_per_of_gp_probertt); 2052 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2053 rack->r_ctl.rack_per_of_gp_rec)) { 2054 if (rsm) { 2055 /* a retransmission always use the recovery rate */ 2056 return (rack->r_ctl.rack_per_of_gp_rec); 2057 } else if (rack->rack_rec_nonrxt_use_cr) { 2058 /* Directed to use the configured rate */ 2059 goto configured_rate; 2060 } else if (rack->rack_no_prr && 2061 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2062 /* No PRR, lets just use the b/w estimate only */ 2063 return (100); 2064 } else { 2065 /* 2066 * Here we may have a non-retransmit but we 2067 * have no overrides, so just use the recovery 2068 * rate (prr is in effect). 2069 */ 2070 return (rack->r_ctl.rack_per_of_gp_rec); 2071 } 2072 } 2073 configured_rate: 2074 /* For the configured rate we look at our cwnd vs the ssthresh */ 2075 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2076 return (rack->r_ctl.rack_per_of_gp_ss); 2077 else 2078 return (rack->r_ctl.rack_per_of_gp_ca); 2079 } 2080 2081 static void 2082 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 2083 { 2084 /* 2085 * Types of logs (mod value) 2086 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 2087 * 2 = a dsack round begins, persist is reset to 16. 2088 * 3 = a dsack round ends 2089 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 2090 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 2091 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
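 * The mod value is carried in flex8 below so a log post-processor
 * can tell these cases apart.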
2092 */ 2093 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2094 union tcp_log_stackspecific log; 2095 struct timeval tv; 2096 2097 memset(&log, 0, sizeof(log)); 2098 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2099 log.u_bbr.flex1 <<= 1; 2100 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2101 log.u_bbr.flex1 <<= 1; 2102 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2103 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2104 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2105 log.u_bbr.flex4 = flex4; 2106 log.u_bbr.flex5 = flex5; 2107 log.u_bbr.flex6 = flex6; 2108 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2109 log.u_bbr.flex8 = mod; 2110 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2111 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2112 &rack->rc_inp->inp_socket->so_rcv, 2113 &rack->rc_inp->inp_socket->so_snd, 2114 RACK_DSACK_HANDLING, 0, 2115 0, &log, false, &tv); 2116 } 2117 } 2118 2119 static void 2120 rack_log_hdwr_pacing(struct tcp_rack *rack, 2121 uint64_t rate, uint64_t hw_rate, int line, 2122 int error, uint16_t mod) 2123 { 2124 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2125 union tcp_log_stackspecific log; 2126 struct timeval tv; 2127 const struct ifnet *ifp; 2128 2129 memset(&log, 0, sizeof(log)); 2130 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2131 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2132 if (rack->r_ctl.crte) { 2133 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2134 } else if (rack->rc_inp->inp_route.ro_nh && 2135 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2136 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2137 } else 2138 ifp = NULL; 2139 if (ifp) { 2140 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2141 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2142 } 2143 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2144 log.u_bbr.bw_inuse = rate; 2145 log.u_bbr.flex5 = line; 2146 log.u_bbr.flex6 = error; 2147 log.u_bbr.flex7 = mod; 2148 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2149 log.u_bbr.flex8 = rack->use_fixed_rate; 2150 log.u_bbr.flex8 <<= 1; 2151 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2152 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2153 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2154 if (rack->r_ctl.crte) 2155 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2156 else 2157 log.u_bbr.cur_del_rate = 0; 2158 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2159 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2160 &rack->rc_inp->inp_socket->so_rcv, 2161 &rack->rc_inp->inp_socket->so_snd, 2162 BBR_LOG_HDWR_PACE, 0, 2163 0, &log, false, &tv); 2164 } 2165 } 2166 2167 static uint64_t 2168 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2169 { 2170 /* 2171 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
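 * (A hedged example, assuming bw is expressed in bytes per second: a
 * gain of 150 turns a 1,000,000 byte/sec estimate into a 1,500,000
 * byte/sec pacing target; the result below is also never allowed to
 * fall under RACK_MIN_BW.)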
2172 */ 2173 uint64_t bw_est, high_rate; 2174 uint64_t gain; 2175 2176 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2177 bw_est = bw * gain; 2178 bw_est /= (uint64_t)100; 2179 /* Never fall below the minimum (def 64kbps) */ 2180 if (bw_est < RACK_MIN_BW) 2181 bw_est = RACK_MIN_BW; 2182 if (rack->r_rack_hw_rate_caps) { 2183 /* Rate caps are in place */ 2184 if (rack->r_ctl.crte != NULL) { 2185 /* We have a hdwr rate already */ 2186 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2187 if (bw_est >= high_rate) { 2188 /* We are capping bw at the highest rate table entry */ 2189 rack_log_hdwr_pacing(rack, 2190 bw_est, high_rate, __LINE__, 2191 0, 3); 2192 bw_est = high_rate; 2193 if (capped) 2194 *capped = 1; 2195 } 2196 } else if ((rack->rack_hdrw_pacing == 0) && 2197 (rack->rack_hdw_pace_ena) && 2198 (rack->rack_attempt_hdwr_pace == 0) && 2199 (rack->rc_inp->inp_route.ro_nh != NULL) && 2200 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2201 /* 2202 * Special case, we have not yet attempted hardware 2203 * pacing, and yet we may, when we do, find out if we are 2204 * above the highest rate. We need to know the maxbw for the interface 2205 * in question (if it supports ratelimiting). We get back 2206 * a 0, if the interface is not found in the RL lists. 2207 */ 2208 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2209 if (high_rate) { 2210 /* Yep, we have a rate is it above this rate? */ 2211 if (bw_est > high_rate) { 2212 bw_est = high_rate; 2213 if (capped) 2214 *capped = 1; 2215 } 2216 } 2217 } 2218 } 2219 return (bw_est); 2220 } 2221 2222 static void 2223 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2224 { 2225 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2226 union tcp_log_stackspecific log; 2227 struct timeval tv; 2228 2229 if ((mod != 1) && (rack_verbose_logging == 0)) { 2230 /* 2231 * We get 3 values currently for mod 2232 * 1 - We are retransmitting and this tells the reason. 2233 * 2 - We are clearing a dup-ack count. 2234 * 3 - We are incrementing a dup-ack count. 2235 * 2236 * The clear/increment are only logged 2237 * if you have BBverbose on. 
2238 */ 2239 return; 2240 } 2241 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2242 log.u_bbr.flex1 = tsused; 2243 log.u_bbr.flex2 = thresh; 2244 log.u_bbr.flex3 = rsm->r_flags; 2245 log.u_bbr.flex4 = rsm->r_dupack; 2246 log.u_bbr.flex5 = rsm->r_start; 2247 log.u_bbr.flex6 = rsm->r_end; 2248 log.u_bbr.flex8 = mod; 2249 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2250 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2251 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2252 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2253 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2254 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2255 log.u_bbr.pacing_gain = rack->r_must_retran; 2256 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2257 &rack->rc_inp->inp_socket->so_rcv, 2258 &rack->rc_inp->inp_socket->so_snd, 2259 BBR_LOG_SETTINGS_CHG, 0, 2260 0, &log, false, &tv); 2261 } 2262 } 2263 2264 static void 2265 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2266 { 2267 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2268 union tcp_log_stackspecific log; 2269 struct timeval tv; 2270 2271 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2272 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2273 log.u_bbr.flex2 = to; 2274 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2275 log.u_bbr.flex4 = slot; 2276 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2277 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2278 log.u_bbr.flex7 = rack->rc_in_persist; 2279 log.u_bbr.flex8 = which; 2280 if (rack->rack_no_prr) 2281 log.u_bbr.pkts_out = 0; 2282 else 2283 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2284 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2285 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2286 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2287 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2288 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2289 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2290 log.u_bbr.pacing_gain = rack->r_must_retran; 2291 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2292 log.u_bbr.lost = rack_rto_min; 2293 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2294 &rack->rc_inp->inp_socket->so_rcv, 2295 &rack->rc_inp->inp_socket->so_snd, 2296 BBR_LOG_TIMERSTAR, 0, 2297 0, &log, false, &tv); 2298 } 2299 } 2300 2301 static void 2302 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2303 { 2304 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2305 union tcp_log_stackspecific log; 2306 struct timeval tv; 2307 2308 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2309 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2310 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2311 log.u_bbr.flex8 = to_num; 2312 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2313 log.u_bbr.flex2 = rack->rc_rack_rtt; 2314 if (rsm == NULL) 2315 log.u_bbr.flex3 = 0; 2316 else 2317 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2318 if (rack->rack_no_prr) 2319 log.u_bbr.flex5 = 0; 2320 else 2321 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2322 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2323 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2324 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2325 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2326 log.u_bbr.pacing_gain = rack->r_must_retran; 2327 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2328 &rack->rc_inp->inp_socket->so_rcv, 2329 &rack->rc_inp->inp_socket->so_snd, 2330 BBR_LOG_RTO, 0, 2331 0, &log, false, &tv); 2332 } 2333 } 2334 2335 static void 2336 
rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2337 struct rack_sendmap *prev, 2338 struct rack_sendmap *rsm, 2339 struct rack_sendmap *next, 2340 int flag, uint32_t th_ack, int line) 2341 { 2342 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2343 union tcp_log_stackspecific log; 2344 struct timeval tv; 2345 2346 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2347 log.u_bbr.flex8 = flag; 2348 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2349 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2350 log.u_bbr.cur_del_rate = (uint64_t)prev; 2351 log.u_bbr.delRate = (uint64_t)rsm; 2352 log.u_bbr.rttProp = (uint64_t)next; 2353 log.u_bbr.flex7 = 0; 2354 if (prev) { 2355 log.u_bbr.flex1 = prev->r_start; 2356 log.u_bbr.flex2 = prev->r_end; 2357 log.u_bbr.flex7 |= 0x4; 2358 } 2359 if (rsm) { 2360 log.u_bbr.flex3 = rsm->r_start; 2361 log.u_bbr.flex4 = rsm->r_end; 2362 log.u_bbr.flex7 |= 0x2; 2363 } 2364 if (next) { 2365 log.u_bbr.flex5 = next->r_start; 2366 log.u_bbr.flex6 = next->r_end; 2367 log.u_bbr.flex7 |= 0x1; 2368 } 2369 log.u_bbr.applimited = line; 2370 log.u_bbr.pkts_out = th_ack; 2371 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2372 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2373 if (rack->rack_no_prr) 2374 log.u_bbr.lost = 0; 2375 else 2376 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2377 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2378 &rack->rc_inp->inp_socket->so_rcv, 2379 &rack->rc_inp->inp_socket->so_snd, 2380 TCP_LOG_MAPCHG, 0, 2381 0, &log, false, &tv); 2382 } 2383 } 2384 2385 static void 2386 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2387 struct rack_sendmap *rsm, int conf) 2388 { 2389 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2390 union tcp_log_stackspecific log; 2391 struct timeval tv; 2392 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2393 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2394 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2395 log.u_bbr.flex1 = t; 2396 log.u_bbr.flex2 = len; 2397 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2398 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2399 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2400 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2401 log.u_bbr.flex7 = conf; 2402 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2403 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2404 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2405 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2406 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2407 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2408 if (rsm) { 2409 log.u_bbr.pkt_epoch = rsm->r_start; 2410 log.u_bbr.lost = rsm->r_end; 2411 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2412 log.u_bbr.pacing_gain = rsm->r_flags; 2413 } else { 2414 /* Its a SYN */ 2415 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2416 log.u_bbr.lost = 0; 2417 log.u_bbr.cwnd_gain = 0; 2418 log.u_bbr.pacing_gain = 0; 2419 } 2420 /* Write out general bits of interest rrs here */ 2421 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2422 log.u_bbr.use_lt_bw <<= 1; 2423 log.u_bbr.use_lt_bw |= rack->forced_ack; 2424 log.u_bbr.use_lt_bw <<= 1; 2425 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2426 log.u_bbr.use_lt_bw <<= 1; 2427 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2428 log.u_bbr.use_lt_bw <<= 1; 2429 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2430 log.u_bbr.use_lt_bw <<= 1; 2431 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2432 log.u_bbr.use_lt_bw <<= 1; 2433 
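/* The final two bits of this flag stack carry rc_gp_filled and rc_dragged_bottom. */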
log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2434 log.u_bbr.use_lt_bw <<= 1; 2435 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2436 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2437 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2438 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2439 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2440 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2441 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2442 log.u_bbr.bw_inuse <<= 32; 2443 if (rsm) 2444 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2445 TCP_LOG_EVENTP(tp, NULL, 2446 &rack->rc_inp->inp_socket->so_rcv, 2447 &rack->rc_inp->inp_socket->so_snd, 2448 BBR_LOG_BBRRTT, 0, 2449 0, &log, false, &tv); 2450 2451 2452 } 2453 } 2454 2455 static void 2456 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2457 { 2458 /* 2459 * Log the rtt sample we are 2460 * applying to the srtt algorithm in 2461 * useconds. 2462 */ 2463 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2464 union tcp_log_stackspecific log; 2465 struct timeval tv; 2466 2467 /* Convert our ms to a microsecond */ 2468 memset(&log, 0, sizeof(log)); 2469 log.u_bbr.flex1 = rtt; 2470 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2471 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2472 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2473 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2474 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2475 log.u_bbr.flex7 = 1; 2476 log.u_bbr.flex8 = rack->sack_attack_disable; 2477 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2478 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2479 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2480 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2481 log.u_bbr.pacing_gain = rack->r_must_retran; 2482 /* 2483 * We capture in delRate the upper 32 bits as 2484 * the confidence level we had declared, and the 2485 * lower 32 bits as the actual RTT using the arrival 2486 * timestamp. 
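 * That is, delRate = (confidence << 32) | rs_us_rtt, matching the
 * assignments just below.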
2487 */ 2488 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2489 log.u_bbr.delRate <<= 32; 2490 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2491 /* Lets capture all the things that make up t_rtxcur */ 2492 log.u_bbr.applimited = rack_rto_min; 2493 log.u_bbr.epoch = rack_rto_max; 2494 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2495 log.u_bbr.lost = rack_rto_min; 2496 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2497 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2498 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2499 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2500 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2501 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2502 &rack->rc_inp->inp_socket->so_rcv, 2503 &rack->rc_inp->inp_socket->so_snd, 2504 TCP_LOG_RTT, 0, 2505 0, &log, false, &tv); 2506 } 2507 } 2508 2509 static void 2510 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2511 { 2512 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2513 union tcp_log_stackspecific log; 2514 struct timeval tv; 2515 2516 /* Convert our ms to a microsecond */ 2517 memset(&log, 0, sizeof(log)); 2518 log.u_bbr.flex1 = rtt; 2519 log.u_bbr.flex2 = send_time; 2520 log.u_bbr.flex3 = ack_time; 2521 log.u_bbr.flex4 = where; 2522 log.u_bbr.flex7 = 2; 2523 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2524 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2525 &rack->rc_inp->inp_socket->so_rcv, 2526 &rack->rc_inp->inp_socket->so_snd, 2527 TCP_LOG_RTT, 0, 2528 0, &log, false, &tv); 2529 } 2530 } 2531 2532 2533 2534 static inline void 2535 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2536 { 2537 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2538 union tcp_log_stackspecific log; 2539 struct timeval tv; 2540 2541 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2542 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2543 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2544 log.u_bbr.flex1 = line; 2545 log.u_bbr.flex2 = tick; 2546 log.u_bbr.flex3 = tp->t_maxunacktime; 2547 log.u_bbr.flex4 = tp->t_acktime; 2548 log.u_bbr.flex8 = event; 2549 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2550 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2551 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2552 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2553 log.u_bbr.pacing_gain = rack->r_must_retran; 2554 TCP_LOG_EVENTP(tp, NULL, 2555 &rack->rc_inp->inp_socket->so_rcv, 2556 &rack->rc_inp->inp_socket->so_snd, 2557 BBR_LOG_PROGRESS, 0, 2558 0, &log, false, &tv); 2559 } 2560 } 2561 2562 static void 2563 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2564 { 2565 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2566 union tcp_log_stackspecific log; 2567 2568 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2569 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2570 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2571 log.u_bbr.flex1 = slot; 2572 if (rack->rack_no_prr) 2573 log.u_bbr.flex2 = 0; 2574 else 2575 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2576 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2577 log.u_bbr.flex8 = rack->rc_in_persist; 2578 log.u_bbr.timeStamp = cts; 2579 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2580 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2581 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2582 
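/* As in the other logging helpers, pacing_gain is reused to carry the r_must_retran flag. */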
log.u_bbr.pacing_gain = rack->r_must_retran; 2583 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2584 &rack->rc_inp->inp_socket->so_rcv, 2585 &rack->rc_inp->inp_socket->so_snd, 2586 BBR_LOG_BBRSND, 0, 2587 0, &log, false, tv); 2588 } 2589 } 2590 2591 static void 2592 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2593 { 2594 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2595 union tcp_log_stackspecific log; 2596 struct timeval tv; 2597 2598 memset(&log, 0, sizeof(log)); 2599 log.u_bbr.flex1 = did_out; 2600 log.u_bbr.flex2 = nxt_pkt; 2601 log.u_bbr.flex3 = way_out; 2602 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2603 if (rack->rack_no_prr) 2604 log.u_bbr.flex5 = 0; 2605 else 2606 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2607 log.u_bbr.flex6 = nsegs; 2608 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2609 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2610 log.u_bbr.flex7 <<= 1; 2611 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2612 log.u_bbr.flex7 <<= 1; 2613 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2614 log.u_bbr.flex8 = rack->rc_in_persist; 2615 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2616 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2617 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2618 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2619 log.u_bbr.use_lt_bw <<= 1; 2620 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2621 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2622 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2623 log.u_bbr.pacing_gain = rack->r_must_retran; 2624 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2625 &rack->rc_inp->inp_socket->so_rcv, 2626 &rack->rc_inp->inp_socket->so_snd, 2627 BBR_LOG_DOSEG_DONE, 0, 2628 0, &log, false, &tv); 2629 } 2630 } 2631 2632 static void 2633 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2634 { 2635 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2636 union tcp_log_stackspecific log; 2637 struct timeval tv; 2638 uint32_t cts; 2639 2640 memset(&log, 0, sizeof(log)); 2641 cts = tcp_get_usecs(&tv); 2642 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2643 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2644 log.u_bbr.flex4 = arg1; 2645 log.u_bbr.flex5 = arg2; 2646 log.u_bbr.flex6 = arg3; 2647 log.u_bbr.flex8 = frm; 2648 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2649 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2650 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2651 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2652 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2653 log.u_bbr.pacing_gain = rack->r_must_retran; 2654 TCP_LOG_EVENTP(tp, NULL, 2655 &tp->t_inpcb->inp_socket->so_rcv, 2656 &tp->t_inpcb->inp_socket->so_snd, 2657 TCP_HDWR_PACE_SIZE, 0, 2658 0, &log, false, &tv); 2659 } 2660 } 2661 2662 static void 2663 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2664 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2665 { 2666 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2667 union tcp_log_stackspecific log; 2668 struct timeval tv; 2669 2670 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2671 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2672 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2673 log.u_bbr.flex1 = slot; 2674 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2675 log.u_bbr.flex4 = reason; 2676 if 
(rack->rack_no_prr) 2677 log.u_bbr.flex5 = 0; 2678 else 2679 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2680 log.u_bbr.flex7 = hpts_calling; 2681 log.u_bbr.flex8 = rack->rc_in_persist; 2682 log.u_bbr.lt_epoch = cwnd_to_use; 2683 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2684 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2685 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2686 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2687 log.u_bbr.pacing_gain = rack->r_must_retran; 2688 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2689 &rack->rc_inp->inp_socket->so_rcv, 2690 &rack->rc_inp->inp_socket->so_snd, 2691 BBR_LOG_JUSTRET, 0, 2692 tlen, &log, false, &tv); 2693 } 2694 } 2695 2696 static void 2697 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2698 struct timeval *tv, uint32_t flags_on_entry) 2699 { 2700 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2701 union tcp_log_stackspecific log; 2702 2703 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2704 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2705 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2706 log.u_bbr.flex1 = line; 2707 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2708 log.u_bbr.flex3 = flags_on_entry; 2709 log.u_bbr.flex4 = us_cts; 2710 if (rack->rack_no_prr) 2711 log.u_bbr.flex5 = 0; 2712 else 2713 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2714 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2715 log.u_bbr.flex7 = hpts_removed; 2716 log.u_bbr.flex8 = 1; 2717 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2718 log.u_bbr.timeStamp = us_cts; 2719 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2720 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2721 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2722 log.u_bbr.pacing_gain = rack->r_must_retran; 2723 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2724 &rack->rc_inp->inp_socket->so_rcv, 2725 &rack->rc_inp->inp_socket->so_snd, 2726 BBR_LOG_TIMERCANC, 0, 2727 0, &log, false, tv); 2728 } 2729 } 2730 2731 static void 2732 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2733 uint32_t flex1, uint32_t flex2, 2734 uint32_t flex3, uint32_t flex4, 2735 uint32_t flex5, uint32_t flex6, 2736 uint16_t flex7, uint8_t mod) 2737 { 2738 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2739 union tcp_log_stackspecific log; 2740 struct timeval tv; 2741 2742 if (mod == 1) { 2743 /* No you can't use 1, its for the real to cancel */ 2744 return; 2745 } 2746 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2747 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2748 log.u_bbr.flex1 = flex1; 2749 log.u_bbr.flex2 = flex2; 2750 log.u_bbr.flex3 = flex3; 2751 log.u_bbr.flex4 = flex4; 2752 log.u_bbr.flex5 = flex5; 2753 log.u_bbr.flex6 = flex6; 2754 log.u_bbr.flex7 = flex7; 2755 log.u_bbr.flex8 = mod; 2756 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2757 &rack->rc_inp->inp_socket->so_rcv, 2758 &rack->rc_inp->inp_socket->so_snd, 2759 BBR_LOG_TIMERCANC, 0, 2760 0, &log, false, &tv); 2761 } 2762 } 2763 2764 static void 2765 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2766 { 2767 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2768 union tcp_log_stackspecific log; 2769 struct timeval tv; 2770 2771 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2772 log.u_bbr.flex1 = timers; 2773 log.u_bbr.flex2 = ret; 2774 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2775 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2776 log.u_bbr.flex5 = cts; 2777 if (rack->rack_no_prr) 2778 log.u_bbr.flex6 = 0; 2779 else 2780 log.u_bbr.flex6 
= rack->r_ctl.rc_prr_sndcnt; 2781 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2782 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2783 log.u_bbr.pacing_gain = rack->r_must_retran; 2784 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2785 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2786 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2787 &rack->rc_inp->inp_socket->so_rcv, 2788 &rack->rc_inp->inp_socket->so_snd, 2789 BBR_LOG_TO_PROCESS, 0, 2790 0, &log, false, &tv); 2791 } 2792 } 2793 2794 static void 2795 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2796 { 2797 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2798 union tcp_log_stackspecific log; 2799 struct timeval tv; 2800 2801 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2802 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2803 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2804 if (rack->rack_no_prr) 2805 log.u_bbr.flex3 = 0; 2806 else 2807 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2808 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2809 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2810 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2811 log.u_bbr.flex8 = frm; 2812 log.u_bbr.pkts_out = orig_cwnd; 2813 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2814 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2815 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2816 log.u_bbr.use_lt_bw <<= 1; 2817 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2818 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2819 &rack->rc_inp->inp_socket->so_rcv, 2820 &rack->rc_inp->inp_socket->so_snd, 2821 BBR_LOG_BBRUPD, 0, 2822 0, &log, false, &tv); 2823 } 2824 } 2825 2826 #ifdef NETFLIX_EXP_DETECTION 2827 static void 2828 rack_log_sad(struct tcp_rack *rack, int event) 2829 { 2830 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2831 union tcp_log_stackspecific log; 2832 struct timeval tv; 2833 2834 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2835 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2836 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2837 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2838 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2839 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2840 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2841 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2842 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2843 log.u_bbr.lt_epoch |= rack->do_detection; 2844 log.u_bbr.applimited = tcp_map_minimum; 2845 log.u_bbr.flex7 = rack->sack_attack_disable; 2846 log.u_bbr.flex8 = event; 2847 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2848 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2849 log.u_bbr.delivered = tcp_sad_decay_val; 2850 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2851 &rack->rc_inp->inp_socket->so_rcv, 2852 &rack->rc_inp->inp_socket->so_snd, 2853 TCP_SAD_DETECTION, 0, 2854 0, &log, false, &tv); 2855 } 2856 } 2857 #endif 2858 2859 static void 2860 rack_counter_destroy(void) 2861 { 2862 int i; 2863 2864 counter_u64_free(rack_fto_send); 2865 counter_u64_free(rack_fto_rsm_send); 2866 counter_u64_free(rack_nfto_resend); 2867 counter_u64_free(rack_hw_pace_init_fail); 2868 counter_u64_free(rack_hw_pace_lost); 2869 counter_u64_free(rack_non_fto_send); 2870 counter_u64_free(rack_extended_rfo); 2871 counter_u64_free(rack_ack_total); 2872 counter_u64_free(rack_express_sack); 2873 counter_u64_free(rack_sack_total); 2874 counter_u64_free(rack_move_none); 2875 counter_u64_free(rack_move_some); 2876 counter_u64_free(rack_sack_attacks_detected); 2877 counter_u64_free(rack_sack_attacks_reversed); 2878 
counter_u64_free(rack_sack_used_next_merge); 2879 counter_u64_free(rack_sack_used_prev_merge); 2880 counter_u64_free(rack_badfr); 2881 counter_u64_free(rack_badfr_bytes); 2882 counter_u64_free(rack_rtm_prr_retran); 2883 counter_u64_free(rack_rtm_prr_newdata); 2884 counter_u64_free(rack_timestamp_mismatch); 2885 counter_u64_free(rack_find_high); 2886 counter_u64_free(rack_reorder_seen); 2887 counter_u64_free(rack_tlp_tot); 2888 counter_u64_free(rack_tlp_newdata); 2889 counter_u64_free(rack_tlp_retran); 2890 counter_u64_free(rack_tlp_retran_bytes); 2891 counter_u64_free(rack_tlp_retran_fail); 2892 counter_u64_free(rack_to_tot); 2893 counter_u64_free(rack_to_arm_rack); 2894 counter_u64_free(rack_to_arm_tlp); 2895 counter_u64_free(rack_calc_zero); 2896 counter_u64_free(rack_calc_nonzero); 2897 counter_u64_free(rack_paced_segments); 2898 counter_u64_free(rack_unpaced_segments); 2899 counter_u64_free(rack_saw_enobuf); 2900 counter_u64_free(rack_saw_enobuf_hw); 2901 counter_u64_free(rack_saw_enetunreach); 2902 counter_u64_free(rack_hot_alloc); 2903 counter_u64_free(rack_to_alloc); 2904 counter_u64_free(rack_to_alloc_hard); 2905 counter_u64_free(rack_to_alloc_emerg); 2906 counter_u64_free(rack_to_alloc_limited); 2907 counter_u64_free(rack_alloc_limited_conns); 2908 counter_u64_free(rack_split_limited); 2909 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 2910 counter_u64_free(rack_proc_comp_ack[i]); 2911 } 2912 counter_u64_free(rack_multi_single_eq); 2913 counter_u64_free(rack_proc_non_comp_ack); 2914 counter_u64_free(rack_sack_proc_all); 2915 counter_u64_free(rack_sack_proc_restart); 2916 counter_u64_free(rack_sack_proc_short); 2917 counter_u64_free(rack_enter_tlp_calc); 2918 counter_u64_free(rack_used_tlpmethod); 2919 counter_u64_free(rack_used_tlpmethod2); 2920 counter_u64_free(rack_sack_skipped_acked); 2921 counter_u64_free(rack_sack_splits); 2922 counter_u64_free(rack_progress_drops); 2923 counter_u64_free(rack_input_idle_reduces); 2924 counter_u64_free(rack_collapsed_win); 2925 counter_u64_free(rack_tlp_does_nada); 2926 counter_u64_free(rack_try_scwnd); 2927 counter_u64_free(rack_per_timer_hole); 2928 counter_u64_free(rack_large_ackcmp); 2929 counter_u64_free(rack_small_ackcmp); 2930 #ifdef INVARIANTS 2931 counter_u64_free(rack_adjust_map_bw); 2932 #endif 2933 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2934 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2935 } 2936 2937 static struct rack_sendmap * 2938 rack_alloc(struct tcp_rack *rack) 2939 { 2940 struct rack_sendmap *rsm; 2941 2942 /* 2943 * First get the top of the list it in 2944 * theory is the "hottest" rsm we have, 2945 * possibly just freed by ack processing. 2946 */ 2947 if (rack->rc_free_cnt > rack_free_cache) { 2948 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2949 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2950 counter_u64_add(rack_hot_alloc, 1); 2951 rack->rc_free_cnt--; 2952 return (rsm); 2953 } 2954 /* 2955 * Once we get under our free cache we probably 2956 * no longer have a "hot" one available. Lets 2957 * get one from UMA. 2958 */ 2959 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2960 if (rsm) { 2961 rack->r_ctl.rc_num_maps_alloced++; 2962 counter_u64_add(rack_to_alloc, 1); 2963 return (rsm); 2964 } 2965 /* 2966 * Dig in to our aux rsm's (the last two) since 2967 * UMA failed to get us one. 
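 * (These are the entries deliberately kept on the free list at or
 * below the rack_free_cache threshold; they serve as an emergency
 * reserve.)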
2968 */ 2969 if (rack->rc_free_cnt) { 2970 counter_u64_add(rack_to_alloc_emerg, 1); 2971 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2972 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2973 rack->rc_free_cnt--; 2974 return (rsm); 2975 } 2976 return (NULL); 2977 } 2978 2979 static struct rack_sendmap * 2980 rack_alloc_full_limit(struct tcp_rack *rack) 2981 { 2982 if ((V_tcp_map_entries_limit > 0) && 2983 (rack->do_detection == 0) && 2984 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2985 counter_u64_add(rack_to_alloc_limited, 1); 2986 if (!rack->alloc_limit_reported) { 2987 rack->alloc_limit_reported = 1; 2988 counter_u64_add(rack_alloc_limited_conns, 1); 2989 } 2990 return (NULL); 2991 } 2992 return (rack_alloc(rack)); 2993 } 2994 2995 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2996 static struct rack_sendmap * 2997 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2998 { 2999 struct rack_sendmap *rsm; 3000 3001 if (limit_type) { 3002 /* currently there is only one limit type */ 3003 if (V_tcp_map_split_limit > 0 && 3004 (rack->do_detection == 0) && 3005 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 3006 counter_u64_add(rack_split_limited, 1); 3007 if (!rack->alloc_limit_reported) { 3008 rack->alloc_limit_reported = 1; 3009 counter_u64_add(rack_alloc_limited_conns, 1); 3010 } 3011 return (NULL); 3012 } 3013 } 3014 3015 /* allocate and mark in the limit type, if set */ 3016 rsm = rack_alloc(rack); 3017 if (rsm != NULL && limit_type) { 3018 rsm->r_limit_type = limit_type; 3019 rack->r_ctl.rc_num_split_allocs++; 3020 } 3021 return (rsm); 3022 } 3023 3024 static void 3025 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 3026 { 3027 if (rsm->r_flags & RACK_APP_LIMITED) { 3028 if (rack->r_ctl.rc_app_limited_cnt > 0) { 3029 rack->r_ctl.rc_app_limited_cnt--; 3030 } 3031 } 3032 if (rsm->r_limit_type) { 3033 /* currently there is only one limit type */ 3034 rack->r_ctl.rc_num_split_allocs--; 3035 } 3036 if (rsm == rack->r_ctl.rc_first_appl) { 3037 if (rack->r_ctl.rc_app_limited_cnt == 0) 3038 rack->r_ctl.rc_first_appl = NULL; 3039 else { 3040 /* Follow the next one out */ 3041 struct rack_sendmap fe; 3042 3043 fe.r_start = rsm->r_nseq_appl; 3044 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3045 } 3046 } 3047 if (rsm == rack->r_ctl.rc_resend) 3048 rack->r_ctl.rc_resend = NULL; 3049 if (rsm == rack->r_ctl.rc_rsm_at_retran) 3050 rack->r_ctl.rc_rsm_at_retran = NULL; 3051 if (rsm == rack->r_ctl.rc_end_appl) 3052 rack->r_ctl.rc_end_appl = NULL; 3053 if (rack->r_ctl.rc_tlpsend == rsm) 3054 rack->r_ctl.rc_tlpsend = NULL; 3055 if (rack->r_ctl.rc_sacklast == rsm) 3056 rack->r_ctl.rc_sacklast = NULL; 3057 memset(rsm, 0, sizeof(struct rack_sendmap)); 3058 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3059 rack->rc_free_cnt++; 3060 } 3061 3062 static void 3063 rack_free_trim(struct tcp_rack *rack) 3064 { 3065 struct rack_sendmap *rsm; 3066 3067 /* 3068 * Free up all the tail entries until 3069 * we get our list down to the limit. 
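 * (rack_free() parks freed entries at the head of rc_free instead of
 * handing them straight back to UMA, so the next rack_alloc() can reuse
 * a cache-warm entry; this routine walks from the tail and returns the
 * coldest entries to the zone once the cache exceeds rack_free_cache.)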
3070 */ 3071 while (rack->rc_free_cnt > rack_free_cache) { 3072 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3073 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3074 rack->rc_free_cnt--; 3075 uma_zfree(rack_zone, rsm); 3076 } 3077 } 3078 3079 3080 static uint32_t 3081 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3082 { 3083 uint64_t srtt, bw, len, tim; 3084 uint32_t segsiz, def_len, minl; 3085 3086 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3087 def_len = rack_def_data_window * segsiz; 3088 if (rack->rc_gp_filled == 0) { 3089 /* 3090 * We have no measurement (IW is in flight?) so 3091 * we can only guess using our data_window sysctl 3092 * value (usually 20MSS). 3093 */ 3094 return (def_len); 3095 } 3096 /* 3097 * Now we have a number of factors to consider. 3098 * 3099 * 1) We have a desired BDP which is usually 3100 * at least 2. 3101 * 2) We have a minimum number of rtt's usually 1 SRTT 3102 * but we allow it too to be more. 3103 * 3) We want to make sure a measurement last N useconds (if 3104 * we have set rack_min_measure_usec. 3105 * 3106 * We handle the first concern here by trying to create a data 3107 * window of max(rack_def_data_window, DesiredBDP). The 3108 * second concern we handle in not letting the measurement 3109 * window end normally until at least the required SRTT's 3110 * have gone by which is done further below in 3111 * rack_enough_for_measurement(). Finally the third concern 3112 * we also handle here by calculating how long that time 3113 * would take at the current BW and then return the 3114 * max of our first calculation and that length. Note 3115 * that if rack_min_measure_usec is 0, we don't deal 3116 * with concern 3. Also for both Concern 1 and 3 an 3117 * application limited period could end the measurement 3118 * earlier. 3119 * 3120 * So lets calculate the BDP with the "known" b/w using 3121 * the SRTT has our rtt and then multiply it by the 3122 * goal. 3123 */ 3124 bw = rack_get_bw(rack); 3125 srtt = (uint64_t)tp->t_srtt; 3126 len = bw * srtt; 3127 len /= (uint64_t)HPTS_USEC_IN_SEC; 3128 len *= max(1, rack_goal_bdp); 3129 /* Now we need to round up to the nearest MSS */ 3130 len = roundup(len, segsiz); 3131 if (rack_min_measure_usec) { 3132 /* Now calculate our min length for this b/w */ 3133 tim = rack_min_measure_usec; 3134 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3135 if (minl == 0) 3136 minl = 1; 3137 minl = roundup(minl, segsiz); 3138 if (len < minl) 3139 len = minl; 3140 } 3141 /* 3142 * Now if we have a very small window we want 3143 * to attempt to get the window that is 3144 * as small as possible. This happens on 3145 * low b/w connections and we don't want to 3146 * span huge numbers of rtt's between measurements. 3147 * 3148 * We basically include 2 over our "MIN window" so 3149 * that the measurement can be shortened (possibly) by 3150 * an ack'ed packet. 3151 */ 3152 if (len < def_len) 3153 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3154 else 3155 return (max((uint32_t)len, def_len)); 3156 3157 } 3158 3159 static int 3160 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3161 { 3162 uint32_t tim, srtts, segsiz; 3163 3164 /* 3165 * Has enough time passed for the GP measurement to be valid? 
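 * A summary of the checks made below:
 *  - everything outstanding is acked        -> valid, RACK_QUALITY_ALLACKED
 *  - th_ack below gput_seq, or fewer bytes
 *    covered than an IW/MIN_GP_WIN worth    -> not yet
 *  - the cumulative ack reached the first
 *    app-limited point                      -> valid, RACK_QUALITY_APPLIMITED
 *  - otherwise at least rack_min_srtts of
 *    gp_srtt must have elapsed              -> valid, RACK_QUALITY_HIGH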
3166 */ 3167 if ((tp->snd_max == tp->snd_una) || 3168 (th_ack == tp->snd_max)){ 3169 /* All is acked */ 3170 *quality = RACK_QUALITY_ALLACKED; 3171 return (1); 3172 } 3173 if (SEQ_LT(th_ack, tp->gput_seq)) { 3174 /* Not enough bytes yet */ 3175 return (0); 3176 } 3177 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3178 if (SEQ_LT(th_ack, tp->gput_ack) && 3179 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3180 /* Not enough bytes yet */ 3181 return (0); 3182 } 3183 if (rack->r_ctl.rc_first_appl && 3184 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3185 /* 3186 * We are up to the app limited send point 3187 * we have to measure irrespective of the time.. 3188 */ 3189 *quality = RACK_QUALITY_APPLIMITED; 3190 return (1); 3191 } 3192 /* Now what about time? */ 3193 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3194 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3195 if (tim >= srtts) { 3196 *quality = RACK_QUALITY_HIGH; 3197 return (1); 3198 } 3199 /* Nope not even a full SRTT has passed */ 3200 return (0); 3201 } 3202 3203 static void 3204 rack_log_timely(struct tcp_rack *rack, 3205 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3206 uint64_t up_bnd, int line, uint8_t method) 3207 { 3208 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3209 union tcp_log_stackspecific log; 3210 struct timeval tv; 3211 3212 memset(&log, 0, sizeof(log)); 3213 log.u_bbr.flex1 = logged; 3214 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3215 log.u_bbr.flex2 <<= 4; 3216 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3217 log.u_bbr.flex2 <<= 4; 3218 log.u_bbr.flex2 |= rack->rc_gp_incr; 3219 log.u_bbr.flex2 <<= 4; 3220 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3221 log.u_bbr.flex3 = rack->rc_gp_incr; 3222 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3223 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3224 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3225 log.u_bbr.flex7 = rack->rc_gp_bwred; 3226 log.u_bbr.flex8 = method; 3227 log.u_bbr.cur_del_rate = cur_bw; 3228 log.u_bbr.delRate = low_bnd; 3229 log.u_bbr.bw_inuse = up_bnd; 3230 log.u_bbr.rttProp = rack_get_bw(rack); 3231 log.u_bbr.pkt_epoch = line; 3232 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3233 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3234 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3235 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3236 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3237 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3238 log.u_bbr.cwnd_gain <<= 1; 3239 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3240 log.u_bbr.cwnd_gain <<= 1; 3241 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3242 log.u_bbr.cwnd_gain <<= 1; 3243 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3244 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3245 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3246 &rack->rc_inp->inp_socket->so_rcv, 3247 &rack->rc_inp->inp_socket->so_snd, 3248 TCP_TIMELY_WORK, 0, 3249 0, &log, false, &tv); 3250 } 3251 } 3252 3253 static int 3254 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3255 { 3256 /* 3257 * Before we increase we need to know if 3258 * the estimate just made was less than 3259 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3260 * 3261 * If we already are pacing at a fast enough 3262 * rate to push us faster there is no sense of 3263 * increasing. 3264 * 3265 * We first caculate our actual pacing rate (ss or ca multipler 3266 * times our cur_bw). 
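 * (Worked example with assumed numbers: at cur_bw = 10 Mbps and a
 * multiplier of 120 the actual pacing rate is 12 Mbps; with
 * last_bw_est = 11 Mbps and rack_max_per_above at its nominal 10 the
 * allowable ceiling is 12.1 Mbps, so a raise is still permitted. Had
 * last_bw_est been 10 Mbps the ceiling would be 11 Mbps and we would
 * hold steady.)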
3267 * 3268 * Then we take the last measured rate and multipy by our 3269 * maximum pacing overage to give us a max allowable rate. 3270 * 3271 * If our act_rate is smaller than our max_allowable rate 3272 * then we should increase. Else we should hold steady. 3273 * 3274 */ 3275 uint64_t act_rate, max_allow_rate; 3276 3277 if (rack_timely_no_stopping) 3278 return (1); 3279 3280 if ((cur_bw == 0) || (last_bw_est == 0)) { 3281 /* 3282 * Initial startup case or 3283 * everything is acked case. 3284 */ 3285 rack_log_timely(rack, mult, cur_bw, 0, 0, 3286 __LINE__, 9); 3287 return (1); 3288 } 3289 if (mult <= 100) { 3290 /* 3291 * We can always pace at or slightly above our rate. 3292 */ 3293 rack_log_timely(rack, mult, cur_bw, 0, 0, 3294 __LINE__, 9); 3295 return (1); 3296 } 3297 act_rate = cur_bw * (uint64_t)mult; 3298 act_rate /= 100; 3299 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3300 max_allow_rate /= 100; 3301 if (act_rate < max_allow_rate) { 3302 /* 3303 * Here the rate we are actually pacing at 3304 * is smaller than 10% above our last measurement. 3305 * This means we are pacing below what we would 3306 * like to try to achieve (plus some wiggle room). 3307 */ 3308 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3309 __LINE__, 9); 3310 return (1); 3311 } else { 3312 /* 3313 * Here we are already pacing at least rack_max_per_above(10%) 3314 * what we are getting back. This indicates most likely 3315 * that we are being limited (cwnd/rwnd/app) and can't 3316 * get any more b/w. There is no sense of trying to 3317 * raise up the pacing rate its not speeding us up 3318 * and we already are pacing faster than we are getting. 3319 */ 3320 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3321 __LINE__, 8); 3322 return (0); 3323 } 3324 } 3325 3326 static void 3327 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3328 { 3329 /* 3330 * When we drag bottom, we want to assure 3331 * that no multiplier is below 1.0, if so 3332 * we want to restore it to at least that. 3333 */ 3334 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3335 /* This is unlikely we usually do not touch recovery */ 3336 rack->r_ctl.rack_per_of_gp_rec = 100; 3337 } 3338 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3339 rack->r_ctl.rack_per_of_gp_ca = 100; 3340 } 3341 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3342 rack->r_ctl.rack_per_of_gp_ss = 100; 3343 } 3344 } 3345 3346 static void 3347 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3348 { 3349 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3350 rack->r_ctl.rack_per_of_gp_ca = 100; 3351 } 3352 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3353 rack->r_ctl.rack_per_of_gp_ss = 100; 3354 } 3355 } 3356 3357 static void 3358 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3359 { 3360 int32_t calc, logged, plus; 3361 3362 logged = 0; 3363 3364 if (override) { 3365 /* 3366 * override is passed when we are 3367 * loosing b/w and making one last 3368 * gasp at trying to not loose out 3369 * to a new-reno flow. 3370 */ 3371 goto extra_boost; 3372 } 3373 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3374 if (rack->rc_gp_incr && 3375 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3376 /* 3377 * Reset and get 5 strokes more before the boost. Note 3378 * that the count is 0 based so we have to add one. 
3379 */ 3380 extra_boost: 3381 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3382 rack->rc_gp_timely_inc_cnt = 0; 3383 } else 3384 plus = (uint32_t)rack_gp_increase_per; 3385 /* Must be at least 1% increase for true timely increases */ 3386 if ((plus < 1) && 3387 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3388 plus = 1; 3389 if (rack->rc_gp_saw_rec && 3390 (rack->rc_gp_no_rec_chg == 0) && 3391 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3392 rack->r_ctl.rack_per_of_gp_rec)) { 3393 /* We have been in recovery ding it too */ 3394 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3395 if (calc > 0xffff) 3396 calc = 0xffff; 3397 logged |= 1; 3398 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3399 if (rack_per_upper_bound_ss && 3400 (rack->rc_dragged_bottom == 0) && 3401 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3402 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3403 } 3404 if (rack->rc_gp_saw_ca && 3405 (rack->rc_gp_saw_ss == 0) && 3406 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3407 rack->r_ctl.rack_per_of_gp_ca)) { 3408 /* In CA */ 3409 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3410 if (calc > 0xffff) 3411 calc = 0xffff; 3412 logged |= 2; 3413 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3414 if (rack_per_upper_bound_ca && 3415 (rack->rc_dragged_bottom == 0) && 3416 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3417 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3418 } 3419 if (rack->rc_gp_saw_ss && 3420 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3421 rack->r_ctl.rack_per_of_gp_ss)) { 3422 /* In SS */ 3423 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3424 if (calc > 0xffff) 3425 calc = 0xffff; 3426 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3427 if (rack_per_upper_bound_ss && 3428 (rack->rc_dragged_bottom == 0) && 3429 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3430 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3431 logged |= 4; 3432 } 3433 if (logged && 3434 (rack->rc_gp_incr == 0)){ 3435 /* Go into increment mode */ 3436 rack->rc_gp_incr = 1; 3437 rack->rc_gp_timely_inc_cnt = 0; 3438 } 3439 if (rack->rc_gp_incr && 3440 logged && 3441 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3442 rack->rc_gp_timely_inc_cnt++; 3443 } 3444 rack_log_timely(rack, logged, plus, 0, 0, 3445 __LINE__, 1); 3446 } 3447 3448 static uint32_t 3449 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3450 { 3451 /* 3452 * norm_grad = rtt_diff / minrtt; 3453 * new_per = curper * (1 - B * norm_grad) 3454 * 3455 * B = rack_gp_decrease_per (default 10%) 3456 * rtt_dif = input var current rtt-diff 3457 * curper = input var current percentage 3458 * minrtt = from rack filter 3459 * 3460 */ 3461 uint64_t perf; 3462 3463 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3464 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3465 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3466 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3467 (uint64_t)1000000)) / 3468 (uint64_t)1000000); 3469 if (perf > curper) { 3470 /* TSNH */ 3471 perf = curper - 1; 3472 } 3473 return ((uint32_t)perf); 3474 } 3475 3476 static uint32_t 3477 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3478 { 3479 /* 3480 * highrttthresh 3481 * result = curper * (1 - (B * ( 1 - ------ )) 3482 * gp_srtt 3483 * 3484 * B = rack_gp_decrease_per (default 10%) 3485 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3486 */ 3487 uint64_t perf; 3488 uint32_t 
highrttthresh; 3489 3490 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3491 3492 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3493 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3494 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3495 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3496 return (perf); 3497 } 3498 3499 static void 3500 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3501 { 3502 uint64_t logvar, logvar2, logvar3; 3503 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3504 3505 if (rack->rc_gp_incr) { 3506 /* Turn off increment counting */ 3507 rack->rc_gp_incr = 0; 3508 rack->rc_gp_timely_inc_cnt = 0; 3509 } 3510 ss_red = ca_red = rec_red = 0; 3511 logged = 0; 3512 /* Calculate the reduction value */ 3513 if (rtt_diff < 0) { 3514 rtt_diff *= -1; 3515 } 3516 /* Must be at least 1% reduction */ 3517 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3518 /* We have been in recovery ding it too */ 3519 if (timely_says == 2) { 3520 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3521 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3522 if (alt < new_per) 3523 val = alt; 3524 else 3525 val = new_per; 3526 } else 3527 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3528 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3529 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3530 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3531 } else { 3532 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3533 rec_red = 0; 3534 } 3535 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3536 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3537 logged |= 1; 3538 } 3539 if (rack->rc_gp_saw_ss) { 3540 /* Sent in SS */ 3541 if (timely_says == 2) { 3542 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3543 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3544 if (alt < new_per) 3545 val = alt; 3546 else 3547 val = new_per; 3548 } else 3549 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3550 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3551 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3552 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3553 } else { 3554 ss_red = new_per; 3555 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3556 logvar = new_per; 3557 logvar <<= 32; 3558 logvar |= alt; 3559 logvar2 = (uint32_t)rtt; 3560 logvar2 <<= 32; 3561 logvar2 |= (uint32_t)rtt_diff; 3562 logvar3 = rack_gp_rtt_maxmul; 3563 logvar3 <<= 32; 3564 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3565 rack_log_timely(rack, timely_says, 3566 logvar2, logvar3, 3567 logvar, __LINE__, 10); 3568 } 3569 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3570 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3571 logged |= 4; 3572 } else if (rack->rc_gp_saw_ca) { 3573 /* Sent in CA */ 3574 if (timely_says == 2) { 3575 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3576 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3577 if (alt < new_per) 3578 val = alt; 3579 else 3580 val = new_per; 3581 } else 3582 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3583 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3584 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3585 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3586 } else { 3587 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3588 ca_red = 0; 3589 logvar = new_per; 3590 logvar <<= 32; 3591 logvar |= alt; 3592 logvar2 = (uint32_t)rtt; 3593 logvar2 <<= 32; 3594 logvar2 |= (uint32_t)rtt_diff; 3595 logvar3 = rack_gp_rtt_maxmul; 3596 logvar3 <<= 32; 3597 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3598 rack_log_timely(rack, timely_says, 3599 logvar2, logvar3, 3600 logvar, __LINE__, 10); 3601 } 3602 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3603 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3604 logged |= 2; 3605 } 3606 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3607 rack->rc_gp_timely_dec_cnt++; 3608 if (rack_timely_dec_clear && 3609 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3610 rack->rc_gp_timely_dec_cnt = 0; 3611 } 3612 logvar = ss_red; 3613 logvar <<= 32; 3614 logvar |= ca_red; 3615 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3616 __LINE__, 2); 3617 } 3618 3619 static void 3620 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3621 uint32_t rtt, uint32_t line, uint8_t reas) 3622 { 3623 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3624 union tcp_log_stackspecific log; 3625 struct timeval tv; 3626 3627 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3628 log.u_bbr.flex1 = line; 3629 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3630 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3631 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3632 log.u_bbr.flex5 = rtt; 3633 log.u_bbr.flex6 = rack->rc_highly_buffered; 3634 log.u_bbr.flex6 <<= 1; 3635 log.u_bbr.flex6 |= rack->forced_ack; 3636 log.u_bbr.flex6 <<= 1; 3637 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3638 log.u_bbr.flex6 <<= 1; 3639 log.u_bbr.flex6 |= rack->in_probe_rtt; 3640 log.u_bbr.flex6 <<= 1; 3641 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3642 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3643 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3644 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3645 log.u_bbr.flex8 = reas; 3646 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3647 log.u_bbr.delRate = rack_get_bw(rack); 3648 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3649 log.u_bbr.cur_del_rate <<= 32; 3650 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3651 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3652 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3653 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3654 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3655 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3656 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3657 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3658 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3659 log.u_bbr.rttProp = us_cts; 3660 log.u_bbr.rttProp <<= 32; 3661 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3662 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3663 &rack->rc_inp->inp_socket->so_rcv, 3664 &rack->rc_inp->inp_socket->so_snd, 3665 BBR_LOG_RTT_SHRINKS, 0, 3666 0, &log, false, &rack->r_ctl.act_rcv_time); 3667 } 3668 } 3669 3670 static void 3671 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3672 { 3673 uint64_t bwdp; 3674 3675 bwdp = rack_get_bw(rack); 3676 bwdp *= (uint64_t)rtt; 3677 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3678 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3679 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3680 /* 3681 * A window protocol must be able to have 4 packets 3682 * outstanding as the floor in order to function 3683 * (especially considering delayed ack :D). 3684 */ 3685 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3686 } 3687 } 3688 3689 static void 3690 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3691 { 3692 /** 3693 * ProbeRTT is a bit different in rack_pacing than in 3694 * BBR. It is like BBR in that it uses the lowering of 3695 * the RTT as a signal that we saw something new and 3696 * counts from there for how long between. But it is 3697 * different in that its quite simple. It does not 3698 * play with the cwnd and wait until we get down 3699 * to N segments outstanding and hold that for 3700 * 200ms. Instead it just sets the pacing reduction 3701 * rate to a set percentage (70 by default) and hold 3702 * that for a number of recent GP Srtt's. 3703 */ 3704 uint32_t segsiz; 3705 3706 if (rack->rc_gp_dyn_mul == 0) 3707 return; 3708 3709 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3710 /* We are idle */ 3711 return; 3712 } 3713 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3714 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3715 /* 3716 * Stop the goodput now, the idea here is 3717 * that future measurements with in_probe_rtt 3718 * won't register if they are not greater so 3719 * we want to get what info (if any) is available 3720 * now. 3721 */ 3722 rack_do_goodput_measurement(rack->rc_tp, rack, 3723 rack->rc_tp->snd_una, __LINE__, 3724 RACK_QUALITY_PROBERTT); 3725 } 3726 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3727 rack->r_ctl.rc_time_probertt_entered = us_cts; 3728 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3729 rack->r_ctl.rc_pace_min_segs); 3730 rack->in_probe_rtt = 1; 3731 rack->measure_saw_probe_rtt = 1; 3732 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3733 rack->r_ctl.rc_time_probertt_starts = 0; 3734 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3735 if (rack_probertt_use_min_rtt_entry) 3736 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3737 else 3738 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3739 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3740 __LINE__, RACK_RTTS_ENTERPROBE); 3741 } 3742 3743 static void 3744 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3745 { 3746 struct rack_sendmap *rsm; 3747 uint32_t segsiz; 3748 3749 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3750 rack->r_ctl.rc_pace_min_segs); 3751 rack->in_probe_rtt = 0; 3752 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3753 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3754 /* 3755 * Stop the goodput now, the idea here is 3756 * that future measurements with in_probe_rtt 3757 * won't register if they are not greater so 3758 * we want to get what info (if any) is available 3759 * now. 3760 */ 3761 rack_do_goodput_measurement(rack->rc_tp, rack, 3762 rack->rc_tp->snd_una, __LINE__, 3763 RACK_QUALITY_PROBERTT); 3764 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3765 /* 3766 * We don't have enough data to make a measurement. 3767 * So lets just stop and start here after exiting 3768 * probe-rtt. We probably are not interested in 3769 * the results anyway. 3770 */ 3771 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3772 } 3773 /* 3774 * Measurements through the current snd_max are going 3775 * to be limited by the slower pacing rate. 
3776 * 3777 * We need to mark these as app-limited so we 3778 * don't collapse the b/w. 3779 */ 3780 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3781 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3782 if (rack->r_ctl.rc_app_limited_cnt == 0) 3783 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3784 else { 3785 /* 3786 * Go out to the end app limited and mark 3787 * this new one as next and move the end_appl up 3788 * to this guy. 3789 */ 3790 if (rack->r_ctl.rc_end_appl) 3791 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3792 rack->r_ctl.rc_end_appl = rsm; 3793 } 3794 rsm->r_flags |= RACK_APP_LIMITED; 3795 rack->r_ctl.rc_app_limited_cnt++; 3796 } 3797 /* 3798 * Now, we need to examine our pacing rate multipliers. 3799 * If its under 100%, we need to kick it back up to 3800 * 100%. We also don't let it be over our "max" above 3801 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3802 * Note setting clamp_atexit_prtt to 0 has the effect 3803 * of setting CA/SS to 100% always at exit (which is 3804 * the default behavior). 3805 */ 3806 if (rack_probertt_clear_is) { 3807 rack->rc_gp_incr = 0; 3808 rack->rc_gp_bwred = 0; 3809 rack->rc_gp_timely_inc_cnt = 0; 3810 rack->rc_gp_timely_dec_cnt = 0; 3811 } 3812 /* Do we do any clamping at exit? */ 3813 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3814 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3815 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3816 } 3817 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3818 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3819 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3820 } 3821 /* 3822 * Lets set rtt_diff to 0, so that we will get a "boost" 3823 * after exiting. 3824 */ 3825 rack->r_ctl.rc_rtt_diff = 0; 3826 3827 /* Clear all flags so we start fresh */ 3828 rack->rc_tp->t_bytes_acked = 0; 3829 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3830 /* 3831 * If configured to, set the cwnd and ssthresh to 3832 * our targets. 3833 */ 3834 if (rack_probe_rtt_sets_cwnd) { 3835 uint64_t ebdp; 3836 uint32_t setto; 3837 3838 /* Set ssthresh so we get into CA once we hit our target */ 3839 if (rack_probertt_use_min_rtt_exit == 1) { 3840 /* Set to min rtt */ 3841 rack_set_prtt_target(rack, segsiz, 3842 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3843 } else if (rack_probertt_use_min_rtt_exit == 2) { 3844 /* Set to current gp rtt */ 3845 rack_set_prtt_target(rack, segsiz, 3846 rack->r_ctl.rc_gp_srtt); 3847 } else if (rack_probertt_use_min_rtt_exit == 3) { 3848 /* Set to entry gp rtt */ 3849 rack_set_prtt_target(rack, segsiz, 3850 rack->r_ctl.rc_entry_gp_rtt); 3851 } else { 3852 uint64_t sum; 3853 uint32_t setval; 3854 3855 sum = rack->r_ctl.rc_entry_gp_rtt; 3856 sum *= 10; 3857 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3858 if (sum >= 20) { 3859 /* 3860 * A highly buffered path needs 3861 * cwnd space for timely to work. 3862 * Lets set things up as if 3863 * we are heading back here again. 3864 */ 3865 setval = rack->r_ctl.rc_entry_gp_rtt; 3866 } else if (sum >= 15) { 3867 /* 3868 * Lets take the smaller of the 3869 * two since we are just somewhat 3870 * buffered. 3871 */ 3872 setval = rack->r_ctl.rc_gp_srtt; 3873 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3874 setval = rack->r_ctl.rc_entry_gp_rtt; 3875 } else { 3876 /* 3877 * Here we are not highly buffered 3878 * and should pick the min we can to 3879 * keep from causing loss. 
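 * (Illustrative numbers: sum is entry_gp_rtt * 10 / gp_srtt, so a 60ms
 * entry RTT against a 20ms gp_srtt yields 30 (>= 20) and we keep the
 * full entry RTT; 36ms against 20ms yields 18 and we take the smaller
 * of the two RTTs; below a 1.5x ratio we fall through to the filtered
 * minimum RTT here.)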
3880 */ 3881 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3882 } 3883 rack_set_prtt_target(rack, segsiz, 3884 setval); 3885 } 3886 if (rack_probe_rtt_sets_cwnd > 1) { 3887 /* There is a percentage here to boost */ 3888 ebdp = rack->r_ctl.rc_target_probertt_flight; 3889 ebdp *= rack_probe_rtt_sets_cwnd; 3890 ebdp /= 100; 3891 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3892 } else 3893 setto = rack->r_ctl.rc_target_probertt_flight; 3894 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3895 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3896 /* Enforce a min */ 3897 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3898 } 3899 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3900 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3901 } 3902 rack_log_rtt_shrinks(rack, us_cts, 3903 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3904 __LINE__, RACK_RTTS_EXITPROBE); 3905 /* Clear times last so log has all the info */ 3906 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3907 rack->r_ctl.rc_time_probertt_entered = us_cts; 3908 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3909 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3910 } 3911 3912 static void 3913 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3914 { 3915 /* Check in on probe-rtt */ 3916 if (rack->rc_gp_filled == 0) { 3917 /* We do not do p-rtt unless we have gp measurements */ 3918 return; 3919 } 3920 if (rack->in_probe_rtt) { 3921 uint64_t no_overflow; 3922 uint32_t endtime, must_stay; 3923 3924 if (rack->r_ctl.rc_went_idle_time && 3925 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3926 /* 3927 * We went idle during prtt, just exit now. 3928 */ 3929 rack_exit_probertt(rack, us_cts); 3930 } else if (rack_probe_rtt_safety_val && 3931 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3932 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3933 /* 3934 * Probe RTT safety value triggered! 3935 */ 3936 rack_log_rtt_shrinks(rack, us_cts, 3937 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3938 __LINE__, RACK_RTTS_SAFETY); 3939 rack_exit_probertt(rack, us_cts); 3940 } 3941 /* Calculate the max we will wait */ 3942 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3943 if (rack->rc_highly_buffered) 3944 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3945 /* Calculate the min we must wait */ 3946 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3947 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3948 TSTMP_LT(us_cts, endtime)) { 3949 uint32_t calc; 3950 /* Do we lower more? 
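 * (Sketch of the math below: calc is the number of whole gp_srtt's spent
 * in probe-rtt so far; each one subtracts rack_per_of_gp_probertt_reduce
 * percentage points from the probe-rtt pacing percentage, floored at
 * rack_per_of_gp_lowthresh. With assumed values, two elapsed srtt's and
 * a reduce step of 10 take the default 70% setting down to 50%.)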
*/ 3951 no_exit: 3952 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3953 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3954 else 3955 calc = 0; 3956 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3957 if (calc) { 3958 /* Maybe */ 3959 calc *= rack_per_of_gp_probertt_reduce; 3960 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3961 /* Limit it too */ 3962 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3963 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3964 } 3965 /* We must reach target or the time set */ 3966 return; 3967 } 3968 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3969 if ((TSTMP_LT(us_cts, must_stay) && 3970 rack->rc_highly_buffered) || 3971 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3972 rack->r_ctl.rc_target_probertt_flight)) { 3973 /* We are not past the must_stay time */ 3974 goto no_exit; 3975 } 3976 rack_log_rtt_shrinks(rack, us_cts, 3977 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3978 __LINE__, RACK_RTTS_REACHTARGET); 3979 rack->r_ctl.rc_time_probertt_starts = us_cts; 3980 if (rack->r_ctl.rc_time_probertt_starts == 0) 3981 rack->r_ctl.rc_time_probertt_starts = 1; 3982 /* Restore back to our rate we want to pace at in prtt */ 3983 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3984 } 3985 /* 3986 * Setup our end time, some number of gp_srtts plus 200ms. 3987 */ 3988 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3989 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3990 if (rack_probertt_gpsrtt_cnt_div) 3991 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3992 else 3993 endtime = 0; 3994 endtime += rack_min_probertt_hold; 3995 endtime += rack->r_ctl.rc_time_probertt_starts; 3996 if (TSTMP_GEQ(us_cts, endtime)) { 3997 /* yes, exit probertt */ 3998 rack_exit_probertt(rack, us_cts); 3999 } 4000 4001 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 4002 /* Go into probertt, its been too long since we went lower */ 4003 rack_enter_probertt(rack, us_cts); 4004 } 4005 } 4006 4007 static void 4008 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 4009 uint32_t rtt, int32_t rtt_diff) 4010 { 4011 uint64_t cur_bw, up_bnd, low_bnd, subfr; 4012 uint32_t losses; 4013 4014 if ((rack->rc_gp_dyn_mul == 0) || 4015 (rack->use_fixed_rate) || 4016 (rack->in_probe_rtt) || 4017 (rack->rc_always_pace == 0)) { 4018 /* No dynamic GP multipler in play */ 4019 return; 4020 } 4021 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 4022 cur_bw = rack_get_bw(rack); 4023 /* Calculate our up and down range */ 4024 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 4025 up_bnd /= 100; 4026 up_bnd += rack->r_ctl.last_gp_comp_bw; 4027 4028 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 4029 subfr /= 100; 4030 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 4031 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 4032 /* 4033 * This is the case where our RTT is above 4034 * the max target and we have been configured 4035 * to just do timely no bonus up stuff in that case. 4036 * 4037 * There are two configurations, set to 1, and we 4038 * just do timely if we are over our max. If its 4039 * set above 1 then we slam the multipliers down 4040 * to 100 and then decrement per timely. 
4041 */ 4042 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4043 __LINE__, 3); 4044 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 4045 rack_validate_multipliers_at_or_below_100(rack); 4046 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4047 } else if ((last_bw_est < low_bnd) && !losses) { 4048 /* 4049 * We are decreasing this is a bit complicated this 4050 * means we are loosing ground. This could be 4051 * because another flow entered and we are competing 4052 * for b/w with it. This will push the RTT up which 4053 * makes timely unusable unless we want to get shoved 4054 * into a corner and just be backed off (the age 4055 * old problem with delay based CC). 4056 * 4057 * On the other hand if it was a route change we 4058 * would like to stay somewhat contained and not 4059 * blow out the buffers. 4060 */ 4061 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4062 __LINE__, 3); 4063 rack->r_ctl.last_gp_comp_bw = cur_bw; 4064 if (rack->rc_gp_bwred == 0) { 4065 /* Go into reduction counting */ 4066 rack->rc_gp_bwred = 1; 4067 rack->rc_gp_timely_dec_cnt = 0; 4068 } 4069 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 4070 (timely_says == 0)) { 4071 /* 4072 * Push another time with a faster pacing 4073 * to try to gain back (we include override to 4074 * get a full raise factor). 4075 */ 4076 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4077 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4078 (timely_says == 0) || 4079 (rack_down_raise_thresh == 0)) { 4080 /* 4081 * Do an override up in b/w if we were 4082 * below the threshold or if the threshold 4083 * is zero we always do the raise. 4084 */ 4085 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4086 } else { 4087 /* Log it stays the same */ 4088 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4089 __LINE__, 11); 4090 } 4091 rack->rc_gp_timely_dec_cnt++; 4092 /* We are not incrementing really no-count */ 4093 rack->rc_gp_incr = 0; 4094 rack->rc_gp_timely_inc_cnt = 0; 4095 } else { 4096 /* 4097 * Lets just use the RTT 4098 * information and give up 4099 * pushing. 4100 */ 4101 goto use_timely; 4102 } 4103 } else if ((timely_says != 2) && 4104 !losses && 4105 (last_bw_est > up_bnd)) { 4106 /* 4107 * We are increasing b/w lets keep going, updating 4108 * our b/w and ignoring any timely input, unless 4109 * of course we are at our max raise (if there is one). 4110 */ 4111 4112 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4113 __LINE__, 3); 4114 rack->r_ctl.last_gp_comp_bw = cur_bw; 4115 if (rack->rc_gp_saw_ss && 4116 rack_per_upper_bound_ss && 4117 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 4118 /* 4119 * In cases where we can't go higher 4120 * we should just use timely. 4121 */ 4122 goto use_timely; 4123 } 4124 if (rack->rc_gp_saw_ca && 4125 rack_per_upper_bound_ca && 4126 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4127 /* 4128 * In cases where we can't go higher 4129 * we should just use timely. 
4130 */ 4131 goto use_timely; 4132 } 4133 rack->rc_gp_bwred = 0; 4134 rack->rc_gp_timely_dec_cnt = 0; 4135 /* You get a set number of pushes if timely is trying to reduce */ 4136 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4137 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4138 } else { 4139 /* Log it stays the same */ 4140 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4141 __LINE__, 12); 4142 } 4143 return; 4144 } else { 4145 /* 4146 * We are staying between the lower and upper range bounds 4147 * so use timely to decide. 4148 */ 4149 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4150 __LINE__, 3); 4151 use_timely: 4152 if (timely_says) { 4153 rack->rc_gp_incr = 0; 4154 rack->rc_gp_timely_inc_cnt = 0; 4155 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4156 !losses && 4157 (last_bw_est < low_bnd)) { 4158 /* We are loosing ground */ 4159 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4160 rack->rc_gp_timely_dec_cnt++; 4161 /* We are not incrementing really no-count */ 4162 rack->rc_gp_incr = 0; 4163 rack->rc_gp_timely_inc_cnt = 0; 4164 } else 4165 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4166 } else { 4167 rack->rc_gp_bwred = 0; 4168 rack->rc_gp_timely_dec_cnt = 0; 4169 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4170 } 4171 } 4172 } 4173 4174 static int32_t 4175 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4176 { 4177 int32_t timely_says; 4178 uint64_t log_mult, log_rtt_a_diff; 4179 4180 log_rtt_a_diff = rtt; 4181 log_rtt_a_diff <<= 32; 4182 log_rtt_a_diff |= (uint32_t)rtt_diff; 4183 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4184 rack_gp_rtt_maxmul)) { 4185 /* Reduce the b/w multipler */ 4186 timely_says = 2; 4187 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4188 log_mult <<= 32; 4189 log_mult |= prev_rtt; 4190 rack_log_timely(rack, timely_says, log_mult, 4191 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4192 log_rtt_a_diff, __LINE__, 4); 4193 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4194 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4195 max(rack_gp_rtt_mindiv , 1)))) { 4196 /* Increase the b/w multipler */ 4197 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4198 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4199 max(rack_gp_rtt_mindiv , 1)); 4200 log_mult <<= 32; 4201 log_mult |= prev_rtt; 4202 timely_says = 0; 4203 rack_log_timely(rack, timely_says, log_mult , 4204 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4205 log_rtt_a_diff, __LINE__, 5); 4206 } else { 4207 /* 4208 * Use a gradient to find it the timely gradient 4209 * is: 4210 * grad = rc_rtt_diff / min_rtt; 4211 * 4212 * anything below or equal to 0 will be 4213 * a increase indication. Anything above 4214 * zero is a decrease. Note we take care 4215 * of the actual gradient calculation 4216 * in the reduction (its not needed for 4217 * increase). 
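 * The three outcomes below, with example numbers only: given a filtered
 * min_rtt of 10ms and rack_gp_rtt_maxmul of 3, a gp_srtt of 30ms or more
 * forces a reduction (timely_says = 2); a gp_srtt at or below min_rtt
 * plus the minmul/mindiv fraction of min_rtt asks for an increase
 * (timely_says = 0); anything in between falls to the sign of
 * rc_rtt_diff, at or below zero increase, above zero reduce.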
4218 */ 4219 log_mult = prev_rtt; 4220 if (rtt_diff <= 0) { 4221 /* 4222 * Rttdiff is less than zero, increase the 4223 * b/w multipler (its 0 or negative) 4224 */ 4225 timely_says = 0; 4226 rack_log_timely(rack, timely_says, log_mult, 4227 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4228 } else { 4229 /* Reduce the b/w multipler */ 4230 timely_says = 1; 4231 rack_log_timely(rack, timely_says, log_mult, 4232 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4233 } 4234 } 4235 return (timely_says); 4236 } 4237 4238 static void 4239 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4240 tcp_seq th_ack, int line, uint8_t quality) 4241 { 4242 uint64_t tim, bytes_ps, ltim, stim, utim; 4243 uint32_t segsiz, bytes, reqbytes, us_cts; 4244 int32_t gput, new_rtt_diff, timely_says; 4245 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4246 int did_add = 0; 4247 4248 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4249 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4250 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4251 tim = us_cts - tp->gput_ts; 4252 else 4253 tim = 0; 4254 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4255 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4256 else 4257 stim = 0; 4258 /* 4259 * Use the larger of the send time or ack time. This prevents us 4260 * from being influenced by ack artifacts to come up with too 4261 * high of measurement. Note that since we are spanning over many more 4262 * bytes in most of our measurements hopefully that is less likely to 4263 * occur. 4264 */ 4265 if (tim > stim) 4266 utim = max(tim, 1); 4267 else 4268 utim = max(stim, 1); 4269 /* Lets get a msec time ltim too for the old stuff */ 4270 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4271 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4272 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4273 if ((tim == 0) && (stim == 0)) { 4274 /* 4275 * Invalid measurement time, maybe 4276 * all on one ack/one send? 4277 */ 4278 bytes = 0; 4279 bytes_ps = 0; 4280 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4281 0, 0, 0, 10, __LINE__, NULL, quality); 4282 goto skip_measurement; 4283 } 4284 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4285 /* We never made a us_rtt measurement? */ 4286 bytes = 0; 4287 bytes_ps = 0; 4288 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4289 0, 0, 0, 10, __LINE__, NULL, quality); 4290 goto skip_measurement; 4291 } 4292 /* 4293 * Calculate the maximum possible b/w this connection 4294 * could have. We base our calculation on the lowest 4295 * rtt we have seen during the measurement and the 4296 * largest rwnd the client has given us in that time. This 4297 * forms a BDP that is the maximum that we could ever 4298 * get to the client. Anything larger is not valid. 4299 * 4300 * I originally had code here that rejected measurements 4301 * where the time was less than 1/2 the latest us_rtt. 4302 * But after thinking on that I realized its wrong since 4303 * say you had a 150Mbps or even 1Gbps link, and you 4304 * were a long way away.. example I am in Europe (100ms rtt) 4305 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4306 * bytes my time would be 1.2ms, and yet my rtt would say 4307 * the measurement was invalid the time was < 50ms. The 4308 * same thing is true for 150Mb (8ms of time). 
4309 * 4310 * A better way I realized is to look at what the maximum 4311 * the connection could possibly do. This is gated on 4312 * the lowest RTT we have seen and the highest rwnd. 4313 * We should in theory never exceed that, if we are 4314 * then something on the path is storing up packets 4315 * and then feeding them all at once to our endpoint 4316 * messing up our measurement. 4317 */ 4318 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4319 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4320 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4321 if (SEQ_LT(th_ack, tp->gput_seq)) { 4322 /* No measurement can be made */ 4323 bytes = 0; 4324 bytes_ps = 0; 4325 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4326 0, 0, 0, 10, __LINE__, NULL, quality); 4327 goto skip_measurement; 4328 } else 4329 bytes = (th_ack - tp->gput_seq); 4330 bytes_ps = (uint64_t)bytes; 4331 /* 4332 * Don't measure a b/w for pacing unless we have gotten at least 4333 * an initial windows worth of data in this measurement interval. 4334 * 4335 * Small numbers of bytes get badly influenced by delayed ack and 4336 * other artifacts. Note we take the initial window or our 4337 * defined minimum GP (defaulting to 10 which hopefully is the 4338 * IW). 4339 */ 4340 if (rack->rc_gp_filled == 0) { 4341 /* 4342 * The initial estimate is special. We 4343 * have blasted out an IW worth of packets 4344 * without a real valid ack ts results. We 4345 * then setup the app_limited_needs_set flag, 4346 * this should get the first ack in (probably 2 4347 * MSS worth) to be recorded as the timestamp. 4348 * We thus allow a smaller number of bytes i.e. 4349 * IW - 2MSS. 4350 */ 4351 reqbytes -= (2 * segsiz); 4352 /* Also lets fill previous for our first measurement to be neutral */ 4353 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4354 } 4355 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4356 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4357 rack->r_ctl.rc_app_limited_cnt, 4358 0, 0, 10, __LINE__, NULL, quality); 4359 goto skip_measurement; 4360 } 4361 /* 4362 * We now need to calculate the Timely like status so 4363 * we can update (possibly) the b/w multipliers. 4364 */ 4365 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4366 if (rack->rc_gp_filled == 0) { 4367 /* No previous reading */ 4368 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4369 } else { 4370 if (rack->measure_saw_probe_rtt == 0) { 4371 /* 4372 * We don't want a probertt to be counted 4373 * since it will be negative incorrectly. We 4374 * expect to be reducing the RTT when we 4375 * pace at a slower rate. 4376 */ 4377 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4378 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4379 } 4380 } 4381 timely_says = rack_make_timely_judgement(rack, 4382 rack->r_ctl.rc_gp_srtt, 4383 rack->r_ctl.rc_rtt_diff, 4384 rack->r_ctl.rc_prev_gp_srtt 4385 ); 4386 bytes_ps *= HPTS_USEC_IN_SEC; 4387 bytes_ps /= utim; 4388 if (bytes_ps > rack->r_ctl.last_max_bw) { 4389 /* 4390 * Something is on path playing 4391 * since this b/w is not possible based 4392 * on our BDP (highest rwnd and lowest rtt 4393 * we saw in the measurement window). 4394 * 4395 * Another option here would be to 4396 * instead skip the measurement. 
4397 */ 4398 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4399 bytes_ps, rack->r_ctl.last_max_bw, 0, 4400 11, __LINE__, NULL, quality); 4401 bytes_ps = rack->r_ctl.last_max_bw; 4402 } 4403 /* We store gp for b/w in bytes per second */ 4404 if (rack->rc_gp_filled == 0) { 4405 /* Initial measurment */ 4406 if (bytes_ps) { 4407 rack->r_ctl.gp_bw = bytes_ps; 4408 rack->rc_gp_filled = 1; 4409 rack->r_ctl.num_measurements = 1; 4410 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4411 } else { 4412 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4413 rack->r_ctl.rc_app_limited_cnt, 4414 0, 0, 10, __LINE__, NULL, quality); 4415 } 4416 if (rack->rc_inp->inp_in_hpts && 4417 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4418 /* 4419 * Ok we can't trust the pacer in this case 4420 * where we transition from un-paced to paced. 4421 * Or for that matter when the burst mitigation 4422 * was making a wild guess and got it wrong. 4423 * Stop the pacer and clear up all the aggregate 4424 * delays etc. 4425 */ 4426 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 4427 rack->r_ctl.rc_hpts_flags = 0; 4428 rack->r_ctl.rc_last_output_to = 0; 4429 } 4430 did_add = 2; 4431 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4432 /* Still a small number run an average */ 4433 rack->r_ctl.gp_bw += bytes_ps; 4434 addpart = rack->r_ctl.num_measurements; 4435 rack->r_ctl.num_measurements++; 4436 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4437 /* We have collected enought to move forward */ 4438 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4439 } 4440 did_add = 3; 4441 } else { 4442 /* 4443 * We want to take 1/wma of the goodput and add in to 7/8th 4444 * of the old value weighted by the srtt. So if your measurement 4445 * period is say 2 SRTT's long you would get 1/4 as the 4446 * value, if it was like 1/2 SRTT then you would get 1/16th. 4447 * 4448 * But we must be careful not to take too much i.e. if the 4449 * srtt is say 20ms and the measurement is taken over 4450 * 400ms our weight would be 400/20 i.e. 20. On the 4451 * other hand if we get a measurement over 1ms with a 4452 * 10ms rtt we only want to take a much smaller portion. 4453 */ 4454 if (rack->r_ctl.num_measurements < 0xff) { 4455 rack->r_ctl.num_measurements++; 4456 } 4457 srtt = (uint64_t)tp->t_srtt; 4458 if (srtt == 0) { 4459 /* 4460 * Strange why did t_srtt go back to zero? 4461 */ 4462 if (rack->r_ctl.rc_rack_min_rtt) 4463 srtt = rack->r_ctl.rc_rack_min_rtt; 4464 else 4465 srtt = HPTS_USEC_IN_MSEC; 4466 } 4467 /* 4468 * XXXrrs: Note for reviewers, in playing with 4469 * dynamic pacing I discovered this GP calculation 4470 * as done originally leads to some undesired results. 4471 * Basically you can get longer measurements contributing 4472 * too much to the WMA. Thus I changed it if you are doing 4473 * dynamic adjustments to only do the aportioned adjustment 4474 * if we have a very small (time wise) measurement. Longer 4475 * measurements just get there weight (defaulting to 1/8) 4476 * add to the WMA. We may want to think about changing 4477 * this to always do that for both sides i.e. dynamic 4478 * and non-dynamic... but considering lots of folks 4479 * were playing with this I did not want to change the 4480 * calculation per.se. without your thoughts.. Lawerence? 4481 * Peter?? 
4482 */ 4483 if (rack->rc_gp_dyn_mul == 0) { 4484 subpart = rack->r_ctl.gp_bw * utim; 4485 subpart /= (srtt * 8); 4486 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4487 /* 4488 * The b/w update takes no more 4489 * away then 1/2 our running total 4490 * so factor it in. 4491 */ 4492 addpart = bytes_ps * utim; 4493 addpart /= (srtt * 8); 4494 } else { 4495 /* 4496 * Don't allow a single measurement 4497 * to account for more than 1/2 of the 4498 * WMA. This could happen on a retransmission 4499 * where utim becomes huge compared to 4500 * srtt (multiple retransmissions when using 4501 * the sending rate which factors in all the 4502 * transmissions from the first one). 4503 */ 4504 subpart = rack->r_ctl.gp_bw / 2; 4505 addpart = bytes_ps / 2; 4506 } 4507 resid_bw = rack->r_ctl.gp_bw - subpart; 4508 rack->r_ctl.gp_bw = resid_bw + addpart; 4509 did_add = 1; 4510 } else { 4511 if ((utim / srtt) <= 1) { 4512 /* 4513 * The b/w update was over a small period 4514 * of time. The idea here is to prevent a small 4515 * measurement time period from counting 4516 * too much. So we scale it based on the 4517 * time so it attributes less than 1/rack_wma_divisor 4518 * of its measurement. 4519 */ 4520 subpart = rack->r_ctl.gp_bw * utim; 4521 subpart /= (srtt * rack_wma_divisor); 4522 addpart = bytes_ps * utim; 4523 addpart /= (srtt * rack_wma_divisor); 4524 } else { 4525 /* 4526 * The scaled measurement was long 4527 * enough so lets just add in the 4528 * portion of the measurment i.e. 1/rack_wma_divisor 4529 */ 4530 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4531 addpart = bytes_ps / rack_wma_divisor; 4532 } 4533 if ((rack->measure_saw_probe_rtt == 0) || 4534 (bytes_ps > rack->r_ctl.gp_bw)) { 4535 /* 4536 * For probe-rtt we only add it in 4537 * if its larger, all others we just 4538 * add in. 4539 */ 4540 did_add = 1; 4541 resid_bw = rack->r_ctl.gp_bw - subpart; 4542 rack->r_ctl.gp_bw = resid_bw + addpart; 4543 } 4544 } 4545 } 4546 if ((rack->gp_ready == 0) && 4547 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4548 /* We have enough measurements now */ 4549 rack->gp_ready = 1; 4550 rack_set_cc_pacing(rack); 4551 if (rack->defer_options) 4552 rack_apply_deferred_options(rack); 4553 } 4554 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4555 rack_get_bw(rack), 22, did_add, NULL, quality); 4556 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4557 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4558 rack_update_multiplier(rack, timely_says, bytes_ps, 4559 rack->r_ctl.rc_gp_srtt, 4560 rack->r_ctl.rc_rtt_diff); 4561 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4562 rack_get_bw(rack), 3, line, NULL, quality); 4563 /* reset the gp srtt and setup the new prev */ 4564 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4565 /* Record the lost count for the next measurement */ 4566 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4567 /* 4568 * We restart our diffs based on the gpsrtt in the 4569 * measurement window. 4570 */ 4571 rack->rc_gp_rtt_set = 0; 4572 rack->rc_gp_saw_rec = 0; 4573 rack->rc_gp_saw_ca = 0; 4574 rack->rc_gp_saw_ss = 0; 4575 rack->rc_dragged_bottom = 0; 4576 skip_measurement: 4577 4578 #ifdef STATS 4579 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4580 gput); 4581 /* 4582 * XXXLAS: This is a temporary hack, and should be 4583 * chained off VOI_TCP_GPUT when stats(9) grows an 4584 * API to deal with chained VOIs. 
4585 */ 4586 if (tp->t_stats_gput_prev > 0) 4587 stats_voi_update_abs_s32(tp->t_stats, 4588 VOI_TCP_GPUT_ND, 4589 ((gput - tp->t_stats_gput_prev) * 100) / 4590 tp->t_stats_gput_prev); 4591 #endif 4592 tp->t_flags &= ~TF_GPUTINPROG; 4593 tp->t_stats_gput_prev = gput; 4594 /* 4595 * Now are we app limited now and there is space from where we 4596 * were to where we want to go? 4597 * 4598 * We don't do the other case i.e. non-applimited here since 4599 * the next send will trigger us picking up the missing data. 4600 */ 4601 if (rack->r_ctl.rc_first_appl && 4602 TCPS_HAVEESTABLISHED(tp->t_state) && 4603 rack->r_ctl.rc_app_limited_cnt && 4604 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4605 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4606 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4607 /* 4608 * Yep there is enough outstanding to make a measurement here. 4609 */ 4610 struct rack_sendmap *rsm, fe; 4611 4612 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4613 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4614 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4615 rack->app_limited_needs_set = 0; 4616 tp->gput_seq = th_ack; 4617 if (rack->in_probe_rtt) 4618 rack->measure_saw_probe_rtt = 1; 4619 else if ((rack->measure_saw_probe_rtt) && 4620 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4621 rack->measure_saw_probe_rtt = 0; 4622 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4623 /* There is a full window to gain info from */ 4624 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4625 } else { 4626 /* We can only measure up to the applimited point */ 4627 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4628 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4629 /* 4630 * We don't have enough to make a measurement. 4631 */ 4632 tp->t_flags &= ~TF_GPUTINPROG; 4633 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4634 0, 0, 0, 6, __LINE__, NULL, quality); 4635 return; 4636 } 4637 } 4638 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4639 /* 4640 * We will get no more data into the SB 4641 * this means we need to have the data available 4642 * before we start a measurement. 4643 */ 4644 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4645 /* Nope not enough data. */ 4646 return; 4647 } 4648 } 4649 tp->t_flags |= TF_GPUTINPROG; 4650 /* 4651 * Now we need to find the timestamp of the send at tp->gput_seq 4652 * for the send based measurement. 4653 */ 4654 fe.r_start = tp->gput_seq; 4655 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4656 if (rsm) { 4657 /* Ok send-based limit is set */ 4658 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4659 /* 4660 * Move back to include the earlier part 4661 * so our ack time lines up right (this may 4662 * make an overlapping measurement but thats 4663 * ok). 4664 */ 4665 tp->gput_seq = rsm->r_start; 4666 } 4667 if (rsm->r_flags & RACK_ACKED) 4668 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4669 else 4670 rack->app_limited_needs_set = 1; 4671 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4672 } else { 4673 /* 4674 * If we don't find the rsm due to some 4675 * send-limit set the current time, which 4676 * basically disables the send-limit. 
4677 */ 4678 struct timeval tv; 4679 4680 microuptime(&tv); 4681 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4682 } 4683 rack_log_pacing_delay_calc(rack, 4684 tp->gput_seq, 4685 tp->gput_ack, 4686 (uint64_t)rsm, 4687 tp->gput_ts, 4688 rack->r_ctl.rc_app_limited_cnt, 4689 9, 4690 __LINE__, NULL, quality); 4691 } 4692 } 4693 4694 /* 4695 * CC wrapper hook functions 4696 */ 4697 static void 4698 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4699 uint16_t type, int32_t recovery) 4700 { 4701 uint32_t prior_cwnd, acked; 4702 struct tcp_log_buffer *lgb = NULL; 4703 uint8_t labc_to_use, quality; 4704 4705 INP_WLOCK_ASSERT(tp->t_inpcb); 4706 tp->ccv->nsegs = nsegs; 4707 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4708 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4709 uint32_t max; 4710 4711 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4712 if (tp->ccv->bytes_this_ack > max) { 4713 tp->ccv->bytes_this_ack = max; 4714 } 4715 } 4716 #ifdef STATS 4717 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4718 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4719 #endif 4720 quality = RACK_QUALITY_NONE; 4721 if ((tp->t_flags & TF_GPUTINPROG) && 4722 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4723 /* Measure the Goodput */ 4724 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4725 #ifdef NETFLIX_PEAKRATE 4726 if ((type == CC_ACK) && 4727 (tp->t_maxpeakrate)) { 4728 /* 4729 * We update t_peakrate_thr. This gives us roughly 4730 * one update per round trip time. Note 4731 * it will only be used if pace_always is off i.e 4732 * we don't do this for paced flows. 4733 */ 4734 rack_update_peakrate_thr(tp); 4735 } 4736 #endif 4737 } 4738 /* Which way our we limited, if not cwnd limited no advance in CA */ 4739 if (tp->snd_cwnd <= tp->snd_wnd) 4740 tp->ccv->flags |= CCF_CWND_LIMITED; 4741 else 4742 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4743 if (tp->snd_cwnd > tp->snd_ssthresh) { 4744 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4745 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4746 /* For the setting of a window past use the actual scwnd we are using */ 4747 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4748 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4749 tp->ccv->flags |= CCF_ABC_SENTAWND; 4750 } 4751 } else { 4752 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4753 tp->t_bytes_acked = 0; 4754 } 4755 prior_cwnd = tp->snd_cwnd; 4756 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4757 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4758 labc_to_use = rack->rc_labc; 4759 else 4760 labc_to_use = rack_max_abc_post_recovery; 4761 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4762 union tcp_log_stackspecific log; 4763 struct timeval tv; 4764 4765 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4766 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4767 log.u_bbr.flex1 = th_ack; 4768 log.u_bbr.flex2 = tp->ccv->flags; 4769 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4770 log.u_bbr.flex4 = tp->ccv->nsegs; 4771 log.u_bbr.flex5 = labc_to_use; 4772 log.u_bbr.flex6 = prior_cwnd; 4773 log.u_bbr.flex7 = V_tcp_do_newsack; 4774 log.u_bbr.flex8 = 1; 4775 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4776 0, &log, false, NULL, NULL, 0, &tv); 4777 } 4778 if (CC_ALGO(tp)->ack_received != NULL) { 4779 /* XXXLAS: Find a way to live without this */ 4780 tp->ccv->curack = th_ack; 4781 
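		/*
		 * Descriptive note: hand the CC module the ack point plus our
		 * chosen local ABC limit (labc_to_use), and mark it with
		 * CCF_USE_LOCAL_ABC so the module knows to honor it, before
		 * calling its ack_received hook.
		 */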
tp->ccv->labc = labc_to_use; 4782 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4783 CC_ALGO(tp)->ack_received(tp->ccv, type); 4784 } 4785 if (lgb) { 4786 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4787 } 4788 if (rack->r_must_retran) { 4789 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4790 /* 4791 * We now are beyond the rxt point so lets disable 4792 * the flag. 4793 */ 4794 rack->r_ctl.rc_out_at_rto = 0; 4795 rack->r_must_retran = 0; 4796 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4797 /* 4798 * Only decrement the rc_out_at_rto if the cwnd advances 4799 * at least a whole segment. Otherwise next time the peer 4800 * acks, we won't be able to send this generaly happens 4801 * when we are in Congestion Avoidance. 4802 */ 4803 if (acked <= rack->r_ctl.rc_out_at_rto){ 4804 rack->r_ctl.rc_out_at_rto -= acked; 4805 } else { 4806 rack->r_ctl.rc_out_at_rto = 0; 4807 } 4808 } 4809 } 4810 #ifdef STATS 4811 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4812 #endif 4813 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4814 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4815 } 4816 #ifdef NETFLIX_PEAKRATE 4817 /* we enforce max peak rate if it is set and we are not pacing */ 4818 if ((rack->rc_always_pace == 0) && 4819 tp->t_peakrate_thr && 4820 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4821 tp->snd_cwnd = tp->t_peakrate_thr; 4822 } 4823 #endif 4824 } 4825 4826 static void 4827 tcp_rack_partialack(struct tcpcb *tp) 4828 { 4829 struct tcp_rack *rack; 4830 4831 rack = (struct tcp_rack *)tp->t_fb_ptr; 4832 INP_WLOCK_ASSERT(tp->t_inpcb); 4833 /* 4834 * If we are doing PRR and have enough 4835 * room to send <or> we are pacing and prr 4836 * is disabled we will want to see if we 4837 * can send data (by setting r_wanted_output to 4838 * true). 4839 */ 4840 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4841 rack->rack_no_prr) 4842 rack->r_wanted_output = 1; 4843 } 4844 4845 static void 4846 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4847 { 4848 struct tcp_rack *rack; 4849 uint32_t orig_cwnd; 4850 4851 orig_cwnd = tp->snd_cwnd; 4852 INP_WLOCK_ASSERT(tp->t_inpcb); 4853 rack = (struct tcp_rack *)tp->t_fb_ptr; 4854 /* only alert CC if we alerted when we entered */ 4855 if (CC_ALGO(tp)->post_recovery != NULL) { 4856 tp->ccv->curack = th_ack; 4857 CC_ALGO(tp)->post_recovery(tp->ccv); 4858 if (tp->snd_cwnd < tp->snd_ssthresh) { 4859 /* 4860 * Rack has burst control and pacing 4861 * so lets not set this any lower than 4862 * snd_ssthresh per RFC-6582 (option 2). 
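			 *
			 * (A CC module's post_recovery hook may deflate cwnd
			 * toward roughly the amount of data in flight, which
			 * for an application-limited flow can sit well below
			 * ssthresh; hence the clamp just below.)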
4863 */ 4864 tp->snd_cwnd = tp->snd_ssthresh; 4865 } 4866 } 4867 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4868 union tcp_log_stackspecific log; 4869 struct timeval tv; 4870 4871 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4872 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4873 log.u_bbr.flex1 = th_ack; 4874 log.u_bbr.flex2 = tp->ccv->flags; 4875 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4876 log.u_bbr.flex4 = tp->ccv->nsegs; 4877 log.u_bbr.flex5 = V_tcp_abc_l_var; 4878 log.u_bbr.flex6 = orig_cwnd; 4879 log.u_bbr.flex7 = V_tcp_do_newsack; 4880 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4881 log.u_bbr.flex8 = 2; 4882 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4883 0, &log, false, NULL, NULL, 0, &tv); 4884 } 4885 if ((rack->rack_no_prr == 0) && 4886 (rack->no_prr_addback == 0) && 4887 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4888 /* 4889 * Suck the next prr cnt back into cwnd, but 4890 * only do that if we are not application limited. 4891 */ 4892 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4893 /* 4894 * We are allowed to add back to the cwnd the amount we did 4895 * not get out if: 4896 * a) no_prr_addback is off. 4897 * b) we are not app limited 4898 * c) we are doing prr 4899 * <and> 4900 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4901 */ 4902 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4903 rack->r_ctl.rc_prr_sndcnt); 4904 } 4905 rack->r_ctl.rc_prr_sndcnt = 0; 4906 rack_log_to_prr(rack, 1, 0); 4907 } 4908 rack_log_to_prr(rack, 14, orig_cwnd); 4909 tp->snd_recover = tp->snd_una; 4910 if (rack->r_ctl.dsack_persist) { 4911 rack->r_ctl.dsack_persist--; 4912 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4913 rack->r_ctl.num_dsack = 0; 4914 } 4915 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4916 } 4917 EXIT_RECOVERY(tp->t_flags); 4918 } 4919 4920 static void 4921 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4922 { 4923 struct tcp_rack *rack; 4924 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4925 4926 INP_WLOCK_ASSERT(tp->t_inpcb); 4927 #ifdef STATS 4928 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4929 #endif 4930 if (IN_RECOVERY(tp->t_flags) == 0) { 4931 in_rec_at_entry = 0; 4932 ssthresh_enter = tp->snd_ssthresh; 4933 cwnd_enter = tp->snd_cwnd; 4934 } else 4935 in_rec_at_entry = 1; 4936 rack = (struct tcp_rack *)tp->t_fb_ptr; 4937 switch (type) { 4938 case CC_NDUPACK: 4939 tp->t_flags &= ~TF_WASFRECOVERY; 4940 tp->t_flags &= ~TF_WASCRECOVERY; 4941 if (!IN_FASTRECOVERY(tp->t_flags)) { 4942 rack->r_ctl.rc_prr_delivered = 0; 4943 rack->r_ctl.rc_prr_out = 0; 4944 if (rack->rack_no_prr == 0) { 4945 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4946 rack_log_to_prr(rack, 2, in_rec_at_entry); 4947 } 4948 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4949 tp->snd_recover = tp->snd_max; 4950 if (tp->t_flags2 & TF2_ECN_PERMIT) 4951 tp->t_flags2 |= TF2_ECN_SND_CWR; 4952 } 4953 break; 4954 case CC_ECN: 4955 if (!IN_CONGRECOVERY(tp->t_flags) || 4956 /* 4957 * Allow ECN reaction on ACK to CWR, if 4958 * that data segment was also CE marked. 
4959 */ 4960 SEQ_GEQ(ack, tp->snd_recover)) { 4961 EXIT_CONGRECOVERY(tp->t_flags); 4962 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4963 tp->snd_recover = tp->snd_max + 1; 4964 if (tp->t_flags2 & TF2_ECN_PERMIT) 4965 tp->t_flags2 |= TF2_ECN_SND_CWR; 4966 } 4967 break; 4968 case CC_RTO: 4969 tp->t_dupacks = 0; 4970 tp->t_bytes_acked = 0; 4971 EXIT_RECOVERY(tp->t_flags); 4972 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4973 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4974 orig_cwnd = tp->snd_cwnd; 4975 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4976 rack_log_to_prr(rack, 16, orig_cwnd); 4977 if (tp->t_flags2 & TF2_ECN_PERMIT) 4978 tp->t_flags2 |= TF2_ECN_SND_CWR; 4979 break; 4980 case CC_RTO_ERR: 4981 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4982 /* RTO was unnecessary, so reset everything. */ 4983 tp->snd_cwnd = tp->snd_cwnd_prev; 4984 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4985 tp->snd_recover = tp->snd_recover_prev; 4986 if (tp->t_flags & TF_WASFRECOVERY) { 4987 ENTER_FASTRECOVERY(tp->t_flags); 4988 tp->t_flags &= ~TF_WASFRECOVERY; 4989 } 4990 if (tp->t_flags & TF_WASCRECOVERY) { 4991 ENTER_CONGRECOVERY(tp->t_flags); 4992 tp->t_flags &= ~TF_WASCRECOVERY; 4993 } 4994 tp->snd_nxt = tp->snd_max; 4995 tp->t_badrxtwin = 0; 4996 break; 4997 } 4998 if ((CC_ALGO(tp)->cong_signal != NULL) && 4999 (type != CC_RTO)){ 5000 tp->ccv->curack = ack; 5001 CC_ALGO(tp)->cong_signal(tp->ccv, type); 5002 } 5003 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 5004 rack_log_to_prr(rack, 15, cwnd_enter); 5005 rack->r_ctl.dsack_byte_cnt = 0; 5006 rack->r_ctl.retran_during_recovery = 0; 5007 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 5008 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 5009 rack->r_ent_rec_ns = 1; 5010 } 5011 } 5012 5013 static inline void 5014 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 5015 { 5016 uint32_t i_cwnd; 5017 5018 INP_WLOCK_ASSERT(tp->t_inpcb); 5019 5020 #ifdef NETFLIX_STATS 5021 KMOD_TCPSTAT_INC(tcps_idle_restarts); 5022 if (tp->t_state == TCPS_ESTABLISHED) 5023 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 5024 #endif 5025 if (CC_ALGO(tp)->after_idle != NULL) 5026 CC_ALGO(tp)->after_idle(tp->ccv); 5027 5028 if (tp->snd_cwnd == 1) 5029 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 5030 else 5031 i_cwnd = rc_init_window(rack); 5032 5033 /* 5034 * Being idle is no differnt than the initial window. If the cc 5035 * clamps it down below the initial window raise it to the initial 5036 * window. 5037 */ 5038 if (tp->snd_cwnd < i_cwnd) { 5039 tp->snd_cwnd = i_cwnd; 5040 } 5041 } 5042 5043 /* 5044 * Indicate whether this ack should be delayed. We can delay the ack if 5045 * following conditions are met: 5046 * - There is no delayed ack timer in progress. 5047 * - Our last ack wasn't a 0-sized window. We never want to delay 5048 * the ack that opens up a 0-sized window. 5049 * - LRO wasn't used for this segment. We make sure by checking that the 5050 * segment size is not larger than the MSS. 5051 * - Delayed acks are enabled or this is a half-synchronized T/TCP 5052 * connection. 5053 */ 5054 #define DELAY_ACK(tp, tlen) \ 5055 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 5056 ((tp->t_flags & TF_DELACK) == 0) && \ 5057 (tlen <= tp->t_maxseg) && \ 5058 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 5059 5060 static struct rack_sendmap * 5061 rack_find_lowest_rsm(struct tcp_rack *rack) 5062 { 5063 struct rack_sendmap *rsm; 5064 5065 /* 5066 * Walk the time-order transmitted list looking for an rsm that is 5067 * not acked. 
This will be the one that was sent the longest time
	 * ago that is still outstanding.
	 */
	TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
		if (rsm->r_flags & RACK_ACKED) {
			continue;
		}
		goto finish;
	}
finish:
	return (rsm);
}

static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
{
	struct rack_sendmap *prsm;

	/*
	 * Walk the sequence-ordered list backward until we arrive at the
	 * highest seq not acked. In theory when this is called it
	 * should be the last segment (which it was not).
	 */
	counter_u64_add(rack_find_high, 1);
	prsm = rsm;
	RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
			continue;
		}
		return (prsm);
	}
	return (NULL);
}

static uint32_t
rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
{
	int32_t lro;
	uint32_t thresh;

	/*
	 * lro is the flag we use to determine if we have seen reordering.
	 * If it gets set we have seen reordering. The reorder logic
	 * works in one of two ways:
	 *
	 * If reorder-fade is configured, then we track the last time we saw
	 * re-ordering occur. If we reach the point where enough time has
	 * passed we no longer consider reordering to be occurring.
	 *
	 * Or if reorder-fade is 0, then once we see reordering we consider
	 * the connection to always be subject to reordering and just set lro
	 * to 1.
	 *
	 * In the end if lro is non-zero we add the extra time for
	 * reordering in.
	 */
	if (srtt == 0)
		srtt = 1;
	if (rack->r_ctl.rc_reorder_ts) {
		if (rack->r_ctl.rc_reorder_fade) {
			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
				lro = cts - rack->r_ctl.rc_reorder_ts;
				if (lro == 0) {
					/*
					 * No time has passed since the last
					 * reorder, mark it as reordering.
					 */
					lro = 1;
				}
			} else {
				/* Negative time? */
				lro = 0;
			}
			if (lro > rack->r_ctl.rc_reorder_fade) {
				/* Turn off reordering seen too */
				rack->r_ctl.rc_reorder_ts = 0;
				lro = 0;
			}
		} else {
			/* Reordering does not fade */
			lro = 1;
		}
	} else {
		lro = 0;
	}
	if (rack->rc_rack_tmr_std_based == 0) {
		thresh = srtt + rack->r_ctl.rc_pkt_delay;
	} else {
		/* Standards based pkt-delay is 1/4 srtt */
		thresh = srtt + (srtt >> 2);
	}
	if (lro && (rack->rc_rack_tmr_std_based == 0)) {
		/* It must be set, if not you get 1/4 rtt */
		if (rack->r_ctl.rc_reorder_shift)
			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
		else
			thresh += (srtt >> 2);
	}
	if (rack->rc_rack_use_dsack &&
	    lro &&
	    (rack->r_ctl.num_dsack > 0)) {
		/*
		 * We only increase the reordering window if we
		 * have seen reordering <and> we have a DSACK count.
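		 * For example, with an srtt of 40000 usecs and num_dsack of
		 * 2, the window grows by 2 * (40000 >> 2) = 20000 usecs,
		 * still subject to the srtt * 2 and rack_rto_max ceilings
		 * applied just below.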
5171 */ 5172 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5173 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5174 } 5175 /* SRTT * 2 is the ceiling */ 5176 if (thresh > (srtt * 2)) { 5177 thresh = srtt * 2; 5178 } 5179 /* And we don't want it above the RTO max either */ 5180 if (thresh > rack_rto_max) { 5181 thresh = rack_rto_max; 5182 } 5183 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5184 return (thresh); 5185 } 5186 5187 static uint32_t 5188 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5189 struct rack_sendmap *rsm, uint32_t srtt) 5190 { 5191 struct rack_sendmap *prsm; 5192 uint32_t thresh, len; 5193 int segsiz; 5194 5195 if (srtt == 0) 5196 srtt = 1; 5197 if (rack->r_ctl.rc_tlp_threshold) 5198 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5199 else 5200 thresh = (srtt * 2); 5201 5202 /* Get the previous sent packet, if any */ 5203 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5204 counter_u64_add(rack_enter_tlp_calc, 1); 5205 len = rsm->r_end - rsm->r_start; 5206 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5207 /* Exactly like the ID */ 5208 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5209 uint32_t alt_thresh; 5210 /* 5211 * Compensate for delayed-ack with the d-ack time. 5212 */ 5213 counter_u64_add(rack_used_tlpmethod, 1); 5214 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5215 if (alt_thresh > thresh) 5216 thresh = alt_thresh; 5217 } 5218 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5219 /* 2.1 behavior */ 5220 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5221 if (prsm && (len <= segsiz)) { 5222 /* 5223 * Two packets outstanding, thresh should be (2*srtt) + 5224 * possible inter-packet delay (if any). 5225 */ 5226 uint32_t inter_gap = 0; 5227 int idx, nidx; 5228 5229 counter_u64_add(rack_used_tlpmethod, 1); 5230 idx = rsm->r_rtr_cnt - 1; 5231 nidx = prsm->r_rtr_cnt - 1; 5232 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5233 /* Yes it was sent later (or at the same time) */ 5234 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5235 } 5236 thresh += inter_gap; 5237 } else if (len <= segsiz) { 5238 /* 5239 * Possibly compensate for delayed-ack. 5240 */ 5241 uint32_t alt_thresh; 5242 5243 counter_u64_add(rack_used_tlpmethod2, 1); 5244 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5245 if (alt_thresh > thresh) 5246 thresh = alt_thresh; 5247 } 5248 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5249 /* 2.2 behavior */ 5250 if (len <= segsiz) { 5251 uint32_t alt_thresh; 5252 /* 5253 * Compensate for delayed-ack with the d-ack time. 5254 */ 5255 counter_u64_add(rack_used_tlpmethod, 1); 5256 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5257 if (alt_thresh > thresh) 5258 thresh = alt_thresh; 5259 } 5260 } 5261 /* Not above an RTO */ 5262 if (thresh > tp->t_rxtcur) { 5263 thresh = tp->t_rxtcur; 5264 } 5265 /* Not above a RTO max */ 5266 if (thresh > rack_rto_max) { 5267 thresh = rack_rto_max; 5268 } 5269 /* Apply user supplied min TLP */ 5270 if (thresh < rack_tlp_min) { 5271 thresh = rack_tlp_min; 5272 } 5273 return (thresh); 5274 } 5275 5276 static uint32_t 5277 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5278 { 5279 /* 5280 * We want the rack_rtt which is the 5281 * last rtt we measured. 
However if that 5282 * does not exist we fallback to the srtt (which 5283 * we probably will never do) and then as a last 5284 * resort we use RACK_INITIAL_RTO if no srtt is 5285 * yet set. 5286 */ 5287 if (rack->rc_rack_rtt) 5288 return (rack->rc_rack_rtt); 5289 else if (tp->t_srtt == 0) 5290 return (RACK_INITIAL_RTO); 5291 return (tp->t_srtt); 5292 } 5293 5294 static struct rack_sendmap * 5295 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5296 { 5297 /* 5298 * Check to see that we don't need to fall into recovery. We will 5299 * need to do so if our oldest transmit is past the time we should 5300 * have had an ack. 5301 */ 5302 struct tcp_rack *rack; 5303 struct rack_sendmap *rsm; 5304 int32_t idx; 5305 uint32_t srtt, thresh; 5306 5307 rack = (struct tcp_rack *)tp->t_fb_ptr; 5308 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5309 return (NULL); 5310 } 5311 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5312 if (rsm == NULL) 5313 return (NULL); 5314 5315 if (rsm->r_flags & RACK_ACKED) { 5316 rsm = rack_find_lowest_rsm(rack); 5317 if (rsm == NULL) 5318 return (NULL); 5319 } 5320 idx = rsm->r_rtr_cnt - 1; 5321 srtt = rack_grab_rtt(tp, rack); 5322 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5323 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5324 return (NULL); 5325 } 5326 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5327 return (NULL); 5328 } 5329 /* Ok if we reach here we are over-due and this guy can be sent */ 5330 if (IN_RECOVERY(tp->t_flags) == 0) { 5331 /* 5332 * For the one that enters us into recovery record undo 5333 * info. 5334 */ 5335 rack->r_ctl.rc_rsm_start = rsm->r_start; 5336 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5337 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5338 } 5339 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5340 return (rsm); 5341 } 5342 5343 static uint32_t 5344 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5345 { 5346 int32_t t; 5347 int32_t tt; 5348 uint32_t ret_val; 5349 5350 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5351 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5352 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5353 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 5354 tp->t_rxtshift++; 5355 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5356 ret_val = (uint32_t)tt; 5357 return (ret_val); 5358 } 5359 5360 static uint32_t 5361 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5362 { 5363 /* 5364 * Start the FR timer, we do this based on getting the first one in 5365 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5366 * events we need to stop the running timer (if its running) before 5367 * starting the new one. 
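 *
 * Roughly, the decision below: nothing on the send map (or sup_rack
 * set) falls back to the RXT timer; an un-acked rsm that has been
 * SACK-passed or has reached the dup-ack threshold arms a RACK timer;
 * otherwise we arm a TLP, unless the TLP is ruled out (FIN-only
 * outstanding, a classified attacker, or a TLP time beyond the max
 * RTO), in which case it is the RXT timer again.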
5368 */ 5369 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5370 uint32_t srtt_cur; 5371 int32_t idx; 5372 int32_t is_tlp_timer = 0; 5373 struct rack_sendmap *rsm; 5374 5375 if (rack->t_timers_stopped) { 5376 /* All timers have been stopped none are to run */ 5377 return (0); 5378 } 5379 if (rack->rc_in_persist) { 5380 /* We can't start any timer in persists */ 5381 return (rack_get_persists_timer_val(tp, rack)); 5382 } 5383 rack->rc_on_min_to = 0; 5384 if ((tp->t_state < TCPS_ESTABLISHED) || 5385 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5386 goto activate_rxt; 5387 } 5388 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5389 if ((rsm == NULL) || sup_rack) { 5390 /* Nothing on the send map or no rack */ 5391 activate_rxt: 5392 time_since_sent = 0; 5393 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5394 if (rsm) { 5395 /* 5396 * Should we discount the RTX timer any? 5397 * 5398 * We want to discount it the smallest amount. 5399 * If a timer (Rack/TLP or RXT) has gone off more 5400 * recently thats the discount we want to use (now - timer time). 5401 * If the retransmit of the oldest packet was more recent then 5402 * we want to use that (now - oldest-packet-last_transmit_time). 5403 * 5404 */ 5405 idx = rsm->r_rtr_cnt - 1; 5406 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5407 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5408 else 5409 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5410 if (TSTMP_GT(cts, tstmp_touse)) 5411 time_since_sent = cts - tstmp_touse; 5412 } 5413 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5414 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5415 to = tp->t_rxtcur; 5416 if (to > time_since_sent) 5417 to -= time_since_sent; 5418 else 5419 to = rack->r_ctl.rc_min_to; 5420 if (to == 0) 5421 to = 1; 5422 /* Special case for KEEPINIT */ 5423 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5424 (TP_KEEPINIT(tp) != 0) && 5425 rsm) { 5426 /* 5427 * We have to put a ceiling on the rxt timer 5428 * of the keep-init timeout. 5429 */ 5430 uint32_t max_time, red; 5431 5432 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5433 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5434 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5435 if (red < max_time) 5436 max_time -= red; 5437 else 5438 max_time = 1; 5439 } 5440 /* Reduce timeout to the keep value if needed */ 5441 if (max_time < to) 5442 to = max_time; 5443 } 5444 return (to); 5445 } 5446 return (0); 5447 } 5448 if (rsm->r_flags & RACK_ACKED) { 5449 rsm = rack_find_lowest_rsm(rack); 5450 if (rsm == NULL) { 5451 /* No lowest? */ 5452 goto activate_rxt; 5453 } 5454 } 5455 if (rack->sack_attack_disable) { 5456 /* 5457 * We don't want to do 5458 * any TLP's if you are an attacker. 5459 * Though if you are doing what 5460 * is expected you may still have 5461 * SACK-PASSED marks. 5462 */ 5463 goto activate_rxt; 5464 } 5465 /* Convert from ms to usecs */ 5466 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5467 if ((tp->t_flags & TF_SENTFIN) && 5468 ((tp->snd_max - tp->snd_una) == 1) && 5469 (rsm->r_flags & RACK_HAS_FIN)) { 5470 /* 5471 * We don't start a rack timer if all we have is a 5472 * FIN outstanding. 
5473 */ 5474 goto activate_rxt; 5475 } 5476 if ((rack->use_rack_rr == 0) && 5477 (IN_FASTRECOVERY(tp->t_flags)) && 5478 (rack->rack_no_prr == 0) && 5479 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5480 /* 5481 * We are not cheating, in recovery and 5482 * not enough ack's to yet get our next 5483 * retransmission out. 5484 * 5485 * Note that classified attackers do not 5486 * get to use the rack-cheat. 5487 */ 5488 goto activate_tlp; 5489 } 5490 srtt = rack_grab_rtt(tp, rack); 5491 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5492 idx = rsm->r_rtr_cnt - 1; 5493 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5494 if (SEQ_GEQ(exp, cts)) { 5495 to = exp - cts; 5496 if (to < rack->r_ctl.rc_min_to) { 5497 to = rack->r_ctl.rc_min_to; 5498 if (rack->r_rr_config == 3) 5499 rack->rc_on_min_to = 1; 5500 } 5501 } else { 5502 to = rack->r_ctl.rc_min_to; 5503 if (rack->r_rr_config == 3) 5504 rack->rc_on_min_to = 1; 5505 } 5506 } else { 5507 /* Ok we need to do a TLP not RACK */ 5508 activate_tlp: 5509 if ((rack->rc_tlp_in_progress != 0) && 5510 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5511 /* 5512 * The previous send was a TLP and we have sent 5513 * N TLP's without sending new data. 5514 */ 5515 goto activate_rxt; 5516 } 5517 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5518 if (rsm == NULL) { 5519 /* We found no rsm to TLP with. */ 5520 goto activate_rxt; 5521 } 5522 if (rsm->r_flags & RACK_HAS_FIN) { 5523 /* If its a FIN we dont do TLP */ 5524 rsm = NULL; 5525 goto activate_rxt; 5526 } 5527 idx = rsm->r_rtr_cnt - 1; 5528 time_since_sent = 0; 5529 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5530 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5531 else 5532 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5533 if (TSTMP_GT(cts, tstmp_touse)) 5534 time_since_sent = cts - tstmp_touse; 5535 is_tlp_timer = 1; 5536 if (tp->t_srtt) { 5537 if ((rack->rc_srtt_measure_made == 0) && 5538 (tp->t_srtt == 1)) { 5539 /* 5540 * If another stack as run and set srtt to 1, 5541 * then the srtt was 0, so lets use the initial. 5542 */ 5543 srtt = RACK_INITIAL_RTO; 5544 } else { 5545 srtt_cur = tp->t_srtt; 5546 srtt = srtt_cur; 5547 } 5548 } else 5549 srtt = RACK_INITIAL_RTO; 5550 /* 5551 * If the SRTT is not keeping up and the 5552 * rack RTT has spiked we want to use 5553 * the last RTT not the smoothed one. 5554 */ 5555 if (rack_tlp_use_greater && 5556 tp->t_srtt && 5557 (srtt < rack_grab_rtt(tp, rack))) { 5558 srtt = rack_grab_rtt(tp, rack); 5559 } 5560 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5561 if (thresh > time_since_sent) { 5562 to = thresh - time_since_sent; 5563 } else { 5564 to = rack->r_ctl.rc_min_to; 5565 rack_log_alt_to_to_cancel(rack, 5566 thresh, /* flex1 */ 5567 time_since_sent, /* flex2 */ 5568 tstmp_touse, /* flex3 */ 5569 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5570 (uint32_t)rsm->r_tim_lastsent[idx], 5571 srtt, 5572 idx, 99); 5573 } 5574 if (to < rack_tlp_min) { 5575 to = rack_tlp_min; 5576 } 5577 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5578 /* 5579 * If the TLP time works out to larger than the max 5580 * RTO lets not do TLP.. just RTO. 
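			 * (TCPTV_REXMTMAX is nominally 64 seconds, so with
			 * the caps already applied in rack_calc_thresh_tlp()
			 * this guard should rarely, if ever, trip under
			 * default settings.)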
5581 */ 5582 goto activate_rxt; 5583 } 5584 } 5585 if (is_tlp_timer == 0) { 5586 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5587 } else { 5588 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5589 } 5590 if (to == 0) 5591 to = 1; 5592 return (to); 5593 } 5594 5595 static void 5596 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5597 { 5598 if (rack->rc_in_persist == 0) { 5599 if (tp->t_flags & TF_GPUTINPROG) { 5600 /* 5601 * Stop the goodput now, the calling of the 5602 * measurement function clears the flag. 5603 */ 5604 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5605 RACK_QUALITY_PERSIST); 5606 } 5607 #ifdef NETFLIX_SHARED_CWND 5608 if (rack->r_ctl.rc_scw) { 5609 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5610 rack->rack_scwnd_is_idle = 1; 5611 } 5612 #endif 5613 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5614 if (rack->r_ctl.rc_went_idle_time == 0) 5615 rack->r_ctl.rc_went_idle_time = 1; 5616 rack_timer_cancel(tp, rack, cts, __LINE__); 5617 tp->t_rxtshift = 0; 5618 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5619 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5620 rack->rc_in_persist = 1; 5621 } 5622 } 5623 5624 static void 5625 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5626 { 5627 if (rack->rc_inp->inp_in_hpts) { 5628 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 5629 rack->r_ctl.rc_hpts_flags = 0; 5630 } 5631 #ifdef NETFLIX_SHARED_CWND 5632 if (rack->r_ctl.rc_scw) { 5633 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5634 rack->rack_scwnd_is_idle = 0; 5635 } 5636 #endif 5637 if (rack->rc_gp_dyn_mul && 5638 (rack->use_fixed_rate == 0) && 5639 (rack->rc_always_pace)) { 5640 /* 5641 * Do we count this as if a probe-rtt just 5642 * finished? 5643 */ 5644 uint32_t time_idle, idle_min; 5645 5646 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5647 idle_min = rack_min_probertt_hold; 5648 if (rack_probertt_gpsrtt_cnt_div) { 5649 uint64_t extra; 5650 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5651 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5652 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5653 idle_min += (uint32_t)extra; 5654 } 5655 if (time_idle >= idle_min) { 5656 /* Yes, we count it as a probe-rtt. 
*/ 5657 uint32_t us_cts; 5658 5659 us_cts = tcp_get_usecs(NULL); 5660 if (rack->in_probe_rtt == 0) { 5661 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5662 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5663 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5664 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5665 } else { 5666 rack_exit_probertt(rack, us_cts); 5667 } 5668 } 5669 } 5670 rack->rc_in_persist = 0; 5671 rack->r_ctl.rc_went_idle_time = 0; 5672 tp->t_rxtshift = 0; 5673 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5674 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5675 rack->r_ctl.rc_agg_delayed = 0; 5676 rack->r_early = 0; 5677 rack->r_late = 0; 5678 rack->r_ctl.rc_agg_early = 0; 5679 } 5680 5681 static void 5682 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5683 struct hpts_diag *diag, struct timeval *tv) 5684 { 5685 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5686 union tcp_log_stackspecific log; 5687 5688 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5689 log.u_bbr.flex1 = diag->p_nxt_slot; 5690 log.u_bbr.flex2 = diag->p_cur_slot; 5691 log.u_bbr.flex3 = diag->slot_req; 5692 log.u_bbr.flex4 = diag->inp_hptsslot; 5693 log.u_bbr.flex5 = diag->slot_remaining; 5694 log.u_bbr.flex6 = diag->need_new_to; 5695 log.u_bbr.flex7 = diag->p_hpts_active; 5696 log.u_bbr.flex8 = diag->p_on_min_sleep; 5697 /* Hijack other fields as needed */ 5698 log.u_bbr.epoch = diag->have_slept; 5699 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5700 log.u_bbr.pkts_out = diag->co_ret; 5701 log.u_bbr.applimited = diag->hpts_sleep_time; 5702 log.u_bbr.delivered = diag->p_prev_slot; 5703 log.u_bbr.inflight = diag->p_runningslot; 5704 log.u_bbr.bw_inuse = diag->wheel_slot; 5705 log.u_bbr.rttProp = diag->wheel_cts; 5706 log.u_bbr.timeStamp = cts; 5707 log.u_bbr.delRate = diag->maxslots; 5708 log.u_bbr.cur_del_rate = diag->p_curtick; 5709 log.u_bbr.cur_del_rate <<= 32; 5710 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5711 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5712 &rack->rc_inp->inp_socket->so_rcv, 5713 &rack->rc_inp->inp_socket->so_snd, 5714 BBR_LOG_HPTSDIAG, 0, 5715 0, &log, false, tv); 5716 } 5717 5718 } 5719 5720 static void 5721 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5722 { 5723 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5724 union tcp_log_stackspecific log; 5725 struct timeval tv; 5726 5727 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5728 log.u_bbr.flex1 = sb->sb_flags; 5729 log.u_bbr.flex2 = len; 5730 log.u_bbr.flex3 = sb->sb_state; 5731 log.u_bbr.flex8 = type; 5732 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5733 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5734 &rack->rc_inp->inp_socket->so_rcv, 5735 &rack->rc_inp->inp_socket->so_snd, 5736 TCP_LOG_SB_WAKE, 0, 5737 len, &log, false, &tv); 5738 } 5739 } 5740 5741 static void 5742 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5743 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5744 { 5745 struct hpts_diag diag; 5746 struct inpcb *inp; 5747 struct timeval tv; 5748 uint32_t delayed_ack = 0; 5749 uint32_t hpts_timeout; 5750 uint32_t entry_slot = slot; 5751 uint8_t stopped; 5752 uint32_t left = 0; 5753 uint32_t us_cts; 5754 5755 inp = tp->t_inpcb; 5756 if ((tp->t_state == TCPS_CLOSED) || 5757 (tp->t_state == TCPS_LISTEN)) { 5758 return; 5759 } 5760 if (inp->inp_in_hpts) { 5761 /* Already on the pacer */ 5762 return; 5763 } 5764 stopped 
= rack->rc_tmr_stopped; 5765 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5766 left = rack->r_ctl.rc_timer_exp - cts; 5767 } 5768 rack->r_ctl.rc_timer_exp = 0; 5769 rack->r_ctl.rc_hpts_flags = 0; 5770 us_cts = tcp_get_usecs(&tv); 5771 /* Now early/late accounting */ 5772 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5773 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5774 /* 5775 * We have a early carry over set, 5776 * we can always add more time so we 5777 * can always make this compensation. 5778 * 5779 * Note if ack's are allowed to wake us do not 5780 * penalize the next timer for being awoke 5781 * by an ack aka the rc_agg_early (non-paced mode). 5782 */ 5783 slot += rack->r_ctl.rc_agg_early; 5784 rack->r_early = 0; 5785 rack->r_ctl.rc_agg_early = 0; 5786 } 5787 if (rack->r_late) { 5788 /* 5789 * This is harder, we can 5790 * compensate some but it 5791 * really depends on what 5792 * the current pacing time is. 5793 */ 5794 if (rack->r_ctl.rc_agg_delayed >= slot) { 5795 /* 5796 * We can't compensate for it all. 5797 * And we have to have some time 5798 * on the clock. We always have a min 5799 * 10 slots (10 x 10 i.e. 100 usecs). 5800 */ 5801 if (slot <= HPTS_TICKS_PER_SLOT) { 5802 /* We gain delay */ 5803 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5804 slot = HPTS_TICKS_PER_SLOT; 5805 } else { 5806 /* We take off some */ 5807 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5808 slot = HPTS_TICKS_PER_SLOT; 5809 } 5810 } else { 5811 slot -= rack->r_ctl.rc_agg_delayed; 5812 rack->r_ctl.rc_agg_delayed = 0; 5813 /* Make sure we have 100 useconds at minimum */ 5814 if (slot < HPTS_TICKS_PER_SLOT) { 5815 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5816 slot = HPTS_TICKS_PER_SLOT; 5817 } 5818 if (rack->r_ctl.rc_agg_delayed == 0) 5819 rack->r_late = 0; 5820 } 5821 } 5822 if (slot) { 5823 /* We are pacing too */ 5824 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5825 } 5826 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5827 #ifdef NETFLIX_EXP_DETECTION 5828 if (rack->sack_attack_disable && 5829 (slot < tcp_sad_pacing_interval)) { 5830 /* 5831 * We have a potential attacker on 5832 * the line. We have possibly some 5833 * (or now) pacing time set. We want to 5834 * slow down the processing of sacks by some 5835 * amount (if it is an attacker). Set the default 5836 * slot for attackers in place (unless the orginal 5837 * interval is longer). Its stored in 5838 * micro-seconds, so lets convert to msecs. 5839 */ 5840 slot = tcp_sad_pacing_interval; 5841 } 5842 #endif 5843 if (tp->t_flags & TF_DELACK) { 5844 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5845 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5846 } 5847 if (delayed_ack && ((hpts_timeout == 0) || 5848 (delayed_ack < hpts_timeout))) 5849 hpts_timeout = delayed_ack; 5850 else 5851 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5852 /* 5853 * If no timers are going to run and we will fall off the hptsi 5854 * wheel, we resort to a keep-alive timer if its configured. 5855 */ 5856 if ((hpts_timeout == 0) && 5857 (slot == 0)) { 5858 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5859 (tp->t_state <= TCPS_CLOSING)) { 5860 /* 5861 * Ok we have no timer (persists, rack, tlp, rxt or 5862 * del-ack), we don't have segments being paced. So 5863 * all that is left is the keepalive timer. 
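			 * (With the stock defaults TP_KEEPIDLE works out to
			 * roughly two hours for an established connection,
			 * so this is very much a timer of last resort.)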
5864 */ 5865 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5866 /* Get the established keep-alive time */ 5867 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5868 } else { 5869 /* 5870 * Get the initial setup keep-alive time, 5871 * note that this is probably not going to 5872 * happen, since rack will be running a rxt timer 5873 * if a SYN of some sort is outstanding. It is 5874 * actually handled in rack_timeout_rxt(). 5875 */ 5876 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5877 } 5878 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5879 if (rack->in_probe_rtt) { 5880 /* 5881 * We want to instead not wake up a long time from 5882 * now but to wake up about the time we would 5883 * exit probe-rtt and initiate a keep-alive ack. 5884 * This will get us out of probe-rtt and update 5885 * our min-rtt. 5886 */ 5887 hpts_timeout = rack_min_probertt_hold; 5888 } 5889 } 5890 } 5891 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5892 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5893 /* 5894 * RACK, TLP, persists and RXT timers all are restartable 5895 * based on actions input .. i.e we received a packet (ack 5896 * or sack) and that changes things (rw, or snd_una etc). 5897 * Thus we can restart them with a new value. For 5898 * keep-alive, delayed_ack we keep track of what was left 5899 * and restart the timer with a smaller value. 5900 */ 5901 if (left < hpts_timeout) 5902 hpts_timeout = left; 5903 } 5904 if (hpts_timeout) { 5905 /* 5906 * Hack alert for now we can't time-out over 2,147,483 5907 * seconds (a bit more than 596 hours), which is probably ok 5908 * :). 5909 */ 5910 if (hpts_timeout > 0x7ffffffe) 5911 hpts_timeout = 0x7ffffffe; 5912 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5913 } 5914 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5915 if ((rack->gp_ready == 0) && 5916 (rack->use_fixed_rate == 0) && 5917 (hpts_timeout < slot) && 5918 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5919 /* 5920 * We have no good estimate yet for the 5921 * old clunky burst mitigation or the 5922 * real pacing. And the tlp or rxt is smaller 5923 * than the pacing calculation. Lets not 5924 * pace that long since we know the calculation 5925 * so far is not accurate. 5926 */ 5927 slot = hpts_timeout; 5928 } 5929 rack->r_ctl.last_pacing_time = slot; 5930 /** 5931 * Turn off all the flags for queuing by default. The 5932 * flags have important meanings to what happens when 5933 * LRO interacts with the transport. Most likely (by default now) 5934 * mbuf_queueing and ack compression are on. So the transport 5935 * has a couple of flags that control what happens (if those 5936 * are not on then these flags won't have any effect since it 5937 * won't go through the queuing LRO path). 5938 * 5939 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5940 * pacing output, so don't disturb. But 5941 * it also means LRO can wake me if there 5942 * is a SACK arrival. 5943 * 5944 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5945 * with the above flag (QUEUE_READY) and 5946 * when present it says don't even wake me 5947 * if a SACK arrives. 5948 * 5949 * The idea behind these flags is that if we are pacing we 5950 * set the MBUF_QUEUE_READY and only get woken up if 5951 * a SACK arrives (which could change things) or if 5952 * our pacing timer expires. If, however, we have a rack 5953 * timer running, then we don't even want a sack to wake 5954 * us since the rack timer has to expire before we can send. 
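	 *
	 * Summarizing the settings made below: a pacing slot alone sets
	 * INP_MBUF_QUEUE_READY; a pacing slot combined with a RACK timer
	 * (and r_rr_config != 3) additionally sets INP_DONT_SACK_QUEUE;
	 * and if rc_ack_can_sendout_data is on we back both flags out
	 * again.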
5955 * 5956 * Other cases should usually have none of the flags set 5957 * so LRO can call into us. 5958 */ 5959 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5960 if (slot) { 5961 rack->r_ctl.rc_last_output_to = us_cts + slot; 5962 /* 5963 * A pacing timer (slot) is being set, in 5964 * such a case we cannot send (we are blocked by 5965 * the timer). So lets tell LRO that it should not 5966 * wake us unless there is a SACK. Note this only 5967 * will be effective if mbuf queueing is on or 5968 * compressed acks are being processed. 5969 */ 5970 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5971 /* 5972 * But wait if we have a Rack timer running 5973 * even a SACK should not disturb us (with 5974 * the exception of r_rr_config 3). 5975 */ 5976 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5977 (rack->r_rr_config != 3)) 5978 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5979 if (rack->rc_ack_can_sendout_data) { 5980 /* 5981 * Ahh but wait, this is that special case 5982 * where the pacing timer can be disturbed 5983 * backout the changes (used for non-paced 5984 * burst limiting). 5985 */ 5986 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5987 } 5988 if ((rack->use_rack_rr) && 5989 (rack->r_rr_config < 2) && 5990 ((hpts_timeout) && (hpts_timeout < slot))) { 5991 /* 5992 * Arrange for the hpts to kick back in after the 5993 * t-o if the t-o does not cause a send. 5994 */ 5995 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5996 __LINE__, &diag); 5997 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5998 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5999 } else { 6000 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 6001 __LINE__, &diag); 6002 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6003 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 6004 } 6005 } else if (hpts_timeout) { 6006 /* 6007 * With respect to inp_flags2 here, lets let any new acks wake 6008 * us up here. Since we are not pacing (no pacing timer), output 6009 * can happen so we should let it. If its a Rack timer, then any inbound 6010 * packet probably won't change the sending (we will be blocked) 6011 * but it may change the prr stats so letting it in (the set defaults 6012 * at the start of this block) are good enough. 6013 */ 6014 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 6015 __LINE__, &diag); 6016 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 6017 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 6018 } else { 6019 /* No timer starting */ 6020 #ifdef INVARIANTS 6021 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 6022 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 6023 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 6024 } 6025 #endif 6026 } 6027 rack->rc_tmr_stopped = 0; 6028 if (slot) 6029 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 6030 } 6031 6032 /* 6033 * RACK Timer, here we simply do logging and house keeping. 6034 * the normal rack_output() function will call the 6035 * appropriate thing to check if we need to do a RACK retransmit. 6036 * We return 1, saying don't proceed with rack_output only 6037 * when all timers have been stopped (destroyed PCB?). 6038 */ 6039 static int 6040 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6041 { 6042 /* 6043 * This timer simply provides an internal trigger to send out data. 
6044 * The check_recovery_mode call will see if there are needed 6045 * retransmissions, if so we will enter fast-recovery. The output 6046 * call may or may not do the same thing depending on sysctl 6047 * settings. 6048 */ 6049 struct rack_sendmap *rsm; 6050 6051 if (tp->t_timers->tt_flags & TT_STOPPED) { 6052 return (1); 6053 } 6054 counter_u64_add(rack_to_tot, 1); 6055 if (rack->r_state && (rack->r_state != tp->t_state)) 6056 rack_set_state(tp, rack); 6057 rack->rc_on_min_to = 0; 6058 rsm = rack_check_recovery_mode(tp, cts); 6059 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 6060 if (rsm) { 6061 rack->r_ctl.rc_resend = rsm; 6062 rack->r_timer_override = 1; 6063 if (rack->use_rack_rr) { 6064 /* 6065 * Don't accumulate extra pacing delay 6066 * we are allowing the rack timer to 6067 * over-ride pacing i.e. rrr takes precedence 6068 * if the pacing interval is longer than the rrr 6069 * time (in other words we get the min pacing 6070 * time versus rrr pacing time). 6071 */ 6072 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6073 } 6074 } 6075 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 6076 if (rsm == NULL) { 6077 /* restart a timer and return 1 */ 6078 rack_start_hpts_timer(rack, tp, cts, 6079 0, 0, 0); 6080 return (1); 6081 } 6082 return (0); 6083 } 6084 6085 static void 6086 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 6087 { 6088 if (rsm->m->m_len > rsm->orig_m_len) { 6089 /* 6090 * Mbuf grew, caused by sbcompress, our offset does 6091 * not change. 6092 */ 6093 rsm->orig_m_len = rsm->m->m_len; 6094 } else if (rsm->m->m_len < rsm->orig_m_len) { 6095 /* 6096 * Mbuf shrank, trimmed off the top by an ack, our 6097 * offset changes. 6098 */ 6099 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6100 rsm->orig_m_len = rsm->m->m_len; 6101 } 6102 } 6103 6104 static void 6105 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6106 { 6107 struct mbuf *m; 6108 uint32_t soff; 6109 6110 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 6111 /* Fix up the orig_m_len and possibly the mbuf offset */ 6112 rack_adjust_orig_mlen(src_rsm); 6113 } 6114 m = src_rsm->m; 6115 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6116 while (soff >= m->m_len) { 6117 /* Move out past this mbuf */ 6118 soff -= m->m_len; 6119 m = m->m_next; 6120 KASSERT((m != NULL), 6121 ("rsm:%p nrsm:%p hit at soff:%u null m", 6122 src_rsm, rsm, soff)); 6123 } 6124 rsm->m = m; 6125 rsm->soff = soff; 6126 rsm->orig_m_len = m->m_len; 6127 } 6128 6129 static __inline void 6130 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 6131 struct rack_sendmap *rsm, uint32_t start) 6132 { 6133 int idx; 6134 6135 nrsm->r_start = start; 6136 nrsm->r_end = rsm->r_end; 6137 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6138 nrsm->r_flags = rsm->r_flags; 6139 nrsm->r_dupack = rsm->r_dupack; 6140 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6141 nrsm->r_rtr_bytes = 0; 6142 rsm->r_end = nrsm->r_start; 6143 nrsm->r_just_ret = rsm->r_just_ret; 6144 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6145 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6146 } 6147 /* Now if we have SYN flag we keep it on the left edge */ 6148 if (nrsm->r_flags & RACK_HAS_SYN) 6149 nrsm->r_flags &= ~RACK_HAS_SYN; 6150 /* Now if we have a FIN flag we keep it on the right edge */ 6151 if (rsm->r_flags & RACK_HAS_FIN) 6152 rsm->r_flags &= ~RACK_HAS_FIN; 6153 /* Push bit must go to the right edge as well */ 6154 if (rsm->r_flags & RACK_HAD_PUSH) 6155 rsm->r_flags &= ~RACK_HAD_PUSH; 6156 /* Clone over the state 
of the hw_tls flag */ 6157 nrsm->r_hw_tls = rsm->r_hw_tls; 6158 /* 6159 * Now we need to find nrsm's new location in the mbuf chain 6160 * we basically calculate a new offset, which is soff + 6161 * how much is left in original rsm. Then we walk out the mbuf 6162 * chain to find the righ postion, it may be the same mbuf 6163 * or maybe not. 6164 */ 6165 KASSERT(((rsm->m != NULL) || 6166 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6167 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6168 if (rsm->m) 6169 rack_setup_offset_for_rsm(rsm, nrsm); 6170 } 6171 6172 static struct rack_sendmap * 6173 rack_merge_rsm(struct tcp_rack *rack, 6174 struct rack_sendmap *l_rsm, 6175 struct rack_sendmap *r_rsm) 6176 { 6177 /* 6178 * We are merging two ack'd RSM's, 6179 * the l_rsm is on the left (lower seq 6180 * values) and the r_rsm is on the right 6181 * (higher seq value). The simplest way 6182 * to merge these is to move the right 6183 * one into the left. I don't think there 6184 * is any reason we need to try to find 6185 * the oldest (or last oldest retransmitted). 6186 */ 6187 struct rack_sendmap *rm; 6188 6189 rack_log_map_chg(rack->rc_tp, rack, NULL, 6190 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6191 l_rsm->r_end = r_rsm->r_end; 6192 if (l_rsm->r_dupack < r_rsm->r_dupack) 6193 l_rsm->r_dupack = r_rsm->r_dupack; 6194 if (r_rsm->r_rtr_bytes) 6195 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6196 if (r_rsm->r_in_tmap) { 6197 /* This really should not happen */ 6198 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6199 r_rsm->r_in_tmap = 0; 6200 } 6201 6202 /* Now the flags */ 6203 if (r_rsm->r_flags & RACK_HAS_FIN) 6204 l_rsm->r_flags |= RACK_HAS_FIN; 6205 if (r_rsm->r_flags & RACK_TLP) 6206 l_rsm->r_flags |= RACK_TLP; 6207 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6208 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6209 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6210 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6211 /* 6212 * If both are app-limited then let the 6213 * free lower the count. If right is app 6214 * limited and left is not, transfer. 6215 */ 6216 l_rsm->r_flags |= RACK_APP_LIMITED; 6217 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6218 if (r_rsm == rack->r_ctl.rc_first_appl) 6219 rack->r_ctl.rc_first_appl = l_rsm; 6220 } 6221 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6222 #ifdef INVARIANTS 6223 if (rm != r_rsm) { 6224 panic("removing head in rack:%p rsm:%p rm:%p", 6225 rack, r_rsm, rm); 6226 } 6227 #endif 6228 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6229 /* Transfer the split limit to the map we free */ 6230 r_rsm->r_limit_type = l_rsm->r_limit_type; 6231 l_rsm->r_limit_type = 0; 6232 } 6233 rack_free(rack, r_rsm); 6234 return (l_rsm); 6235 } 6236 6237 /* 6238 * TLP Timer, here we simply setup what segment we want to 6239 * have the TLP expire on, the normal rack_output() will then 6240 * send it out. 6241 * 6242 * We return 1, saying don't proceed with rack_output only 6243 * when all timers have been stopped (destroyed PCB?). 6244 */ 6245 static int 6246 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6247 { 6248 /* 6249 * Tail Loss Probe. 
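	 *
	 * When this fires we either stage new data to probe with (if some
	 * is available and fits in the peer's window) or pick an un-acked
	 * segment, typically the highest one, to re-send; rack_output()
	 * performs the actual transmit.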
6250 */ 6251 struct rack_sendmap *rsm = NULL; 6252 struct rack_sendmap *insret; 6253 struct socket *so; 6254 uint32_t amm; 6255 uint32_t out, avail; 6256 int collapsed_win = 0; 6257 6258 if (tp->t_timers->tt_flags & TT_STOPPED) { 6259 return (1); 6260 } 6261 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6262 /* Its not time yet */ 6263 return (0); 6264 } 6265 if (ctf_progress_timeout_check(tp, true)) { 6266 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6267 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6268 return (1); 6269 } 6270 /* 6271 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6272 * need to figure out how to force a full MSS segment out. 6273 */ 6274 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6275 rack->r_ctl.retran_during_recovery = 0; 6276 rack->r_ctl.dsack_byte_cnt = 0; 6277 counter_u64_add(rack_tlp_tot, 1); 6278 if (rack->r_state && (rack->r_state != tp->t_state)) 6279 rack_set_state(tp, rack); 6280 so = tp->t_inpcb->inp_socket; 6281 avail = sbavail(&so->so_snd); 6282 out = tp->snd_max - tp->snd_una; 6283 if (out > tp->snd_wnd) { 6284 /* special case, we need a retransmission */ 6285 collapsed_win = 1; 6286 goto need_retran; 6287 } 6288 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6289 rack->r_ctl.dsack_persist--; 6290 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6291 rack->r_ctl.num_dsack = 0; 6292 } 6293 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6294 } 6295 if ((tp->t_flags & TF_GPUTINPROG) && 6296 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6297 /* 6298 * If this is the second in a row 6299 * TLP and we are doing a measurement 6300 * its time to abandon the measurement. 6301 * Something is likely broken on 6302 * the clients network and measuring a 6303 * broken network does us no good. 6304 */ 6305 tp->t_flags &= ~TF_GPUTINPROG; 6306 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6307 rack->r_ctl.rc_gp_srtt /*flex1*/, 6308 tp->gput_seq, 6309 0, 0, 18, __LINE__, NULL, 0); 6310 } 6311 /* 6312 * Check our send oldest always settings, and if 6313 * there is an oldest to send jump to the need_retran. 6314 */ 6315 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6316 goto need_retran; 6317 6318 if (avail > out) { 6319 /* New data is available */ 6320 amm = avail - out; 6321 if (amm > ctf_fixed_maxseg(tp)) { 6322 amm = ctf_fixed_maxseg(tp); 6323 if ((amm + out) > tp->snd_wnd) { 6324 /* We are rwnd limited */ 6325 goto need_retran; 6326 } 6327 } else if (amm < ctf_fixed_maxseg(tp)) { 6328 /* not enough to fill a MTU */ 6329 goto need_retran; 6330 } 6331 if (IN_FASTRECOVERY(tp->t_flags)) { 6332 /* Unlikely */ 6333 if (rack->rack_no_prr == 0) { 6334 if (out + amm <= tp->snd_wnd) { 6335 rack->r_ctl.rc_prr_sndcnt = amm; 6336 rack->r_ctl.rc_tlp_new_data = amm; 6337 rack_log_to_prr(rack, 4, 0); 6338 } 6339 } else 6340 goto need_retran; 6341 } else { 6342 /* Set the send-new override */ 6343 if (out + amm <= tp->snd_wnd) 6344 rack->r_ctl.rc_tlp_new_data = amm; 6345 else 6346 goto need_retran; 6347 } 6348 rack->r_ctl.rc_tlpsend = NULL; 6349 counter_u64_add(rack_tlp_newdata, 1); 6350 goto send; 6351 } 6352 need_retran: 6353 /* 6354 * Ok we need to arrange the last un-acked segment to be re-sent, or 6355 * optionally the first un-acked segment. 
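	 * (In the collapsed-window case we instead walk backward to the
	 * last segment the peer's advertised window actually covered and
	 * probe with that one.)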
6356 */ 6357 if (collapsed_win == 0) { 6358 if (rack_always_send_oldest) 6359 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6360 else { 6361 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6362 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6363 rsm = rack_find_high_nonack(rack, rsm); 6364 } 6365 } 6366 if (rsm == NULL) { 6367 counter_u64_add(rack_tlp_does_nada, 1); 6368 #ifdef TCP_BLACKBOX 6369 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6370 #endif 6371 goto out; 6372 } 6373 } else { 6374 /* 6375 * We must find the last segment 6376 * that was acceptable by the client. 6377 */ 6378 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6379 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6380 /* Found one */ 6381 break; 6382 } 6383 } 6384 if (rsm == NULL) { 6385 /* None? if so send the first */ 6386 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6387 if (rsm == NULL) { 6388 counter_u64_add(rack_tlp_does_nada, 1); 6389 #ifdef TCP_BLACKBOX 6390 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6391 #endif 6392 goto out; 6393 } 6394 } 6395 } 6396 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6397 /* 6398 * We need to split this the last segment in two. 6399 */ 6400 struct rack_sendmap *nrsm; 6401 6402 nrsm = rack_alloc_full_limit(rack); 6403 if (nrsm == NULL) { 6404 /* 6405 * No memory to split, we will just exit and punt 6406 * off to the RXT timer. 6407 */ 6408 counter_u64_add(rack_tlp_does_nada, 1); 6409 goto out; 6410 } 6411 rack_clone_rsm(rack, nrsm, rsm, 6412 (rsm->r_end - ctf_fixed_maxseg(tp))); 6413 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6414 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6415 #ifdef INVARIANTS 6416 if (insret != NULL) { 6417 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6418 nrsm, insret, rack, rsm); 6419 } 6420 #endif 6421 if (rsm->r_in_tmap) { 6422 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6423 nrsm->r_in_tmap = 1; 6424 } 6425 rsm = nrsm; 6426 } 6427 rack->r_ctl.rc_tlpsend = rsm; 6428 send: 6429 /* Make sure output path knows we are doing a TLP */ 6430 *doing_tlp = 1; 6431 rack->r_timer_override = 1; 6432 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6433 return (0); 6434 out: 6435 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6436 return (0); 6437 } 6438 6439 /* 6440 * Delayed ack Timer, here we simply need to setup the 6441 * ACK_NOW flag and remove the DELACK flag. From there 6442 * the output routine will send the ack out. 6443 * 6444 * We only return 1, saying don't proceed, if all timers 6445 * are stopped (destroyed PCB?). 6446 */ 6447 static int 6448 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6449 { 6450 if (tp->t_timers->tt_flags & TT_STOPPED) { 6451 return (1); 6452 } 6453 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6454 tp->t_flags &= ~TF_DELACK; 6455 tp->t_flags |= TF_ACKNOW; 6456 KMOD_TCPSTAT_INC(tcps_delack); 6457 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6458 return (0); 6459 } 6460 6461 /* 6462 * Persists timer, here we simply send the 6463 * same thing as a keepalive will. 6464 * the one byte send. 6465 * 6466 * We only return 1, saying don't proceed, if all timers 6467 * are stopped (destroyed PCB?). 
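 *
 * The probe itself is built below with tcpip_maketemplate() and
 * sent as:
 *
 *	tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t,
 *	    NULL, tp->rcv_nxt, tp->snd_una - 1, 0);
 *
 * Using snd_una - 1 puts the zero-length segment just outside the
 * window the peer has already acknowledged, so a live peer must
 * answer with an ACK carrying its current window.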
6468 */ 6469 static int 6470 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6471 { 6472 struct tcptemp *t_template; 6473 struct inpcb *inp; 6474 int32_t retval = 1; 6475 6476 inp = tp->t_inpcb; 6477 6478 if (tp->t_timers->tt_flags & TT_STOPPED) { 6479 return (1); 6480 } 6481 if (rack->rc_in_persist == 0) 6482 return (0); 6483 if (ctf_progress_timeout_check(tp, false)) { 6484 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6485 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6486 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6487 return (1); 6488 } 6489 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6490 /* 6491 * Persistence timer into zero window. Force a byte to be output, if 6492 * possible. 6493 */ 6494 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6495 /* 6496 * Hack: if the peer is dead/unreachable, we do not time out if the 6497 * window is closed. After a full backoff, drop the connection if 6498 * the idle time (no responses to probes) reaches the maximum 6499 * backoff that we would use if retransmitting. 6500 */ 6501 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6502 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6503 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6504 KMOD_TCPSTAT_INC(tcps_persistdrop); 6505 retval = 1; 6506 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6507 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6508 goto out; 6509 } 6510 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6511 tp->snd_una == tp->snd_max) 6512 rack_exit_persist(tp, rack, cts); 6513 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6514 /* 6515 * If the user has closed the socket then drop a persisting 6516 * connection after a much reduced timeout. 6517 */ 6518 if (tp->t_state > TCPS_CLOSE_WAIT && 6519 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6520 retval = 1; 6521 KMOD_TCPSTAT_INC(tcps_persistdrop); 6522 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6523 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6524 goto out; 6525 } 6526 t_template = tcpip_maketemplate(rack->rc_inp); 6527 if (t_template) { 6528 /* only set it if we were answered */ 6529 if (rack->forced_ack == 0) { 6530 rack->forced_ack = 1; 6531 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6532 } 6533 tcp_respond(tp, t_template->tt_ipgen, 6534 &t_template->tt_t, (struct mbuf *)NULL, 6535 tp->rcv_nxt, tp->snd_una - 1, 0); 6536 /* This sends an ack */ 6537 if (tp->t_flags & TF_DELACK) 6538 tp->t_flags &= ~TF_DELACK; 6539 free(t_template, M_TEMP); 6540 } 6541 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6542 tp->t_rxtshift++; 6543 out: 6544 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6545 rack_start_hpts_timer(rack, tp, cts, 6546 0, 0, 0); 6547 return (retval); 6548 } 6549 6550 /* 6551 * If a keepalive goes off, we had no other timers 6552 * happening. We always return 1 here since this 6553 * routine either drops the connection or sends 6554 * out a segment with respond. 6555 */ 6556 static int 6557 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6558 { 6559 struct tcptemp *t_template; 6560 struct inpcb *inp; 6561 6562 if (tp->t_timers->tt_flags & TT_STOPPED) { 6563 return (1); 6564 } 6565 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6566 inp = tp->t_inpcb; 6567 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6568 /* 6569 * Keep-alive timer went off; send something or drop connection if 6570 * idle for too long. 
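 *
 * The drop test below is
 *
 *	ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)
 *
 * i.e. the peer has been silent for the keep-idle period plus the
 * whole probe window.  With the stock defaults (2 hour keep-idle,
 * 8 probes spaced 75 seconds apart) that is a little over 2 hours
 * and 10 minutes of silence before we give up.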
6571 */ 6572 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6573 if (tp->t_state < TCPS_ESTABLISHED) 6574 goto dropit; 6575 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6576 tp->t_state <= TCPS_CLOSING) { 6577 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6578 goto dropit; 6579 /* 6580 * Send a packet designed to force a response if the peer is 6581 * up and reachable: either an ACK if the connection is 6582 * still alive, or an RST if the peer has closed the 6583 * connection due to timeout or reboot. Using sequence 6584 * number tp->snd_una-1 causes the transmitted zero-length 6585 * segment to lie outside the receive window; by the 6586 * protocol spec, this requires the correspondent TCP to 6587 * respond. 6588 */ 6589 KMOD_TCPSTAT_INC(tcps_keepprobe); 6590 t_template = tcpip_maketemplate(inp); 6591 if (t_template) { 6592 if (rack->forced_ack == 0) { 6593 rack->forced_ack = 1; 6594 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6595 } 6596 tcp_respond(tp, t_template->tt_ipgen, 6597 &t_template->tt_t, (struct mbuf *)NULL, 6598 tp->rcv_nxt, tp->snd_una - 1, 0); 6599 free(t_template, M_TEMP); 6600 } 6601 } 6602 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6603 return (1); 6604 dropit: 6605 KMOD_TCPSTAT_INC(tcps_keepdrops); 6606 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6607 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6608 return (1); 6609 } 6610 6611 /* 6612 * Retransmit helper function, clear up all the ack 6613 * flags and take care of important book keeping. 6614 */ 6615 static void 6616 rack_remxt_tmr(struct tcpcb *tp) 6617 { 6618 /* 6619 * The retransmit timer went off, all sack'd blocks must be 6620 * un-acked. 6621 */ 6622 struct rack_sendmap *rsm, *trsm = NULL; 6623 struct tcp_rack *rack; 6624 6625 rack = (struct tcp_rack *)tp->t_fb_ptr; 6626 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6627 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6628 if (rack->r_state && (rack->r_state != tp->t_state)) 6629 rack_set_state(tp, rack); 6630 /* 6631 * Ideally we would like to be able to 6632 * mark SACK-PASS on anything not acked here. 6633 * 6634 * However, if we do that we would burst out 6635 * all that data 1ms apart. This would be unwise, 6636 * so for now we will just let the normal rxt timer 6637 * and tlp timer take care of it. 6638 * 6639 * Also we really need to stick them back in sequence 6640 * order. This way we send in the proper order and any 6641 * sacks that come floating in will "re-ack" the data. 6642 * To do this we zap the tmap with an INIT and then 6643 * walk through and place every rsm in the RB tree 6644 * back in its seq ordered place. 
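 *
 * For example, if the tmap was ordered by transmit time as
 * (rsm3, rsm1, rsm2) when the timer fired, after the rebuild below
 * it reads (rsm1, rsm2, rsm3) -- pure sequence order -- so
 * retransmissions go out lowest sequence first and any SACKs that
 * arrive line back up with what we resend.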
6645 */ 6646 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6647 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6648 rsm->r_dupack = 0; 6649 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6650 /* We must re-add it back to the tlist */ 6651 if (trsm == NULL) { 6652 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6653 } else { 6654 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6655 } 6656 rsm->r_in_tmap = 1; 6657 trsm = rsm; 6658 if (rsm->r_flags & RACK_ACKED) 6659 rsm->r_flags |= RACK_WAS_ACKED; 6660 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6661 } 6662 /* Clear the count (we just un-acked them) */ 6663 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6664 rack->r_ctl.rc_sacked = 0; 6665 rack->r_ctl.rc_sacklast = NULL; 6666 rack->r_ctl.rc_agg_delayed = 0; 6667 rack->r_early = 0; 6668 rack->r_ctl.rc_agg_early = 0; 6669 rack->r_late = 0; 6670 /* Clear the tlp rtx mark */ 6671 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6672 if (rack->r_ctl.rc_resend != NULL) 6673 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6674 rack->r_ctl.rc_prr_sndcnt = 0; 6675 rack_log_to_prr(rack, 6, 0); 6676 rack->r_timer_override = 1; 6677 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6678 #ifdef NETFLIX_EXP_DETECTION 6679 || (rack->sack_attack_disable != 0) 6680 #endif 6681 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6682 /* 6683 * For non-sack customers new data 6684 * needs to go out as retransmits until 6685 * we retransmit up to snd_max. 6686 */ 6687 rack->r_must_retran = 1; 6688 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6689 rack->r_ctl.rc_sacked); 6690 } 6691 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6692 } 6693 6694 static void 6695 rack_convert_rtts(struct tcpcb *tp) 6696 { 6697 if (tp->t_srtt > 1) { 6698 uint32_t val, frac; 6699 6700 val = tp->t_srtt >> TCP_RTT_SHIFT; 6701 frac = tp->t_srtt & 0x1f; 6702 tp->t_srtt = TICKS_2_USEC(val); 6703 /* 6704 * frac is the fractional part of the srtt (if any) 6705 * but its in ticks and every bit represents 6706 * 1/32nd of a hz. 6707 */ 6708 if (frac) { 6709 if (hz == 1000) { 6710 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6711 } else { 6712 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6713 } 6714 tp->t_srtt += frac; 6715 } 6716 } 6717 if (tp->t_rttvar) { 6718 uint32_t val, frac; 6719 6720 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6721 frac = tp->t_rttvar & 0x1f; 6722 tp->t_rttvar = TICKS_2_USEC(val); 6723 /* 6724 * frac is the fractional part of the srtt (if any) 6725 * but its in ticks and every bit represents 6726 * 1/32nd of a hz. 6727 */ 6728 if (frac) { 6729 if (hz == 1000) { 6730 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6731 } else { 6732 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6733 } 6734 tp->t_rttvar += frac; 6735 } 6736 } 6737 tp->t_rxtcur = RACK_REXMTVAL(tp); 6738 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6739 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6740 } 6741 if (tp->t_rxtcur > rack_rto_max) { 6742 tp->t_rxtcur = rack_rto_max; 6743 } 6744 } 6745 6746 static void 6747 rack_cc_conn_init(struct tcpcb *tp) 6748 { 6749 struct tcp_rack *rack; 6750 uint32_t srtt; 6751 6752 rack = (struct tcp_rack *)tp->t_fb_ptr; 6753 srtt = tp->t_srtt; 6754 cc_conn_init(tp); 6755 /* 6756 * Now convert to rack's internal format, 6757 * if required. 
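 *
 * The stock stack keeps srtt in ticks with a 5 bit fraction (hence
 * the 0x1f mask in rack_convert_rtts() above) while rack wants
 * plain microseconds.  As a worked example with hz = 1000 (one
 * tick = 1000 usec): a stored t_srtt of (100 << TCP_RTT_SHIFT) + 16
 * splits into val = 100 ticks and frac = 16/32 of a tick, i.e.
 *
 *	TICKS_2_USEC(100) + (16 * HPTS_USEC_IN_MSEC) / TCP_RTT_SCALE
 *	    = 100000 + 500 = 100500 usec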
6758 */ 6759 if ((srtt == 0) && (tp->t_srtt != 0)) 6760 rack_convert_rtts(tp); 6761 /* 6762 * We want a chance to stay in slowstart as 6763 * we create a connection. TCP spec says that 6764 * initially ssthresh is infinite. For our 6765 * purposes that is the snd_wnd. 6766 */ 6767 if (tp->snd_ssthresh < tp->snd_wnd) { 6768 tp->snd_ssthresh = tp->snd_wnd; 6769 } 6770 /* 6771 * We also want to assure a IW worth of 6772 * data can get inflight. 6773 */ 6774 if (rc_init_window(rack) < tp->snd_cwnd) 6775 tp->snd_cwnd = rc_init_window(rack); 6776 } 6777 6778 /* 6779 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6780 * we will setup to retransmit the lowest seq number outstanding. 6781 */ 6782 static int 6783 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6784 { 6785 int32_t rexmt; 6786 struct inpcb *inp; 6787 int32_t retval = 0; 6788 bool isipv6; 6789 6790 inp = tp->t_inpcb; 6791 if (tp->t_timers->tt_flags & TT_STOPPED) { 6792 return (1); 6793 } 6794 if ((tp->t_flags & TF_GPUTINPROG) && 6795 (tp->t_rxtshift)) { 6796 /* 6797 * We have had a second timeout 6798 * measurements on successive rxt's are not profitable. 6799 * It is unlikely to be of any use (the network is 6800 * broken or the client went away). 6801 */ 6802 tp->t_flags &= ~TF_GPUTINPROG; 6803 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6804 rack->r_ctl.rc_gp_srtt /*flex1*/, 6805 tp->gput_seq, 6806 0, 0, 18, __LINE__, NULL, 0); 6807 } 6808 if (ctf_progress_timeout_check(tp, false)) { 6809 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6810 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6811 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6812 return (1); 6813 } 6814 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6815 rack->r_ctl.retran_during_recovery = 0; 6816 rack->r_ctl.dsack_byte_cnt = 0; 6817 if (IN_FASTRECOVERY(tp->t_flags)) 6818 tp->t_flags |= TF_WASFRECOVERY; 6819 else 6820 tp->t_flags &= ~TF_WASFRECOVERY; 6821 if (IN_CONGRECOVERY(tp->t_flags)) 6822 tp->t_flags |= TF_WASCRECOVERY; 6823 else 6824 tp->t_flags &= ~TF_WASCRECOVERY; 6825 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6826 (tp->snd_una == tp->snd_max)) { 6827 /* Nothing outstanding .. nothing to do */ 6828 return (0); 6829 } 6830 if (rack->r_ctl.dsack_persist) { 6831 rack->r_ctl.dsack_persist--; 6832 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6833 rack->r_ctl.num_dsack = 0; 6834 } 6835 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6836 } 6837 /* 6838 * Rack can only run one timer at a time, so we cannot 6839 * run a KEEPINIT (gating SYN sending) and a retransmit 6840 * timer for the SYN. So if we are in a front state and 6841 * have a KEEPINIT timer we need to check the first transmit 6842 * against now to see if we have exceeded the KEEPINIT time 6843 * (if one is set). 6844 */ 6845 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6846 (TP_KEEPINIT(tp) != 0)) { 6847 struct rack_sendmap *rsm; 6848 6849 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6850 if (rsm) { 6851 /* Ok we have something outstanding to test keepinit with */ 6852 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6853 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6854 /* We have exceeded the KEEPINIT time */ 6855 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6856 goto drop_it; 6857 } 6858 } 6859 } 6860 /* 6861 * Retransmission timer went off. Message has not been acked within 6862 * retransmit interval. 
Back off to a longer retransmit interval 6863 * and retransmit one segment. 6864 */ 6865 rack_remxt_tmr(tp); 6866 if ((rack->r_ctl.rc_resend == NULL) || 6867 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6868 /* 6869 * If the rwnd collapsed on 6870 * the one we are retransmitting 6871 * it does not count against the 6872 * rxt count. 6873 */ 6874 tp->t_rxtshift++; 6875 } 6876 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6877 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6878 drop_it: 6879 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6880 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6881 retval = 1; 6882 tcp_set_inp_to_drop(rack->rc_inp, 6883 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 6884 goto out; 6885 } 6886 if (tp->t_state == TCPS_SYN_SENT) { 6887 /* 6888 * If the SYN was retransmitted, indicate CWND to be limited 6889 * to 1 segment in cc_conn_init(). 6890 */ 6891 tp->snd_cwnd = 1; 6892 } else if (tp->t_rxtshift == 1) { 6893 /* 6894 * first retransmit; record ssthresh and cwnd so they can be 6895 * recovered if this turns out to be a "bad" retransmit. A 6896 * retransmit is considered "bad" if an ACK for this segment 6897 * is received within RTT/2 interval; the assumption here is 6898 * that the ACK was already in flight. See "On Estimating 6899 * End-to-End Network Path Properties" by Allman and Paxson 6900 * for more details. 6901 */ 6902 tp->snd_cwnd_prev = tp->snd_cwnd; 6903 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6904 tp->snd_recover_prev = tp->snd_recover; 6905 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6906 tp->t_flags |= TF_PREVVALID; 6907 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6908 tp->t_flags &= ~TF_PREVVALID; 6909 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6910 if ((tp->t_state == TCPS_SYN_SENT) || 6911 (tp->t_state == TCPS_SYN_RECEIVED)) 6912 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6913 else 6914 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6915 6916 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6917 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6918 /* 6919 * We enter the path for PLMTUD if connection is established or, if 6920 * connection is FIN_WAIT_1 status, reason for the last is that if 6921 * amount of data we send is very small, we could send it in couple 6922 * of packets and process straight to FIN. In that case we won't 6923 * catch ESTABLISHED state. 6924 */ 6925 #ifdef INET6 6926 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; 6927 #else 6928 isipv6 = false; 6929 #endif 6930 if (((V_tcp_pmtud_blackhole_detect == 1) || 6931 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6932 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6933 ((tp->t_state == TCPS_ESTABLISHED) || 6934 (tp->t_state == TCPS_FIN_WAIT_1))) { 6935 /* 6936 * Idea here is that at each stage of mtu probe (usually, 6937 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6938 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6939 * should take care of that. 6940 */ 6941 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6942 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6943 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6944 tp->t_rxtshift % 2 == 0)) { 6945 /* 6946 * Enter Path MTU Black-hole Detection mechanism: - 6947 * Disable Path MTU Discovery (IP "DF" bit). - 6948 * Reduce MTU to lower value than what we negotiated 6949 * with peer. 
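 *
 * Concretely, the clamp below runs in at most two stages: at
 * t_rxtshift 2 the MSS drops to V_tcp_pmtud_blackhole_mss (or the
 * v6 equivalent) and at t_rxtshift 4 it drops to the default MSS
 * with PMTUD turned off.  The gate (t_rxtshift >= 2 &&
 * t_rxtshift < 6 && t_rxtshift % 2 == 0) is what gives each stage
 * its two tries; once t_rxtshift reaches 6 with no progress the
 * else branch restores t_pmtud_saved_maxseg and re-enables PMTUD.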
6950 */ 6951 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6952 /* Record that we may have found a black hole. */ 6953 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6954 /* Keep track of previous MSS. */ 6955 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6956 } 6957 6958 /* 6959 * Reduce the MSS to blackhole value or to the 6960 * default in an attempt to retransmit. 6961 */ 6962 #ifdef INET6 6963 if (isipv6 && 6964 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6965 /* Use the sysctl tuneable blackhole MSS. */ 6966 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6967 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6968 } else if (isipv6) { 6969 /* Use the default MSS. */ 6970 tp->t_maxseg = V_tcp_v6mssdflt; 6971 /* 6972 * Disable Path MTU Discovery when we switch 6973 * to minmss. 6974 */ 6975 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6976 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6977 } 6978 #endif 6979 #if defined(INET6) && defined(INET) 6980 else 6981 #endif 6982 #ifdef INET 6983 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6984 /* Use the sysctl tuneable blackhole MSS. */ 6985 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6986 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6987 } else { 6988 /* Use the default MSS. */ 6989 tp->t_maxseg = V_tcp_mssdflt; 6990 /* 6991 * Disable Path MTU Discovery when we switch 6992 * to minmss. 6993 */ 6994 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6995 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6996 } 6997 #endif 6998 } else { 6999 /* 7000 * If further retransmissions are still unsuccessful 7001 * with a lowered MTU, maybe this isn't a blackhole 7002 * and we restore the previous MSS and blackhole 7003 * detection flags. The limit '6' is determined by 7004 * giving each probe stage (1448, 1188, 524) 2 7005 * chances to recover. 7006 */ 7007 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 7008 (tp->t_rxtshift >= 6)) { 7009 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 7010 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 7011 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 7012 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 7013 } 7014 } 7015 } 7016 /* 7017 * Disable RFC1323 and SACK if we haven't got any response to 7018 * our third SYN to work-around some broken terminal servers 7019 * (most of which have hopefully been retired) that have bad VJ 7020 * header compression code which trashes TCP segments containing 7021 * unknown-to-them TCP options. 7022 */ 7023 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 7024 (tp->t_rxtshift == 3)) 7025 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 7026 /* 7027 * If we backed off this far, our srtt estimate is probably bogus. 7028 * Clobber it so we'll take the next rtt measurement as our srtt; 7029 * move the current srtt into rttvar to keep the current retransmit 7030 * times until then. 
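 *
 * This works because the RTO base computed above is
 * t_srtt + (t_rttvar << 2).  After the clobber that base becomes
 *
 *	0 + 4 * (old_rttvar + old_srtt)
 *	    >= old_srtt + 4 * old_rttvar
 *
 * so zeroing the smoothed RTT can only lengthen, never shorten,
 * the backed-off retransmit interval until a fresh sample arrives.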
7031 */ 7032 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 7033 #ifdef INET6 7034 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 7035 in6_losing(tp->t_inpcb); 7036 else 7037 #endif 7038 in_losing(tp->t_inpcb); 7039 tp->t_rttvar += tp->t_srtt; 7040 tp->t_srtt = 0; 7041 } 7042 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 7043 tp->snd_recover = tp->snd_max; 7044 tp->t_flags |= TF_ACKNOW; 7045 tp->t_rtttime = 0; 7046 rack_cong_signal(tp, CC_RTO, tp->snd_una); 7047 out: 7048 return (retval); 7049 } 7050 7051 static int 7052 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 7053 { 7054 int32_t ret = 0; 7055 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 7056 7057 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 7058 (tp->t_flags & TF_GPUTINPROG)) { 7059 /* 7060 * We have a goodput in progress 7061 * and we have entered a late state. 7062 * Do we have enough data in the sb 7063 * to handle the GPUT request? 7064 */ 7065 uint32_t bytes; 7066 7067 bytes = tp->gput_ack - tp->gput_seq; 7068 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 7069 bytes += tp->gput_seq - tp->snd_una; 7070 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 7071 /* 7072 * There are not enough bytes in the socket 7073 * buffer that have been sent to cover this 7074 * measurement. Cancel it. 7075 */ 7076 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 7077 rack->r_ctl.rc_gp_srtt /*flex1*/, 7078 tp->gput_seq, 7079 0, 0, 18, __LINE__, NULL, 0); 7080 tp->t_flags &= ~TF_GPUTINPROG; 7081 } 7082 } 7083 if (timers == 0) { 7084 return (0); 7085 } 7086 if (tp->t_state == TCPS_LISTEN) { 7087 /* no timers on listen sockets */ 7088 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 7089 return (0); 7090 return (1); 7091 } 7092 if ((timers & PACE_TMR_RACK) && 7093 rack->rc_on_min_to) { 7094 /* 7095 * For the rack timer when we 7096 * are on a min-timeout (which means rrr_conf = 3) 7097 * we don't want to check the timer. It may 7098 * be going off for a pace and thats ok we 7099 * want to send the retransmit (if its ready). 7100 * 7101 * If its on a normal rack timer (non-min) then 7102 * we will check if its expired. 7103 */ 7104 goto skip_time_check; 7105 } 7106 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 7107 uint32_t left; 7108 7109 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 7110 ret = -1; 7111 rack_log_to_processing(rack, cts, ret, 0); 7112 return (0); 7113 } 7114 if (hpts_calling == 0) { 7115 /* 7116 * A user send or queued mbuf (sack) has called us? We 7117 * return 0 and let the pacing guards 7118 * deal with it if they should or 7119 * should not cause a send. 7120 */ 7121 ret = -2; 7122 rack_log_to_processing(rack, cts, ret, 0); 7123 return (0); 7124 } 7125 /* 7126 * Ok our timer went off early and we are not paced false 7127 * alarm, go back to sleep. 
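 *
 * To recap the codes handed to rack_log_to_processing() here:
 * -1 means a pacer output event is already scheduled, -2 means a
 * user send or queued mbuf (not hpts) woke us and the pacing
 * guards get to decide, and -3 below is a genuine early wakeup
 * answered by re-inserting ourselves into hpts for the time left.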
7128 */ 7129 ret = -3; 7130 left = rack->r_ctl.rc_timer_exp - cts; 7131 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 7132 rack_log_to_processing(rack, cts, ret, left); 7133 return (1); 7134 } 7135 skip_time_check: 7136 rack->rc_tmr_stopped = 0; 7137 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7138 if (timers & PACE_TMR_DELACK) { 7139 ret = rack_timeout_delack(tp, rack, cts); 7140 } else if (timers & PACE_TMR_RACK) { 7141 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7142 rack->r_fast_output = 0; 7143 ret = rack_timeout_rack(tp, rack, cts); 7144 } else if (timers & PACE_TMR_TLP) { 7145 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7146 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7147 } else if (timers & PACE_TMR_RXT) { 7148 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7149 rack->r_fast_output = 0; 7150 ret = rack_timeout_rxt(tp, rack, cts); 7151 } else if (timers & PACE_TMR_PERSIT) { 7152 ret = rack_timeout_persist(tp, rack, cts); 7153 } else if (timers & PACE_TMR_KEEP) { 7154 ret = rack_timeout_keepalive(tp, rack, cts); 7155 } 7156 rack_log_to_processing(rack, cts, ret, timers); 7157 return (ret); 7158 } 7159 7160 static void 7161 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7162 { 7163 struct timeval tv; 7164 uint32_t us_cts, flags_on_entry; 7165 uint8_t hpts_removed = 0; 7166 7167 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7168 us_cts = tcp_get_usecs(&tv); 7169 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7170 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7171 ((tp->snd_max - tp->snd_una) == 0))) { 7172 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7173 hpts_removed = 1; 7174 /* If we were not delayed cancel out the flag. */ 7175 if ((tp->snd_max - tp->snd_una) == 0) 7176 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7177 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7178 } 7179 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7180 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7181 if (rack->rc_inp->inp_in_hpts && 7182 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7183 /* 7184 * Canceling timer's when we have no output being 7185 * paced. We also must remove ourselves from the 7186 * hpts. 7187 */ 7188 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7189 hpts_removed = 1; 7190 } 7191 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7192 } 7193 if (hpts_removed == 0) 7194 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7195 } 7196 7197 static void 7198 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7199 { 7200 return; 7201 } 7202 7203 static int 7204 rack_stopall(struct tcpcb *tp) 7205 { 7206 struct tcp_rack *rack; 7207 rack = (struct tcp_rack *)tp->t_fb_ptr; 7208 rack->t_timers_stopped = 1; 7209 return (0); 7210 } 7211 7212 static void 7213 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7214 { 7215 return; 7216 } 7217 7218 static int 7219 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7220 { 7221 return (0); 7222 } 7223 7224 static void 7225 rack_stop_all_timers(struct tcpcb *tp) 7226 { 7227 struct tcp_rack *rack; 7228 7229 /* 7230 * Assure no timers are running. 
7231 */ 7232 if (tcp_timer_active(tp, TT_PERSIST)) { 7233 /* We enter in persists, set the flag appropriately */ 7234 rack = (struct tcp_rack *)tp->t_fb_ptr; 7235 rack->rc_in_persist = 1; 7236 } 7237 tcp_timer_suspend(tp, TT_PERSIST); 7238 tcp_timer_suspend(tp, TT_REXMT); 7239 tcp_timer_suspend(tp, TT_KEEP); 7240 tcp_timer_suspend(tp, TT_DELACK); 7241 } 7242 7243 static void 7244 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7245 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7246 { 7247 int32_t idx; 7248 uint16_t stripped_flags; 7249 7250 rsm->r_rtr_cnt++; 7251 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7252 rsm->r_dupack = 0; 7253 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7254 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7255 rsm->r_flags |= RACK_OVERMAX; 7256 } 7257 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7258 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7259 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7260 } 7261 idx = rsm->r_rtr_cnt - 1; 7262 rsm->r_tim_lastsent[idx] = ts; 7263 stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP); 7264 if (rsm->r_flags & RACK_ACKED) { 7265 /* Problably MTU discovery messing with us */ 7266 rsm->r_flags &= ~RACK_ACKED; 7267 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7268 } 7269 if (rsm->r_in_tmap) { 7270 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7271 rsm->r_in_tmap = 0; 7272 } 7273 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7274 rsm->r_in_tmap = 1; 7275 if (rsm->r_flags & RACK_SACK_PASSED) { 7276 /* We have retransmitted due to the SACK pass */ 7277 rsm->r_flags &= ~RACK_SACK_PASSED; 7278 rsm->r_flags |= RACK_WAS_SACKPASS; 7279 } 7280 } 7281 7282 static uint32_t 7283 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7284 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7285 { 7286 /* 7287 * We (re-)transmitted starting at rsm->r_start for some length 7288 * (possibly less than r_end. 7289 */ 7290 struct rack_sendmap *nrsm, *insret; 7291 uint32_t c_end; 7292 int32_t len; 7293 7294 len = *lenp; 7295 c_end = rsm->r_start + len; 7296 if (SEQ_GEQ(c_end, rsm->r_end)) { 7297 /* 7298 * We retransmitted the whole piece or more than the whole 7299 * slopping into the next rsm. 7300 */ 7301 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7302 if (c_end == rsm->r_end) { 7303 *lenp = 0; 7304 return (0); 7305 } else { 7306 int32_t act_len; 7307 7308 /* Hangs over the end return whats left */ 7309 act_len = rsm->r_end - rsm->r_start; 7310 *lenp = (len - act_len); 7311 return (rsm->r_end); 7312 } 7313 /* We don't get out of this block. */ 7314 } 7315 /* 7316 * Here we retransmitted less than the whole thing which means we 7317 * have to split this into what was transmitted and what was not. 7318 */ 7319 nrsm = rack_alloc_full_limit(rack); 7320 if (nrsm == NULL) { 7321 /* 7322 * We can't get memory, so lets not proceed. 7323 */ 7324 *lenp = 0; 7325 return (0); 7326 } 7327 /* 7328 * So here we are going to take the original rsm and make it what we 7329 * retransmitted. nrsm will be the tail portion we did not 7330 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7331 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7332 * 1, 6 and the new piece will be 6, 11. 
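 *
 * Continuing that example, after rack_clone_rsm() below rsm is
 * 1, 6 (the bytes just re-sent) and nrsm is 6, 11 carrying rsm's
 * prior transmit history.  rack_update_rsm() then bumps rsm's
 * retransmit count and moves it to the tail of the tmap as the
 * most recently sent data, while nrsm is inserted just after rsm
 * in the RB tree and inherits rsm's old position in the tmap.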
7333 */ 7334 rack_clone_rsm(rack, nrsm, rsm, c_end); 7335 nrsm->r_dupack = 0; 7336 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7337 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7338 #ifdef INVARIANTS 7339 if (insret != NULL) { 7340 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7341 nrsm, insret, rack, rsm); 7342 } 7343 #endif 7344 if (rsm->r_in_tmap) { 7345 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7346 nrsm->r_in_tmap = 1; 7347 } 7348 rsm->r_flags &= (~RACK_HAS_FIN); 7349 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7350 /* Log a split of rsm into rsm and nrsm */ 7351 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7352 *lenp = 0; 7353 return (0); 7354 } 7355 7356 static void 7357 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7358 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts, 7359 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7360 { 7361 struct tcp_rack *rack; 7362 struct rack_sendmap *rsm, *nrsm, *insret, fe; 7363 register uint32_t snd_max, snd_una; 7364 7365 /* 7366 * Add to the RACK log of packets in flight or retransmitted. If 7367 * there is a TS option we will use the TS echoed, if not we will 7368 * grab a TS. 7369 * 7370 * Retransmissions will increment the count and move the ts to its 7371 * proper place. Note that if options do not include TS's then we 7372 * won't be able to effectively use the ACK for an RTT on a retran. 7373 * 7374 * Notes about r_start and r_end. Lets consider a send starting at 7375 * sequence 1 for 10 bytes. In such an example the r_start would be 7376 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7377 * This means that r_end is actually the first sequence for the next 7378 * slot (11). 7379 * 7380 */ 7381 /* 7382 * If err is set what do we do XXXrrs? should we not add the thing? 7383 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7384 * i.e. proceed with add ** do this for now. 7385 */ 7386 INP_WLOCK_ASSERT(tp->t_inpcb); 7387 if (err) 7388 /* 7389 * We don't log errors -- we could but snd_max does not 7390 * advance in this case either. 7391 */ 7392 return; 7393 7394 if (th_flags & TH_RST) { 7395 /* 7396 * We don't log resets and we return immediately from 7397 * sending 7398 */ 7399 return; 7400 } 7401 rack = (struct tcp_rack *)tp->t_fb_ptr; 7402 snd_una = tp->snd_una; 7403 snd_max = tp->snd_max; 7404 if (th_flags & (TH_SYN | TH_FIN)) { 7405 /* 7406 * The call to rack_log_output is made before bumping 7407 * snd_max. This means we can record one extra byte on a SYN 7408 * or FIN if seq_out is adding more on and a FIN is present 7409 * (and we are not resending). 7410 */ 7411 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7412 len++; 7413 if (th_flags & TH_FIN) 7414 len++; 7415 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7416 /* 7417 * The add/update as not been done for the FIN/SYN 7418 * yet. 7419 */ 7420 snd_max = tp->snd_nxt; 7421 } 7422 } 7423 if (SEQ_LEQ((seq_out + len), snd_una)) { 7424 /* Are sending an old segment to induce an ack (keep-alive)? */ 7425 return; 7426 } 7427 if (SEQ_LT(seq_out, snd_una)) { 7428 /* huh? should we panic? 
*/ 7429 uint32_t end; 7430 7431 end = seq_out + len; 7432 seq_out = snd_una; 7433 if (SEQ_GEQ(end, seq_out)) 7434 len = end - seq_out; 7435 else 7436 len = 0; 7437 } 7438 if (len == 0) { 7439 /* We don't log zero window probes */ 7440 return; 7441 } 7442 rack->r_ctl.rc_time_last_sent = cts; 7443 if (IN_FASTRECOVERY(tp->t_flags)) { 7444 rack->r_ctl.rc_prr_out += len; 7445 } 7446 /* First question is it a retransmission or new? */ 7447 if (seq_out == snd_max) { 7448 /* Its new */ 7449 again: 7450 rsm = rack_alloc(rack); 7451 if (rsm == NULL) { 7452 /* 7453 * Hmm out of memory and the tcb got destroyed while 7454 * we tried to wait. 7455 */ 7456 return; 7457 } 7458 if (th_flags & TH_FIN) { 7459 rsm->r_flags = RACK_HAS_FIN|add_flag; 7460 } else { 7461 rsm->r_flags = add_flag; 7462 } 7463 if (hw_tls) 7464 rsm->r_hw_tls = 1; 7465 rsm->r_tim_lastsent[0] = cts; 7466 rsm->r_rtr_cnt = 1; 7467 rsm->r_rtr_bytes = 0; 7468 if (th_flags & TH_SYN) { 7469 /* The data space is one beyond snd_una */ 7470 rsm->r_flags |= RACK_HAS_SYN; 7471 } 7472 rsm->r_start = seq_out; 7473 rsm->r_end = rsm->r_start + len; 7474 rsm->r_dupack = 0; 7475 /* 7476 * save off the mbuf location that 7477 * sndmbuf_noadv returned (which is 7478 * where we started copying from).. 7479 */ 7480 rsm->m = s_mb; 7481 rsm->soff = s_moff; 7482 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7483 if (rsm->m) { 7484 if (rsm->m->m_len <= rsm->soff) { 7485 /* 7486 * XXXrrs Question, will this happen? 7487 * 7488 * If sbsndptr is set at the correct place 7489 * then s_moff should always be somewhere 7490 * within rsm->m. But if the sbsndptr was 7491 * off then that won't be true. If it occurs 7492 * we need to walkout to the correct location. 7493 */ 7494 struct mbuf *lm; 7495 7496 lm = rsm->m; 7497 while (lm->m_len <= rsm->soff) { 7498 rsm->soff -= lm->m_len; 7499 lm = lm->m_next; 7500 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7501 __func__, rack, s_moff, s_mb, rsm->soff)); 7502 } 7503 rsm->m = lm; 7504 counter_u64_add(rack_sbsndptr_wrong, 1); 7505 } else 7506 counter_u64_add(rack_sbsndptr_right, 1); 7507 rsm->orig_m_len = rsm->m->m_len; 7508 } else 7509 rsm->orig_m_len = 0; 7510 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7511 /* Log a new rsm */ 7512 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7513 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7514 #ifdef INVARIANTS 7515 if (insret != NULL) { 7516 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7517 nrsm, insret, rack, rsm); 7518 } 7519 #endif 7520 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7521 rsm->r_in_tmap = 1; 7522 /* 7523 * Special case detection, is there just a single 7524 * packet outstanding when we are not in recovery? 7525 * 7526 * If this is true mark it so. 7527 */ 7528 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7529 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7530 struct rack_sendmap *prsm; 7531 7532 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7533 if (prsm) 7534 prsm->r_one_out_nr = 1; 7535 } 7536 return; 7537 } 7538 /* 7539 * If we reach here its a retransmission and we need to find it. 
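 *
 * The lookup below tries the caller's hint first, then an RB_FIND
 * keyed on r_start.  If seq_out lands in the middle of an existing
 * rsm -- say the map holds 2000, 5000 and we retransmitted starting
 * at 3000 -- the front is split off with rack_clone_rsm() so rsm
 * becomes 2000, 3000 and the new 3000, 5000 piece is what
 * rack_update_entry() accounts against.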
7540 */ 7541 memset(&fe, 0, sizeof(fe)); 7542 more: 7543 if (hintrsm && (hintrsm->r_start == seq_out)) { 7544 rsm = hintrsm; 7545 hintrsm = NULL; 7546 } else { 7547 /* No hints sorry */ 7548 rsm = NULL; 7549 } 7550 if ((rsm) && (rsm->r_start == seq_out)) { 7551 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7552 if (len == 0) { 7553 return; 7554 } else { 7555 goto more; 7556 } 7557 } 7558 /* Ok it was not the last pointer go through it the hard way. */ 7559 refind: 7560 fe.r_start = seq_out; 7561 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7562 if (rsm) { 7563 if (rsm->r_start == seq_out) { 7564 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7565 if (len == 0) { 7566 return; 7567 } else { 7568 goto refind; 7569 } 7570 } 7571 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7572 /* Transmitted within this piece */ 7573 /* 7574 * Ok we must split off the front and then let the 7575 * update do the rest 7576 */ 7577 nrsm = rack_alloc_full_limit(rack); 7578 if (nrsm == NULL) { 7579 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7580 return; 7581 } 7582 /* 7583 * copy rsm to nrsm and then trim the front of rsm 7584 * to not include this part. 7585 */ 7586 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7587 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7588 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7589 #ifdef INVARIANTS 7590 if (insret != NULL) { 7591 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7592 nrsm, insret, rack, rsm); 7593 } 7594 #endif 7595 if (rsm->r_in_tmap) { 7596 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7597 nrsm->r_in_tmap = 1; 7598 } 7599 rsm->r_flags &= (~RACK_HAS_FIN); 7600 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7601 if (len == 0) { 7602 return; 7603 } else if (len > 0) 7604 goto refind; 7605 } 7606 } 7607 /* 7608 * Hmm not found in map did they retransmit both old and on into the 7609 * new? 7610 */ 7611 if (seq_out == tp->snd_max) { 7612 goto again; 7613 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7614 #ifdef INVARIANTS 7615 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7616 seq_out, len, tp->snd_una, tp->snd_max); 7617 printf("Starting Dump of all rack entries\n"); 7618 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7619 printf("rsm:%p start:%u end:%u\n", 7620 rsm, rsm->r_start, rsm->r_end); 7621 } 7622 printf("Dump complete\n"); 7623 panic("seq_out not found rack:%p tp:%p", 7624 rack, tp); 7625 #endif 7626 } else { 7627 #ifdef INVARIANTS 7628 /* 7629 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7630 * flag) 7631 */ 7632 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7633 seq_out, len, tp->snd_max, tp); 7634 #endif 7635 } 7636 } 7637 7638 /* 7639 * Record one of the RTT updates from an ack into 7640 * our sample structure. 
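 *
 * Each call folds the sample into the per-ack aggregate (lowest,
 * highest and running total) that tcp_rack_xmit_timer_commit()
 * consumes, and tracks the best us_rtt seen along with a
 * confidence: 2 for a SACK'd block, 1 for a clean cumulative ack,
 * and 0 when the send was a just-returned or app-limited tail that
 * we do not trust for buffer estimation.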
7641 */ 7642 7643 static void 7644 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7645 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7646 { 7647 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7648 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7649 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7650 } 7651 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7652 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7653 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7654 } 7655 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7656 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7657 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7658 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7659 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7660 } 7661 if ((confidence == 1) && 7662 ((rsm == NULL) || 7663 (rsm->r_just_ret) || 7664 (rsm->r_one_out_nr && 7665 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7666 /* 7667 * If the rsm had a just return 7668 * hit it then we can't trust the 7669 * rtt measurement for buffer deterimination 7670 * Note that a confidence of 2, indicates 7671 * SACK'd which overrides the r_just_ret or 7672 * the r_one_out_nr. If it was a CUM-ACK and 7673 * we had only two outstanding, but get an 7674 * ack for only 1. Then that also lowers our 7675 * confidence. 7676 */ 7677 confidence = 0; 7678 } 7679 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7680 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7681 if (rack->r_ctl.rack_rs.confidence == 0) { 7682 /* 7683 * We take anything with no current confidence 7684 * saved. 7685 */ 7686 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7687 rack->r_ctl.rack_rs.confidence = confidence; 7688 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7689 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7690 /* 7691 * Once we have a confident number, 7692 * we can update it with a smaller 7693 * value since this confident number 7694 * may include the DSACK time until 7695 * the next segment (the second one) arrived. 7696 */ 7697 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7698 rack->r_ctl.rack_rs.confidence = confidence; 7699 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7700 } 7701 } 7702 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7703 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7704 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7705 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7706 } 7707 7708 /* 7709 * Collect new round-trip time estimate 7710 * and update averages and current timeout. 
7711 */ 7712 static void 7713 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7714 { 7715 int32_t delta; 7716 uint32_t o_srtt, o_var; 7717 int32_t hrtt_up = 0; 7718 int32_t rtt; 7719 7720 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7721 /* No valid sample */ 7722 return; 7723 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7724 /* We are to use the lowest RTT seen in a single ack */ 7725 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7726 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7727 /* We are to use the highest RTT seen in a single ack */ 7728 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7729 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7730 /* We are to use the average RTT seen in a single ack */ 7731 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7732 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7733 } else { 7734 #ifdef INVARIANTS 7735 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7736 #endif 7737 return; 7738 } 7739 if (rtt == 0) 7740 rtt = 1; 7741 if (rack->rc_gp_rtt_set == 0) { 7742 /* 7743 * With no RTT we have to accept 7744 * even one we are not confident of. 7745 */ 7746 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7747 rack->rc_gp_rtt_set = 1; 7748 } else if (rack->r_ctl.rack_rs.confidence) { 7749 /* update the running gp srtt */ 7750 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7751 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7752 } 7753 if (rack->r_ctl.rack_rs.confidence) { 7754 /* 7755 * record the low and high for highly buffered path computation, 7756 * we only do this if we are confident (not a retransmission). 7757 */ 7758 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7759 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7760 hrtt_up = 1; 7761 } 7762 if (rack->rc_highly_buffered == 0) { 7763 /* 7764 * Currently once we declare a path has 7765 * highly buffered there is no going 7766 * back, which may be a problem... 7767 */ 7768 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7769 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7770 rack->r_ctl.rc_highest_us_rtt, 7771 rack->r_ctl.rc_lowest_us_rtt, 7772 RACK_RTTS_SEEHBP); 7773 rack->rc_highly_buffered = 1; 7774 } 7775 } 7776 } 7777 if ((rack->r_ctl.rack_rs.confidence) || 7778 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7779 /* 7780 * If we are highly confident of it <or> it was 7781 * never retransmitted we accept it as the last us_rtt. 7782 */ 7783 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7784 /* The lowest rtt can be set if its was not retransmited */ 7785 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7786 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7787 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7788 rack->r_ctl.rc_lowest_us_rtt = 1; 7789 } 7790 } 7791 o_srtt = tp->t_srtt; 7792 o_var = tp->t_rttvar; 7793 rack = (struct tcp_rack *)tp->t_fb_ptr; 7794 if (tp->t_srtt != 0) { 7795 /* 7796 * We keep a simple srtt in microseconds, like our rtt 7797 * measurement. We don't need to do any tricks with shifting 7798 * etc. Instead we just add in 1/8th of the new measurement 7799 * and subtract out 1/8 of the old srtt. We do the same with 7800 * the variance after finding the absolute value of the 7801 * difference between this sample and the current srtt. 
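 *
 * A quick worked example of the updates below: with t_srtt = 40000,
 * t_rttvar = 8000 and a new sample rtt = 48000 (all in usec),
 *
 *	delta    = |40000 - 48000|      = 8000
 *	t_srtt   = 40000 - 5000 + 6000  = 41000
 *	t_rttvar = 8000 - 1000 + 1000   = 8000
 *
 * i.e. srtt drifts one eighth of the way toward the sample and
 * rttvar tracks the mean absolute deviation from it.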
7802 */ 7803 delta = tp->t_srtt - rtt; 7804 /* Take off 1/8th of the current sRTT */ 7805 tp->t_srtt -= (tp->t_srtt >> 3); 7806 /* Add in 1/8th of the new RTT just measured */ 7807 tp->t_srtt += (rtt >> 3); 7808 if (tp->t_srtt <= 0) 7809 tp->t_srtt = 1; 7810 /* Now lets make the absolute value of the variance */ 7811 if (delta < 0) 7812 delta = -delta; 7813 /* Subtract out 1/8th */ 7814 tp->t_rttvar -= (tp->t_rttvar >> 3); 7815 /* Add in 1/8th of the new variance we just saw */ 7816 tp->t_rttvar += (delta >> 3); 7817 if (tp->t_rttvar <= 0) 7818 tp->t_rttvar = 1; 7819 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7820 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7821 } else { 7822 /* 7823 * No rtt measurement yet - use the unsmoothed rtt. Set the 7824 * variance to half the rtt (so our first retransmit happens 7825 * at 3*rtt). 7826 */ 7827 tp->t_srtt = rtt; 7828 tp->t_rttvar = rtt >> 1; 7829 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7830 } 7831 rack->rc_srtt_measure_made = 1; 7832 KMOD_TCPSTAT_INC(tcps_rttupdated); 7833 tp->t_rttupdated++; 7834 #ifdef STATS 7835 if (rack_stats_gets_ms_rtt == 0) { 7836 /* Send in the microsecond rtt used for rxt timeout purposes */ 7837 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7838 } else if (rack_stats_gets_ms_rtt == 1) { 7839 /* Send in the millisecond rtt used for rxt timeout purposes */ 7840 int32_t ms_rtt; 7841 7842 /* Round up */ 7843 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7844 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7845 } else if (rack_stats_gets_ms_rtt == 2) { 7846 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7847 int32_t ms_rtt; 7848 7849 /* Round up */ 7850 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7851 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7852 } else { 7853 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7854 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7855 } 7856 7857 #endif 7858 /* 7859 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7860 * way we do the smoothing, srtt and rttvar will each average +1/2 7861 * tick of bias. When we compute the retransmit timer, we want 1/2 7862 * tick of rounding and 1 extra tick because of +-1/2 tick 7863 * uncertainty in the firing of the timer. The bias will give us 7864 * exactly the 1.5 tick we need. But, because the bias is 7865 * statistical, we have to test that we don't drop below the minimum 7866 * feasible timer (which is 2 ticks). 7867 */ 7868 tp->t_rxtshift = 0; 7869 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7870 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7871 rack_log_rtt_sample(rack, rtt); 7872 tp->t_softerror = 0; 7873 } 7874 7875 7876 static void 7877 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7878 { 7879 /* 7880 * Apply to filter the inbound us-rtt at us_cts. 
7881 */ 7882 uint32_t old_rtt; 7883 7884 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7885 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7886 us_rtt, us_cts); 7887 if (rack->r_ctl.last_pacing_time && 7888 rack->rc_gp_dyn_mul && 7889 (rack->r_ctl.last_pacing_time > us_rtt)) 7890 rack->pacing_longer_than_rtt = 1; 7891 else 7892 rack->pacing_longer_than_rtt = 0; 7893 if (old_rtt > us_rtt) { 7894 /* We just hit a new lower rtt time */ 7895 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7896 __LINE__, RACK_RTTS_NEWRTT); 7897 /* 7898 * Only count it if its lower than what we saw within our 7899 * calculated range. 7900 */ 7901 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7902 if (rack_probertt_lower_within && 7903 rack->rc_gp_dyn_mul && 7904 (rack->use_fixed_rate == 0) && 7905 (rack->rc_always_pace)) { 7906 /* 7907 * We are seeing a new lower rtt very close 7908 * to the time that we would have entered probe-rtt. 7909 * This is probably due to the fact that a peer flow 7910 * has entered probe-rtt. Lets go in now too. 7911 */ 7912 uint32_t val; 7913 7914 val = rack_probertt_lower_within * rack_time_between_probertt; 7915 val /= 100; 7916 if ((rack->in_probe_rtt == 0) && 7917 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7918 rack_enter_probertt(rack, us_cts); 7919 } 7920 } 7921 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7922 } 7923 } 7924 } 7925 7926 static int 7927 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7928 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7929 { 7930 int32_t i, all; 7931 uint32_t t, len_acked; 7932 7933 if ((rsm->r_flags & RACK_ACKED) || 7934 (rsm->r_flags & RACK_WAS_ACKED)) 7935 /* Already done */ 7936 return (0); 7937 if (rsm->r_no_rtt_allowed) { 7938 /* Not allowed */ 7939 return (0); 7940 } 7941 if (ack_type == CUM_ACKED) { 7942 if (SEQ_GT(th_ack, rsm->r_end)) { 7943 len_acked = rsm->r_end - rsm->r_start; 7944 all = 1; 7945 } else { 7946 len_acked = th_ack - rsm->r_start; 7947 all = 0; 7948 } 7949 } else { 7950 len_acked = rsm->r_end - rsm->r_start; 7951 all = 0; 7952 } 7953 if (rsm->r_rtr_cnt == 1) { 7954 uint32_t us_rtt; 7955 7956 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7957 if ((int)t <= 0) 7958 t = 1; 7959 if (!tp->t_rttlow || tp->t_rttlow > t) 7960 tp->t_rttlow = t; 7961 if (!rack->r_ctl.rc_rack_min_rtt || 7962 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7963 rack->r_ctl.rc_rack_min_rtt = t; 7964 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7965 rack->r_ctl.rc_rack_min_rtt = 1; 7966 } 7967 } 7968 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7969 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7970 else 7971 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7972 if (us_rtt == 0) 7973 us_rtt = 1; 7974 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7975 if (ack_type == SACKED) { 7976 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7977 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7978 } else { 7979 /* 7980 * We need to setup what our confidence 7981 * is in this ack. 7982 * 7983 * If the rsm was app limited and it is 7984 * less than a mss in length (the end 7985 * of the send) then we have a gap. 
If we 7986 * were app limited but say we were sending 7987 * multiple MSS's then we are more confident 7988 * int it. 7989 * 7990 * When we are not app-limited then we see if 7991 * the rsm is being included in the current 7992 * measurement, we tell this by the app_limited_needs_set 7993 * flag. 7994 * 7995 * Note that being cwnd blocked is not applimited 7996 * as well as the pacing delay between packets which 7997 * are sending only 1 or 2 MSS's also will show up 7998 * in the RTT. We probably need to examine this algorithm 7999 * a bit more and enhance it to account for the delay 8000 * between rsm's. We could do that by saving off the 8001 * pacing delay of each rsm (in an rsm) and then 8002 * factoring that in somehow though for now I am 8003 * not sure how :) 8004 */ 8005 int calc_conf = 0; 8006 8007 if (rsm->r_flags & RACK_APP_LIMITED) { 8008 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 8009 calc_conf = 0; 8010 else 8011 calc_conf = 1; 8012 } else if (rack->app_limited_needs_set == 0) { 8013 calc_conf = 1; 8014 } else { 8015 calc_conf = 0; 8016 } 8017 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 8018 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 8019 calc_conf, rsm, rsm->r_rtr_cnt); 8020 } 8021 if ((rsm->r_flags & RACK_TLP) && 8022 (!IN_FASTRECOVERY(tp->t_flags))) { 8023 /* Segment was a TLP and our retrans matched */ 8024 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 8025 rack->r_ctl.rc_rsm_start = tp->snd_max; 8026 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 8027 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 8028 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 8029 } 8030 } 8031 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8032 /* New more recent rack_tmit_time */ 8033 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8034 rack->rc_rack_rtt = t; 8035 } 8036 return (1); 8037 } 8038 /* 8039 * We clear the soft/rxtshift since we got an ack. 8040 * There is no assurance we will call the commit() function 8041 * so we need to clear these to avoid incorrect handling. 8042 */ 8043 tp->t_rxtshift = 0; 8044 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 8045 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 8046 tp->t_softerror = 0; 8047 if (to && (to->to_flags & TOF_TS) && 8048 (ack_type == CUM_ACKED) && 8049 (to->to_tsecr) && 8050 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 8051 /* 8052 * Now which timestamp does it match? In this block the ACK 8053 * must be coming from a previous transmission. 8054 */ 8055 for (i = 0; i < rsm->r_rtr_cnt; i++) { 8056 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 8057 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8058 if ((int)t <= 0) 8059 t = 1; 8060 if ((i + 1) < rsm->r_rtr_cnt) { 8061 /* 8062 * The peer ack'd from our previous 8063 * transmission. We have a spurious 8064 * retransmission and thus we dont 8065 * want to update our rack_rtt. 
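 *
 * For example, if the segment was sent twice (r_rtr_cnt == 2) and
 * the echoed timestamp matches r_tim_lastsent[0], the ACK belongs
 * to the original send, so we bail here without a sample.  Only a
 * tsecr matching the most recent transmission (i == r_rtr_cnt - 1)
 * falls through below, and that sample is recorded with
 * confidence 0 since it is still a retransmission.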
8066 */ 8067 return (0); 8068 } 8069 if (!tp->t_rttlow || tp->t_rttlow > t) 8070 tp->t_rttlow = t; 8071 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8072 rack->r_ctl.rc_rack_min_rtt = t; 8073 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8074 rack->r_ctl.rc_rack_min_rtt = 1; 8075 } 8076 } 8077 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 8078 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 8079 /* New more recent rack_tmit_time */ 8080 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 8081 rack->rc_rack_rtt = t; 8082 } 8083 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 8084 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 8085 rsm->r_rtr_cnt); 8086 return (1); 8087 } 8088 } 8089 goto ts_not_found; 8090 } else { 8091 /* 8092 * Ok its a SACK block that we retransmitted. or a windows 8093 * machine without timestamps. We can tell nothing from the 8094 * time-stamp since its not there or the time the peer last 8095 * recieved a segment that moved forward its cum-ack point. 8096 */ 8097 ts_not_found: 8098 i = rsm->r_rtr_cnt - 1; 8099 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8100 if ((int)t <= 0) 8101 t = 1; 8102 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8103 /* 8104 * We retransmitted and the ack came back in less 8105 * than the smallest rtt we have observed. We most 8106 * likely did an improper retransmit as outlined in 8107 * 6.2 Step 2 point 2 in the rack-draft so we 8108 * don't want to update our rack_rtt. We in 8109 * theory (in future) might want to think about reverting our 8110 * cwnd state but we won't for now. 8111 */ 8112 return (0); 8113 } else if (rack->r_ctl.rc_rack_min_rtt) { 8114 /* 8115 * We retransmitted it and the retransmit did the 8116 * job. 8117 */ 8118 if (!rack->r_ctl.rc_rack_min_rtt || 8119 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8120 rack->r_ctl.rc_rack_min_rtt = t; 8121 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8122 rack->r_ctl.rc_rack_min_rtt = 1; 8123 } 8124 } 8125 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8126 /* New more recent rack_tmit_time */ 8127 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8128 rack->rc_rack_rtt = t; 8129 } 8130 return (1); 8131 } 8132 } 8133 return (0); 8134 } 8135 8136 /* 8137 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 8138 */ 8139 static void 8140 rack_log_sack_passed(struct tcpcb *tp, 8141 struct tcp_rack *rack, struct rack_sendmap *rsm) 8142 { 8143 struct rack_sendmap *nrsm; 8144 8145 nrsm = rsm; 8146 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8147 rack_head, r_tnext) { 8148 if (nrsm == rsm) { 8149 /* Skip orginal segment he is acked */ 8150 continue; 8151 } 8152 if (nrsm->r_flags & RACK_ACKED) { 8153 /* 8154 * Skip ack'd segments, though we 8155 * should not see these, since tmap 8156 * should not have ack'd segments. 8157 */ 8158 continue; 8159 } 8160 if (nrsm->r_flags & RACK_SACK_PASSED) { 8161 /* 8162 * We found one that is already marked 8163 * passed, we have been here before and 8164 * so all others below this are marked. 
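 * (Roughly speaking, RACK_SACK_PASSED records "something sent
 *  after this entry has already been SACKed"; the loss-inference
 *  code later combines that flag with elapsed time to decide a
 *  segment may be retransmitted, so marking entries eagerly here
 *  is what drives the time-based recovery.)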
8165 */ 8166 break; 8167 } 8168 nrsm->r_flags |= RACK_SACK_PASSED; 8169 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8170 } 8171 } 8172 8173 static void 8174 rack_need_set_test(struct tcpcb *tp, 8175 struct tcp_rack *rack, 8176 struct rack_sendmap *rsm, 8177 tcp_seq th_ack, 8178 int line, 8179 int use_which) 8180 { 8181 8182 if ((tp->t_flags & TF_GPUTINPROG) && 8183 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8184 /* 8185 * We were app limited, and this ack 8186 * butts up or goes beyond the point where we want 8187 * to start our next measurement. We need 8188 * to record the new gput_ts as here and 8189 * possibly update the start sequence. 8190 */ 8191 uint32_t seq, ts; 8192 8193 if (rsm->r_rtr_cnt > 1) { 8194 /* 8195 * This is a retransmit, can we 8196 * really make any assessment at this 8197 * point? We are not really sure of 8198 * the timestamp, is it this or the 8199 * previous transmission? 8200 * 8201 * Lets wait for something better that 8202 * is not retransmitted. 8203 */ 8204 return; 8205 } 8206 seq = tp->gput_seq; 8207 ts = tp->gput_ts; 8208 rack->app_limited_needs_set = 0; 8209 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8210 /* Do we start at a new end? */ 8211 if ((use_which == RACK_USE_BEG) && 8212 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8213 /* 8214 * When we get an ACK that just eats 8215 * up some of the rsm, we set RACK_USE_BEG 8216 * since whats at r_start (i.e. th_ack) 8217 * is left unacked and thats where the 8218 * measurement not starts. 8219 */ 8220 tp->gput_seq = rsm->r_start; 8221 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8222 } 8223 if ((use_which == RACK_USE_END) && 8224 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8225 /* 8226 * We use the end when the cumack 8227 * is moving forward and completely 8228 * deleting the rsm passed so basically 8229 * r_end holds th_ack. 8230 * 8231 * For SACK's we also want to use the end 8232 * since this piece just got sacked and 8233 * we want to target anything after that 8234 * in our measurement. 8235 */ 8236 tp->gput_seq = rsm->r_end; 8237 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8238 } 8239 if (use_which == RACK_USE_END_OR_THACK) { 8240 /* 8241 * special case for ack moving forward, 8242 * not a sack, we need to move all the 8243 * way up to where this ack cum-ack moves 8244 * to. 8245 */ 8246 if (SEQ_GT(th_ack, rsm->r_end)) 8247 tp->gput_seq = th_ack; 8248 else 8249 tp->gput_seq = rsm->r_end; 8250 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8251 } 8252 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8253 /* 8254 * We moved beyond this guy's range, re-calculate 8255 * the new end point. 8256 */ 8257 if (rack->rc_gp_filled == 0) { 8258 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8259 } else { 8260 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8261 } 8262 } 8263 /* 8264 * We are moving the goal post, we may be able to clear the 8265 * measure_saw_probe_rtt flag. 
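 * (Concretely: once tp->gput_seq is at or past the snd_max we
 *  recorded when probe-rtt exited, none of the bytes in the new
 *  measurement window were sent while probe-rtt was throttling
 *  us, so the check below can drop measure_saw_probe_rtt.)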
8266 */ 8267 if ((rack->in_probe_rtt == 0) && 8268 (rack->measure_saw_probe_rtt) && 8269 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8270 rack->measure_saw_probe_rtt = 0; 8271 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8272 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8273 if (rack->rc_gp_filled && 8274 ((tp->gput_ack - tp->gput_seq) < 8275 max(rc_init_window(rack), (MIN_GP_WIN * 8276 ctf_fixed_maxseg(tp))))) { 8277 uint32_t ideal_amount; 8278 8279 ideal_amount = rack_get_measure_window(tp, rack); 8280 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8281 /* 8282 * There is no sense of continuing this measurement 8283 * because its too small to gain us anything we 8284 * trust. Skip it and that way we can start a new 8285 * measurement quicker. 8286 */ 8287 tp->t_flags &= ~TF_GPUTINPROG; 8288 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8289 0, 0, 0, 6, __LINE__, NULL, 0); 8290 } else { 8291 /* 8292 * Reset the window further out. 8293 */ 8294 tp->gput_ack = tp->gput_seq + ideal_amount; 8295 } 8296 } 8297 } 8298 } 8299 8300 static inline int 8301 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8302 { 8303 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8304 /* Behind our TLP definition or right at */ 8305 return (0); 8306 } 8307 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8308 /* The start is beyond or right at our end of TLP definition */ 8309 return (0); 8310 } 8311 /* It has to be a sub-part of the original TLP recorded */ 8312 return (1); 8313 } 8314 8315 8316 static uint32_t 8317 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8318 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8319 { 8320 uint32_t start, end, changed = 0; 8321 struct rack_sendmap stack_map; 8322 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 8323 int32_t used_ref = 1; 8324 int moved = 0; 8325 8326 start = sack->start; 8327 end = sack->end; 8328 rsm = *prsm; 8329 memset(&fe, 0, sizeof(fe)); 8330 do_rest_ofb: 8331 if ((rsm == NULL) || 8332 (SEQ_LT(end, rsm->r_start)) || 8333 (SEQ_GEQ(start, rsm->r_end)) || 8334 (SEQ_LT(start, rsm->r_start))) { 8335 /* 8336 * We are not in the right spot, 8337 * find the correct spot in the tree. 8338 */ 8339 used_ref = 0; 8340 fe.r_start = start; 8341 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8342 moved++; 8343 } 8344 if (rsm == NULL) { 8345 /* TSNH */ 8346 goto out; 8347 } 8348 /* Ok we have an ACK for some piece of this rsm */ 8349 if (rsm->r_start != start) { 8350 if ((rsm->r_flags & RACK_ACKED) == 0) { 8351 /* 8352 * Before any splitting or hookery is 8353 * done is it a TLP of interest i.e. rxt? 8354 */ 8355 if ((rsm->r_flags & RACK_TLP) && 8356 (rsm->r_rtr_cnt > 1)) { 8357 /* 8358 * We are splitting a rxt TLP, check 8359 * if we need to save off the start/end 8360 */ 8361 if (rack->rc_last_tlp_acked_set && 8362 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8363 /* 8364 * We already turned this on since we are inside 8365 * the previous one was a partially sack now we 8366 * are getting another one (maybe all of it). 8367 * 8368 */ 8369 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8370 /* 8371 * Lets make sure we have all of it though. 
8372 */ 8373 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8374 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8375 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8376 rack->r_ctl.last_tlp_acked_end); 8377 } 8378 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8379 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8380 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8381 rack->r_ctl.last_tlp_acked_end); 8382 } 8383 } else { 8384 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8385 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8386 rack->rc_last_tlp_past_cumack = 0; 8387 rack->rc_last_tlp_acked_set = 1; 8388 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8389 } 8390 } 8391 /** 8392 * Need to split this in two pieces the before and after, 8393 * the before remains in the map, the after must be 8394 * added. In other words we have: 8395 * rsm |--------------| 8396 * sackblk |-------> 8397 * rsm will become 8398 * rsm |---| 8399 * and nrsm will be the sacked piece 8400 * nrsm |----------| 8401 * 8402 * But before we start down that path lets 8403 * see if the sack spans over on top of 8404 * the next guy and it is already sacked. 8405 * 8406 */ 8407 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8408 if (next && (next->r_flags & RACK_ACKED) && 8409 SEQ_GEQ(end, next->r_start)) { 8410 /** 8411 * So the next one is already acked, and 8412 * we can thus by hookery use our stack_map 8413 * to reflect the piece being sacked and 8414 * then adjust the two tree entries moving 8415 * the start and ends around. So we start like: 8416 * rsm |------------| (not-acked) 8417 * next |-----------| (acked) 8418 * sackblk |--------> 8419 * We want to end like so: 8420 * rsm |------| (not-acked) 8421 * next |-----------------| (acked) 8422 * nrsm |-----| 8423 * Where nrsm is a temporary stack piece we 8424 * use to update all the gizmos. 8425 */ 8426 /* Copy up our fudge block */ 8427 nrsm = &stack_map; 8428 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8429 /* Now adjust our tree blocks */ 8430 rsm->r_end = start; 8431 next->r_start = start; 8432 /* Now we must adjust back where next->m is */ 8433 rack_setup_offset_for_rsm(rsm, next); 8434 8435 /* We don't need to adjust rsm, it did not change */ 8436 /* Clear out the dup ack count of the remainder */ 8437 rsm->r_dupack = 0; 8438 rsm->r_just_ret = 0; 8439 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8440 /* Now lets make sure our fudge block is right */ 8441 nrsm->r_start = start; 8442 /* Now lets update all the stats and such */ 8443 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8444 if (rack->app_limited_needs_set) 8445 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8446 changed += (nrsm->r_end - nrsm->r_start); 8447 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8448 if (nrsm->r_flags & RACK_SACK_PASSED) { 8449 counter_u64_add(rack_reorder_seen, 1); 8450 rack->r_ctl.rc_reorder_ts = cts; 8451 } 8452 /* 8453 * Now we want to go up from rsm (the 8454 * one left un-acked) to the next one 8455 * in the tmap. We do this so when 8456 * we walk backwards we include marking 8457 * sack-passed on rsm (The one passed in 8458 * is skipped since it is generally called 8459 * on something sacked before removing it 8460 * from the tmap). 8461 */ 8462 if (rsm->r_in_tmap) { 8463 nrsm = TAILQ_NEXT(rsm, r_tnext); 8464 /* 8465 * Now that we have the next 8466 * one walk backwards from there. 
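 * (Since rack_log_sack_passed() skips the entry it is handed,
 *  handing it the entry *after* rsm means the backwards walk
 *  does mark rsm itself as SACK_PASSED, which is the behaviour
 *  we want for the un-acked remainder.)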
8467 */ 8468 if (nrsm && nrsm->r_in_tmap) 8469 rack_log_sack_passed(tp, rack, nrsm); 8470 } 8471 /* Now are we done? */ 8472 if (SEQ_LT(end, next->r_end) || 8473 (end == next->r_end)) { 8474 /* Done with block */ 8475 goto out; 8476 } 8477 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8478 counter_u64_add(rack_sack_used_next_merge, 1); 8479 /* Postion for the next block */ 8480 start = next->r_end; 8481 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8482 if (rsm == NULL) 8483 goto out; 8484 } else { 8485 /** 8486 * We can't use any hookery here, so we 8487 * need to split the map. We enter like 8488 * so: 8489 * rsm |--------| 8490 * sackblk |-----> 8491 * We will add the new block nrsm and 8492 * that will be the new portion, and then 8493 * fall through after reseting rsm. So we 8494 * split and look like this: 8495 * rsm |----| 8496 * sackblk |-----> 8497 * nrsm |---| 8498 * We then fall through reseting 8499 * rsm to nrsm, so the next block 8500 * picks it up. 8501 */ 8502 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8503 if (nrsm == NULL) { 8504 /* 8505 * failed XXXrrs what can we do but loose the sack 8506 * info? 8507 */ 8508 goto out; 8509 } 8510 counter_u64_add(rack_sack_splits, 1); 8511 rack_clone_rsm(rack, nrsm, rsm, start); 8512 rsm->r_just_ret = 0; 8513 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8514 #ifdef INVARIANTS 8515 if (insret != NULL) { 8516 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8517 nrsm, insret, rack, rsm); 8518 } 8519 #endif 8520 if (rsm->r_in_tmap) { 8521 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8522 nrsm->r_in_tmap = 1; 8523 } 8524 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8525 rsm->r_flags &= (~RACK_HAS_FIN); 8526 /* Position us to point to the new nrsm that starts the sack blk */ 8527 rsm = nrsm; 8528 } 8529 } else { 8530 /* Already sacked this piece */ 8531 counter_u64_add(rack_sack_skipped_acked, 1); 8532 moved++; 8533 if (end == rsm->r_end) { 8534 /* Done with block */ 8535 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8536 goto out; 8537 } else if (SEQ_LT(end, rsm->r_end)) { 8538 /* A partial sack to a already sacked block */ 8539 moved++; 8540 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8541 goto out; 8542 } else { 8543 /* 8544 * The end goes beyond this guy 8545 * repostion the start to the 8546 * next block. 8547 */ 8548 start = rsm->r_end; 8549 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8550 if (rsm == NULL) 8551 goto out; 8552 } 8553 } 8554 } 8555 if (SEQ_GEQ(end, rsm->r_end)) { 8556 /** 8557 * The end of this block is either beyond this guy or right 8558 * at this guy. I.e.: 8559 * rsm --- |-----| 8560 * end |-----| 8561 * <or> 8562 * end |---------| 8563 */ 8564 if ((rsm->r_flags & RACK_ACKED) == 0) { 8565 /* 8566 * Is it a TLP of interest? 8567 */ 8568 if ((rsm->r_flags & RACK_TLP) && 8569 (rsm->r_rtr_cnt > 1)) { 8570 /* 8571 * We are splitting a rxt TLP, check 8572 * if we need to save off the start/end 8573 */ 8574 if (rack->rc_last_tlp_acked_set && 8575 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8576 /* 8577 * We already turned this on since we are inside 8578 * the previous one was a partially sack now we 8579 * are getting another one (maybe all of it). 8580 */ 8581 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8582 /* 8583 * Lets make sure we have all of it though. 
8584 */ 8585 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8586 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8587 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8588 rack->r_ctl.last_tlp_acked_end); 8589 } 8590 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8591 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8592 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8593 rack->r_ctl.last_tlp_acked_end); 8594 } 8595 } else { 8596 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8597 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8598 rack->rc_last_tlp_past_cumack = 0; 8599 rack->rc_last_tlp_acked_set = 1; 8600 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8601 } 8602 } 8603 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8604 changed += (rsm->r_end - rsm->r_start); 8605 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8606 if (rsm->r_in_tmap) /* should be true */ 8607 rack_log_sack_passed(tp, rack, rsm); 8608 /* Is Reordering occuring? */ 8609 if (rsm->r_flags & RACK_SACK_PASSED) { 8610 rsm->r_flags &= ~RACK_SACK_PASSED; 8611 counter_u64_add(rack_reorder_seen, 1); 8612 rack->r_ctl.rc_reorder_ts = cts; 8613 } 8614 if (rack->app_limited_needs_set) 8615 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8616 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8617 rsm->r_flags |= RACK_ACKED; 8618 if (rsm->r_in_tmap) { 8619 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8620 rsm->r_in_tmap = 0; 8621 } 8622 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8623 } else { 8624 counter_u64_add(rack_sack_skipped_acked, 1); 8625 moved++; 8626 } 8627 if (end == rsm->r_end) { 8628 /* This block only - done, setup for next */ 8629 goto out; 8630 } 8631 /* 8632 * There is more not coverend by this rsm move on 8633 * to the next block in the RB tree. 8634 */ 8635 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8636 start = rsm->r_end; 8637 rsm = nrsm; 8638 if (rsm == NULL) 8639 goto out; 8640 goto do_rest_ofb; 8641 } 8642 /** 8643 * The end of this sack block is smaller than 8644 * our rsm i.e.: 8645 * rsm --- |-----| 8646 * end |--| 8647 */ 8648 if ((rsm->r_flags & RACK_ACKED) == 0) { 8649 /* 8650 * Is it a TLP of interest? 8651 */ 8652 if ((rsm->r_flags & RACK_TLP) && 8653 (rsm->r_rtr_cnt > 1)) { 8654 /* 8655 * We are splitting a rxt TLP, check 8656 * if we need to save off the start/end 8657 */ 8658 if (rack->rc_last_tlp_acked_set && 8659 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8660 /* 8661 * We already turned this on since we are inside 8662 * the previous one was a partially sack now we 8663 * are getting another one (maybe all of it). 8664 */ 8665 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8666 /* 8667 * Lets make sure we have all of it though. 
8668 */ 8669 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8670 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8671 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8672 rack->r_ctl.last_tlp_acked_end); 8673 } 8674 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8675 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8676 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8677 rack->r_ctl.last_tlp_acked_end); 8678 } 8679 } else { 8680 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8681 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8682 rack->rc_last_tlp_past_cumack = 0; 8683 rack->rc_last_tlp_acked_set = 1; 8684 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8685 } 8686 } 8687 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8688 if (prev && 8689 (prev->r_flags & RACK_ACKED)) { 8690 /** 8691 * Goal, we want the right remainder of rsm to shrink 8692 * in place and span from (rsm->r_start = end) to rsm->r_end. 8693 * We want to expand prev to go all the way 8694 * to prev->r_end <- end. 8695 * so in the tree we have before: 8696 * prev |--------| (acked) 8697 * rsm |-------| (non-acked) 8698 * sackblk |-| 8699 * We churn it so we end up with 8700 * prev |----------| (acked) 8701 * rsm |-----| (non-acked) 8702 * nrsm |-| (temporary) 8703 * 8704 * Note if either prev/rsm is a TLP we don't 8705 * do this. 8706 */ 8707 nrsm = &stack_map; 8708 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8709 prev->r_end = end; 8710 rsm->r_start = end; 8711 /* Now adjust nrsm (stack copy) to be 8712 * the one that is the small 8713 * piece that was "sacked". 8714 */ 8715 nrsm->r_end = end; 8716 rsm->r_dupack = 0; 8717 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8718 /* 8719 * Now that the rsm has had its start moved forward 8720 * lets go ahead and get its new place in the world. 8721 */ 8722 rack_setup_offset_for_rsm(prev, rsm); 8723 /* 8724 * Now nrsm is our new little piece 8725 * that is acked (which was merged 8726 * to prev). Update the rtt and changed 8727 * based on that. Also check for reordering. 8728 */ 8729 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8730 if (rack->app_limited_needs_set) 8731 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8732 changed += (nrsm->r_end - nrsm->r_start); 8733 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8734 if (nrsm->r_flags & RACK_SACK_PASSED) { 8735 counter_u64_add(rack_reorder_seen, 1); 8736 rack->r_ctl.rc_reorder_ts = cts; 8737 } 8738 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8739 rsm = prev; 8740 counter_u64_add(rack_sack_used_prev_merge, 1); 8741 } else { 8742 /** 8743 * This is the case where our previous 8744 * block is not acked either, so we must 8745 * split the block in two. 8746 */ 8747 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8748 if (nrsm == NULL) { 8749 /* failed rrs what can we do but loose the sack info? */ 8750 goto out; 8751 } 8752 if ((rsm->r_flags & RACK_TLP) && 8753 (rsm->r_rtr_cnt > 1)) { 8754 /* 8755 * We are splitting a rxt TLP, check 8756 * if we need to save off the start/end 8757 */ 8758 if (rack->rc_last_tlp_acked_set && 8759 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8760 /* 8761 * We already turned this on since this block is inside 8762 * the previous one was a partially sack now we 8763 * are getting another one (maybe all of it). 
8764 */ 8765 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8766 /* 8767 * Lets make sure we have all of it though. 8768 */ 8769 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8770 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8771 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8772 rack->r_ctl.last_tlp_acked_end); 8773 } 8774 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8775 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8776 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8777 rack->r_ctl.last_tlp_acked_end); 8778 } 8779 } else { 8780 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8781 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8782 rack->rc_last_tlp_acked_set = 1; 8783 rack->rc_last_tlp_past_cumack = 0; 8784 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8785 } 8786 } 8787 /** 8788 * In this case nrsm becomes 8789 * nrsm->r_start = end; 8790 * nrsm->r_end = rsm->r_end; 8791 * which is un-acked. 8792 * <and> 8793 * rsm->r_end = nrsm->r_start; 8794 * i.e. the remaining un-acked 8795 * piece is left on the left 8796 * hand side. 8797 * 8798 * So we start like this 8799 * rsm |----------| (not acked) 8800 * sackblk |---| 8801 * build it so we have 8802 * rsm |---| (acked) 8803 * nrsm |------| (not acked) 8804 */ 8805 counter_u64_add(rack_sack_splits, 1); 8806 rack_clone_rsm(rack, nrsm, rsm, end); 8807 rsm->r_flags &= (~RACK_HAS_FIN); 8808 rsm->r_just_ret = 0; 8809 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8810 #ifdef INVARIANTS 8811 if (insret != NULL) { 8812 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8813 nrsm, insret, rack, rsm); 8814 } 8815 #endif 8816 if (rsm->r_in_tmap) { 8817 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8818 nrsm->r_in_tmap = 1; 8819 } 8820 nrsm->r_dupack = 0; 8821 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8822 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8823 changed += (rsm->r_end - rsm->r_start); 8824 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8825 if (rsm->r_in_tmap) /* should be true */ 8826 rack_log_sack_passed(tp, rack, rsm); 8827 /* Is Reordering occuring? */ 8828 if (rsm->r_flags & RACK_SACK_PASSED) { 8829 rsm->r_flags &= ~RACK_SACK_PASSED; 8830 counter_u64_add(rack_reorder_seen, 1); 8831 rack->r_ctl.rc_reorder_ts = cts; 8832 } 8833 if (rack->app_limited_needs_set) 8834 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8835 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8836 rsm->r_flags |= RACK_ACKED; 8837 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8838 if (rsm->r_in_tmap) { 8839 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8840 rsm->r_in_tmap = 0; 8841 } 8842 } 8843 } else if (start != end){ 8844 /* 8845 * The block was already acked. 8846 */ 8847 counter_u64_add(rack_sack_skipped_acked, 1); 8848 moved++; 8849 } 8850 out: 8851 if (rsm && 8852 ((rsm->r_flags & RACK_TLP) == 0) && 8853 (rsm->r_flags & RACK_ACKED)) { 8854 /* 8855 * Now can we merge where we worked 8856 * with either the previous or 8857 * next block? 
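 * (E.g. after this SACK:
 *      rsm  |---acked---|
 *      next |---acked---|
 *  the two collapse into one map entry, keeping the tree compact
 *  and later SACK passes cheap. Entries flagged RACK_TLP are
 *  deliberately left unmerged, presumably so their exact bounds
 *  can still be matched against a later DSACK.)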
8858 */ 8859 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8860 while (next) { 8861 if (next->r_flags & RACK_TLP) 8862 break; 8863 if (next->r_flags & RACK_ACKED) { 8864 /* yep this and next can be merged */ 8865 rsm = rack_merge_rsm(rack, rsm, next); 8866 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8867 } else 8868 break; 8869 } 8870 /* Now what about the previous? */ 8871 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8872 while (prev) { 8873 if (prev->r_flags & RACK_TLP) 8874 break; 8875 if (prev->r_flags & RACK_ACKED) { 8876 /* yep the previous and this can be merged */ 8877 rsm = rack_merge_rsm(rack, prev, rsm); 8878 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8879 } else 8880 break; 8881 } 8882 } 8883 if (used_ref == 0) { 8884 counter_u64_add(rack_sack_proc_all, 1); 8885 } else { 8886 counter_u64_add(rack_sack_proc_short, 1); 8887 } 8888 /* Save off the next one for quick reference. */ 8889 if (rsm) 8890 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8891 else 8892 nrsm = NULL; 8893 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8894 /* Pass back the moved. */ 8895 *moved_two = moved; 8896 return (changed); 8897 } 8898 8899 static void inline 8900 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8901 { 8902 struct rack_sendmap *tmap; 8903 8904 tmap = NULL; 8905 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8906 /* Its no longer sacked, mark it so */ 8907 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8908 #ifdef INVARIANTS 8909 if (rsm->r_in_tmap) { 8910 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8911 rack, rsm, rsm->r_flags); 8912 } 8913 #endif 8914 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8915 /* Rebuild it into our tmap */ 8916 if (tmap == NULL) { 8917 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8918 tmap = rsm; 8919 } else { 8920 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8921 tmap = rsm; 8922 } 8923 tmap->r_in_tmap = 1; 8924 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8925 } 8926 /* 8927 * Now lets possibly clear the sack filter so we start 8928 * recognizing sacks that cover this area. 8929 */ 8930 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8931 8932 } 8933 8934 static void 8935 rack_do_decay(struct tcp_rack *rack) 8936 { 8937 struct timeval res; 8938 8939 #define timersub(tvp, uvp, vvp) \ 8940 do { \ 8941 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8942 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8943 if ((vvp)->tv_usec < 0) { \ 8944 (vvp)->tv_sec--; \ 8945 (vvp)->tv_usec += 1000000; \ 8946 } \ 8947 } while (0) 8948 8949 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8950 #undef timersub 8951 8952 rack->r_ctl.input_pkt++; 8953 if ((rack->rc_in_persist) || 8954 (res.tv_sec >= 1) || 8955 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8956 /* 8957 * Check for decay of non-SAD, 8958 * we want all SAD detection metrics to 8959 * decay 1/4 per second (or more) passed. 8960 */ 8961 uint32_t pkt_delta; 8962 8963 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8964 /* Update our saved tracking values */ 8965 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8966 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8967 /* Now do we escape without decay? 
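 * Idle connections and those below tcp_sad_low_pps input packets
 * per second are left alone; otherwise ack_count, sack_count and
 * the two move counters are all scaled down together by
 * ctf_decay_count(), so the ratios rack_do_detection() looks at
 * keep their meaning while old history fades out.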
*/ 8968 #ifdef NETFLIX_EXP_DETECTION 8969 if (rack->rc_in_persist || 8970 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8971 (pkt_delta < tcp_sad_low_pps)){ 8972 /* 8973 * We don't decay idle connections 8974 * or ones that have a low input pps. 8975 */ 8976 return; 8977 } 8978 /* Decay the counters */ 8979 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8980 tcp_sad_decay_val); 8981 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8982 tcp_sad_decay_val); 8983 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8984 tcp_sad_decay_val); 8985 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8986 tcp_sad_decay_val); 8987 #endif 8988 } 8989 } 8990 8991 static void 8992 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8993 { 8994 struct rack_sendmap *rsm, *rm; 8995 8996 /* 8997 * The ACK point is advancing to th_ack, we must drop off 8998 * the packets in the rack log and calculate any eligble 8999 * RTT's. 9000 */ 9001 rack->r_wanted_output = 1; 9002 9003 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 9004 if ((rack->rc_last_tlp_acked_set == 1)&& 9005 (rack->rc_last_tlp_past_cumack == 1) && 9006 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 9007 /* 9008 * We have reached the point where our last rack 9009 * tlp retransmit sequence is ahead of the cum-ack. 9010 * This can only happen when the cum-ack moves all 9011 * the way around (its been a full 2^^31+1 bytes 9012 * or more since we sent a retransmitted TLP). Lets 9013 * turn off the valid flag since its not really valid. 9014 * 9015 * Note since sack's also turn on this event we have 9016 * a complication, we have to wait to age it out until 9017 * the cum-ack is by the TLP before checking which is 9018 * what the next else clause does. 9019 */ 9020 rack_log_dsack_event(rack, 9, __LINE__, 9021 rack->r_ctl.last_tlp_acked_start, 9022 rack->r_ctl.last_tlp_acked_end); 9023 rack->rc_last_tlp_acked_set = 0; 9024 rack->rc_last_tlp_past_cumack = 0; 9025 } else if ((rack->rc_last_tlp_acked_set == 1) && 9026 (rack->rc_last_tlp_past_cumack == 0) && 9027 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 9028 /* 9029 * It is safe to start aging TLP's out. 9030 */ 9031 rack->rc_last_tlp_past_cumack = 1; 9032 } 9033 /* We do the same for the tlp send seq as well */ 9034 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9035 (rack->rc_last_sent_tlp_past_cumack == 1) && 9036 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 9037 rack_log_dsack_event(rack, 9, __LINE__, 9038 rack->r_ctl.last_sent_tlp_seq, 9039 (rack->r_ctl.last_sent_tlp_seq + 9040 rack->r_ctl.last_sent_tlp_len)); 9041 rack->rc_last_sent_tlp_seq_valid = 0; 9042 rack->rc_last_sent_tlp_past_cumack = 0; 9043 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 9044 (rack->rc_last_sent_tlp_past_cumack == 0) && 9045 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 9046 /* 9047 * It is safe to start aging TLP's send. 9048 */ 9049 rack->rc_last_sent_tlp_past_cumack = 1; 9050 } 9051 more: 9052 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9053 if (rsm == NULL) { 9054 if ((th_ack - 1) == tp->iss) { 9055 /* 9056 * For the SYN incoming case we will not 9057 * have called tcp_output for the sending of 9058 * the SYN, so there will be no map. All 9059 * other cases should probably be a panic. 
9060 */ 9061 return; 9062 } 9063 if (tp->t_flags & TF_SENTFIN) { 9064 /* if we sent a FIN we often will not have map */ 9065 return; 9066 } 9067 #ifdef INVARIANTS 9068 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 9069 tp, 9070 tp->t_state, th_ack, rack, 9071 tp->snd_una, tp->snd_max, tp->snd_nxt); 9072 #endif 9073 return; 9074 } 9075 if (SEQ_LT(th_ack, rsm->r_start)) { 9076 /* Huh map is missing this */ 9077 #ifdef INVARIANTS 9078 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9079 rsm->r_start, 9080 th_ack, tp->t_state, rack->r_state); 9081 #endif 9082 return; 9083 } 9084 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9085 9086 /* Now was it a retransmitted TLP? */ 9087 if ((rsm->r_flags & RACK_TLP) && 9088 (rsm->r_rtr_cnt > 1)) { 9089 /* 9090 * Yes, this rsm was a TLP and retransmitted, remember that 9091 * since if a DSACK comes back on this we don't want 9092 * to think of it as a reordered segment. This may 9093 * get updated again with possibly even other TLPs 9094 * in flight, but thats ok. Only when we don't send 9095 * a retransmitted TLP for 1/2 the sequences space 9096 * will it get turned off (above). 9097 */ 9098 if (rack->rc_last_tlp_acked_set && 9099 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9100 /* 9101 * We already turned this on since the end matches, 9102 * the previous one was a partially ack now we 9103 * are getting another one (maybe all of it). 9104 */ 9105 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9106 /* 9107 * Lets make sure we have all of it though. 9108 */ 9109 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9110 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9111 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9112 rack->r_ctl.last_tlp_acked_end); 9113 } 9114 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9115 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9116 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9117 rack->r_ctl.last_tlp_acked_end); 9118 } 9119 } else { 9120 rack->rc_last_tlp_past_cumack = 1; 9121 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9122 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9123 rack->rc_last_tlp_acked_set = 1; 9124 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9125 } 9126 } 9127 /* Now do we consume the whole thing? */ 9128 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9129 /* Its all consumed. */ 9130 uint32_t left; 9131 uint8_t newly_acked; 9132 9133 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9134 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9135 rsm->r_rtr_bytes = 0; 9136 /* Record the time of highest cumack sent */ 9137 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9138 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9139 #ifdef INVARIANTS 9140 if (rm != rsm) { 9141 panic("removing head in rack:%p rsm:%p rm:%p", 9142 rack, rsm, rm); 9143 } 9144 #endif 9145 if (rsm->r_in_tmap) { 9146 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9147 rsm->r_in_tmap = 0; 9148 } 9149 newly_acked = 1; 9150 if (rsm->r_flags & RACK_ACKED) { 9151 /* 9152 * It was acked on the scoreboard -- remove 9153 * it from total 9154 */ 9155 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9156 newly_acked = 0; 9157 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9158 /* 9159 * There are segments ACKED on the 9160 * scoreboard further up. We are seeing 9161 * reordering. 
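 * (In other words the hole filled in by itself: data sent before
 *  an already-SACKed range has now been cum-acked, so what looked
 *  like loss was really reordering. rc_reorder_ts below remembers
 *  when we last saw this, and r_might_revert may later undo a
 *  recovery we entered because of it.)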
9162 */ 9163 rsm->r_flags &= ~RACK_SACK_PASSED; 9164 counter_u64_add(rack_reorder_seen, 1); 9165 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9166 rsm->r_flags |= RACK_ACKED; 9167 rack->r_ctl.rc_reorder_ts = cts; 9168 if (rack->r_ent_rec_ns) { 9169 /* 9170 * We have sent no more, and we saw an sack 9171 * then ack arrive. 9172 */ 9173 rack->r_might_revert = 1; 9174 } 9175 } 9176 if ((rsm->r_flags & RACK_TO_REXT) && 9177 (tp->t_flags & TF_RCVD_TSTMP) && 9178 (to->to_flags & TOF_TS) && 9179 (to->to_tsecr != 0) && 9180 (tp->t_flags & TF_PREVVALID)) { 9181 /* 9182 * We can use the timestamp to see 9183 * if this retransmission was from the 9184 * first transmit. If so we made a mistake. 9185 */ 9186 tp->t_flags &= ~TF_PREVVALID; 9187 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9188 /* The first transmit is what this ack is for */ 9189 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 9190 } 9191 } 9192 left = th_ack - rsm->r_end; 9193 if (rack->app_limited_needs_set && newly_acked) 9194 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9195 /* Free back to zone */ 9196 rack_free(rack, rsm); 9197 if (left) { 9198 goto more; 9199 } 9200 /* Check for reneging */ 9201 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9202 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9203 /* 9204 * The peer has moved snd_una up to 9205 * the edge of this send, i.e. one 9206 * that it had previously acked. The only 9207 * way that can be true if the peer threw 9208 * away data (space issues) that it had 9209 * previously sacked (else it would have 9210 * given us snd_una up to (rsm->r_end). 9211 * We need to undo the acked markings here. 9212 * 9213 * Note we have to look to make sure th_ack is 9214 * our rsm->r_start in case we get an old ack 9215 * where th_ack is behind snd_una. 9216 */ 9217 rack_peer_reneges(rack, rsm, th_ack); 9218 } 9219 return; 9220 } 9221 if (rsm->r_flags & RACK_ACKED) { 9222 /* 9223 * It was acked on the scoreboard -- remove it from 9224 * total for the part being cum-acked. 9225 */ 9226 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9227 } 9228 /* 9229 * Clear the dup ack count for 9230 * the piece that remains. 9231 */ 9232 rsm->r_dupack = 0; 9233 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9234 if (rsm->r_rtr_bytes) { 9235 /* 9236 * It was retransmitted adjust the 9237 * sack holes for what was acked. 9238 */ 9239 int ack_am; 9240 9241 ack_am = (th_ack - rsm->r_start); 9242 if (ack_am >= rsm->r_rtr_bytes) { 9243 rack->r_ctl.rc_holes_rxt -= ack_am; 9244 rsm->r_rtr_bytes -= ack_am; 9245 } 9246 } 9247 /* 9248 * Update where the piece starts and record 9249 * the time of send of highest cumack sent. 9250 */ 9251 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9252 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9253 /* Now we need to move our offset forward too */ 9254 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9255 /* Fix up the orig_m_len and possibly the mbuf offset */ 9256 rack_adjust_orig_mlen(rsm); 9257 } 9258 rsm->soff += (th_ack - rsm->r_start); 9259 rsm->r_start = th_ack; 9260 /* Now do we need to move the mbuf fwd too? 
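 * soff can now point past the end of the current mbuf, so walk
 * m_next until it fits again. For example (illustrative numbers):
 * if soff works out to 1600 and each mbuf in the chain holds 512
 * bytes, we skip three mbufs and land at offset 64 in the fourth,
 * then refresh orig_m_len from that mbuf.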
*/ 9261 if (rsm->m) { 9262 while (rsm->soff >= rsm->m->m_len) { 9263 rsm->soff -= rsm->m->m_len; 9264 rsm->m = rsm->m->m_next; 9265 KASSERT((rsm->m != NULL), 9266 (" nrsm:%p hit at soff:%u null m", 9267 rsm, rsm->soff)); 9268 } 9269 rsm->orig_m_len = rsm->m->m_len; 9270 } 9271 if (rack->app_limited_needs_set) 9272 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9273 } 9274 9275 static void 9276 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9277 { 9278 struct rack_sendmap *rsm; 9279 int sack_pass_fnd = 0; 9280 9281 if (rack->r_might_revert) { 9282 /* 9283 * Ok we have reordering, have not sent anything, we 9284 * might want to revert the congestion state if nothing 9285 * further has SACK_PASSED on it. Lets check. 9286 * 9287 * We also get here when we have DSACKs come in for 9288 * all the data that we FR'd. Note that a rxt or tlp 9289 * timer clears this from happening. 9290 */ 9291 9292 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9293 if (rsm->r_flags & RACK_SACK_PASSED) { 9294 sack_pass_fnd = 1; 9295 break; 9296 } 9297 } 9298 if (sack_pass_fnd == 0) { 9299 /* 9300 * We went into recovery 9301 * incorrectly due to reordering! 9302 */ 9303 int orig_cwnd; 9304 9305 rack->r_ent_rec_ns = 0; 9306 orig_cwnd = tp->snd_cwnd; 9307 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec; 9308 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9309 tp->snd_recover = tp->snd_una; 9310 rack_log_to_prr(rack, 14, orig_cwnd); 9311 EXIT_RECOVERY(tp->t_flags); 9312 } 9313 rack->r_might_revert = 0; 9314 } 9315 } 9316 9317 #ifdef NETFLIX_EXP_DETECTION 9318 static void 9319 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9320 { 9321 if ((rack->do_detection || tcp_force_detection) && 9322 tcp_sack_to_ack_thresh && 9323 tcp_sack_to_move_thresh && 9324 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9325 /* 9326 * We have thresholds set to find 9327 * possible attackers and disable sack. 9328 * Check them. 9329 */ 9330 uint64_t ackratio, moveratio, movetotal; 9331 9332 /* Log detecting */ 9333 rack_log_sad(rack, 1); 9334 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9335 ackratio *= (uint64_t)(1000); 9336 if (rack->r_ctl.ack_count) 9337 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9338 else { 9339 /* We really should not hit here */ 9340 ackratio = 1000; 9341 } 9342 if ((rack->sack_attack_disable == 0) && 9343 (ackratio > rack_highest_sack_thresh_seen)) 9344 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9345 movetotal = rack->r_ctl.sack_moved_extra; 9346 movetotal += rack->r_ctl.sack_noextra_move; 9347 moveratio = rack->r_ctl.sack_moved_extra; 9348 moveratio *= (uint64_t)1000; 9349 if (movetotal) 9350 moveratio /= movetotal; 9351 else { 9352 /* No moves, thats pretty good */ 9353 moveratio = 0; 9354 } 9355 if ((rack->sack_attack_disable == 0) && 9356 (moveratio > rack_highest_move_thresh_seen)) 9357 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9358 if (rack->sack_attack_disable == 0) { 9359 if ((ackratio > tcp_sack_to_ack_thresh) && 9360 (moveratio > tcp_sack_to_move_thresh)) { 9361 /* Disable sack processing */ 9362 rack->sack_attack_disable = 1; 9363 if (rack->r_rep_attack == 0) { 9364 rack->r_rep_attack = 1; 9365 counter_u64_add(rack_sack_attacks_detected, 1); 9366 } 9367 if (tcp_attack_on_turns_on_logging) { 9368 /* 9369 * Turn on logging, used for debugging 9370 * false positives. 
9371 */ 9372 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9373 } 9374 /* Clamp the cwnd at flight size */ 9375 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9376 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9377 rack_log_sad(rack, 2); 9378 } 9379 } else { 9380 /* We are sack-disabled check for false positives */ 9381 if ((ackratio <= tcp_restoral_thresh) || 9382 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9383 rack->sack_attack_disable = 0; 9384 rack_log_sad(rack, 3); 9385 /* Restart counting */ 9386 rack->r_ctl.sack_count = 0; 9387 rack->r_ctl.sack_moved_extra = 0; 9388 rack->r_ctl.sack_noextra_move = 1; 9389 rack->r_ctl.ack_count = max(1, 9390 (bytes_this_ack / segsiz)); 9391 9392 if (rack->r_rep_reverse == 0) { 9393 rack->r_rep_reverse = 1; 9394 counter_u64_add(rack_sack_attacks_reversed, 1); 9395 } 9396 /* Restore the cwnd */ 9397 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9398 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9399 } 9400 } 9401 } 9402 } 9403 #endif 9404 9405 static int 9406 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9407 { 9408 9409 uint32_t am, l_end; 9410 int was_tlp = 0; 9411 9412 if (SEQ_GT(end, start)) 9413 am = end - start; 9414 else 9415 am = 0; 9416 if ((rack->rc_last_tlp_acked_set ) && 9417 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9418 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9419 /* 9420 * The DSACK is because of a TLP which we don't 9421 * do anything with the reordering window over since 9422 * it was not reordering that caused the DSACK but 9423 * our previous retransmit TLP. 9424 */ 9425 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9426 was_tlp = 1; 9427 goto skip_dsack_round; 9428 } 9429 if (rack->rc_last_sent_tlp_seq_valid) { 9430 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9431 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9432 (SEQ_LEQ(end, l_end))) { 9433 /* 9434 * This dsack is from the last sent TLP, ignore it 9435 * for reordering purposes. 9436 */ 9437 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9438 was_tlp = 1; 9439 goto skip_dsack_round; 9440 } 9441 } 9442 if (rack->rc_dsack_round_seen == 0) { 9443 rack->rc_dsack_round_seen = 1; 9444 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9445 rack->r_ctl.num_dsack++; 9446 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9447 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9448 } 9449 skip_dsack_round: 9450 /* 9451 * We keep track of how many DSACK blocks we get 9452 * after a recovery incident. 9453 */ 9454 rack->r_ctl.dsack_byte_cnt += am; 9455 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9456 rack->r_ctl.retran_during_recovery && 9457 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9458 /* 9459 * False recovery most likely culprit is reordering. If 9460 * nothing else is missing we need to revert. 
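 * (The heuristic: once the peer has DSACKed at least as many
 *  bytes as we retransmitted during the recovery episode, every
 *  retransmission was apparently for data the peer already held,
 *  so rack_handle_might_revert() below gets a chance to restore
 *  the pre-recovery cwnd and ssthresh.)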
9461 */ 9462 rack->r_might_revert = 1; 9463 rack_handle_might_revert(rack->rc_tp, rack); 9464 rack->r_might_revert = 0; 9465 rack->r_ctl.retran_during_recovery = 0; 9466 rack->r_ctl.dsack_byte_cnt = 0; 9467 } 9468 return (was_tlp); 9469 } 9470 9471 static void 9472 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9473 { 9474 /* Deal with changed and PRR here (in recovery only) */ 9475 uint32_t pipe, snd_una; 9476 9477 rack->r_ctl.rc_prr_delivered += changed; 9478 9479 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9480 /* 9481 * It is all outstanding, we are application limited 9482 * and thus we don't need more room to send anything. 9483 * Note we use tp->snd_una here and not th_ack because 9484 * the data as yet not been cut from the sb. 9485 */ 9486 rack->r_ctl.rc_prr_sndcnt = 0; 9487 return; 9488 } 9489 /* Compute prr_sndcnt */ 9490 if (SEQ_GT(tp->snd_una, th_ack)) { 9491 snd_una = tp->snd_una; 9492 } else { 9493 snd_una = th_ack; 9494 } 9495 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9496 if (pipe > tp->snd_ssthresh) { 9497 long sndcnt; 9498 9499 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9500 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9501 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9502 else { 9503 rack->r_ctl.rc_prr_sndcnt = 0; 9504 rack_log_to_prr(rack, 9, 0); 9505 sndcnt = 0; 9506 } 9507 sndcnt++; 9508 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9509 sndcnt -= rack->r_ctl.rc_prr_out; 9510 else 9511 sndcnt = 0; 9512 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9513 rack_log_to_prr(rack, 10, 0); 9514 } else { 9515 uint32_t limit; 9516 9517 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9518 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9519 else 9520 limit = 0; 9521 if (changed > limit) 9522 limit = changed; 9523 limit += ctf_fixed_maxseg(tp); 9524 if (tp->snd_ssthresh > pipe) { 9525 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9526 rack_log_to_prr(rack, 11, 0); 9527 } else { 9528 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9529 rack_log_to_prr(rack, 12, 0); 9530 } 9531 } 9532 } 9533 9534 static void 9535 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9536 { 9537 uint32_t changed; 9538 struct tcp_rack *rack; 9539 struct rack_sendmap *rsm; 9540 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9541 register uint32_t th_ack; 9542 int32_t i, j, k, num_sack_blks = 0; 9543 uint32_t cts, acked, ack_point, sack_changed = 0; 9544 int loop_start = 0, moved_two = 0; 9545 uint32_t tsused; 9546 9547 9548 INP_WLOCK_ASSERT(tp->t_inpcb); 9549 if (th->th_flags & TH_RST) { 9550 /* We don't log resets */ 9551 return; 9552 } 9553 rack = (struct tcp_rack *)tp->t_fb_ptr; 9554 cts = tcp_get_usecs(NULL); 9555 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9556 changed = 0; 9557 th_ack = th->th_ack; 9558 if (rack->sack_attack_disable == 0) 9559 rack_do_decay(rack); 9560 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9561 /* 9562 * You only get credit for 9563 * MSS and greater (and you get extra 9564 * credit for larger cum-ack moves). 9565 */ 9566 int ac; 9567 9568 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9569 rack->r_ctl.ack_count += ac; 9570 counter_u64_add(rack_ack_total, ac); 9571 } 9572 if (rack->r_ctl.ack_count > 0xfff00000) { 9573 /* 9574 * reduce the number to keep us under 9575 * a uint32_t. 
9576 */ 9577 rack->r_ctl.ack_count /= 2; 9578 rack->r_ctl.sack_count /= 2; 9579 } 9580 if (SEQ_GT(th_ack, tp->snd_una)) { 9581 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9582 tp->t_acktime = ticks; 9583 } 9584 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9585 changed = th_ack - rsm->r_start; 9586 if (changed) { 9587 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9588 } 9589 if ((to->to_flags & TOF_SACK) == 0) { 9590 /* We are done nothing left and no sack. */ 9591 rack_handle_might_revert(tp, rack); 9592 /* 9593 * For cases where we struck a dup-ack 9594 * with no SACK, add to the changes so 9595 * PRR will work right. 9596 */ 9597 if (dup_ack_struck && (changed == 0)) { 9598 changed += ctf_fixed_maxseg(rack->rc_tp); 9599 } 9600 goto out; 9601 } 9602 /* Sack block processing */ 9603 if (SEQ_GT(th_ack, tp->snd_una)) 9604 ack_point = th_ack; 9605 else 9606 ack_point = tp->snd_una; 9607 for (i = 0; i < to->to_nsacks; i++) { 9608 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9609 &sack, sizeof(sack)); 9610 sack.start = ntohl(sack.start); 9611 sack.end = ntohl(sack.end); 9612 if (SEQ_GT(sack.end, sack.start) && 9613 SEQ_GT(sack.start, ack_point) && 9614 SEQ_LT(sack.start, tp->snd_max) && 9615 SEQ_GT(sack.end, ack_point) && 9616 SEQ_LEQ(sack.end, tp->snd_max)) { 9617 sack_blocks[num_sack_blks] = sack; 9618 num_sack_blks++; 9619 } else if (SEQ_LEQ(sack.start, th_ack) && 9620 SEQ_LEQ(sack.end, th_ack)) { 9621 int was_tlp; 9622 9623 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9624 /* 9625 * Its a D-SACK block. 9626 */ 9627 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9628 } 9629 } 9630 if (rack->rc_dsack_round_seen) { 9631 /* Is the dsack roound over? */ 9632 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9633 /* Yes it is */ 9634 rack->rc_dsack_round_seen = 0; 9635 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9636 } 9637 } 9638 /* 9639 * Sort the SACK blocks so we can update the rack scoreboard with 9640 * just one pass. 9641 */ 9642 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9643 num_sack_blks, th->th_ack); 9644 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9645 if (num_sack_blks == 0) { 9646 /* Nothing to sack (DSACKs?) */ 9647 goto out_with_totals; 9648 } 9649 if (num_sack_blks < 2) { 9650 /* Only one, we don't need to sort */ 9651 goto do_sack_work; 9652 } 9653 /* Sort the sacks */ 9654 for (i = 0; i < num_sack_blks; i++) { 9655 for (j = i + 1; j < num_sack_blks; j++) { 9656 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9657 sack = sack_blocks[i]; 9658 sack_blocks[i] = sack_blocks[j]; 9659 sack_blocks[j] = sack; 9660 } 9661 } 9662 } 9663 /* 9664 * Now are any of the sack block ends the same (yes some 9665 * implementations send these)? 9666 */ 9667 again: 9668 if (num_sack_blks == 0) 9669 goto out_with_totals; 9670 if (num_sack_blks > 1) { 9671 for (i = 0; i < num_sack_blks; i++) { 9672 for (j = i + 1; j < num_sack_blks; j++) { 9673 if (sack_blocks[i].end == sack_blocks[j].end) { 9674 /* 9675 * Ok these two have the same end we 9676 * want the smallest end and then 9677 * throw away the larger and start 9678 * again. 
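 * (E.g. blocks [250,300) and [100,300) share an end: the wider
 *  start (100) is copied into slot i, the duplicate slot is
 *  squeezed out of the array, num_sack_blks drops by one and we
 *  restart the scan.)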
9679 */ 9680 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9681 /* 9682 * The second block covers 9683 * more area use that 9684 */ 9685 sack_blocks[i].start = sack_blocks[j].start; 9686 } 9687 /* 9688 * Now collapse out the dup-sack and 9689 * lower the count 9690 */ 9691 for (k = (j + 1); k < num_sack_blks; k++) { 9692 sack_blocks[j].start = sack_blocks[k].start; 9693 sack_blocks[j].end = sack_blocks[k].end; 9694 j++; 9695 } 9696 num_sack_blks--; 9697 goto again; 9698 } 9699 } 9700 } 9701 } 9702 do_sack_work: 9703 /* 9704 * First lets look to see if 9705 * we have retransmitted and 9706 * can use the transmit next? 9707 */ 9708 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9709 if (rsm && 9710 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9711 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9712 /* 9713 * We probably did the FR and the next 9714 * SACK in continues as we would expect. 9715 */ 9716 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9717 if (acked) { 9718 rack->r_wanted_output = 1; 9719 changed += acked; 9720 sack_changed += acked; 9721 } 9722 if (num_sack_blks == 1) { 9723 /* 9724 * This is what we would expect from 9725 * a normal implementation to happen 9726 * after we have retransmitted the FR, 9727 * i.e the sack-filter pushes down 9728 * to 1 block and the next to be retransmitted 9729 * is the sequence in the sack block (has more 9730 * are acked). Count this as ACK'd data to boost 9731 * up the chances of recovering any false positives. 9732 */ 9733 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9734 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9735 counter_u64_add(rack_express_sack, 1); 9736 if (rack->r_ctl.ack_count > 0xfff00000) { 9737 /* 9738 * reduce the number to keep us under 9739 * a uint32_t. 9740 */ 9741 rack->r_ctl.ack_count /= 2; 9742 rack->r_ctl.sack_count /= 2; 9743 } 9744 goto out_with_totals; 9745 } else { 9746 /* 9747 * Start the loop through the 9748 * rest of blocks, past the first block. 9749 */ 9750 moved_two = 0; 9751 loop_start = 1; 9752 } 9753 } 9754 /* Its a sack of some sort */ 9755 rack->r_ctl.sack_count++; 9756 if (rack->r_ctl.sack_count > 0xfff00000) { 9757 /* 9758 * reduce the number to keep us under 9759 * a uint32_t. 9760 */ 9761 rack->r_ctl.ack_count /= 2; 9762 rack->r_ctl.sack_count /= 2; 9763 } 9764 counter_u64_add(rack_sack_total, 1); 9765 if (rack->sack_attack_disable) { 9766 /* An attacker disablement is in place */ 9767 if (num_sack_blks > 1) { 9768 rack->r_ctl.sack_count += (num_sack_blks - 1); 9769 rack->r_ctl.sack_moved_extra++; 9770 counter_u64_add(rack_move_some, 1); 9771 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9772 rack->r_ctl.sack_moved_extra /= 2; 9773 rack->r_ctl.sack_noextra_move /= 2; 9774 } 9775 } 9776 goto out; 9777 } 9778 rsm = rack->r_ctl.rc_sacklast; 9779 for (i = loop_start; i < num_sack_blks; i++) { 9780 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9781 if (acked) { 9782 rack->r_wanted_output = 1; 9783 changed += acked; 9784 sack_changed += acked; 9785 } 9786 if (moved_two) { 9787 /* 9788 * If we did not get a SACK for at least a MSS and 9789 * had to move at all, or if we moved more than our 9790 * threshold, it counts against the "extra" move. 9791 */ 9792 rack->r_ctl.sack_moved_extra += moved_two; 9793 counter_u64_add(rack_move_some, 1); 9794 } else { 9795 /* 9796 * else we did not have to move 9797 * any more than we would expect. 
9798 */ 9799 rack->r_ctl.sack_noextra_move++; 9800 counter_u64_add(rack_move_none, 1); 9801 } 9802 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9803 /* 9804 * If the SACK was not a full MSS then 9805 * we add to sack_count the number of 9806 * MSS's (or possibly more than 9807 * a MSS if its a TSO send) we had to skip by. 9808 */ 9809 rack->r_ctl.sack_count += moved_two; 9810 counter_u64_add(rack_sack_total, moved_two); 9811 } 9812 /* 9813 * Now we need to setup for the next 9814 * round. First we make sure we won't 9815 * exceed the size of our uint32_t on 9816 * the various counts, and then clear out 9817 * moved_two. 9818 */ 9819 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9820 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9821 rack->r_ctl.sack_moved_extra /= 2; 9822 rack->r_ctl.sack_noextra_move /= 2; 9823 } 9824 if (rack->r_ctl.sack_count > 0xfff00000) { 9825 rack->r_ctl.ack_count /= 2; 9826 rack->r_ctl.sack_count /= 2; 9827 } 9828 moved_two = 0; 9829 } 9830 out_with_totals: 9831 if (num_sack_blks > 1) { 9832 /* 9833 * You get an extra stroke if 9834 * you have more than one sack-blk, this 9835 * could be where we are skipping forward 9836 * and the sack-filter is still working, or 9837 * it could be an attacker constantly 9838 * moving us. 9839 */ 9840 rack->r_ctl.sack_moved_extra++; 9841 counter_u64_add(rack_move_some, 1); 9842 } 9843 out: 9844 #ifdef NETFLIX_EXP_DETECTION 9845 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9846 #endif 9847 if (changed) { 9848 /* Something changed cancel the rack timer */ 9849 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9850 } 9851 tsused = tcp_get_usecs(NULL); 9852 rsm = tcp_rack_output(tp, rack, tsused); 9853 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9854 rsm) { 9855 /* Enter recovery */ 9856 rack->r_ctl.rc_rsm_start = rsm->r_start; 9857 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 9858 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 9859 entered_recovery = 1; 9860 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9861 /* 9862 * When we enter recovery we need to assure we send 9863 * one packet. 9864 */ 9865 if (rack->rack_no_prr == 0) { 9866 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9867 rack_log_to_prr(rack, 8, 0); 9868 } 9869 rack->r_timer_override = 1; 9870 rack->r_early = 0; 9871 rack->r_ctl.rc_agg_early = 0; 9872 } else if (IN_FASTRECOVERY(tp->t_flags) && 9873 rsm && 9874 (rack->r_rr_config == 3)) { 9875 /* 9876 * Assure we can output and we get no 9877 * remembered pace time except the retransmit. 9878 */ 9879 rack->r_timer_override = 1; 9880 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9881 rack->r_ctl.rc_resend = rsm; 9882 } 9883 if (IN_FASTRECOVERY(tp->t_flags) && 9884 (rack->rack_no_prr == 0) && 9885 (entered_recovery == 0)) { 9886 rack_update_prr(tp, rack, changed, th_ack); 9887 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9888 ((rack->rc_inp->inp_in_hpts == 0) && 9889 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9890 /* 9891 * If you are pacing output you don't want 9892 * to override. 
9893 */ 9894 rack->r_early = 0; 9895 rack->r_ctl.rc_agg_early = 0; 9896 rack->r_timer_override = 1; 9897 } 9898 } 9899 } 9900 9901 static void 9902 rack_strike_dupack(struct tcp_rack *rack) 9903 { 9904 struct rack_sendmap *rsm; 9905 9906 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9907 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9908 rsm = TAILQ_NEXT(rsm, r_tnext); 9909 } 9910 if (rsm && (rsm->r_dupack < 0xff)) { 9911 rsm->r_dupack++; 9912 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9913 struct timeval tv; 9914 uint32_t cts; 9915 /* 9916 * Here we see if we need to retransmit. For 9917 * a SACK type connection if enough time has passed 9918 * we will get a return of the rsm. For a non-sack 9919 * connection we will get the rsm returned if the 9920 * dupack value is 3 or more. 9921 */ 9922 cts = tcp_get_usecs(&tv); 9923 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9924 if (rack->r_ctl.rc_resend != NULL) { 9925 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9926 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9927 rack->rc_tp->snd_una); 9928 } 9929 rack->r_wanted_output = 1; 9930 rack->r_timer_override = 1; 9931 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9932 } 9933 } else { 9934 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9935 } 9936 } 9937 } 9938 9939 static void 9940 rack_check_bottom_drag(struct tcpcb *tp, 9941 struct tcp_rack *rack, 9942 struct socket *so, int32_t acked) 9943 { 9944 uint32_t segsiz, minseg; 9945 9946 segsiz = ctf_fixed_maxseg(tp); 9947 minseg = segsiz; 9948 9949 if (tp->snd_max == tp->snd_una) { 9950 /* 9951 * We are doing dynamic pacing and we are way 9952 * under. Basically everything got acked while 9953 * we were still waiting on the pacer to expire. 9954 * 9955 * This means we need to boost the b/w in 9956 * addition to any earlier boosting of 9957 * the multipler. 9958 */ 9959 rack->rc_dragged_bottom = 1; 9960 rack_validate_multipliers_at_or_above100(rack); 9961 /* 9962 * Lets use the segment bytes acked plus 9963 * the lowest RTT seen as the basis to 9964 * form a b/w estimate. This will be off 9965 * due to the fact that the true estimate 9966 * should be around 1/2 the time of the RTT 9967 * but we can settle for that. 9968 */ 9969 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9970 acked) { 9971 uint64_t bw, calc_bw, rtt; 9972 9973 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9974 if (rtt == 0) { 9975 /* no us sample is there a ms one? */ 9976 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9977 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9978 } else { 9979 goto no_measurement; 9980 } 9981 } 9982 bw = acked; 9983 calc_bw = bw * 1000000; 9984 calc_bw /= rtt; 9985 if (rack->r_ctl.last_max_bw && 9986 (rack->r_ctl.last_max_bw < calc_bw)) { 9987 /* 9988 * If we have a last calculated max bw 9989 * enforce it. 9990 */ 9991 calc_bw = rack->r_ctl.last_max_bw; 9992 } 9993 /* now plop it in */ 9994 if (rack->rc_gp_filled == 0) { 9995 if (calc_bw > ONE_POINT_TWO_MEG) { 9996 /* 9997 * If we have no measurement 9998 * don't let us set in more than 9999 * 1.2Mbps. If we are still too 10000 * low after pacing with this we 10001 * will hopefully have a max b/w 10002 * available to sanity check things. 
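 *
 * The goodput estimate formed above is just bytes acked scaled to a
 * per-second rate using the lowest RTT (in microseconds), optionally
 * clamped by the last known maximum b/w.  Stand-alone sketch of the
 * arithmetic (names are illustrative):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t
 *	bw_from_ack(uint64_t acked_bytes, uint64_t rtt_usec,
 *	    uint64_t bw_cap)
 *	{
 *		uint64_t bw;
 *
 *		if (rtt_usec == 0)
 *			return (0);	// no sample; caller just bumps the multiplier
 *		bw = (acked_bytes * 1000000) / rtt_usec;	// bytes per second
 *		if (bw_cap != 0 && bw > bw_cap)
 *			bw = bw_cap;	// respect the measured ceiling
 *		return (bw);
 *	}
 *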
10003 */ 10004 calc_bw = ONE_POINT_TWO_MEG; 10005 } 10006 rack->r_ctl.rc_rtt_diff = 0; 10007 rack->r_ctl.gp_bw = calc_bw; 10008 rack->rc_gp_filled = 1; 10009 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10010 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10011 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10012 } else if (calc_bw > rack->r_ctl.gp_bw) { 10013 rack->r_ctl.rc_rtt_diff = 0; 10014 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 10015 rack->r_ctl.num_measurements = RACK_REQ_AVG; 10016 rack->r_ctl.gp_bw = calc_bw; 10017 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 10018 } else 10019 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10020 if ((rack->gp_ready == 0) && 10021 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 10022 /* We have enough measurements now */ 10023 rack->gp_ready = 1; 10024 rack_set_cc_pacing(rack); 10025 if (rack->defer_options) 10026 rack_apply_deferred_options(rack); 10027 } 10028 /* 10029 * For acks over 1mss we do a extra boost to simulate 10030 * where we would get 2 acks (we want 110 for the mul). 10031 */ 10032 if (acked > segsiz) 10033 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10034 } else { 10035 /* 10036 * zero rtt possibly?, settle for just an old increase. 10037 */ 10038 no_measurement: 10039 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10040 } 10041 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 10042 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 10043 minseg)) && 10044 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 10045 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 10046 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 10047 (segsiz * rack_req_segs))) { 10048 /* 10049 * We are doing dynamic GP pacing and 10050 * we have everything except 1MSS or less 10051 * bytes left out. We are still pacing away. 10052 * And there is data that could be sent, This 10053 * means we are inserting delayed ack time in 10054 * our measurements because we are pacing too slow. 10055 */ 10056 rack_validate_multipliers_at_or_above100(rack); 10057 rack->rc_dragged_bottom = 1; 10058 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10059 } 10060 } 10061 10062 10063 10064 static void 10065 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 10066 { 10067 /* 10068 * The fast output path is enabled and we 10069 * have moved the cumack forward. Lets see if 10070 * we can expand forward the fast path length by 10071 * that amount. What we would ideally like to 10072 * do is increase the number of bytes in the 10073 * fast path block (left_to_send) by the 10074 * acked amount. However we have to gate that 10075 * by two factors: 10076 * 1) The amount outstanding and the rwnd of the peer 10077 * (i.e. we don't want to exceed the rwnd of the peer). 10078 * <and> 10079 * 2) The amount of data left in the socket buffer (i.e. 10080 * we can't send beyond what is in the buffer). 10081 * 10082 * Note that this does not take into account any increase 10083 * in the cwnd. We will only extend the fast path by 10084 * what was acked. 
10085 */ 10086 uint32_t new_total, gating_val; 10087 10088 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 10089 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 10090 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 10091 if (new_total <= gating_val) { 10092 /* We can increase left_to_send by the acked amount */ 10093 counter_u64_add(rack_extended_rfo, 1); 10094 rack->r_ctl.fsb.left_to_send = new_total; 10095 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 10096 ("rack:%p left_to_send:%u sbavail:%u out:%u", 10097 rack, rack->r_ctl.fsb.left_to_send, 10098 sbavail(&rack->rc_inp->inp_socket->so_snd), 10099 (tp->snd_max - tp->snd_una))); 10100 10101 } 10102 } 10103 10104 static void 10105 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 10106 { 10107 /* 10108 * Here any sendmap entry that points to the 10109 * beginning mbuf must be adjusted to the correct 10110 * offset. This must be called with: 10111 * 1) The socket buffer locked 10112 * 2) snd_una adjusted to its new postion. 10113 * 10114 * Note that (2) implies rack_ack_received has also 10115 * been called. 10116 * 10117 * We grab the first mbuf in the socket buffer and 10118 * then go through the front of the sendmap, recalculating 10119 * the stored offset for any sendmap entry that has 10120 * that mbuf. We must use the sb functions to do this 10121 * since its possible an add was done has well as 10122 * the subtraction we may have just completed. This should 10123 * not be a penalty though, since we just referenced the sb 10124 * to go in and trim off the mbufs that we freed (of course 10125 * there will be a penalty for the sendmap references though). 10126 */ 10127 struct mbuf *m; 10128 struct rack_sendmap *rsm; 10129 10130 SOCKBUF_LOCK_ASSERT(sb); 10131 m = sb->sb_mb; 10132 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 10133 if ((rsm == NULL) || (m == NULL)) { 10134 /* Nothing outstanding */ 10135 return; 10136 } 10137 while (rsm->m && (rsm->m == m)) { 10138 /* one to adjust */ 10139 #ifdef INVARIANTS 10140 struct mbuf *tm; 10141 uint32_t soff; 10142 10143 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 10144 if (rsm->orig_m_len != m->m_len) { 10145 rack_adjust_orig_mlen(rsm); 10146 } 10147 if (rsm->soff != soff) { 10148 /* 10149 * This is not a fatal error, we anticipate it 10150 * might happen (the else code), so we count it here 10151 * so that under invariant we can see that it really 10152 * does happen. 10153 */ 10154 counter_u64_add(rack_adjust_map_bw, 1); 10155 } 10156 rsm->m = tm; 10157 rsm->soff = soff; 10158 if (tm) 10159 rsm->orig_m_len = rsm->m->m_len; 10160 else 10161 rsm->orig_m_len = 0; 10162 #else 10163 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10164 if (rsm->m) 10165 rsm->orig_m_len = rsm->m->m_len; 10166 else 10167 rsm->orig_m_len = 0; 10168 #endif 10169 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10170 rsm); 10171 if (rsm == NULL) 10172 break; 10173 } 10174 } 10175 10176 /* 10177 * Return value of 1, we do not need to call rack_process_data(). 10178 * return value of 0, rack_process_data can be called. 10179 * For ret_val if its 0 the TCP is locked, if its non-zero 10180 * its unlocked and probably unsafe to touch the TCB. 
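 *
 * The state handlers later in this file follow the contract above in
 * the same way, so the usual call site looks like (arguments
 * abbreviated):
 *
 *	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen,
 *	    &ourfinisacked, thflags, &ret_val))
 *		return (ret_val);	// skip rack_process_data(); a
 *					// ret_val of 0 means the TCB is
 *					// still locked
 *	// else: the ack was fully processed and the TCB is still
 *	// locked, continue on towards rack_process_data().
 *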
10181 */ 10182 static int 10183 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10184 struct tcpcb *tp, struct tcpopt *to, 10185 uint32_t tiwin, int32_t tlen, 10186 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10187 { 10188 int32_t ourfinisacked = 0; 10189 int32_t nsegs, acked_amount; 10190 int32_t acked; 10191 struct mbuf *mfree; 10192 struct tcp_rack *rack; 10193 int32_t under_pacing = 0; 10194 int32_t recovery = 0; 10195 10196 rack = (struct tcp_rack *)tp->t_fb_ptr; 10197 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10198 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10199 &rack->r_ctl.challenge_ack_ts, 10200 &rack->r_ctl.challenge_ack_cnt); 10201 rack->r_wanted_output = 1; 10202 return (1); 10203 } 10204 if (rack->gp_ready && 10205 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10206 under_pacing = 1; 10207 } 10208 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10209 int in_rec, dup_ack_struck = 0; 10210 10211 in_rec = IN_FASTRECOVERY(tp->t_flags); 10212 if (rack->rc_in_persist) { 10213 tp->t_rxtshift = 0; 10214 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10215 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10216 } 10217 if ((th->th_ack == tp->snd_una) && 10218 (tiwin == tp->snd_wnd) && 10219 ((to->to_flags & TOF_SACK) == 0)) { 10220 rack_strike_dupack(rack); 10221 dup_ack_struck = 1; 10222 } 10223 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10224 } 10225 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10226 /* 10227 * Old ack, behind (or duplicate to) the last one rcv'd 10228 * Note: We mark reordering is occuring if its 10229 * less than and we have not closed our window. 10230 */ 10231 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10232 counter_u64_add(rack_reorder_seen, 1); 10233 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10234 } 10235 return (0); 10236 } 10237 /* 10238 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10239 * something we sent. 10240 */ 10241 if (tp->t_flags & TF_NEEDSYN) { 10242 /* 10243 * T/TCP: Connection was half-synchronized, and our SYN has 10244 * been ACK'd (so connection is now fully synchronized). Go 10245 * to non-starred state, increment snd_una for ACK of SYN, 10246 * and check if we can do window scaling. 10247 */ 10248 tp->t_flags &= ~TF_NEEDSYN; 10249 tp->snd_una++; 10250 /* Do window scaling? */ 10251 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10252 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10253 tp->rcv_scale = tp->request_r_scale; 10254 /* Send window already scaled. */ 10255 } 10256 } 10257 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10258 INP_WLOCK_ASSERT(tp->t_inpcb); 10259 10260 acked = BYTES_THIS_ACK(tp, th); 10261 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10262 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10263 /* 10264 * If we just performed our first retransmit, and the ACK arrives 10265 * within our recovery window, then it was a mistake to do the 10266 * retransmit in the first place. Recover our original cwnd and 10267 * ssthresh, and proceed to transmit where we left off. 
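 *
 * The "mistake" test below (and in the fast-ack path) is only armed
 * when TF_PREVVALID is set and no timestamps are in use; it reduces to
 * a signed comparison against the recorded bad-retransmit deadline.
 * Minimal sketch (names are illustrative):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool
 *	rto_was_spurious(int rxtshift, uint32_t ticks_now,
 *	    uint32_t badrxtwin)
 *	{
 *		// Only the first RTO is considered, and the ACK must
 *		// come back before the deadline; the signed cast keeps
 *		// the comparison correct across tick wrap.
 *		return (rxtshift == 1 &&
 *		    (int32_t)(ticks_now - badrxtwin) < 0);
 *	}
 *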
10268 */ 10269 if ((tp->t_flags & TF_PREVVALID) && 10270 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10271 tp->t_flags &= ~TF_PREVVALID; 10272 if (tp->t_rxtshift == 1 && 10273 (int)(ticks - tp->t_badrxtwin) < 0) 10274 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10275 } 10276 if (acked) { 10277 /* assure we are not backed off */ 10278 tp->t_rxtshift = 0; 10279 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10280 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10281 rack->rc_tlp_in_progress = 0; 10282 rack->r_ctl.rc_tlp_cnt_out = 0; 10283 /* 10284 * If it is the RXT timer we want to 10285 * stop it, so we can restart a TLP. 10286 */ 10287 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10288 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10289 #ifdef NETFLIX_HTTP_LOGGING 10290 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10291 #endif 10292 } 10293 /* 10294 * If we have a timestamp reply, update smoothed round trip time. If 10295 * no timestamp is present but transmit timer is running and timed 10296 * sequence number was acked, update smoothed round trip time. Since 10297 * we now have an rtt measurement, cancel the timer backoff (cf., 10298 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10299 * timer. 10300 * 10301 * Some boxes send broken timestamp replies during the SYN+ACK 10302 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10303 * and blow up the retransmit timer. 10304 */ 10305 /* 10306 * If all outstanding data is acked, stop retransmit timer and 10307 * remember to restart (more output or persist). If there is more 10308 * data to be acked, restart retransmit timer, using current 10309 * (possibly backed-off) value. 10310 */ 10311 if (acked == 0) { 10312 if (ofia) 10313 *ofia = ourfinisacked; 10314 return (0); 10315 } 10316 if (IN_RECOVERY(tp->t_flags)) { 10317 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10318 (SEQ_LT(th->th_ack, tp->snd_max))) { 10319 tcp_rack_partialack(tp); 10320 } else { 10321 rack_post_recovery(tp, th->th_ack); 10322 recovery = 1; 10323 } 10324 } 10325 /* 10326 * Let the congestion control algorithm update congestion control 10327 * related information. This typically means increasing the 10328 * congestion window. 10329 */ 10330 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10331 SOCKBUF_LOCK(&so->so_snd); 10332 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10333 tp->snd_wnd -= acked_amount; 10334 mfree = sbcut_locked(&so->so_snd, acked_amount); 10335 if ((sbused(&so->so_snd) == 0) && 10336 (acked > acked_amount) && 10337 (tp->t_state >= TCPS_FIN_WAIT_1) && 10338 (tp->t_flags & TF_SENTFIN)) { 10339 /* 10340 * We must be sure our fin 10341 * was sent and acked (we can be 10342 * in FIN_WAIT_1 without having 10343 * sent the fin). 10344 */ 10345 ourfinisacked = 1; 10346 } 10347 tp->snd_una = th->th_ack; 10348 if (acked_amount && sbavail(&so->so_snd)) 10349 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10350 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10351 /* NB: sowwakeup_locked() does an implicit unlock. 
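 *
 * Aside on the ourfinisacked test a few lines up: our FIN occupies one
 * sequence number beyond the data, so it can only be declared acked
 * when the ACK covered more than the socket buffer could supply, the
 * buffer has fully drained, and a FIN was in fact sent.  Sketch
 * (parameter names are illustrative):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool
 *	our_fin_is_acked(uint32_t acked, uint32_t acked_from_sb,
 *	    uint32_t sb_bytes_left, bool reached_fin_wait_1,
 *	    bool sent_fin)
 *	{
 *		return (sb_bytes_left == 0 && acked > acked_from_sb &&
 *		    reached_fin_wait_1 && sent_fin);
 *	}
 *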
*/ 10352 sowwakeup_locked(so); 10353 m_freem(mfree); 10354 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10355 tp->snd_recover = tp->snd_una; 10356 10357 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10358 tp->snd_nxt = tp->snd_una; 10359 } 10360 if (under_pacing && 10361 (rack->use_fixed_rate == 0) && 10362 (rack->in_probe_rtt == 0) && 10363 rack->rc_gp_dyn_mul && 10364 rack->rc_always_pace) { 10365 /* Check if we are dragging bottom */ 10366 rack_check_bottom_drag(tp, rack, so, acked); 10367 } 10368 if (tp->snd_una == tp->snd_max) { 10369 /* Nothing left outstanding */ 10370 tp->t_flags &= ~TF_PREVVALID; 10371 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10372 rack->r_ctl.retran_during_recovery = 0; 10373 rack->r_ctl.dsack_byte_cnt = 0; 10374 if (rack->r_ctl.rc_went_idle_time == 0) 10375 rack->r_ctl.rc_went_idle_time = 1; 10376 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10377 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10378 tp->t_acktime = 0; 10379 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10380 /* Set need output so persist might get set */ 10381 rack->r_wanted_output = 1; 10382 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10383 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10384 (sbavail(&so->so_snd) == 0) && 10385 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10386 /* 10387 * The socket was gone and the 10388 * peer sent data (now or in the past), time to 10389 * reset him. 10390 */ 10391 *ret_val = 1; 10392 /* tcp_close will kill the inp pre-log the Reset */ 10393 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10394 tp = tcp_close(tp); 10395 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10396 return (1); 10397 } 10398 } 10399 if (ofia) 10400 *ofia = ourfinisacked; 10401 return (0); 10402 } 10403 10404 static void 10405 rack_collapsed_window(struct tcp_rack *rack) 10406 { 10407 /* 10408 * Now we must walk the 10409 * send map and divide the 10410 * ones left stranded. These 10411 * guys can't cause us to abort 10412 * the connection and are really 10413 * "unsent". However if a buggy 10414 * client actually did keep some 10415 * of the data i.e. collapsed the win 10416 * and refused to ack and then opened 10417 * the win and acked that data. We would 10418 * get into an ack war, the simplier 10419 * method then of just pretending we 10420 * did not send those segments something 10421 * won't work. 10422 */ 10423 struct rack_sendmap *rsm, *nrsm, fe, *insret; 10424 tcp_seq max_seq; 10425 10426 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10427 memset(&fe, 0, sizeof(fe)); 10428 fe.r_start = max_seq; 10429 /* Find the first seq past or at maxseq */ 10430 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10431 if (rsm == NULL) { 10432 /* Nothing to do strange */ 10433 rack->rc_has_collapsed = 0; 10434 return; 10435 } 10436 /* 10437 * Now do we need to split at 10438 * the collapse point? 10439 */ 10440 if (SEQ_GT(max_seq, rsm->r_start)) { 10441 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10442 if (nrsm == NULL) { 10443 /* We can't get a rsm, mark all? 
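 *
 * Ignoring the split for a moment, the marking done by this function
 * amounts to flagging every tracked range that holds data at or past
 * the collapse point (snd_una + snd_wnd).  Simplified sketch over a
 * plain array, sequence wraparound ignored (the real code walks the
 * RB tree and uses the SEQ_* macros):
 *
 *	#include <stdint.h>
 *
 *	struct range {			// stand-in for a sendmap entry
 *		uint32_t start, end;	// [start, end) in sequence space
 *		int collapsed;
 *	};
 *
 *	static void
 *	mark_collapsed(struct range *r, int n, uint32_t max_seq)
 *	{
 *		for (int i = 0; i < n; i++)
 *			if (r[i].end > max_seq)
 *				r[i].collapsed = 1;
 *	}
 *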
*/ 10444 nrsm = rsm; 10445 goto no_split; 10446 } 10447 /* Clone it */ 10448 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10449 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10450 #ifdef INVARIANTS 10451 if (insret != NULL) { 10452 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10453 nrsm, insret, rack, rsm); 10454 } 10455 #endif 10456 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10457 if (rsm->r_in_tmap) { 10458 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10459 nrsm->r_in_tmap = 1; 10460 } 10461 /* 10462 * Set in the new RSM as the 10463 * collapsed starting point 10464 */ 10465 rsm = nrsm; 10466 } 10467 no_split: 10468 counter_u64_add(rack_collapsed_win, 1); 10469 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10470 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10471 } 10472 rack->rc_has_collapsed = 1; 10473 } 10474 10475 static void 10476 rack_un_collapse_window(struct tcp_rack *rack) 10477 { 10478 struct rack_sendmap *rsm; 10479 10480 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10481 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10482 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10483 else 10484 break; 10485 } 10486 rack->rc_has_collapsed = 0; 10487 } 10488 10489 static void 10490 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10491 int32_t tlen, int32_t tfo_syn) 10492 { 10493 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10494 if (rack->rc_dack_mode && 10495 (tlen > 500) && 10496 (rack->rc_dack_toggle == 1)) { 10497 goto no_delayed_ack; 10498 } 10499 rack_timer_cancel(tp, rack, 10500 rack->r_ctl.rc_rcvtime, __LINE__); 10501 tp->t_flags |= TF_DELACK; 10502 } else { 10503 no_delayed_ack: 10504 rack->r_wanted_output = 1; 10505 tp->t_flags |= TF_ACKNOW; 10506 if (rack->rc_dack_mode) { 10507 if (tp->t_flags & TF_DELACK) 10508 rack->rc_dack_toggle = 1; 10509 else 10510 rack->rc_dack_toggle = 0; 10511 } 10512 } 10513 } 10514 10515 static void 10516 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10517 { 10518 /* 10519 * If fast output is in progress, lets validate that 10520 * the new window did not shrink on us and make it 10521 * so fast output should end. 10522 */ 10523 if (rack->r_fast_output) { 10524 uint32_t out; 10525 10526 /* 10527 * Calculate what we will send if left as is 10528 * and compare that to our send window. 10529 */ 10530 out = ctf_outstanding(tp); 10531 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10532 /* ok we have an issue */ 10533 if (out >= tp->snd_wnd) { 10534 /* Turn off fast output the window is met or collapsed */ 10535 rack->r_fast_output = 0; 10536 } else { 10537 /* we have some room left */ 10538 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10539 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10540 /* If not at least 1 full segment never mind */ 10541 rack->r_fast_output = 0; 10542 } 10543 } 10544 } 10545 } 10546 } 10547 10548 10549 /* 10550 * Return value of 1, the TCB is unlocked and most 10551 * likely gone, return value of 0, the TCP is still 10552 * locked. 10553 */ 10554 static int 10555 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10556 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10557 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10558 { 10559 /* 10560 * Update window information. Don't look at window if no ACK: TAC's 10561 * send garbage on first SYN. 
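 *
 * rack_validate_fo_sendwin_up() above boils down to re-checking a
 * precomputed fast-send length against a window that may have shrunk:
 * trim it to the space that is left, or give up on the fast path when
 * less than a full segment remains.  Stand-alone sketch (names are
 * illustrative):
 *
 *	#include <stdint.h>
 *
 *	// Returns the possibly reduced length; 0 means fall back to
 *	// the regular output path.
 *	static uint32_t
 *	revalidate_fast_len(uint32_t outstanding, uint32_t left_to_send,
 *	    uint32_t snd_wnd, uint32_t maxseg)
 *	{
 *		if (outstanding + left_to_send <= snd_wnd)
 *			return (left_to_send);	// still fits
 *		if (outstanding >= snd_wnd)
 *			return (0);		// window met or collapsed
 *		left_to_send = snd_wnd - outstanding;
 *		return (left_to_send < maxseg ? 0 : left_to_send);
 *	}
 *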
10562 */ 10563 int32_t nsegs; 10564 int32_t tfo_syn; 10565 struct tcp_rack *rack; 10566 10567 rack = (struct tcp_rack *)tp->t_fb_ptr; 10568 INP_WLOCK_ASSERT(tp->t_inpcb); 10569 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10570 if ((thflags & TH_ACK) && 10571 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10572 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10573 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10574 /* keep track of pure window updates */ 10575 if (tlen == 0 && 10576 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10577 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10578 tp->snd_wnd = tiwin; 10579 rack_validate_fo_sendwin_up(tp, rack); 10580 tp->snd_wl1 = th->th_seq; 10581 tp->snd_wl2 = th->th_ack; 10582 if (tp->snd_wnd > tp->max_sndwnd) 10583 tp->max_sndwnd = tp->snd_wnd; 10584 rack->r_wanted_output = 1; 10585 } else if (thflags & TH_ACK) { 10586 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10587 tp->snd_wnd = tiwin; 10588 rack_validate_fo_sendwin_up(tp, rack); 10589 tp->snd_wl1 = th->th_seq; 10590 tp->snd_wl2 = th->th_ack; 10591 } 10592 } 10593 if (tp->snd_wnd < ctf_outstanding(tp)) 10594 /* The peer collapsed the window */ 10595 rack_collapsed_window(rack); 10596 else if (rack->rc_has_collapsed) 10597 rack_un_collapse_window(rack); 10598 /* Was persist timer active and now we have window space? */ 10599 if ((rack->rc_in_persist != 0) && 10600 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10601 rack->r_ctl.rc_pace_min_segs))) { 10602 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10603 tp->snd_nxt = tp->snd_max; 10604 /* Make sure we output to start the timer */ 10605 rack->r_wanted_output = 1; 10606 } 10607 /* Do we enter persists? */ 10608 if ((rack->rc_in_persist == 0) && 10609 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10610 TCPS_HAVEESTABLISHED(tp->t_state) && 10611 (tp->snd_max == tp->snd_una) && 10612 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10613 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10614 /* 10615 * Here the rwnd is less than 10616 * the pacing size, we are established, 10617 * nothing is outstanding, and there is 10618 * data to send. Enter persists. 10619 */ 10620 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10621 } 10622 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10623 m_freem(m); 10624 return (0); 10625 } 10626 /* 10627 * don't process the URG bit, ignore them drag 10628 * along the up. 10629 */ 10630 tp->rcv_up = tp->rcv_nxt; 10631 INP_WLOCK_ASSERT(tp->t_inpcb); 10632 10633 /* 10634 * Process the segment text, merging it into the TCP sequencing 10635 * queue, and arranging for acknowledgment of receipt if necessary. 10636 * This process logically involves adjusting tp->rcv_wnd as data is 10637 * presented to the user (this happens in tcp_usrreq.c, case 10638 * PRU_RCVD). If a FIN has already been received on this connection 10639 * then we just ignore the text. 10640 */ 10641 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10642 IS_FASTOPEN(tp->t_flags)); 10643 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10644 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10645 tcp_seq save_start = th->th_seq; 10646 tcp_seq save_rnxt = tp->rcv_nxt; 10647 int save_tlen = tlen; 10648 10649 m_adj(m, drop_hdrlen); /* delayed header drop */ 10650 /* 10651 * Insert segment which includes th into TCP reassembly 10652 * queue with control block tp. Set thflags to whether 10653 * reassembly now includes a segment with FIN. 
This handles 10654 * the common case inline (segment is the next to be 10655 * received on an established connection, and the queue is 10656 * empty), avoiding linkage into and removal from the queue 10657 * and repetition of various conversions. Set DELACK for 10658 * segments received in order, but ack immediately when 10659 * segments are out of order (so fast retransmit can work). 10660 */ 10661 if (th->th_seq == tp->rcv_nxt && 10662 SEGQ_EMPTY(tp) && 10663 (TCPS_HAVEESTABLISHED(tp->t_state) || 10664 tfo_syn)) { 10665 #ifdef NETFLIX_SB_LIMITS 10666 u_int mcnt, appended; 10667 10668 if (so->so_rcv.sb_shlim) { 10669 mcnt = m_memcnt(m); 10670 appended = 0; 10671 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10672 CFO_NOSLEEP, NULL) == false) { 10673 counter_u64_add(tcp_sb_shlim_fails, 1); 10674 m_freem(m); 10675 return (0); 10676 } 10677 } 10678 #endif 10679 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10680 tp->rcv_nxt += tlen; 10681 if (tlen && 10682 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10683 (tp->t_fbyte_in == 0)) { 10684 tp->t_fbyte_in = ticks; 10685 if (tp->t_fbyte_in == 0) 10686 tp->t_fbyte_in = 1; 10687 if (tp->t_fbyte_out && tp->t_fbyte_in) 10688 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10689 } 10690 thflags = th->th_flags & TH_FIN; 10691 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10692 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10693 SOCKBUF_LOCK(&so->so_rcv); 10694 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10695 m_freem(m); 10696 } else 10697 #ifdef NETFLIX_SB_LIMITS 10698 appended = 10699 #endif 10700 sbappendstream_locked(&so->so_rcv, m, 0); 10701 10702 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10703 /* NB: sorwakeup_locked() does an implicit unlock. */ 10704 sorwakeup_locked(so); 10705 #ifdef NETFLIX_SB_LIMITS 10706 if (so->so_rcv.sb_shlim && appended != mcnt) 10707 counter_fo_release(so->so_rcv.sb_shlim, 10708 mcnt - appended); 10709 #endif 10710 } else { 10711 /* 10712 * XXX: Due to the header drop above "th" is 10713 * theoretically invalid by now. Fortunately 10714 * m_adj() doesn't actually frees any mbufs when 10715 * trimming from the head. 10716 */ 10717 tcp_seq temp = save_start; 10718 10719 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10720 tp->t_flags |= TF_ACKNOW; 10721 if (tp->t_flags & TF_WAKESOR) { 10722 tp->t_flags &= ~TF_WAKESOR; 10723 /* NB: sorwakeup_locked() does an implicit unlock. */ 10724 sorwakeup_locked(so); 10725 } 10726 } 10727 if ((tp->t_flags & TF_SACK_PERMIT) && 10728 (save_tlen > 0) && 10729 TCPS_HAVEESTABLISHED(tp->t_state)) { 10730 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10731 /* 10732 * DSACK actually handled in the fastpath 10733 * above. 10734 */ 10735 RACK_OPTS_INC(tcp_sack_path_1); 10736 tcp_update_sack_list(tp, save_start, 10737 save_start + save_tlen); 10738 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10739 if ((tp->rcv_numsacks >= 1) && 10740 (tp->sackblks[0].end == save_start)) { 10741 /* 10742 * Partial overlap, recorded at todrop 10743 * above. 10744 */ 10745 RACK_OPTS_INC(tcp_sack_path_2a); 10746 tcp_update_sack_list(tp, 10747 tp->sackblks[0].start, 10748 tp->sackblks[0].end); 10749 } else { 10750 RACK_OPTS_INC(tcp_sack_path_2b); 10751 tcp_update_dsack_list(tp, save_start, 10752 save_start + save_tlen); 10753 } 10754 } else if (tlen >= save_tlen) { 10755 /* Update of sackblks. 
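 *
 * The dsack updates in this block all report a duplicate range back to
 * the sender per RFC 2883: when (part of) the arriving segment turns
 * out to be data we already had, the duplicated span is advertised as
 * the leading SACK block of the next ACK.  A minimal construction of
 * that block (the list maintenance done by tcp_update_dsack_list() is
 * not reproduced here):
 *
 *	#include <stdint.h>
 *
 *	struct sackblk { uint32_t start, end; };
 *
 *	static struct sackblk
 *	dsack_block_for(uint32_t seg_seq, uint32_t seg_len)
 *	{
 *		struct sackblk b = {
 *			.start = seg_seq,
 *			.end = seg_seq + seg_len  // one past the dup data
 *		};
 *		return (b);
 *	}
 *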
*/ 10756 RACK_OPTS_INC(tcp_sack_path_3); 10757 tcp_update_dsack_list(tp, save_start, 10758 save_start + save_tlen); 10759 } else if (tlen > 0) { 10760 RACK_OPTS_INC(tcp_sack_path_4); 10761 tcp_update_dsack_list(tp, save_start, 10762 save_start + tlen); 10763 } 10764 } 10765 } else { 10766 m_freem(m); 10767 thflags &= ~TH_FIN; 10768 } 10769 10770 /* 10771 * If FIN is received ACK the FIN and let the user know that the 10772 * connection is closing. 10773 */ 10774 if (thflags & TH_FIN) { 10775 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10776 /* The socket upcall is handled by socantrcvmore. */ 10777 socantrcvmore(so); 10778 /* 10779 * If connection is half-synchronized (ie NEEDSYN 10780 * flag on) then delay ACK, so it may be piggybacked 10781 * when SYN is sent. Otherwise, since we received a 10782 * FIN then no more input can be expected, send ACK 10783 * now. 10784 */ 10785 if (tp->t_flags & TF_NEEDSYN) { 10786 rack_timer_cancel(tp, rack, 10787 rack->r_ctl.rc_rcvtime, __LINE__); 10788 tp->t_flags |= TF_DELACK; 10789 } else { 10790 tp->t_flags |= TF_ACKNOW; 10791 } 10792 tp->rcv_nxt++; 10793 } 10794 switch (tp->t_state) { 10795 /* 10796 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10797 * CLOSE_WAIT state. 10798 */ 10799 case TCPS_SYN_RECEIVED: 10800 tp->t_starttime = ticks; 10801 /* FALLTHROUGH */ 10802 case TCPS_ESTABLISHED: 10803 rack_timer_cancel(tp, rack, 10804 rack->r_ctl.rc_rcvtime, __LINE__); 10805 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10806 break; 10807 10808 /* 10809 * If still in FIN_WAIT_1 STATE FIN has not been 10810 * acked so enter the CLOSING state. 10811 */ 10812 case TCPS_FIN_WAIT_1: 10813 rack_timer_cancel(tp, rack, 10814 rack->r_ctl.rc_rcvtime, __LINE__); 10815 tcp_state_change(tp, TCPS_CLOSING); 10816 break; 10817 10818 /* 10819 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10820 * starting the time-wait timer, turning off the 10821 * other standard timers. 10822 */ 10823 case TCPS_FIN_WAIT_2: 10824 rack_timer_cancel(tp, rack, 10825 rack->r_ctl.rc_rcvtime, __LINE__); 10826 tcp_twstart(tp); 10827 return (1); 10828 } 10829 } 10830 /* 10831 * Return any desired output. 10832 */ 10833 if ((tp->t_flags & TF_ACKNOW) || 10834 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10835 rack->r_wanted_output = 1; 10836 } 10837 INP_WLOCK_ASSERT(tp->t_inpcb); 10838 return (0); 10839 } 10840 10841 /* 10842 * Here nothing is really faster, its just that we 10843 * have broken out the fast-data path also just like 10844 * the fast-ack. 10845 */ 10846 static int 10847 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10848 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10849 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10850 { 10851 int32_t nsegs; 10852 int32_t newsize = 0; /* automatic sockbuf scaling */ 10853 struct tcp_rack *rack; 10854 #ifdef NETFLIX_SB_LIMITS 10855 u_int mcnt, appended; 10856 #endif 10857 #ifdef TCPDEBUG 10858 /* 10859 * The size of tcp_saveipgen must be the size of the max ip header, 10860 * now IPv6. 10861 */ 10862 u_char tcp_saveipgen[IP6_HDR_LEN]; 10863 struct tcphdr tcp_savetcp; 10864 short ostate = 0; 10865 10866 #endif 10867 /* 10868 * If last ACK falls within this segment's sequence numbers, record 10869 * the timestamp. NOTE that the test is modified according to the 10870 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
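 *
 * Spelled out, the rule referenced here (and repeated in the slow-path
 * handlers below) updates the echoed timestamp only when the segment
 * covers Last.ACK.sent:
 *
 *	SEG.SEQ <= Last.ACK.sent <= SEG.SEQ + SEG.LEN (+1 for SYN or FIN)
 *
 * The fast path below checks only the first half, having already
 * required SEG.SEQ == rcv_nxt.  Stand-alone sketch using the usual
 * wrap-safe sequence compare:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool
 *	seq_leq(uint32_t a, uint32_t b)
 *	{
 *		return ((int32_t)(a - b) <= 0);	// wrap-safe a <= b
 *	}
 *
 *	static bool
 *	should_update_ts_recent(uint32_t seg_seq, uint32_t seg_len,
 *	    bool syn_or_fin, uint32_t last_ack_sent)
 *	{
 *		return (seq_leq(seg_seq, last_ack_sent) &&
 *		    seq_leq(last_ack_sent,
 *		    seg_seq + seg_len + (syn_or_fin ? 1 : 0)));
 *	}
 *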
10871 */ 10872 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10873 return (0); 10874 } 10875 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10876 return (0); 10877 } 10878 if (tiwin && tiwin != tp->snd_wnd) { 10879 return (0); 10880 } 10881 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10882 return (0); 10883 } 10884 if (__predict_false((to->to_flags & TOF_TS) && 10885 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10886 return (0); 10887 } 10888 if (__predict_false((th->th_ack != tp->snd_una))) { 10889 return (0); 10890 } 10891 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10892 return (0); 10893 } 10894 if ((to->to_flags & TOF_TS) != 0 && 10895 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10896 tp->ts_recent_age = tcp_ts_getticks(); 10897 tp->ts_recent = to->to_tsval; 10898 } 10899 rack = (struct tcp_rack *)tp->t_fb_ptr; 10900 /* 10901 * This is a pure, in-sequence data packet with nothing on the 10902 * reassembly queue and we have enough buffer space to take it. 10903 */ 10904 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10905 10906 #ifdef NETFLIX_SB_LIMITS 10907 if (so->so_rcv.sb_shlim) { 10908 mcnt = m_memcnt(m); 10909 appended = 0; 10910 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10911 CFO_NOSLEEP, NULL) == false) { 10912 counter_u64_add(tcp_sb_shlim_fails, 1); 10913 m_freem(m); 10914 return (1); 10915 } 10916 } 10917 #endif 10918 /* Clean receiver SACK report if present */ 10919 if (tp->rcv_numsacks) 10920 tcp_clean_sackreport(tp); 10921 KMOD_TCPSTAT_INC(tcps_preddat); 10922 tp->rcv_nxt += tlen; 10923 if (tlen && 10924 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10925 (tp->t_fbyte_in == 0)) { 10926 tp->t_fbyte_in = ticks; 10927 if (tp->t_fbyte_in == 0) 10928 tp->t_fbyte_in = 1; 10929 if (tp->t_fbyte_out && tp->t_fbyte_in) 10930 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10931 } 10932 /* 10933 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10934 */ 10935 tp->snd_wl1 = th->th_seq; 10936 /* 10937 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10938 */ 10939 tp->rcv_up = tp->rcv_nxt; 10940 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10941 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10942 #ifdef TCPDEBUG 10943 if (so->so_options & SO_DEBUG) 10944 tcp_trace(TA_INPUT, ostate, tp, 10945 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10946 #endif 10947 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10948 10949 /* Add data to socket buffer. */ 10950 SOCKBUF_LOCK(&so->so_rcv); 10951 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10952 m_freem(m); 10953 } else { 10954 /* 10955 * Set new socket buffer size. Give up when limit is 10956 * reached. 10957 */ 10958 if (newsize) 10959 if (!sbreserve_locked(&so->so_rcv, 10960 newsize, so, NULL)) 10961 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10962 m_adj(m, drop_hdrlen); /* delayed header drop */ 10963 #ifdef NETFLIX_SB_LIMITS 10964 appended = 10965 #endif 10966 sbappendstream_locked(&so->so_rcv, m, 0); 10967 ctf_calc_rwin(so, tp); 10968 } 10969 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10970 /* NB: sorwakeup_locked() does an implicit unlock. */ 10971 sorwakeup_locked(so); 10972 #ifdef NETFLIX_SB_LIMITS 10973 if (so->so_rcv.sb_shlim && mcnt != appended) 10974 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10975 #endif 10976 rack_handle_delayed_ack(tp, rack, tlen, 0); 10977 if (tp->snd_una == tp->snd_max) 10978 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10979 return (1); 10980 } 10981 10982 /* 10983 * This subfunction is used to try to highly optimize the 10984 * fast path. 
We again allow window updates that are 10985 * in sequence to remain in the fast-path. We also add 10986 * in the __predict's to attempt to help the compiler. 10987 * Note that if we return a 0, then we can *not* process 10988 * it and the caller should push the packet into the 10989 * slow-path. 10990 */ 10991 static int 10992 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10993 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10994 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10995 { 10996 int32_t acked; 10997 int32_t nsegs; 10998 #ifdef TCPDEBUG 10999 /* 11000 * The size of tcp_saveipgen must be the size of the max ip header, 11001 * now IPv6. 11002 */ 11003 u_char tcp_saveipgen[IP6_HDR_LEN]; 11004 struct tcphdr tcp_savetcp; 11005 short ostate = 0; 11006 #endif 11007 int32_t under_pacing = 0; 11008 struct tcp_rack *rack; 11009 11010 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11011 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11012 return (0); 11013 } 11014 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11015 /* Above what we have sent? */ 11016 return (0); 11017 } 11018 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11019 /* We are retransmitting */ 11020 return (0); 11021 } 11022 if (__predict_false(tiwin == 0)) { 11023 /* zero window */ 11024 return (0); 11025 } 11026 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11027 /* We need a SYN or a FIN, unlikely.. */ 11028 return (0); 11029 } 11030 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11031 /* Timestamp is behind .. old ack with seq wrap? */ 11032 return (0); 11033 } 11034 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11035 /* Still recovering */ 11036 return (0); 11037 } 11038 rack = (struct tcp_rack *)tp->t_fb_ptr; 11039 if (rack->r_ctl.rc_sacked) { 11040 /* We have sack holes on our scoreboard */ 11041 return (0); 11042 } 11043 /* Ok if we reach here, we can process a fast-ack */ 11044 if (rack->gp_ready && 11045 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11046 under_pacing = 1; 11047 } 11048 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11049 rack_log_ack(tp, to, th, 0, 0); 11050 /* Did the window get updated? */ 11051 if (tiwin != tp->snd_wnd) { 11052 tp->snd_wnd = tiwin; 11053 rack_validate_fo_sendwin_up(tp, rack); 11054 tp->snd_wl1 = th->th_seq; 11055 if (tp->snd_wnd > tp->max_sndwnd) 11056 tp->max_sndwnd = tp->snd_wnd; 11057 } 11058 /* Do we exit persists? */ 11059 if ((rack->rc_in_persist != 0) && 11060 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11061 rack->r_ctl.rc_pace_min_segs))) { 11062 rack_exit_persist(tp, rack, cts); 11063 } 11064 /* Do we enter persists? */ 11065 if ((rack->rc_in_persist == 0) && 11066 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11067 TCPS_HAVEESTABLISHED(tp->t_state) && 11068 (tp->snd_max == tp->snd_una) && 11069 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 11070 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 11071 /* 11072 * Here the rwnd is less than 11073 * the pacing size, we are established, 11074 * nothing is outstanding, and there is 11075 * data to send. Enter persists. 11076 */ 11077 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11078 } 11079 /* 11080 * If last ACK falls within this segment's sequence numbers, record 11081 * the timestamp. NOTE that the test is modified according to the 11082 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
11083 */ 11084 if ((to->to_flags & TOF_TS) != 0 && 11085 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11086 tp->ts_recent_age = tcp_ts_getticks(); 11087 tp->ts_recent = to->to_tsval; 11088 } 11089 /* 11090 * This is a pure ack for outstanding data. 11091 */ 11092 KMOD_TCPSTAT_INC(tcps_predack); 11093 11094 /* 11095 * "bad retransmit" recovery. 11096 */ 11097 if ((tp->t_flags & TF_PREVVALID) && 11098 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11099 tp->t_flags &= ~TF_PREVVALID; 11100 if (tp->t_rxtshift == 1 && 11101 (int)(ticks - tp->t_badrxtwin) < 0) 11102 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 11103 } 11104 /* 11105 * Recalculate the transmit timer / rtt. 11106 * 11107 * Some boxes send broken timestamp replies during the SYN+ACK 11108 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11109 * and blow up the retransmit timer. 11110 */ 11111 acked = BYTES_THIS_ACK(tp, th); 11112 11113 #ifdef TCP_HHOOK 11114 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11115 hhook_run_tcp_est_in(tp, th, to); 11116 #endif 11117 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11118 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11119 if (acked) { 11120 struct mbuf *mfree; 11121 11122 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11123 SOCKBUF_LOCK(&so->so_snd); 11124 mfree = sbcut_locked(&so->so_snd, acked); 11125 tp->snd_una = th->th_ack; 11126 /* Note we want to hold the sb lock through the sendmap adjust */ 11127 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11128 /* Wake up the socket if we have room to write more */ 11129 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11130 sowwakeup_locked(so); 11131 m_freem(mfree); 11132 tp->t_rxtshift = 0; 11133 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11134 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11135 rack->rc_tlp_in_progress = 0; 11136 rack->r_ctl.rc_tlp_cnt_out = 0; 11137 /* 11138 * If it is the RXT timer we want to 11139 * stop it, so we can restart a TLP. 11140 */ 11141 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11142 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11143 #ifdef NETFLIX_HTTP_LOGGING 11144 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11145 #endif 11146 } 11147 /* 11148 * Let the congestion control algorithm update congestion control 11149 * related information. This typically means increasing the 11150 * congestion window. 11151 */ 11152 if (tp->snd_wnd < ctf_outstanding(tp)) { 11153 /* The peer collapsed the window */ 11154 rack_collapsed_window(rack); 11155 } else if (rack->rc_has_collapsed) 11156 rack_un_collapse_window(rack); 11157 11158 /* 11159 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11160 */ 11161 tp->snd_wl2 = th->th_ack; 11162 tp->t_dupacks = 0; 11163 m_freem(m); 11164 /* ND6_HINT(tp); *//* Some progress has been made. */ 11165 11166 /* 11167 * If all outstanding data are acked, stop retransmit timer, 11168 * otherwise restart timer using current (possibly backed-off) 11169 * value. If process is waiting for space, wakeup/selwakeup/signal. 11170 * If data are ready to send, let tcp_output decide between more 11171 * output or persist. 
11172 */ 11173 #ifdef TCPDEBUG 11174 if (so->so_options & SO_DEBUG) 11175 tcp_trace(TA_INPUT, ostate, tp, 11176 (void *)tcp_saveipgen, 11177 &tcp_savetcp, 0); 11178 #endif 11179 if (under_pacing && 11180 (rack->use_fixed_rate == 0) && 11181 (rack->in_probe_rtt == 0) && 11182 rack->rc_gp_dyn_mul && 11183 rack->rc_always_pace) { 11184 /* Check if we are dragging bottom */ 11185 rack_check_bottom_drag(tp, rack, so, acked); 11186 } 11187 if (tp->snd_una == tp->snd_max) { 11188 tp->t_flags &= ~TF_PREVVALID; 11189 rack->r_ctl.retran_during_recovery = 0; 11190 rack->r_ctl.dsack_byte_cnt = 0; 11191 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11192 if (rack->r_ctl.rc_went_idle_time == 0) 11193 rack->r_ctl.rc_went_idle_time = 1; 11194 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11195 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11196 tp->t_acktime = 0; 11197 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11198 } 11199 if (acked && rack->r_fast_output) 11200 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11201 if (sbavail(&so->so_snd)) { 11202 rack->r_wanted_output = 1; 11203 } 11204 return (1); 11205 } 11206 11207 /* 11208 * Return value of 1, the TCB is unlocked and most 11209 * likely gone, return value of 0, the TCP is still 11210 * locked. 11211 */ 11212 static int 11213 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11214 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11215 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11216 { 11217 int32_t ret_val = 0; 11218 int32_t todrop; 11219 int32_t ourfinisacked = 0; 11220 struct tcp_rack *rack; 11221 11222 ctf_calc_rwin(so, tp); 11223 /* 11224 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11225 * SYN, drop the input. if seg contains a RST, then drop the 11226 * connection. if seg does not contain SYN, then drop it. Otherwise 11227 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11228 * tp->irs if seg contains ack then advance tp->snd_una if seg 11229 * contains an ECE and ECN support is enabled, the stream is ECN 11230 * capable. if SYN has been acked change to ESTABLISHED else 11231 * SYN_RCVD state arrange for segment to be acked (eventually) 11232 * continue processing rest of data/controls. 11233 */ 11234 if ((thflags & TH_ACK) && 11235 (SEQ_LEQ(th->th_ack, tp->iss) || 11236 SEQ_GT(th->th_ack, tp->snd_max))) { 11237 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11238 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11239 return (1); 11240 } 11241 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11242 TCP_PROBE5(connect__refused, NULL, tp, 11243 mtod(m, const char *), tp, th); 11244 tp = tcp_drop(tp, ECONNREFUSED); 11245 ctf_do_drop(m, tp); 11246 return (1); 11247 } 11248 if (thflags & TH_RST) { 11249 ctf_do_drop(m, tp); 11250 return (1); 11251 } 11252 if (!(thflags & TH_SYN)) { 11253 ctf_do_drop(m, tp); 11254 return (1); 11255 } 11256 tp->irs = th->th_seq; 11257 tcp_rcvseqinit(tp); 11258 rack = (struct tcp_rack *)tp->t_fb_ptr; 11259 if (thflags & TH_ACK) { 11260 int tfo_partial = 0; 11261 11262 KMOD_TCPSTAT_INC(tcps_connects); 11263 soisconnected(so); 11264 #ifdef MAC 11265 mac_socketpeer_set_from_mbuf(m, so); 11266 #endif 11267 /* Do window scaling on this connection? 
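 *
 * The acceptability test at the top of this handler reduces to
 * iss < SEG.ACK <= snd_max, using wrap-safe sequence arithmetic;
 * anything else is answered with a reset.  Stand-alone sketch:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool
 *	seq_gt(uint32_t a, uint32_t b)
 *	{
 *		return ((int32_t)(a - b) > 0);	// wrap-safe a > b
 *	}
 *
 *	static bool
 *	seq_leq(uint32_t a, uint32_t b)
 *	{
 *		return ((int32_t)(a - b) <= 0);	// wrap-safe a <= b
 *	}
 *
 *	static bool
 *	synsent_ack_ok(uint32_t th_ack, uint32_t iss, uint32_t snd_max)
 *	{
 *		return (seq_gt(th_ack, iss) && seq_leq(th_ack, snd_max));
 *	}
 *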
*/ 11268 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11269 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11270 tp->rcv_scale = tp->request_r_scale; 11271 } 11272 tp->rcv_adv += min(tp->rcv_wnd, 11273 TCP_MAXWIN << tp->rcv_scale); 11274 /* 11275 * If not all the data that was sent in the TFO SYN 11276 * has been acked, resend the remainder right away. 11277 */ 11278 if (IS_FASTOPEN(tp->t_flags) && 11279 (tp->snd_una != tp->snd_max)) { 11280 tp->snd_nxt = th->th_ack; 11281 tfo_partial = 1; 11282 } 11283 /* 11284 * If there's data, delay ACK; if there's also a FIN ACKNOW 11285 * will be turned on later. 11286 */ 11287 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11288 rack_timer_cancel(tp, rack, 11289 rack->r_ctl.rc_rcvtime, __LINE__); 11290 tp->t_flags |= TF_DELACK; 11291 } else { 11292 rack->r_wanted_output = 1; 11293 tp->t_flags |= TF_ACKNOW; 11294 rack->rc_dack_toggle = 0; 11295 } 11296 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 11297 (V_tcp_do_ecn == 1)) { 11298 tp->t_flags2 |= TF2_ECN_PERMIT; 11299 KMOD_TCPSTAT_INC(tcps_ecn_shs); 11300 } 11301 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11302 /* 11303 * We advance snd_una for the 11304 * fast open case. If th_ack is 11305 * acknowledging data beyond 11306 * snd_una we can't just call 11307 * ack-processing since the 11308 * data stream in our send-map 11309 * will start at snd_una + 1 (one 11310 * beyond the SYN). If its just 11311 * equal we don't need to do that 11312 * and there is no send_map. 11313 */ 11314 tp->snd_una++; 11315 } 11316 /* 11317 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11318 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11319 */ 11320 tp->t_starttime = ticks; 11321 if (tp->t_flags & TF_NEEDFIN) { 11322 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11323 tp->t_flags &= ~TF_NEEDFIN; 11324 thflags &= ~TH_SYN; 11325 } else { 11326 tcp_state_change(tp, TCPS_ESTABLISHED); 11327 TCP_PROBE5(connect__established, NULL, tp, 11328 mtod(m, const char *), tp, th); 11329 rack_cc_conn_init(tp); 11330 } 11331 } else { 11332 /* 11333 * Received initial SYN in SYN-SENT[*] state => simultaneous 11334 * open. If segment contains CC option and there is a 11335 * cached CC, apply TAO test. If it succeeds, connection is * 11336 * half-synchronized. Otherwise, do 3-way handshake: 11337 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11338 * there was no CC option, clear cached CC value. 11339 */ 11340 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11341 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11342 } 11343 INP_WLOCK_ASSERT(tp->t_inpcb); 11344 /* 11345 * Advance th->th_seq to correspond to first data byte. If data, 11346 * trim to stay within window, dropping FIN if necessary. 11347 */ 11348 th->th_seq++; 11349 if (tlen > tp->rcv_wnd) { 11350 todrop = tlen - tp->rcv_wnd; 11351 m_adj(m, -todrop); 11352 tlen = tp->rcv_wnd; 11353 thflags &= ~TH_FIN; 11354 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11355 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11356 } 11357 tp->snd_wl1 = th->th_seq - 1; 11358 tp->rcv_up = th->th_seq; 11359 /* 11360 * Client side of transaction: already sent SYN and data. If the 11361 * remote host used T/TCP to validate the SYN, our data will be 11362 * ACK'd; if so, enter normal data segment processing in the middle 11363 * of step 5, ack processing. Otherwise, goto step 6. 
11364 */ 11365 if (thflags & TH_ACK) { 11366 /* For syn-sent we need to possibly update the rtt */ 11367 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11368 uint32_t t, mcts; 11369 11370 mcts = tcp_ts_getticks(); 11371 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11372 if (!tp->t_rttlow || tp->t_rttlow > t) 11373 tp->t_rttlow = t; 11374 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11375 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11376 tcp_rack_xmit_timer_commit(rack, tp); 11377 } 11378 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11379 return (ret_val); 11380 /* We may have changed to FIN_WAIT_1 above */ 11381 if (tp->t_state == TCPS_FIN_WAIT_1) { 11382 /* 11383 * In FIN_WAIT_1 STATE in addition to the processing 11384 * for the ESTABLISHED state if our FIN is now 11385 * acknowledged then enter FIN_WAIT_2. 11386 */ 11387 if (ourfinisacked) { 11388 /* 11389 * If we can't receive any more data, then 11390 * closing user can proceed. Starting the 11391 * timer is contrary to the specification, 11392 * but if we don't get a FIN we'll hang 11393 * forever. 11394 * 11395 * XXXjl: we should release the tp also, and 11396 * use a compressed state. 11397 */ 11398 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11399 soisdisconnected(so); 11400 tcp_timer_activate(tp, TT_2MSL, 11401 (tcp_fast_finwait2_recycle ? 11402 tcp_finwait2_timeout : 11403 TP_MAXIDLE(tp))); 11404 } 11405 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11406 } 11407 } 11408 } 11409 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11410 tiwin, thflags, nxt_pkt)); 11411 } 11412 11413 /* 11414 * Return value of 1, the TCB is unlocked and most 11415 * likely gone, return value of 0, the TCP is still 11416 * locked. 11417 */ 11418 static int 11419 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11420 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11421 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11422 { 11423 struct tcp_rack *rack; 11424 int32_t ret_val = 0; 11425 int32_t ourfinisacked = 0; 11426 11427 ctf_calc_rwin(so, tp); 11428 if ((thflags & TH_ACK) && 11429 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11430 SEQ_GT(th->th_ack, tp->snd_max))) { 11431 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11432 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11433 return (1); 11434 } 11435 rack = (struct tcp_rack *)tp->t_fb_ptr; 11436 if (IS_FASTOPEN(tp->t_flags)) { 11437 /* 11438 * When a TFO connection is in SYN_RECEIVED, the 11439 * only valid packets are the initial SYN, a 11440 * retransmit/copy of the initial SYN (possibly with 11441 * a subset of the original data), a valid ACK, a 11442 * FIN, or a RST. 
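 *
 * Aside on the timestamp-echo RTT sample taken for the SYN leg in
 * rack_do_syn_sent() above (and again below for syn-recv): the echoed
 * value and tcp_ts_getticks() are in millisecond ticks, while the
 * RACK timer machinery works in microseconds, hence the
 * HPTS_USEC_IN_MSEC scaling (assumed to be 1000 in this sketch):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t
 *	rtt_usec_from_ts_echo(uint32_t now_msticks, uint32_t tsecr,
 *	    uint32_t *rttlow)
 *	{
 *		uint32_t t = (now_msticks - tsecr) * 1000;
 *
 *		if (*rttlow == 0 || *rttlow > t)
 *			*rttlow = t;	// track the lowest sample seen
 *		return (t);
 *	}
 *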
11443 */ 11444 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11445 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11446 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11447 return (1); 11448 } else if (thflags & TH_SYN) { 11449 /* non-initial SYN is ignored */ 11450 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11451 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11452 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11453 ctf_do_drop(m, NULL); 11454 return (0); 11455 } 11456 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11457 ctf_do_drop(m, NULL); 11458 return (0); 11459 } 11460 } 11461 if ((thflags & TH_RST) || 11462 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11463 return (ctf_process_rst(m, th, so, tp)); 11464 /* 11465 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11466 * it's less than ts_recent, drop it. 11467 */ 11468 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11469 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11470 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11471 return (ret_val); 11472 } 11473 /* 11474 * In the SYN-RECEIVED state, validate that the packet belongs to 11475 * this connection before trimming the data to fit the receive 11476 * window. Check the sequence number versus IRS since we know the 11477 * sequence numbers haven't wrapped. This is a partial fix for the 11478 * "LAND" DoS attack. 11479 */ 11480 if (SEQ_LT(th->th_seq, tp->irs)) { 11481 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11482 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11483 return (1); 11484 } 11485 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11486 &rack->r_ctl.challenge_ack_ts, 11487 &rack->r_ctl.challenge_ack_cnt)) { 11488 return (ret_val); 11489 } 11490 /* 11491 * If last ACK falls within this segment's sequence numbers, record 11492 * its timestamp. NOTE: 1) That the test incorporates suggestions 11493 * from the latest proposal of the tcplw@cray.com list (Braden 11494 * 1993/04/26). 2) That updating only on newer timestamps interferes 11495 * with our earlier PAWS tests, so this check should be solely 11496 * predicated on the sequence space of this segment. 3) That we 11497 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11498 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11499 * SEG.Len, This modified check allows us to overcome RFC1323's 11500 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11501 * p.869. In such cases, we can still calculate the RTT correctly 11502 * when RCV.NXT == Last.ACK.Sent. 11503 */ 11504 if ((to->to_flags & TOF_TS) != 0 && 11505 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11506 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11507 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11508 tp->ts_recent_age = tcp_ts_getticks(); 11509 tp->ts_recent = to->to_tsval; 11510 } 11511 tp->snd_wnd = tiwin; 11512 rack_validate_fo_sendwin_up(tp, rack); 11513 /* 11514 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11515 * is on (half-synchronized state), then queue data for later 11516 * processing; else drop segment and return. 11517 */ 11518 if ((thflags & TH_ACK) == 0) { 11519 if (IS_FASTOPEN(tp->t_flags)) { 11520 rack_cc_conn_init(tp); 11521 } 11522 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11523 tiwin, thflags, nxt_pkt)); 11524 } 11525 KMOD_TCPSTAT_INC(tcps_connects); 11526 soisconnected(so); 11527 /* Do window scaling? 
*/ 11528 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11529 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11530 tp->rcv_scale = tp->request_r_scale; 11531 } 11532 /* 11533 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11534 * FIN-WAIT-1 11535 */ 11536 tp->t_starttime = ticks; 11537 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11538 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11539 tp->t_tfo_pending = NULL; 11540 } 11541 if (tp->t_flags & TF_NEEDFIN) { 11542 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11543 tp->t_flags &= ~TF_NEEDFIN; 11544 } else { 11545 tcp_state_change(tp, TCPS_ESTABLISHED); 11546 TCP_PROBE5(accept__established, NULL, tp, 11547 mtod(m, const char *), tp, th); 11548 /* 11549 * TFO connections call cc_conn_init() during SYN 11550 * processing. Calling it again here for such connections 11551 * is not harmless as it would undo the snd_cwnd reduction 11552 * that occurs when a TFO SYN|ACK is retransmitted. 11553 */ 11554 if (!IS_FASTOPEN(tp->t_flags)) 11555 rack_cc_conn_init(tp); 11556 } 11557 /* 11558 * Account for the ACK of our SYN prior to 11559 * regular ACK processing below, except for 11560 * simultaneous SYN, which is handled later. 11561 */ 11562 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11563 tp->snd_una++; 11564 /* 11565 * If segment contains data or ACK, will call tcp_reass() later; if 11566 * not, do so now to pass queued data to user. 11567 */ 11568 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11569 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11570 (struct mbuf *)0); 11571 if (tp->t_flags & TF_WAKESOR) { 11572 tp->t_flags &= ~TF_WAKESOR; 11573 /* NB: sorwakeup_locked() does an implicit unlock. */ 11574 sorwakeup_locked(so); 11575 } 11576 } 11577 tp->snd_wl1 = th->th_seq - 1; 11578 /* For syn-recv we need to possibly update the rtt */ 11579 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11580 uint32_t t, mcts; 11581 11582 mcts = tcp_ts_getticks(); 11583 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11584 if (!tp->t_rttlow || tp->t_rttlow > t) 11585 tp->t_rttlow = t; 11586 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11587 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11588 tcp_rack_xmit_timer_commit(rack, tp); 11589 } 11590 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11591 return (ret_val); 11592 } 11593 if (tp->t_state == TCPS_FIN_WAIT_1) { 11594 /* We could have went to FIN_WAIT_1 (or EST) above */ 11595 /* 11596 * In FIN_WAIT_1 STATE in addition to the processing for the 11597 * ESTABLISHED state if our FIN is now acknowledged then 11598 * enter FIN_WAIT_2. 11599 */ 11600 if (ourfinisacked) { 11601 /* 11602 * If we can't receive any more data, then closing 11603 * user can proceed. Starting the timer is contrary 11604 * to the specification, but if we don't get a FIN 11605 * we'll hang forever. 11606 * 11607 * XXXjl: we should release the tp also, and use a 11608 * compressed state. 11609 */ 11610 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11611 soisdisconnected(so); 11612 tcp_timer_activate(tp, TT_2MSL, 11613 (tcp_fast_finwait2_recycle ? 11614 tcp_finwait2_timeout : 11615 TP_MAXIDLE(tp))); 11616 } 11617 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11618 } 11619 } 11620 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11621 tiwin, thflags, nxt_pkt)); 11622 } 11623 11624 /* 11625 * Return value of 1, the TCB is unlocked and most 11626 * likely gone, return value of 0, the TCP is still 11627 * locked. 
11628 */ 11629 static int 11630 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11631 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11632 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11633 { 11634 int32_t ret_val = 0; 11635 struct tcp_rack *rack; 11636 11637 /* 11638 * Header prediction: check for the two common cases of a 11639 * uni-directional data xfer. If the packet has no control flags, 11640 * is in-sequence, the window didn't change and we're not 11641 * retransmitting, it's a candidate. If the length is zero and the 11642 * ack moved forward, we're the sender side of the xfer. Just free 11643 * the data acked & wake any higher level process that was blocked 11644 * waiting for space. If the length is non-zero and the ack didn't 11645 * move, we're the receiver side. If we're getting packets in-order 11646 * (the reassembly queue is empty), add the data toc The socket 11647 * buffer and note that we need a delayed ack. Make sure that the 11648 * hidden state-flags are also off. Since we check for 11649 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11650 */ 11651 rack = (struct tcp_rack *)tp->t_fb_ptr; 11652 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11653 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11654 __predict_true(SEGQ_EMPTY(tp)) && 11655 __predict_true(th->th_seq == tp->rcv_nxt)) { 11656 if (tlen == 0) { 11657 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11658 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11659 return (0); 11660 } 11661 } else { 11662 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11663 tiwin, nxt_pkt, iptos)) { 11664 return (0); 11665 } 11666 } 11667 } 11668 ctf_calc_rwin(so, tp); 11669 11670 if ((thflags & TH_RST) || 11671 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11672 return (ctf_process_rst(m, th, so, tp)); 11673 11674 /* 11675 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11676 * synchronized state. 11677 */ 11678 if (thflags & TH_SYN) { 11679 ctf_challenge_ack(m, th, tp, &ret_val); 11680 return (ret_val); 11681 } 11682 /* 11683 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11684 * it's less than ts_recent, drop it. 11685 */ 11686 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11687 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11688 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11689 return (ret_val); 11690 } 11691 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11692 &rack->r_ctl.challenge_ack_ts, 11693 &rack->r_ctl.challenge_ack_cnt)) { 11694 return (ret_val); 11695 } 11696 /* 11697 * If last ACK falls within this segment's sequence numbers, record 11698 * its timestamp. NOTE: 1) That the test incorporates suggestions 11699 * from the latest proposal of the tcplw@cray.com list (Braden 11700 * 1993/04/26). 2) That updating only on newer timestamps interferes 11701 * with our earlier PAWS tests, so this check should be solely 11702 * predicated on the sequence space of this segment. 3) That we 11703 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11704 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11705 * SEG.Len, This modified check allows us to overcome RFC1323's 11706 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11707 * p.869. In such cases, we can still calculate the RTT correctly 11708 * when RCV.NXT == Last.ACK.Sent. 
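 * Illustrative example of why "<=" matters: with last_ack_sent = 1000,
 * a zero-length segment (a bare ACK or window update) arriving with
 * SEG.SEQ = 1000 gives SEG.SEQ + SEG.Len = 1000; the strict RFC 1323
 * test (1000 < 1000) would refuse to take its timestamp, while the
 * relaxed test above accepts it, which is exactly the
 * RCV.NXT == Last.ACK.Sent case mentioned.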
11709 */ 11710 if ((to->to_flags & TOF_TS) != 0 && 11711 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11712 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11713 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11714 tp->ts_recent_age = tcp_ts_getticks(); 11715 tp->ts_recent = to->to_tsval; 11716 } 11717 /* 11718 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11719 * is on (half-synchronized state), then queue data for later 11720 * processing; else drop segment and return. 11721 */ 11722 if ((thflags & TH_ACK) == 0) { 11723 if (tp->t_flags & TF_NEEDSYN) { 11724 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11725 tiwin, thflags, nxt_pkt)); 11726 11727 } else if (tp->t_flags & TF_ACKNOW) { 11728 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11729 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11730 return (ret_val); 11731 } else { 11732 ctf_do_drop(m, NULL); 11733 return (0); 11734 } 11735 } 11736 /* 11737 * Ack processing. 11738 */ 11739 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11740 return (ret_val); 11741 } 11742 if (sbavail(&so->so_snd)) { 11743 if (ctf_progress_timeout_check(tp, true)) { 11744 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11745 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11746 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11747 return (1); 11748 } 11749 } 11750 /* State changes only happen in rack_process_data() */ 11751 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11752 tiwin, thflags, nxt_pkt)); 11753 } 11754 11755 /* 11756 * Return value of 1, the TCB is unlocked and most 11757 * likely gone, return value of 0, the TCP is still 11758 * locked. 11759 */ 11760 static int 11761 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11762 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11763 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11764 { 11765 int32_t ret_val = 0; 11766 struct tcp_rack *rack; 11767 11768 rack = (struct tcp_rack *)tp->t_fb_ptr; 11769 ctf_calc_rwin(so, tp); 11770 if ((thflags & TH_RST) || 11771 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11772 return (ctf_process_rst(m, th, so, tp)); 11773 /* 11774 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11775 * synchronized state. 11776 */ 11777 if (thflags & TH_SYN) { 11778 ctf_challenge_ack(m, th, tp, &ret_val); 11779 return (ret_val); 11780 } 11781 /* 11782 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11783 * it's less than ts_recent, drop it. 11784 */ 11785 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11786 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11787 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11788 return (ret_val); 11789 } 11790 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11791 &rack->r_ctl.challenge_ack_ts, 11792 &rack->r_ctl.challenge_ack_cnt)) { 11793 return (ret_val); 11794 } 11795 /* 11796 * If last ACK falls within this segment's sequence numbers, record 11797 * its timestamp. NOTE: 1) That the test incorporates suggestions 11798 * from the latest proposal of the tcplw@cray.com list (Braden 11799 * 1993/04/26). 2) That updating only on newer timestamps interferes 11800 * with our earlier PAWS tests, so this check should be solely 11801 * predicated on the sequence space of this segment. 
3) That we 11802 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11803 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11804 * SEG.Len, This modified check allows us to overcome RFC1323's 11805 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11806 * p.869. In such cases, we can still calculate the RTT correctly 11807 * when RCV.NXT == Last.ACK.Sent. 11808 */ 11809 if ((to->to_flags & TOF_TS) != 0 && 11810 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11811 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11812 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11813 tp->ts_recent_age = tcp_ts_getticks(); 11814 tp->ts_recent = to->to_tsval; 11815 } 11816 /* 11817 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11818 * is on (half-synchronized state), then queue data for later 11819 * processing; else drop segment and return. 11820 */ 11821 if ((thflags & TH_ACK) == 0) { 11822 if (tp->t_flags & TF_NEEDSYN) { 11823 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11824 tiwin, thflags, nxt_pkt)); 11825 11826 } else if (tp->t_flags & TF_ACKNOW) { 11827 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11828 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11829 return (ret_val); 11830 } else { 11831 ctf_do_drop(m, NULL); 11832 return (0); 11833 } 11834 } 11835 /* 11836 * Ack processing. 11837 */ 11838 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11839 return (ret_val); 11840 } 11841 if (sbavail(&so->so_snd)) { 11842 if (ctf_progress_timeout_check(tp, true)) { 11843 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11844 tp, tick, PROGRESS_DROP, __LINE__); 11845 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11846 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11847 return (1); 11848 } 11849 } 11850 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11851 tiwin, thflags, nxt_pkt)); 11852 } 11853 11854 static int 11855 rack_check_data_after_close(struct mbuf *m, 11856 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11857 { 11858 struct tcp_rack *rack; 11859 11860 rack = (struct tcp_rack *)tp->t_fb_ptr; 11861 if (rack->rc_allow_data_af_clo == 0) { 11862 close_now: 11863 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11864 /* tcp_close will kill the inp pre-log the Reset */ 11865 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11866 tp = tcp_close(tp); 11867 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11868 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11869 return (1); 11870 } 11871 if (sbavail(&so->so_snd) == 0) 11872 goto close_now; 11873 /* Ok we allow data that is ignored and a followup reset */ 11874 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11875 tp->rcv_nxt = th->th_seq + *tlen; 11876 tp->t_flags2 |= TF2_DROP_AF_DATA; 11877 rack->r_wanted_output = 1; 11878 *tlen = 0; 11879 return (0); 11880 } 11881 11882 /* 11883 * Return value of 1, the TCB is unlocked and most 11884 * likely gone, return value of 0, the TCP is still 11885 * locked. 
11886 */ 11887 static int 11888 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11889 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11890 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11891 { 11892 int32_t ret_val = 0; 11893 int32_t ourfinisacked = 0; 11894 struct tcp_rack *rack; 11895 11896 rack = (struct tcp_rack *)tp->t_fb_ptr; 11897 ctf_calc_rwin(so, tp); 11898 11899 if ((thflags & TH_RST) || 11900 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11901 return (ctf_process_rst(m, th, so, tp)); 11902 /* 11903 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11904 * synchronized state. 11905 */ 11906 if (thflags & TH_SYN) { 11907 ctf_challenge_ack(m, th, tp, &ret_val); 11908 return (ret_val); 11909 } 11910 /* 11911 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11912 * it's less than ts_recent, drop it. 11913 */ 11914 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11915 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11916 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11917 return (ret_val); 11918 } 11919 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11920 &rack->r_ctl.challenge_ack_ts, 11921 &rack->r_ctl.challenge_ack_cnt)) { 11922 return (ret_val); 11923 } 11924 /* 11925 * If new data are received on a connection after the user processes 11926 * are gone, then RST the other end. 11927 */ 11928 if ((so->so_state & SS_NOFDREF) && tlen) { 11929 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11930 return (1); 11931 } 11932 /* 11933 * If last ACK falls within this segment's sequence numbers, record 11934 * its timestamp. NOTE: 1) That the test incorporates suggestions 11935 * from the latest proposal of the tcplw@cray.com list (Braden 11936 * 1993/04/26). 2) That updating only on newer timestamps interferes 11937 * with our earlier PAWS tests, so this check should be solely 11938 * predicated on the sequence space of this segment. 3) That we 11939 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11940 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11941 * SEG.Len, This modified check allows us to overcome RFC1323's 11942 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11943 * p.869. In such cases, we can still calculate the RTT correctly 11944 * when RCV.NXT == Last.ACK.Sent. 11945 */ 11946 if ((to->to_flags & TOF_TS) != 0 && 11947 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11948 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11949 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11950 tp->ts_recent_age = tcp_ts_getticks(); 11951 tp->ts_recent = to->to_tsval; 11952 } 11953 /* 11954 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11955 * is on (half-synchronized state), then queue data for later 11956 * processing; else drop segment and return. 11957 */ 11958 if ((thflags & TH_ACK) == 0) { 11959 if (tp->t_flags & TF_NEEDSYN) { 11960 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11961 tiwin, thflags, nxt_pkt)); 11962 } else if (tp->t_flags & TF_ACKNOW) { 11963 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11964 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11965 return (ret_val); 11966 } else { 11967 ctf_do_drop(m, NULL); 11968 return (0); 11969 } 11970 } 11971 /* 11972 * Ack processing. 
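 * rack_process_ack() consumes the ACK against the rack scoreboard; a
 * non-zero return means the segment (and possibly the connection) has
 * already been dealt with, so we just bail. It also reports through
 * ourfinisacked whether this ACK covers the FIN we sent, which is what
 * gates the FIN_WAIT_1 -> FIN_WAIT_2 transition below.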
11973 */ 11974 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11975 return (ret_val); 11976 } 11977 if (ourfinisacked) { 11978 /* 11979 * If we can't receive any more data, then closing user can 11980 * proceed. Starting the timer is contrary to the 11981 * specification, but if we don't get a FIN we'll hang 11982 * forever. 11983 * 11984 * XXXjl: we should release the tp also, and use a 11985 * compressed state. 11986 */ 11987 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11988 soisdisconnected(so); 11989 tcp_timer_activate(tp, TT_2MSL, 11990 (tcp_fast_finwait2_recycle ? 11991 tcp_finwait2_timeout : 11992 TP_MAXIDLE(tp))); 11993 } 11994 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11995 } 11996 if (sbavail(&so->so_snd)) { 11997 if (ctf_progress_timeout_check(tp, true)) { 11998 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11999 tp, tick, PROGRESS_DROP, __LINE__); 12000 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12001 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12002 return (1); 12003 } 12004 } 12005 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12006 tiwin, thflags, nxt_pkt)); 12007 } 12008 12009 /* 12010 * Return value of 1, the TCB is unlocked and most 12011 * likely gone, return value of 0, the TCP is still 12012 * locked. 12013 */ 12014 static int 12015 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12016 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12017 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12018 { 12019 int32_t ret_val = 0; 12020 int32_t ourfinisacked = 0; 12021 struct tcp_rack *rack; 12022 12023 rack = (struct tcp_rack *)tp->t_fb_ptr; 12024 ctf_calc_rwin(so, tp); 12025 12026 if ((thflags & TH_RST) || 12027 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12028 return (ctf_process_rst(m, th, so, tp)); 12029 /* 12030 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12031 * synchronized state. 12032 */ 12033 if (thflags & TH_SYN) { 12034 ctf_challenge_ack(m, th, tp, &ret_val); 12035 return (ret_val); 12036 } 12037 /* 12038 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12039 * it's less than ts_recent, drop it. 12040 */ 12041 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12042 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12043 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12044 return (ret_val); 12045 } 12046 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12047 &rack->r_ctl.challenge_ack_ts, 12048 &rack->r_ctl.challenge_ack_cnt)) { 12049 return (ret_val); 12050 } 12051 /* 12052 * If new data are received on a connection after the user processes 12053 * are gone, then RST the other end. 12054 */ 12055 if ((so->so_state & SS_NOFDREF) && tlen) { 12056 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12057 return (1); 12058 } 12059 /* 12060 * If last ACK falls within this segment's sequence numbers, record 12061 * its timestamp. NOTE: 1) That the test incorporates suggestions 12062 * from the latest proposal of the tcplw@cray.com list (Braden 12063 * 1993/04/26). 2) That updating only on newer timestamps interferes 12064 * with our earlier PAWS tests, so this check should be solely 12065 * predicated on the sequence space of this segment. 
3) That we 12066 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12067 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12068 * SEG.Len, This modified check allows us to overcome RFC1323's 12069 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12070 * p.869. In such cases, we can still calculate the RTT correctly 12071 * when RCV.NXT == Last.ACK.Sent. 12072 */ 12073 if ((to->to_flags & TOF_TS) != 0 && 12074 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12075 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12076 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12077 tp->ts_recent_age = tcp_ts_getticks(); 12078 tp->ts_recent = to->to_tsval; 12079 } 12080 /* 12081 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12082 * is on (half-synchronized state), then queue data for later 12083 * processing; else drop segment and return. 12084 */ 12085 if ((thflags & TH_ACK) == 0) { 12086 if (tp->t_flags & TF_NEEDSYN) { 12087 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12088 tiwin, thflags, nxt_pkt)); 12089 } else if (tp->t_flags & TF_ACKNOW) { 12090 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12091 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12092 return (ret_val); 12093 } else { 12094 ctf_do_drop(m, NULL); 12095 return (0); 12096 } 12097 } 12098 /* 12099 * Ack processing. 12100 */ 12101 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12102 return (ret_val); 12103 } 12104 if (ourfinisacked) { 12105 tcp_twstart(tp); 12106 m_freem(m); 12107 return (1); 12108 } 12109 if (sbavail(&so->so_snd)) { 12110 if (ctf_progress_timeout_check(tp, true)) { 12111 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12112 tp, tick, PROGRESS_DROP, __LINE__); 12113 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12114 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12115 return (1); 12116 } 12117 } 12118 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12119 tiwin, thflags, nxt_pkt)); 12120 } 12121 12122 /* 12123 * Return value of 1, the TCB is unlocked and most 12124 * likely gone, return value of 0, the TCP is still 12125 * locked. 12126 */ 12127 static int 12128 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12129 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12130 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12131 { 12132 int32_t ret_val = 0; 12133 int32_t ourfinisacked = 0; 12134 struct tcp_rack *rack; 12135 12136 rack = (struct tcp_rack *)tp->t_fb_ptr; 12137 ctf_calc_rwin(so, tp); 12138 12139 if ((thflags & TH_RST) || 12140 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12141 return (ctf_process_rst(m, th, so, tp)); 12142 /* 12143 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12144 * synchronized state. 12145 */ 12146 if (thflags & TH_SYN) { 12147 ctf_challenge_ack(m, th, tp, &ret_val); 12148 return (ret_val); 12149 } 12150 /* 12151 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12152 * it's less than ts_recent, drop it. 
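 * TSTMP_LT() is a modular (wrap-safe) compare, so this behaves across a
 * rollover of the 32-bit timestamp clock. Illustrative example: with
 * ts_recent = 0xfffffff0, an arriving to_tsval of 0x00000010 counts as
 * newer (a small step forward across the wrap), while 0xffffff00 counts
 * as older and the segment is handed to ctf_ts_check() for the PAWS
 * drop-or-ack decision.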
12153 */ 12154 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12155 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12156 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12157 return (ret_val); 12158 } 12159 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12160 &rack->r_ctl.challenge_ack_ts, 12161 &rack->r_ctl.challenge_ack_cnt)) { 12162 return (ret_val); 12163 } 12164 /* 12165 * If new data are received on a connection after the user processes 12166 * are gone, then RST the other end. 12167 */ 12168 if ((so->so_state & SS_NOFDREF) && tlen) { 12169 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12170 return (1); 12171 } 12172 /* 12173 * If last ACK falls within this segment's sequence numbers, record 12174 * its timestamp. NOTE: 1) That the test incorporates suggestions 12175 * from the latest proposal of the tcplw@cray.com list (Braden 12176 * 1993/04/26). 2) That updating only on newer timestamps interferes 12177 * with our earlier PAWS tests, so this check should be solely 12178 * predicated on the sequence space of this segment. 3) That we 12179 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12180 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12181 * SEG.Len, This modified check allows us to overcome RFC1323's 12182 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12183 * p.869. In such cases, we can still calculate the RTT correctly 12184 * when RCV.NXT == Last.ACK.Sent. 12185 */ 12186 if ((to->to_flags & TOF_TS) != 0 && 12187 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12188 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12189 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12190 tp->ts_recent_age = tcp_ts_getticks(); 12191 tp->ts_recent = to->to_tsval; 12192 } 12193 /* 12194 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12195 * is on (half-synchronized state), then queue data for later 12196 * processing; else drop segment and return. 12197 */ 12198 if ((thflags & TH_ACK) == 0) { 12199 if (tp->t_flags & TF_NEEDSYN) { 12200 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12201 tiwin, thflags, nxt_pkt)); 12202 } else if (tp->t_flags & TF_ACKNOW) { 12203 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12204 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12205 return (ret_val); 12206 } else { 12207 ctf_do_drop(m, NULL); 12208 return (0); 12209 } 12210 } 12211 /* 12212 * case TCPS_LAST_ACK: Ack processing. 12213 */ 12214 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12215 return (ret_val); 12216 } 12217 if (ourfinisacked) { 12218 tp = tcp_close(tp); 12219 ctf_do_drop(m, tp); 12220 return (1); 12221 } 12222 if (sbavail(&so->so_snd)) { 12223 if (ctf_progress_timeout_check(tp, true)) { 12224 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12225 tp, tick, PROGRESS_DROP, __LINE__); 12226 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12227 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12228 return (1); 12229 } 12230 } 12231 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12232 tiwin, thflags, nxt_pkt)); 12233 } 12234 12235 /* 12236 * Return value of 1, the TCB is unlocked and most 12237 * likely gone, return value of 0, the TCP is still 12238 * locked. 
12239 */ 12240 static int 12241 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12242 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12243 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12244 { 12245 int32_t ret_val = 0; 12246 int32_t ourfinisacked = 0; 12247 struct tcp_rack *rack; 12248 12249 rack = (struct tcp_rack *)tp->t_fb_ptr; 12250 ctf_calc_rwin(so, tp); 12251 12252 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12253 if ((thflags & TH_RST) || 12254 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12255 return (ctf_process_rst(m, th, so, tp)); 12256 /* 12257 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12258 * synchronized state. 12259 */ 12260 if (thflags & TH_SYN) { 12261 ctf_challenge_ack(m, th, tp, &ret_val); 12262 return (ret_val); 12263 } 12264 /* 12265 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12266 * it's less than ts_recent, drop it. 12267 */ 12268 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12269 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12270 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12271 return (ret_val); 12272 } 12273 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12274 &rack->r_ctl.challenge_ack_ts, 12275 &rack->r_ctl.challenge_ack_cnt)) { 12276 return (ret_val); 12277 } 12278 /* 12279 * If new data are received on a connection after the user processes 12280 * are gone, then RST the other end. 12281 */ 12282 if ((so->so_state & SS_NOFDREF) && 12283 tlen) { 12284 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12285 return (1); 12286 } 12287 /* 12288 * If last ACK falls within this segment's sequence numbers, record 12289 * its timestamp. NOTE: 1) That the test incorporates suggestions 12290 * from the latest proposal of the tcplw@cray.com list (Braden 12291 * 1993/04/26). 2) That updating only on newer timestamps interferes 12292 * with our earlier PAWS tests, so this check should be solely 12293 * predicated on the sequence space of this segment. 3) That we 12294 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12295 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12296 * SEG.Len, This modified check allows us to overcome RFC1323's 12297 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12298 * p.869. In such cases, we can still calculate the RTT correctly 12299 * when RCV.NXT == Last.ACK.Sent. 12300 */ 12301 if ((to->to_flags & TOF_TS) != 0 && 12302 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12303 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12304 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12305 tp->ts_recent_age = tcp_ts_getticks(); 12306 tp->ts_recent = to->to_tsval; 12307 } 12308 /* 12309 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12310 * is on (half-synchronized state), then queue data for later 12311 * processing; else drop segment and return. 12312 */ 12313 if ((thflags & TH_ACK) == 0) { 12314 if (tp->t_flags & TF_NEEDSYN) { 12315 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12316 tiwin, thflags, nxt_pkt)); 12317 } else if (tp->t_flags & TF_ACKNOW) { 12318 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12319 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12320 return (ret_val); 12321 } else { 12322 ctf_do_drop(m, NULL); 12323 return (0); 12324 } 12325 } 12326 /* 12327 * Ack processing. 
12328 */ 12329 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12330 return (ret_val); 12331 } 12332 if (sbavail(&so->so_snd)) { 12333 if (ctf_progress_timeout_check(tp, true)) { 12334 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12335 tp, tick, PROGRESS_DROP, __LINE__); 12336 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 12337 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12338 return (1); 12339 } 12340 } 12341 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12342 tiwin, thflags, nxt_pkt)); 12343 } 12344 12345 static void inline 12346 rack_clear_rate_sample(struct tcp_rack *rack) 12347 { 12348 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12349 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12350 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12351 } 12352 12353 static void 12354 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12355 { 12356 uint64_t bw_est, rate_wanted; 12357 int chged = 0; 12358 uint32_t user_max, orig_min, orig_max; 12359 12360 orig_min = rack->r_ctl.rc_pace_min_segs; 12361 orig_max = rack->r_ctl.rc_pace_max_segs; 12362 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12363 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12364 chged = 1; 12365 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12366 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12367 if (user_max != rack->r_ctl.rc_pace_max_segs) 12368 chged = 1; 12369 } 12370 if (rack->rc_force_max_seg) { 12371 rack->r_ctl.rc_pace_max_segs = user_max; 12372 } else if (rack->use_fixed_rate) { 12373 bw_est = rack_get_bw(rack); 12374 if ((rack->r_ctl.crte == NULL) || 12375 (bw_est != rack->r_ctl.crte->rate)) { 12376 rack->r_ctl.rc_pace_max_segs = user_max; 12377 } else { 12378 /* We are pacing right at the hardware rate */ 12379 uint32_t segsiz; 12380 12381 segsiz = min(ctf_fixed_maxseg(tp), 12382 rack->r_ctl.rc_pace_min_segs); 12383 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12384 tp, bw_est, segsiz, 0, 12385 rack->r_ctl.crte, NULL); 12386 } 12387 } else if (rack->rc_always_pace) { 12388 if (rack->r_ctl.gp_bw || 12389 #ifdef NETFLIX_PEAKRATE 12390 rack->rc_tp->t_maxpeakrate || 12391 #endif 12392 rack->r_ctl.init_rate) { 12393 /* We have a rate of some sort set */ 12394 uint32_t orig; 12395 12396 bw_est = rack_get_bw(rack); 12397 orig = rack->r_ctl.rc_pace_max_segs; 12398 if (fill_override) 12399 rate_wanted = *fill_override; 12400 else 12401 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12402 if (rate_wanted) { 12403 /* We have something */ 12404 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12405 rate_wanted, 12406 ctf_fixed_maxseg(rack->rc_tp)); 12407 } else 12408 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12409 if (orig != rack->r_ctl.rc_pace_max_segs) 12410 chged = 1; 12411 } else if ((rack->r_ctl.gp_bw == 0) && 12412 (rack->r_ctl.rc_pace_max_segs == 0)) { 12413 /* 12414 * If we have nothing limit us to bursting 12415 * out IW sized pieces. 
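 * rc_init_window() hands back the initial window in bytes, so with,
 * say, a 10 segment IW of 1448-byte segments this caps
 * rc_pace_max_segs at 14480 bytes until a goodput estimate (gp_bw) or
 * an explicit init_rate exists and the normal pacing-length
 * calculation above can take over.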
12416 */ 12417 chged = 1; 12418 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12419 } 12420 } 12421 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12422 chged = 1; 12423 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12424 } 12425 if (chged) 12426 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12427 } 12428 12429 12430 static void 12431 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12432 { 12433 #ifdef INET6 12434 struct ip6_hdr *ip6 = NULL; 12435 #endif 12436 #ifdef INET 12437 struct ip *ip = NULL; 12438 #endif 12439 struct udphdr *udp = NULL; 12440 12441 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12442 #ifdef INET6 12443 if (rack->r_is_v6) { 12444 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12445 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12446 if (tp->t_port) { 12447 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12448 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12449 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12450 udp->uh_dport = tp->t_port; 12451 rack->r_ctl.fsb.udp = udp; 12452 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12453 } else 12454 { 12455 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12456 rack->r_ctl.fsb.udp = NULL; 12457 } 12458 tcpip_fillheaders(rack->rc_inp, 12459 tp->t_port, 12460 ip6, rack->r_ctl.fsb.th); 12461 } else 12462 #endif /* INET6 */ 12463 { 12464 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12465 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12466 if (tp->t_port) { 12467 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12468 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12469 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12470 udp->uh_dport = tp->t_port; 12471 rack->r_ctl.fsb.udp = udp; 12472 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12473 } else 12474 { 12475 rack->r_ctl.fsb.udp = NULL; 12476 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12477 } 12478 tcpip_fillheaders(rack->rc_inp, 12479 tp->t_port, 12480 ip, rack->r_ctl.fsb.th); 12481 } 12482 rack->r_fsb_inited = 1; 12483 } 12484 12485 static int 12486 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12487 { 12488 /* 12489 * Allocate the larger of spaces V6 if available else just 12490 * V4 and include udphdr (overbook) 12491 */ 12492 #ifdef INET6 12493 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12494 #else 12495 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12496 #endif 12497 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12498 M_TCPFSB, M_NOWAIT|M_ZERO); 12499 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12500 return (ENOMEM); 12501 } 12502 rack->r_fsb_inited = 0; 12503 return (0); 12504 } 12505 12506 static int 12507 rack_init(struct tcpcb *tp) 12508 { 12509 struct tcp_rack *rack = NULL; 12510 struct rack_sendmap *insret; 12511 uint32_t iwin, snt, us_cts; 12512 int err; 12513 12514 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12515 if (tp->t_fb_ptr == NULL) { 12516 /* 12517 * We need to allocate memory but cant. The INP and INP_INFO 12518 * locks and they are recusive (happens during setup. 
So a 12519 * scheme to drop the locks fails :( 12520 * 12521 */ 12522 return (ENOMEM); 12523 } 12524 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12525 12526 rack = (struct tcp_rack *)tp->t_fb_ptr; 12527 RB_INIT(&rack->r_ctl.rc_mtree); 12528 TAILQ_INIT(&rack->r_ctl.rc_free); 12529 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12530 rack->rc_tp = tp; 12531 rack->rc_inp = tp->t_inpcb; 12532 /* Set the flag */ 12533 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12534 /* Probably not needed but lets be sure */ 12535 rack_clear_rate_sample(rack); 12536 /* 12537 * Save off the default values, socket options will poke 12538 * at these if pacing is not on or we have not yet 12539 * reached where pacing is on (gp_ready/fixed enabled). 12540 * When they get set into the CC module (when gp_ready 12541 * is enabled or we enable fixed) then we will set these 12542 * values into the CC and place in here the old values 12543 * so we have a restoral. Then we will set the flag 12544 * rc_pacing_cc_set. That way whenever we turn off pacing 12545 * or switch off this stack, we will know to go restore 12546 * the saved values. 12547 */ 12548 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12549 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12550 /* We want abe like behavior as well */ 12551 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN; 12552 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12553 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12554 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12555 if (use_rack_rr) 12556 rack->use_rack_rr = 1; 12557 if (V_tcp_delack_enabled) 12558 tp->t_delayed_ack = 1; 12559 else 12560 tp->t_delayed_ack = 0; 12561 #ifdef TCP_ACCOUNTING 12562 if (rack_tcp_accounting) { 12563 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12564 } 12565 #endif 12566 if (rack_enable_shared_cwnd) 12567 rack->rack_enable_scwnd = 1; 12568 rack->rc_user_set_max_segs = rack_hptsi_segments; 12569 rack->rc_force_max_seg = 0; 12570 if (rack_use_imac_dack) 12571 rack->rc_dack_mode = 1; 12572 TAILQ_INIT(&rack->r_ctl.opt_list); 12573 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12574 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12575 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12576 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12577 rack->r_ctl.rc_highest_us_rtt = 0; 12578 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12579 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12580 if (rack_use_cmp_acks) 12581 rack->r_use_cmp_ack = 1; 12582 if (rack_disable_prr) 12583 rack->rack_no_prr = 1; 12584 if (rack_gp_no_rec_chg) 12585 rack->rc_gp_no_rec_chg = 1; 12586 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12587 rack->rc_always_pace = 1; 12588 if (rack->use_fixed_rate || rack->gp_ready) 12589 rack_set_cc_pacing(rack); 12590 } else 12591 rack->rc_always_pace = 0; 12592 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12593 rack->r_mbuf_queue = 1; 12594 else 12595 rack->r_mbuf_queue = 0; 12596 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12597 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12598 else 12599 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12600 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12601 if (rack_limits_scwnd) 12602 rack->r_limit_scw = 1; 12603 else 12604 rack->r_limit_scw = 0; 12605 rack->rc_labc = V_tcp_abc_l_var; 12606 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12607 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12608 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12609 
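/*
 * Note (illustrative): rack keeps its time values in microseconds,
 * which is why tick-based inputs such as tcp_rexmit_slop are pushed
 * through TICKS_2_USEC() above; with hz = 1000 one tick is 1000 usec,
 * so that conversion is effectively a multiply by 1000. The reverse
 * conversion (USEC_2_TICKS plus rebuilding the shifted remainder) is
 * done in rack_fini() when the connection leaves this stack.
 */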
rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12610 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12611 rack->r_ctl.rc_min_to = rack_min_to; 12612 microuptime(&rack->r_ctl.act_rcv_time); 12613 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12614 rack->r_running_late = 0; 12615 rack->r_running_early = 0; 12616 rack->rc_init_win = rack_default_init_window; 12617 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12618 if (rack_hw_up_only) 12619 rack->r_up_only = 1; 12620 if (rack_do_dyn_mul) { 12621 /* When dynamic adjustment is on CA needs to start at 100% */ 12622 rack->rc_gp_dyn_mul = 1; 12623 if (rack_do_dyn_mul >= 100) 12624 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12625 } else 12626 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12627 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12628 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12629 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12630 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12631 rack_probertt_filter_life); 12632 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12633 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12634 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12635 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12636 rack->r_ctl.rc_time_probertt_starts = 0; 12637 if (rack_dsack_std_based & 0x1) { 12638 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12639 rack->rc_rack_tmr_std_based = 1; 12640 } 12641 if (rack_dsack_std_based & 0x2) { 12642 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12643 rack->rc_rack_use_dsack = 1; 12644 } 12645 /* We require at least one measurement, even if the sysctl is 0 */ 12646 if (rack_req_measurements) 12647 rack->r_ctl.req_measurements = rack_req_measurements; 12648 else 12649 rack->r_ctl.req_measurements = 1; 12650 if (rack_enable_hw_pacing) 12651 rack->rack_hdw_pace_ena = 1; 12652 if (rack_hw_rate_caps) 12653 rack->r_rack_hw_rate_caps = 1; 12654 /* Do we force on detection? 
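 * ("Detection" here is the experimental logic for spotting misbehaving,
 * SACK-abusing peers that only exists when NETFLIX_EXP_DETECTION is
 * compiled in; tcp_force_detection switches it on for every new
 * connection, otherwise do_detection starts out clear.)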
*/ 12655 #ifdef NETFLIX_EXP_DETECTION 12656 if (tcp_force_detection) 12657 rack->do_detection = 1; 12658 else 12659 #endif 12660 rack->do_detection = 0; 12661 if (rack_non_rxt_use_cr) 12662 rack->rack_rec_nonrxt_use_cr = 1; 12663 err = rack_init_fsb(tp, rack); 12664 if (err) { 12665 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12666 tp->t_fb_ptr = NULL; 12667 return (err); 12668 } 12669 if (tp->snd_una != tp->snd_max) { 12670 /* Create a send map for the current outstanding data */ 12671 struct rack_sendmap *rsm; 12672 12673 rsm = rack_alloc(rack); 12674 if (rsm == NULL) { 12675 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12676 tp->t_fb_ptr = NULL; 12677 return (ENOMEM); 12678 } 12679 rsm->r_no_rtt_allowed = 1; 12680 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12681 rsm->r_rtr_cnt = 1; 12682 rsm->r_rtr_bytes = 0; 12683 if (tp->t_flags & TF_SENTFIN) { 12684 rsm->r_end = tp->snd_max - 1; 12685 rsm->r_flags |= RACK_HAS_FIN; 12686 } else { 12687 rsm->r_end = tp->snd_max; 12688 } 12689 if (tp->snd_una == tp->iss) { 12690 /* The data space is one beyond snd_una */ 12691 rsm->r_flags |= RACK_HAS_SYN; 12692 rsm->r_start = tp->iss; 12693 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12694 } else 12695 rsm->r_start = tp->snd_una; 12696 rsm->r_dupack = 0; 12697 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12698 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12699 if (rsm->m) 12700 rsm->orig_m_len = rsm->m->m_len; 12701 else 12702 rsm->orig_m_len = 0; 12703 } else { 12704 /* 12705 * This can happen if we have a stand-alone FIN or 12706 * SYN. 12707 */ 12708 rsm->m = NULL; 12709 rsm->orig_m_len = 0; 12710 rsm->soff = 0; 12711 } 12712 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12713 #ifdef INVARIANTS 12714 if (insret != NULL) { 12715 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12716 insret, rack, rsm); 12717 } 12718 #endif 12719 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12720 rsm->r_in_tmap = 1; 12721 } 12722 /* 12723 * Timers in Rack are kept in microseconds so lets 12724 * convert any initial incoming variables 12725 * from ticks into usecs. Note that we 12726 * also change the values of t_srtt and t_rttvar, if 12727 * they are non-zero. They are kept with a 5 12728 * bit decimal so we have to carefully convert 12729 * these to get the full precision. 12730 */ 12731 rack_convert_rtts(tp); 12732 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12733 if (rack_def_profile) 12734 rack_set_profile(rack, rack_def_profile); 12735 /* Cancel the GP measurement in progress */ 12736 tp->t_flags &= ~TF_GPUTINPROG; 12737 if (SEQ_GT(tp->snd_max, tp->iss)) 12738 snt = tp->snd_max - tp->iss; 12739 else 12740 snt = 0; 12741 iwin = rc_init_window(rack); 12742 if (snt < iwin) { 12743 /* We are not past the initial window 12744 * so we need to make sure cwnd is 12745 * correct. 12746 */ 12747 if (tp->snd_cwnd < iwin) 12748 tp->snd_cwnd = iwin; 12749 /* 12750 * If we are within the initial window 12751 * we want ssthresh to be unlimited. Setting 12752 * it to the rwnd (which the default stack does 12753 * and older racks) is not really a good idea 12754 * since we want to be in SS and grow both the 12755 * cwnd and the rwnd (via dynamic rwnd growth). If 12756 * we set it to the rwnd then as the peer grows its 12757 * rwnd we will be stuck in CA and never hit SS. 
12758 * 12759 * Its far better to raise it up high (this takes the 12760 * risk that there as been a loss already, probably 12761 * we should have an indicator in all stacks of loss 12762 * but we don't), but considering the normal use this 12763 * is a risk worth taking. The consequences of not 12764 * hitting SS are far worse than going one more time 12765 * into it early on (before we have sent even a IW). 12766 * It is highly unlikely that we will have had a loss 12767 * before getting the IW out. 12768 */ 12769 tp->snd_ssthresh = 0xffffffff; 12770 } 12771 rack_stop_all_timers(tp); 12772 /* Lets setup the fsb block */ 12773 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12774 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12775 __LINE__, RACK_RTTS_INIT); 12776 return (0); 12777 } 12778 12779 static int 12780 rack_handoff_ok(struct tcpcb *tp) 12781 { 12782 if ((tp->t_state == TCPS_CLOSED) || 12783 (tp->t_state == TCPS_LISTEN)) { 12784 /* Sure no problem though it may not stick */ 12785 return (0); 12786 } 12787 if ((tp->t_state == TCPS_SYN_SENT) || 12788 (tp->t_state == TCPS_SYN_RECEIVED)) { 12789 /* 12790 * We really don't know if you support sack, 12791 * you have to get to ESTAB or beyond to tell. 12792 */ 12793 return (EAGAIN); 12794 } 12795 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12796 /* 12797 * Rack will only send a FIN after all data is acknowledged. 12798 * So in this case we have more data outstanding. We can't 12799 * switch stacks until either all data and only the FIN 12800 * is left (in which case rack_init() now knows how 12801 * to deal with that) <or> all is acknowledged and we 12802 * are only left with incoming data, though why you 12803 * would want to switch to rack after all data is acknowledged 12804 * I have no idea (rrs)! 12805 */ 12806 return (EAGAIN); 12807 } 12808 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12809 return (0); 12810 } 12811 /* 12812 * If we reach here we don't do SACK on this connection so we can 12813 * never do rack. 12814 */ 12815 return (EINVAL); 12816 } 12817 12818 12819 static void 12820 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12821 { 12822 int ack_cmp = 0; 12823 12824 if (tp->t_fb_ptr) { 12825 struct tcp_rack *rack; 12826 struct rack_sendmap *rsm, *nrsm, *rm; 12827 12828 rack = (struct tcp_rack *)tp->t_fb_ptr; 12829 if (tp->t_in_pkt) { 12830 /* 12831 * It is unsafe to process the packets since a 12832 * reset may be lurking in them (its rare but it 12833 * can occur). If we were to find a RST, then we 12834 * would end up dropping the connection and the 12835 * INP lock, so when we return the caller (tcp_usrreq) 12836 * will blow up when it trys to unlock the inp. 12837 */ 12838 struct mbuf *save, *m; 12839 12840 m = tp->t_in_pkt; 12841 tp->t_in_pkt = NULL; 12842 tp->t_tail_pkt = NULL; 12843 while (m) { 12844 save = m->m_nextpkt; 12845 m->m_nextpkt = NULL; 12846 m_freem(m); 12847 m = save; 12848 } 12849 if ((tp->t_inpcb) && 12850 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP)) 12851 ack_cmp = 1; 12852 if (ack_cmp) { 12853 /* Total if we used large or small (if ack-cmp was used). 
*/ 12854 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS) 12855 counter_u64_add(rack_large_ackcmp, 1); 12856 else 12857 counter_u64_add(rack_small_ackcmp, 1); 12858 } 12859 } 12860 tp->t_flags &= ~TF_FORCEDATA; 12861 #ifdef NETFLIX_SHARED_CWND 12862 if (rack->r_ctl.rc_scw) { 12863 uint32_t limit; 12864 12865 if (rack->r_limit_scw) 12866 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12867 else 12868 limit = 0; 12869 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12870 rack->r_ctl.rc_scw_index, 12871 limit); 12872 rack->r_ctl.rc_scw = NULL; 12873 } 12874 #endif 12875 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12876 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12877 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12878 rack->r_ctl.fsb.th = NULL; 12879 } 12880 /* Convert back to ticks, with */ 12881 if (tp->t_srtt > 1) { 12882 uint32_t val, frac; 12883 12884 val = USEC_2_TICKS(tp->t_srtt); 12885 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12886 tp->t_srtt = val << TCP_RTT_SHIFT; 12887 /* 12888 * frac is the fractional part here is left 12889 * over from converting to hz and shifting. 12890 * We need to convert this to the 5 bit 12891 * remainder. 12892 */ 12893 if (frac) { 12894 if (hz == 1000) { 12895 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12896 } else { 12897 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12898 } 12899 tp->t_srtt += frac; 12900 } 12901 } 12902 if (tp->t_rttvar) { 12903 uint32_t val, frac; 12904 12905 val = USEC_2_TICKS(tp->t_rttvar); 12906 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12907 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12908 /* 12909 * frac is the fractional part here is left 12910 * over from converting to hz and shifting. 12911 * We need to convert this to the 5 bit 12912 * remainder. 
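 * Worked example (illustrative, hz = 1000 so one tick is 1000 usec): a
 * value of 12345 usec gives val = 12 ticks with frac = 345 usec left
 * over; 345 * TCP_RTT_SCALE / 1000 = 11, so the stored result becomes
 * (12 << shift) + 11 and the sub-tick part survives in the low-order
 * "decimal" bits instead of being truncated away.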
12913 */ 12914 if (frac) { 12915 if (hz == 1000) { 12916 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12917 } else { 12918 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12919 } 12920 tp->t_rttvar += frac; 12921 } 12922 } 12923 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12924 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12925 if (rack->rc_always_pace) { 12926 tcp_decrement_paced_conn(); 12927 rack_undo_cc_pacing(rack); 12928 rack->rc_always_pace = 0; 12929 } 12930 /* Clean up any options if they were not applied */ 12931 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12932 struct deferred_opt_list *dol; 12933 12934 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12935 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12936 free(dol, M_TCPDO); 12937 } 12938 /* rack does not use force data but other stacks may clear it */ 12939 if (rack->r_ctl.crte != NULL) { 12940 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12941 rack->rack_hdrw_pacing = 0; 12942 rack->r_ctl.crte = NULL; 12943 } 12944 #ifdef TCP_BLACKBOX 12945 tcp_log_flowend(tp); 12946 #endif 12947 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12948 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12949 #ifdef INVARIANTS 12950 if (rm != rsm) { 12951 panic("At fini, rack:%p rsm:%p rm:%p", 12952 rack, rsm, rm); 12953 } 12954 #endif 12955 uma_zfree(rack_zone, rsm); 12956 } 12957 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12958 while (rsm) { 12959 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12960 uma_zfree(rack_zone, rsm); 12961 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12962 } 12963 rack->rc_free_cnt = 0; 12964 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12965 tp->t_fb_ptr = NULL; 12966 } 12967 if (tp->t_inpcb) { 12968 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12969 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12970 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12971 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 12972 /* Cancel the GP measurement in progress */ 12973 tp->t_flags &= ~TF_GPUTINPROG; 12974 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 12975 } 12976 /* Make sure snd_nxt is correctly set */ 12977 tp->snd_nxt = tp->snd_max; 12978 } 12979 12980 static void 12981 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12982 { 12983 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12984 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12985 } 12986 switch (tp->t_state) { 12987 case TCPS_SYN_SENT: 12988 rack->r_state = TCPS_SYN_SENT; 12989 rack->r_substate = rack_do_syn_sent; 12990 break; 12991 case TCPS_SYN_RECEIVED: 12992 rack->r_state = TCPS_SYN_RECEIVED; 12993 rack->r_substate = rack_do_syn_recv; 12994 break; 12995 case TCPS_ESTABLISHED: 12996 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12997 rack->r_state = TCPS_ESTABLISHED; 12998 rack->r_substate = rack_do_established; 12999 break; 13000 case TCPS_CLOSE_WAIT: 13001 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13002 rack->r_state = TCPS_CLOSE_WAIT; 13003 rack->r_substate = rack_do_close_wait; 13004 break; 13005 case TCPS_FIN_WAIT_1: 13006 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13007 rack->r_state = TCPS_FIN_WAIT_1; 13008 rack->r_substate = rack_do_fin_wait_1; 13009 break; 13010 case TCPS_CLOSING: 13011 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13012 rack->r_state = TCPS_CLOSING; 13013 rack->r_substate = rack_do_closing; 13014 break; 13015 case TCPS_LAST_ACK: 13016 rack_set_pace_segments(tp, rack, 
__LINE__, NULL); 13017 rack->r_state = TCPS_LAST_ACK; 13018 rack->r_substate = rack_do_lastack; 13019 break; 13020 case TCPS_FIN_WAIT_2: 13021 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13022 rack->r_state = TCPS_FIN_WAIT_2; 13023 rack->r_substate = rack_do_fin_wait_2; 13024 break; 13025 case TCPS_LISTEN: 13026 case TCPS_CLOSED: 13027 case TCPS_TIME_WAIT: 13028 default: 13029 break; 13030 }; 13031 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13032 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13033 13034 } 13035 13036 static void 13037 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13038 { 13039 /* 13040 * We received an ack, and then did not 13041 * call send or were bounced out due to the 13042 * hpts was running. Now a timer is up as well, is 13043 * it the right timer? 13044 */ 13045 struct rack_sendmap *rsm; 13046 int tmr_up; 13047 13048 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13049 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13050 return; 13051 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13052 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13053 (tmr_up == PACE_TMR_RXT)) { 13054 /* Should be an RXT */ 13055 return; 13056 } 13057 if (rsm == NULL) { 13058 /* Nothing outstanding? */ 13059 if (tp->t_flags & TF_DELACK) { 13060 if (tmr_up == PACE_TMR_DELACK) 13061 /* We are supposed to have delayed ack up and we do */ 13062 return; 13063 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13064 /* 13065 * if we hit enobufs then we would expect the possiblity 13066 * of nothing outstanding and the RXT up (and the hptsi timer). 13067 */ 13068 return; 13069 } else if (((V_tcp_always_keepalive || 13070 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13071 (tp->t_state <= TCPS_CLOSING)) && 13072 (tmr_up == PACE_TMR_KEEP) && 13073 (tp->snd_max == tp->snd_una)) { 13074 /* We should have keep alive up and we do */ 13075 return; 13076 } 13077 } 13078 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13079 ((tmr_up == PACE_TMR_TLP) || 13080 (tmr_up == PACE_TMR_RACK) || 13081 (tmr_up == PACE_TMR_RXT))) { 13082 /* 13083 * Either a Rack, TLP or RXT is fine if we 13084 * have outstanding data. 13085 */ 13086 return; 13087 } else if (tmr_up == PACE_TMR_DELACK) { 13088 /* 13089 * If the delayed ack was going to go off 13090 * before the rtx/tlp/rack timer were going to 13091 * expire, then that would be the timer in control. 13092 * Note we don't check the time here trusting the 13093 * code is correct. 13094 */ 13095 return; 13096 } 13097 /* 13098 * Ok the timer originally started is not what we want now. 13099 * We will force the hpts to be stopped if any, and restart 13100 * with the slot set to what was in the saved slot. 
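 * Before tearing the timer down we also account for how early we are:
 * if the pacer-scheduled output time (rc_last_output_to) is still in
 * the future, the difference is folded into rc_agg_early so the pacing
 * logic can later compensate for being pulled off the wheel ahead of
 * schedule.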
13101 */ 13102 if (rack->rc_inp->inp_in_hpts) { 13103 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13104 uint32_t us_cts; 13105 13106 us_cts = tcp_get_usecs(NULL); 13107 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13108 rack->r_early = 1; 13109 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13110 } 13111 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13112 } 13113 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 13114 } 13115 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13116 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13117 } 13118 13119 13120 static void 13121 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13122 { 13123 if ((SEQ_LT(tp->snd_wl1, seq) || 13124 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13125 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13126 /* keep track of pure window updates */ 13127 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13128 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13129 tp->snd_wnd = tiwin; 13130 rack_validate_fo_sendwin_up(tp, rack); 13131 tp->snd_wl1 = seq; 13132 tp->snd_wl2 = ack; 13133 if (tp->snd_wnd > tp->max_sndwnd) 13134 tp->max_sndwnd = tp->snd_wnd; 13135 rack->r_wanted_output = 1; 13136 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13137 tp->snd_wnd = tiwin; 13138 rack_validate_fo_sendwin_up(tp, rack); 13139 tp->snd_wl1 = seq; 13140 tp->snd_wl2 = ack; 13141 } else { 13142 /* Not a valid win update */ 13143 return; 13144 } 13145 if (tp->snd_wnd > tp->max_sndwnd) 13146 tp->max_sndwnd = tp->snd_wnd; 13147 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 13148 /* The peer collapsed the window */ 13149 rack_collapsed_window(rack); 13150 } else if (rack->rc_has_collapsed) 13151 rack_un_collapse_window(rack); 13152 /* Do we exit persists? */ 13153 if ((rack->rc_in_persist != 0) && 13154 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13155 rack->r_ctl.rc_pace_min_segs))) { 13156 rack_exit_persist(tp, rack, cts); 13157 } 13158 /* Do we enter persists? */ 13159 if ((rack->rc_in_persist == 0) && 13160 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13161 TCPS_HAVEESTABLISHED(tp->t_state) && 13162 (tp->snd_max == tp->snd_una) && 13163 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13164 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13165 /* 13166 * Here the rwnd is less than 13167 * the pacing size, we are established, 13168 * nothing is outstanding, and there is 13169 * data to send. Enter persists. 
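 * Illustrative numbers: with rc_high_rwnd at 64000 bytes and
 * rc_pace_min_segs at 1448, the threshold is min(32000, 1448) = 1448,
 * so we only drop into persist mode once the peer's window has shrunk
 * below a single pacing segment while unsent data is still queued.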
13170 */ 13171 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13172 } 13173 } 13174 13175 static void 13176 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13177 { 13178 13179 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13180 union tcp_log_stackspecific log; 13181 struct timeval ltv; 13182 char tcp_hdr_buf[60]; 13183 struct tcphdr *th; 13184 struct timespec ts; 13185 uint32_t orig_snd_una; 13186 uint8_t xx = 0; 13187 13188 #ifdef NETFLIX_HTTP_LOGGING 13189 struct http_sendfile_track *http_req; 13190 13191 if (SEQ_GT(ae->ack, tp->snd_una)) { 13192 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13193 } else { 13194 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13195 } 13196 #endif 13197 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13198 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 13199 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 13200 if (rack->rack_no_prr == 0) 13201 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13202 else 13203 log.u_bbr.flex1 = 0; 13204 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13205 log.u_bbr.use_lt_bw <<= 1; 13206 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13207 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13208 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13209 log.u_bbr.pkts_out = tp->t_maxseg; 13210 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13211 log.u_bbr.flex7 = 1; 13212 log.u_bbr.lost = ae->flags; 13213 log.u_bbr.cwnd_gain = ackval; 13214 log.u_bbr.pacing_gain = 0x2; 13215 if (ae->flags & TSTMP_HDWR) { 13216 /* Record the hardware timestamp if present */ 13217 log.u_bbr.flex3 = M_TSTMP; 13218 ts.tv_sec = ae->timestamp / 1000000000; 13219 ts.tv_nsec = ae->timestamp % 1000000000; 13220 ltv.tv_sec = ts.tv_sec; 13221 ltv.tv_usec = ts.tv_nsec / 1000; 13222 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13223 } else if (ae->flags & TSTMP_LRO) { 13224 /* Record the LRO the arrival timestamp */ 13225 log.u_bbr.flex3 = M_TSTMP_LRO; 13226 ts.tv_sec = ae->timestamp / 1000000000; 13227 ts.tv_nsec = ae->timestamp % 1000000000; 13228 ltv.tv_sec = ts.tv_sec; 13229 ltv.tv_usec = ts.tv_nsec / 1000; 13230 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13231 } 13232 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13233 /* Log the rcv time */ 13234 log.u_bbr.delRate = ae->timestamp; 13235 #ifdef NETFLIX_HTTP_LOGGING 13236 log.u_bbr.applimited = tp->t_http_closed; 13237 log.u_bbr.applimited <<= 8; 13238 log.u_bbr.applimited |= tp->t_http_open; 13239 log.u_bbr.applimited <<= 8; 13240 log.u_bbr.applimited |= tp->t_http_req; 13241 if (http_req) { 13242 /* Copy out any client req info */ 13243 /* seconds */ 13244 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13245 /* useconds */ 13246 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13247 log.u_bbr.rttProp = http_req->timestamp; 13248 log.u_bbr.cur_del_rate = http_req->start; 13249 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13250 log.u_bbr.flex8 |= 1; 13251 } else { 13252 log.u_bbr.flex8 |= 2; 13253 log.u_bbr.bw_inuse = http_req->end; 13254 } 13255 log.u_bbr.flex6 = http_req->start_seq; 13256 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13257 log.u_bbr.flex8 |= 4; 13258 log.u_bbr.epoch = http_req->end_seq; 13259 } 13260 } 13261 #endif 13262 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13263 th = (struct tcphdr *)tcp_hdr_buf; 13264 th->th_seq = ae->seq; 13265 th->th_ack = ae->ack; 13266 th->th_win = ae->win; 13267 /* Now fill in the ports */ 13268 th->th_sport = tp->t_inpcb->inp_fport; 13269 
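/*
 * Illustrative note: the header rebuilt here exists only for the
 * logging record, so just the fields preserved in the compressed-ack
 * entry (seq, ack, win, flags and, optionally, the timestamps) are
 * filled in. When a timestamp is present the option block below uses
 * the usual "appendix A" layout: NOP, NOP, kind 8, length 10, TSval,
 * TSecr, which is 12 bytes and is why th_off becomes (20 + 12) >> 2 = 8.
 */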
th->th_dport = tp->t_inpcb->inp_lport; 13270 th->th_flags = ae->flags & 0xff; 13271 /* Now do we have a timestamp option? */ 13272 if (ae->flags & HAS_TSTMP) { 13273 u_char *cp; 13274 uint32_t val; 13275 13276 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13277 cp = (u_char *)(th + 1); 13278 *cp = TCPOPT_NOP; 13279 cp++; 13280 *cp = TCPOPT_NOP; 13281 cp++; 13282 *cp = TCPOPT_TIMESTAMP; 13283 cp++; 13284 *cp = TCPOLEN_TIMESTAMP; 13285 cp++; 13286 val = htonl(ae->ts_value); 13287 bcopy((char *)&val, 13288 (char *)cp, sizeof(uint32_t)); 13289 val = htonl(ae->ts_echo); 13290 bcopy((char *)&val, 13291 (char *)(cp + 4), sizeof(uint32_t)); 13292 } else 13293 th->th_off = (sizeof(struct tcphdr) >> 2); 13294 13295 /* 13296 * For sane logging we need to play a little trick. 13297 * If the ack were fully processed we would have moved 13298 * snd_una to high_seq, but since compressed acks are 13299 * processed in two phases, at this point (logging) snd_una 13300 * won't be advanced. So we would see multiple acks showing 13301 * the advancement. We can prevent that by "pretending" that 13302 * snd_una was advanced and then un-advancing it so that the 13303 * logging code has the right value for tlb_snd_una. 13304 */ 13305 if (tp->snd_una != high_seq) { 13306 orig_snd_una = tp->snd_una; 13307 tp->snd_una = high_seq; 13308 xx = 1; 13309 } else 13310 xx = 0; 13311 TCP_LOG_EVENTP(tp, th, 13312 &tp->t_inpcb->inp_socket->so_rcv, 13313 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13314 0, &log, true, <v); 13315 if (xx) { 13316 tp->snd_una = orig_snd_una; 13317 } 13318 } 13319 13320 } 13321 13322 static int 13323 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13324 { 13325 /* 13326 * Handle a "special" compressed ack mbuf. Each incoming 13327 * ack has only four possible dispositions: 13328 * 13329 * A) It moves the cum-ack forward 13330 * B) It is behind the cum-ack. 13331 * C) It is a window-update ack. 13332 * D) It is a dup-ack. 13333 * 13334 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13335 * in the incoming mbuf. We also need to still pay attention 13336 * to nxt_pkt since there may be another packet after this 13337 * one. 13338 */ 13339 #ifdef TCP_ACCOUNTING 13340 uint64_t ts_val; 13341 uint64_t rdstc; 13342 #endif 13343 int segsiz; 13344 struct timespec ts; 13345 struct tcp_rack *rack; 13346 struct tcp_ackent *ae; 13347 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13348 int cnt, i, did_out, ourfinisacked = 0; 13349 struct tcpopt to_holder, *to = NULL; 13350 int win_up_req = 0; 13351 int nsegs = 0; 13352 int under_pacing = 1; 13353 int recovery = 0; 13354 int idx; 13355 #ifdef TCP_ACCOUNTING 13356 sched_pin(); 13357 #endif 13358 rack = (struct tcp_rack *)tp->t_fb_ptr; 13359 if (rack->gp_ready && 13360 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13361 under_pacing = 0; 13362 else 13363 under_pacing = 1; 13364 13365 if (rack->r_state != tp->t_state) 13366 rack_set_state(tp, rack); 13367 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13368 (tp->t_flags & TF_GPUTINPROG)) { 13369 /* 13370 * We have a goodput in progress 13371 * and we have entered a late state. 13372 * Do we have enough data in the sb 13373 * to handle the GPUT request? 
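		 * (Illustrative arithmetic only, with made-up numbers: if
		 * gput_seq = 1000, gput_ack = 21000 and snd_una = 500, the
		 * measurement still needs (21000 - 1000) + (1000 - 500) =
		 * 20500 sent bytes sitting in the socket buffer; if
		 * sbavail() reports less than that, the measurement is
		 * cancelled below.)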
13374 */ 13375 uint32_t bytes; 13376 13377 bytes = tp->gput_ack - tp->gput_seq; 13378 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13379 bytes += tp->gput_seq - tp->snd_una; 13380 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13381 /* 13382 * There are not enough bytes in the socket 13383 * buffer that have been sent to cover this 13384 * measurement. Cancel it. 13385 */ 13386 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13387 rack->r_ctl.rc_gp_srtt /*flex1*/, 13388 tp->gput_seq, 13389 0, 0, 18, __LINE__, NULL, 0); 13390 tp->t_flags &= ~TF_GPUTINPROG; 13391 } 13392 } 13393 to = &to_holder; 13394 to->to_flags = 0; 13395 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13396 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13397 cnt = m->m_len / sizeof(struct tcp_ackent); 13398 idx = cnt / 5; 13399 if (idx >= MAX_NUM_OF_CNTS) 13400 idx = MAX_NUM_OF_CNTS - 1; 13401 counter_u64_add(rack_proc_comp_ack[idx], 1); 13402 counter_u64_add(rack_multi_single_eq, cnt); 13403 high_seq = tp->snd_una; 13404 the_win = tp->snd_wnd; 13405 win_seq = tp->snd_wl1; 13406 win_upd_ack = tp->snd_wl2; 13407 cts = tcp_tv_to_usectick(tv); 13408 ms_cts = tcp_tv_to_mssectick(tv); 13409 segsiz = ctf_fixed_maxseg(tp); 13410 if ((rack->rc_gp_dyn_mul) && 13411 (rack->use_fixed_rate == 0) && 13412 (rack->rc_always_pace)) { 13413 /* Check in on probertt */ 13414 rack_check_probe_rtt(rack, cts); 13415 } 13416 for (i = 0; i < cnt; i++) { 13417 #ifdef TCP_ACCOUNTING 13418 ts_val = get_cyclecount(); 13419 #endif 13420 rack_clear_rate_sample(rack); 13421 ae = ((mtod(m, struct tcp_ackent *)) + i); 13422 /* Setup the window */ 13423 tiwin = ae->win << tp->snd_scale; 13424 /* figure out the type of ack */ 13425 if (SEQ_LT(ae->ack, high_seq)) { 13426 /* Case B*/ 13427 ae->ack_val_set = ACK_BEHIND; 13428 } else if (SEQ_GT(ae->ack, high_seq)) { 13429 /* Case A */ 13430 ae->ack_val_set = ACK_CUMACK; 13431 } else if (tiwin == the_win) { 13432 /* Case D */ 13433 ae->ack_val_set = ACK_DUPACK; 13434 } else { 13435 /* Case C */ 13436 ae->ack_val_set = ACK_RWND; 13437 } 13438 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13439 /* Validate timestamp */ 13440 if (ae->flags & HAS_TSTMP) { 13441 /* Setup for a timestamp */ 13442 to->to_flags = TOF_TS; 13443 ae->ts_echo -= tp->ts_offset; 13444 to->to_tsecr = ae->ts_echo; 13445 to->to_tsval = ae->ts_value; 13446 /* 13447 * If echoed timestamp is later than the current time, fall back to 13448 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13449 * were used when this connection was established. 
13450 */ 13451 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13452 to->to_tsecr = 0; 13453 if (tp->ts_recent && 13454 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13455 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13456 #ifdef TCP_ACCOUNTING 13457 rdstc = get_cyclecount(); 13458 if (rdstc > ts_val) { 13459 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13460 (rdstc - ts_val)); 13461 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13462 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13463 } 13464 } 13465 #endif 13466 continue; 13467 } 13468 } 13469 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13470 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13471 tp->ts_recent_age = tcp_ts_getticks(); 13472 tp->ts_recent = ae->ts_value; 13473 } 13474 } else { 13475 /* Setup for a no options */ 13476 to->to_flags = 0; 13477 } 13478 /* Update the rcv time and perform idle reduction possibly */ 13479 if (tp->t_idle_reduce && 13480 (tp->snd_max == tp->snd_una) && 13481 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13482 counter_u64_add(rack_input_idle_reduces, 1); 13483 rack_cc_after_idle(rack, tp); 13484 } 13485 tp->t_rcvtime = ticks; 13486 /* Now what about ECN? */ 13487 if (tp->t_flags2 & TF2_ECN_PERMIT) { 13488 if (ae->flags & TH_CWR) { 13489 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 13490 tp->t_flags |= TF_ACKNOW; 13491 } 13492 switch (ae->codepoint & IPTOS_ECN_MASK) { 13493 case IPTOS_ECN_CE: 13494 tp->t_flags2 |= TF2_ECN_SND_ECE; 13495 KMOD_TCPSTAT_INC(tcps_ecn_ce); 13496 break; 13497 case IPTOS_ECN_ECT0: 13498 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 13499 break; 13500 case IPTOS_ECN_ECT1: 13501 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 13502 break; 13503 } 13504 13505 /* Process a packet differently from RFC3168. */ 13506 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint); 13507 /* Congestion experienced. */ 13508 if (ae->flags & TH_ECE) { 13509 rack_cong_signal(tp, CC_ECN, ae->ack); 13510 } 13511 } 13512 #ifdef TCP_ACCOUNTING 13513 /* Count for the specific type of ack in */ 13514 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13515 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13516 tp->tcp_cnt_counters[ae->ack_val_set]++; 13517 } 13518 #endif 13519 /* 13520 * Note how we could move up these in the determination 13521 * above, but we don't so that way the timestamp checks (and ECN) 13522 * is done first before we do any processing on the ACK. 13523 * The non-compressed path through the code has this 13524 * weakness (noted by @jtl) that it actually does some 13525 * processing before verifying the timestamp information. 13526 * We don't take that path here which is why we set 13527 * the ack_val_set first, do the timestamp and ecn 13528 * processing, and then look at what we have setup. 
		 */
		if (ae->ack_val_set == ACK_BEHIND) {
			/*
			 * Case B: flag reordering if the window is not
			 * closed, otherwise it could be a keep-alive or
			 * persists probe.
			 */
			if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) {
				counter_u64_add(rack_reorder_seen, 1);
				rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
			}
		} else if (ae->ack_val_set == ACK_DUPACK) {
			/* Case D */
			rack_strike_dupack(rack);
		} else if (ae->ack_val_set == ACK_RWND) {
			/* Case C */
			win_up_req = 1;
			win_upd_ack = ae->ack;
			win_seq = ae->seq;
			the_win = tiwin;
			rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
		} else {
			/* Case A */
			if (SEQ_GT(ae->ack, tp->snd_max)) {
				/*
				 * We just send an ack since the incoming
				 * ack is beyond the largest seq we sent.
				 */
				if ((tp->t_flags & TF_ACKNOW) == 0) {
					ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
					if (tp->t_flags & TF_ACKNOW)
						rack->r_wanted_output = 1;
				}
			} else {
				nsegs++;
				/* If the window changed setup to update */
				if (tiwin != tp->snd_wnd) {
					win_upd_ack = ae->ack;
					win_seq = ae->seq;
					the_win = tiwin;
					rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
				}
#ifdef TCP_ACCOUNTING
				/* Account for the acks */
				if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
					tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
				}
				counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
						(((ae->ack - high_seq) + segsiz - 1) / segsiz));
#endif
				high_seq = ae->ack;
				/* Setup our act_rcv_time */
				if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
					ts.tv_sec = ae->timestamp / 1000000000;
					ts.tv_nsec = ae->timestamp % 1000000000;
					rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
					rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
				} else {
					rack->r_ctl.act_rcv_time = *tv;
				}
				rack_process_to_cumack(tp, rack, ae->ack, cts, to);
				if (rack->rc_dsack_round_seen) {
					/* Is the dsack round over? */
					if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) {
						/* Yes it is */
						rack->rc_dsack_round_seen = 0;
						rack_log_dsack_event(rack, 3, __LINE__, 0, 0);
					}
				}
			}
		}
		/* And let's be sure to commit the rtt measurements for this ack */
		tcp_rack_xmit_timer_commit(rack, tp);
#ifdef TCP_ACCOUNTING
		rdstc = get_cyclecount();
		if (rdstc > ts_val) {
			counter_u64_add(tcp_proc_time[ae->ack_val_set], (rdstc - ts_val));
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val);
				if (ae->ack_val_set == ACK_CUMACK)
					tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val);
			}
		}
#endif
	}
#ifdef TCP_ACCOUNTING
	ts_val = get_cyclecount();
#endif
	acked_amount = acked = (high_seq - tp->snd_una);
	if (acked) {
		if (rack->sack_attack_disable == 0)
			rack_do_decay(rack);
		if (acked >= segsiz) {
			/*
			 * You only get credit for
			 * MSS and greater (and you get extra
			 * credit for larger cum-ack moves).
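			 * As a hypothetical example with segsiz = 1460: a
			 * cum-ack advance of 4380 bytes yields ac = 4380 /
			 * 1460 = 3, so ack_count below is credited with 3.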
13625 */ 13626 int ac; 13627 13628 ac = acked / segsiz; 13629 rack->r_ctl.ack_count += ac; 13630 counter_u64_add(rack_ack_total, ac); 13631 } 13632 if (rack->r_ctl.ack_count > 0xfff00000) { 13633 /* 13634 * reduce the number to keep us under 13635 * a uint32_t. 13636 */ 13637 rack->r_ctl.ack_count /= 2; 13638 rack->r_ctl.sack_count /= 2; 13639 } 13640 if (tp->t_flags & TF_NEEDSYN) { 13641 /* 13642 * T/TCP: Connection was half-synchronized, and our SYN has 13643 * been ACK'd (so connection is now fully synchronized). Go 13644 * to non-starred state, increment snd_una for ACK of SYN, 13645 * and check if we can do window scaling. 13646 */ 13647 tp->t_flags &= ~TF_NEEDSYN; 13648 tp->snd_una++; 13649 acked_amount = acked = (high_seq - tp->snd_una); 13650 } 13651 if (acked > sbavail(&so->so_snd)) 13652 acked_amount = sbavail(&so->so_snd); 13653 #ifdef NETFLIX_EXP_DETECTION 13654 /* 13655 * We only care on a cum-ack move if we are in a sack-disabled 13656 * state. We have already added in to the ack_count, and we never 13657 * would disable on a cum-ack move, so we only care to do the 13658 * detection if it may "undo" it, i.e. we were in disabled already. 13659 */ 13660 if (rack->sack_attack_disable) 13661 rack_do_detection(tp, rack, acked_amount, segsiz); 13662 #endif 13663 if (IN_FASTRECOVERY(tp->t_flags) && 13664 (rack->rack_no_prr == 0)) 13665 rack_update_prr(tp, rack, acked_amount, high_seq); 13666 if (IN_RECOVERY(tp->t_flags)) { 13667 if (SEQ_LT(high_seq, tp->snd_recover) && 13668 (SEQ_LT(high_seq, tp->snd_max))) { 13669 tcp_rack_partialack(tp); 13670 } else { 13671 rack_post_recovery(tp, high_seq); 13672 recovery = 1; 13673 } 13674 } 13675 /* Handle the rack-log-ack part (sendmap) */ 13676 if ((sbused(&so->so_snd) == 0) && 13677 (acked > acked_amount) && 13678 (tp->t_state >= TCPS_FIN_WAIT_1) && 13679 (tp->t_flags & TF_SENTFIN)) { 13680 /* 13681 * We must be sure our fin 13682 * was sent and acked (we can be 13683 * in FIN_WAIT_1 without having 13684 * sent the fin). 13685 */ 13686 ourfinisacked = 1; 13687 /* 13688 * Lets make sure snd_una is updated 13689 * since most likely acked_amount = 0 (it 13690 * should be). 13691 */ 13692 tp->snd_una = high_seq; 13693 } 13694 /* Did we make a RTO error? 
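	 * (That is: TF_PREVVALID is set, timestamps are not in use, and the
	 *  ack for our very first retransmission arrived while ticks was
	 *  still inside t_badrxtwin, which suggests the retransmit was
	 *  spurious; in that case we signal CC_RTO_ERR below.)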
*/ 13695 if ((tp->t_flags & TF_PREVVALID) && 13696 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13697 tp->t_flags &= ~TF_PREVVALID; 13698 if (tp->t_rxtshift == 1 && 13699 (int)(ticks - tp->t_badrxtwin) < 0) 13700 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13701 } 13702 /* Handle the data in the socket buffer */ 13703 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13704 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13705 if (acked_amount > 0) { 13706 struct mbuf *mfree; 13707 13708 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13709 SOCKBUF_LOCK(&so->so_snd); 13710 mfree = sbcut_locked(&so->so_snd, acked_amount); 13711 tp->snd_una = high_seq; 13712 /* Note we want to hold the sb lock through the sendmap adjust */ 13713 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13714 /* Wake up the socket if we have room to write more */ 13715 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13716 sowwakeup_locked(so); 13717 m_freem(mfree); 13718 } 13719 /* update progress */ 13720 tp->t_acktime = ticks; 13721 rack_log_progress_event(rack, tp, tp->t_acktime, 13722 PROGRESS_UPDATE, __LINE__); 13723 /* Clear out shifts and such */ 13724 tp->t_rxtshift = 0; 13725 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13726 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13727 rack->rc_tlp_in_progress = 0; 13728 rack->r_ctl.rc_tlp_cnt_out = 0; 13729 /* Send recover and snd_nxt must be dragged along */ 13730 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13731 tp->snd_recover = tp->snd_una; 13732 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13733 tp->snd_nxt = tp->snd_una; 13734 /* 13735 * If the RXT timer is running we want to 13736 * stop it, so we can restart a TLP (or new RXT). 13737 */ 13738 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13739 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13740 #ifdef NETFLIX_HTTP_LOGGING 13741 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13742 #endif 13743 tp->snd_wl2 = high_seq; 13744 tp->t_dupacks = 0; 13745 if (under_pacing && 13746 (rack->use_fixed_rate == 0) && 13747 (rack->in_probe_rtt == 0) && 13748 rack->rc_gp_dyn_mul && 13749 rack->rc_always_pace) { 13750 /* Check if we are dragging bottom */ 13751 rack_check_bottom_drag(tp, rack, so, acked); 13752 } 13753 if (tp->snd_una == tp->snd_max) { 13754 tp->t_flags &= ~TF_PREVVALID; 13755 rack->r_ctl.retran_during_recovery = 0; 13756 rack->r_ctl.dsack_byte_cnt = 0; 13757 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13758 if (rack->r_ctl.rc_went_idle_time == 0) 13759 rack->r_ctl.rc_went_idle_time = 1; 13760 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13761 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13762 tp->t_acktime = 0; 13763 /* Set so we might enter persists... */ 13764 rack->r_wanted_output = 1; 13765 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13766 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13767 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13768 (sbavail(&so->so_snd) == 0) && 13769 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13770 /* 13771 * The socket was gone and the 13772 * peer sent data (not now in the past), time to 13773 * reset him. 
13774 */ 13775 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13776 /* tcp_close will kill the inp pre-log the Reset */ 13777 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13778 #ifdef TCP_ACCOUNTING 13779 rdstc = get_cyclecount(); 13780 if (rdstc > ts_val) { 13781 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13782 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13783 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13784 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13785 } 13786 } 13787 #endif 13788 m_freem(m); 13789 tp = tcp_close(tp); 13790 if (tp == NULL) { 13791 #ifdef TCP_ACCOUNTING 13792 sched_unpin(); 13793 #endif 13794 return (1); 13795 } 13796 /* 13797 * We would normally do drop-with-reset which would 13798 * send back a reset. We can't since we don't have 13799 * all the needed bits. Instead lets arrange for 13800 * a call to tcp_output(). That way since we 13801 * are in the closed state we will generate a reset. 13802 * 13803 * Note if tcp_accounting is on we don't unpin since 13804 * we do that after the goto label. 13805 */ 13806 goto send_out_a_rst; 13807 } 13808 if ((sbused(&so->so_snd) == 0) && 13809 (tp->t_state >= TCPS_FIN_WAIT_1) && 13810 (tp->t_flags & TF_SENTFIN)) { 13811 /* 13812 * If we can't receive any more data, then closing user can 13813 * proceed. Starting the timer is contrary to the 13814 * specification, but if we don't get a FIN we'll hang 13815 * forever. 13816 * 13817 */ 13818 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13819 soisdisconnected(so); 13820 tcp_timer_activate(tp, TT_2MSL, 13821 (tcp_fast_finwait2_recycle ? 13822 tcp_finwait2_timeout : 13823 TP_MAXIDLE(tp))); 13824 } 13825 if (ourfinisacked == 0) { 13826 /* 13827 * We don't change to fin-wait-2 if we have our fin acked 13828 * which means we are probably in TCPS_CLOSING. 13829 */ 13830 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13831 } 13832 } 13833 } 13834 /* Wake up the socket if we have room to write more */ 13835 if (sbavail(&so->so_snd)) { 13836 rack->r_wanted_output = 1; 13837 if (ctf_progress_timeout_check(tp, true)) { 13838 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13839 tp, tick, PROGRESS_DROP, __LINE__); 13840 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 13841 /* 13842 * We cheat here and don't send a RST, we should send one 13843 * when the pacer drops the connection. 
13844 */ 13845 #ifdef TCP_ACCOUNTING 13846 rdstc = get_cyclecount(); 13847 if (rdstc > ts_val) { 13848 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13849 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13850 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13851 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13852 } 13853 } 13854 sched_unpin(); 13855 #endif 13856 INP_WUNLOCK(rack->rc_inp); 13857 m_freem(m); 13858 return (1); 13859 } 13860 } 13861 if (ourfinisacked) { 13862 switch(tp->t_state) { 13863 case TCPS_CLOSING: 13864 #ifdef TCP_ACCOUNTING 13865 rdstc = get_cyclecount(); 13866 if (rdstc > ts_val) { 13867 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13868 (rdstc - ts_val)); 13869 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13870 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13871 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13872 } 13873 } 13874 sched_unpin(); 13875 #endif 13876 tcp_twstart(tp); 13877 m_freem(m); 13878 return (1); 13879 break; 13880 case TCPS_LAST_ACK: 13881 #ifdef TCP_ACCOUNTING 13882 rdstc = get_cyclecount(); 13883 if (rdstc > ts_val) { 13884 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13885 (rdstc - ts_val)); 13886 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13887 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13888 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13889 } 13890 } 13891 sched_unpin(); 13892 #endif 13893 tp = tcp_close(tp); 13894 ctf_do_drop(m, tp); 13895 return (1); 13896 break; 13897 case TCPS_FIN_WAIT_1: 13898 #ifdef TCP_ACCOUNTING 13899 rdstc = get_cyclecount(); 13900 if (rdstc > ts_val) { 13901 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13902 (rdstc - ts_val)); 13903 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13904 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13905 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13906 } 13907 } 13908 #endif 13909 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13910 soisdisconnected(so); 13911 tcp_timer_activate(tp, TT_2MSL, 13912 (tcp_fast_finwait2_recycle ? 13913 tcp_finwait2_timeout : 13914 TP_MAXIDLE(tp))); 13915 } 13916 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13917 break; 13918 default: 13919 break; 13920 } 13921 } 13922 if (rack->r_fast_output) { 13923 /* 13924 * We re doing fast output.. can we expand that? 
13925 */ 13926 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13927 } 13928 #ifdef TCP_ACCOUNTING 13929 rdstc = get_cyclecount(); 13930 if (rdstc > ts_val) { 13931 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13932 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13933 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13934 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13935 } 13936 } 13937 13938 } else if (win_up_req) { 13939 rdstc = get_cyclecount(); 13940 if (rdstc > ts_val) { 13941 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13942 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13943 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13944 } 13945 } 13946 #endif 13947 } 13948 /* Now is there a next packet, if so we are done */ 13949 m_freem(m); 13950 did_out = 0; 13951 if (nxt_pkt) { 13952 #ifdef TCP_ACCOUNTING 13953 sched_unpin(); 13954 #endif 13955 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13956 return (0); 13957 } 13958 rack_handle_might_revert(tp, rack); 13959 ctf_calc_rwin(so, tp); 13960 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13961 send_out_a_rst: 13962 (void)tp->t_fb->tfb_tcp_output(tp); 13963 did_out = 1; 13964 } 13965 rack_free_trim(rack); 13966 #ifdef TCP_ACCOUNTING 13967 sched_unpin(); 13968 #endif 13969 rack_timer_audit(tp, rack, &so->so_snd); 13970 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13971 return (0); 13972 } 13973 13974 13975 static int 13976 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13977 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13978 int32_t nxt_pkt, struct timeval *tv) 13979 { 13980 #ifdef TCP_ACCOUNTING 13981 uint64_t ts_val; 13982 #endif 13983 int32_t thflags, retval, did_out = 0; 13984 int32_t way_out = 0; 13985 /* 13986 * cts - is the current time from tv (caller gets ts) in microseconds. 13987 * ms_cts - is the current time from tv in milliseconds. 13988 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13989 */ 13990 uint32_t cts, us_cts, ms_cts; 13991 uint32_t tiwin; 13992 struct timespec ts; 13993 struct tcpopt to; 13994 struct tcp_rack *rack; 13995 struct rack_sendmap *rsm; 13996 int32_t prev_state = 0; 13997 #ifdef TCP_ACCOUNTING 13998 int ack_val_set = 0xf; 13999 #endif 14000 int nsegs; 14001 /* 14002 * tv passed from common code is from either M_TSTMP_LRO or 14003 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14004 */ 14005 rack = (struct tcp_rack *)tp->t_fb_ptr; 14006 if (m->m_flags & M_ACKCMP) { 14007 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14008 } 14009 if (m->m_flags & M_ACKCMP) { 14010 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 14011 } 14012 cts = tcp_tv_to_usectick(tv); 14013 ms_cts = tcp_tv_to_mssectick(tv); 14014 nsegs = m->m_pkthdr.lro_nsegs; 14015 counter_u64_add(rack_proc_non_comp_ack, 1); 14016 thflags = th->th_flags; 14017 #ifdef TCP_ACCOUNTING 14018 sched_pin(); 14019 if (thflags & TH_ACK) 14020 ts_val = get_cyclecount(); 14021 #endif 14022 if ((m->m_flags & M_TSTMP) || 14023 (m->m_flags & M_TSTMP_LRO)) { 14024 mbuf_tstmp2timespec(m, &ts); 14025 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14026 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14027 } else 14028 rack->r_ctl.act_rcv_time = *tv; 14029 kern_prefetch(rack, &prev_state); 14030 prev_state = 0; 14031 /* 14032 * Unscale the window into a 32-bit value. For the SYN_SENT state 14033 * the scale is zero. 
	 * the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;
#ifdef TCP_ACCOUNTING
	if (thflags & TH_ACK) {
		/*
		 * We have a tradeoff here. We can either do what we are
		 * doing i.e. pinning to this CPU and then doing the accounting
		 * <or> we could do a critical enter, setup the rdtsc and cpu
		 * as in below, and then validate we are on the same CPU on
		 * exit. I have chosen to not do the critical enter since
		 * that often will gain you a context switch, and instead lock
		 * us (line above this if) to the same CPU with sched_pin(). This
		 * means we may be context switched out for a higher priority
		 * interrupt but we won't be moved to another CPU.
		 *
		 * If this occurs (which it won't very often since we most likely
		 * are running this code in interrupt context and only a higher
		 * priority will bump us ... clock?) we will falsely add the
		 * interrupt processing time to the ack processing time. This
		 * is ok since it's a rare event.
		 */
		ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin,
						    ctf_fixed_maxseg(tp));
	}
#endif
	/*
	 * Parse options on any incoming segment.
	 */
	memset(&to, 0, sizeof(to));
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);
	NET_EPOCH_ASSERT();
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));
	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
	    (tp->t_flags & TF_GPUTINPROG)) {
		/*
		 * We have a goodput in progress
		 * and we have entered a late state.
		 * Do we have enough data in the sb
		 * to handle the GPUT request?
		 */
		uint32_t bytes;

		bytes = tp->gput_ack - tp->gput_seq;
		if (SEQ_GT(tp->gput_seq, tp->snd_una))
			bytes += tp->gput_seq - tp->snd_una;
		if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
			/*
			 * There are not enough bytes in the socket
			 * buffer that have been sent to cover this
			 * measurement. Cancel it.
			 */
			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
						   rack->r_ctl.rc_gp_srtt /*flex1*/,
						   tp->gput_seq,
						   0, 0, 18, __LINE__, NULL, 0);
			tp->t_flags &= ~TF_GPUTINPROG;
		}
	}
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval ltv;
#ifdef NETFLIX_HTTP_LOGGING
		struct http_sendfile_track *http_req;

		if (SEQ_GT(th->th_ack, tp->snd_una)) {
			http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
		} else {
			http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
		}
#endif
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts;
		log.u_bbr.ininput = rack->rc_inp->inp_in_input;
		if (rack->rack_no_prr == 0)
			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		else
			log.u_bbr.flex1 = 0;
		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
		log.u_bbr.use_lt_bw <<= 1;
		log.u_bbr.use_lt_bw |= rack->r_might_revert;
		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
		log.u_bbr.flex3 = m->m_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.lost = thflags;
		log.u_bbr.pacing_gain = 0x1;
#ifdef TCP_ACCOUNTING
		log.u_bbr.cwnd_gain = ack_val_set;
#endif
		log.u_bbr.flex7 = 2;
		if (m->m_flags & M_TSTMP) {
			/* Record the hardware timestamp if present */
			mbuf_tstmp2timespec(m, &ts);
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
		} else if (m->m_flags & M_TSTMP_LRO) {
			/* Record the LRO arrival timestamp */
			mbuf_tstmp2timespec(m, &ts);
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
		/* Log the rcv time */
		log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef NETFLIX_HTTP_LOGGING
		log.u_bbr.applimited = tp->t_http_closed;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_open;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_req;
		if (http_req) {
			/* Copy out any client req info */
			/* seconds */
			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
			/* useconds */
			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
			log.u_bbr.rttProp = http_req->timestamp;
			log.u_bbr.cur_del_rate = http_req->start;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
				log.u_bbr.flex8 |= 1;
			} else {
				log.u_bbr.flex8 |= 2;
				log.u_bbr.bw_inuse = http_req->end;
			}
			log.u_bbr.flex6 = http_req->start_seq;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
				log.u_bbr.flex8 |= 4;
				log.u_bbr.epoch = http_req->end_seq;
			}
		}
#endif
		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
		    tlen, &log, true, &ltv);
	}
	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		way_out = 4;
		retval = 0;
		m_freem(m);
		goto done_with_input;
	}
	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
14186 */ 14187 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14188 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14189 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14190 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14191 #ifdef TCP_ACCOUNTING 14192 sched_unpin(); 14193 #endif 14194 return (1); 14195 } 14196 /* 14197 * If timestamps were negotiated during SYN/ACK and a 14198 * segment without a timestamp is received, silently drop 14199 * the segment, unless it is a RST segment or missing timestamps are 14200 * tolerated. 14201 * See section 3.2 of RFC 7323. 14202 */ 14203 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14204 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14205 way_out = 5; 14206 retval = 0; 14207 m_freem(m); 14208 goto done_with_input; 14209 } 14210 14211 /* 14212 * Segment received on connection. Reset idle time and keep-alive 14213 * timer. XXX: This should be done after segment validation to 14214 * ignore broken/spoofed segs. 14215 */ 14216 if (tp->t_idle_reduce && 14217 (tp->snd_max == tp->snd_una) && 14218 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14219 counter_u64_add(rack_input_idle_reduces, 1); 14220 rack_cc_after_idle(rack, tp); 14221 } 14222 tp->t_rcvtime = ticks; 14223 #ifdef STATS 14224 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14225 #endif 14226 if (tiwin > rack->r_ctl.rc_high_rwnd) 14227 rack->r_ctl.rc_high_rwnd = tiwin; 14228 /* 14229 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14230 * this to occur after we've validated the segment. 14231 */ 14232 if (tp->t_flags2 & TF2_ECN_PERMIT) { 14233 if (thflags & TH_CWR) { 14234 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 14235 tp->t_flags |= TF_ACKNOW; 14236 } 14237 switch (iptos & IPTOS_ECN_MASK) { 14238 case IPTOS_ECN_CE: 14239 tp->t_flags2 |= TF2_ECN_SND_ECE; 14240 KMOD_TCPSTAT_INC(tcps_ecn_ce); 14241 break; 14242 case IPTOS_ECN_ECT0: 14243 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 14244 break; 14245 case IPTOS_ECN_ECT1: 14246 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 14247 break; 14248 } 14249 14250 /* Process a packet differently from RFC3168. */ 14251 cc_ecnpkt_handler(tp, th, iptos); 14252 14253 /* Congestion experienced. */ 14254 if (thflags & TH_ECE) { 14255 rack_cong_signal(tp, CC_ECN, th->th_ack); 14256 } 14257 } 14258 14259 /* 14260 * If echoed timestamp is later than the current time, fall back to 14261 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14262 * were used when this connection was established. 14263 */ 14264 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14265 to.to_tsecr -= tp->ts_offset; 14266 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14267 to.to_tsecr = 0; 14268 } 14269 14270 /* 14271 * If its the first time in we need to take care of options and 14272 * verify we can do SACK for rack! 14273 */ 14274 if (rack->r_state == 0) { 14275 /* Should be init'd by rack_init() */ 14276 KASSERT(rack->rc_inp != NULL, 14277 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14278 if (rack->rc_inp == NULL) { 14279 rack->rc_inp = tp->t_inpcb; 14280 } 14281 14282 /* 14283 * Process options only when we get SYN/ACK back. The SYN 14284 * case for incoming connections is handled in tcp_syncache. 14285 * According to RFC1323 the window field in a SYN (i.e., a 14286 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14287 * this is traditional behavior, may need to be cleaned up. 
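		 * (For illustration: if the peer's SYN/ACK carries TOF_SCALE
		 * with to_wscale = 7, snd_scale becomes 7 and a later
		 * advertised th_win of 1024 is interpreted as 1024 << 7 =
		 * 131072 bytes.)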
14288 */ 14289 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14290 /* Handle parallel SYN for ECN */ 14291 if (!(thflags & TH_ACK) && 14292 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) && 14293 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) { 14294 tp->t_flags2 |= TF2_ECN_PERMIT; 14295 tp->t_flags2 |= TF2_ECN_SND_ECE; 14296 TCPSTAT_INC(tcps_ecn_shs); 14297 } 14298 if ((to.to_flags & TOF_SCALE) && 14299 (tp->t_flags & TF_REQ_SCALE)) { 14300 tp->t_flags |= TF_RCVD_SCALE; 14301 tp->snd_scale = to.to_wscale; 14302 } else 14303 tp->t_flags &= ~TF_REQ_SCALE; 14304 /* 14305 * Initial send window. It will be updated with the 14306 * next incoming segment to the scaled value. 14307 */ 14308 tp->snd_wnd = th->th_win; 14309 rack_validate_fo_sendwin_up(tp, rack); 14310 if ((to.to_flags & TOF_TS) && 14311 (tp->t_flags & TF_REQ_TSTMP)) { 14312 tp->t_flags |= TF_RCVD_TSTMP; 14313 tp->ts_recent = to.to_tsval; 14314 tp->ts_recent_age = cts; 14315 } else 14316 tp->t_flags &= ~TF_REQ_TSTMP; 14317 if (to.to_flags & TOF_MSS) { 14318 tcp_mss(tp, to.to_mss); 14319 } 14320 if ((tp->t_flags & TF_SACK_PERMIT) && 14321 (to.to_flags & TOF_SACKPERM) == 0) 14322 tp->t_flags &= ~TF_SACK_PERMIT; 14323 if (IS_FASTOPEN(tp->t_flags)) { 14324 if (to.to_flags & TOF_FASTOPEN) { 14325 uint16_t mss; 14326 14327 if (to.to_flags & TOF_MSS) 14328 mss = to.to_mss; 14329 else 14330 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14331 mss = TCP6_MSS; 14332 else 14333 mss = TCP_MSS; 14334 tcp_fastopen_update_cache(tp, mss, 14335 to.to_tfo_len, to.to_tfo_cookie); 14336 } else 14337 tcp_fastopen_disable_path(tp); 14338 } 14339 } 14340 /* 14341 * At this point we are at the initial call. Here we decide 14342 * if we are doing RACK or not. We do this by seeing if 14343 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14344 * The code now does do dup-ack counting so if you don't 14345 * switch back you won't get rack & TLP, but you will still 14346 * get this stack. 14347 */ 14348 14349 if ((rack_sack_not_required == 0) && 14350 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14351 tcp_switch_back_to_default(tp); 14352 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14353 tlen, iptos); 14354 #ifdef TCP_ACCOUNTING 14355 sched_unpin(); 14356 #endif 14357 return (1); 14358 } 14359 tcp_set_hpts(tp->t_inpcb); 14360 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14361 } 14362 if (thflags & TH_FIN) 14363 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14364 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14365 if ((rack->rc_gp_dyn_mul) && 14366 (rack->use_fixed_rate == 0) && 14367 (rack->rc_always_pace)) { 14368 /* Check in on probertt */ 14369 rack_check_probe_rtt(rack, us_cts); 14370 } 14371 rack_clear_rate_sample(rack); 14372 if (rack->forced_ack) { 14373 uint32_t us_rtt; 14374 14375 /* 14376 * A persist or keep-alive was forced out, update our 14377 * min rtt time. Note we do not worry about lost 14378 * retransmissions since KEEP-ALIVES and persists 14379 * are usually way long on times of sending (though 14380 * if we were really paranoid or worried we could 14381 * at least use timestamps if available to validate). 14382 */ 14383 rack->forced_ack = 0; 14384 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 14385 if (us_rtt == 0) 14386 us_rtt = 1; 14387 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 14388 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 14389 } 14390 /* 14391 * This is the one exception case where we set the rack state 14392 * always. 
All other times (timers etc) we must have a rack-state 14393 * set (so we assure we have done the checks above for SACK). 14394 */ 14395 rack->r_ctl.rc_rcvtime = cts; 14396 if (rack->r_state != tp->t_state) 14397 rack_set_state(tp, rack); 14398 if (SEQ_GT(th->th_ack, tp->snd_una) && 14399 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14400 kern_prefetch(rsm, &prev_state); 14401 prev_state = rack->r_state; 14402 retval = (*rack->r_substate) (m, th, so, 14403 tp, &to, drop_hdrlen, 14404 tlen, tiwin, thflags, nxt_pkt, iptos); 14405 #ifdef INVARIANTS 14406 if ((retval == 0) && 14407 (tp->t_inpcb == NULL)) { 14408 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14409 retval, tp, prev_state); 14410 } 14411 #endif 14412 if (retval == 0) { 14413 /* 14414 * If retval is 1 the tcb is unlocked and most likely the tp 14415 * is gone. 14416 */ 14417 INP_WLOCK_ASSERT(tp->t_inpcb); 14418 if ((rack->rc_gp_dyn_mul) && 14419 (rack->rc_always_pace) && 14420 (rack->use_fixed_rate == 0) && 14421 rack->in_probe_rtt && 14422 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14423 /* 14424 * If we are going for target, lets recheck before 14425 * we output. 14426 */ 14427 rack_check_probe_rtt(rack, us_cts); 14428 } 14429 if (rack->set_pacing_done_a_iw == 0) { 14430 /* How much has been acked? */ 14431 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14432 /* We have enough to set in the pacing segment size */ 14433 rack->set_pacing_done_a_iw = 1; 14434 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14435 } 14436 } 14437 tcp_rack_xmit_timer_commit(rack, tp); 14438 #ifdef TCP_ACCOUNTING 14439 /* 14440 * If we set the ack_val_se to what ack processing we are doing 14441 * we also want to track how many cycles we burned. Note 14442 * the bits after tcp_output we let be "free". This is because 14443 * we are also tracking the tcp_output times as well. Note the 14444 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14445 * 0xf cannot be returned and is what we initialize it too to 14446 * indicate we are not doing the tabulations. 14447 */ 14448 if (ack_val_set != 0xf) { 14449 uint64_t crtsc; 14450 14451 crtsc = get_cyclecount(); 14452 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14453 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14454 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14455 } 14456 } 14457 #endif 14458 if (nxt_pkt == 0) { 14459 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14460 do_output_now: 14461 did_out = 1; 14462 (void)tp->t_fb->tfb_tcp_output(tp); 14463 } 14464 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14465 rack_free_trim(rack); 14466 } 14467 if ((nxt_pkt == 0) && 14468 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14469 (SEQ_GT(tp->snd_max, tp->snd_una) || 14470 (tp->t_flags & TF_DELACK) || 14471 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14472 (tp->t_state <= TCPS_CLOSING)))) { 14473 /* We could not send (probably in the hpts but stopped the timer earlier)? 
*/ 14474 if ((tp->snd_max == tp->snd_una) && 14475 ((tp->t_flags & TF_DELACK) == 0) && 14476 (rack->rc_inp->inp_in_hpts) && 14477 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14478 /* keep alive not needed if we are hptsi output yet */ 14479 ; 14480 } else { 14481 int late = 0; 14482 if (rack->rc_inp->inp_in_hpts) { 14483 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14484 us_cts = tcp_get_usecs(NULL); 14485 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14486 rack->r_early = 1; 14487 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14488 } else 14489 late = 1; 14490 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14491 } 14492 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 14493 } 14494 if (late && (did_out == 0)) { 14495 /* 14496 * We are late in the sending 14497 * and we did not call the output 14498 * (this probably should not happen). 14499 */ 14500 goto do_output_now; 14501 } 14502 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14503 } 14504 way_out = 1; 14505 } else if (nxt_pkt == 0) { 14506 /* Do we have the correct timer running? */ 14507 rack_timer_audit(tp, rack, &so->so_snd); 14508 way_out = 2; 14509 } 14510 done_with_input: 14511 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14512 if (did_out) 14513 rack->r_wanted_output = 0; 14514 #ifdef INVARIANTS 14515 if (tp->t_inpcb == NULL) { 14516 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14517 did_out, 14518 retval, tp, prev_state); 14519 } 14520 #endif 14521 #ifdef TCP_ACCOUNTING 14522 } else { 14523 /* 14524 * Track the time (see above). 14525 */ 14526 if (ack_val_set != 0xf) { 14527 uint64_t crtsc; 14528 14529 crtsc = get_cyclecount(); 14530 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14531 /* 14532 * Note we *DO NOT* increment the per-tcb counters since 14533 * in the else the TP may be gone!! 14534 */ 14535 } 14536 #endif 14537 } 14538 #ifdef TCP_ACCOUNTING 14539 sched_unpin(); 14540 #endif 14541 return (retval); 14542 } 14543 14544 void 14545 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14546 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14547 { 14548 struct timeval tv; 14549 14550 /* First lets see if we have old packets */ 14551 if (tp->t_in_pkt) { 14552 if (ctf_do_queued_segments(so, tp, 1)) { 14553 m_freem(m); 14554 return; 14555 } 14556 } 14557 if (m->m_flags & M_TSTMP_LRO) { 14558 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14559 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14560 } else { 14561 /* Should not be should we kassert instead? */ 14562 tcp_get_usecs(&tv); 14563 } 14564 if (rack_do_segment_nounlock(m, th, so, tp, 14565 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14566 INP_WUNLOCK(tp->t_inpcb); 14567 } 14568 } 14569 14570 struct rack_sendmap * 14571 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14572 { 14573 struct rack_sendmap *rsm = NULL; 14574 int32_t idx; 14575 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14576 14577 /* Return the next guy to be re-transmitted */ 14578 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14579 return (NULL); 14580 } 14581 if (tp->t_flags & TF_SENTFIN) { 14582 /* retran the end FIN? 
*/ 14583 return (NULL); 14584 } 14585 /* ok lets look at this one */ 14586 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14587 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14588 goto check_it; 14589 } 14590 rsm = rack_find_lowest_rsm(rack); 14591 if (rsm == NULL) { 14592 return (NULL); 14593 } 14594 check_it: 14595 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14596 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14597 /* 14598 * No sack so we automatically do the 3 strikes and 14599 * retransmit (no rack timer would be started). 14600 */ 14601 14602 return (rsm); 14603 } 14604 if (rsm->r_flags & RACK_ACKED) { 14605 return (NULL); 14606 } 14607 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14608 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14609 /* Its not yet ready */ 14610 return (NULL); 14611 } 14612 srtt = rack_grab_rtt(tp, rack); 14613 idx = rsm->r_rtr_cnt - 1; 14614 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14615 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14616 if ((tsused == ts_low) || 14617 (TSTMP_LT(tsused, ts_low))) { 14618 /* No time since sending */ 14619 return (NULL); 14620 } 14621 if ((tsused - ts_low) < thresh) { 14622 /* It has not been long enough yet */ 14623 return (NULL); 14624 } 14625 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14626 ((rsm->r_flags & RACK_SACK_PASSED) && 14627 (rack->sack_attack_disable == 0))) { 14628 /* 14629 * We have passed the dup-ack threshold <or> 14630 * a SACK has indicated this is missing. 14631 * Note that if you are a declared attacker 14632 * it is only the dup-ack threshold that 14633 * will cause retransmits. 14634 */ 14635 /* log retransmit reason */ 14636 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14637 rack->r_fast_output = 0; 14638 return (rsm); 14639 } 14640 return (NULL); 14641 } 14642 14643 static void 14644 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14645 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14646 int line, struct rack_sendmap *rsm, uint8_t quality) 14647 { 14648 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14649 union tcp_log_stackspecific log; 14650 struct timeval tv; 14651 14652 memset(&log, 0, sizeof(log)); 14653 log.u_bbr.flex1 = slot; 14654 log.u_bbr.flex2 = len; 14655 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14656 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14657 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14658 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14659 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14660 log.u_bbr.use_lt_bw <<= 1; 14661 log.u_bbr.use_lt_bw |= rack->r_late; 14662 log.u_bbr.use_lt_bw <<= 1; 14663 log.u_bbr.use_lt_bw |= rack->r_early; 14664 log.u_bbr.use_lt_bw <<= 1; 14665 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14666 log.u_bbr.use_lt_bw <<= 1; 14667 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14668 log.u_bbr.use_lt_bw <<= 1; 14669 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14670 log.u_bbr.use_lt_bw <<= 1; 14671 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14672 log.u_bbr.use_lt_bw <<= 1; 14673 log.u_bbr.use_lt_bw |= rack->gp_ready; 14674 log.u_bbr.pkt_epoch = line; 14675 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14676 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14677 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14678 log.u_bbr.bw_inuse = bw_est; 14679 log.u_bbr.delRate = bw; 14680 if (rack->r_ctl.gp_bw == 0) 14681 log.u_bbr.cur_del_rate = 0; 14682 else 14683 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14684 log.u_bbr.rttProp = len_time; 14685 
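		/*
		 * Note (derived from the assignments above): use_lt_bw is
		 * used here as a packed flag byte; from the least significant
		 * bit upward it carries gp_ready, in_probe_rtt,
		 * measure_saw_probe_rtt, rc_gp_filled, app_limited_needs_set,
		 * r_early, r_late and rc_ack_can_sendout_data.
		 */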
log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14686 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14687 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14688 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14689 /* We are in slow start */ 14690 log.u_bbr.flex7 = 1; 14691 } else { 14692 /* we are on congestion avoidance */ 14693 log.u_bbr.flex7 = 0; 14694 } 14695 log.u_bbr.flex8 = method; 14696 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14697 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14698 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14699 log.u_bbr.cwnd_gain <<= 1; 14700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14701 log.u_bbr.cwnd_gain <<= 1; 14702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14703 log.u_bbr.bbr_substate = quality; 14704 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14705 &rack->rc_inp->inp_socket->so_rcv, 14706 &rack->rc_inp->inp_socket->so_snd, 14707 BBR_LOG_HPTSI_CALC, 0, 14708 0, &log, false, &tv); 14709 } 14710 } 14711 14712 static uint32_t 14713 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14714 { 14715 uint32_t new_tso, user_max; 14716 14717 user_max = rack->rc_user_set_max_segs * mss; 14718 if (rack->rc_force_max_seg) { 14719 return (user_max); 14720 } 14721 if (rack->use_fixed_rate && 14722 ((rack->r_ctl.crte == NULL) || 14723 (bw != rack->r_ctl.crte->rate))) { 14724 /* Use the user mss since we are not exactly matched */ 14725 return (user_max); 14726 } 14727 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14728 if (new_tso > user_max) 14729 new_tso = user_max; 14730 return (new_tso); 14731 } 14732 14733 static int32_t 14734 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14735 { 14736 uint64_t lentim, fill_bw; 14737 14738 /* Lets first see if we are full, if so continue with normal rate */ 14739 rack->r_via_fill_cw = 0; 14740 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14741 return (slot); 14742 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14743 return (slot); 14744 if (rack->r_ctl.rc_last_us_rtt == 0) 14745 return (slot); 14746 if (rack->rc_pace_fill_if_rttin_range && 14747 (rack->r_ctl.rc_last_us_rtt >= 14748 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14749 /* The rtt is huge, N * smallest, lets not fill */ 14750 return (slot); 14751 } 14752 /* 14753 * first lets calculate the b/w based on the last us-rtt 14754 * and the sndwnd. 14755 */ 14756 fill_bw = rack->r_ctl.cwnd_to_use; 14757 /* Take the rwnd if its smaller */ 14758 if (fill_bw > rack->rc_tp->snd_wnd) 14759 fill_bw = rack->rc_tp->snd_wnd; 14760 if (rack->r_fill_less_agg) { 14761 /* 14762 * Now take away the inflight (this will reduce our 14763 * aggressiveness and yeah, if we get that much out in 1RTT 14764 * we will have had acks come back and still be behind). 
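		 * As a rough, made-up illustration of the conversion a few
		 * lines below: a usable window of 1,000,000 bytes with an
		 * rc_last_us_rtt of 20,000 usec becomes fill_bw = 1000000 *
		 * HPTS_USEC_IN_SEC / 20000 = 50,000,000 bytes/sec.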
14765 */ 14766 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14767 } 14768 /* Now lets make it into a b/w */ 14769 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14770 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14771 /* We are below the min b/w */ 14772 if (non_paced) 14773 *rate_wanted = fill_bw; 14774 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14775 return (slot); 14776 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14777 fill_bw = rack->r_ctl.bw_rate_cap; 14778 rack->r_via_fill_cw = 1; 14779 if (rack->r_rack_hw_rate_caps && 14780 (rack->r_ctl.crte != NULL)) { 14781 uint64_t high_rate; 14782 14783 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14784 if (fill_bw > high_rate) { 14785 /* We are capping bw at the highest rate table entry */ 14786 if (*rate_wanted > high_rate) { 14787 /* The original rate was also capped */ 14788 rack->r_via_fill_cw = 0; 14789 } 14790 rack_log_hdwr_pacing(rack, 14791 fill_bw, high_rate, __LINE__, 14792 0, 3); 14793 fill_bw = high_rate; 14794 if (capped) 14795 *capped = 1; 14796 } 14797 } else if ((rack->r_ctl.crte == NULL) && 14798 (rack->rack_hdrw_pacing == 0) && 14799 (rack->rack_hdw_pace_ena) && 14800 rack->r_rack_hw_rate_caps && 14801 (rack->rack_attempt_hdwr_pace == 0) && 14802 (rack->rc_inp->inp_route.ro_nh != NULL) && 14803 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14804 /* 14805 * Ok we may have a first attempt that is greater than our top rate 14806 * lets check. 14807 */ 14808 uint64_t high_rate; 14809 14810 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14811 if (high_rate) { 14812 if (fill_bw > high_rate) { 14813 fill_bw = high_rate; 14814 if (capped) 14815 *capped = 1; 14816 } 14817 } 14818 } 14819 /* 14820 * Ok fill_bw holds our mythical b/w to fill the cwnd 14821 * in a rtt, what does that time wise equate too? 14822 */ 14823 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14824 lentim /= fill_bw; 14825 *rate_wanted = fill_bw; 14826 if (non_paced || (lentim < slot)) { 14827 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14828 0, lentim, 12, __LINE__, NULL, 0); 14829 return ((int32_t)lentim); 14830 } else 14831 return (slot); 14832 } 14833 14834 static int32_t 14835 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14836 { 14837 int32_t slot = 0; 14838 int can_start_hw_pacing = 1; 14839 int err; 14840 14841 if (rack->rc_always_pace == 0) { 14842 /* 14843 * We use the most optimistic possible cwnd/srtt for 14844 * sending calculations. This will make our 14845 * calculation anticipate getting more through 14846 * quicker then possible. But thats ok we don't want 14847 * the peer to have a gap in data sending. 14848 */ 14849 uint32_t srtt, cwnd, tr_perms = 0; 14850 int32_t reduce = 0; 14851 14852 old_method: 14853 /* 14854 * We keep no precise pacing with the old method 14855 * instead we use the pacer to mitigate bursts. 
14856 */ 14857 if (rack->r_ctl.rc_rack_min_rtt) 14858 srtt = rack->r_ctl.rc_rack_min_rtt; 14859 else 14860 srtt = max(tp->t_srtt, 1); 14861 if (rack->r_ctl.rc_rack_largest_cwnd) 14862 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14863 else 14864 cwnd = rack->r_ctl.cwnd_to_use; 14865 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14866 tr_perms = (cwnd * 1000) / srtt; 14867 if (tr_perms == 0) { 14868 tr_perms = ctf_fixed_maxseg(tp); 14869 } 14870 /* 14871 * Calculate how long this will take to drain, if 14872 * the calculation comes out to zero, thats ok we 14873 * will use send_a_lot to possibly spin around for 14874 * more increasing tot_len_this_send to the point 14875 * that its going to require a pace, or we hit the 14876 * cwnd. Which in that case we are just waiting for 14877 * a ACK. 14878 */ 14879 slot = len / tr_perms; 14880 /* Now do we reduce the time so we don't run dry? */ 14881 if (slot && rack_slot_reduction) { 14882 reduce = (slot / rack_slot_reduction); 14883 if (reduce < slot) { 14884 slot -= reduce; 14885 } else 14886 slot = 0; 14887 } 14888 slot *= HPTS_USEC_IN_MSEC; 14889 if (rack->rc_pace_to_cwnd) { 14890 uint64_t rate_wanted = 0; 14891 14892 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14893 rack->rc_ack_can_sendout_data = 1; 14894 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14895 } else 14896 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14897 } else { 14898 uint64_t bw_est, res, lentim, rate_wanted; 14899 uint32_t orig_val, srtt, segs, oh; 14900 int capped = 0; 14901 int prev_fill; 14902 14903 if ((rack->r_rr_config == 1) && rsm) { 14904 return (rack->r_ctl.rc_min_to); 14905 } 14906 if (rack->use_fixed_rate) { 14907 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14908 } else if ((rack->r_ctl.init_rate == 0) && 14909 #ifdef NETFLIX_PEAKRATE 14910 (rack->rc_tp->t_maxpeakrate == 0) && 14911 #endif 14912 (rack->r_ctl.gp_bw == 0)) { 14913 /* no way to yet do an estimate */ 14914 bw_est = rate_wanted = 0; 14915 } else { 14916 bw_est = rack_get_bw(rack); 14917 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14918 } 14919 if ((bw_est == 0) || (rate_wanted == 0) || 14920 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14921 /* 14922 * No way yet to make a b/w estimate or 14923 * our raise is set incorrectly. 14924 */ 14925 goto old_method; 14926 } 14927 /* We need to account for all the overheads */ 14928 segs = (len + segsiz - 1) / segsiz; 14929 /* 14930 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14931 * and how much data we put in each packet. Yes this 14932 * means we may be off if we are larger than 1500 bytes 14933 * or smaller. But this just makes us more conservative. 14934 */ 14935 if (rack_hw_rate_min && 14936 (bw_est < rack_hw_rate_min)) 14937 can_start_hw_pacing = 0; 14938 if (ETHERNET_SEGMENT_SIZE > segsiz) 14939 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14940 else 14941 oh = 0; 14942 segs *= oh; 14943 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14944 res = lentim / rate_wanted; 14945 slot = (uint32_t)res; 14946 orig_val = rack->r_ctl.rc_pace_max_segs; 14947 if (rack->r_ctl.crte == NULL) { 14948 /* 14949 * Only do this if we are not hardware pacing 14950 * since if we are doing hw-pacing below we will 14951 * set make a call after setting up or changing 14952 * the rate. 
14953 */ 14954 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14955 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14956 /* 14957 * We lost our rate somehow, this can happen 14958 * if the interface changed underneath us. 14959 */ 14960 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14961 rack->r_ctl.crte = NULL; 14962 /* Lets re-allow attempting to setup pacing */ 14963 rack->rack_hdrw_pacing = 0; 14964 rack->rack_attempt_hdwr_pace = 0; 14965 rack_log_hdwr_pacing(rack, 14966 rate_wanted, bw_est, __LINE__, 14967 0, 6); 14968 } 14969 /* Did we change the TSO size, if so log it */ 14970 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14971 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14972 prev_fill = rack->r_via_fill_cw; 14973 if ((rack->rc_pace_to_cwnd) && 14974 (capped == 0) && 14975 (rack->use_fixed_rate == 0) && 14976 (rack->in_probe_rtt == 0) && 14977 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14978 /* 14979 * We want to pace at our rate *or* faster to 14980 * fill the cwnd to the max if its not full. 14981 */ 14982 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14983 } 14984 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14985 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14986 if ((rack->rack_hdw_pace_ena) && 14987 (can_start_hw_pacing > 0) && 14988 (rack->rack_hdrw_pacing == 0) && 14989 (rack->rack_attempt_hdwr_pace == 0)) { 14990 /* 14991 * Lets attempt to turn on hardware pacing 14992 * if we can. 14993 */ 14994 rack->rack_attempt_hdwr_pace = 1; 14995 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14996 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14997 rate_wanted, 14998 RS_PACING_GEQ, 14999 &err, &rack->r_ctl.crte_prev_rate); 15000 if (rack->r_ctl.crte) { 15001 rack->rack_hdrw_pacing = 1; 15002 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15003 0, rack->r_ctl.crte, 15004 NULL); 15005 rack_log_hdwr_pacing(rack, 15006 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15007 err, 0); 15008 rack->r_ctl.last_hw_bw_req = rate_wanted; 15009 } else { 15010 counter_u64_add(rack_hw_pace_init_fail, 1); 15011 } 15012 } else if (rack->rack_hdrw_pacing && 15013 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15014 /* Do we need to adjust our rate? */ 15015 const struct tcp_hwrate_limit_table *nrte; 15016 15017 if (rack->r_up_only && 15018 (rate_wanted < rack->r_ctl.crte->rate)) { 15019 /** 15020 * We have four possible states here 15021 * having to do with the previous time 15022 * and this time. 15023 * previous | this-time 15024 * A) 0 | 0 -- fill_cw not in the picture 15025 * B) 1 | 0 -- we were doing a fill-cw but now are not 15026 * C) 1 | 1 -- all rates from fill_cw 15027 * D) 0 | 1 -- we were doing non-fill and now we are filling 15028 * 15029 * For case A, C and D we don't allow a drop. But for 15030 * case B where we now our on our steady rate we do 15031 * allow a drop. 15032 * 15033 */ 15034 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15035 goto done_w_hdwr; 15036 } 15037 if ((rate_wanted > rack->r_ctl.crte->rate) || 15038 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15039 if (rack_hw_rate_to_low && 15040 (bw_est < rack_hw_rate_to_low)) { 15041 /* 15042 * The pacing rate is too low for hardware, but 15043 * do allow hardware pacing to be restarted. 
15044 */ 15045 rack_log_hdwr_pacing(rack, 15046 bw_est, rack->r_ctl.crte->rate, __LINE__, 15047 0, 5); 15048 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15049 rack->r_ctl.crte = NULL; 15050 rack->rack_attempt_hdwr_pace = 0; 15051 rack->rack_hdrw_pacing = 0; 15052 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15053 goto done_w_hdwr; 15054 } 15055 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15056 rack->rc_tp, 15057 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15058 rate_wanted, 15059 RS_PACING_GEQ, 15060 &err, &rack->r_ctl.crte_prev_rate); 15061 if (nrte == NULL) { 15062 /* Lost the rate */ 15063 rack->rack_hdrw_pacing = 0; 15064 rack->r_ctl.crte = NULL; 15065 rack_log_hdwr_pacing(rack, 15066 rate_wanted, 0, __LINE__, 15067 err, 1); 15068 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15069 counter_u64_add(rack_hw_pace_lost, 1); 15070 } else if (nrte != rack->r_ctl.crte) { 15071 rack->r_ctl.crte = nrte; 15072 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15073 segsiz, 0, 15074 rack->r_ctl.crte, 15075 NULL); 15076 rack_log_hdwr_pacing(rack, 15077 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15078 err, 2); 15079 rack->r_ctl.last_hw_bw_req = rate_wanted; 15080 } 15081 } else { 15082 /* We just need to adjust the segment size */ 15083 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15084 rack_log_hdwr_pacing(rack, 15085 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15086 0, 4); 15087 rack->r_ctl.last_hw_bw_req = rate_wanted; 15088 } 15089 } 15090 } 15091 if ((rack->r_ctl.crte != NULL) && 15092 (rack->r_ctl.crte->rate == rate_wanted)) { 15093 /* 15094 * We need to add a extra if the rates 15095 * are exactly matched. The idea is 15096 * we want the software to make sure the 15097 * queue is empty before adding more, this 15098 * gives us N MSS extra pace times where 15099 * N is our sysctl 15100 */ 15101 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15102 } 15103 done_w_hdwr: 15104 if (rack_limit_time_with_srtt && 15105 (rack->use_fixed_rate == 0) && 15106 #ifdef NETFLIX_PEAKRATE 15107 (rack->rc_tp->t_maxpeakrate == 0) && 15108 #endif 15109 (rack->rack_hdrw_pacing == 0)) { 15110 /* 15111 * Sanity check, we do not allow the pacing delay 15112 * to be longer than the SRTT of the path. If it is 15113 * a slow path, then adding a packet should increase 15114 * the RTT and compensate for this i.e. the srtt will 15115 * be greater so the allowed pacing time will be greater. 15116 * 15117 * Note this restriction is not for where a peak rate 15118 * is set, we are doing fixed pacing or hardware pacing. 15119 */ 15120 if (rack->rc_tp->t_srtt) 15121 srtt = rack->rc_tp->t_srtt; 15122 else 15123 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15124 if (srtt < slot) { 15125 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15126 slot = srtt; 15127 } 15128 } 15129 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15130 } 15131 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15132 /* 15133 * If this rate is seeing enobufs when it 15134 * goes to send then either the nic is out 15135 * of gas or we are mis-estimating the time 15136 * somehow and not letting the queue empty 15137 * completely. Lets add to the pacing time. 
15138 */ 15139 int hw_boost_delay; 15140 15141 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15142 if (hw_boost_delay > rack_enobuf_hw_max) 15143 hw_boost_delay = rack_enobuf_hw_max; 15144 else if (hw_boost_delay < rack_enobuf_hw_min) 15145 hw_boost_delay = rack_enobuf_hw_min; 15146 slot += hw_boost_delay; 15147 } 15148 if (slot) 15149 counter_u64_add(rack_calc_nonzero, 1); 15150 else 15151 counter_u64_add(rack_calc_zero, 1); 15152 return (slot); 15153 } 15154 15155 static void 15156 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15157 tcp_seq startseq, uint32_t sb_offset) 15158 { 15159 struct rack_sendmap *my_rsm = NULL; 15160 struct rack_sendmap fe; 15161 15162 if (tp->t_state < TCPS_ESTABLISHED) { 15163 /* 15164 * We don't start any measurements if we are 15165 * not at least established. 15166 */ 15167 return; 15168 } 15169 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15170 /* 15171 * We will get no more data into the SB 15172 * this means we need to have the data available 15173 * before we start a measurement. 15174 */ 15175 15176 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15177 max(rc_init_window(rack), 15178 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15179 /* Nope not enough data */ 15180 return; 15181 } 15182 } 15183 tp->t_flags |= TF_GPUTINPROG; 15184 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15185 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15186 tp->gput_seq = startseq; 15187 rack->app_limited_needs_set = 0; 15188 if (rack->in_probe_rtt) 15189 rack->measure_saw_probe_rtt = 1; 15190 else if ((rack->measure_saw_probe_rtt) && 15191 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15192 rack->measure_saw_probe_rtt = 0; 15193 if (rack->rc_gp_filled) 15194 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15195 else { 15196 /* Special case initial measurement */ 15197 struct timeval tv; 15198 15199 tp->gput_ts = tcp_get_usecs(&tv); 15200 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15201 } 15202 /* 15203 * We take a guess out into the future, 15204 * if we have no measurement and no 15205 * initial rate, we measure the first 15206 * initial-windows worth of data to 15207 * speed up getting some GP measurement and 15208 * thus start pacing. 15209 */ 15210 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15211 rack->app_limited_needs_set = 1; 15212 tp->gput_ack = startseq + max(rc_init_window(rack), 15213 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15214 rack_log_pacing_delay_calc(rack, 15215 tp->gput_seq, 15216 tp->gput_ack, 15217 0, 15218 tp->gput_ts, 15219 rack->r_ctl.rc_app_limited_cnt, 15220 9, 15221 __LINE__, NULL, 0); 15222 return; 15223 } 15224 if (sb_offset) { 15225 /* 15226 * We are out somewhere in the sb 15227 * can we use the already outstanding data? 15228 */ 15229 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15230 /* 15231 * Yes first one is good and in this case 15232 * the tp->gput_ts is correctly set based on 15233 * the last ack that arrived (no need to 15234 * set things up when an ack comes in). 15235 */ 15236 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15237 if ((my_rsm == NULL) || 15238 (my_rsm->r_rtr_cnt != 1)) { 15239 /* retransmission? */ 15240 goto use_latest; 15241 } 15242 } else { 15243 if (rack->r_ctl.rc_first_appl == NULL) { 15244 /* 15245 * If rc_first_appl is NULL 15246 * then the cnt should be 0. 15247 * This is probably an error, maybe 15248 * a KASSERT would be approprate. 
15249 */ 15250 goto use_latest; 15251 } 15252 /* 15253 * If we have a marker pointer to the last one that is 15254 * app limited we can use that, but we need to set 15255 * things up so that when it gets ack'ed we record 15256 * the ack time (if its not already acked). 15257 */ 15258 rack->app_limited_needs_set = 1; 15259 /* 15260 * We want to get to the rsm that is either 15261 * next with space i.e. over 1 MSS or the one 15262 * after that (after the app-limited). 15263 */ 15264 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15265 rack->r_ctl.rc_first_appl); 15266 if (my_rsm) { 15267 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15268 /* Have to use the next one */ 15269 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15270 my_rsm); 15271 else { 15272 /* Use after the first MSS of it is acked */ 15273 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15274 goto start_set; 15275 } 15276 } 15277 if ((my_rsm == NULL) || 15278 (my_rsm->r_rtr_cnt != 1)) { 15279 /* 15280 * Either its a retransmit or 15281 * the last is the app-limited one. 15282 */ 15283 goto use_latest; 15284 } 15285 } 15286 tp->gput_seq = my_rsm->r_start; 15287 start_set: 15288 if (my_rsm->r_flags & RACK_ACKED) { 15289 /* 15290 * This one has been acked use the arrival ack time 15291 */ 15292 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15293 rack->app_limited_needs_set = 0; 15294 } 15295 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15296 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15297 rack_log_pacing_delay_calc(rack, 15298 tp->gput_seq, 15299 tp->gput_ack, 15300 (uint64_t)my_rsm, 15301 tp->gput_ts, 15302 rack->r_ctl.rc_app_limited_cnt, 15303 9, 15304 __LINE__, NULL, 0); 15305 return; 15306 } 15307 15308 use_latest: 15309 /* 15310 * We don't know how long we may have been 15311 * idle or if this is the first-send. Lets 15312 * setup the flag so we will trim off 15313 * the first ack'd data so we get a true 15314 * measurement. 15315 */ 15316 rack->app_limited_needs_set = 1; 15317 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15318 /* Find this guy so we can pull the send time */ 15319 fe.r_start = startseq; 15320 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15321 if (my_rsm) { 15322 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15323 if (my_rsm->r_flags & RACK_ACKED) { 15324 /* 15325 * Unlikely since its probably what was 15326 * just transmitted (but I am paranoid). 15327 */ 15328 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15329 rack->app_limited_needs_set = 0; 15330 } 15331 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15332 /* This also is unlikely */ 15333 tp->gput_seq = my_rsm->r_start; 15334 } 15335 } else { 15336 /* 15337 * TSNH unless we have some send-map limit, 15338 * and even at that it should not be hitting 15339 * that limit (we should have stopped sending). 
15340 */ 15341 struct timeval tv; 15342 15343 microuptime(&tv); 15344 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15345 } 15346 rack_log_pacing_delay_calc(rack, 15347 tp->gput_seq, 15348 tp->gput_ack, 15349 (uint64_t)my_rsm, 15350 tp->gput_ts, 15351 rack->r_ctl.rc_app_limited_cnt, 15352 9, __LINE__, NULL, 0); 15353 } 15354 15355 static inline uint32_t 15356 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15357 uint32_t avail, int32_t sb_offset) 15358 { 15359 uint32_t len; 15360 uint32_t sendwin; 15361 15362 if (tp->snd_wnd > cwnd_to_use) 15363 sendwin = cwnd_to_use; 15364 else 15365 sendwin = tp->snd_wnd; 15366 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15367 /* We never want to go over our peers rcv-window */ 15368 len = 0; 15369 } else { 15370 uint32_t flight; 15371 15372 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15373 if (flight >= sendwin) { 15374 /* 15375 * We have in flight what we are allowed by cwnd (if 15376 * it was rwnd blocking it would have hit above out 15377 * >= tp->snd_wnd). 15378 */ 15379 return (0); 15380 } 15381 len = sendwin - flight; 15382 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15383 /* We would send too much (beyond the rwnd) */ 15384 len = tp->snd_wnd - ctf_outstanding(tp); 15385 } 15386 if ((len + sb_offset) > avail) { 15387 /* 15388 * We don't have that much in the SB, how much is 15389 * there? 15390 */ 15391 len = avail - sb_offset; 15392 } 15393 } 15394 return (len); 15395 } 15396 15397 static void 15398 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15399 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15400 int rsm_is_null, int optlen, int line, uint16_t mode) 15401 { 15402 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15403 union tcp_log_stackspecific log; 15404 struct timeval tv; 15405 15406 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15407 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15408 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15409 log.u_bbr.flex1 = error; 15410 log.u_bbr.flex2 = flags; 15411 log.u_bbr.flex3 = rsm_is_null; 15412 log.u_bbr.flex4 = ipoptlen; 15413 log.u_bbr.flex5 = tp->rcv_numsacks; 15414 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15415 log.u_bbr.flex7 = optlen; 15416 log.u_bbr.flex8 = rack->r_fsb_inited; 15417 log.u_bbr.applimited = rack->r_fast_output; 15418 log.u_bbr.bw_inuse = rack_get_bw(rack); 15419 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15420 log.u_bbr.cwnd_gain = mode; 15421 log.u_bbr.pkts_out = orig_len; 15422 log.u_bbr.lt_epoch = len; 15423 log.u_bbr.delivered = line; 15424 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15425 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15426 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15427 len, &log, false, NULL, NULL, 0, &tv); 15428 } 15429 } 15430 15431 15432 static struct mbuf * 15433 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15434 struct rack_fast_send_blk *fsb, 15435 int32_t seglimit, int32_t segsize, int hw_tls) 15436 { 15437 #ifdef KERN_TLS 15438 struct ktls_session *tls, *ntls; 15439 struct mbuf *start; 15440 #endif 15441 struct mbuf *m, *n, **np, *smb; 15442 struct mbuf *top; 15443 int32_t off, soff; 15444 int32_t len = *plen; 15445 int32_t fragsize; 15446 int32_t len_cp = 0; 15447 uint32_t mlen, frags; 15448 15449 soff = off = the_off; 15450 smb = m = the_m; 15451 np = ⊤ 15452 top = NULL; 15453 #ifdef KERN_TLS 15454 if (hw_tls && (m->m_flags & M_EXTPG)) 15455 tls = 
m->m_epg_tls; 15456 else 15457 tls = NULL; 15458 start = m; 15459 #endif 15460 while (len > 0) { 15461 if (m == NULL) { 15462 *plen = len_cp; 15463 break; 15464 } 15465 #ifdef KERN_TLS 15466 if (hw_tls) { 15467 if (m->m_flags & M_EXTPG) 15468 ntls = m->m_epg_tls; 15469 else 15470 ntls = NULL; 15471 15472 /* 15473 * Avoid mixing TLS records with handshake 15474 * data or TLS records from different 15475 * sessions. 15476 */ 15477 if (tls != ntls) { 15478 MPASS(m != start); 15479 *plen = len_cp; 15480 break; 15481 } 15482 } 15483 #endif 15484 mlen = min(len, m->m_len - off); 15485 if (seglimit) { 15486 /* 15487 * For M_EXTPG mbufs, add 3 segments 15488 * + 1 in case we are crossing page boundaries 15489 * + 2 in case the TLS hdr/trailer are used 15490 * It is cheaper to just add the segments 15491 * than it is to take the cache miss to look 15492 * at the mbuf ext_pgs state in detail. 15493 */ 15494 if (m->m_flags & M_EXTPG) { 15495 fragsize = min(segsize, PAGE_SIZE); 15496 frags = 3; 15497 } else { 15498 fragsize = segsize; 15499 frags = 0; 15500 } 15501 15502 /* Break if we really can't fit anymore. */ 15503 if ((frags + 1) >= seglimit) { 15504 *plen = len_cp; 15505 break; 15506 } 15507 15508 /* 15509 * Reduce size if you can't copy the whole 15510 * mbuf. If we can't copy the whole mbuf, also 15511 * adjust len so the loop will end after this 15512 * mbuf. 15513 */ 15514 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15515 mlen = (seglimit - frags - 1) * fragsize; 15516 len = mlen; 15517 *plen = len_cp + len; 15518 } 15519 frags += howmany(mlen, fragsize); 15520 if (frags == 0) 15521 frags++; 15522 seglimit -= frags; 15523 KASSERT(seglimit > 0, 15524 ("%s: seglimit went too low", __func__)); 15525 } 15526 n = m_get(M_NOWAIT, m->m_type); 15527 *np = n; 15528 if (n == NULL) 15529 goto nospace; 15530 n->m_len = mlen; 15531 soff += mlen; 15532 len_cp += n->m_len; 15533 if (m->m_flags & (M_EXT|M_EXTPG)) { 15534 n->m_data = m->m_data + off; 15535 mb_dupcl(n, m); 15536 } else { 15537 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15538 (u_int)n->m_len); 15539 } 15540 len -= n->m_len; 15541 off = 0; 15542 m = m->m_next; 15543 np = &n->m_next; 15544 if (len || (soff == smb->m_len)) { 15545 /* 15546 * We have more so we move forward or 15547 * we have consumed the entire mbuf and 15548 * len has fell to 0. 15549 */ 15550 soff = 0; 15551 smb = m; 15552 } 15553 15554 } 15555 if (fsb != NULL) { 15556 fsb->m = smb; 15557 fsb->off = soff; 15558 if (smb) { 15559 /* 15560 * Save off the size of the mbuf. We do 15561 * this so that we can recognize when it 15562 * has been trimmed by sbcut() as acks 15563 * come in. 15564 */ 15565 fsb->o_m_len = smb->m_len; 15566 } else { 15567 /* 15568 * This is the case where the next mbuf went to NULL. This 15569 * means with this copy we have sent everything in the sb. 15570 * In theory we could clear the fast_output flag, but lets 15571 * not since its possible that we could get more added 15572 * and acks that call the extend function which would let 15573 * us send more. 15574 */ 15575 fsb->o_m_len = 0; 15576 } 15577 } 15578 return (top); 15579 nospace: 15580 if (top) 15581 m_freem(top); 15582 return (NULL); 15583 15584 } 15585 15586 /* 15587 * This is a copy of m_copym(), taking the TSO segment size/limit 15588 * constraints into account, and advancing the sndptr as it goes. 
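 The caller in rack_fast_output() below uses it roughly as: m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, &s_mb, &s_soff); on return len holds the number of bytes actually copied, and s_mb/s_soff record the starting mbuf and offset so they can later be handed to rack_log_output().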
15589 */ 15590 static struct mbuf * 15591 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15592 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15593 { 15594 struct mbuf *m, *n; 15595 int32_t soff; 15596 15597 soff = rack->r_ctl.fsb.off; 15598 m = rack->r_ctl.fsb.m; 15599 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15600 /* 15601 * The mbuf had the front of it chopped off by an ack; 15602 * we need to adjust the soff/off by that difference. 15603 */ 15604 uint32_t delta; 15605 15606 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15607 soff -= delta; 15608 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15609 /* 15610 * The mbuf was expanded, probably by 15611 * an m_compress. Just update o_m_len. 15612 */ 15613 rack->r_ctl.fsb.o_m_len = m->m_len; 15614 } 15615 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15616 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15617 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15618 __FUNCTION__, 15619 rack, *plen, m, m->m_len)); 15620 /* Save off the right location before we copy and advance */ 15621 *s_soff = soff; 15622 *s_mb = rack->r_ctl.fsb.m; 15623 n = rack_fo_base_copym(m, soff, plen, 15624 &rack->r_ctl.fsb, 15625 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15626 return (n); 15627 } 15628 15629 static int 15630 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15631 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15632 { 15633 /* 15634 * Enter the fast retransmit path. We are given that a sched_pin is 15635 * in place (if accounting is compiled in) and the cycle count taken 15636 * at the entry is in ts_val. The concept here is that the rsm 15637 * now holds the mbuf offsets and such, so we can directly transmit 15638 * without a lot of overhead; the len field is already set to 15639 * prohibit us from sending too much (usually it's 1 MSS).
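 It returns 0 once the segment has been handed to ip_output()/ip6_output(), and -1 via the failed: label when the fast path cannot be used (presumably letting the caller fall back to the regular output path).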
15640 */ 15641 struct ip *ip = NULL; 15642 struct udphdr *udp = NULL; 15643 struct tcphdr *th = NULL; 15644 struct mbuf *m = NULL; 15645 struct inpcb *inp; 15646 uint8_t *cpto; 15647 struct tcp_log_buffer *lgb; 15648 #ifdef TCP_ACCOUNTING 15649 uint64_t crtsc; 15650 int cnt_thru = 1; 15651 #endif 15652 struct tcpopt to; 15653 u_char opt[TCP_MAXOLEN]; 15654 uint32_t hdrlen, optlen; 15655 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0; 15656 uint32_t us_cts; 15657 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15658 uint32_t if_hw_tsomaxsegsize; 15659 15660 #ifdef INET6 15661 struct ip6_hdr *ip6 = NULL; 15662 15663 if (rack->r_is_v6) { 15664 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15665 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15666 } else 15667 #endif /* INET6 */ 15668 { 15669 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15670 hdrlen = sizeof(struct tcpiphdr); 15671 } 15672 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15673 goto failed; 15674 } 15675 if (doing_tlp) { 15676 /* Its a TLP add the flag, it may already be there but be sure */ 15677 rsm->r_flags |= RACK_TLP; 15678 } else { 15679 /* If it was a TLP it is not not on this retransmit */ 15680 rsm->r_flags &= ~RACK_TLP; 15681 } 15682 startseq = rsm->r_start; 15683 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15684 inp = rack->rc_inp; 15685 to.to_flags = 0; 15686 flags = tcp_outflags[tp->t_state]; 15687 if (flags & (TH_SYN|TH_RST)) { 15688 goto failed; 15689 } 15690 if (rsm->r_flags & RACK_HAS_FIN) { 15691 /* We can't send a FIN here */ 15692 goto failed; 15693 } 15694 if (flags & TH_FIN) { 15695 /* We never send a FIN */ 15696 flags &= ~TH_FIN; 15697 } 15698 if (tp->t_flags & TF_RCVD_TSTMP) { 15699 to.to_tsval = ms_cts + tp->ts_offset; 15700 to.to_tsecr = tp->ts_recent; 15701 to.to_flags = TOF_TS; 15702 } 15703 optlen = tcp_addoptions(&to, opt); 15704 hdrlen += optlen; 15705 udp = rack->r_ctl.fsb.udp; 15706 if (udp) 15707 hdrlen += sizeof(struct udphdr); 15708 if (rack->r_ctl.rc_pace_max_segs) 15709 max_val = rack->r_ctl.rc_pace_max_segs; 15710 else if (rack->rc_user_set_max_segs) 15711 max_val = rack->rc_user_set_max_segs * segsiz; 15712 else 15713 max_val = len; 15714 if ((tp->t_flags & TF_TSO) && 15715 V_tcp_do_tso && 15716 (len > segsiz) && 15717 (tp->t_port == 0)) 15718 tso = 1; 15719 #ifdef INET6 15720 if (MHLEN < hdrlen + max_linkhdr) 15721 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15722 else 15723 #endif 15724 m = m_gethdr(M_NOWAIT, MT_DATA); 15725 if (m == NULL) 15726 goto failed; 15727 m->m_data += max_linkhdr; 15728 m->m_len = hdrlen; 15729 th = rack->r_ctl.fsb.th; 15730 /* Establish the len to send */ 15731 if (len > max_val) 15732 len = max_val; 15733 if ((tso) && (len + optlen > tp->t_maxseg)) { 15734 uint32_t if_hw_tsomax; 15735 int32_t max_len; 15736 15737 /* extract TSO information */ 15738 if_hw_tsomax = tp->t_tsomax; 15739 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15740 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15741 /* 15742 * Check if we should limit by maximum payload 15743 * length: 15744 */ 15745 if (if_hw_tsomax != 0) { 15746 /* compute maximum TSO length */ 15747 max_len = (if_hw_tsomax - hdrlen - 15748 max_linkhdr); 15749 if (max_len <= 0) { 15750 goto failed; 15751 } else if (len > max_len) { 15752 len = max_len; 15753 } 15754 } 15755 if (len <= segsiz) { 15756 /* 15757 * In case there are too many small fragments don't 15758 * use TSO: 15759 */ 15760 tso = 0; 15761 } 15762 } else { 15763 tso = 0; 15764 } 15765 if ((tso == 0) && 
(len > segsiz)) 15766 len = segsiz; 15767 us_cts = tcp_get_usecs(tv); 15768 if ((len == 0) || 15769 (len <= MHLEN - hdrlen - max_linkhdr)) { 15770 goto failed; 15771 } 15772 th->th_seq = htonl(rsm->r_start); 15773 th->th_ack = htonl(tp->rcv_nxt); 15774 /* 15775 * The PUSH bit should only be applied 15776 * if the full retransmission is made. If 15777 * we are sending less than that, this is the 15778 * left-hand edge and should not have 15779 * the PUSH bit. 15780 */ 15781 if ((rsm->r_flags & RACK_HAD_PUSH) && 15782 (len == (rsm->r_end - rsm->r_start))) 15783 flags |= TH_PUSH; 15784 th->th_flags = flags; 15785 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15786 if (th->th_win == 0) { 15787 tp->t_sndzerowin++; 15788 tp->t_flags |= TF_RXWIN0SENT; 15789 } else 15790 tp->t_flags &= ~TF_RXWIN0SENT; 15791 if (rsm->r_flags & RACK_TLP) { 15792 /* 15793 * A TLP should not count in the retransmit count, but 15794 * in its own bin. 15795 */ 15796 counter_u64_add(rack_tlp_retran, 1); 15797 counter_u64_add(rack_tlp_retran_bytes, len); 15798 } else { 15799 tp->t_sndrexmitpack++; 15800 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15801 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15802 } 15803 #ifdef STATS 15804 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15805 len); 15806 #endif 15807 if (rsm->m == NULL) 15808 goto failed; 15809 if (rsm->orig_m_len != rsm->m->m_len) { 15810 /* Fix up the orig_m_len and possibly the mbuf offset */ 15811 rack_adjust_orig_mlen(rsm); 15812 } 15813 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15814 if (len <= segsiz) { 15815 /* 15816 * Must have run out of mbufs for the copy; 15817 * shorten it to no longer need TSO. Let's 15818 * not put on sendalot since we are low on 15819 * mbufs.
15820 */ 15821 tso = 0; 15822 } 15823 if ((m->m_next == NULL) || (len <= 0)){ 15824 goto failed; 15825 } 15826 if (udp) { 15827 if (rack->r_is_v6) 15828 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15829 else 15830 ulen = hdrlen + len - sizeof(struct ip); 15831 udp->uh_ulen = htons(ulen); 15832 } 15833 m->m_pkthdr.rcvif = (struct ifnet *)0; 15834 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15835 #ifdef INET6 15836 if (rack->r_is_v6) { 15837 if (tp->t_port) { 15838 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15839 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15840 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15841 th->th_sum = htons(0); 15842 UDPSTAT_INC(udps_opackets); 15843 } else { 15844 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15845 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15846 th->th_sum = in6_cksum_pseudo(ip6, 15847 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15848 0); 15849 } 15850 } 15851 #endif 15852 #if defined(INET6) && defined(INET) 15853 else 15854 #endif 15855 #ifdef INET 15856 { 15857 if (tp->t_port) { 15858 m->m_pkthdr.csum_flags = CSUM_UDP; 15859 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15860 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 15861 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15862 th->th_sum = htons(0); 15863 UDPSTAT_INC(udps_opackets); 15864 } else { 15865 m->m_pkthdr.csum_flags = CSUM_TCP; 15866 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15867 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15868 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15869 IPPROTO_TCP + len + optlen)); 15870 } 15871 /* IP version must be set here for ipv4/ipv6 checking later */ 15872 KASSERT(ip->ip_v == IPVERSION, 15873 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15874 } 15875 #endif 15876 if (tso) { 15877 KASSERT(len > tp->t_maxseg - optlen, 15878 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15879 m->m_pkthdr.csum_flags |= CSUM_TSO; 15880 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15881 } 15882 #ifdef INET6 15883 if (rack->r_is_v6) { 15884 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15885 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15886 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15887 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15888 else 15889 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15890 } 15891 #endif 15892 #if defined(INET) && defined(INET6) 15893 else 15894 #endif 15895 #ifdef INET 15896 { 15897 ip->ip_len = htons(m->m_pkthdr.len); 15898 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15899 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15900 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15901 if (tp->t_port == 0 || len < V_tcp_minmss) { 15902 ip->ip_off |= htons(IP_DF); 15903 } 15904 } else { 15905 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15906 } 15907 } 15908 #endif 15909 /* Time to copy in our header */ 15910 cpto = mtod(m, uint8_t *); 15911 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15912 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15913 if (optlen) { 15914 bcopy(opt, th + 1, optlen); 15915 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15916 } else { 15917 th->th_off = sizeof(struct tcphdr) >> 2; 15918 } 15919 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15920 union tcp_log_stackspecific log; 15921 15922 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15923 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15924 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15925 if (rack->rack_no_prr) 15926 
log.u_bbr.flex1 = 0; 15927 else 15928 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15929 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15930 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15931 log.u_bbr.flex4 = max_val; 15932 log.u_bbr.flex5 = 0; 15933 /* Save off the early/late values */ 15934 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15935 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15936 log.u_bbr.bw_inuse = rack_get_bw(rack); 15937 if (doing_tlp == 0) 15938 log.u_bbr.flex8 = 1; 15939 else 15940 log.u_bbr.flex8 = 2; 15941 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15942 log.u_bbr.flex7 = 55; 15943 log.u_bbr.pkts_out = tp->t_maxseg; 15944 log.u_bbr.timeStamp = cts; 15945 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15946 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15947 log.u_bbr.delivered = 0; 15948 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15949 len, &log, false, NULL, NULL, 0, tv); 15950 } else 15951 lgb = NULL; 15952 #ifdef INET6 15953 if (rack->r_is_v6) { 15954 error = ip6_output(m, NULL, 15955 &inp->inp_route6, 15956 0, NULL, NULL, inp); 15957 } 15958 #endif 15959 #if defined(INET) && defined(INET6) 15960 else 15961 #endif 15962 #ifdef INET 15963 { 15964 error = ip_output(m, NULL, 15965 &inp->inp_route, 15966 0, 0, inp); 15967 } 15968 #endif 15969 m = NULL; 15970 if (lgb) { 15971 lgb->tlb_errno = error; 15972 lgb = NULL; 15973 } 15974 if (error) { 15975 goto failed; 15976 } 15977 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15978 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15979 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15980 rack->rc_tlp_in_progress = 1; 15981 rack->r_ctl.rc_tlp_cnt_out++; 15982 } 15983 if (error == 0) { 15984 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15985 if (doing_tlp) { 15986 rack->rc_last_sent_tlp_past_cumack = 0; 15987 rack->rc_last_sent_tlp_seq_valid = 1; 15988 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15989 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15990 } 15991 } 15992 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15993 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15994 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15995 rack->r_ctl.retran_during_recovery += len; 15996 { 15997 int idx; 15998 15999 idx = (len / segsiz) + 3; 16000 if (idx >= TCP_MSS_ACCT_ATIMER) 16001 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16002 else 16003 counter_u64_add(rack_out_size[idx], 1); 16004 } 16005 if (tp->t_rtttime == 0) { 16006 tp->t_rtttime = ticks; 16007 tp->t_rtseq = startseq; 16008 KMOD_TCPSTAT_INC(tcps_segstimed); 16009 } 16010 counter_u64_add(rack_fto_rsm_send, 1); 16011 if (error && (error == ENOBUFS)) { 16012 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16013 if (rack->rc_enobuf < 0x7f) 16014 rack->rc_enobuf++; 16015 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16016 slot = 10 * HPTS_USEC_IN_MSEC; 16017 } else 16018 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16019 if ((slot == 0) || 16020 (rack->rc_always_pace == 0) || 16021 (rack->r_rr_config == 1)) { 16022 /* 16023 * We have no pacing set or we 16024 * are using old-style rack or 16025 * we are overriden to use the old 1ms pacing. 
16026 */ 16027 slot = rack->r_ctl.rc_min_to; 16028 } 16029 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16030 if (rack->r_must_retran) { 16031 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 16032 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 16033 /* 16034 * We have retransmitted all we need. 16035 */ 16036 rack->r_must_retran = 0; 16037 rack->r_ctl.rc_out_at_rto = 0; 16038 } 16039 } 16040 #ifdef TCP_ACCOUNTING 16041 crtsc = get_cyclecount(); 16042 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16043 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16044 } 16045 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16046 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16047 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16048 } 16049 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16050 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16051 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16052 } 16053 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16054 sched_unpin(); 16055 #endif 16056 return (0); 16057 failed: 16058 if (m) 16059 m_free(m); 16060 return (-1); 16061 } 16062 16063 static void 16064 rack_sndbuf_autoscale(struct tcp_rack *rack) 16065 { 16066 /* 16067 * Automatic sizing of send socket buffer. Often the send buffer 16068 * size is not optimally adjusted to the actual network conditions 16069 * at hand (delay bandwidth product). Setting the buffer size too 16070 * small limits throughput on links with high bandwidth and high 16071 * delay (eg. trans-continental/oceanic links). Setting the 16072 * buffer size too big consumes too much real kernel memory, 16073 * especially with many connections on busy servers. 16074 * 16075 * The criteria to step up the send buffer one notch are: 16076 * 1. receive window of remote host is larger than send buffer 16077 * (with a fudge factor of 5/4th); 16078 * 2. send buffer is filled to 7/8th with data (so we actually 16079 * have data to make use of it); 16080 * 3. send buffer fill has not hit maximal automatic size; 16081 * 4. our send window (slow start and cogestion controlled) is 16082 * larger than sent but unacknowledged data in send buffer. 16083 * 16084 * Note that the rack version moves things much faster since 16085 * we want to avoid hitting cache lines in the rack_fast_output() 16086 * path so this is called much less often and thus moves 16087 * the SB forward by a percentage. 
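 For example (illustrative numbers): with rack_autosndbuf_inc set to 20 (percent) and sb_hiwat at 65536 bytes, scaleup is (20 * 65536) / 100 = 13107, floored at V_tcp_autosndbuf_inc, then added to sb_hiwat for a new target of 78643 bytes, which is finally capped at V_tcp_autosndbuf_max before sbreserve_locked() is called.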
16088 */ 16089 struct socket *so; 16090 struct tcpcb *tp; 16091 uint32_t sendwin, scaleup; 16092 16093 tp = rack->rc_tp; 16094 so = rack->rc_inp->inp_socket; 16095 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16096 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16097 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16098 sbused(&so->so_snd) >= 16099 (so->so_snd.sb_hiwat / 8 * 7) && 16100 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16101 sendwin >= (sbused(&so->so_snd) - 16102 (tp->snd_nxt - tp->snd_una))) { 16103 if (rack_autosndbuf_inc) 16104 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16105 else 16106 scaleup = V_tcp_autosndbuf_inc; 16107 if (scaleup < V_tcp_autosndbuf_inc) 16108 scaleup = V_tcp_autosndbuf_inc; 16109 scaleup += so->so_snd.sb_hiwat; 16110 if (scaleup > V_tcp_autosndbuf_max) 16111 scaleup = V_tcp_autosndbuf_max; 16112 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 16113 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16114 } 16115 } 16116 } 16117 16118 static int 16119 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16120 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16121 { 16122 /* 16123 * Enter to do fast output. We are given that the sched_pin is 16124 * in place (if accounting is compiled in) and the cycle count taken 16125 * at entry is in place in ts_val. The idea here is that 16126 * we know how many more bytes needs to be sent (presumably either 16127 * during pacing or to fill the cwnd and that was greater than 16128 * the max-burst). We have how much to send and all the info we 16129 * need to just send. 16130 */ 16131 struct ip *ip = NULL; 16132 struct udphdr *udp = NULL; 16133 struct tcphdr *th = NULL; 16134 struct mbuf *m, *s_mb; 16135 struct inpcb *inp; 16136 uint8_t *cpto; 16137 struct tcp_log_buffer *lgb; 16138 #ifdef TCP_ACCOUNTING 16139 uint64_t crtsc; 16140 #endif 16141 struct tcpopt to; 16142 u_char opt[TCP_MAXOLEN]; 16143 uint32_t hdrlen, optlen; 16144 int cnt_thru = 1; 16145 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0; 16146 uint32_t us_cts, s_soff; 16147 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16148 uint32_t if_hw_tsomaxsegsize; 16149 uint16_t add_flag = RACK_SENT_FP; 16150 #ifdef INET6 16151 struct ip6_hdr *ip6 = NULL; 16152 16153 if (rack->r_is_v6) { 16154 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16155 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16156 } else 16157 #endif /* INET6 */ 16158 { 16159 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16160 hdrlen = sizeof(struct tcpiphdr); 16161 } 16162 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16163 m = NULL; 16164 goto failed; 16165 } 16166 startseq = tp->snd_max; 16167 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16168 inp = rack->rc_inp; 16169 len = rack->r_ctl.fsb.left_to_send; 16170 to.to_flags = 0; 16171 flags = rack->r_ctl.fsb.tcp_flags; 16172 if (tp->t_flags & TF_RCVD_TSTMP) { 16173 to.to_tsval = ms_cts + tp->ts_offset; 16174 to.to_tsecr = tp->ts_recent; 16175 to.to_flags = TOF_TS; 16176 } 16177 optlen = tcp_addoptions(&to, opt); 16178 hdrlen += optlen; 16179 udp = rack->r_ctl.fsb.udp; 16180 if (udp) 16181 hdrlen += sizeof(struct udphdr); 16182 if (rack->r_ctl.rc_pace_max_segs) 16183 max_val = rack->r_ctl.rc_pace_max_segs; 16184 else if (rack->rc_user_set_max_segs) 16185 max_val = rack->rc_user_set_max_segs * segsiz; 16186 else 16187 max_val = len; 16188 if ((tp->t_flags & TF_TSO) && 16189 
V_tcp_do_tso && 16190 (len > segsiz) && 16191 (tp->t_port == 0)) 16192 tso = 1; 16193 again: 16194 #ifdef INET6 16195 if (MHLEN < hdrlen + max_linkhdr) 16196 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16197 else 16198 #endif 16199 m = m_gethdr(M_NOWAIT, MT_DATA); 16200 if (m == NULL) 16201 goto failed; 16202 m->m_data += max_linkhdr; 16203 m->m_len = hdrlen; 16204 th = rack->r_ctl.fsb.th; 16205 /* Establish the len to send */ 16206 if (len > max_val) 16207 len = max_val; 16208 if ((tso) && (len + optlen > tp->t_maxseg)) { 16209 uint32_t if_hw_tsomax; 16210 int32_t max_len; 16211 16212 /* extract TSO information */ 16213 if_hw_tsomax = tp->t_tsomax; 16214 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16215 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16216 /* 16217 * Check if we should limit by maximum payload 16218 * length: 16219 */ 16220 if (if_hw_tsomax != 0) { 16221 /* compute maximum TSO length */ 16222 max_len = (if_hw_tsomax - hdrlen - 16223 max_linkhdr); 16224 if (max_len <= 0) { 16225 goto failed; 16226 } else if (len > max_len) { 16227 len = max_len; 16228 } 16229 } 16230 if (len <= segsiz) { 16231 /* 16232 * In case there are too many small fragments don't 16233 * use TSO: 16234 */ 16235 tso = 0; 16236 } 16237 } else { 16238 tso = 0; 16239 } 16240 if ((tso == 0) && (len > segsiz)) 16241 len = segsiz; 16242 us_cts = tcp_get_usecs(tv); 16243 if ((len == 0) || 16244 (len <= MHLEN - hdrlen - max_linkhdr)) { 16245 goto failed; 16246 } 16247 sb_offset = tp->snd_max - tp->snd_una; 16248 th->th_seq = htonl(tp->snd_max); 16249 th->th_ack = htonl(tp->rcv_nxt); 16250 th->th_flags = flags; 16251 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16252 if (th->th_win == 0) { 16253 tp->t_sndzerowin++; 16254 tp->t_flags |= TF_RXWIN0SENT; 16255 } else 16256 tp->t_flags &= ~TF_RXWIN0SENT; 16257 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16258 KMOD_TCPSTAT_INC(tcps_sndpack); 16259 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16260 #ifdef STATS 16261 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16262 len); 16263 #endif 16264 if (rack->r_ctl.fsb.m == NULL) 16265 goto failed; 16266 16267 /* s_mb and s_soff are saved for rack_log_output */ 16268 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16269 &s_mb, &s_soff); 16270 if (len <= segsiz) { 16271 /* 16272 * Must have ran out of mbufs for the copy 16273 * shorten it to no longer need tso. Lets 16274 * not put on sendalot since we are low on 16275 * mbufs. 16276 */ 16277 tso = 0; 16278 } 16279 if (rack->r_ctl.fsb.rfo_apply_push && 16280 (len == rack->r_ctl.fsb.left_to_send)) { 16281 th->th_flags |= TH_PUSH; 16282 add_flag |= RACK_HAD_PUSH; 16283 } 16284 if ((m->m_next == NULL) || (len <= 0)){ 16285 goto failed; 16286 } 16287 if (udp) { 16288 if (rack->r_is_v6) 16289 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16290 else 16291 ulen = hdrlen + len - sizeof(struct ip); 16292 udp->uh_ulen = htons(ulen); 16293 } 16294 m->m_pkthdr.rcvif = (struct ifnet *)0; 16295 if (tp->t_state == TCPS_ESTABLISHED && 16296 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16297 /* 16298 * If the peer has ECN, mark data packets with ECN capable 16299 * transmission (ECT). Ignore pure ack packets, 16300 * retransmissions. 
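 ECT(0) is the 0b10 codepoint of the two ECN bits, so for IPv6 it is shifted into the traffic class (IPTOS_ECN_ECT0 << 20) and for IPv4 it is OR'd directly into ip_tos, as done below.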
16301 */ 16302 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) { 16303 #ifdef INET6 16304 if (rack->r_is_v6) 16305 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 16306 else 16307 #endif 16308 ip->ip_tos |= IPTOS_ECN_ECT0; 16309 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 16310 /* 16311 * Reply with proper ECN notifications. 16312 * Only set CWR on new data segments. 16313 */ 16314 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 16315 flags |= TH_CWR; 16316 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 16317 } 16318 } 16319 if (tp->t_flags2 & TF2_ECN_SND_ECE) 16320 flags |= TH_ECE; 16321 } 16322 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16323 #ifdef INET6 16324 if (rack->r_is_v6) { 16325 if (tp->t_port) { 16326 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16327 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16328 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16329 th->th_sum = htons(0); 16330 UDPSTAT_INC(udps_opackets); 16331 } else { 16332 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16333 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16334 th->th_sum = in6_cksum_pseudo(ip6, 16335 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16336 0); 16337 } 16338 } 16339 #endif 16340 #if defined(INET6) && defined(INET) 16341 else 16342 #endif 16343 #ifdef INET 16344 { 16345 if (tp->t_port) { 16346 m->m_pkthdr.csum_flags = CSUM_UDP; 16347 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16348 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16349 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16350 th->th_sum = htons(0); 16351 UDPSTAT_INC(udps_opackets); 16352 } else { 16353 m->m_pkthdr.csum_flags = CSUM_TCP; 16354 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16355 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16356 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16357 IPPROTO_TCP + len + optlen)); 16358 } 16359 /* IP version must be set here for ipv4/ipv6 checking later */ 16360 KASSERT(ip->ip_v == IPVERSION, 16361 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16362 } 16363 #endif 16364 if (tso) { 16365 KASSERT(len > tp->t_maxseg - optlen, 16366 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16367 m->m_pkthdr.csum_flags |= CSUM_TSO; 16368 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16369 } 16370 #ifdef INET6 16371 if (rack->r_is_v6) { 16372 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16373 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16374 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16375 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16376 else 16377 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16378 } 16379 #endif 16380 #if defined(INET) && defined(INET6) 16381 else 16382 #endif 16383 #ifdef INET 16384 { 16385 ip->ip_len = htons(m->m_pkthdr.len); 16386 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16387 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16388 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16389 if (tp->t_port == 0 || len < V_tcp_minmss) { 16390 ip->ip_off |= htons(IP_DF); 16391 } 16392 } else { 16393 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16394 } 16395 } 16396 #endif 16397 /* Time to copy in our header */ 16398 cpto = mtod(m, uint8_t *); 16399 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16400 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16401 if (optlen) { 16402 bcopy(opt, th + 1, optlen); 16403 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16404 } else { 16405 th->th_off = sizeof(struct tcphdr) >> 2; 16406 } 16407 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16408 union 
tcp_log_stackspecific log; 16409 16410 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16411 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 16412 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 16413 if (rack->rack_no_prr) 16414 log.u_bbr.flex1 = 0; 16415 else 16416 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16417 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16418 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16419 log.u_bbr.flex4 = max_val; 16420 log.u_bbr.flex5 = 0; 16421 /* Save off the early/late values */ 16422 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16423 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16424 log.u_bbr.bw_inuse = rack_get_bw(rack); 16425 log.u_bbr.flex8 = 0; 16426 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16427 log.u_bbr.flex7 = 44; 16428 log.u_bbr.pkts_out = tp->t_maxseg; 16429 log.u_bbr.timeStamp = cts; 16430 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16431 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16432 log.u_bbr.delivered = 0; 16433 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16434 len, &log, false, NULL, NULL, 0, tv); 16435 } else 16436 lgb = NULL; 16437 #ifdef INET6 16438 if (rack->r_is_v6) { 16439 error = ip6_output(m, NULL, 16440 &inp->inp_route6, 16441 0, NULL, NULL, inp); 16442 } 16443 #endif 16444 #if defined(INET) && defined(INET6) 16445 else 16446 #endif 16447 #ifdef INET 16448 { 16449 error = ip_output(m, NULL, 16450 &inp->inp_route, 16451 0, 0, inp); 16452 } 16453 #endif 16454 if (lgb) { 16455 lgb->tlb_errno = error; 16456 lgb = NULL; 16457 } 16458 if (error) { 16459 *send_err = error; 16460 m = NULL; 16461 goto failed; 16462 } 16463 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16464 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16465 m = NULL; 16466 if (tp->snd_una == tp->snd_max) { 16467 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16468 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16469 tp->t_acktime = ticks; 16470 } 16471 if (error == 0) 16472 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16473 16474 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16475 tot_len += len; 16476 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16477 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16478 tp->snd_max += len; 16479 tp->snd_nxt = tp->snd_max; 16480 { 16481 int idx; 16482 16483 idx = (len / segsiz) + 3; 16484 if (idx >= TCP_MSS_ACCT_ATIMER) 16485 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16486 else 16487 counter_u64_add(rack_out_size[idx], 1); 16488 } 16489 if (len <= rack->r_ctl.fsb.left_to_send) 16490 rack->r_ctl.fsb.left_to_send -= len; 16491 else 16492 rack->r_ctl.fsb.left_to_send = 0; 16493 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16494 rack->r_fast_output = 0; 16495 rack->r_ctl.fsb.left_to_send = 0; 16496 /* At the end of fast_output scale up the sb */ 16497 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16498 rack_sndbuf_autoscale(rack); 16499 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16500 } 16501 if (tp->t_rtttime == 0) { 16502 tp->t_rtttime = ticks; 16503 tp->t_rtseq = startseq; 16504 KMOD_TCPSTAT_INC(tcps_segstimed); 16505 } 16506 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16507 (max_val > len) && 16508 (tso == 0)) { 16509 max_val -= len; 16510 len = segsiz; 16511 th = rack->r_ctl.fsb.th; 16512 cnt_thru++; 16513 goto again; 16514 } 16515 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16516 counter_u64_add(rack_fto_send, 1); 16517 slot = 
rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16518 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16519 #ifdef TCP_ACCOUNTING 16520 crtsc = get_cyclecount(); 16521 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16522 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16523 } 16524 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16525 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16526 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16527 } 16528 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16529 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16530 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16531 } 16532 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16533 sched_unpin(); 16534 #endif 16535 return (0); 16536 failed: 16537 if (m) 16538 m_free(m); 16539 rack->r_fast_output = 0; 16540 return (-1); 16541 } 16542 16543 static int 16544 rack_output(struct tcpcb *tp) 16545 { 16546 struct socket *so; 16547 uint32_t recwin; 16548 uint32_t sb_offset, s_moff = 0; 16549 int32_t len, flags, error = 0; 16550 struct mbuf *m, *s_mb = NULL; 16551 struct mbuf *mb; 16552 uint32_t if_hw_tsomaxsegcount = 0; 16553 uint32_t if_hw_tsomaxsegsize; 16554 int32_t segsiz, minseg; 16555 long tot_len_this_send = 0; 16556 #ifdef INET 16557 struct ip *ip = NULL; 16558 #endif 16559 #ifdef TCPDEBUG 16560 struct ipovly *ipov = NULL; 16561 #endif 16562 struct udphdr *udp = NULL; 16563 struct tcp_rack *rack; 16564 struct tcphdr *th; 16565 uint8_t pass = 0; 16566 uint8_t mark = 0; 16567 uint8_t wanted_cookie = 0; 16568 u_char opt[TCP_MAXOLEN]; 16569 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16570 uint32_t rack_seq; 16571 16572 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16573 unsigned ipsec_optlen = 0; 16574 16575 #endif 16576 int32_t idle, sendalot; 16577 int32_t sub_from_prr = 0; 16578 volatile int32_t sack_rxmit; 16579 struct rack_sendmap *rsm = NULL; 16580 int32_t tso, mtu; 16581 struct tcpopt to; 16582 int32_t slot = 0; 16583 int32_t sup_rack = 0; 16584 uint32_t cts, ms_cts, delayed, early; 16585 uint16_t add_flag = RACK_SENT_SP; 16586 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16587 uint8_t hpts_calling, doing_tlp = 0; 16588 uint32_t cwnd_to_use, pace_max_seg; 16589 int32_t do_a_prefetch = 0; 16590 int32_t prefetch_rsm = 0; 16591 int32_t orig_len = 0; 16592 struct timeval tv; 16593 int32_t prefetch_so_done = 0; 16594 struct tcp_log_buffer *lgb; 16595 struct inpcb *inp; 16596 struct sockbuf *sb; 16597 uint64_t ts_val = 0; 16598 #ifdef TCP_ACCOUNTING 16599 uint64_t crtsc; 16600 #endif 16601 #ifdef INET6 16602 struct ip6_hdr *ip6 = NULL; 16603 int32_t isipv6; 16604 #endif 16605 uint8_t filled_all = 0; 16606 bool hw_tls = false; 16607 16608 /* setup and take the cache hits here */ 16609 rack = (struct tcp_rack *)tp->t_fb_ptr; 16610 #ifdef TCP_ACCOUNTING 16611 sched_pin(); 16612 ts_val = get_cyclecount(); 16613 #endif 16614 hpts_calling = rack->rc_inp->inp_hpts_calls; 16615 NET_EPOCH_ASSERT(); 16616 INP_WLOCK_ASSERT(rack->rc_inp); 16617 #ifdef TCP_OFFLOAD 16618 if (tp->t_flags & TF_TOE) { 16619 #ifdef TCP_ACCOUNTING 16620 sched_unpin(); 16621 #endif 16622 return (tcp_offload_output(tp)); 16623 } 16624 #endif 16625 /* 16626 * For TFO connections in SYN_RECEIVED, only allow the initial 16627 * SYN|ACK and those sent by the retransmit timer. 
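 Retransmit-timer driven sends arrive here with rack->r_ctl.rc_resend set, which is why the check below requires rc_resend to be NULL before it suppresses output.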
16628 */ 16629 if (IS_FASTOPEN(tp->t_flags) && 16630 (tp->t_state == TCPS_SYN_RECEIVED) && 16631 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16632 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16633 #ifdef TCP_ACCOUNTING 16634 sched_unpin(); 16635 #endif 16636 return (0); 16637 } 16638 #ifdef INET6 16639 if (rack->r_state) { 16640 /* Use the cache line loaded if possible */ 16641 isipv6 = rack->r_is_v6; 16642 } else { 16643 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16644 } 16645 #endif 16646 early = 0; 16647 cts = tcp_get_usecs(&tv); 16648 ms_cts = tcp_tv_to_mssectick(&tv); 16649 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16650 rack->rc_inp->inp_in_hpts) { 16651 /* 16652 * We are on the hpts for some timer but not hptsi output. 16653 * Remove from the hpts unconditionally. 16654 */ 16655 rack_timer_cancel(tp, rack, cts, __LINE__); 16656 } 16657 /* Are we pacing and late? */ 16658 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16659 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16660 /* We are delayed */ 16661 delayed = cts - rack->r_ctl.rc_last_output_to; 16662 } else { 16663 delayed = 0; 16664 } 16665 /* Do the timers, which may override the pacer */ 16666 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16667 if (rack_process_timers(tp, rack, cts, hpts_calling, &doing_tlp)) { 16668 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16669 #ifdef TCP_ACCOUNTING 16670 sched_unpin(); 16671 #endif 16672 return (0); 16673 } 16674 } 16675 if (rack->rc_in_persist) { 16676 if (rack->rc_inp->inp_in_hpts == 0) { 16677 /* Timer is not running */ 16678 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16679 } 16680 #ifdef TCP_ACCOUNTING 16681 sched_unpin(); 16682 #endif 16683 return (0); 16684 } 16685 if ((rack->r_timer_override) || 16686 (rack->rc_ack_can_sendout_data) || 16687 (delayed) || 16688 (tp->t_state < TCPS_ESTABLISHED)) { 16689 rack->rc_ack_can_sendout_data = 0; 16690 if (rack->rc_inp->inp_in_hpts) 16691 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 16692 } else if (rack->rc_inp->inp_in_hpts) { 16693 /* 16694 * On the hpts you can't pass even if ACKNOW is on, we will 16695 * when the hpts fires. 
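 ("hpts" here is the high precision timer system, tcp_hpts.) The TCP_ACCOUNTING block just below charges this early return to SND_BLOCKED, and it is also counted in rack_out_size[TCP_MSS_ACCT_INPACE].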
16696 */ 16697 #ifdef TCP_ACCOUNTING 16698 crtsc = get_cyclecount(); 16699 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16700 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16701 } 16702 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16703 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16704 tp->tcp_cnt_counters[SND_BLOCKED]++; 16705 } 16706 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16707 sched_unpin(); 16708 #endif 16709 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16710 return (0); 16711 } 16712 rack->rc_inp->inp_hpts_calls = 0; 16713 /* Finish out both pacing early and late accounting */ 16714 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16715 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16716 early = rack->r_ctl.rc_last_output_to - cts; 16717 } else 16718 early = 0; 16719 if (delayed) { 16720 rack->r_ctl.rc_agg_delayed += delayed; 16721 rack->r_late = 1; 16722 } else if (early) { 16723 rack->r_ctl.rc_agg_early += early; 16724 rack->r_early = 1; 16725 } 16726 /* Now that early/late accounting is done turn off the flag */ 16727 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16728 rack->r_wanted_output = 0; 16729 rack->r_timer_override = 0; 16730 if ((tp->t_state != rack->r_state) && 16731 TCPS_HAVEESTABLISHED(tp->t_state)) { 16732 rack_set_state(tp, rack); 16733 } 16734 if ((rack->r_fast_output) && 16735 (doing_tlp == 0) && 16736 (tp->rcv_numsacks == 0)) { 16737 int ret; 16738 16739 error = 0; 16740 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16741 if (ret >= 0) 16742 return(ret); 16743 else if (error) { 16744 inp = rack->rc_inp; 16745 so = inp->inp_socket; 16746 sb = &so->so_snd; 16747 goto nomore; 16748 } 16749 } 16750 inp = rack->rc_inp; 16751 /* 16752 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16753 * only allow the initial SYN or SYN|ACK and those sent 16754 * by the retransmit timer. 16755 */ 16756 if (IS_FASTOPEN(tp->t_flags) && 16757 ((tp->t_state == TCPS_SYN_RECEIVED) || 16758 (tp->t_state == TCPS_SYN_SENT)) && 16759 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16760 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16761 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16762 so = inp->inp_socket; 16763 sb = &so->so_snd; 16764 goto just_return_nolock; 16765 } 16766 /* 16767 * Determine length of data that should be transmitted, and flags 16768 * that will be used. If there is some data or critical controls 16769 * (SYN, RST) to send, then transmit; otherwise, investigate 16770 * further. 
16771 */ 16772 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16773 if (tp->t_idle_reduce) { 16774 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16775 rack_cc_after_idle(rack, tp); 16776 } 16777 tp->t_flags &= ~TF_LASTIDLE; 16778 if (idle) { 16779 if (tp->t_flags & TF_MORETOCOME) { 16780 tp->t_flags |= TF_LASTIDLE; 16781 idle = 0; 16782 } 16783 } 16784 if ((tp->snd_una == tp->snd_max) && 16785 rack->r_ctl.rc_went_idle_time && 16786 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16787 idle = cts - rack->r_ctl.rc_went_idle_time; 16788 if (idle > rack_min_probertt_hold) { 16789 /* Count as a probe rtt */ 16790 if (rack->in_probe_rtt == 0) { 16791 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16792 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16793 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16794 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16795 } else { 16796 rack_exit_probertt(rack, cts); 16797 } 16798 } 16799 idle = 0; 16800 } 16801 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16802 rack_init_fsb_block(tp, rack); 16803 again: 16804 /* 16805 * If we've recently taken a timeout, snd_max will be greater than 16806 * snd_nxt. There may be SACK information that allows us to avoid 16807 * resending already delivered data. Adjust snd_nxt accordingly. 16808 */ 16809 sendalot = 0; 16810 cts = tcp_get_usecs(&tv); 16811 ms_cts = tcp_tv_to_mssectick(&tv); 16812 tso = 0; 16813 mtu = 0; 16814 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16815 minseg = segsiz; 16816 if (rack->r_ctl.rc_pace_max_segs == 0) 16817 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16818 else 16819 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16820 sb_offset = tp->snd_max - tp->snd_una; 16821 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16822 flags = tcp_outflags[tp->t_state]; 16823 while (rack->rc_free_cnt < rack_free_cache) { 16824 rsm = rack_alloc(rack); 16825 if (rsm == NULL) { 16826 if (inp->inp_hpts_calls) 16827 /* Retry in a ms */ 16828 slot = (1 * HPTS_USEC_IN_MSEC); 16829 so = inp->inp_socket; 16830 sb = &so->so_snd; 16831 goto just_return_nolock; 16832 } 16833 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16834 rack->rc_free_cnt++; 16835 rsm = NULL; 16836 } 16837 if (inp->inp_hpts_calls) 16838 inp->inp_hpts_calls = 0; 16839 sack_rxmit = 0; 16840 len = 0; 16841 rsm = NULL; 16842 if (flags & TH_RST) { 16843 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16844 so = inp->inp_socket; 16845 sb = &so->so_snd; 16846 goto send; 16847 } 16848 if (rack->r_ctl.rc_resend) { 16849 /* Retransmit timer */ 16850 rsm = rack->r_ctl.rc_resend; 16851 rack->r_ctl.rc_resend = NULL; 16852 len = rsm->r_end - rsm->r_start; 16853 sack_rxmit = 1; 16854 sendalot = 0; 16855 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16856 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16857 __func__, __LINE__, 16858 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16859 sb_offset = rsm->r_start - tp->snd_una; 16860 if (len >= segsiz) 16861 len = segsiz; 16862 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16863 /* We have a retransmit that takes precedence */ 16864 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16865 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16866 /* Enter recovery if not induced by a time-out */ 16867 rack->r_ctl.rc_rsm_start = rsm->r_start; 16868 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 16869 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 16870 
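			/*
			 * Snapshot where recovery starts (rsm start, cwnd and
			 * ssthresh) before rack_cong_signal() processes the
			 * dup-ack congestion signal below.
			 */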
			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una);
		}
#ifdef INVARIANTS
		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
			panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n",
			      tp, rack, rsm, rsm->r_start, tp->snd_una);
		}
#endif
		len = rsm->r_end - rsm->r_start;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		sendalot = 0;
		if (len >= segsiz)
			len = segsiz;
		if (len > 0) {
			sack_rxmit = 1;
			KMOD_TCPSTAT_INC(tcps_sack_rexmits);
			KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, segsiz));
			counter_u64_add(rack_rtm_prr_retran, 1);
		}
	} else if (rack->r_ctl.rc_tlpsend) {
		/* Tail loss probe */
		long cwin;
		long tlen;

		/*
		 * Check if we can do a TLP with a RACK'd packet; this can
		 * happen if we are not doing the rack cheat and we skipped
		 * to a TLP and it went off.
		 */
		rsm = rack->r_ctl.rc_tlpsend;
		/* We are doing a TLP, make sure the flag is present */
		rsm->r_flags |= RACK_TLP;
		rack->r_ctl.rc_tlpsend = NULL;
		sack_rxmit = 1;
		tlen = rsm->r_end - rsm->r_start;
		if (tlen > segsiz)
			tlen = segsiz;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
			("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p",
			 __func__, __LINE__,
			 rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		cwin = min(tp->snd_wnd, tlen);
		len = cwin;
	}
	if (rack->r_must_retran &&
	    (rsm == NULL)) {
		/*
		 * Non-SACK and we had an RTO or MTU change; we need to
		 * retransmit until we reach the former snd_max
		 * (rack->r_ctl.rc_snd_max_at_rto).
		 */
		if (SEQ_GT(tp->snd_max, tp->snd_una)) {
			int sendwin, flight;

			sendwin = min(tp->snd_wnd, tp->snd_cwnd);
			flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
			if (flight >= sendwin) {
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
			KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack));
			if (rsm == NULL) {
				/* TSNH */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
				so = inp->inp_socket;
				sb = &so->so_snd;
				goto just_return_nolock;
			}
			sack_rxmit = 1;
			len = rsm->r_end - rsm->r_start;
			sendalot = 0;
			sb_offset = rsm->r_start - tp->snd_una;
			if (len >= segsiz)
				len = segsiz;
		} else {
			/* We must be done if there is nothing outstanding */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		}
	}
	/*
	 * Enforce a connection sendmap count limit if set
	 * as long as we are not retransmitting.
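	 * (A retransmit reuses an existing sendmap entry, so the limit only
	 * matters for new data.)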
16964 */ 16965 if ((rsm == NULL) && 16966 (rack->do_detection == 0) && 16967 (V_tcp_map_entries_limit > 0) && 16968 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 16969 counter_u64_add(rack_to_alloc_limited, 1); 16970 if (!rack->alloc_limit_reported) { 16971 rack->alloc_limit_reported = 1; 16972 counter_u64_add(rack_alloc_limited_conns, 1); 16973 } 16974 so = inp->inp_socket; 16975 sb = &so->so_snd; 16976 goto just_return_nolock; 16977 } 16978 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 16979 /* we are retransmitting the fin */ 16980 len--; 16981 if (len) { 16982 /* 16983 * When retransmitting data do *not* include the 16984 * FIN. This could happen from a TLP probe. 16985 */ 16986 flags &= ~TH_FIN; 16987 } 16988 } 16989 #ifdef INVARIANTS 16990 /* For debugging */ 16991 rack->r_ctl.rc_rsm_at_retran = rsm; 16992 #endif 16993 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 16994 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 16995 int ret; 16996 16997 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 16998 if (ret == 0) 16999 return (0); 17000 } 17001 so = inp->inp_socket; 17002 sb = &so->so_snd; 17003 if (do_a_prefetch == 0) { 17004 kern_prefetch(sb, &do_a_prefetch); 17005 do_a_prefetch = 1; 17006 } 17007 #ifdef NETFLIX_SHARED_CWND 17008 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17009 rack->rack_enable_scwnd) { 17010 /* We are doing cwnd sharing */ 17011 if (rack->gp_ready && 17012 (rack->rack_attempted_scwnd == 0) && 17013 (rack->r_ctl.rc_scw == NULL) && 17014 tp->t_lib) { 17015 /* The pcbid is in, lets make an attempt */ 17016 counter_u64_add(rack_try_scwnd, 1); 17017 rack->rack_attempted_scwnd = 1; 17018 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17019 &rack->r_ctl.rc_scw_index, 17020 segsiz); 17021 } 17022 if (rack->r_ctl.rc_scw && 17023 (rack->rack_scwnd_is_idle == 1) && 17024 sbavail(&so->so_snd)) { 17025 /* we are no longer out of data */ 17026 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17027 rack->rack_scwnd_is_idle = 0; 17028 } 17029 if (rack->r_ctl.rc_scw) { 17030 /* First lets update and get the cwnd */ 17031 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17032 rack->r_ctl.rc_scw_index, 17033 tp->snd_cwnd, tp->snd_wnd, segsiz); 17034 } 17035 } 17036 #endif 17037 /* 17038 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17039 * state flags. 17040 */ 17041 if (tp->t_flags & TF_NEEDFIN) 17042 flags |= TH_FIN; 17043 if (tp->t_flags & TF_NEEDSYN) 17044 flags |= TH_SYN; 17045 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17046 void *end_rsm; 17047 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17048 if (end_rsm) 17049 kern_prefetch(end_rsm, &prefetch_rsm); 17050 prefetch_rsm = 1; 17051 } 17052 SOCKBUF_LOCK(sb); 17053 /* 17054 * If snd_nxt == snd_max and we have transmitted a FIN, the 17055 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17056 * negative length. This can also occur when TCP opens up its 17057 * congestion window while receiving additional duplicate acks after 17058 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17059 * the fast-retransmit. 17060 * 17061 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17062 * set to snd_una, the sb_offset will be 0, and the length may wind 17063 * up 0. 17064 * 17065 * If sack_rxmit is true we are retransmitting from the scoreboard 17066 * in which case len is already set. 
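	 * Otherwise len is derived below from what PRR, the congestion
	 * window and the peer's receive window allow.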
17067 */ 17068 if ((sack_rxmit == 0) && 17069 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17070 uint32_t avail; 17071 17072 avail = sbavail(sb); 17073 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17074 sb_offset = tp->snd_nxt - tp->snd_una; 17075 else 17076 sb_offset = 0; 17077 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17078 if (rack->r_ctl.rc_tlp_new_data) { 17079 /* TLP is forcing out new data */ 17080 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17081 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17082 } 17083 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17084 if (tp->snd_wnd > sb_offset) 17085 len = tp->snd_wnd - sb_offset; 17086 else 17087 len = 0; 17088 } else { 17089 len = rack->r_ctl.rc_tlp_new_data; 17090 } 17091 } else { 17092 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17093 } 17094 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17095 /* 17096 * For prr=off, we need to send only 1 MSS 17097 * at a time. We do this because another sack could 17098 * be arriving that causes us to send retransmits and 17099 * we don't want to be on a long pace due to a larger send 17100 * that keeps us from sending out the retransmit. 17101 */ 17102 len = segsiz; 17103 } 17104 } else { 17105 uint32_t outstanding; 17106 /* 17107 * We are inside of a Fast recovery episode, this 17108 * is caused by a SACK or 3 dup acks. At this point 17109 * we have sent all the retransmissions and we rely 17110 * on PRR to dictate what we will send in the form of 17111 * new data. 17112 */ 17113 17114 outstanding = tp->snd_max - tp->snd_una; 17115 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17116 if (tp->snd_wnd > outstanding) { 17117 len = tp->snd_wnd - outstanding; 17118 /* Check to see if we have the data */ 17119 if ((sb_offset + len) > avail) { 17120 /* It does not all fit */ 17121 if (avail > sb_offset) 17122 len = avail - sb_offset; 17123 else 17124 len = 0; 17125 } 17126 } else { 17127 len = 0; 17128 } 17129 } else if (avail > sb_offset) { 17130 len = avail - sb_offset; 17131 } else { 17132 len = 0; 17133 } 17134 if (len > 0) { 17135 if (len > rack->r_ctl.rc_prr_sndcnt) { 17136 len = rack->r_ctl.rc_prr_sndcnt; 17137 } 17138 if (len > 0) { 17139 sub_from_prr = 1; 17140 counter_u64_add(rack_rtm_prr_newdata, 1); 17141 } 17142 } 17143 if (len > segsiz) { 17144 /* 17145 * We should never send more than a MSS when 17146 * retransmitting or sending new data in prr 17147 * mode unless the override flag is on. Most 17148 * likely the PRR algorithm is not going to 17149 * let us send a lot as well :-) 17150 */ 17151 if (rack->r_ctl.rc_prr_sendalot == 0) { 17152 len = segsiz; 17153 } 17154 } else if (len < segsiz) { 17155 /* 17156 * Do we send any? The idea here is if the 17157 * send empty's the socket buffer we want to 17158 * do it. However if not then lets just wait 17159 * for our prr_sndcnt to get bigger. 17160 */ 17161 long leftinsb; 17162 17163 leftinsb = sbavail(sb) - sb_offset; 17164 if (leftinsb > len) { 17165 /* This send does not empty the sb */ 17166 len = 0; 17167 } 17168 } 17169 } 17170 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17171 /* 17172 * If you have not established 17173 * and are not doing FAST OPEN 17174 * no data please. 
17175 */ 17176 if ((sack_rxmit == 0) && 17177 (!IS_FASTOPEN(tp->t_flags))){ 17178 len = 0; 17179 sb_offset = 0; 17180 } 17181 } 17182 if (prefetch_so_done == 0) { 17183 kern_prefetch(so, &prefetch_so_done); 17184 prefetch_so_done = 1; 17185 } 17186 /* 17187 * Lop off SYN bit if it has already been sent. However, if this is 17188 * SYN-SENT state and if segment contains data and if we don't know 17189 * that foreign host supports TAO, suppress sending segment. 17190 */ 17191 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17192 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17193 /* 17194 * When sending additional segments following a TFO SYN|ACK, 17195 * do not include the SYN bit. 17196 */ 17197 if (IS_FASTOPEN(tp->t_flags) && 17198 (tp->t_state == TCPS_SYN_RECEIVED)) 17199 flags &= ~TH_SYN; 17200 } 17201 /* 17202 * Be careful not to send data and/or FIN on SYN segments. This 17203 * measure is needed to prevent interoperability problems with not 17204 * fully conformant TCP implementations. 17205 */ 17206 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17207 len = 0; 17208 flags &= ~TH_FIN; 17209 } 17210 /* 17211 * On TFO sockets, ensure no data is sent in the following cases: 17212 * 17213 * - When retransmitting SYN|ACK on a passively-created socket 17214 * 17215 * - When retransmitting SYN on an actively created socket 17216 * 17217 * - When sending a zero-length cookie (cookie request) on an 17218 * actively created socket 17219 * 17220 * - When the socket is in the CLOSED state (RST is being sent) 17221 */ 17222 if (IS_FASTOPEN(tp->t_flags) && 17223 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17224 ((tp->t_state == TCPS_SYN_SENT) && 17225 (tp->t_tfo_client_cookie_len == 0)) || 17226 (flags & TH_RST))) { 17227 sack_rxmit = 0; 17228 len = 0; 17229 } 17230 /* Without fast-open there should never be data sent on a SYN */ 17231 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17232 tp->snd_nxt = tp->iss; 17233 len = 0; 17234 } 17235 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17236 /* We only send 1 MSS if we have a DSACK block */ 17237 add_flag |= RACK_SENT_W_DSACK; 17238 len = segsiz; 17239 } 17240 orig_len = len; 17241 if (len <= 0) { 17242 /* 17243 * If FIN has been sent but not acked, but we haven't been 17244 * called to retransmit, len will be < 0. Otherwise, window 17245 * shrank after we sent into it. If window shrank to 0, 17246 * cancel pending retransmit, pull snd_nxt back to (closed) 17247 * window, and set the persist timer if it isn't already 17248 * going. If the window didn't close completely, just wait 17249 * for an ACK. 17250 * 17251 * We also do a general check here to ensure that we will 17252 * set the persist timer when we have data to send, but a 17253 * 0-byte window. This makes sure the persist timer is set 17254 * even if the packet hits one of the "goto send" lines 17255 * below. 17256 */ 17257 len = 0; 17258 if ((tp->snd_wnd == 0) && 17259 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17260 (tp->snd_una == tp->snd_max) && 17261 (sb_offset < (int)sbavail(sb))) { 17262 rack_enter_persist(tp, rack, cts); 17263 } 17264 } else if ((rsm == NULL) && 17265 (doing_tlp == 0) && 17266 (len < pace_max_seg)) { 17267 /* 17268 * We are not sending a maximum sized segment for 17269 * some reason. Should we not send anything (think 17270 * sws or persists)? 
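		 * Each branch below can zero len: a tiny receive window, a
		 * cwnd that cannot fit a minimum pacing segment, a filled
		 * send window, or hardware pacing holding off until a full
		 * pace_max_seg burst is possible.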
17271 */ 17272 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17273 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17274 (len < minseg) && 17275 (len < (int)(sbavail(sb) - sb_offset))) { 17276 /* 17277 * Here the rwnd is less than 17278 * the minimum pacing size, this is not a retransmit, 17279 * we are established and 17280 * the send is not the last in the socket buffer 17281 * we send nothing, and we may enter persists 17282 * if nothing is outstanding. 17283 */ 17284 len = 0; 17285 if (tp->snd_max == tp->snd_una) { 17286 /* 17287 * Nothing out we can 17288 * go into persists. 17289 */ 17290 rack_enter_persist(tp, rack, cts); 17291 } 17292 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17293 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17294 (len < (int)(sbavail(sb) - sb_offset)) && 17295 (len < minseg)) { 17296 /* 17297 * Here we are not retransmitting, and 17298 * the cwnd is not so small that we could 17299 * not send at least a min size (rxt timer 17300 * not having gone off), We have 2 segments or 17301 * more already in flight, its not the tail end 17302 * of the socket buffer and the cwnd is blocking 17303 * us from sending out a minimum pacing segment size. 17304 * Lets not send anything. 17305 */ 17306 len = 0; 17307 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17308 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17309 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17310 (len < (int)(sbavail(sb) - sb_offset)) && 17311 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17312 /* 17313 * Here we have a send window but we have 17314 * filled it up and we can't send another pacing segment. 17315 * We also have in flight more than 2 segments 17316 * and we are not completing the sb i.e. we allow 17317 * the last bytes of the sb to go out even if 17318 * its not a full pacing segment. 17319 */ 17320 len = 0; 17321 } else if ((rack->r_ctl.crte != NULL) && 17322 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17323 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17324 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17325 (len < (int)(sbavail(sb) - sb_offset))) { 17326 /* 17327 * Here we are doing hardware pacing, this is not a TLP, 17328 * we are not sending a pace max segment size, there is rwnd 17329 * room to send at least N pace_max_seg, the cwnd is greater 17330 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17331 * more segments in flight and its not the tail of the socket buffer. 17332 * 17333 * We don't want to send instead we need to get more ack's in to 17334 * allow us to send a full pacing segment. Normally, if we are pacing 17335 * about the right speed, we should have finished our pacing 17336 * send as most of the acks have come back if we are at the 17337 * right rate. This is a bit fuzzy since return path delay 17338 * can delay the acks, which is why we want to make sure we 17339 * have cwnd space to have a bit more than a max pace segments in flight. 17340 * 17341 * If we have not gotten our acks back we are pacing at too high a 17342 * rate delaying will not hurt and will bring our GP estimate down by 17343 * injecting the delay. If we don't do this we will send 17344 * 2 MSS out in response to the acks being clocked in which 17345 * defeats the point of hw-pacing (i.e. to help us get 17346 * larger TSO's out). 17347 */ 17348 len = 0; 17349 17350 } 17351 17352 } 17353 /* len will be >= 0 after this point. 
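	 * (The KASSERT that follows enforces this under INVARIANTS kernels.)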
 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
	rack_sndbuf_autoscale(rack);
	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state. The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
	 * options prevent using TSO. With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets. This
	 * makes it impossible to transmit any options which vary per
	 * generated segment or packet.
	 *
	 * IPv4 handling has a clear separation of ip options and ip header
	 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
	 * the right thing below to provide length of just ip options and thus
	 * checking for ipoptlen is enough to decide if ip options are present.
	 */
	ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6))
		ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb);
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4))
		ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb);
#endif /* INET */
#endif

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	ipoptlen += ipsec_optlen;
#endif
	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
	    (tp->t_port == 0) &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
	    ipoptlen == 0)
		tso = 1;
	{
		uint32_t outstanding;

		outstanding = tp->snd_max - tp->snd_una;
		if (tp->t_flags & TF_SENTFIN) {
			/*
			 * If we sent a FIN, snd_max is 1 higher than
			 * snd_una.
			 */
			outstanding--;
		}
		if (sack_rxmit) {
			if ((rsm->r_flags & RACK_HAS_FIN) == 0)
				flags &= ~TH_FIN;
		} else {
			if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
			    sbused(sb)))
				flags &= ~TH_FIN;
		}
	}
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);

	/*
	 * Sender silly window avoidance. We transmit under the following
	 * conditions when len is non-zero:
	 *
	 * - We have a full segment (or more with TSO)
	 * - This is the last buffer in a write()/send() and we are either
	 *   idle or running NODELAY
	 * - We've timed out (e.g. persist timer)
	 * - We have more than 1/2 the maximum send window's worth of data
	 *   (the receiver may be limiting the window size)
	 * - We need to retransmit
	 */
	if (len) {
		if (len >= segsiz) {
			goto send;
		}
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause us
		 * to flush a buffer queued with moretocome.
XXX 17441 * 17442 */ 17443 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17444 (idle || (tp->t_flags & TF_NODELAY)) && 17445 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17446 (tp->t_flags & TF_NOPUSH) == 0) { 17447 pass = 2; 17448 goto send; 17449 } 17450 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17451 pass = 22; 17452 goto send; 17453 } 17454 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17455 pass = 4; 17456 goto send; 17457 } 17458 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17459 pass = 5; 17460 goto send; 17461 } 17462 if (sack_rxmit) { 17463 pass = 6; 17464 goto send; 17465 } 17466 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17467 (ctf_outstanding(tp) < (segsiz * 2))) { 17468 /* 17469 * We have less than two MSS outstanding (delayed ack) 17470 * and our rwnd will not let us send a full sized 17471 * MSS. Lets go ahead and let this small segment 17472 * out because we want to try to have at least two 17473 * packets inflight to not be caught by delayed ack. 17474 */ 17475 pass = 12; 17476 goto send; 17477 } 17478 } 17479 /* 17480 * Sending of standalone window updates. 17481 * 17482 * Window updates are important when we close our window due to a 17483 * full socket buffer and are opening it again after the application 17484 * reads data from it. Once the window has opened again and the 17485 * remote end starts to send again the ACK clock takes over and 17486 * provides the most current window information. 17487 * 17488 * We must avoid the silly window syndrome whereas every read from 17489 * the receive buffer, no matter how small, causes a window update 17490 * to be sent. We also should avoid sending a flurry of window 17491 * updates when the socket buffer had queued a lot of data and the 17492 * application is doing small reads. 17493 * 17494 * Prevent a flurry of pointless window updates by only sending an 17495 * update when we can increase the advertized window by more than 17496 * 1/4th of the socket buffer capacity. When the buffer is getting 17497 * full or is very small be more aggressive and send an update 17498 * whenever we can increase by two mss sized segments. In all other 17499 * situations the ACK's to new incoming data will carry further 17500 * window increases. 17501 * 17502 * Don't send an independent window update if a delayed ACK is 17503 * pending (it will get piggy-backed on it) or the remote side 17504 * already has done a half-close and won't send more data. Skip 17505 * this if the connection is in T/TCP half-open state. 17506 */ 17507 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17508 !(tp->t_flags & TF_DELACK) && 17509 !TCPS_HAVERCVDFIN(tp->t_state)) { 17510 /* 17511 * "adv" is the amount we could increase the window, taking 17512 * into account that we are limited by TCP_MAXWIN << 17513 * tp->rcv_scale. 17514 */ 17515 int32_t adv; 17516 int oldwin; 17517 17518 adv = recwin; 17519 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17520 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17521 if (adv > oldwin) 17522 adv -= oldwin; 17523 else { 17524 /* We can't increase the window */ 17525 adv = 0; 17526 } 17527 } else 17528 oldwin = 0; 17529 17530 /* 17531 * If the new window size ends up being the same as or less 17532 * than the old size when it is scaled, then don't force 17533 * a window update. 
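		 * e.g. with rcv_scale == 7, an increase that does not move
		 * the scaled value by at least one unit (128 bytes) is
		 * invisible to the peer and not worth a segment of its own.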
17534 */ 17535 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17536 goto dontupdate; 17537 17538 if (adv >= (int32_t)(2 * segsiz) && 17539 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17540 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17541 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17542 pass = 7; 17543 goto send; 17544 } 17545 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17546 pass = 23; 17547 goto send; 17548 } 17549 } 17550 dontupdate: 17551 17552 /* 17553 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17554 * is also a catch-all for the retransmit timer timeout case. 17555 */ 17556 if (tp->t_flags & TF_ACKNOW) { 17557 pass = 8; 17558 goto send; 17559 } 17560 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17561 pass = 9; 17562 goto send; 17563 } 17564 /* 17565 * If our state indicates that FIN should be sent and we have not 17566 * yet done so, then we need to send. 17567 */ 17568 if ((flags & TH_FIN) && 17569 (tp->snd_nxt == tp->snd_una)) { 17570 pass = 11; 17571 goto send; 17572 } 17573 /* 17574 * No reason to send a segment, just return. 17575 */ 17576 just_return: 17577 SOCKBUF_UNLOCK(sb); 17578 just_return_nolock: 17579 { 17580 int app_limited = CTF_JR_SENT_DATA; 17581 17582 if (tot_len_this_send > 0) { 17583 /* Make sure snd_nxt is up to max */ 17584 rack->r_ctl.fsb.recwin = recwin; 17585 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17586 if ((error == 0) && 17587 rack_use_rfo && 17588 ((flags & (TH_SYN|TH_FIN)) == 0) && 17589 (ipoptlen == 0) && 17590 (tp->snd_nxt == tp->snd_max) && 17591 (tp->rcv_numsacks == 0) && 17592 rack->r_fsb_inited && 17593 TCPS_HAVEESTABLISHED(tp->t_state) && 17594 (rack->r_must_retran == 0) && 17595 ((tp->t_flags & TF_NEEDFIN) == 0) && 17596 (len > 0) && (orig_len > 0) && 17597 (orig_len > len) && 17598 ((orig_len - len) >= segsiz) && 17599 ((optlen == 0) || 17600 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17601 /* We can send at least one more MSS using our fsb */ 17602 17603 rack->r_fast_output = 1; 17604 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17605 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17606 rack->r_ctl.fsb.tcp_flags = flags; 17607 rack->r_ctl.fsb.left_to_send = orig_len - len; 17608 if (hw_tls) 17609 rack->r_ctl.fsb.hw_tls = 1; 17610 else 17611 rack->r_ctl.fsb.hw_tls = 0; 17612 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17613 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17614 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17615 (tp->snd_max - tp->snd_una))); 17616 if (rack->r_ctl.fsb.left_to_send < segsiz) 17617 rack->r_fast_output = 0; 17618 else { 17619 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17620 rack->r_ctl.fsb.rfo_apply_push = 1; 17621 else 17622 rack->r_ctl.fsb.rfo_apply_push = 0; 17623 } 17624 } else 17625 rack->r_fast_output = 0; 17626 17627 17628 rack_log_fsb(rack, tp, so, flags, 17629 ipoptlen, orig_len, len, 0, 17630 1, optlen, __LINE__, 1); 17631 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17632 tp->snd_nxt = tp->snd_max; 17633 } else { 17634 int end_window = 0; 17635 uint32_t seq = tp->gput_ack; 17636 17637 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17638 if (rsm) { 17639 /* 17640 * Mark the last sent that we just-returned (hinting 17641 * that delayed ack may play a role in any rtt measurement). 
17642 */ 17643 rsm->r_just_ret = 1; 17644 } 17645 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17646 rack->r_ctl.rc_agg_delayed = 0; 17647 rack->r_early = 0; 17648 rack->r_late = 0; 17649 rack->r_ctl.rc_agg_early = 0; 17650 if ((ctf_outstanding(tp) + 17651 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17652 minseg)) >= tp->snd_wnd) { 17653 /* We are limited by the rwnd */ 17654 app_limited = CTF_JR_RWND_LIMITED; 17655 if (IN_FASTRECOVERY(tp->t_flags)) 17656 rack->r_ctl.rc_prr_sndcnt = 0; 17657 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17658 /* We are limited by whats available -- app limited */ 17659 app_limited = CTF_JR_APP_LIMITED; 17660 if (IN_FASTRECOVERY(tp->t_flags)) 17661 rack->r_ctl.rc_prr_sndcnt = 0; 17662 } else if ((idle == 0) && 17663 ((tp->t_flags & TF_NODELAY) == 0) && 17664 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17665 (len < segsiz)) { 17666 /* 17667 * No delay is not on and the 17668 * user is sending less than 1MSS. This 17669 * brings out SWS avoidance so we 17670 * don't send. Another app-limited case. 17671 */ 17672 app_limited = CTF_JR_APP_LIMITED; 17673 } else if (tp->t_flags & TF_NOPUSH) { 17674 /* 17675 * The user has requested no push of 17676 * the last segment and we are 17677 * at the last segment. Another app 17678 * limited case. 17679 */ 17680 app_limited = CTF_JR_APP_LIMITED; 17681 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17682 /* Its the cwnd */ 17683 app_limited = CTF_JR_CWND_LIMITED; 17684 } else if (IN_FASTRECOVERY(tp->t_flags) && 17685 (rack->rack_no_prr == 0) && 17686 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17687 app_limited = CTF_JR_PRR; 17688 } else { 17689 /* Now why here are we not sending? */ 17690 #ifdef NOW 17691 #ifdef INVARIANTS 17692 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17693 #endif 17694 #endif 17695 app_limited = CTF_JR_ASSESSING; 17696 } 17697 /* 17698 * App limited in some fashion, for our pacing GP 17699 * measurements we don't want any gap (even cwnd). 17700 * Close down the measurement window. 17701 */ 17702 if (rack_cwnd_block_ends_measure && 17703 ((app_limited == CTF_JR_CWND_LIMITED) || 17704 (app_limited == CTF_JR_PRR))) { 17705 /* 17706 * The reason we are not sending is 17707 * the cwnd (or prr). We have been configured 17708 * to end the measurement window in 17709 * this case. 17710 */ 17711 end_window = 1; 17712 } else if (rack_rwnd_block_ends_measure && 17713 (app_limited == CTF_JR_RWND_LIMITED)) { 17714 /* 17715 * We are rwnd limited and have been 17716 * configured to end the measurement 17717 * window in this case. 17718 */ 17719 end_window = 1; 17720 } else if (app_limited == CTF_JR_APP_LIMITED) { 17721 /* 17722 * A true application limited period, we have 17723 * ran out of data. 17724 */ 17725 end_window = 1; 17726 } else if (app_limited == CTF_JR_ASSESSING) { 17727 /* 17728 * In the assessing case we hit the end of 17729 * the if/else and had no known reason 17730 * This will panic us under invariants.. 17731 * 17732 * If we get this out in logs we need to 17733 * investagate which reason we missed. 17734 */ 17735 end_window = 1; 17736 } 17737 if (end_window) { 17738 uint8_t log = 0; 17739 17740 /* Adjust the Gput measurement */ 17741 if ((tp->t_flags & TF_GPUTINPROG) && 17742 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17743 tp->gput_ack = tp->snd_max; 17744 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17745 /* 17746 * There is not enough to measure. 
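				 * (A goodput sample needs a window of at
				 * least MIN_GP_WIN segments.)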
17747 */ 17748 tp->t_flags &= ~TF_GPUTINPROG; 17749 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17750 rack->r_ctl.rc_gp_srtt /*flex1*/, 17751 tp->gput_seq, 17752 0, 0, 18, __LINE__, NULL, 0); 17753 } else 17754 log = 1; 17755 } 17756 /* Mark the last packet has app limited */ 17757 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17758 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17759 if (rack->r_ctl.rc_app_limited_cnt == 0) 17760 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17761 else { 17762 /* 17763 * Go out to the end app limited and mark 17764 * this new one as next and move the end_appl up 17765 * to this guy. 17766 */ 17767 if (rack->r_ctl.rc_end_appl) 17768 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17769 rack->r_ctl.rc_end_appl = rsm; 17770 } 17771 rsm->r_flags |= RACK_APP_LIMITED; 17772 rack->r_ctl.rc_app_limited_cnt++; 17773 } 17774 if (log) 17775 rack_log_pacing_delay_calc(rack, 17776 rack->r_ctl.rc_app_limited_cnt, seq, 17777 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17778 } 17779 } 17780 if (slot) { 17781 /* set the rack tcb into the slot N */ 17782 counter_u64_add(rack_paced_segments, 1); 17783 } else if (tot_len_this_send) { 17784 counter_u64_add(rack_unpaced_segments, 1); 17785 } 17786 /* Check if we need to go into persists or not */ 17787 if ((tp->snd_max == tp->snd_una) && 17788 TCPS_HAVEESTABLISHED(tp->t_state) && 17789 sbavail(sb) && 17790 (sbavail(sb) > tp->snd_wnd) && 17791 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17792 /* Yes lets make sure to move to persist before timer-start */ 17793 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17794 } 17795 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17796 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17797 } 17798 #ifdef NETFLIX_SHARED_CWND 17799 if ((sbavail(sb) == 0) && 17800 rack->r_ctl.rc_scw) { 17801 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17802 rack->rack_scwnd_is_idle = 1; 17803 } 17804 #endif 17805 #ifdef TCP_ACCOUNTING 17806 if (tot_len_this_send > 0) { 17807 crtsc = get_cyclecount(); 17808 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17809 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17810 } 17811 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17812 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17813 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17814 } 17815 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17816 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17817 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17818 } 17819 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17820 } else { 17821 crtsc = get_cyclecount(); 17822 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17823 tp->tcp_cnt_counters[SND_LIMITED]++; 17824 } 17825 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17826 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17827 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17828 } 17829 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17830 } 17831 sched_unpin(); 17832 #endif 17833 return (0); 17834 17835 send: 17836 if (rsm || sack_rxmit) 17837 counter_u64_add(rack_nfto_resend, 1); 17838 else 17839 counter_u64_add(rack_non_fto_send, 1); 17840 if ((flags & TH_FIN) && 17841 sbavail(sb)) { 17842 /* 17843 * We do not transmit a FIN 17844 * with data outstanding. 
We 17845 * need to make it so all data 17846 * is acked first. 17847 */ 17848 flags &= ~TH_FIN; 17849 } 17850 /* Enforce stack imposed max seg size if we have one */ 17851 if (rack->r_ctl.rc_pace_max_segs && 17852 (len > rack->r_ctl.rc_pace_max_segs)) { 17853 mark = 1; 17854 len = rack->r_ctl.rc_pace_max_segs; 17855 } 17856 SOCKBUF_LOCK_ASSERT(sb); 17857 if (len > 0) { 17858 if (len >= segsiz) 17859 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17860 else 17861 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17862 } 17863 /* 17864 * Before ESTABLISHED, force sending of initial options unless TCP 17865 * set not to do any options. NOTE: we assume that the IP/TCP header 17866 * plus TCP options always fit in a single mbuf, leaving room for a 17867 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17868 * + optlen <= MCLBYTES 17869 */ 17870 optlen = 0; 17871 #ifdef INET6 17872 if (isipv6) 17873 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17874 else 17875 #endif 17876 hdrlen = sizeof(struct tcpiphdr); 17877 17878 /* 17879 * Compute options for segment. We only have to care about SYN and 17880 * established connection segments. Options for SYN-ACK segments 17881 * are handled in TCP syncache. 17882 */ 17883 to.to_flags = 0; 17884 if ((tp->t_flags & TF_NOOPT) == 0) { 17885 /* Maximum segment size. */ 17886 if (flags & TH_SYN) { 17887 tp->snd_nxt = tp->iss; 17888 to.to_mss = tcp_mssopt(&inp->inp_inc); 17889 if (tp->t_port) 17890 to.to_mss -= V_tcp_udp_tunneling_overhead; 17891 to.to_flags |= TOF_MSS; 17892 17893 /* 17894 * On SYN or SYN|ACK transmits on TFO connections, 17895 * only include the TFO option if it is not a 17896 * retransmit, as the presence of the TFO option may 17897 * have caused the original SYN or SYN|ACK to have 17898 * been dropped by a middlebox. 17899 */ 17900 if (IS_FASTOPEN(tp->t_flags) && 17901 (tp->t_rxtshift == 0)) { 17902 if (tp->t_state == TCPS_SYN_RECEIVED) { 17903 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17904 to.to_tfo_cookie = 17905 (u_int8_t *)&tp->t_tfo_cookie.server; 17906 to.to_flags |= TOF_FASTOPEN; 17907 wanted_cookie = 1; 17908 } else if (tp->t_state == TCPS_SYN_SENT) { 17909 to.to_tfo_len = 17910 tp->t_tfo_client_cookie_len; 17911 to.to_tfo_cookie = 17912 tp->t_tfo_cookie.client; 17913 to.to_flags |= TOF_FASTOPEN; 17914 wanted_cookie = 1; 17915 /* 17916 * If we wind up having more data to 17917 * send with the SYN than can fit in 17918 * one segment, don't send any more 17919 * until the SYN|ACK comes back from 17920 * the other end. 17921 */ 17922 sendalot = 0; 17923 } 17924 } 17925 } 17926 /* Window scaling. */ 17927 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 17928 to.to_wscale = tp->request_r_scale; 17929 to.to_flags |= TOF_SCALE; 17930 } 17931 /* Timestamps. */ 17932 if ((tp->t_flags & TF_RCVD_TSTMP) || 17933 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 17934 to.to_tsval = ms_cts + tp->ts_offset; 17935 to.to_tsecr = tp->ts_recent; 17936 to.to_flags |= TOF_TS; 17937 } 17938 /* Set receive buffer autosizing timestamp. */ 17939 if (tp->rfbuf_ts == 0 && 17940 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 17941 tp->rfbuf_ts = tcp_ts_getticks(); 17942 /* Selective ACK's. 
*/ 17943 if (tp->t_flags & TF_SACK_PERMIT) { 17944 if (flags & TH_SYN) 17945 to.to_flags |= TOF_SACKPERM; 17946 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 17947 tp->rcv_numsacks > 0) { 17948 to.to_flags |= TOF_SACK; 17949 to.to_nsacks = tp->rcv_numsacks; 17950 to.to_sacks = (u_char *)tp->sackblks; 17951 } 17952 } 17953 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17954 /* TCP-MD5 (RFC2385). */ 17955 if (tp->t_flags & TF_SIGNATURE) 17956 to.to_flags |= TOF_SIGNATURE; 17957 #endif /* TCP_SIGNATURE */ 17958 17959 /* Processing the options. */ 17960 hdrlen += optlen = tcp_addoptions(&to, opt); 17961 /* 17962 * If we wanted a TFO option to be added, but it was unable 17963 * to fit, ensure no data is sent. 17964 */ 17965 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 17966 !(to.to_flags & TOF_FASTOPEN)) 17967 len = 0; 17968 } 17969 if (tp->t_port) { 17970 if (V_tcp_udp_tunneling_port == 0) { 17971 /* The port was removed?? */ 17972 SOCKBUF_UNLOCK(&so->so_snd); 17973 #ifdef TCP_ACCOUNTING 17974 crtsc = get_cyclecount(); 17975 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17976 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 17977 } 17978 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 17979 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17980 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 17981 } 17982 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 17983 sched_unpin(); 17984 #endif 17985 return (EHOSTUNREACH); 17986 } 17987 hdrlen += sizeof(struct udphdr); 17988 } 17989 #ifdef INET6 17990 if (isipv6) 17991 ipoptlen = ip6_optlen(tp->t_inpcb); 17992 else 17993 #endif 17994 if (tp->t_inpcb->inp_options) 17995 ipoptlen = tp->t_inpcb->inp_options->m_len - 17996 offsetof(struct ipoption, ipopt_list); 17997 else 17998 ipoptlen = 0; 17999 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18000 ipoptlen += ipsec_optlen; 18001 #endif 18002 18003 /* 18004 * Adjust data length if insertion of options will bump the packet 18005 * length beyond the t_maxseg length. Clear the FIN bit because we 18006 * cut off the tail of the segment. 18007 */ 18008 if (len + optlen + ipoptlen > tp->t_maxseg) { 18009 if (tso) { 18010 uint32_t if_hw_tsomax; 18011 uint32_t moff; 18012 int32_t max_len; 18013 18014 /* extract TSO information */ 18015 if_hw_tsomax = tp->t_tsomax; 18016 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18017 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18018 KASSERT(ipoptlen == 0, 18019 ("%s: TSO can't do IP options", __func__)); 18020 18021 /* 18022 * Check if we should limit by maximum payload 18023 * length: 18024 */ 18025 if (if_hw_tsomax != 0) { 18026 /* compute maximum TSO length */ 18027 max_len = (if_hw_tsomax - hdrlen - 18028 max_linkhdr); 18029 if (max_len <= 0) { 18030 len = 0; 18031 } else if (len > max_len) { 18032 sendalot = 1; 18033 len = max_len; 18034 mark = 2; 18035 } 18036 } 18037 /* 18038 * Prevent the last segment from being fractional 18039 * unless the send sockbuf can be emptied: 18040 */ 18041 max_len = (tp->t_maxseg - optlen); 18042 if ((sb_offset + len) < sbavail(sb)) { 18043 moff = len % (u_int)max_len; 18044 if (moff != 0) { 18045 mark = 3; 18046 len -= moff; 18047 } 18048 } 18049 /* 18050 * In case there are too many small fragments don't 18051 * use TSO: 18052 */ 18053 if (len <= segsiz) { 18054 mark = 4; 18055 tso = 0; 18056 } 18057 /* 18058 * Send the FIN in a separate segment after the bulk 18059 * sending is done. We don't trust the TSO 18060 * implementations to clear the FIN flag on all but 18061 * the last segment. 
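			 * (Setting sendalot requests another pass through the
			 * send loop so the FIN can go out on its own.)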
18062 */ 18063 if (tp->t_flags & TF_NEEDFIN) { 18064 sendalot = 4; 18065 } 18066 } else { 18067 mark = 5; 18068 if (optlen + ipoptlen >= tp->t_maxseg) { 18069 /* 18070 * Since we don't have enough space to put 18071 * the IP header chain and the TCP header in 18072 * one packet as required by RFC 7112, don't 18073 * send it. Also ensure that at least one 18074 * byte of the payload can be put into the 18075 * TCP segment. 18076 */ 18077 SOCKBUF_UNLOCK(&so->so_snd); 18078 error = EMSGSIZE; 18079 sack_rxmit = 0; 18080 goto out; 18081 } 18082 len = tp->t_maxseg - optlen - ipoptlen; 18083 sendalot = 5; 18084 } 18085 } else { 18086 tso = 0; 18087 mark = 6; 18088 } 18089 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18090 ("%s: len > IP_MAXPACKET", __func__)); 18091 #ifdef DIAGNOSTIC 18092 #ifdef INET6 18093 if (max_linkhdr + hdrlen > MCLBYTES) 18094 #else 18095 if (max_linkhdr + hdrlen > MHLEN) 18096 #endif 18097 panic("tcphdr too big"); 18098 #endif 18099 18100 /* 18101 * This KASSERT is here to catch edge cases at a well defined place. 18102 * Before, those had triggered (random) panic conditions further 18103 * down. 18104 */ 18105 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18106 if ((len == 0) && 18107 (flags & TH_FIN) && 18108 (sbused(sb))) { 18109 /* 18110 * We have outstanding data, don't send a fin by itself!. 18111 */ 18112 goto just_return; 18113 } 18114 /* 18115 * Grab a header mbuf, attaching a copy of data to be transmitted, 18116 * and initialize the header from the template for sends on this 18117 * connection. 18118 */ 18119 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18120 if (len) { 18121 uint32_t max_val; 18122 uint32_t moff; 18123 18124 if (rack->r_ctl.rc_pace_max_segs) 18125 max_val = rack->r_ctl.rc_pace_max_segs; 18126 else if (rack->rc_user_set_max_segs) 18127 max_val = rack->rc_user_set_max_segs * segsiz; 18128 else 18129 max_val = len; 18130 /* 18131 * We allow a limit on sending with hptsi. 18132 */ 18133 if (len > max_val) { 18134 mark = 7; 18135 len = max_val; 18136 } 18137 #ifdef INET6 18138 if (MHLEN < hdrlen + max_linkhdr) 18139 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18140 else 18141 #endif 18142 m = m_gethdr(M_NOWAIT, MT_DATA); 18143 18144 if (m == NULL) { 18145 SOCKBUF_UNLOCK(sb); 18146 error = ENOBUFS; 18147 sack_rxmit = 0; 18148 goto out; 18149 } 18150 m->m_data += max_linkhdr; 18151 m->m_len = hdrlen; 18152 18153 /* 18154 * Start the m_copy functions from the closest mbuf to the 18155 * sb_offset in the socket buffer chain. 18156 */ 18157 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18158 s_mb = mb; 18159 s_moff = moff; 18160 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18161 m_copydata(mb, moff, (int)len, 18162 mtod(m, caddr_t)+hdrlen); 18163 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18164 sbsndptr_adv(sb, mb, len); 18165 m->m_len += len; 18166 } else { 18167 struct sockbuf *msb; 18168 18169 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18170 msb = NULL; 18171 else 18172 msb = sb; 18173 m->m_next = tcp_m_copym( 18174 mb, moff, &len, 18175 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18176 ((rsm == NULL) ? hw_tls : 0) 18177 #ifdef NETFLIX_COPY_ARGS 18178 , &filled_all 18179 #endif 18180 ); 18181 if (len <= (tp->t_maxseg - optlen)) { 18182 /* 18183 * Must have ran out of mbufs for the copy 18184 * shorten it to no longer need tso. Lets 18185 * not put on sendalot since we are low on 18186 * mbufs. 
18187 */ 18188 tso = 0; 18189 } 18190 if (m->m_next == NULL) { 18191 SOCKBUF_UNLOCK(sb); 18192 (void)m_free(m); 18193 error = ENOBUFS; 18194 sack_rxmit = 0; 18195 goto out; 18196 } 18197 } 18198 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18199 if (rsm && (rsm->r_flags & RACK_TLP)) { 18200 /* 18201 * TLP should not count in retran count, but 18202 * in its own bin 18203 */ 18204 counter_u64_add(rack_tlp_retran, 1); 18205 counter_u64_add(rack_tlp_retran_bytes, len); 18206 } else { 18207 tp->t_sndrexmitpack++; 18208 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18209 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18210 } 18211 #ifdef STATS 18212 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18213 len); 18214 #endif 18215 } else { 18216 KMOD_TCPSTAT_INC(tcps_sndpack); 18217 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18218 #ifdef STATS 18219 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18220 len); 18221 #endif 18222 } 18223 /* 18224 * If we're sending everything we've got, set PUSH. (This 18225 * will keep happy those implementations which only give 18226 * data to the user when a buffer fills or a PUSH comes in.) 18227 */ 18228 if (sb_offset + len == sbused(sb) && 18229 sbused(sb) && 18230 !(flags & TH_SYN)) { 18231 flags |= TH_PUSH; 18232 add_flag |= RACK_HAD_PUSH; 18233 } 18234 18235 SOCKBUF_UNLOCK(sb); 18236 } else { 18237 SOCKBUF_UNLOCK(sb); 18238 if (tp->t_flags & TF_ACKNOW) 18239 KMOD_TCPSTAT_INC(tcps_sndacks); 18240 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18241 KMOD_TCPSTAT_INC(tcps_sndctrl); 18242 else 18243 KMOD_TCPSTAT_INC(tcps_sndwinup); 18244 18245 m = m_gethdr(M_NOWAIT, MT_DATA); 18246 if (m == NULL) { 18247 error = ENOBUFS; 18248 sack_rxmit = 0; 18249 goto out; 18250 } 18251 #ifdef INET6 18252 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18253 MHLEN >= hdrlen) { 18254 M_ALIGN(m, hdrlen); 18255 } else 18256 #endif 18257 m->m_data += max_linkhdr; 18258 m->m_len = hdrlen; 18259 } 18260 SOCKBUF_UNLOCK_ASSERT(sb); 18261 m->m_pkthdr.rcvif = (struct ifnet *)0; 18262 #ifdef MAC 18263 mac_inpcb_create_mbuf(inp, m); 18264 #endif 18265 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18266 #ifdef INET6 18267 if (isipv6) 18268 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18269 else 18270 #endif /* INET6 */ 18271 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18272 th = rack->r_ctl.fsb.th; 18273 udp = rack->r_ctl.fsb.udp; 18274 if (udp) { 18275 #ifdef INET6 18276 if (isipv6) 18277 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18278 else 18279 #endif /* INET6 */ 18280 ulen = hdrlen + len - sizeof(struct ip); 18281 udp->uh_ulen = htons(ulen); 18282 } 18283 } else { 18284 #ifdef INET6 18285 if (isipv6) { 18286 ip6 = mtod(m, struct ip6_hdr *); 18287 if (tp->t_port) { 18288 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18289 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18290 udp->uh_dport = tp->t_port; 18291 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18292 udp->uh_ulen = htons(ulen); 18293 th = (struct tcphdr *)(udp + 1); 18294 } else 18295 th = (struct tcphdr *)(ip6 + 1); 18296 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18297 } else 18298 #endif /* INET6 */ 18299 { 18300 ip = mtod(m, struct ip *); 18301 #ifdef TCPDEBUG 18302 ipov = (struct ipovly *)ip; 18303 #endif 18304 if (tp->t_port) { 18305 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18306 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18307 udp->uh_dport = tp->t_port; 18308 ulen = hdrlen + len - sizeof(struct ip); 18309 udp->uh_ulen = htons(ulen); 
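				/* The TCP header sits right behind the encapsulating UDP header. */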
18310 th = (struct tcphdr *)(udp + 1); 18311 } else 18312 th = (struct tcphdr *)(ip + 1); 18313 tcpip_fillheaders(inp, tp->t_port, ip, th); 18314 } 18315 } 18316 /* 18317 * Fill in fields, remembering maximum advertised window for use in 18318 * delaying messages about window sizes. If resending a FIN, be sure 18319 * not to use a new sequence number. 18320 */ 18321 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18322 tp->snd_nxt == tp->snd_max) 18323 tp->snd_nxt--; 18324 /* 18325 * If we are starting a connection, send ECN setup SYN packet. If we 18326 * are on a retransmit, we may resend those bits a number of times 18327 * as per RFC 3168. 18328 */ 18329 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 18330 if (tp->t_rxtshift >= 1) { 18331 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 18332 flags |= TH_ECE | TH_CWR; 18333 } else 18334 flags |= TH_ECE | TH_CWR; 18335 } 18336 /* Handle parallel SYN for ECN */ 18337 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18338 (tp->t_flags2 & TF2_ECN_SND_ECE)) { 18339 flags |= TH_ECE; 18340 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18341 } 18342 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18343 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18344 /* 18345 * If the peer has ECN, mark data packets with ECN capable 18346 * transmission (ECT). Ignore pure ack packets, 18347 * retransmissions. 18348 */ 18349 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 18350 (sack_rxmit == 0)) { 18351 #ifdef INET6 18352 if (isipv6) 18353 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 18354 else 18355 #endif 18356 ip->ip_tos |= IPTOS_ECN_ECT0; 18357 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 18358 /* 18359 * Reply with proper ECN notifications. 18360 * Only set CWR on new data segments. 18361 */ 18362 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 18363 flags |= TH_CWR; 18364 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 18365 } 18366 } 18367 if (tp->t_flags2 & TF2_ECN_SND_ECE) 18368 flags |= TH_ECE; 18369 } 18370 /* 18371 * If we are doing retransmissions, then snd_nxt will not reflect 18372 * the first unsent octet. For ACK only packets, we do not want the 18373 * sequence number of the retransmitted packet, we want the sequence 18374 * number of the next unsent octet. So, if there is no data (and no 18375 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18376 * ti_seq. But if we are in persist state, snd_max might reflect 18377 * one byte beyond the right edge of the window, so use snd_nxt in 18378 * that case, since we know we aren't doing a retransmission. 18379 * (retransmit and persist are mutually exclusive...) 18380 */ 18381 if (sack_rxmit == 0) { 18382 if (len || (flags & (TH_SYN | TH_FIN))) { 18383 th->th_seq = htonl(tp->snd_nxt); 18384 rack_seq = tp->snd_nxt; 18385 } else { 18386 th->th_seq = htonl(tp->snd_max); 18387 rack_seq = tp->snd_max; 18388 } 18389 } else { 18390 th->th_seq = htonl(rsm->r_start); 18391 rack_seq = rsm->r_start; 18392 } 18393 th->th_ack = htonl(tp->rcv_nxt); 18394 th->th_flags = flags; 18395 /* 18396 * Calculate receive window. Don't shrink window, but avoid silly 18397 * window syndrome. 18398 * If a RST segment is sent, advertise a window of zero. 
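	 * Below, a window smaller than both one segment and a quarter of the
	 * receive buffer is rounded down to zero rather than dribbled out.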
18399 */ 18400 if (flags & TH_RST) { 18401 recwin = 0; 18402 } else { 18403 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18404 recwin < (long)segsiz) { 18405 recwin = 0; 18406 } 18407 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18408 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18409 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18410 } 18411 18412 /* 18413 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18414 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18415 * handled in syncache. 18416 */ 18417 if (flags & TH_SYN) 18418 th->th_win = htons((u_short) 18419 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18420 else { 18421 /* Avoid shrinking window with window scaling. */ 18422 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18423 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18424 } 18425 /* 18426 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18427 * window. This may cause the remote transmitter to stall. This 18428 * flag tells soreceive() to disable delayed acknowledgements when 18429 * draining the buffer. This can occur if the receiver is 18430 * attempting to read more data than can be buffered prior to 18431 * transmitting on the connection. 18432 */ 18433 if (th->th_win == 0) { 18434 tp->t_sndzerowin++; 18435 tp->t_flags |= TF_RXWIN0SENT; 18436 } else 18437 tp->t_flags &= ~TF_RXWIN0SENT; 18438 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18439 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18440 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18441 uint8_t *cpto; 18442 18443 cpto = mtod(m, uint8_t *); 18444 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18445 /* 18446 * We have just copied in: 18447 * IP/IP6 18448 * <optional udphdr> 18449 * tcphdr (no options) 18450 * 18451 * We need to grab the correct pointers into the mbuf 18452 * for both the tcp header, and possibly the udp header (if tunneling). 18453 * We do this by using the offset in the copy buffer and adding it 18454 * to the mbuf base pointer (cpto). 18455 */ 18456 #ifdef INET6 18457 if (isipv6) 18458 ip6 = mtod(m, struct ip6_hdr *); 18459 else 18460 #endif /* INET6 */ 18461 ip = mtod(m, struct ip *); 18462 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18463 /* If we have a udp header lets set it into the mbuf as well */ 18464 if (udp) 18465 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18466 } 18467 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18468 if (to.to_flags & TOF_SIGNATURE) { 18469 /* 18470 * Calculate MD5 signature and put it into the place 18471 * determined before. 18472 * NOTE: since TCP options buffer doesn't point into 18473 * mbuf's data, calculate offset and use it. 18474 */ 18475 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18476 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18477 /* 18478 * Do not send segment if the calculation of MD5 18479 * digest has failed. 18480 */ 18481 goto out; 18482 } 18483 } 18484 #endif 18485 if (optlen) { 18486 bcopy(opt, th + 1, optlen); 18487 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18488 } 18489 /* 18490 * Put TCP length in extended header, and then checksum extended 18491 * header and data. 
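	 * For UDP-tunneled connections the pseudo-header checksum goes into
	 * the UDP header instead and th_sum is zeroed.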
 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled in now; it will be
		 * filled in by ip6_output.
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}
		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif
	/*
	 * Enable TSO and specify the size of the segments. The TCP pseudo
	 * header checksum is always provided. XXX: Fixme: This is currently
	 * not the case for IPv6.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}
	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain different than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif
	/* We're getting ready to send; log now.
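	 * A quick key to the BBR-style log record filled in below: flex8
	 * encodes the send type (0 = new data, 1 = retransmission, 2 = TLP
	 * retransmission, 3 = TLP of new data), flex1 carries the PRR send
	 * count (0 when PRR is disabled), and flex7 packs the mark and pass
	 * counters into one value.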
*/ 18559 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18560 union tcp_log_stackspecific log; 18561 18562 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18563 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 18564 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 18565 if (rack->rack_no_prr) 18566 log.u_bbr.flex1 = 0; 18567 else 18568 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18569 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18570 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18571 log.u_bbr.flex4 = orig_len; 18572 if (filled_all) 18573 log.u_bbr.flex5 = 0x80000000; 18574 else 18575 log.u_bbr.flex5 = 0; 18576 /* Save off the early/late values */ 18577 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18578 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18579 log.u_bbr.bw_inuse = rack_get_bw(rack); 18580 if (rsm || sack_rxmit) { 18581 if (doing_tlp) 18582 log.u_bbr.flex8 = 2; 18583 else 18584 log.u_bbr.flex8 = 1; 18585 } else { 18586 if (doing_tlp) 18587 log.u_bbr.flex8 = 3; 18588 else 18589 log.u_bbr.flex8 = 0; 18590 } 18591 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18592 log.u_bbr.flex7 = mark; 18593 log.u_bbr.flex7 <<= 8; 18594 log.u_bbr.flex7 |= pass; 18595 log.u_bbr.pkts_out = tp->t_maxseg; 18596 log.u_bbr.timeStamp = cts; 18597 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18598 log.u_bbr.lt_epoch = cwnd_to_use; 18599 log.u_bbr.delivered = sendalot; 18600 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18601 len, &log, false, NULL, NULL, 0, &tv); 18602 } else 18603 lgb = NULL; 18604 18605 /* 18606 * Fill in IP length and desired time to live and send to IP level. 18607 * There should be a better way to handle ttl and tos; we could keep 18608 * them in the template, but need a way to checksum without them. 18609 */ 18610 /* 18611 * m->m_pkthdr.len should have been set before cksum calcuration, 18612 * because in6_cksum() need it. 18613 */ 18614 #ifdef INET6 18615 if (isipv6) { 18616 /* 18617 * we separately set hoplimit for every segment, since the 18618 * user might want to change the value via setsockopt. Also, 18619 * desired default hop limit might be changed via Neighbor 18620 * Discovery. 18621 */ 18622 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18623 18624 /* 18625 * Set the packet size here for the benefit of DTrace 18626 * probes. ip6_output() will set it properly; it's supposed 18627 * to include the option header lengths as well. 18628 */ 18629 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18630 18631 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18632 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18633 else 18634 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18635 18636 if (tp->t_state == TCPS_SYN_SENT) 18637 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18638 18639 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18640 /* TODO: IPv6 IP6TOS_ECT bit on */ 18641 error = ip6_output(m, 18642 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18643 inp->in6p_outputopts, 18644 #else 18645 NULL, 18646 #endif 18647 &inp->inp_route6, 18648 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18649 NULL, NULL, inp); 18650 18651 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18652 mtu = inp->inp_route6.ro_nh->nh_mtu; 18653 } 18654 #endif /* INET6 */ 18655 #if defined(INET) && defined(INET6) 18656 else 18657 #endif 18658 #ifdef INET 18659 { 18660 ip->ip_len = htons(m->m_pkthdr.len); 18661 #ifdef INET6 18662 if (inp->inp_vflag & INP_IPV6PROTO) 18663 ip->ip_ttl = in6_selecthlim(inp, NULL); 18664 #endif /* INET6 */ 18665 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18666 /* 18667 * If we do path MTU discovery, then we set DF on every 18668 * packet. This might not be the best thing to do according 18669 * to RFC3390 Section 2. However the tcp hostcache migitates 18670 * the problem so it affects only the first tcp connection 18671 * with a host. 18672 * 18673 * NB: Don't set DF on small MTU/MSS to have a safe 18674 * fallback. 18675 */ 18676 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18677 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18678 if (tp->t_port == 0 || len < V_tcp_minmss) { 18679 ip->ip_off |= htons(IP_DF); 18680 } 18681 } else { 18682 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18683 } 18684 18685 if (tp->t_state == TCPS_SYN_SENT) 18686 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18687 18688 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18689 18690 error = ip_output(m, 18691 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18692 inp->inp_options, 18693 #else 18694 NULL, 18695 #endif 18696 &inp->inp_route, 18697 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18698 inp); 18699 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18700 mtu = inp->inp_route.ro_nh->nh_mtu; 18701 } 18702 #endif /* INET */ 18703 18704 out: 18705 if (lgb) { 18706 lgb->tlb_errno = error; 18707 lgb = NULL; 18708 } 18709 /* 18710 * In transmit state, time the transmission and arrange for the 18711 * retransmit. In persist state, just set snd_max. 
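 * As an illustrative note on the size accounting done on a successful
 * send below: a pure ack lands in the TCP_MSS_ACCT_SNDACK bucket, a
 * one-byte probe in TCP_MSS_ACCT_PERSIST, and anything larger in bucket
 * (len / segsiz) + 3, capped at TCP_MSS_ACCT_ATIMER - 1; e.g. with a
 * segsiz of 1448, a 4 * segsiz burst is counted in bucket 7.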
18712 */ 18713 if (error == 0) { 18714 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18715 if (rsm && doing_tlp) { 18716 rack->rc_last_sent_tlp_past_cumack = 0; 18717 rack->rc_last_sent_tlp_seq_valid = 1; 18718 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18719 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18720 } 18721 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18722 if (rsm && (doing_tlp == 0)) { 18723 /* Set we retransmitted */ 18724 rack->rc_gp_saw_rec = 1; 18725 } else { 18726 if (cwnd_to_use > tp->snd_ssthresh) { 18727 /* Set we sent in CA */ 18728 rack->rc_gp_saw_ca = 1; 18729 } else { 18730 /* Set we sent in SS */ 18731 rack->rc_gp_saw_ss = 1; 18732 } 18733 } 18734 if (doing_tlp && (rsm == NULL)) { 18735 /* Make sure new data TLP cnt is clear */ 18736 rack->r_ctl.rc_tlp_new_data = 0; 18737 } 18738 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18739 (tp->t_flags & TF_SACK_PERMIT) && 18740 tp->rcv_numsacks > 0) 18741 tcp_clean_dsack_blocks(tp); 18742 tot_len_this_send += len; 18743 if (len == 0) 18744 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18745 else if (len == 1) { 18746 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18747 } else if (len > 1) { 18748 int idx; 18749 18750 idx = (len / segsiz) + 3; 18751 if (idx >= TCP_MSS_ACCT_ATIMER) 18752 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18753 else 18754 counter_u64_add(rack_out_size[idx], 1); 18755 } 18756 } 18757 if ((rack->rack_no_prr == 0) && 18758 sub_from_prr && 18759 (error == 0)) { 18760 if (rack->r_ctl.rc_prr_sndcnt >= len) 18761 rack->r_ctl.rc_prr_sndcnt -= len; 18762 else 18763 rack->r_ctl.rc_prr_sndcnt = 0; 18764 } 18765 sub_from_prr = 0; 18766 if (doing_tlp) { 18767 /* Make sure the TLP is added */ 18768 add_flag |= RACK_TLP; 18769 } else if (rsm) { 18770 /* If its a resend without TLP then it must not have the flag */ 18771 rsm->r_flags &= ~RACK_TLP; 18772 } 18773 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18774 rack_to_usec_ts(&tv), 18775 rsm, add_flag, s_mb, s_moff, hw_tls); 18776 18777 18778 if ((error == 0) && 18779 (len > 0) && 18780 (tp->snd_una == tp->snd_max)) 18781 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18782 { 18783 tcp_seq startseq = tp->snd_nxt; 18784 18785 /* Track our lost count */ 18786 if (rsm && (doing_tlp == 0)) 18787 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18788 /* 18789 * Advance snd_nxt over sequence space of this segment. 18790 */ 18791 if (error) 18792 /* We don't log or do anything with errors */ 18793 goto nomore; 18794 if (doing_tlp == 0) { 18795 if (rsm == NULL) { 18796 /* 18797 * Not a retransmission of some 18798 * sort, new data is going out so 18799 * clear our TLP count and flag. 18800 */ 18801 rack->rc_tlp_in_progress = 0; 18802 rack->r_ctl.rc_tlp_cnt_out = 0; 18803 } 18804 } else { 18805 /* 18806 * We have just sent a TLP, mark that it is true 18807 * and make sure our in progress is set so we 18808 * continue to check the count. 
18809 */ 18810 rack->rc_tlp_in_progress = 1; 18811 rack->r_ctl.rc_tlp_cnt_out++; 18812 } 18813 if (flags & (TH_SYN | TH_FIN)) { 18814 if (flags & TH_SYN) 18815 tp->snd_nxt++; 18816 if (flags & TH_FIN) { 18817 tp->snd_nxt++; 18818 tp->t_flags |= TF_SENTFIN; 18819 } 18820 } 18821 /* In the ENOBUFS case we do *not* update snd_max */ 18822 if (sack_rxmit) 18823 goto nomore; 18824 18825 tp->snd_nxt += len; 18826 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18827 if (tp->snd_una == tp->snd_max) { 18828 /* 18829 * Update the time we just added data since 18830 * none was outstanding. 18831 */ 18832 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18833 tp->t_acktime = ticks; 18834 } 18835 tp->snd_max = tp->snd_nxt; 18836 /* 18837 * Time this transmission if not a retransmission and 18838 * not currently timing anything. 18839 * This is only relevant in case of switching back to 18840 * the base stack. 18841 */ 18842 if (tp->t_rtttime == 0) { 18843 tp->t_rtttime = ticks; 18844 tp->t_rtseq = startseq; 18845 KMOD_TCPSTAT_INC(tcps_segstimed); 18846 } 18847 if (len && 18848 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18849 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18850 } 18851 /* 18852 * If we are doing FO we need to update the mbuf position and subtract 18853 * this happens when the peer sends us duplicate information and 18854 * we thus want to send a DSACK. 18855 * 18856 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18857 * turned off? If not then we are going to echo multiple DSACK blocks 18858 * out (with the TSO), which we should not be doing. 18859 */ 18860 if (rack->r_fast_output && len) { 18861 if (rack->r_ctl.fsb.left_to_send > len) 18862 rack->r_ctl.fsb.left_to_send -= len; 18863 else 18864 rack->r_ctl.fsb.left_to_send = 0; 18865 if (rack->r_ctl.fsb.left_to_send < segsiz) 18866 rack->r_fast_output = 0; 18867 if (rack->r_fast_output) { 18868 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18869 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18870 } 18871 } 18872 } 18873 nomore: 18874 if (error) { 18875 rack->r_ctl.rc_agg_delayed = 0; 18876 rack->r_early = 0; 18877 rack->r_late = 0; 18878 rack->r_ctl.rc_agg_early = 0; 18879 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18880 /* 18881 * Failures do not advance the seq counter above. For the 18882 * case of ENOBUFS we will fall out and retry in 1ms with 18883 * the hpts. Everything else will just have to retransmit 18884 * with the timer. 18885 * 18886 * In any case, we do not want to loop around for another 18887 * send without a good reason. 
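 * As a worked example of the ENOBUFS backoff below: the retry slot is
 * (1 + rc_enobuf) milliseconds, floored at 10ms, so the first several
 * back-to-back ENOBUFS events all pace a retry 10ms out, and only after
 * roughly ten consecutive failures does the delay start growing past
 * that floor.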
18888 */ 18889 sendalot = 0; 18890 switch (error) { 18891 case EPERM: 18892 tp->t_softerror = error; 18893 #ifdef TCP_ACCOUNTING 18894 crtsc = get_cyclecount(); 18895 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18896 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18897 } 18898 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18899 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18900 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18901 } 18902 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18903 sched_unpin(); 18904 #endif 18905 return (error); 18906 case ENOBUFS: 18907 /* 18908 * Pace us right away to retry in a some 18909 * time 18910 */ 18911 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18912 if (rack->rc_enobuf < 0x7f) 18913 rack->rc_enobuf++; 18914 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18915 slot = 10 * HPTS_USEC_IN_MSEC; 18916 if (rack->r_ctl.crte != NULL) { 18917 counter_u64_add(rack_saw_enobuf_hw, 1); 18918 tcp_rl_log_enobuf(rack->r_ctl.crte); 18919 } 18920 counter_u64_add(rack_saw_enobuf, 1); 18921 goto enobufs; 18922 case EMSGSIZE: 18923 /* 18924 * For some reason the interface we used initially 18925 * to send segments changed to another or lowered 18926 * its MTU. If TSO was active we either got an 18927 * interface without TSO capabilits or TSO was 18928 * turned off. If we obtained mtu from ip_output() 18929 * then update it and try again. 18930 */ 18931 if (tso) 18932 tp->t_flags &= ~TF_TSO; 18933 if (mtu != 0) { 18934 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18935 goto again; 18936 } 18937 slot = 10 * HPTS_USEC_IN_MSEC; 18938 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18939 #ifdef TCP_ACCOUNTING 18940 crtsc = get_cyclecount(); 18941 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18942 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18943 } 18944 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18945 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18946 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18947 } 18948 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18949 sched_unpin(); 18950 #endif 18951 return (error); 18952 case ENETUNREACH: 18953 counter_u64_add(rack_saw_enetunreach, 1); 18954 case EHOSTDOWN: 18955 case EHOSTUNREACH: 18956 case ENETDOWN: 18957 if (TCPS_HAVERCVDSYN(tp->t_state)) { 18958 tp->t_softerror = error; 18959 } 18960 /* FALLTHROUGH */ 18961 default: 18962 slot = 10 * HPTS_USEC_IN_MSEC; 18963 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18964 #ifdef TCP_ACCOUNTING 18965 crtsc = get_cyclecount(); 18966 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18967 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18968 } 18969 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18970 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18971 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18972 } 18973 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18974 sched_unpin(); 18975 #endif 18976 return (error); 18977 } 18978 } else { 18979 rack->rc_enobuf = 0; 18980 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18981 rack->r_ctl.retran_during_recovery += len; 18982 } 18983 KMOD_TCPSTAT_INC(tcps_sndtotal); 18984 18985 /* 18986 * Data sent (as far as we can tell). If this advertises a larger 18987 * window than any other segment, then remember the size of the 18988 * advertised window. Any pending ACK has now been sent. 
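 * Illustrative numbers: with rcv_nxt at 1000, recwin at 64000 and
 * rcv_adv at 50000, the check below pushes rcv_adv forward to 65000,
 * so a later, smaller computed window is never read as us shrinking an
 * offer we already made.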
18989 */ 18990 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 18991 tp->rcv_adv = tp->rcv_nxt + recwin; 18992 18993 tp->last_ack_sent = tp->rcv_nxt; 18994 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18995 enobufs: 18996 if (sendalot) { 18997 /* Do we need to turn off sendalot? */ 18998 if (rack->r_ctl.rc_pace_max_segs && 18999 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19000 /* We hit our max. */ 19001 sendalot = 0; 19002 } else if ((rack->rc_user_set_max_segs) && 19003 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19004 /* We hit the user defined max */ 19005 sendalot = 0; 19006 } 19007 } 19008 if ((error == 0) && (flags & TH_FIN)) 19009 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19010 if (flags & TH_RST) { 19011 /* 19012 * We don't send again after sending a RST. 19013 */ 19014 slot = 0; 19015 sendalot = 0; 19016 if (error == 0) 19017 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19018 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19019 /* 19020 * Get our pacing rate, if an error 19021 * occurred in sending (ENOBUF) we would 19022 * hit the else if with slot preset. Other 19023 * errors return. 19024 */ 19025 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19026 } 19027 if (rsm && 19028 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19029 rack->use_rack_rr) { 19030 /* Its a retransmit and we use the rack cheat? */ 19031 if ((slot == 0) || 19032 (rack->rc_always_pace == 0) || 19033 (rack->r_rr_config == 1)) { 19034 /* 19035 * We have no pacing set or we 19036 * are using old-style rack or 19037 * we are overriden to use the old 1ms pacing. 19038 */ 19039 slot = rack->r_ctl.rc_min_to; 19040 } 19041 } 19042 /* We have sent clear the flag */ 19043 rack->r_ent_rec_ns = 0; 19044 if (rack->r_must_retran) { 19045 if (rsm) { 19046 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19047 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19048 /* 19049 * We have retransmitted all. 19050 */ 19051 rack->r_must_retran = 0; 19052 rack->r_ctl.rc_out_at_rto = 0; 19053 } 19054 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19055 /* 19056 * Sending new data will also kill 19057 * the loop. 19058 */ 19059 rack->r_must_retran = 0; 19060 rack->r_ctl.rc_out_at_rto = 0; 19061 } 19062 } 19063 rack->r_ctl.fsb.recwin = recwin; 19064 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19065 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19066 /* 19067 * We hit an RTO and now have past snd_max at the RTO 19068 * clear all the WAS flags. 
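 * A summary of the fast-output arming that follows (descriptive only):
 * r_fast_output is turned on only for a plain established-state data
 * send with no SYN/FIN, no retransmission (rsm == NULL), no IP options,
 * no pending SACK blocks, an initialized fsb, and at least one more full
 * segsiz of the caller's original length still unsent
 * (orig_len - len >= segsiz); any TCP options must be exactly the
 * standard timestamp layout so the cached header remains valid.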
19069 */ 19070 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19071 } 19072 if (slot) { 19073 /* set the rack tcb into the slot N */ 19074 counter_u64_add(rack_paced_segments, 1); 19075 if ((error == 0) && 19076 rack_use_rfo && 19077 ((flags & (TH_SYN|TH_FIN)) == 0) && 19078 (rsm == NULL) && 19079 (tp->snd_nxt == tp->snd_max) && 19080 (ipoptlen == 0) && 19081 (tp->rcv_numsacks == 0) && 19082 rack->r_fsb_inited && 19083 TCPS_HAVEESTABLISHED(tp->t_state) && 19084 (rack->r_must_retran == 0) && 19085 ((tp->t_flags & TF_NEEDFIN) == 0) && 19086 (len > 0) && (orig_len > 0) && 19087 (orig_len > len) && 19088 ((orig_len - len) >= segsiz) && 19089 ((optlen == 0) || 19090 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19091 /* We can send at least one more MSS using our fsb */ 19092 19093 rack->r_fast_output = 1; 19094 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19095 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19096 rack->r_ctl.fsb.tcp_flags = flags; 19097 rack->r_ctl.fsb.left_to_send = orig_len - len; 19098 if (hw_tls) 19099 rack->r_ctl.fsb.hw_tls = 1; 19100 else 19101 rack->r_ctl.fsb.hw_tls = 0; 19102 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19103 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19104 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19105 (tp->snd_max - tp->snd_una))); 19106 if (rack->r_ctl.fsb.left_to_send < segsiz) 19107 rack->r_fast_output = 0; 19108 else { 19109 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19110 rack->r_ctl.fsb.rfo_apply_push = 1; 19111 else 19112 rack->r_ctl.fsb.rfo_apply_push = 0; 19113 } 19114 } else 19115 rack->r_fast_output = 0; 19116 rack_log_fsb(rack, tp, so, flags, 19117 ipoptlen, orig_len, len, error, 19118 (rsm == NULL), optlen, __LINE__, 2); 19119 } else if (sendalot) { 19120 int ret; 19121 19122 if (len) 19123 counter_u64_add(rack_unpaced_segments, 1); 19124 sack_rxmit = 0; 19125 if ((error == 0) && 19126 rack_use_rfo && 19127 ((flags & (TH_SYN|TH_FIN)) == 0) && 19128 (rsm == NULL) && 19129 (ipoptlen == 0) && 19130 (tp->rcv_numsacks == 0) && 19131 (tp->snd_nxt == tp->snd_max) && 19132 (rack->r_must_retran == 0) && 19133 rack->r_fsb_inited && 19134 TCPS_HAVEESTABLISHED(tp->t_state) && 19135 ((tp->t_flags & TF_NEEDFIN) == 0) && 19136 (len > 0) && (orig_len > 0) && 19137 (orig_len > len) && 19138 ((orig_len - len) >= segsiz) && 19139 ((optlen == 0) || 19140 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19141 /* we can use fast_output for more */ 19142 19143 rack->r_fast_output = 1; 19144 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19145 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19146 rack->r_ctl.fsb.tcp_flags = flags; 19147 rack->r_ctl.fsb.left_to_send = orig_len - len; 19148 if (hw_tls) 19149 rack->r_ctl.fsb.hw_tls = 1; 19150 else 19151 rack->r_ctl.fsb.hw_tls = 0; 19152 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19153 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19154 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19155 (tp->snd_max - tp->snd_una))); 19156 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19157 rack->r_fast_output = 0; 19158 } 19159 if (rack->r_fast_output) { 19160 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19161 rack->r_ctl.fsb.rfo_apply_push = 1; 19162 else 19163 rack->r_ctl.fsb.rfo_apply_push = 0; 19164 rack_log_fsb(rack, tp, so, flags, 
19165 ipoptlen, orig_len, len, error, 19166 (rsm == NULL), optlen, __LINE__, 3); 19167 error = 0; 19168 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19169 if (ret >= 0) 19170 return (ret); 19171 else if (error) 19172 goto nomore; 19173 19174 } 19175 } 19176 goto again; 19177 } else if (len) { 19178 counter_u64_add(rack_unpaced_segments, 1); 19179 } 19180 /* Assure when we leave that snd_nxt will point to top */ 19181 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19182 tp->snd_nxt = tp->snd_max; 19183 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19184 #ifdef TCP_ACCOUNTING 19185 crtsc = get_cyclecount() - ts_val; 19186 if (tot_len_this_send) { 19187 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19188 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19189 } 19190 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19191 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19192 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19193 } 19194 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19195 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19196 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19197 } 19198 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19199 } else { 19200 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19201 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19202 } 19203 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19204 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19205 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19206 } 19207 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19208 } 19209 sched_unpin(); 19210 #endif 19211 if (error == ENOBUFS) 19212 error = 0; 19213 return (error); 19214 } 19215 19216 static void 19217 rack_update_seg(struct tcp_rack *rack) 19218 { 19219 uint32_t orig_val; 19220 19221 orig_val = rack->r_ctl.rc_pace_max_segs; 19222 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19223 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19224 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19225 } 19226 19227 static void 19228 rack_mtu_change(struct tcpcb *tp) 19229 { 19230 /* 19231 * The MSS may have changed 19232 */ 19233 struct tcp_rack *rack; 19234 19235 rack = (struct tcp_rack *)tp->t_fb_ptr; 19236 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19237 /* 19238 * The MTU has changed we need to resend everything 19239 * since all we have sent is lost. We first fix 19240 * up the mtu though. 
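 * The r_must_retran flag set below is cleared back in the output path
 * once a retransmission (or new data) reaches rc_snd_max_at_rto, so
 * rc_out_at_rto effectively tracks how much of the pre-MTU-change
 * flight is still owed before normal operation resumes.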
19241 */ 19242 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19243 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19244 rack_remxt_tmr(tp); 19245 rack->r_fast_output = 0; 19246 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19247 rack->r_ctl.rc_sacked); 19248 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19249 rack->r_must_retran = 1; 19250 19251 } 19252 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19253 /* We don't use snd_nxt to retransmit */ 19254 tp->snd_nxt = tp->snd_max; 19255 } 19256 19257 static int 19258 rack_set_profile(struct tcp_rack *rack, int prof) 19259 { 19260 int err = EINVAL; 19261 if (prof == 1) { 19262 /* pace_always=1 */ 19263 if (rack->rc_always_pace == 0) { 19264 if (tcp_can_enable_pacing() == 0) 19265 return (EBUSY); 19266 } 19267 rack->rc_always_pace = 1; 19268 if (rack->use_fixed_rate || rack->gp_ready) 19269 rack_set_cc_pacing(rack); 19270 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19271 rack->rack_attempt_hdwr_pace = 0; 19272 /* cmpack=1 */ 19273 if (rack_use_cmp_acks) 19274 rack->r_use_cmp_ack = 1; 19275 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19276 rack->r_use_cmp_ack) 19277 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19278 /* scwnd=1 */ 19279 rack->rack_enable_scwnd = 1; 19280 /* dynamic=100 */ 19281 rack->rc_gp_dyn_mul = 1; 19282 /* gp_inc_ca */ 19283 rack->r_ctl.rack_per_of_gp_ca = 100; 19284 /* rrr_conf=3 */ 19285 rack->r_rr_config = 3; 19286 /* npush=2 */ 19287 rack->r_ctl.rc_no_push_at_mrtt = 2; 19288 /* fillcw=1 */ 19289 rack->rc_pace_to_cwnd = 1; 19290 rack->rc_pace_fill_if_rttin_range = 0; 19291 rack->rtt_limit_mul = 0; 19292 /* noprr=1 */ 19293 rack->rack_no_prr = 1; 19294 /* lscwnd=1 */ 19295 rack->r_limit_scw = 1; 19296 /* gp_inc_rec */ 19297 rack->r_ctl.rack_per_of_gp_rec = 90; 19298 err = 0; 19299 19300 } else if (prof == 3) { 19301 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19302 /* pace_always=1 */ 19303 if (rack->rc_always_pace == 0) { 19304 if (tcp_can_enable_pacing() == 0) 19305 return (EBUSY); 19306 } 19307 rack->rc_always_pace = 1; 19308 if (rack->use_fixed_rate || rack->gp_ready) 19309 rack_set_cc_pacing(rack); 19310 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19311 rack->rack_attempt_hdwr_pace = 0; 19312 /* cmpack=1 */ 19313 if (rack_use_cmp_acks) 19314 rack->r_use_cmp_ack = 1; 19315 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19316 rack->r_use_cmp_ack) 19317 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19318 /* scwnd=1 */ 19319 rack->rack_enable_scwnd = 1; 19320 /* dynamic=100 */ 19321 rack->rc_gp_dyn_mul = 1; 19322 /* gp_inc_ca */ 19323 rack->r_ctl.rack_per_of_gp_ca = 100; 19324 /* rrr_conf=3 */ 19325 rack->r_rr_config = 3; 19326 /* npush=2 */ 19327 rack->r_ctl.rc_no_push_at_mrtt = 2; 19328 /* fillcw=2 */ 19329 rack->rc_pace_to_cwnd = 1; 19330 rack->r_fill_less_agg = 1; 19331 rack->rc_pace_fill_if_rttin_range = 0; 19332 rack->rtt_limit_mul = 0; 19333 /* noprr=1 */ 19334 rack->rack_no_prr = 1; 19335 /* lscwnd=1 */ 19336 rack->r_limit_scw = 1; 19337 /* gp_inc_rec */ 19338 rack->r_ctl.rack_per_of_gp_rec = 90; 19339 err = 0; 19340 19341 19342 } else if (prof == 2) { 19343 /* cmpack=1 */ 19344 if (rack->rc_always_pace == 0) { 19345 if (tcp_can_enable_pacing() == 0) 19346 return (EBUSY); 19347 } 19348 rack->rc_always_pace = 1; 19349 if (rack->use_fixed_rate || rack->gp_ready) 19350 rack_set_cc_pacing(rack); 19351 rack->r_use_cmp_ack = 1; 19352 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19353 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 
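	/*
	 * Illustrative userland use of these profiles (a sketch, not part of
	 * the stack): once a socket is on the rack stack, all of the knobs in
	 * a profile can be applied with a single socket option, e.g.
	 *
	 *	int prof = 2;
	 *	setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE, &prof, sizeof(prof));
	 *
	 * Profile 2 (this branch) forces compressed acks on and leaves the
	 * shared-cwnd time limit (lscwnd) off, where profile 1 gates cmpack
	 * on the rack_use_cmp_acks tunable and does limit the shared cwnd.
	 */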
19354 /* pace_always=1 */ 19355 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19356 /* scwnd=1 */ 19357 rack->rack_enable_scwnd = 1; 19358 /* dynamic=100 */ 19359 rack->rc_gp_dyn_mul = 1; 19360 rack->r_ctl.rack_per_of_gp_ca = 100; 19361 /* rrr_conf=3 */ 19362 rack->r_rr_config = 3; 19363 /* npush=2 */ 19364 rack->r_ctl.rc_no_push_at_mrtt = 2; 19365 /* fillcw=1 */ 19366 rack->rc_pace_to_cwnd = 1; 19367 rack->rc_pace_fill_if_rttin_range = 0; 19368 rack->rtt_limit_mul = 0; 19369 /* noprr=1 */ 19370 rack->rack_no_prr = 1; 19371 /* lscwnd=0 */ 19372 rack->r_limit_scw = 0; 19373 err = 0; 19374 } else if (prof == 0) { 19375 /* This changes things back to the default settings */ 19376 err = 0; 19377 if (rack->rc_always_pace) { 19378 tcp_decrement_paced_conn(); 19379 rack_undo_cc_pacing(rack); 19380 rack->rc_always_pace = 0; 19381 } 19382 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19383 rack->rc_always_pace = 1; 19384 if (rack->use_fixed_rate || rack->gp_ready) 19385 rack_set_cc_pacing(rack); 19386 } else 19387 rack->rc_always_pace = 0; 19388 if (rack_dsack_std_based & 0x1) { 19389 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19390 rack->rc_rack_tmr_std_based = 1; 19391 } 19392 if (rack_dsack_std_based & 0x2) { 19393 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19394 rack->rc_rack_use_dsack = 1; 19395 } 19396 if (rack_use_cmp_acks) 19397 rack->r_use_cmp_ack = 1; 19398 else 19399 rack->r_use_cmp_ack = 0; 19400 if (rack_disable_prr) 19401 rack->rack_no_prr = 1; 19402 else 19403 rack->rack_no_prr = 0; 19404 if (rack_gp_no_rec_chg) 19405 rack->rc_gp_no_rec_chg = 1; 19406 else 19407 rack->rc_gp_no_rec_chg = 0; 19408 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19409 rack->r_mbuf_queue = 1; 19410 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19411 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19412 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19413 } else { 19414 rack->r_mbuf_queue = 0; 19415 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19416 } 19417 if (rack_enable_shared_cwnd) 19418 rack->rack_enable_scwnd = 1; 19419 else 19420 rack->rack_enable_scwnd = 0; 19421 if (rack_do_dyn_mul) { 19422 /* When dynamic adjustment is on CA needs to start at 100% */ 19423 rack->rc_gp_dyn_mul = 1; 19424 if (rack_do_dyn_mul >= 100) 19425 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19426 } else { 19427 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19428 rack->rc_gp_dyn_mul = 0; 19429 } 19430 rack->r_rr_config = 0; 19431 rack->r_ctl.rc_no_push_at_mrtt = 0; 19432 rack->rc_pace_to_cwnd = 0; 19433 rack->rc_pace_fill_if_rttin_range = 0; 19434 rack->rtt_limit_mul = 0; 19435 19436 if (rack_enable_hw_pacing) 19437 rack->rack_hdw_pace_ena = 1; 19438 else 19439 rack->rack_hdw_pace_ena = 0; 19440 if (rack_disable_prr) 19441 rack->rack_no_prr = 1; 19442 else 19443 rack->rack_no_prr = 0; 19444 if (rack_limits_scwnd) 19445 rack->r_limit_scw = 1; 19446 else 19447 rack->r_limit_scw = 0; 19448 err = 0; 19449 } 19450 return (err); 19451 } 19452 19453 static int 19454 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19455 { 19456 struct deferred_opt_list *dol; 19457 19458 dol = malloc(sizeof(struct deferred_opt_list), 19459 M_TCPFSB, M_NOWAIT|M_ZERO); 19460 if (dol == NULL) { 19461 /* 19462 * No space yikes -- fail out.. 
19463 */ 19464 return (0); 19465 } 19466 dol->optname = sopt_name; 19467 dol->optval = loptval; 19468 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19469 return (1); 19470 } 19471 19472 static int 19473 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19474 uint32_t optval, uint64_t loptval) 19475 { 19476 struct epoch_tracker et; 19477 struct sockopt sopt; 19478 struct cc_newreno_opts opt; 19479 uint64_t val; 19480 int error = 0; 19481 uint16_t ca, ss; 19482 19483 switch (sopt_name) { 19484 19485 case TCP_RACK_DSACK_OPT: 19486 RACK_OPTS_INC(tcp_rack_dsack_opt); 19487 if (optval & 0x1) { 19488 rack->rc_rack_tmr_std_based = 1; 19489 } else { 19490 rack->rc_rack_tmr_std_based = 0; 19491 } 19492 if (optval & 0x2) { 19493 rack->rc_rack_use_dsack = 1; 19494 } else { 19495 rack->rc_rack_use_dsack = 0; 19496 } 19497 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19498 break; 19499 case TCP_RACK_PACING_BETA: 19500 RACK_OPTS_INC(tcp_rack_beta); 19501 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19502 /* This only works for newreno. */ 19503 error = EINVAL; 19504 break; 19505 } 19506 if (rack->rc_pacing_cc_set) { 19507 /* 19508 * Set them into the real CC module 19509 * whats in the rack pcb is the old values 19510 * to be used on restoral/ 19511 */ 19512 sopt.sopt_dir = SOPT_SET; 19513 opt.name = CC_NEWRENO_BETA; 19514 opt.val = optval; 19515 if (CC_ALGO(tp)->ctl_output != NULL) 19516 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19517 else { 19518 error = ENOENT; 19519 break; 19520 } 19521 } else { 19522 /* 19523 * Not pacing yet so set it into our local 19524 * rack pcb storage. 19525 */ 19526 rack->r_ctl.rc_saved_beta.beta = optval; 19527 } 19528 break; 19529 case TCP_RACK_TIMER_SLOP: 19530 RACK_OPTS_INC(tcp_rack_timer_slop); 19531 rack->r_ctl.timer_slop = optval; 19532 if (rack->rc_tp->t_srtt) { 19533 /* 19534 * If we have an SRTT lets update t_rxtcur 19535 * to have the new slop. 19536 */ 19537 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19538 rack_rto_min, rack_rto_max, 19539 rack->r_ctl.timer_slop); 19540 } 19541 break; 19542 case TCP_RACK_PACING_BETA_ECN: 19543 RACK_OPTS_INC(tcp_rack_beta_ecn); 19544 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19545 /* This only works for newreno. */ 19546 error = EINVAL; 19547 break; 19548 } 19549 if (rack->rc_pacing_cc_set) { 19550 /* 19551 * Set them into the real CC module 19552 * whats in the rack pcb is the old values 19553 * to be used on restoral/ 19554 */ 19555 sopt.sopt_dir = SOPT_SET; 19556 opt.name = CC_NEWRENO_BETA_ECN; 19557 opt.val = optval; 19558 if (CC_ALGO(tp)->ctl_output != NULL) 19559 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19560 else 19561 error = ENOENT; 19562 } else { 19563 /* 19564 * Not pacing yet so set it into our local 19565 * rack pcb storage. 
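 * (As with the plain beta above, the value is the percentage of the
 * prior cwnd kept after the event: a beta_ecn of 80 means an ECN mark
 * costs 20% of cwnd, versus the 50 normally used for the plain loss
 * beta.)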
19566 */ 19567 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19568 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN; 19569 } 19570 break; 19571 case TCP_DEFER_OPTIONS: 19572 RACK_OPTS_INC(tcp_defer_opt); 19573 if (optval) { 19574 if (rack->gp_ready) { 19575 /* Too late */ 19576 error = EINVAL; 19577 break; 19578 } 19579 rack->defer_options = 1; 19580 } else 19581 rack->defer_options = 0; 19582 break; 19583 case TCP_RACK_MEASURE_CNT: 19584 RACK_OPTS_INC(tcp_rack_measure_cnt); 19585 if (optval && (optval <= 0xff)) { 19586 rack->r_ctl.req_measurements = optval; 19587 } else 19588 error = EINVAL; 19589 break; 19590 case TCP_REC_ABC_VAL: 19591 RACK_OPTS_INC(tcp_rec_abc_val); 19592 if (optval > 0) 19593 rack->r_use_labc_for_rec = 1; 19594 else 19595 rack->r_use_labc_for_rec = 0; 19596 break; 19597 case TCP_RACK_ABC_VAL: 19598 RACK_OPTS_INC(tcp_rack_abc_val); 19599 if ((optval > 0) && (optval < 255)) 19600 rack->rc_labc = optval; 19601 else 19602 error = EINVAL; 19603 break; 19604 case TCP_HDWR_UP_ONLY: 19605 RACK_OPTS_INC(tcp_pacing_up_only); 19606 if (optval) 19607 rack->r_up_only = 1; 19608 else 19609 rack->r_up_only = 0; 19610 break; 19611 case TCP_PACING_RATE_CAP: 19612 RACK_OPTS_INC(tcp_pacing_rate_cap); 19613 rack->r_ctl.bw_rate_cap = loptval; 19614 break; 19615 case TCP_RACK_PROFILE: 19616 RACK_OPTS_INC(tcp_profile); 19617 error = rack_set_profile(rack, optval); 19618 break; 19619 case TCP_USE_CMP_ACKS: 19620 RACK_OPTS_INC(tcp_use_cmp_acks); 19621 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19622 /* You can't turn it off once its on! */ 19623 error = EINVAL; 19624 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19625 rack->r_use_cmp_ack = 1; 19626 rack->r_mbuf_queue = 1; 19627 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19628 } 19629 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19630 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19631 break; 19632 case TCP_SHARED_CWND_TIME_LIMIT: 19633 RACK_OPTS_INC(tcp_lscwnd); 19634 if (optval) 19635 rack->r_limit_scw = 1; 19636 else 19637 rack->r_limit_scw = 0; 19638 break; 19639 case TCP_RACK_PACE_TO_FILL: 19640 RACK_OPTS_INC(tcp_fillcw); 19641 if (optval == 0) 19642 rack->rc_pace_to_cwnd = 0; 19643 else { 19644 rack->rc_pace_to_cwnd = 1; 19645 if (optval > 1) 19646 rack->r_fill_less_agg = 1; 19647 } 19648 if ((optval >= rack_gp_rtt_maxmul) && 19649 rack_gp_rtt_maxmul && 19650 (optval < 0xf)) { 19651 rack->rc_pace_fill_if_rttin_range = 1; 19652 rack->rtt_limit_mul = optval; 19653 } else { 19654 rack->rc_pace_fill_if_rttin_range = 0; 19655 rack->rtt_limit_mul = 0; 19656 } 19657 break; 19658 case TCP_RACK_NO_PUSH_AT_MAX: 19659 RACK_OPTS_INC(tcp_npush); 19660 if (optval == 0) 19661 rack->r_ctl.rc_no_push_at_mrtt = 0; 19662 else if (optval < 0xff) 19663 rack->r_ctl.rc_no_push_at_mrtt = optval; 19664 else 19665 error = EINVAL; 19666 break; 19667 case TCP_SHARED_CWND_ENABLE: 19668 RACK_OPTS_INC(tcp_rack_scwnd); 19669 if (optval == 0) 19670 rack->rack_enable_scwnd = 0; 19671 else 19672 rack->rack_enable_scwnd = 1; 19673 break; 19674 case TCP_RACK_MBUF_QUEUE: 19675 /* Now do we use the LRO mbuf-queue feature */ 19676 RACK_OPTS_INC(tcp_rack_mbufq); 19677 if (optval || rack->r_use_cmp_ack) 19678 rack->r_mbuf_queue = 1; 19679 else 19680 rack->r_mbuf_queue = 0; 19681 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19682 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19683 else 19684 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19685 break; 19686 case 
TCP_RACK_NONRXT_CFG_RATE: 19687 RACK_OPTS_INC(tcp_rack_cfg_rate); 19688 if (optval == 0) 19689 rack->rack_rec_nonrxt_use_cr = 0; 19690 else 19691 rack->rack_rec_nonrxt_use_cr = 1; 19692 break; 19693 case TCP_NO_PRR: 19694 RACK_OPTS_INC(tcp_rack_noprr); 19695 if (optval == 0) 19696 rack->rack_no_prr = 0; 19697 else if (optval == 1) 19698 rack->rack_no_prr = 1; 19699 else if (optval == 2) 19700 rack->no_prr_addback = 1; 19701 else 19702 error = EINVAL; 19703 break; 19704 case TCP_TIMELY_DYN_ADJ: 19705 RACK_OPTS_INC(tcp_timely_dyn); 19706 if (optval == 0) 19707 rack->rc_gp_dyn_mul = 0; 19708 else { 19709 rack->rc_gp_dyn_mul = 1; 19710 if (optval >= 100) { 19711 /* 19712 * If the user sets something 100 or more 19713 * its the gp_ca value. 19714 */ 19715 rack->r_ctl.rack_per_of_gp_ca = optval; 19716 } 19717 } 19718 break; 19719 case TCP_RACK_DO_DETECTION: 19720 RACK_OPTS_INC(tcp_rack_do_detection); 19721 if (optval == 0) 19722 rack->do_detection = 0; 19723 else 19724 rack->do_detection = 1; 19725 break; 19726 case TCP_RACK_TLP_USE: 19727 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19728 error = EINVAL; 19729 break; 19730 } 19731 RACK_OPTS_INC(tcp_tlp_use); 19732 rack->rack_tlp_threshold_use = optval; 19733 break; 19734 case TCP_RACK_TLP_REDUCE: 19735 /* RACK TLP cwnd reduction (bool) */ 19736 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19737 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19738 break; 19739 /* Pacing related ones */ 19740 case TCP_RACK_PACE_ALWAYS: 19741 /* 19742 * zero is old rack method, 1 is new 19743 * method using a pacing rate. 19744 */ 19745 RACK_OPTS_INC(tcp_rack_pace_always); 19746 if (optval > 0) { 19747 if (rack->rc_always_pace) { 19748 error = EALREADY; 19749 break; 19750 } else if (tcp_can_enable_pacing()) { 19751 rack->rc_always_pace = 1; 19752 if (rack->use_fixed_rate || rack->gp_ready) 19753 rack_set_cc_pacing(rack); 19754 } 19755 else { 19756 error = ENOSPC; 19757 break; 19758 } 19759 } else { 19760 if (rack->rc_always_pace) { 19761 tcp_decrement_paced_conn(); 19762 rack->rc_always_pace = 0; 19763 rack_undo_cc_pacing(rack); 19764 } 19765 } 19766 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19767 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19768 else 19769 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19770 /* A rate may be set irate or other, if so set seg size */ 19771 rack_update_seg(rack); 19772 break; 19773 case TCP_BBR_RACK_INIT_RATE: 19774 RACK_OPTS_INC(tcp_initial_rate); 19775 val = optval; 19776 /* Change from kbits per second to bytes per second */ 19777 val *= 1000; 19778 val /= 8; 19779 rack->r_ctl.init_rate = val; 19780 if (rack->rc_init_win != rack_default_init_window) { 19781 uint32_t win, snt; 19782 19783 /* 19784 * Options don't always get applied 19785 * in the order you think. So in order 19786 * to assure we update a cwnd we need 19787 * to check and see if we are still 19788 * where we should raise the cwnd. 
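 * As a worked example: an irate of 10000 (kbits/sec) becomes
 * 10000 * 1000 / 8 = 1,250,000 bytes/sec in init_rate, and the check
 * below only lifts snd_cwnd up to the configured initial window when
 * less than an initial window of data has been sent and cwnd is still
 * below that window.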
19789 */ 19790 win = rc_init_window(rack); 19791 if (SEQ_GT(tp->snd_max, tp->iss)) 19792 snt = tp->snd_max - tp->iss; 19793 else 19794 snt = 0; 19795 if ((snt < win) && 19796 (tp->snd_cwnd < win)) 19797 tp->snd_cwnd = win; 19798 } 19799 if (rack->rc_always_pace) 19800 rack_update_seg(rack); 19801 break; 19802 case TCP_BBR_IWINTSO: 19803 RACK_OPTS_INC(tcp_initial_win); 19804 if (optval && (optval <= 0xff)) { 19805 uint32_t win, snt; 19806 19807 rack->rc_init_win = optval; 19808 win = rc_init_window(rack); 19809 if (SEQ_GT(tp->snd_max, tp->iss)) 19810 snt = tp->snd_max - tp->iss; 19811 else 19812 snt = 0; 19813 if ((snt < win) && 19814 (tp->t_srtt | 19815 #ifdef NETFLIX_PEAKRATE 19816 tp->t_maxpeakrate | 19817 #endif 19818 rack->r_ctl.init_rate)) { 19819 /* 19820 * We are not past the initial window 19821 * and we have some bases for pacing, 19822 * so we need to possibly adjust up 19823 * the cwnd. Note even if we don't set 19824 * the cwnd, its still ok to raise the rc_init_win 19825 * which can be used coming out of idle when we 19826 * would have a rate. 19827 */ 19828 if (tp->snd_cwnd < win) 19829 tp->snd_cwnd = win; 19830 } 19831 if (rack->rc_always_pace) 19832 rack_update_seg(rack); 19833 } else 19834 error = EINVAL; 19835 break; 19836 case TCP_RACK_FORCE_MSEG: 19837 RACK_OPTS_INC(tcp_rack_force_max_seg); 19838 if (optval) 19839 rack->rc_force_max_seg = 1; 19840 else 19841 rack->rc_force_max_seg = 0; 19842 break; 19843 case TCP_RACK_PACE_MAX_SEG: 19844 /* Max segments size in a pace in bytes */ 19845 RACK_OPTS_INC(tcp_rack_max_seg); 19846 rack->rc_user_set_max_segs = optval; 19847 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19848 break; 19849 case TCP_RACK_PACE_RATE_REC: 19850 /* Set the fixed pacing rate in Bytes per second ca */ 19851 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19852 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19853 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19854 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19855 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19856 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19857 rack->use_fixed_rate = 1; 19858 if (rack->rc_always_pace) 19859 rack_set_cc_pacing(rack); 19860 rack_log_pacing_delay_calc(rack, 19861 rack->r_ctl.rc_fixed_pacing_rate_ss, 19862 rack->r_ctl.rc_fixed_pacing_rate_ca, 19863 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19864 __LINE__, NULL,0); 19865 break; 19866 19867 case TCP_RACK_PACE_RATE_SS: 19868 /* Set the fixed pacing rate in Bytes per second ca */ 19869 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19870 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19871 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19872 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19873 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19874 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19875 rack->use_fixed_rate = 1; 19876 if (rack->rc_always_pace) 19877 rack_set_cc_pacing(rack); 19878 rack_log_pacing_delay_calc(rack, 19879 rack->r_ctl.rc_fixed_pacing_rate_ss, 19880 rack->r_ctl.rc_fixed_pacing_rate_ca, 19881 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19882 __LINE__, NULL, 0); 19883 break; 19884 19885 case TCP_RACK_PACE_RATE_CA: 19886 /* Set the fixed pacing rate in Bytes per second ca */ 19887 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19888 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19889 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19890 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19891 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19892 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19893 rack->use_fixed_rate = 1; 19894 
if (rack->rc_always_pace) 19895 rack_set_cc_pacing(rack); 19896 rack_log_pacing_delay_calc(rack, 19897 rack->r_ctl.rc_fixed_pacing_rate_ss, 19898 rack->r_ctl.rc_fixed_pacing_rate_ca, 19899 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19900 __LINE__, NULL, 0); 19901 break; 19902 case TCP_RACK_GP_INCREASE_REC: 19903 RACK_OPTS_INC(tcp_gp_inc_rec); 19904 rack->r_ctl.rack_per_of_gp_rec = optval; 19905 rack_log_pacing_delay_calc(rack, 19906 rack->r_ctl.rack_per_of_gp_ss, 19907 rack->r_ctl.rack_per_of_gp_ca, 19908 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19909 __LINE__, NULL, 0); 19910 break; 19911 case TCP_RACK_GP_INCREASE_CA: 19912 RACK_OPTS_INC(tcp_gp_inc_ca); 19913 ca = optval; 19914 if (ca < 100) { 19915 /* 19916 * We don't allow any reduction 19917 * over the GP b/w. 19918 */ 19919 error = EINVAL; 19920 break; 19921 } 19922 rack->r_ctl.rack_per_of_gp_ca = ca; 19923 rack_log_pacing_delay_calc(rack, 19924 rack->r_ctl.rack_per_of_gp_ss, 19925 rack->r_ctl.rack_per_of_gp_ca, 19926 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19927 __LINE__, NULL, 0); 19928 break; 19929 case TCP_RACK_GP_INCREASE_SS: 19930 RACK_OPTS_INC(tcp_gp_inc_ss); 19931 ss = optval; 19932 if (ss < 100) { 19933 /* 19934 * We don't allow any reduction 19935 * over the GP b/w. 19936 */ 19937 error = EINVAL; 19938 break; 19939 } 19940 rack->r_ctl.rack_per_of_gp_ss = ss; 19941 rack_log_pacing_delay_calc(rack, 19942 rack->r_ctl.rack_per_of_gp_ss, 19943 rack->r_ctl.rack_per_of_gp_ca, 19944 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19945 __LINE__, NULL, 0); 19946 break; 19947 case TCP_RACK_RR_CONF: 19948 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 19949 if (optval && optval <= 3) 19950 rack->r_rr_config = optval; 19951 else 19952 rack->r_rr_config = 0; 19953 break; 19954 case TCP_HDWR_RATE_CAP: 19955 RACK_OPTS_INC(tcp_hdwr_rate_cap); 19956 if (optval) { 19957 if (rack->r_rack_hw_rate_caps == 0) 19958 rack->r_rack_hw_rate_caps = 1; 19959 else 19960 error = EALREADY; 19961 } else { 19962 rack->r_rack_hw_rate_caps = 0; 19963 } 19964 break; 19965 case TCP_BBR_HDWR_PACE: 19966 RACK_OPTS_INC(tcp_hdwr_pacing); 19967 if (optval){ 19968 if (rack->rack_hdrw_pacing == 0) { 19969 rack->rack_hdw_pace_ena = 1; 19970 rack->rack_attempt_hdwr_pace = 0; 19971 } else 19972 error = EALREADY; 19973 } else { 19974 rack->rack_hdw_pace_ena = 0; 19975 #ifdef RATELIMIT 19976 if (rack->r_ctl.crte != NULL) { 19977 rack->rack_hdrw_pacing = 0; 19978 rack->rack_attempt_hdwr_pace = 0; 19979 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 19980 rack->r_ctl.crte = NULL; 19981 } 19982 #endif 19983 } 19984 break; 19985 /* End Pacing related ones */ 19986 case TCP_RACK_PRR_SENDALOT: 19987 /* Allow PRR to send more than one seg */ 19988 RACK_OPTS_INC(tcp_rack_prr_sendalot); 19989 rack->r_ctl.rc_prr_sendalot = optval; 19990 break; 19991 case TCP_RACK_MIN_TO: 19992 /* Minimum time between rack t-o's in ms */ 19993 RACK_OPTS_INC(tcp_rack_min_to); 19994 rack->r_ctl.rc_min_to = optval; 19995 break; 19996 case TCP_RACK_EARLY_SEG: 19997 /* If early recovery max segments */ 19998 RACK_OPTS_INC(tcp_rack_early_seg); 19999 rack->r_ctl.rc_early_recovery_segs = optval; 20000 break; 20001 case TCP_RACK_REORD_THRESH: 20002 /* RACK reorder threshold (shift amount) */ 20003 RACK_OPTS_INC(tcp_rack_reord_thresh); 20004 if ((optval > 0) && (optval < 31)) 20005 rack->r_ctl.rc_reorder_shift = optval; 20006 else 20007 error = EINVAL; 20008 break; 20009 case TCP_RACK_REORD_FADE: 20010 /* Does reordering fade after ms time */ 20011 RACK_OPTS_INC(tcp_rack_reord_fade); 20012 rack->r_ctl.rc_reorder_fade = 
optval; 20013 break; 20014 case TCP_RACK_TLP_THRESH: 20015 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20016 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20017 if (optval) 20018 rack->r_ctl.rc_tlp_threshold = optval; 20019 else 20020 error = EINVAL; 20021 break; 20022 case TCP_BBR_USE_RACK_RR: 20023 RACK_OPTS_INC(tcp_rack_rr); 20024 if (optval) 20025 rack->use_rack_rr = 1; 20026 else 20027 rack->use_rack_rr = 0; 20028 break; 20029 case TCP_FAST_RSM_HACK: 20030 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20031 if (optval) 20032 rack->fast_rsm_hack = 1; 20033 else 20034 rack->fast_rsm_hack = 0; 20035 break; 20036 case TCP_RACK_PKT_DELAY: 20037 /* RACK added ms i.e. rack-rtt + reord + N */ 20038 RACK_OPTS_INC(tcp_rack_pkt_delay); 20039 rack->r_ctl.rc_pkt_delay = optval; 20040 break; 20041 case TCP_DELACK: 20042 RACK_OPTS_INC(tcp_rack_delayed_ack); 20043 if (optval == 0) 20044 tp->t_delayed_ack = 0; 20045 else 20046 tp->t_delayed_ack = 1; 20047 if (tp->t_flags & TF_DELACK) { 20048 tp->t_flags &= ~TF_DELACK; 20049 tp->t_flags |= TF_ACKNOW; 20050 NET_EPOCH_ENTER(et); 20051 rack_output(tp); 20052 NET_EPOCH_EXIT(et); 20053 } 20054 break; 20055 20056 case TCP_BBR_RACK_RTT_USE: 20057 RACK_OPTS_INC(tcp_rack_rtt_use); 20058 if ((optval != USE_RTT_HIGH) && 20059 (optval != USE_RTT_LOW) && 20060 (optval != USE_RTT_AVG)) 20061 error = EINVAL; 20062 else 20063 rack->r_ctl.rc_rate_sample_method = optval; 20064 break; 20065 case TCP_DATA_AFTER_CLOSE: 20066 RACK_OPTS_INC(tcp_data_after_close); 20067 if (optval) 20068 rack->rc_allow_data_af_clo = 1; 20069 else 20070 rack->rc_allow_data_af_clo = 0; 20071 break; 20072 default: 20073 break; 20074 } 20075 #ifdef NETFLIX_STATS 20076 tcp_log_socket_option(tp, sopt_name, optval, error); 20077 #endif 20078 return (error); 20079 } 20080 20081 20082 static void 20083 rack_apply_deferred_options(struct tcp_rack *rack) 20084 { 20085 struct deferred_opt_list *dol, *sdol; 20086 uint32_t s_optval; 20087 20088 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20089 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20090 /* Disadvantage of deferal is you loose the error return */ 20091 s_optval = (uint32_t)dol->optval; 20092 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20093 free(dol, M_TCPDO); 20094 } 20095 } 20096 20097 static void 20098 rack_hw_tls_change(struct tcpcb *tp, int chg) 20099 { 20100 /* 20101 * HW tls state has changed.. fix all 20102 * rsm's in flight. 
20103 */ 20104 struct tcp_rack *rack; 20105 struct rack_sendmap *rsm; 20106 20107 rack = (struct tcp_rack *)tp->t_fb_ptr; 20108 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20109 if (chg) 20110 rsm->r_hw_tls = 1; 20111 else 20112 rsm->r_hw_tls = 0; 20113 } 20114 if (chg) 20115 rack->r_ctl.fsb.hw_tls = 1; 20116 else 20117 rack->r_ctl.fsb.hw_tls = 0; 20118 } 20119 20120 static int 20121 rack_pru_options(struct tcpcb *tp, int flags) 20122 { 20123 if (flags & PRUS_OOB) 20124 return (EOPNOTSUPP); 20125 return (0); 20126 } 20127 20128 static struct tcp_function_block __tcp_rack = { 20129 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20130 .tfb_tcp_output = rack_output, 20131 .tfb_do_queued_segments = ctf_do_queued_segments, 20132 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20133 .tfb_tcp_do_segment = rack_do_segment, 20134 .tfb_tcp_ctloutput = rack_ctloutput, 20135 .tfb_tcp_fb_init = rack_init, 20136 .tfb_tcp_fb_fini = rack_fini, 20137 .tfb_tcp_timer_stop_all = rack_stopall, 20138 .tfb_tcp_timer_activate = rack_timer_activate, 20139 .tfb_tcp_timer_active = rack_timer_active, 20140 .tfb_tcp_timer_stop = rack_timer_stop, 20141 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20142 .tfb_tcp_handoff_ok = rack_handoff_ok, 20143 .tfb_tcp_mtu_chg = rack_mtu_change, 20144 .tfb_pru_options = rack_pru_options, 20145 .tfb_hwtls_change = rack_hw_tls_change, 20146 }; 20147 20148 /* 20149 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20150 * socket option arguments. When it re-acquires the lock after the copy, it 20151 * has to revalidate that the connection is still valid for the socket 20152 * option. 20153 */ 20154 static int 20155 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 20156 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 20157 { 20158 uint64_t loptval; 20159 int32_t error = 0, optval; 20160 20161 switch (sopt->sopt_name) { 20162 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20163 /* Pacing related ones */ 20164 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20165 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20166 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20167 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20168 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20169 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20170 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20171 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20172 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20173 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20174 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20175 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20176 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20177 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20178 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20179 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20180 /* End pacing related */ 20181 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20182 case TCP_DELACK: /* URL:delack (in base TCP i.e. 
 tcp_hints along with cc etc ) */
	case TCP_RACK_PRR_SENDALOT:	/* URL:prr_sendalot */
	case TCP_RACK_MIN_TO:	/* URL:min_to */
	case TCP_RACK_EARLY_SEG:	/* URL:early_seg */
	case TCP_RACK_REORD_THRESH:	/* URL:reord_thresh */
	case TCP_RACK_REORD_FADE:	/* URL:reord_fade */
	case TCP_RACK_TLP_THRESH:	/* URL:tlp_thresh */
	case TCP_RACK_PKT_DELAY:	/* URL:pkt_delay */
	case TCP_RACK_TLP_USE:	/* URL:tlp_use */
	case TCP_BBR_RACK_RTT_USE:	/* URL:rttuse */
	case TCP_BBR_USE_RACK_RR:	/* URL:rackrr */
	case TCP_RACK_DO_DETECTION:	/* URL:detect */
	case TCP_NO_PRR:	/* URL:noprr */
	case TCP_TIMELY_DYN_ADJ:	/* URL:dynamic */
	case TCP_DATA_AFTER_CLOSE:	/* no URL */
	case TCP_RACK_NONRXT_CFG_RATE:	/* URL:nonrxtcr */
	case TCP_SHARED_CWND_ENABLE:	/* URL:scwnd */
	case TCP_RACK_MBUF_QUEUE:	/* URL:mqueue */
	case TCP_RACK_NO_PUSH_AT_MAX:	/* URL:npush */
	case TCP_RACK_PACE_TO_FILL:	/* URL:fillcw */
	case TCP_SHARED_CWND_TIME_LIMIT:	/* URL:lscwnd */
	case TCP_RACK_PROFILE:	/* URL:profile */
	case TCP_USE_CMP_ACKS:	/* URL:cmpack */
	case TCP_RACK_ABC_VAL:	/* URL:labc */
	case TCP_REC_ABC_VAL:	/* URL:reclabc */
	case TCP_RACK_MEASURE_CNT:	/* URL:measurecnt */
	case TCP_DEFER_OPTIONS:	/* URL:defer */
	case TCP_RACK_DSACK_OPT:	/* URL:dsack */
	case TCP_RACK_PACING_BETA:	/* URL:pacing_beta */
	case TCP_RACK_PACING_BETA_ECN:	/* URL:pacing_beta_ecn */
	case TCP_RACK_TIMER_SLOP:	/* URL:timer_slop */
		break;
	default:
		/* Filter off all unknown options to the base stack */
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option trace;
		 * this means rates > 34Gbps won't show right, but that's
		 * probably ok.
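		 * (2^32 bytes/sec is roughly 34.4 Gbit/sec, which is where
		 * that figure comes from; the untruncated 64-bit value is
		 * still what gets applied as the rate cap.)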
20225 */ 20226 optval = (uint32_t)loptval; 20227 } else { 20228 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20229 /* Save it in 64 bit form too */ 20230 loptval = optval; 20231 } 20232 if (error) 20233 return (error); 20234 INP_WLOCK(inp); 20235 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20236 INP_WUNLOCK(inp); 20237 return (ECONNRESET); 20238 } 20239 if (tp->t_fb != &__tcp_rack) { 20240 INP_WUNLOCK(inp); 20241 return (ENOPROTOOPT); 20242 } 20243 if (rack->defer_options && (rack->gp_ready == 0) && 20244 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20245 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20246 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20247 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20248 /* Options are beind deferred */ 20249 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20250 INP_WUNLOCK(inp); 20251 return (0); 20252 } else { 20253 /* No memory to defer, fail */ 20254 INP_WUNLOCK(inp); 20255 return (ENOMEM); 20256 } 20257 } 20258 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20259 INP_WUNLOCK(inp); 20260 return (error); 20261 } 20262 20263 static void 20264 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20265 { 20266 20267 INP_WLOCK_ASSERT(tp->t_inpcb); 20268 bzero(ti, sizeof(*ti)); 20269 20270 ti->tcpi_state = tp->t_state; 20271 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20272 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20273 if (tp->t_flags & TF_SACK_PERMIT) 20274 ti->tcpi_options |= TCPI_OPT_SACK; 20275 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20276 ti->tcpi_options |= TCPI_OPT_WSCALE; 20277 ti->tcpi_snd_wscale = tp->snd_scale; 20278 ti->tcpi_rcv_wscale = tp->rcv_scale; 20279 } 20280 if (tp->t_flags2 & TF2_ECN_PERMIT) 20281 ti->tcpi_options |= TCPI_OPT_ECN; 20282 if (tp->t_flags & TF_FASTOPEN) 20283 ti->tcpi_options |= TCPI_OPT_TFO; 20284 /* still kept in ticks is t_rcvtime */ 20285 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20286 /* Since we hold everything in precise useconds this is easy */ 20287 ti->tcpi_rtt = tp->t_srtt; 20288 ti->tcpi_rttvar = tp->t_rttvar; 20289 ti->tcpi_rto = tp->t_rxtcur; 20290 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20291 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20292 /* 20293 * FreeBSD-specific extension fields for tcp_info. 20294 */ 20295 ti->tcpi_rcv_space = tp->rcv_wnd; 20296 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20297 ti->tcpi_snd_wnd = tp->snd_wnd; 20298 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 

static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & TF2_ECN_PERMIT)
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}
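
/*
 * A minimal sketch of the matching userland read side for rack_fill_info():
 * TCP_INFO returns the struct tcp_info snapshot assembled above.  This is
 * illustrative only, assumes a connected TCP socket, and is kept out of the
 * kernel build with "#if 0".
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>

static void
example_print_tcp_info(int s)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("state %u rtt %u us cwnd %u\n",
		    (unsigned int)ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_snd_cwnd);
}
#endif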

static int
rack_get_sockopt(struct socket *so, struct sockopt *sopt,
    struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack)
{
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this "less" value, but we
	 * do too with pacing :)
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write-only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		/* Huh? */
		goto out;
	}
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(so, sopt, inp, tp, rack));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(so, sopt, inp, tp, rack));
	}
out:
	INP_WUNLOCK(inp);
	return (error);
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
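
/*
 * A minimal sketch of how an application would select this stack once the
 * module above has registered its function-block name(s), assuming the
 * stack is registered under the name "rack" (the usual STACKNAME) and that
 * TCP_FUNCTION_BLK and struct tcp_function_set are visible from
 * <netinet/tcp.h>.  Illustrative only; excluded from the kernel build.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>

static int
example_select_rack_stack(int s)
{
	struct tcp_function_set fs;

	memset(&fs, 0, sizeof(fs));
	strlcpy(fs.function_set_name, "rack", sizeof(fs.function_set_name));
	/* Switch this socket to the RACK TCP stack registered above. */
	return (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &fs, sizeof(fs)));
}
#endif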