/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports
 * SACK initially and then assure that the RACK state matches the
 * connection state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
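/*
 * Illustrative sketch (not part of the stack): the decomposition described
 * above boils down to dispatching through a per-state handler once the
 * common preconditions (SACK support, RACK state kept in sync with the TCP
 * state) have been established.  The table and wrapper below use
 * hypothetical names and exist only to show the shape of that dispatch;
 * the real per-state handlers are the rack_do_* functions declared later
 * in this file.
 */
#if 0
typedef int (*example_state_fn_t)(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt,
    uint8_t iptos);

/* One entry per TCP state, e.g. [TCPS_ESTABLISHED] = rack_do_established. */
static example_state_fn_t example_state_tbl[TCP_NSTATES];

static int
example_dispatch(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
    /*
     * SACK was mandated when the connection moved onto this stack and the
     * RACK state tracks tp->t_state, so the handler that runs here needs
     * neither a switch on the state nor any SACK tests.
     */
    return (example_state_tbl[tp->t_state](m, th, so, tp, to, drop_hdrlen,
        tlen, tiwin, thflags, nxt_pkt, iptos));
}
#endif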
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;		/* 1; */
static int32_t rack_hw_rate_min = 0;		/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum rack timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* Bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;		/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250 msec (in usecs) */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times, so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
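/*
 * Worked check of the figure above: if each of the 12 backoffs doubles the
 * 30ms base, the total time spent is a geometric series:
 *
 *   30ms * (2^0 + 2^1 + ... + 2^11) = 30ms * (2^12 - 1)
 *                                   = 30ms * 4095 = 122,850ms ~= 122.85s
 */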
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;		/* Clamp to 130% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last, top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last, bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;		/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* i.e. the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];

#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
    (tv) = (value) + slop;					\
    if ((u_long)(tv) < (u_long)(tvmin))				\
        (tv) = (tvmin);						\
    if ((u_long)(tv) > (u_long)(tvmax))				\
        (tv) = (tvmax);						\
} while (0)

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags,
    int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;
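/*
 * Note on r_ctl.rc_saved_beta, which does double duty below: on entry to
 * rack_set_cc_pacing() it holds the beta/beta_ecn values rack wants newreno
 * to use while pacing; once those have been pushed into the CC module, the
 * connection's previous settings are stashed back into rc_saved_beta.
 * rack_undo_cc_pacing() performs the inverse swap, so pacing can be turned
 * on and off repeatedly without losing either set of values.
 */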
static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
    struct sockopt sopt;
    struct cc_newreno_opts opt;
    struct newreno old, *ptr;
    struct tcpcb *tp;
    int error;

    if (rack->rc_pacing_cc_set)
        return;

    tp = rack->rc_tp;
    if (tp->cc_algo == NULL) {
        /* Tcb is leaving */
        printf("No cc algorithm?\n");
        return;
    }
    rack->rc_pacing_cc_set = 1;
    if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
        /* Not new-reno, we can't play games with beta! */
        goto out;
    }
    ptr = ((struct newreno *)tp->ccv->cc_data);
    if (CC_ALGO(tp)->ctl_output == NULL) {
        /* Huh, why does new_reno no longer have a set function? */
        goto out;
    }
    if (ptr == NULL) {
        /* Just the default values */
        old.beta = V_newreno_beta;
        old.beta_ecn = V_newreno_beta_ecn;
        old.newreno_flags = 0;
    } else {
        old.beta = ptr->beta;
        old.beta_ecn = ptr->beta_ecn;
        old.newreno_flags = ptr->newreno_flags;
    }
    sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
    sopt.sopt_dir = SOPT_SET;
    opt.name = CC_NEWRENO_BETA;
    opt.val = rack->r_ctl.rc_saved_beta.beta;
    error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
    if (error) {
        goto out;
    }
    /*
     * Hack alert: we need to set in our newreno_flags
     * so that ABE behavior is also applied.
     */
    ((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
    opt.name = CC_NEWRENO_BETA_ECN;
    opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
    error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
    if (error) {
        goto out;
    }
    /* Save off the original values for later restoration */
    memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
    if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        ptr = ((struct newreno *)tp->ccv->cc_data);
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        if (ptr) {
            log.u_bbr.flex1 = ptr->beta;
            log.u_bbr.flex2 = ptr->beta_ecn;
            log.u_bbr.flex3 = ptr->newreno_flags;
        }
        log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
        log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
        log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
        log.u_bbr.flex7 = rack->gp_ready;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->use_fixed_rate;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
        log.u_bbr.flex8 = 3;
        tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
            0, &log, false, NULL, NULL, 0, &tv);
    }
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
    struct newreno old, *ptr;
    struct tcpcb *tp;

    if (rack->rc_pacing_cc_set == 0)
        return;
    tp = rack->rc_tp;
    rack->rc_pacing_cc_set = 0;
    if (tp->cc_algo == NULL)
        /* Tcb is leaving */
        return;
    if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
        /* Not new-reno, nothing to do! */
        return;
    }
    ptr = ((struct newreno *)tp->ccv->cc_data);
    if (ptr == NULL) {
        /*
         * This happens at rack_fini() if the
         * cc module gets freed on us. In that
         * case we lose our "new" settings but
         * that's ok, since the tcb is going away anyway.
         */
        return;
    }
    /* Grab out our set values */
    memcpy(&old, ptr, sizeof(struct newreno));
    /* Copy back in the original values */
    memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
    /* Now save back the values we had set in (for when pacing is restored) */
    memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
    if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        ptr = ((struct newreno *)tp->ccv->cc_data);
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        log.u_bbr.flex1 = ptr->beta;
        log.u_bbr.flex2 = ptr->beta_ecn;
        log.u_bbr.flex3 = ptr->newreno_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
        log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
        log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
        log.u_bbr.flex7 = rack->gp_ready;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->use_fixed_rate;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
        log.u_bbr.flex8 = 4;
        tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
            0, &log, false, NULL, NULL, 0, &tv);
    }
}

#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
    /* Keep in mind that t_maxpeakrate is in B/s. */
    uint64_t peak;

    peak = uqmax((tp->t_maxseg * 2),
        (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
    tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif
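/*
 * Worked example for rack_update_peakrate_thr() above (illustrative numbers
 * only, and assuming t_srtt is carried in microseconds as the division by
 * HPTS_USEC_IN_SEC suggests): with t_maxpeakrate = 12,500,000 B/s (100 Mb/s)
 * and t_srtt = 20,000 usec the rate term is
 *
 *   12,500,000 * 20,000 / 1,000,000 = 250,000 bytes
 *
 * i.e. roughly one srtt's worth of data at the peak rate.  The uqmax()
 * against 2 * t_maxseg keeps the threshold from collapsing below two
 * segments when the rate/rtt product is tiny.
 */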
static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
    uint32_t stat;
    int32_t error;

    error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
    if (error || req->newptr == NULL)
        return error;

    error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
    if (error)
        return (error);
    if (stat == 1) {
#ifdef INVARIANTS
        printf("Clearing RACK counters\n");
#endif
        counter_u64_zero(rack_tlp_tot);
        counter_u64_zero(rack_tlp_newdata);
        counter_u64_zero(rack_tlp_retran);
        counter_u64_zero(rack_tlp_retran_bytes);
        counter_u64_zero(rack_to_tot);
        counter_u64_zero(rack_saw_enobuf);
        counter_u64_zero(rack_saw_enobuf_hw);
        counter_u64_zero(rack_saw_enetunreach);
        counter_u64_zero(rack_persists_sends);
        counter_u64_zero(rack_persists_acks);
        counter_u64_zero(rack_persists_loss);
        counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
        counter_u64_zero(rack_adjust_map_bw);
#endif
        counter_u64_zero(rack_to_alloc_hard);
        counter_u64_zero(rack_to_alloc_emerg);
        counter_u64_zero(rack_sack_proc_all);
        counter_u64_zero(rack_fto_send);
        counter_u64_zero(rack_fto_rsm_send);
        counter_u64_zero(rack_extended_rfo);
        counter_u64_zero(rack_hw_pace_init_fail);
        counter_u64_zero(rack_hw_pace_lost);
        counter_u64_zero(rack_non_fto_send);
        counter_u64_zero(rack_nfto_resend);
        counter_u64_zero(rack_sack_proc_short);
        counter_u64_zero(rack_sack_proc_restart);
        counter_u64_zero(rack_to_alloc);
        counter_u64_zero(rack_to_alloc_limited);
        counter_u64_zero(rack_alloc_limited_conns);
        counter_u64_zero(rack_split_limited);
        counter_u64_zero(rack_multi_single_eq);
        counter_u64_zero(rack_proc_non_comp_ack);
        counter_u64_zero(rack_sack_attacks_detected);
        counter_u64_zero(rack_sack_attacks_reversed);
        counter_u64_zero(rack_sack_used_next_merge);
        counter_u64_zero(rack_sack_used_prev_merge);
        counter_u64_zero(rack_sack_splits);
        counter_u64_zero(rack_sack_skipped_acked);
        counter_u64_zero(rack_ack_total);
        counter_u64_zero(rack_express_sack);
        counter_u64_zero(rack_sack_total);
        counter_u64_zero(rack_move_none);
        counter_u64_zero(rack_move_some);
        counter_u64_zero(rack_try_scwnd);
        counter_u64_zero(rack_collapsed_win);
    }
    rack_clear_counter = 0;
    return (0);
}

static void
rack_init_sysctls(void)
{
    struct sysctl_oid *rack_counters;
    struct sysctl_oid *rack_attack;
    struct sysctl_oid *rack_pacing;
    struct sysctl_oid *rack_timely;
    struct sysctl_oid *rack_timers;
    struct sysctl_oid *rack_tlp;
    struct sysctl_oid *rack_misc;
    struct sysctl_oid *rack_features;
    struct sysctl_oid *rack_measure;
    struct sysctl_oid *rack_probertt;
    struct sysctl_oid *rack_hw_pacing;

    rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "sack_attack",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Sack Attack Counters and Controls");
    rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "stats",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Counters");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "rate_sample_method", CTLFLAG_RW,
        &rack_rate_sample_method, USE_RTT_LOW,
        "What method should we use for rate sampling 0=high, 1=low");
    /* Probe rtt related controls */
    rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "probertt",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "ProbeRTT related Controls");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
        &rack_atexit_prtt_hbp, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on a high-BDP path");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
        &rack_atexit_prtt, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_mul", CTLFLAG_RW,
        &rack_per_of_gp_probertt, 60,
        "What percentage of goodput do we pace at in probertt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
        &rack_per_of_gp_probertt_reduce, 10,
        "What percentage of goodput do we reduce every gp_srtt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_low", CTLFLAG_RW,
        &rack_per_of_gp_lowthresh, 40,
        "What percentage of goodput do we allow the multiplier to fall to");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "time_between", CTLFLAG_RW,
        &rack_time_between_probertt, 96000000,
        "How many useconds must pass after the lowest rtt falls before we enter probertt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "safety", CTLFLAG_RW,
        &rack_probe_rtt_safety_val, 2000000,
        "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "sets_cwnd", CTLFLAG_RW,
        &rack_probe_rtt_sets_cwnd, 0,
        "Do we set the cwnd too (if always_lower is on)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
        &rack_max_drain_wait, 2,
        "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
        &rack_must_drain, 1,
        "We must drain this many gp_srtt's waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_entry, 1,
        "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_exit, 0,
        "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_div", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_div, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_mul", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_mul, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
        &rack_min_probertt_hold, 200000,
        "What is the minimum time we hold probertt at target");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "filter_life", CTLFLAG_RW,
        &rack_probertt_filter_life, 10000000,
        "What is the time for the filters life in useconds");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "lower_within", CTLFLAG_RW,
        &rack_probertt_lower_within, 10,
        "If the rtt goes lower within this percentage of the time, go into probe-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "must_move", CTLFLAG_RW,
        &rack_min_rtt_movement, 250,
        "How much is the minimum movement in rtt to count as a drop for probertt purposes");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
        &rack_probertt_clear_is, 1,
        "Do we clear I/S counts on exiting probe-rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
        &rack_max_drain_hbp, 1,
        "How many extra drain gpsrtt's do we get in highly buffered paths");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_threshold", CTLFLAG_RW,
        &rack_hbp_thresh, 3,
        "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");
    /* Pacing related sysctls */
    rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "max_pace_over", CTLFLAG_RW,
        &rack_max_per_above, 30,
        "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_to_one", CTLFLAG_RW,
        &rack_pace_one_seg, 0,
        "Do we allow low b/w pacing of 1MSS instead of two");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
        &rack_limit_time_with_srtt, 0,
        "Do we limit pacing time based on srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "init_win", CTLFLAG_RW,
        &rack_default_init_window, 0,
        "Do we have a rack initial window 0 = system default");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ss", CTLFLAG_RW,
        &rack_per_of_gp_ss, 250,
        "If non zero, what percentage of goodput to pace at in slow start");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ca", CTLFLAG_RW,
        &rack_per_of_gp_ca, 150,
        "If non zero, what percentage of goodput to pace at in congestion avoidance");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_rec", CTLFLAG_RW,
        &rack_per_of_gp_rec, 200,
        "If non zero, what percentage of goodput to pace at in recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_max_seg", CTLFLAG_RW,
        &rack_hptsi_segments, 40,
        "What size is the max for TSO segments in pacing and burst mitigation");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "burst_reduces", CTLFLAG_RW,
        &rack_slot_reduction, 4,
        "When doing only burst mitigation what is the reduce divisor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "use_pacing", CTLFLAG_RW,
        &rack_pace_every_seg, 0,
        "If set we use pacing, if clear we use only the original burst mitigation");
    SYSCTL_ADD_U64(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_bw_rate_cap, 0,
        "If set we apply this value to the absolute rate cap used by pacing");
    SYSCTL_ADD_U8(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
        &rack_req_measurements, 1,
        "If doing dynamic pacing, how many measurements must be in before we start pacing?");
    /* Hardware pacing */
    rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "hdwr_pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Hardware Pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rwnd_factor", CTLFLAG_RW,
        &rack_hw_rwnd_factor, 2,
        "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
        &rack_enobuf_hw_boost_mult, 2,
        "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
        &rack_enobuf_hw_max, 2,
        "What is the max boost of the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
        &rack_enobuf_hw_min, 2,
        "What is the min boost of the pacing time if we see an ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "enable", CTLFLAG_RW,
        &rack_enable_hw_pacing, 0,
        "Should RACK attempt to use hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_hw_rate_caps, 1,
        "Does the highest hardware pacing rate cap the rate we will send at?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_min", CTLFLAG_RW,
        &rack_hw_rate_min, 0,
        "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_to_low", CTLFLAG_RW,
        &rack_hw_rate_to_low, 0,
        "If we fall below this rate, dis-engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "up_only", CTLFLAG_RW,
        &rack_hw_up_only, 1,
        "Do we allow hw pacing to lower the rate selected?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
        &rack_hw_pace_extra_slots, 2,
        "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
    rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timely",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Timely RTT Controls");
    /* Timely based GP dynamics */
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upper", CTLFLAG_RW,
        &rack_gp_per_bw_mul_up, 2,
        "Rack timely upper range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lower", CTLFLAG_RW,
        &rack_gp_per_bw_mul_down, 4,
        "Rack timely lower range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
        &rack_gp_rtt_maxmul, 3,
        "Rack timely multiplier of lowest rtt for rtt_max");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_div", CTLFLAG_RW,
        &rack_gp_rtt_mindiv, 4,
        "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
        &rack_gp_rtt_minmul, 1,
        "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "decrease", CTLFLAG_RW,
        &rack_gp_decrease_per, 20,
        "Rack timely decrease percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "increase", CTLFLAG_RW,
        &rack_gp_increase_per, 2,
        "Rack timely increase percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lowerbound", CTLFLAG_RW,
        &rack_per_lower_bound, 50,
        "Rack timely lowest percentage we allow GP multiplier to fall to");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundss", CTLFLAG_RW,
        &rack_per_upper_bound_ss, 0,
        "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundca", CTLFLAG_RW,
        &rack_per_upper_bound_ca, 0,
        "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dynamicgp", CTLFLAG_RW,
        &rack_do_dyn_mul, 0,
        "Rack timely do we enable dynamic timely goodput by default");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "no_rec_red", CTLFLAG_RW,
        &rack_gp_no_rec_chg, 1,
        "Rack timely do we prohibit the recovery multiplier from being lowered");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
        &rack_timely_dec_clear, 6,
        "Rack timely what threshold do we count to before another boost during b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_rise", CTLFLAG_RW,
        &rack_timely_max_push_rise, 3,
        "Rack timely how many times do we push up with b/w increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_drop", CTLFLAG_RW,
        &rack_timely_max_push_drop, 3,
        "Rack timely how many times do we push back on b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "min_segs", CTLFLAG_RW,
        &rack_timely_min_segs, 4,
        "Rack timely when setting the cwnd what is the min num segments");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "noback_max", CTLFLAG_RW,
        &rack_use_max_for_nobackoff, 0,
        "Rack timely when deciding whether to backoff on a loss, do we use under max rtt else min");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "interim_timely_only", CTLFLAG_RW,
        &rack_timely_int_timely_only, 0,
        "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "nonstop", CTLFLAG_RW,
        &rack_timely_no_stopping, 0,
        "Rack timely don't stop increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
        &rack_down_raise_thresh, 100,
        "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
        &rack_req_segs, 1,
        "Bottom dragging if not these many segments outstanding and room");

    /* TLP and Rack related parameters */
    rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "tlp",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "TLP and Rack related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_rrr", CTLFLAG_RW,
        &use_rack_rr, 1,
        "Do we use Rack Rapid Recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "post_rec_labc", CTLFLAG_RW,
        &rack_max_abc_post_recovery, 2,
        "Since we do early recovery, do we override the l_abc to a value, if so what?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
        &rack_non_rxt_use_cr, 0,
        "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpmethod", CTLFLAG_RW,
        &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
        "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "limit", CTLFLAG_RW,
        &rack_tlp_limit, 2,
        "How many TLP's can be sent without sending new data");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_greater", CTLFLAG_RW,
        &rack_tlp_use_greater, 1,
        "Should we use the rack_rtt time if it's greater than srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpminto", CTLFLAG_RW,
        &rack_tlp_min, 10000,
        "TLP minimum timeout per the specification (in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "send_oldest", CTLFLAG_RW,
        &rack_always_send_oldest, 0,
        "Should we always send the oldest TLP and RACK-TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rack_tlimit", CTLFLAG_RW,
        &rack_limited_retran, 0,
        "How many times can a rack timeout drive out sends");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
        &rack_lower_cwnd_at_tlp, 0,
        "When a TLP completes a retran should we enter recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_thresh", CTLFLAG_RW,
        &rack_reorder_thresh, 2,
        "What factor for rack will be added when seeing reordering (shift right)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
        &rack_tlp_thresh, 1,
        "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_fade", CTLFLAG_RW,
        &rack_reorder_fade, 60000000,
        "Does reorder detection fade, if so how many microseconds (0 means never)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "pktdelay", CTLFLAG_RW,
        &rack_pkt_delay, 1000,
        "Extra RACK time (in microseconds) besides reordering thresh");

    /* Timer related controls */
    rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timers",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Timer related controls");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmin", CTLFLAG_RW,
        &rack_persist_min, 250000,
        "What is the minimum time in microseconds between persists");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmax", CTLFLAG_RW,
        &rack_persist_max, 2000000,
        "What is the largest delay in microseconds between persists");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "delayed_ack", CTLFLAG_RW,
        &rack_delayed_ack_time, 40000,
        "Delayed ack time (40ms in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minrto", CTLFLAG_RW,
        &rack_rto_min, 30000,
        "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "maxrto", CTLFLAG_RW,
        &rack_rto_max, 4000000,
        "Maximum RTO in microseconds -- should be at least as large as min_rto");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minto", CTLFLAG_RW,
        &rack_min_to, 1000,
        "Minimum rack timeout in microseconds");
    /* Measure controls */
    rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "measure",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Measure related controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "wma_divisor", CTLFLAG_RW,
        &rack_wma_divisor, 8,
        "When doing b/w calculation what is the divisor for the WMA");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_cwnd", CTLFLAG_RW,
        &rack_cwnd_block_ends_measure, 0,
        "Does a cwnd just-return end the measurement window (app limited)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_rwnd", CTLFLAG_RW,
        &rack_rwnd_block_ends_measure, 0,
        "Does an rwnd just-return end the measurement window (app limited -- not persists)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_target", CTLFLAG_RW,
        &rack_def_data_window, 20,
        "What is the minimum target window (in mss) for GP measurements");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
OID_AUTO, "goal_bdp", CTLFLAG_RW, 1301 &rack_goal_bdp, 2, 1302 "What is the goal BDP to measure"); 1303 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1304 SYSCTL_CHILDREN(rack_measure), 1305 OID_AUTO, "min_srtts", CTLFLAG_RW, 1306 &rack_min_srtts, 1, 1307 "What is the goal BDP to measure"); 1308 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1309 SYSCTL_CHILDREN(rack_measure), 1310 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1311 &rack_min_measure_usec, 0, 1312 "What is the Minimum time time for a measurement if 0, this is off"); 1313 /* Features */ 1314 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1315 SYSCTL_CHILDREN(rack_sysctl_root), 1316 OID_AUTO, 1317 "features", 1318 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1319 "Feature controls"); 1320 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1321 SYSCTL_CHILDREN(rack_features), 1322 OID_AUTO, "cmpack", CTLFLAG_RW, 1323 &rack_use_cmp_acks, 1, 1324 "Should RACK have LRO send compressed acks"); 1325 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1326 SYSCTL_CHILDREN(rack_features), 1327 OID_AUTO, "fsb", CTLFLAG_RW, 1328 &rack_use_fsb, 1, 1329 "Should RACK use the fast send block?"); 1330 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1331 SYSCTL_CHILDREN(rack_features), 1332 OID_AUTO, "rfo", CTLFLAG_RW, 1333 &rack_use_rfo, 1, 1334 "Should RACK use rack_fast_output()?"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_features), 1337 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1338 &rack_use_rsm_rfo, 1, 1339 "Should RACK use rack_fast_rsm_output()?"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_features), 1342 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1343 &rack_enable_mqueue_for_nonpaced, 0, 1344 "Should RACK use mbuf queuing for non-paced connections"); 1345 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_features), 1347 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1348 &rack_do_hystart, 0, 1349 "Should RACK enable HyStart++ on connections?"); 1350 /* Misc rack controls */ 1351 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1352 SYSCTL_CHILDREN(rack_sysctl_root), 1353 OID_AUTO, 1354 "misc", 1355 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1356 "Misc related controls"); 1357 #ifdef TCP_ACCOUNTING 1358 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1359 SYSCTL_CHILDREN(rack_misc), 1360 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1361 &rack_tcp_accounting, 0, 1362 "Should we turn on TCP accounting for all rack sessions?"); 1363 #endif 1364 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1365 SYSCTL_CHILDREN(rack_misc), 1366 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1367 &rack_apply_rtt_with_reduced_conf, 0, 1368 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1369 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1370 SYSCTL_CHILDREN(rack_misc), 1371 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1372 &rack_dsack_std_based, 3, 1373 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1374 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1375 SYSCTL_CHILDREN(rack_misc), 1376 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1377 &rack_prr_addbackmax, 2, 1378 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1379 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1380 SYSCTL_CHILDREN(rack_misc), 1381 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1382 &rack_stats_gets_ms_rtt, 1, 1383 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1384 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1385 SYSCTL_CHILDREN(rack_misc), 1386 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1387 &rack_client_low_buf, 0, 1388 "Client low buffer level (below this 
we are more aggressive in DGP exiting recovery (0 = off)?"); 1389 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1390 SYSCTL_CHILDREN(rack_misc), 1391 OID_AUTO, "defprofile", CTLFLAG_RW, 1392 &rack_def_profile, 0, 1393 "Should RACK use a default profile (0=no, num == profile num)?"); 1394 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1395 SYSCTL_CHILDREN(rack_misc), 1396 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1397 &rack_enable_shared_cwnd, 1, 1398 "Should RACK try to use the shared cwnd on connections where allowed"); 1399 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1400 SYSCTL_CHILDREN(rack_misc), 1401 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1402 &rack_limits_scwnd, 1, 1403 "Should RACK place low end time limits on the shared cwnd feature"); 1404 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1405 SYSCTL_CHILDREN(rack_misc), 1406 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1407 &rack_use_imac_dack, 0, 1408 "Should RACK try to emulate iMac delayed ack"); 1409 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1410 SYSCTL_CHILDREN(rack_misc), 1411 OID_AUTO, "no_prr", CTLFLAG_RW, 1412 &rack_disable_prr, 0, 1413 "Should RACK not use prr and only pace (must have pacing on)"); 1414 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1415 SYSCTL_CHILDREN(rack_misc), 1416 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1417 &rack_verbose_logging, 0, 1418 "Should RACK black box logging be verbose"); 1419 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1420 SYSCTL_CHILDREN(rack_misc), 1421 OID_AUTO, "data_after_close", CTLFLAG_RW, 1422 &rack_ignore_data_after_close, 1, 1423 "Do we hold off sending a RST until all pending data is ack'd"); 1424 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1425 SYSCTL_CHILDREN(rack_misc), 1426 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1427 &rack_sack_not_required, 1, 1428 "Do we allow rack to run on connections not supporting SACK"); 1429 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1430 SYSCTL_CHILDREN(rack_misc), 1431 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1432 &rack_send_a_lot_in_prr, 1, 1433 "Send a lot in prr"); 1434 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1435 SYSCTL_CHILDREN(rack_misc), 1436 OID_AUTO, "autoscale", CTLFLAG_RW, 1437 &rack_autosndbuf_inc, 20, 1438 "What percentage should rack scale up its snd buffer by?"); 1439 /* Sack Attacker detection stuff */ 1440 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1441 SYSCTL_CHILDREN(rack_attack), 1442 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1443 &rack_highest_sack_thresh_seen, 0, 1444 "Highest sack to ack ratio seen"); 1445 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1446 SYSCTL_CHILDREN(rack_attack), 1447 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1448 &rack_highest_move_thresh_seen, 0, 1449 "Highest move to non-move ratio seen"); 1450 rack_ack_total = counter_u64_alloc(M_WAITOK); 1451 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_attack), 1453 OID_AUTO, "acktotal", CTLFLAG_RD, 1454 &rack_ack_total, 1455 "Total number of Ack's"); 1456 rack_express_sack = counter_u64_alloc(M_WAITOK); 1457 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1458 SYSCTL_CHILDREN(rack_attack), 1459 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1460 &rack_express_sack, 1461 "Total expresss number of Sack's"); 1462 rack_sack_total = counter_u64_alloc(M_WAITOK); 1463 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1464 SYSCTL_CHILDREN(rack_attack), 1465 OID_AUTO, "sacktotal", CTLFLAG_RD, 1466 &rack_sack_total, 1467 "Total number of SACKs"); 1468 rack_move_none = counter_u64_alloc(M_WAITOK); 1469 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1470 SYSCTL_CHILDREN(rack_attack), 1471 OID_AUTO, "move_none", CTLFLAG_RD, 1472 &rack_move_none, 1473 "Total number of SACK index reuse of postions under threshold"); 1474 
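/*
 * The SACK attack detection counters registered here are read-only and
 * show up under this stack's sysctl tree (with the default stack name,
 * roughly net.inet.tcp.rack.sack_attack.*), so they can be inspected
 * with sysctl(8) when a suspected SACK attacker is being investigated.
 */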
rack_move_some = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_attack),
    OID_AUTO, "move_some", CTLFLAG_RD,
    &rack_move_some,
    "Total number of SACK index reuse of positions over threshold");
rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_attack),
    OID_AUTO, "attacks", CTLFLAG_RD,
    &rack_sack_attacks_detected,
    "Total number of SACK attackers that had sack disabled");
rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_attack),
    OID_AUTO, "reversed", CTLFLAG_RD,
    &rack_sack_attacks_reversed,
    "Total number of SACK attackers that were later determined false positive");
rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_attack),
    OID_AUTO, "nextmerge", CTLFLAG_RD,
    &rack_sack_used_next_merge,
    "Total number of times we used the next merge");
rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_attack),
    OID_AUTO, "prevmerge", CTLFLAG_RD,
    &rack_sack_used_prev_merge,
    "Total number of times we used the prev merge");
/* Counters */
rack_fto_send = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "fto_send", CTLFLAG_RD,
    &rack_fto_send, "Total number of rack_fast_output sends");
rack_fto_rsm_send = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "fto_rsm_send", CTLFLAG_RD,
    &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends");
rack_nfto_resend = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "nfto_resend", CTLFLAG_RD,
    &rack_nfto_resend, "Total number of rack_output retransmissions");
rack_non_fto_send = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "nfto_send", CTLFLAG_RD,
    &rack_non_fto_send, "Total number of rack_output first sends");
rack_extended_rfo = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "rfo_extended", CTLFLAG_RD,
    &rack_extended_rfo, "Total number of times we extended rfo");

rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "hwpace_init_fail", CTLFLAG_RD,
    &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing");
rack_hw_pace_lost = counter_u64_alloc(M_WAITOK);

SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "hwpace_lost", CTLFLAG_RD,
    &rack_hw_pace_lost, "Total number of times we lost our hw pacing rate after it was set up");
rack_tlp_tot = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "tlp_to_total", CTLFLAG_RD,
    &rack_tlp_tot,
    "Total number of tail loss probe expirations");
rack_tlp_newdata = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "tlp_new", CTLFLAG_RD,
    &rack_tlp_newdata,
    "Total number of tail loss probe sending new data");
rack_tlp_retran = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "tlp_retran", CTLFLAG_RD,
    &rack_tlp_retran,
    "Total number of tail loss probe sending retransmitted data");
rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD,
    &rack_tlp_retran_bytes,
    "Total bytes of tail loss probe sending retransmitted data");
rack_to_tot = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "rack_to_tot", CTLFLAG_RD,
    &rack_to_tot,
    "Total number of times the rack timeout expired");
rack_saw_enobuf = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "saw_enobufs", CTLFLAG_RD,
    &rack_saw_enobuf,
    "Total number of times a send returned ENOBUFS for non-hdwr paced connections");
rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD,
    &rack_saw_enobuf_hw,
    "Total number of times a send returned ENOBUFS for hdwr paced connections");
rack_saw_enetunreach = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "saw_enetunreach", CTLFLAG_RD,
    &rack_saw_enetunreach,
    "Total number of times a send received an ENETUNREACH");
rack_hot_alloc = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "alloc_hot", CTLFLAG_RD,
    &rack_hot_alloc,
    "Total allocations from the top of our list");
rack_to_alloc = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "allocs", CTLFLAG_RD,
    &rack_to_alloc,
    "Total allocations of tracking structures");
rack_to_alloc_hard = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "allochard", CTLFLAG_RD,
    &rack_to_alloc_hard,
    "Total allocations done with sleeping the hard way");
rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "allocemerg", CTLFLAG_RD,
    &rack_to_alloc_emerg,
    "Total allocations done from emergency cache");
rack_to_alloc_limited = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "alloc_limited", CTLFLAG_RD,
    &rack_to_alloc_limited,
    "Total allocations dropped due to limit");
rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "alloc_limited_conns", CTLFLAG_RD,
    &rack_alloc_limited_conns,
    "Connections with allocations dropped due to limit");
rack_split_limited = counter_u64_alloc(M_WAITOK);
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx,
    SYSCTL_CHILDREN(rack_counters),
    OID_AUTO, "split_limited", CTLFLAG_RD,
&rack_split_limited, 1631 "Split allocations dropped due to limit"); 1632 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1633 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1634 SYSCTL_CHILDREN(rack_counters), 1635 OID_AUTO, "persist_sends", CTLFLAG_RD, 1636 &rack_persists_sends, 1637 "Number of times we sent a persist probe"); 1638 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1639 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1640 SYSCTL_CHILDREN(rack_counters), 1641 OID_AUTO, "persist_acks", CTLFLAG_RD, 1642 &rack_persists_acks, 1643 "Number of times a persist probe was acked"); 1644 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1645 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1646 SYSCTL_CHILDREN(rack_counters), 1647 OID_AUTO, "persist_loss", CTLFLAG_RD, 1648 &rack_persists_loss, 1649 "Number of times we detected a lost persist probe (no ack)"); 1650 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1651 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1652 SYSCTL_CHILDREN(rack_counters), 1653 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1654 &rack_persists_lost_ends, 1655 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1656 #ifdef INVARIANTS 1657 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1658 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1659 SYSCTL_CHILDREN(rack_counters), 1660 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1661 &rack_adjust_map_bw, 1662 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1663 #endif 1664 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1665 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_counters), 1667 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1668 &rack_multi_single_eq, 1669 "Number of compressed acks total represented"); 1670 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1671 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1672 SYSCTL_CHILDREN(rack_counters), 1673 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1674 &rack_proc_non_comp_ack, 1675 "Number of non compresseds acks that we processed"); 1676 1677 1678 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "sack_long", CTLFLAG_RD, 1682 &rack_sack_proc_all, 1683 "Total times we had to walk whole list for sack processing"); 1684 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "sack_restart", CTLFLAG_RD, 1688 &rack_sack_proc_restart, 1689 "Total times we had to walk whole list due to a restart"); 1690 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1692 SYSCTL_CHILDREN(rack_counters), 1693 OID_AUTO, "sack_short", CTLFLAG_RD, 1694 &rack_sack_proc_short, 1695 "Total times we took shortcut for sack processing"); 1696 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_attack), 1699 OID_AUTO, "skipacked", CTLFLAG_RD, 1700 &rack_sack_skipped_acked, 1701 "Total number of times we skipped previously sacked"); 1702 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_attack), 1705 OID_AUTO, "ofsplit", CTLFLAG_RD, 1706 &rack_sack_splits, 1707 "Total number of times we did the old fashion tree split"); 1708 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1710 SYSCTL_CHILDREN(rack_counters), 1711 
OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1712 &rack_input_idle_reduces, 1713 "Total number of idle reductions on input"); 1714 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1716 SYSCTL_CHILDREN(rack_counters), 1717 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1718 &rack_collapsed_win, 1719 "Total number of collapsed windows"); 1720 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1722 SYSCTL_CHILDREN(rack_counters), 1723 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1724 &rack_try_scwnd, 1725 "Total number of scwnd attempts"); 1726 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1727 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1728 OID_AUTO, "outsize", CTLFLAG_RD, 1729 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1730 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1732 OID_AUTO, "opts", CTLFLAG_RD, 1733 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1734 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1735 SYSCTL_CHILDREN(rack_sysctl_root), 1736 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1737 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1738 } 1739 1740 static __inline int 1741 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1742 { 1743 if (SEQ_GEQ(b->r_start, a->r_start) && 1744 SEQ_LT(b->r_start, a->r_end)) { 1745 /* 1746 * The entry b is within the 1747 * block a. i.e.: 1748 * a -- |-------------| 1749 * b -- |----| 1750 * <or> 1751 * b -- |------| 1752 * <or> 1753 * b -- |-----------| 1754 */ 1755 return (0); 1756 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1757 /* 1758 * b falls as either the next 1759 * sequence block after a so a 1760 * is said to be smaller than b. 1761 * i.e: 1762 * a -- |------| 1763 * b -- |--------| 1764 * or 1765 * b -- |-----| 1766 */ 1767 return (1); 1768 } 1769 /* 1770 * Whats left is where a is 1771 * larger than b. i.e: 1772 * a -- |-------| 1773 * b -- |---| 1774 * or even possibly 1775 * b -- |--------------| 1776 */ 1777 return (-1); 1778 } 1779 1780 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1781 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1782 1783 static uint32_t 1784 rc_init_window(struct tcp_rack *rack) 1785 { 1786 uint32_t win; 1787 1788 if (rack->rc_init_win == 0) { 1789 /* 1790 * Nothing set by the user, use the system stack 1791 * default. 1792 */ 1793 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1794 } 1795 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1796 return (win); 1797 } 1798 1799 static uint64_t 1800 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1801 { 1802 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1803 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1804 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1805 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1806 else 1807 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1808 } 1809 1810 static uint64_t 1811 rack_get_bw(struct tcp_rack *rack) 1812 { 1813 if (rack->use_fixed_rate) { 1814 /* Return the fixed pacing rate */ 1815 return (rack_get_fixed_pacing_bw(rack)); 1816 } 1817 if (rack->r_ctl.gp_bw == 0) { 1818 /* 1819 * We have yet no b/w measurement, 1820 * if we have a user set initial bw 1821 * return it. 
If we don't have that and 1822 * we have an srtt, use the tcp IW (10) to 1823 * calculate a fictional b/w over the SRTT 1824 * which is more or less a guess. Note 1825 * we don't use our IW from rack on purpose 1826 * so if we have like IW=30, we are not 1827 * calculating a "huge" b/w. 1828 */ 1829 uint64_t bw, srtt; 1830 if (rack->r_ctl.init_rate) 1831 return (rack->r_ctl.init_rate); 1832 1833 /* Has the user set a max peak rate? */ 1834 #ifdef NETFLIX_PEAKRATE 1835 if (rack->rc_tp->t_maxpeakrate) 1836 return (rack->rc_tp->t_maxpeakrate); 1837 #endif 1838 /* Ok lets come up with the IW guess, if we have a srtt */ 1839 if (rack->rc_tp->t_srtt == 0) { 1840 /* 1841 * Go with old pacing method 1842 * i.e. burst mitigation only. 1843 */ 1844 return (0); 1845 } 1846 /* Ok lets get the initial TCP win (not racks) */ 1847 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1848 srtt = (uint64_t)rack->rc_tp->t_srtt; 1849 bw *= (uint64_t)USECS_IN_SECOND; 1850 bw /= srtt; 1851 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1852 bw = rack->r_ctl.bw_rate_cap; 1853 return (bw); 1854 } else { 1855 uint64_t bw; 1856 1857 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1858 /* Averaging is done, we can return the value */ 1859 bw = rack->r_ctl.gp_bw; 1860 } else { 1861 /* Still doing initial average must calculate */ 1862 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1863 } 1864 #ifdef NETFLIX_PEAKRATE 1865 if ((rack->rc_tp->t_maxpeakrate) && 1866 (bw > rack->rc_tp->t_maxpeakrate)) { 1867 /* The user has set a peak rate to pace at 1868 * don't allow us to pace faster than that. 1869 */ 1870 return (rack->rc_tp->t_maxpeakrate); 1871 } 1872 #endif 1873 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1874 bw = rack->r_ctl.bw_rate_cap; 1875 return (bw); 1876 } 1877 } 1878 1879 static uint16_t 1880 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1881 { 1882 if (rack->use_fixed_rate) { 1883 return (100); 1884 } else if (rack->in_probe_rtt && (rsm == NULL)) 1885 return (rack->r_ctl.rack_per_of_gp_probertt); 1886 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1887 rack->r_ctl.rack_per_of_gp_rec)) { 1888 if (rsm) { 1889 /* a retransmission always use the recovery rate */ 1890 return (rack->r_ctl.rack_per_of_gp_rec); 1891 } else if (rack->rack_rec_nonrxt_use_cr) { 1892 /* Directed to use the configured rate */ 1893 goto configured_rate; 1894 } else if (rack->rack_no_prr && 1895 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1896 /* No PRR, lets just use the b/w estimate only */ 1897 return (100); 1898 } else { 1899 /* 1900 * Here we may have a non-retransmit but we 1901 * have no overrides, so just use the recovery 1902 * rate (prr is in effect). 1903 */ 1904 return (rack->r_ctl.rack_per_of_gp_rec); 1905 } 1906 } 1907 configured_rate: 1908 /* For the configured rate we look at our cwnd vs the ssthresh */ 1909 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1910 return (rack->r_ctl.rack_per_of_gp_ss); 1911 else 1912 return (rack->r_ctl.rack_per_of_gp_ca); 1913 } 1914 1915 static void 1916 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1917 { 1918 /* 1919 * Types of logs (mod value) 1920 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1921 * 2 = a dsack round begins, persist is reset to 16. 
1922 * 3 = a dsack round ends 1923 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1924 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1925 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 1926 */ 1927 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1928 union tcp_log_stackspecific log; 1929 struct timeval tv; 1930 1931 memset(&log, 0, sizeof(log)); 1932 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 1933 log.u_bbr.flex1 <<= 1; 1934 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 1935 log.u_bbr.flex1 <<= 1; 1936 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 1937 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 1938 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 1939 log.u_bbr.flex4 = flex4; 1940 log.u_bbr.flex5 = flex5; 1941 log.u_bbr.flex6 = flex6; 1942 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 1943 log.u_bbr.flex8 = mod; 1944 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1945 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1946 &rack->rc_inp->inp_socket->so_rcv, 1947 &rack->rc_inp->inp_socket->so_snd, 1948 RACK_DSACK_HANDLING, 0, 1949 0, &log, false, &tv); 1950 } 1951 } 1952 1953 static void 1954 rack_log_hdwr_pacing(struct tcp_rack *rack, 1955 uint64_t rate, uint64_t hw_rate, int line, 1956 int error, uint16_t mod) 1957 { 1958 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1959 union tcp_log_stackspecific log; 1960 struct timeval tv; 1961 const struct ifnet *ifp; 1962 1963 memset(&log, 0, sizeof(log)); 1964 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 1965 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 1966 if (rack->r_ctl.crte) { 1967 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 1968 } else if (rack->rc_inp->inp_route.ro_nh && 1969 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 1970 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 1971 } else 1972 ifp = NULL; 1973 if (ifp) { 1974 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 1975 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 1976 } 1977 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1978 log.u_bbr.bw_inuse = rate; 1979 log.u_bbr.flex5 = line; 1980 log.u_bbr.flex6 = error; 1981 log.u_bbr.flex7 = mod; 1982 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 1983 log.u_bbr.flex8 = rack->use_fixed_rate; 1984 log.u_bbr.flex8 <<= 1; 1985 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 1986 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 1987 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 1988 if (rack->r_ctl.crte) 1989 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 1990 else 1991 log.u_bbr.cur_del_rate = 0; 1992 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 1993 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1994 &rack->rc_inp->inp_socket->so_rcv, 1995 &rack->rc_inp->inp_socket->so_snd, 1996 BBR_LOG_HDWR_PACE, 0, 1997 0, &log, false, &tv); 1998 } 1999 } 2000 2001 static uint64_t 2002 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2003 { 2004 /* 2005 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
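 * In other words bw_est = (bw * gain) / 100, floored at RACK_MIN_BW
 * (64kbps by default) and, when hardware rate caps are enabled, capped
 * at the highest rate the interface's rate table offers.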
2006 */ 2007 uint64_t bw_est, high_rate; 2008 uint64_t gain; 2009 2010 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2011 bw_est = bw * gain; 2012 bw_est /= (uint64_t)100; 2013 /* Never fall below the minimum (def 64kbps) */ 2014 if (bw_est < RACK_MIN_BW) 2015 bw_est = RACK_MIN_BW; 2016 if (rack->r_rack_hw_rate_caps) { 2017 /* Rate caps are in place */ 2018 if (rack->r_ctl.crte != NULL) { 2019 /* We have a hdwr rate already */ 2020 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2021 if (bw_est >= high_rate) { 2022 /* We are capping bw at the highest rate table entry */ 2023 rack_log_hdwr_pacing(rack, 2024 bw_est, high_rate, __LINE__, 2025 0, 3); 2026 bw_est = high_rate; 2027 if (capped) 2028 *capped = 1; 2029 } 2030 } else if ((rack->rack_hdrw_pacing == 0) && 2031 (rack->rack_hdw_pace_ena) && 2032 (rack->rack_attempt_hdwr_pace == 0) && 2033 (rack->rc_inp->inp_route.ro_nh != NULL) && 2034 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2035 /* 2036 * Special case, we have not yet attempted hardware 2037 * pacing, and yet we may, when we do, find out if we are 2038 * above the highest rate. We need to know the maxbw for the interface 2039 * in question (if it supports ratelimiting). We get back 2040 * a 0, if the interface is not found in the RL lists. 2041 */ 2042 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2043 if (high_rate) { 2044 /* Yep, we have a rate is it above this rate? */ 2045 if (bw_est > high_rate) { 2046 bw_est = high_rate; 2047 if (capped) 2048 *capped = 1; 2049 } 2050 } 2051 } 2052 } 2053 return (bw_est); 2054 } 2055 2056 static void 2057 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2058 { 2059 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2060 union tcp_log_stackspecific log; 2061 struct timeval tv; 2062 2063 if ((mod != 1) && (rack_verbose_logging == 0)) { 2064 /* 2065 * We get 3 values currently for mod 2066 * 1 - We are retransmitting and this tells the reason. 2067 * 2 - We are clearing a dup-ack count. 2068 * 3 - We are incrementing a dup-ack count. 2069 * 2070 * The clear/increment are only logged 2071 * if you have BBverbose on. 
2072 */ 2073 return; 2074 } 2075 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2076 log.u_bbr.flex1 = tsused; 2077 log.u_bbr.flex2 = thresh; 2078 log.u_bbr.flex3 = rsm->r_flags; 2079 log.u_bbr.flex4 = rsm->r_dupack; 2080 log.u_bbr.flex5 = rsm->r_start; 2081 log.u_bbr.flex6 = rsm->r_end; 2082 log.u_bbr.flex8 = mod; 2083 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2084 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2085 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2086 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2087 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2088 log.u_bbr.pacing_gain = rack->r_must_retran; 2089 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2090 &rack->rc_inp->inp_socket->so_rcv, 2091 &rack->rc_inp->inp_socket->so_snd, 2092 BBR_LOG_SETTINGS_CHG, 0, 2093 0, &log, false, &tv); 2094 } 2095 } 2096 2097 static void 2098 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2099 { 2100 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2101 union tcp_log_stackspecific log; 2102 struct timeval tv; 2103 2104 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2105 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2106 log.u_bbr.flex2 = to; 2107 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2108 log.u_bbr.flex4 = slot; 2109 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2110 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2111 log.u_bbr.flex7 = rack->rc_in_persist; 2112 log.u_bbr.flex8 = which; 2113 if (rack->rack_no_prr) 2114 log.u_bbr.pkts_out = 0; 2115 else 2116 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2117 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2118 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2119 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2120 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2121 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2122 log.u_bbr.pacing_gain = rack->r_must_retran; 2123 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2124 log.u_bbr.lost = rack_rto_min; 2125 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2126 &rack->rc_inp->inp_socket->so_rcv, 2127 &rack->rc_inp->inp_socket->so_snd, 2128 BBR_LOG_TIMERSTAR, 0, 2129 0, &log, false, &tv); 2130 } 2131 } 2132 2133 static void 2134 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2135 { 2136 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2137 union tcp_log_stackspecific log; 2138 struct timeval tv; 2139 2140 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2141 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2142 log.u_bbr.flex8 = to_num; 2143 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2144 log.u_bbr.flex2 = rack->rc_rack_rtt; 2145 if (rsm == NULL) 2146 log.u_bbr.flex3 = 0; 2147 else 2148 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2149 if (rack->rack_no_prr) 2150 log.u_bbr.flex5 = 0; 2151 else 2152 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2153 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2154 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2155 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2156 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2157 log.u_bbr.pacing_gain = rack->r_must_retran; 2158 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2159 &rack->rc_inp->inp_socket->so_rcv, 2160 &rack->rc_inp->inp_socket->so_snd, 2161 BBR_LOG_RTO, 0, 2162 0, &log, false, &tv); 2163 } 2164 } 2165 2166 static void 2167 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2168 struct rack_sendmap *prev, 2169 struct rack_sendmap *rsm, 2170 struct rack_sendmap *next, 2171 int flag, uint32_t 
th_ack, int line) 2172 { 2173 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2174 union tcp_log_stackspecific log; 2175 struct timeval tv; 2176 2177 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2178 log.u_bbr.flex8 = flag; 2179 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2180 log.u_bbr.cur_del_rate = (uint64_t)prev; 2181 log.u_bbr.delRate = (uint64_t)rsm; 2182 log.u_bbr.rttProp = (uint64_t)next; 2183 log.u_bbr.flex7 = 0; 2184 if (prev) { 2185 log.u_bbr.flex1 = prev->r_start; 2186 log.u_bbr.flex2 = prev->r_end; 2187 log.u_bbr.flex7 |= 0x4; 2188 } 2189 if (rsm) { 2190 log.u_bbr.flex3 = rsm->r_start; 2191 log.u_bbr.flex4 = rsm->r_end; 2192 log.u_bbr.flex7 |= 0x2; 2193 } 2194 if (next) { 2195 log.u_bbr.flex5 = next->r_start; 2196 log.u_bbr.flex6 = next->r_end; 2197 log.u_bbr.flex7 |= 0x1; 2198 } 2199 log.u_bbr.applimited = line; 2200 log.u_bbr.pkts_out = th_ack; 2201 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2203 if (rack->rack_no_prr) 2204 log.u_bbr.lost = 0; 2205 else 2206 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2207 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2208 &rack->rc_inp->inp_socket->so_rcv, 2209 &rack->rc_inp->inp_socket->so_snd, 2210 TCP_LOG_MAPCHG, 0, 2211 0, &log, false, &tv); 2212 } 2213 } 2214 2215 static void 2216 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2217 struct rack_sendmap *rsm, int conf) 2218 { 2219 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2220 union tcp_log_stackspecific log; 2221 struct timeval tv; 2222 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2223 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2224 log.u_bbr.flex1 = t; 2225 log.u_bbr.flex2 = len; 2226 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2227 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2228 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2229 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2230 log.u_bbr.flex7 = conf; 2231 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2232 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2233 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2234 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2235 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2236 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2237 if (rsm) { 2238 log.u_bbr.pkt_epoch = rsm->r_start; 2239 log.u_bbr.lost = rsm->r_end; 2240 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2241 /* We loose any upper of the 24 bits */ 2242 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2243 } else { 2244 /* Its a SYN */ 2245 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2246 log.u_bbr.lost = 0; 2247 log.u_bbr.cwnd_gain = 0; 2248 log.u_bbr.pacing_gain = 0; 2249 } 2250 /* Write out general bits of interest rrs here */ 2251 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2252 log.u_bbr.use_lt_bw <<= 1; 2253 log.u_bbr.use_lt_bw |= rack->forced_ack; 2254 log.u_bbr.use_lt_bw <<= 1; 2255 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2256 log.u_bbr.use_lt_bw <<= 1; 2257 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2258 log.u_bbr.use_lt_bw <<= 1; 2259 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2260 log.u_bbr.use_lt_bw <<= 1; 2261 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2262 log.u_bbr.use_lt_bw <<= 1; 2263 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2264 log.u_bbr.use_lt_bw <<= 1; 2265 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2266 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2267 log.u_bbr.epoch = 
rack->r_ctl.rc_time_probertt_starts;
        log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered;
        log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts;
        log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt;
        log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
        log.u_bbr.bw_inuse <<= 32;
        if (rsm)
            log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]);
        TCP_LOG_EVENTP(tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
            BBR_LOG_BBRRTT, 0,
            0, &log, false, &tv);
    }
}

static void
rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
{
    /*
     * Log the rtt sample we are
     * applying to the srtt algorithm in
     * useconds.
     */
    if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
        union tcp_log_stackspecific log;
        struct timeval tv;

        memset(&log, 0, sizeof(log));
        log.u_bbr.flex1 = rtt;
        log.u_bbr.flex2 = rack->r_ctl.ack_count;
        log.u_bbr.flex3 = rack->r_ctl.sack_count;
        log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move;
        log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra;
        log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
        log.u_bbr.flex7 = 1;
        log.u_bbr.flex8 = rack->sack_attack_disable;
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto;
        log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto;
        log.u_bbr.pacing_gain = rack->r_must_retran;
        /*
         * We capture in delRate the upper 32 bits as
         * the confidence level we had declared, and the
         * lower 32 bits as the actual RTT using the arrival
         * timestamp.
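         * A post-processing tool can split the two back apart, e.g.
         * confidence = (delRate >> 32) and rtt_us = (delRate & 0xffffffff).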
2317 */ 2318 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2319 log.u_bbr.delRate <<= 32; 2320 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2321 /* Lets capture all the things that make up t_rtxcur */ 2322 log.u_bbr.applimited = rack_rto_min; 2323 log.u_bbr.epoch = rack_rto_max; 2324 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2325 log.u_bbr.lost = rack_rto_min; 2326 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2327 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2328 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2329 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2330 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2331 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2332 &rack->rc_inp->inp_socket->so_rcv, 2333 &rack->rc_inp->inp_socket->so_snd, 2334 TCP_LOG_RTT, 0, 2335 0, &log, false, &tv); 2336 } 2337 } 2338 2339 static void 2340 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2341 { 2342 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2343 union tcp_log_stackspecific log; 2344 struct timeval tv; 2345 2346 /* Convert our ms to a microsecond */ 2347 memset(&log, 0, sizeof(log)); 2348 log.u_bbr.flex1 = rtt; 2349 log.u_bbr.flex2 = send_time; 2350 log.u_bbr.flex3 = ack_time; 2351 log.u_bbr.flex4 = where; 2352 log.u_bbr.flex7 = 2; 2353 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2354 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2355 &rack->rc_inp->inp_socket->so_rcv, 2356 &rack->rc_inp->inp_socket->so_snd, 2357 TCP_LOG_RTT, 0, 2358 0, &log, false, &tv); 2359 } 2360 } 2361 2362 2363 2364 static inline void 2365 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2366 { 2367 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2368 union tcp_log_stackspecific log; 2369 struct timeval tv; 2370 2371 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2372 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2373 log.u_bbr.flex1 = line; 2374 log.u_bbr.flex2 = tick; 2375 log.u_bbr.flex3 = tp->t_maxunacktime; 2376 log.u_bbr.flex4 = tp->t_acktime; 2377 log.u_bbr.flex8 = event; 2378 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2380 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2381 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2382 log.u_bbr.pacing_gain = rack->r_must_retran; 2383 TCP_LOG_EVENTP(tp, NULL, 2384 &rack->rc_inp->inp_socket->so_rcv, 2385 &rack->rc_inp->inp_socket->so_snd, 2386 BBR_LOG_PROGRESS, 0, 2387 0, &log, false, &tv); 2388 } 2389 } 2390 2391 static void 2392 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2393 { 2394 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2395 union tcp_log_stackspecific log; 2396 2397 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2398 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2399 log.u_bbr.flex1 = slot; 2400 if (rack->rack_no_prr) 2401 log.u_bbr.flex2 = 0; 2402 else 2403 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2404 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2405 log.u_bbr.flex8 = rack->rc_in_persist; 2406 log.u_bbr.timeStamp = cts; 2407 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2408 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2409 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2410 log.u_bbr.pacing_gain = rack->r_must_retran; 2411 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2412 &rack->rc_inp->inp_socket->so_rcv, 
2413 &rack->rc_inp->inp_socket->so_snd, 2414 BBR_LOG_BBRSND, 0, 2415 0, &log, false, tv); 2416 } 2417 } 2418 2419 static void 2420 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2421 { 2422 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2423 union tcp_log_stackspecific log; 2424 struct timeval tv; 2425 2426 memset(&log, 0, sizeof(log)); 2427 log.u_bbr.flex1 = did_out; 2428 log.u_bbr.flex2 = nxt_pkt; 2429 log.u_bbr.flex3 = way_out; 2430 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2431 if (rack->rack_no_prr) 2432 log.u_bbr.flex5 = 0; 2433 else 2434 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2435 log.u_bbr.flex6 = nsegs; 2436 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2437 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2438 log.u_bbr.flex7 <<= 1; 2439 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2440 log.u_bbr.flex7 <<= 1; 2441 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2442 log.u_bbr.flex8 = rack->rc_in_persist; 2443 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2444 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2445 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2446 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2447 log.u_bbr.use_lt_bw <<= 1; 2448 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2449 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2450 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2451 log.u_bbr.pacing_gain = rack->r_must_retran; 2452 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2453 &rack->rc_inp->inp_socket->so_rcv, 2454 &rack->rc_inp->inp_socket->so_snd, 2455 BBR_LOG_DOSEG_DONE, 0, 2456 0, &log, false, &tv); 2457 } 2458 } 2459 2460 static void 2461 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2462 { 2463 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2464 union tcp_log_stackspecific log; 2465 struct timeval tv; 2466 2467 memset(&log, 0, sizeof(log)); 2468 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2469 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2470 log.u_bbr.flex4 = arg1; 2471 log.u_bbr.flex5 = arg2; 2472 log.u_bbr.flex6 = arg3; 2473 log.u_bbr.flex8 = frm; 2474 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2475 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2476 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2477 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2478 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2479 log.u_bbr.pacing_gain = rack->r_must_retran; 2480 TCP_LOG_EVENTP(tp, NULL, 2481 &tp->t_inpcb->inp_socket->so_rcv, 2482 &tp->t_inpcb->inp_socket->so_snd, 2483 TCP_HDWR_PACE_SIZE, 0, 2484 0, &log, false, &tv); 2485 } 2486 } 2487 2488 static void 2489 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2490 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2491 { 2492 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2493 union tcp_log_stackspecific log; 2494 struct timeval tv; 2495 2496 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2497 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2498 log.u_bbr.flex1 = slot; 2499 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2500 log.u_bbr.flex4 = reason; 2501 if (rack->rack_no_prr) 2502 log.u_bbr.flex5 = 0; 2503 else 2504 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2505 log.u_bbr.flex7 = hpts_calling; 2506 log.u_bbr.flex8 = rack->rc_in_persist; 2507 log.u_bbr.lt_epoch = cwnd_to_use; 2508 
log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2509 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2510 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2511 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2512 log.u_bbr.pacing_gain = rack->r_must_retran; 2513 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2514 &rack->rc_inp->inp_socket->so_rcv, 2515 &rack->rc_inp->inp_socket->so_snd, 2516 BBR_LOG_JUSTRET, 0, 2517 tlen, &log, false, &tv); 2518 } 2519 } 2520 2521 static void 2522 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2523 struct timeval *tv, uint32_t flags_on_entry) 2524 { 2525 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2526 union tcp_log_stackspecific log; 2527 2528 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2529 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2530 log.u_bbr.flex1 = line; 2531 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2532 log.u_bbr.flex3 = flags_on_entry; 2533 log.u_bbr.flex4 = us_cts; 2534 if (rack->rack_no_prr) 2535 log.u_bbr.flex5 = 0; 2536 else 2537 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2538 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2539 log.u_bbr.flex7 = hpts_removed; 2540 log.u_bbr.flex8 = 1; 2541 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2542 log.u_bbr.timeStamp = us_cts; 2543 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2544 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2545 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2546 log.u_bbr.pacing_gain = rack->r_must_retran; 2547 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2548 &rack->rc_inp->inp_socket->so_rcv, 2549 &rack->rc_inp->inp_socket->so_snd, 2550 BBR_LOG_TIMERCANC, 0, 2551 0, &log, false, tv); 2552 } 2553 } 2554 2555 static void 2556 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2557 uint32_t flex1, uint32_t flex2, 2558 uint32_t flex3, uint32_t flex4, 2559 uint32_t flex5, uint32_t flex6, 2560 uint16_t flex7, uint8_t mod) 2561 { 2562 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2563 union tcp_log_stackspecific log; 2564 struct timeval tv; 2565 2566 if (mod == 1) { 2567 /* No you can't use 1, its for the real to cancel */ 2568 return; 2569 } 2570 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2571 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2572 log.u_bbr.flex1 = flex1; 2573 log.u_bbr.flex2 = flex2; 2574 log.u_bbr.flex3 = flex3; 2575 log.u_bbr.flex4 = flex4; 2576 log.u_bbr.flex5 = flex5; 2577 log.u_bbr.flex6 = flex6; 2578 log.u_bbr.flex7 = flex7; 2579 log.u_bbr.flex8 = mod; 2580 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2581 &rack->rc_inp->inp_socket->so_rcv, 2582 &rack->rc_inp->inp_socket->so_snd, 2583 BBR_LOG_TIMERCANC, 0, 2584 0, &log, false, &tv); 2585 } 2586 } 2587 2588 static void 2589 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2590 { 2591 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2592 union tcp_log_stackspecific log; 2593 struct timeval tv; 2594 2595 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2596 log.u_bbr.flex1 = timers; 2597 log.u_bbr.flex2 = ret; 2598 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2599 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2600 log.u_bbr.flex5 = cts; 2601 if (rack->rack_no_prr) 2602 log.u_bbr.flex6 = 0; 2603 else 2604 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2605 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2606 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2607 log.u_bbr.pacing_gain = rack->r_must_retran; 2608 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2609 log.u_bbr.inflight = 
ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2610 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2611 &rack->rc_inp->inp_socket->so_rcv, 2612 &rack->rc_inp->inp_socket->so_snd, 2613 BBR_LOG_TO_PROCESS, 0, 2614 0, &log, false, &tv); 2615 } 2616 } 2617 2618 static void 2619 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2620 { 2621 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2622 union tcp_log_stackspecific log; 2623 struct timeval tv; 2624 2625 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2626 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2627 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2628 if (rack->rack_no_prr) 2629 log.u_bbr.flex3 = 0; 2630 else 2631 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2632 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2633 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2634 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2635 log.u_bbr.flex8 = frm; 2636 log.u_bbr.pkts_out = orig_cwnd; 2637 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2638 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2639 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2640 log.u_bbr.use_lt_bw <<= 1; 2641 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2642 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2643 &rack->rc_inp->inp_socket->so_rcv, 2644 &rack->rc_inp->inp_socket->so_snd, 2645 BBR_LOG_BBRUPD, 0, 2646 0, &log, false, &tv); 2647 } 2648 } 2649 2650 #ifdef NETFLIX_EXP_DETECTION 2651 static void 2652 rack_log_sad(struct tcp_rack *rack, int event) 2653 { 2654 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2655 union tcp_log_stackspecific log; 2656 struct timeval tv; 2657 2658 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2659 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2660 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2661 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2662 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2663 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2664 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2665 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2666 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2667 log.u_bbr.lt_epoch |= rack->do_detection; 2668 log.u_bbr.applimited = tcp_map_minimum; 2669 log.u_bbr.flex7 = rack->sack_attack_disable; 2670 log.u_bbr.flex8 = event; 2671 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2672 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2673 log.u_bbr.delivered = tcp_sad_decay_val; 2674 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2675 &rack->rc_inp->inp_socket->so_rcv, 2676 &rack->rc_inp->inp_socket->so_snd, 2677 TCP_SAD_DETECTION, 0, 2678 0, &log, false, &tv); 2679 } 2680 } 2681 #endif 2682 2683 static void 2684 rack_counter_destroy(void) 2685 { 2686 counter_u64_free(rack_fto_send); 2687 counter_u64_free(rack_fto_rsm_send); 2688 counter_u64_free(rack_nfto_resend); 2689 counter_u64_free(rack_hw_pace_init_fail); 2690 counter_u64_free(rack_hw_pace_lost); 2691 counter_u64_free(rack_non_fto_send); 2692 counter_u64_free(rack_extended_rfo); 2693 counter_u64_free(rack_ack_total); 2694 counter_u64_free(rack_express_sack); 2695 counter_u64_free(rack_sack_total); 2696 counter_u64_free(rack_move_none); 2697 counter_u64_free(rack_move_some); 2698 counter_u64_free(rack_sack_attacks_detected); 2699 counter_u64_free(rack_sack_attacks_reversed); 2700 counter_u64_free(rack_sack_used_next_merge); 2701 counter_u64_free(rack_sack_used_prev_merge); 2702 counter_u64_free(rack_tlp_tot); 2703 counter_u64_free(rack_tlp_newdata); 2704 counter_u64_free(rack_tlp_retran); 2705 counter_u64_free(rack_tlp_retran_bytes); 2706 
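    /*
     * Every counter allocated during the sysctl setup above must have a
     * matching free in this function.
     */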
    counter_u64_free(rack_to_tot);
    counter_u64_free(rack_saw_enobuf);
    counter_u64_free(rack_saw_enobuf_hw);
    counter_u64_free(rack_saw_enetunreach);
    counter_u64_free(rack_hot_alloc);
    counter_u64_free(rack_to_alloc);
    counter_u64_free(rack_to_alloc_hard);
    counter_u64_free(rack_to_alloc_emerg);
    counter_u64_free(rack_to_alloc_limited);
    counter_u64_free(rack_alloc_limited_conns);
    counter_u64_free(rack_split_limited);
    counter_u64_free(rack_multi_single_eq);
    counter_u64_free(rack_proc_non_comp_ack);
    counter_u64_free(rack_sack_proc_all);
    counter_u64_free(rack_sack_proc_restart);
    counter_u64_free(rack_sack_proc_short);
    counter_u64_free(rack_sack_skipped_acked);
    counter_u64_free(rack_sack_splits);
    counter_u64_free(rack_input_idle_reduces);
    counter_u64_free(rack_collapsed_win);
    counter_u64_free(rack_try_scwnd);
    counter_u64_free(rack_persists_sends);
    counter_u64_free(rack_persists_acks);
    counter_u64_free(rack_persists_loss);
    counter_u64_free(rack_persists_lost_ends);
#ifdef INVARIANTS
    counter_u64_free(rack_adjust_map_bw);
#endif
    COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE);
    COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE);
}

static struct rack_sendmap *
rack_alloc(struct tcp_rack *rack)
{
    struct rack_sendmap *rsm;

    /*
     * First get the top of the list; in theory it is the
     * "hottest" rsm we have, possibly just freed by ack
     * processing.
     */
    if (rack->rc_free_cnt > rack_free_cache) {
        rsm = TAILQ_FIRST(&rack->r_ctl.rc_free);
        TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
        counter_u64_add(rack_hot_alloc, 1);
        rack->rc_free_cnt--;
        return (rsm);
    }
    /*
     * Once we get under our free cache we probably
     * no longer have a "hot" one available. Let's
     * get one from UMA.
     */
    rsm = uma_zalloc(rack_zone, M_NOWAIT);
    if (rsm) {
        rack->r_ctl.rc_num_maps_alloced++;
        counter_u64_add(rack_to_alloc, 1);
        return (rsm);
    }
    /*
     * Dig in to our aux rsm's (the last two) since
     * UMA failed to get us one.
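     * This dips into the small reserve that rack_free_trim() leaves on
     * the free list; entries released later through rack_free() will
     * replenish it.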
2769 */ 2770 if (rack->rc_free_cnt) { 2771 counter_u64_add(rack_to_alloc_emerg, 1); 2772 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2773 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2774 rack->rc_free_cnt--; 2775 return (rsm); 2776 } 2777 return (NULL); 2778 } 2779 2780 static struct rack_sendmap * 2781 rack_alloc_full_limit(struct tcp_rack *rack) 2782 { 2783 if ((V_tcp_map_entries_limit > 0) && 2784 (rack->do_detection == 0) && 2785 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2786 counter_u64_add(rack_to_alloc_limited, 1); 2787 if (!rack->alloc_limit_reported) { 2788 rack->alloc_limit_reported = 1; 2789 counter_u64_add(rack_alloc_limited_conns, 1); 2790 } 2791 return (NULL); 2792 } 2793 return (rack_alloc(rack)); 2794 } 2795 2796 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2797 static struct rack_sendmap * 2798 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2799 { 2800 struct rack_sendmap *rsm; 2801 2802 if (limit_type) { 2803 /* currently there is only one limit type */ 2804 if (V_tcp_map_split_limit > 0 && 2805 (rack->do_detection == 0) && 2806 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2807 counter_u64_add(rack_split_limited, 1); 2808 if (!rack->alloc_limit_reported) { 2809 rack->alloc_limit_reported = 1; 2810 counter_u64_add(rack_alloc_limited_conns, 1); 2811 } 2812 return (NULL); 2813 } 2814 } 2815 2816 /* allocate and mark in the limit type, if set */ 2817 rsm = rack_alloc(rack); 2818 if (rsm != NULL && limit_type) { 2819 rsm->r_limit_type = limit_type; 2820 rack->r_ctl.rc_num_split_allocs++; 2821 } 2822 return (rsm); 2823 } 2824 2825 static void 2826 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2827 { 2828 if (rsm->r_flags & RACK_APP_LIMITED) { 2829 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2830 rack->r_ctl.rc_app_limited_cnt--; 2831 } 2832 } 2833 if (rsm->r_limit_type) { 2834 /* currently there is only one limit type */ 2835 rack->r_ctl.rc_num_split_allocs--; 2836 } 2837 if (rsm == rack->r_ctl.rc_first_appl) { 2838 if (rack->r_ctl.rc_app_limited_cnt == 0) 2839 rack->r_ctl.rc_first_appl = NULL; 2840 else { 2841 /* Follow the next one out */ 2842 struct rack_sendmap fe; 2843 2844 fe.r_start = rsm->r_nseq_appl; 2845 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2846 } 2847 } 2848 if (rsm == rack->r_ctl.rc_resend) 2849 rack->r_ctl.rc_resend = NULL; 2850 if (rsm == rack->r_ctl.rc_end_appl) 2851 rack->r_ctl.rc_end_appl = NULL; 2852 if (rack->r_ctl.rc_tlpsend == rsm) 2853 rack->r_ctl.rc_tlpsend = NULL; 2854 if (rack->r_ctl.rc_sacklast == rsm) 2855 rack->r_ctl.rc_sacklast = NULL; 2856 memset(rsm, 0, sizeof(struct rack_sendmap)); 2857 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2858 rack->rc_free_cnt++; 2859 } 2860 2861 static void 2862 rack_free_trim(struct tcp_rack *rack) 2863 { 2864 struct rack_sendmap *rsm; 2865 2866 /* 2867 * Free up all the tail entries until 2868 * we get our list down to the limit. 
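     * (i.e. down to rack_free_cache entries; anything beyond that is
     * handed back to the rack_zone UMA zone).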
     */
    while (rack->rc_free_cnt > rack_free_cache) {
        rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head);
        TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext);
        rack->rc_free_cnt--;
        uma_zfree(rack_zone, rsm);
    }
}

static uint32_t
rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack)
{
    uint64_t srtt, bw, len, tim;
    uint32_t segsiz, def_len, minl;

    segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
    def_len = rack_def_data_window * segsiz;
    if (rack->rc_gp_filled == 0) {
        /*
         * We have no measurement (IW is in flight?) so
         * we can only guess using our data_window sysctl
         * value (usually 20MSS).
         */
        return (def_len);
    }
    /*
     * Now we have a number of factors to consider.
     *
     * 1) We have a desired BDP which is usually
     *    at least 2.
     * 2) We have a minimum number of rtt's usually 1 SRTT
     *    but we allow it to be more.
     * 3) We want to make sure a measurement lasts N useconds (if
     *    we have set rack_min_measure_usec).
     *
     * We handle the first concern here by trying to create a data
     * window of max(rack_def_data_window, DesiredBDP). The
     * second concern we handle in not letting the measurement
     * window end normally until at least the required SRTT's
     * have gone by which is done further below in
     * rack_enough_for_measurement(). Finally the third concern
     * we also handle here by calculating how long that time
     * would take at the current BW and then return the
     * max of our first calculation and that length. Note
     * that if rack_min_measure_usec is 0, we don't deal
     * with concern 3. Also for both Concern 1 and 3 an
     * application limited period could end the measurement
     * earlier.
     *
     * So let's calculate the BDP with the "known" b/w using
     * the SRTT as our rtt and then multiply it by the
     * goal.
     */
    bw = rack_get_bw(rack);
    srtt = (uint64_t)tp->t_srtt;
    len = bw * srtt;
    len /= (uint64_t)HPTS_USEC_IN_SEC;
    len *= max(1, rack_goal_bdp);
    /* Now we need to round up to the nearest MSS */
    len = roundup(len, segsiz);
    if (rack_min_measure_usec) {
        /* Now calculate our min length for this b/w */
        tim = rack_min_measure_usec;
        minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC;
        if (minl == 0)
            minl = 1;
        minl = roundup(minl, segsiz);
        if (len < minl)
            len = minl;
    }
    /*
     * Now if we have a very small window we want
     * to attempt to get the window that is
     * as small as possible. This happens on
     * low b/w connections and we don't want to
     * span huge numbers of rtt's between measurements.
     *
     * We basically include 2 over our "MIN window" so
     * that the measurement can be shortened (possibly) by
     * an ack'ed packet.
     */
    if (len < def_len)
        return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz)));
    else
        return (max((uint32_t)len, def_len));

}

static int
rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality)
{
    uint32_t tim, srtts, segsiz;

    /*
     * Has enough time passed for the GP measurement to be valid?
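     * We accept the measurement early if everything outstanding has been
     * acked or if the app limited point has been reached; otherwise we
     * require that enough bytes are covered and that at least
     * rack_min_srtts worth of the current goodput SRTT has elapsed.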
2965 */ 2966 if ((tp->snd_max == tp->snd_una) || 2967 (th_ack == tp->snd_max)){ 2968 /* All is acked */ 2969 *quality = RACK_QUALITY_ALLACKED; 2970 return (1); 2971 } 2972 if (SEQ_LT(th_ack, tp->gput_seq)) { 2973 /* Not enough bytes yet */ 2974 return (0); 2975 } 2976 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2977 if (SEQ_LT(th_ack, tp->gput_ack) && 2978 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 2979 /* Not enough bytes yet */ 2980 return (0); 2981 } 2982 if (rack->r_ctl.rc_first_appl && 2983 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 2984 /* 2985 * We are up to the app limited send point 2986 * we have to measure irrespective of the time.. 2987 */ 2988 *quality = RACK_QUALITY_APPLIMITED; 2989 return (1); 2990 } 2991 /* Now what about time? */ 2992 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 2993 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 2994 if (tim >= srtts) { 2995 *quality = RACK_QUALITY_HIGH; 2996 return (1); 2997 } 2998 /* Nope not even a full SRTT has passed */ 2999 return (0); 3000 } 3001 3002 static void 3003 rack_log_timely(struct tcp_rack *rack, 3004 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3005 uint64_t up_bnd, int line, uint8_t method) 3006 { 3007 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3008 union tcp_log_stackspecific log; 3009 struct timeval tv; 3010 3011 memset(&log, 0, sizeof(log)); 3012 log.u_bbr.flex1 = logged; 3013 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3014 log.u_bbr.flex2 <<= 4; 3015 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3016 log.u_bbr.flex2 <<= 4; 3017 log.u_bbr.flex2 |= rack->rc_gp_incr; 3018 log.u_bbr.flex2 <<= 4; 3019 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3020 log.u_bbr.flex3 = rack->rc_gp_incr; 3021 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3022 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3023 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3024 log.u_bbr.flex7 = rack->rc_gp_bwred; 3025 log.u_bbr.flex8 = method; 3026 log.u_bbr.cur_del_rate = cur_bw; 3027 log.u_bbr.delRate = low_bnd; 3028 log.u_bbr.bw_inuse = up_bnd; 3029 log.u_bbr.rttProp = rack_get_bw(rack); 3030 log.u_bbr.pkt_epoch = line; 3031 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3032 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3033 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3034 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3035 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3036 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3037 log.u_bbr.cwnd_gain <<= 1; 3038 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3039 log.u_bbr.cwnd_gain <<= 1; 3040 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3041 log.u_bbr.cwnd_gain <<= 1; 3042 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3043 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3044 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3045 &rack->rc_inp->inp_socket->so_rcv, 3046 &rack->rc_inp->inp_socket->so_snd, 3047 TCP_TIMELY_WORK, 0, 3048 0, &log, false, &tv); 3049 } 3050 } 3051 3052 static int 3053 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3054 { 3055 /* 3056 * Before we increase we need to know if 3057 * the estimate just made was less than 3058 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3059 * 3060 * If we already are pacing at a fast enough 3061 * rate to push us faster there is no sense of 3062 * increasing. 3063 * 3064 * We first caculate our actual pacing rate (ss or ca multipler 3065 * times our cur_bw). 
3066 * 3067 * Then we take the last measured rate and multipy by our 3068 * maximum pacing overage to give us a max allowable rate. 3069 * 3070 * If our act_rate is smaller than our max_allowable rate 3071 * then we should increase. Else we should hold steady. 3072 * 3073 */ 3074 uint64_t act_rate, max_allow_rate; 3075 3076 if (rack_timely_no_stopping) 3077 return (1); 3078 3079 if ((cur_bw == 0) || (last_bw_est == 0)) { 3080 /* 3081 * Initial startup case or 3082 * everything is acked case. 3083 */ 3084 rack_log_timely(rack, mult, cur_bw, 0, 0, 3085 __LINE__, 9); 3086 return (1); 3087 } 3088 if (mult <= 100) { 3089 /* 3090 * We can always pace at or slightly above our rate. 3091 */ 3092 rack_log_timely(rack, mult, cur_bw, 0, 0, 3093 __LINE__, 9); 3094 return (1); 3095 } 3096 act_rate = cur_bw * (uint64_t)mult; 3097 act_rate /= 100; 3098 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3099 max_allow_rate /= 100; 3100 if (act_rate < max_allow_rate) { 3101 /* 3102 * Here the rate we are actually pacing at 3103 * is smaller than 10% above our last measurement. 3104 * This means we are pacing below what we would 3105 * like to try to achieve (plus some wiggle room). 3106 */ 3107 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3108 __LINE__, 9); 3109 return (1); 3110 } else { 3111 /* 3112 * Here we are already pacing at least rack_max_per_above(10%) 3113 * what we are getting back. This indicates most likely 3114 * that we are being limited (cwnd/rwnd/app) and can't 3115 * get any more b/w. There is no sense of trying to 3116 * raise up the pacing rate its not speeding us up 3117 * and we already are pacing faster than we are getting. 3118 */ 3119 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3120 __LINE__, 8); 3121 return (0); 3122 } 3123 } 3124 3125 static void 3126 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3127 { 3128 /* 3129 * When we drag bottom, we want to assure 3130 * that no multiplier is below 1.0, if so 3131 * we want to restore it to at least that. 3132 */ 3133 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3134 /* This is unlikely we usually do not touch recovery */ 3135 rack->r_ctl.rack_per_of_gp_rec = 100; 3136 } 3137 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3138 rack->r_ctl.rack_per_of_gp_ca = 100; 3139 } 3140 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3141 rack->r_ctl.rack_per_of_gp_ss = 100; 3142 } 3143 } 3144 3145 static void 3146 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3147 { 3148 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3149 rack->r_ctl.rack_per_of_gp_ca = 100; 3150 } 3151 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3152 rack->r_ctl.rack_per_of_gp_ss = 100; 3153 } 3154 } 3155 3156 static void 3157 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3158 { 3159 int32_t calc, logged, plus; 3160 3161 logged = 0; 3162 3163 if (override) { 3164 /* 3165 * override is passed when we are 3166 * loosing b/w and making one last 3167 * gasp at trying to not loose out 3168 * to a new-reno flow. 3169 */ 3170 goto extra_boost; 3171 } 3172 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3173 if (rack->rc_gp_incr && 3174 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3175 /* 3176 * Reset and get 5 strokes more before the boost. Note 3177 * that the count is 0 based so we have to add one. 
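 *
 * Concretely, the boosted step below is rack_gp_increase_per times
 * RACK_TIMELY_CNT_BOOST, so e.g. a 2%-per-step setting with a boost
 * count of 5 moves the multiplier by 10 percentage points in one
 * shot instead of 2, and the streak counter then restarts from zero.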
3178 */ 3179 extra_boost: 3180 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3181 rack->rc_gp_timely_inc_cnt = 0; 3182 } else 3183 plus = (uint32_t)rack_gp_increase_per; 3184 /* Must be at least 1% increase for true timely increases */ 3185 if ((plus < 1) && 3186 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3187 plus = 1; 3188 if (rack->rc_gp_saw_rec && 3189 (rack->rc_gp_no_rec_chg == 0) && 3190 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3191 rack->r_ctl.rack_per_of_gp_rec)) { 3192 /* We have been in recovery ding it too */ 3193 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3194 if (calc > 0xffff) 3195 calc = 0xffff; 3196 logged |= 1; 3197 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3198 if (rack_per_upper_bound_ss && 3199 (rack->rc_dragged_bottom == 0) && 3200 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3201 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3202 } 3203 if (rack->rc_gp_saw_ca && 3204 (rack->rc_gp_saw_ss == 0) && 3205 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3206 rack->r_ctl.rack_per_of_gp_ca)) { 3207 /* In CA */ 3208 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3209 if (calc > 0xffff) 3210 calc = 0xffff; 3211 logged |= 2; 3212 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3213 if (rack_per_upper_bound_ca && 3214 (rack->rc_dragged_bottom == 0) && 3215 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3216 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3217 } 3218 if (rack->rc_gp_saw_ss && 3219 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3220 rack->r_ctl.rack_per_of_gp_ss)) { 3221 /* In SS */ 3222 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3223 if (calc > 0xffff) 3224 calc = 0xffff; 3225 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3226 if (rack_per_upper_bound_ss && 3227 (rack->rc_dragged_bottom == 0) && 3228 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3229 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3230 logged |= 4; 3231 } 3232 if (logged && 3233 (rack->rc_gp_incr == 0)){ 3234 /* Go into increment mode */ 3235 rack->rc_gp_incr = 1; 3236 rack->rc_gp_timely_inc_cnt = 0; 3237 } 3238 if (rack->rc_gp_incr && 3239 logged && 3240 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3241 rack->rc_gp_timely_inc_cnt++; 3242 } 3243 rack_log_timely(rack, logged, plus, 0, 0, 3244 __LINE__, 1); 3245 } 3246 3247 static uint32_t 3248 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3249 { 3250 /* 3251 * norm_grad = rtt_diff / minrtt; 3252 * new_per = curper * (1 - B * norm_grad) 3253 * 3254 * B = rack_gp_decrease_per (default 10%) 3255 * rtt_dif = input var current rtt-diff 3256 * curper = input var current percentage 3257 * minrtt = from rack filter 3258 * 3259 */ 3260 uint64_t perf; 3261 3262 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3263 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3264 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3265 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3266 (uint64_t)1000000)) / 3267 (uint64_t)1000000); 3268 if (perf > curper) { 3269 /* TSNH */ 3270 perf = curper - 1; 3271 } 3272 return ((uint32_t)perf); 3273 } 3274 3275 static uint32_t 3276 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3277 { 3278 /* 3279 * highrttthresh 3280 * result = curper * (1 - (B * ( 1 - ------ )) 3281 * gp_srtt 3282 * 3283 * B = rack_gp_decrease_per (default 10%) 3284 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3285 */ 3286 uint64_t perf; 3287 uint32_t 
highrttthresh; 3288 3289 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3290 3291 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3292 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3293 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3294 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3295 return (perf); 3296 } 3297 3298 static void 3299 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3300 { 3301 uint64_t logvar, logvar2, logvar3; 3302 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3303 3304 if (rack->rc_gp_incr) { 3305 /* Turn off increment counting */ 3306 rack->rc_gp_incr = 0; 3307 rack->rc_gp_timely_inc_cnt = 0; 3308 } 3309 ss_red = ca_red = rec_red = 0; 3310 logged = 0; 3311 /* Calculate the reduction value */ 3312 if (rtt_diff < 0) { 3313 rtt_diff *= -1; 3314 } 3315 /* Must be at least 1% reduction */ 3316 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3317 /* We have been in recovery ding it too */ 3318 if (timely_says == 2) { 3319 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3320 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3321 if (alt < new_per) 3322 val = alt; 3323 else 3324 val = new_per; 3325 } else 3326 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3327 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3328 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3329 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3330 } else { 3331 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3332 rec_red = 0; 3333 } 3334 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3335 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3336 logged |= 1; 3337 } 3338 if (rack->rc_gp_saw_ss) { 3339 /* Sent in SS */ 3340 if (timely_says == 2) { 3341 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3342 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3343 if (alt < new_per) 3344 val = alt; 3345 else 3346 val = new_per; 3347 } else 3348 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3349 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3350 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3351 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3352 } else { 3353 ss_red = new_per; 3354 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3355 logvar = new_per; 3356 logvar <<= 32; 3357 logvar |= alt; 3358 logvar2 = (uint32_t)rtt; 3359 logvar2 <<= 32; 3360 logvar2 |= (uint32_t)rtt_diff; 3361 logvar3 = rack_gp_rtt_maxmul; 3362 logvar3 <<= 32; 3363 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3364 rack_log_timely(rack, timely_says, 3365 logvar2, logvar3, 3366 logvar, __LINE__, 10); 3367 } 3368 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3369 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3370 logged |= 4; 3371 } else if (rack->rc_gp_saw_ca) { 3372 /* Sent in CA */ 3373 if (timely_says == 2) { 3374 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3375 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3376 if (alt < new_per) 3377 val = alt; 3378 else 3379 val = new_per; 3380 } else 3381 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3382 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3383 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3384 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3385 } else { 3386 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3387 ca_red = 0; 3388 logvar = new_per; 3389 logvar <<= 32; 3390 logvar |= alt; 3391 logvar2 = (uint32_t)rtt; 3392 logvar2 <<= 32; 3393 logvar2 |= (uint32_t)rtt_diff; 3394 logvar3 = rack_gp_rtt_maxmul; 3395 logvar3 <<= 32; 3396 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3397 rack_log_timely(rack, timely_says, 3398 logvar2, logvar3, 3399 logvar, __LINE__, 10); 3400 } 3401 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3402 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3403 logged |= 2; 3404 } 3405 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3406 rack->rc_gp_timely_dec_cnt++; 3407 if (rack_timely_dec_clear && 3408 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3409 rack->rc_gp_timely_dec_cnt = 0; 3410 } 3411 logvar = ss_red; 3412 logvar <<= 32; 3413 logvar |= ca_red; 3414 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3415 __LINE__, 2); 3416 } 3417 3418 static void 3419 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3420 uint32_t rtt, uint32_t line, uint8_t reas) 3421 { 3422 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3423 union tcp_log_stackspecific log; 3424 struct timeval tv; 3425 3426 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3427 log.u_bbr.flex1 = line; 3428 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3429 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3430 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3431 log.u_bbr.flex5 = rtt; 3432 log.u_bbr.flex6 = rack->rc_highly_buffered; 3433 log.u_bbr.flex6 <<= 1; 3434 log.u_bbr.flex6 |= rack->forced_ack; 3435 log.u_bbr.flex6 <<= 1; 3436 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3437 log.u_bbr.flex6 <<= 1; 3438 log.u_bbr.flex6 |= rack->in_probe_rtt; 3439 log.u_bbr.flex6 <<= 1; 3440 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3441 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3442 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3443 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3444 log.u_bbr.flex8 = reas; 3445 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3446 log.u_bbr.delRate = rack_get_bw(rack); 3447 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3448 log.u_bbr.cur_del_rate <<= 32; 3449 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3450 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3451 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3452 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3453 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3454 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3455 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3456 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3457 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3458 log.u_bbr.rttProp = us_cts; 3459 log.u_bbr.rttProp <<= 32; 3460 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3461 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3462 &rack->rc_inp->inp_socket->so_rcv, 3463 &rack->rc_inp->inp_socket->so_snd, 3464 BBR_LOG_RTT_SHRINKS, 0, 3465 0, &log, false, &rack->r_ctl.act_rcv_time); 3466 } 3467 } 3468 3469 static void 3470 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3471 { 3472 uint64_t bwdp; 3473 3474 bwdp = rack_get_bw(rack); 3475 bwdp *= (uint64_t)rtt; 3476 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3477 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3478 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3479 /* 3480 * A window protocol must be able to have 4 packets 3481 * outstanding as the floor in order to function 3482 * (especially considering delayed ack :D). 3483 */ 3484 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3485 } 3486 } 3487 3488 static void 3489 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3490 { 3491 /** 3492 * ProbeRTT is a bit different in rack_pacing than in 3493 * BBR. It is like BBR in that it uses the lowering of 3494 * the RTT as a signal that we saw something new and 3495 * counts from there for how long between. But it is 3496 * different in that its quite simple. It does not 3497 * play with the cwnd and wait until we get down 3498 * to N segments outstanding and hold that for 3499 * 200ms. Instead it just sets the pacing reduction 3500 * rate to a set percentage (70 by default) and hold 3501 * that for a number of recent GP Srtt's. 3502 */ 3503 uint32_t segsiz; 3504 3505 if (rack->rc_gp_dyn_mul == 0) 3506 return; 3507 3508 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3509 /* We are idle */ 3510 return; 3511 } 3512 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3513 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3514 /* 3515 * Stop the goodput now, the idea here is 3516 * that future measurements with in_probe_rtt 3517 * won't register if they are not greater so 3518 * we want to get what info (if any) is available 3519 * now. 3520 */ 3521 rack_do_goodput_measurement(rack->rc_tp, rack, 3522 rack->rc_tp->snd_una, __LINE__, 3523 RACK_QUALITY_PROBERTT); 3524 } 3525 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3526 rack->r_ctl.rc_time_probertt_entered = us_cts; 3527 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3528 rack->r_ctl.rc_pace_min_segs); 3529 rack->in_probe_rtt = 1; 3530 rack->measure_saw_probe_rtt = 1; 3531 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3532 rack->r_ctl.rc_time_probertt_starts = 0; 3533 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3534 if (rack_probertt_use_min_rtt_entry) 3535 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3536 else 3537 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3538 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3539 __LINE__, RACK_RTTS_ENTERPROBE); 3540 } 3541 3542 static void 3543 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3544 { 3545 struct rack_sendmap *rsm; 3546 uint32_t segsiz; 3547 3548 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3549 rack->r_ctl.rc_pace_min_segs); 3550 rack->in_probe_rtt = 0; 3551 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3552 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3553 /* 3554 * Stop the goodput now, the idea here is 3555 * that future measurements with in_probe_rtt 3556 * won't register if they are not greater so 3557 * we want to get what info (if any) is available 3558 * now. 3559 */ 3560 rack_do_goodput_measurement(rack->rc_tp, rack, 3561 rack->rc_tp->snd_una, __LINE__, 3562 RACK_QUALITY_PROBERTT); 3563 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3564 /* 3565 * We don't have enough data to make a measurement. 3566 * So lets just stop and start here after exiting 3567 * probe-rtt. We probably are not interested in 3568 * the results anyway. 3569 */ 3570 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3571 } 3572 /* 3573 * Measurements through the current snd_max are going 3574 * to be limited by the slower pacing rate. 
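 * (While in probe-rtt we were pacing at only
 * rack_per_of_gp_probertt percent of the estimated b/w, 70 by
 * default per the note in rack_enter_probertt(), so goodput sampled
 * across those sends under-reports what the path can carry.)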
3575 * 3576 * We need to mark these as app-limited so we 3577 * don't collapse the b/w. 3578 */ 3579 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3580 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3581 if (rack->r_ctl.rc_app_limited_cnt == 0) 3582 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3583 else { 3584 /* 3585 * Go out to the end app limited and mark 3586 * this new one as next and move the end_appl up 3587 * to this guy. 3588 */ 3589 if (rack->r_ctl.rc_end_appl) 3590 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3591 rack->r_ctl.rc_end_appl = rsm; 3592 } 3593 rsm->r_flags |= RACK_APP_LIMITED; 3594 rack->r_ctl.rc_app_limited_cnt++; 3595 } 3596 /* 3597 * Now, we need to examine our pacing rate multipliers. 3598 * If its under 100%, we need to kick it back up to 3599 * 100%. We also don't let it be over our "max" above 3600 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3601 * Note setting clamp_atexit_prtt to 0 has the effect 3602 * of setting CA/SS to 100% always at exit (which is 3603 * the default behavior). 3604 */ 3605 if (rack_probertt_clear_is) { 3606 rack->rc_gp_incr = 0; 3607 rack->rc_gp_bwred = 0; 3608 rack->rc_gp_timely_inc_cnt = 0; 3609 rack->rc_gp_timely_dec_cnt = 0; 3610 } 3611 /* Do we do any clamping at exit? */ 3612 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3613 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3614 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3615 } 3616 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3617 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3618 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3619 } 3620 /* 3621 * Lets set rtt_diff to 0, so that we will get a "boost" 3622 * after exiting. 3623 */ 3624 rack->r_ctl.rc_rtt_diff = 0; 3625 3626 /* Clear all flags so we start fresh */ 3627 rack->rc_tp->t_bytes_acked = 0; 3628 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3629 /* 3630 * If configured to, set the cwnd and ssthresh to 3631 * our targets. 3632 */ 3633 if (rack_probe_rtt_sets_cwnd) { 3634 uint64_t ebdp; 3635 uint32_t setto; 3636 3637 /* Set ssthresh so we get into CA once we hit our target */ 3638 if (rack_probertt_use_min_rtt_exit == 1) { 3639 /* Set to min rtt */ 3640 rack_set_prtt_target(rack, segsiz, 3641 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3642 } else if (rack_probertt_use_min_rtt_exit == 2) { 3643 /* Set to current gp rtt */ 3644 rack_set_prtt_target(rack, segsiz, 3645 rack->r_ctl.rc_gp_srtt); 3646 } else if (rack_probertt_use_min_rtt_exit == 3) { 3647 /* Set to entry gp rtt */ 3648 rack_set_prtt_target(rack, segsiz, 3649 rack->r_ctl.rc_entry_gp_rtt); 3650 } else { 3651 uint64_t sum; 3652 uint32_t setval; 3653 3654 sum = rack->r_ctl.rc_entry_gp_rtt; 3655 sum *= 10; 3656 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3657 if (sum >= 20) { 3658 /* 3659 * A highly buffered path needs 3660 * cwnd space for timely to work. 3661 * Lets set things up as if 3662 * we are heading back here again. 3663 */ 3664 setval = rack->r_ctl.rc_entry_gp_rtt; 3665 } else if (sum >= 15) { 3666 /* 3667 * Lets take the smaller of the 3668 * two since we are just somewhat 3669 * buffered. 3670 */ 3671 setval = rack->r_ctl.rc_gp_srtt; 3672 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3673 setval = rack->r_ctl.rc_entry_gp_rtt; 3674 } else { 3675 /* 3676 * Here we are not highly buffered 3677 * and should pick the min we can to 3678 * keep from causing loss. 
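 *
 * Worked example of the ratio test above: sum is the entry rtt
 * scaled by 10 and divided by the current gp srtt. If the rtt at
 * entry was 3x the current gp srtt, sum is 30 (>= 20) and we keep
 * the larger entry rtt; at 1.6x, sum is 16 (>= 15) and we take the
 * smaller of the two; at 1.2x, sum is only 12 and we fall through
 * to the filtered minimum rtt here.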
3679 */ 3680 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3681 } 3682 rack_set_prtt_target(rack, segsiz, 3683 setval); 3684 } 3685 if (rack_probe_rtt_sets_cwnd > 1) { 3686 /* There is a percentage here to boost */ 3687 ebdp = rack->r_ctl.rc_target_probertt_flight; 3688 ebdp *= rack_probe_rtt_sets_cwnd; 3689 ebdp /= 100; 3690 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3691 } else 3692 setto = rack->r_ctl.rc_target_probertt_flight; 3693 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3694 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3695 /* Enforce a min */ 3696 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3697 } 3698 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3699 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3700 } 3701 rack_log_rtt_shrinks(rack, us_cts, 3702 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3703 __LINE__, RACK_RTTS_EXITPROBE); 3704 /* Clear times last so log has all the info */ 3705 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3706 rack->r_ctl.rc_time_probertt_entered = us_cts; 3707 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3708 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3709 } 3710 3711 static void 3712 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3713 { 3714 /* Check in on probe-rtt */ 3715 if (rack->rc_gp_filled == 0) { 3716 /* We do not do p-rtt unless we have gp measurements */ 3717 return; 3718 } 3719 if (rack->in_probe_rtt) { 3720 uint64_t no_overflow; 3721 uint32_t endtime, must_stay; 3722 3723 if (rack->r_ctl.rc_went_idle_time && 3724 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3725 /* 3726 * We went idle during prtt, just exit now. 3727 */ 3728 rack_exit_probertt(rack, us_cts); 3729 } else if (rack_probe_rtt_safety_val && 3730 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3731 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3732 /* 3733 * Probe RTT safety value triggered! 3734 */ 3735 rack_log_rtt_shrinks(rack, us_cts, 3736 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3737 __LINE__, RACK_RTTS_SAFETY); 3738 rack_exit_probertt(rack, us_cts); 3739 } 3740 /* Calculate the max we will wait */ 3741 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3742 if (rack->rc_highly_buffered) 3743 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3744 /* Calculate the min we must wait */ 3745 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3746 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3747 TSTMP_LT(us_cts, endtime)) { 3748 uint32_t calc; 3749 /* Do we lower more? 
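 * Each full gp srtt spent waiting here shaves another
 * rack_per_of_gp_probertt_reduce percentage points off the probe-rtt
 * pacing multiplier, floored at rack_per_of_gp_lowthresh. For
 * example, starting from 70 with a reduction of 10 per srtt we
 * would be pacing at 40 percent of the estimated b/w after three
 * srtts, assuming the floor has not been reached.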
*/ 3750 no_exit: 3751 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3752 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3753 else 3754 calc = 0; 3755 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3756 if (calc) { 3757 /* Maybe */ 3758 calc *= rack_per_of_gp_probertt_reduce; 3759 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3760 /* Limit it too */ 3761 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3762 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3763 } 3764 /* We must reach target or the time set */ 3765 return; 3766 } 3767 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3768 if ((TSTMP_LT(us_cts, must_stay) && 3769 rack->rc_highly_buffered) || 3770 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3771 rack->r_ctl.rc_target_probertt_flight)) { 3772 /* We are not past the must_stay time */ 3773 goto no_exit; 3774 } 3775 rack_log_rtt_shrinks(rack, us_cts, 3776 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3777 __LINE__, RACK_RTTS_REACHTARGET); 3778 rack->r_ctl.rc_time_probertt_starts = us_cts; 3779 if (rack->r_ctl.rc_time_probertt_starts == 0) 3780 rack->r_ctl.rc_time_probertt_starts = 1; 3781 /* Restore back to our rate we want to pace at in prtt */ 3782 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3783 } 3784 /* 3785 * Setup our end time, some number of gp_srtts plus 200ms. 3786 */ 3787 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3788 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3789 if (rack_probertt_gpsrtt_cnt_div) 3790 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3791 else 3792 endtime = 0; 3793 endtime += rack_min_probertt_hold; 3794 endtime += rack->r_ctl.rc_time_probertt_starts; 3795 if (TSTMP_GEQ(us_cts, endtime)) { 3796 /* yes, exit probertt */ 3797 rack_exit_probertt(rack, us_cts); 3798 } 3799 3800 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3801 /* Go into probertt, its been too long since we went lower */ 3802 rack_enter_probertt(rack, us_cts); 3803 } 3804 } 3805 3806 static void 3807 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3808 uint32_t rtt, int32_t rtt_diff) 3809 { 3810 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3811 uint32_t losses; 3812 3813 if ((rack->rc_gp_dyn_mul == 0) || 3814 (rack->use_fixed_rate) || 3815 (rack->in_probe_rtt) || 3816 (rack->rc_always_pace == 0)) { 3817 /* No dynamic GP multipler in play */ 3818 return; 3819 } 3820 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3821 cur_bw = rack_get_bw(rack); 3822 /* Calculate our up and down range */ 3823 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3824 up_bnd /= 100; 3825 up_bnd += rack->r_ctl.last_gp_comp_bw; 3826 3827 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3828 subfr /= 100; 3829 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3830 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3831 /* 3832 * This is the case where our RTT is above 3833 * the max target and we have been configured 3834 * to just do timely no bonus up stuff in that case. 3835 * 3836 * There are two configurations, set to 1, and we 3837 * just do timely if we are over our max. If its 3838 * set above 1 then we slam the multipliers down 3839 * to 100 and then decrement per timely. 
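 *
 * (timely_says is 2 only when rack_make_timely_judgement() saw the
 * gp srtt at or above the filtered min rtt times rack_gp_rtt_maxmul,
 * i.e. we are well past the RTT target, which is why this branch
 * never considers an upward adjustment.)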
3840 */ 3841 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3842 __LINE__, 3); 3843 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3844 rack_validate_multipliers_at_or_below_100(rack); 3845 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3846 } else if ((last_bw_est < low_bnd) && !losses) { 3847 /* 3848 * We are decreasing this is a bit complicated this 3849 * means we are loosing ground. This could be 3850 * because another flow entered and we are competing 3851 * for b/w with it. This will push the RTT up which 3852 * makes timely unusable unless we want to get shoved 3853 * into a corner and just be backed off (the age 3854 * old problem with delay based CC). 3855 * 3856 * On the other hand if it was a route change we 3857 * would like to stay somewhat contained and not 3858 * blow out the buffers. 3859 */ 3860 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3861 __LINE__, 3); 3862 rack->r_ctl.last_gp_comp_bw = cur_bw; 3863 if (rack->rc_gp_bwred == 0) { 3864 /* Go into reduction counting */ 3865 rack->rc_gp_bwred = 1; 3866 rack->rc_gp_timely_dec_cnt = 0; 3867 } 3868 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3869 (timely_says == 0)) { 3870 /* 3871 * Push another time with a faster pacing 3872 * to try to gain back (we include override to 3873 * get a full raise factor). 3874 */ 3875 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3876 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3877 (timely_says == 0) || 3878 (rack_down_raise_thresh == 0)) { 3879 /* 3880 * Do an override up in b/w if we were 3881 * below the threshold or if the threshold 3882 * is zero we always do the raise. 3883 */ 3884 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3885 } else { 3886 /* Log it stays the same */ 3887 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3888 __LINE__, 11); 3889 } 3890 rack->rc_gp_timely_dec_cnt++; 3891 /* We are not incrementing really no-count */ 3892 rack->rc_gp_incr = 0; 3893 rack->rc_gp_timely_inc_cnt = 0; 3894 } else { 3895 /* 3896 * Lets just use the RTT 3897 * information and give up 3898 * pushing. 3899 */ 3900 goto use_timely; 3901 } 3902 } else if ((timely_says != 2) && 3903 !losses && 3904 (last_bw_est > up_bnd)) { 3905 /* 3906 * We are increasing b/w lets keep going, updating 3907 * our b/w and ignoring any timely input, unless 3908 * of course we are at our max raise (if there is one). 3909 */ 3910 3911 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3912 __LINE__, 3); 3913 rack->r_ctl.last_gp_comp_bw = cur_bw; 3914 if (rack->rc_gp_saw_ss && 3915 rack_per_upper_bound_ss && 3916 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3917 /* 3918 * In cases where we can't go higher 3919 * we should just use timely. 3920 */ 3921 goto use_timely; 3922 } 3923 if (rack->rc_gp_saw_ca && 3924 rack_per_upper_bound_ca && 3925 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 3926 /* 3927 * In cases where we can't go higher 3928 * we should just use timely. 
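 * ("Use timely" here means falling through to the gradient logic
 * below: the CA multiplier is already pinned at
 * rack_per_upper_bound_ca, so a further raise cannot take effect and
 * the only useful outcome is a decrease if timely asks for one.)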
3929 */ 3930 goto use_timely; 3931 } 3932 rack->rc_gp_bwred = 0; 3933 rack->rc_gp_timely_dec_cnt = 0; 3934 /* You get a set number of pushes if timely is trying to reduce */ 3935 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 3936 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3937 } else { 3938 /* Log it stays the same */ 3939 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 3940 __LINE__, 12); 3941 } 3942 return; 3943 } else { 3944 /* 3945 * We are staying between the lower and upper range bounds 3946 * so use timely to decide. 3947 */ 3948 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3949 __LINE__, 3); 3950 use_timely: 3951 if (timely_says) { 3952 rack->rc_gp_incr = 0; 3953 rack->rc_gp_timely_inc_cnt = 0; 3954 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 3955 !losses && 3956 (last_bw_est < low_bnd)) { 3957 /* We are loosing ground */ 3958 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3959 rack->rc_gp_timely_dec_cnt++; 3960 /* We are not incrementing really no-count */ 3961 rack->rc_gp_incr = 0; 3962 rack->rc_gp_timely_inc_cnt = 0; 3963 } else 3964 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3965 } else { 3966 rack->rc_gp_bwred = 0; 3967 rack->rc_gp_timely_dec_cnt = 0; 3968 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3969 } 3970 } 3971 } 3972 3973 static int32_t 3974 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 3975 { 3976 int32_t timely_says; 3977 uint64_t log_mult, log_rtt_a_diff; 3978 3979 log_rtt_a_diff = rtt; 3980 log_rtt_a_diff <<= 32; 3981 log_rtt_a_diff |= (uint32_t)rtt_diff; 3982 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 3983 rack_gp_rtt_maxmul)) { 3984 /* Reduce the b/w multipler */ 3985 timely_says = 2; 3986 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3987 log_mult <<= 32; 3988 log_mult |= prev_rtt; 3989 rack_log_timely(rack, timely_says, log_mult, 3990 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3991 log_rtt_a_diff, __LINE__, 4); 3992 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 3993 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 3994 max(rack_gp_rtt_mindiv , 1)))) { 3995 /* Increase the b/w multipler */ 3996 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 3997 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 3998 max(rack_gp_rtt_mindiv , 1)); 3999 log_mult <<= 32; 4000 log_mult |= prev_rtt; 4001 timely_says = 0; 4002 rack_log_timely(rack, timely_says, log_mult , 4003 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4004 log_rtt_a_diff, __LINE__, 5); 4005 } else { 4006 /* 4007 * Use a gradient to find it the timely gradient 4008 * is: 4009 * grad = rc_rtt_diff / min_rtt; 4010 * 4011 * anything below or equal to 0 will be 4012 * a increase indication. Anything above 4013 * zero is a decrease. Note we take care 4014 * of the actual gradient calculation 4015 * in the reduction (its not needed for 4016 * increase). 
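 *
 * For example, with a filtered min rtt of 10 ms and a
 * rack_gp_rtt_maxmul of 3, any gp srtt of 30 ms or more already
 * forced a decrease (timely_says 2) above, while an rtt within
 * min + (min * rack_gp_rtt_minmul) / rack_gp_rtt_mindiv of the
 * minimum forced an increase; only in between does the sign of
 * rc_rtt_diff decide.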
4017 */ 4018 log_mult = prev_rtt; 4019 if (rtt_diff <= 0) { 4020 /* 4021 * Rttdiff is less than zero, increase the 4022 * b/w multipler (its 0 or negative) 4023 */ 4024 timely_says = 0; 4025 rack_log_timely(rack, timely_says, log_mult, 4026 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4027 } else { 4028 /* Reduce the b/w multipler */ 4029 timely_says = 1; 4030 rack_log_timely(rack, timely_says, log_mult, 4031 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4032 } 4033 } 4034 return (timely_says); 4035 } 4036 4037 static void 4038 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4039 tcp_seq th_ack, int line, uint8_t quality) 4040 { 4041 uint64_t tim, bytes_ps, ltim, stim, utim; 4042 uint32_t segsiz, bytes, reqbytes, us_cts; 4043 int32_t gput, new_rtt_diff, timely_says; 4044 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4045 int did_add = 0; 4046 4047 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4048 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4049 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4050 tim = us_cts - tp->gput_ts; 4051 else 4052 tim = 0; 4053 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4054 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4055 else 4056 stim = 0; 4057 /* 4058 * Use the larger of the send time or ack time. This prevents us 4059 * from being influenced by ack artifacts to come up with too 4060 * high of measurement. Note that since we are spanning over many more 4061 * bytes in most of our measurements hopefully that is less likely to 4062 * occur. 4063 */ 4064 if (tim > stim) 4065 utim = max(tim, 1); 4066 else 4067 utim = max(stim, 1); 4068 /* Lets get a msec time ltim too for the old stuff */ 4069 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4070 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4071 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4072 if ((tim == 0) && (stim == 0)) { 4073 /* 4074 * Invalid measurement time, maybe 4075 * all on one ack/one send? 4076 */ 4077 bytes = 0; 4078 bytes_ps = 0; 4079 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4080 0, 0, 0, 10, __LINE__, NULL, quality); 4081 goto skip_measurement; 4082 } 4083 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4084 /* We never made a us_rtt measurement? */ 4085 bytes = 0; 4086 bytes_ps = 0; 4087 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4088 0, 0, 0, 10, __LINE__, NULL, quality); 4089 goto skip_measurement; 4090 } 4091 /* 4092 * Calculate the maximum possible b/w this connection 4093 * could have. We base our calculation on the lowest 4094 * rtt we have seen during the measurement and the 4095 * largest rwnd the client has given us in that time. This 4096 * forms a BDP that is the maximum that we could ever 4097 * get to the client. Anything larger is not valid. 4098 * 4099 * I originally had code here that rejected measurements 4100 * where the time was less than 1/2 the latest us_rtt. 4101 * But after thinking on that I realized its wrong since 4102 * say you had a 150Mbps or even 1Gbps link, and you 4103 * were a long way away.. example I am in Europe (100ms rtt) 4104 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4105 * bytes my time would be 1.2ms, and yet my rtt would say 4106 * the measurement was invalid the time was < 50ms. The 4107 * same thing is true for 150Mb (8ms of time). 
4108 * 4109 * A better way I realized is to look at what the maximum 4110 * the connection could possibly do. This is gated on 4111 * the lowest RTT we have seen and the highest rwnd. 4112 * We should in theory never exceed that, if we are 4113 * then something on the path is storing up packets 4114 * and then feeding them all at once to our endpoint 4115 * messing up our measurement. 4116 */ 4117 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4118 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4119 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4120 if (SEQ_LT(th_ack, tp->gput_seq)) { 4121 /* No measurement can be made */ 4122 bytes = 0; 4123 bytes_ps = 0; 4124 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4125 0, 0, 0, 10, __LINE__, NULL, quality); 4126 goto skip_measurement; 4127 } else 4128 bytes = (th_ack - tp->gput_seq); 4129 bytes_ps = (uint64_t)bytes; 4130 /* 4131 * Don't measure a b/w for pacing unless we have gotten at least 4132 * an initial windows worth of data in this measurement interval. 4133 * 4134 * Small numbers of bytes get badly influenced by delayed ack and 4135 * other artifacts. Note we take the initial window or our 4136 * defined minimum GP (defaulting to 10 which hopefully is the 4137 * IW). 4138 */ 4139 if (rack->rc_gp_filled == 0) { 4140 /* 4141 * The initial estimate is special. We 4142 * have blasted out an IW worth of packets 4143 * without a real valid ack ts results. We 4144 * then setup the app_limited_needs_set flag, 4145 * this should get the first ack in (probably 2 4146 * MSS worth) to be recorded as the timestamp. 4147 * We thus allow a smaller number of bytes i.e. 4148 * IW - 2MSS. 4149 */ 4150 reqbytes -= (2 * segsiz); 4151 /* Also lets fill previous for our first measurement to be neutral */ 4152 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4153 } 4154 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4155 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4156 rack->r_ctl.rc_app_limited_cnt, 4157 0, 0, 10, __LINE__, NULL, quality); 4158 goto skip_measurement; 4159 } 4160 /* 4161 * We now need to calculate the Timely like status so 4162 * we can update (possibly) the b/w multipliers. 4163 */ 4164 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4165 if (rack->rc_gp_filled == 0) { 4166 /* No previous reading */ 4167 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4168 } else { 4169 if (rack->measure_saw_probe_rtt == 0) { 4170 /* 4171 * We don't want a probertt to be counted 4172 * since it will be negative incorrectly. We 4173 * expect to be reducing the RTT when we 4174 * pace at a slower rate. 4175 */ 4176 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4177 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4178 } 4179 } 4180 timely_says = rack_make_timely_judgement(rack, 4181 rack->r_ctl.rc_gp_srtt, 4182 rack->r_ctl.rc_rtt_diff, 4183 rack->r_ctl.rc_prev_gp_srtt 4184 ); 4185 bytes_ps *= HPTS_USEC_IN_SEC; 4186 bytes_ps /= utim; 4187 if (bytes_ps > rack->r_ctl.last_max_bw) { 4188 /* 4189 * Something is on path playing 4190 * since this b/w is not possible based 4191 * on our BDP (highest rwnd and lowest rtt 4192 * we saw in the measurement window). 4193 * 4194 * Another option here would be to 4195 * instead skip the measurement. 
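 *
 * For a sense of scale: last_max_bw is the largest rwnd seen during
 * the measurement times HPTS_USEC_IN_SEC divided by the lowest
 * us_rtt seen, so a 1 MB peak rwnd with a 10 ms low rtt caps the
 * believable goodput at 100 MB/s; anything computed above that is
 * treated as compression/bursting and clamped below.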
4196 */ 4197 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4198 bytes_ps, rack->r_ctl.last_max_bw, 0, 4199 11, __LINE__, NULL, quality); 4200 bytes_ps = rack->r_ctl.last_max_bw; 4201 } 4202 /* We store gp for b/w in bytes per second */ 4203 if (rack->rc_gp_filled == 0) { 4204 /* Initial measurement */ 4205 if (bytes_ps) { 4206 rack->r_ctl.gp_bw = bytes_ps; 4207 rack->rc_gp_filled = 1; 4208 rack->r_ctl.num_measurements = 1; 4209 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4210 } else { 4211 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4212 rack->r_ctl.rc_app_limited_cnt, 4213 0, 0, 10, __LINE__, NULL, quality); 4214 } 4215 if (tcp_in_hpts(rack->rc_inp) && 4216 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4217 /* 4218 * Ok we can't trust the pacer in this case 4219 * where we transition from un-paced to paced. 4220 * Or for that matter when the burst mitigation 4221 * was making a wild guess and got it wrong. 4222 * Stop the pacer and clear up all the aggregate 4223 * delays etc. 4224 */ 4225 tcp_hpts_remove(rack->rc_inp); 4226 rack->r_ctl.rc_hpts_flags = 0; 4227 rack->r_ctl.rc_last_output_to = 0; 4228 } 4229 did_add = 2; 4230 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4231 /* Still a small number, run an average */ 4232 rack->r_ctl.gp_bw += bytes_ps; 4233 addpart = rack->r_ctl.num_measurements; 4234 rack->r_ctl.num_measurements++; 4235 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4236 /* We have collected enough to move forward */ 4237 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4238 } 4239 did_add = 3; 4240 } else { 4241 /* 4242 * We want to take 1/wma of the goodput and add it in to 7/8ths 4243 * of the old value, weighted by the srtt. So if your measurement 4244 * period is say 2 SRTT's long you would get 1/4 as the 4245 * value, if it was like 1/2 SRTT then you would get 1/16th. 4246 * 4247 * But we must be careful not to take too much i.e. if the 4248 * srtt is say 20ms and the measurement is taken over 4249 * 400ms our weight would be 400/20 i.e. 20. On the 4250 * other hand if we get a measurement over 1ms with a 4251 * 10ms rtt we only want to take a much smaller portion. 4252 */ 4253 if (rack->r_ctl.num_measurements < 0xff) { 4254 rack->r_ctl.num_measurements++; 4255 } 4256 srtt = (uint64_t)tp->t_srtt; 4257 if (srtt == 0) { 4258 /* 4259 * Strange, why did t_srtt go back to zero? 4260 */ 4261 if (rack->r_ctl.rc_rack_min_rtt) 4262 srtt = rack->r_ctl.rc_rack_min_rtt; 4263 else 4264 srtt = HPTS_USEC_IN_MSEC; 4265 } 4266 /* 4267 * XXXrrs: Note for reviewers, in playing with 4268 * dynamic pacing I discovered this GP calculation 4269 * as done originally leads to some undesired results. 4270 * Basically you can get longer measurements contributing 4271 * too much to the WMA. Thus I changed it, if you are doing 4272 * dynamic adjustments, to only do the apportioned adjustment 4273 * if we have a very small (time wise) measurement. Longer 4274 * measurements just get their weight (defaulting to 1/8) 4275 * added to the WMA. We may want to think about changing 4276 * this to always do that for both sides i.e. dynamic 4277 * and non-dynamic... but considering lots of folks 4278 * were playing with this I did not want to change the 4279 * calculation per se without your thoughts.. Lawerence? 4280 * Peter??
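 *
 * Concretely, with rack_wma_divisor at its default of 8: a sample
 * spanning a full srtt (or anything longer, once capped) replaces
 * 1/8 of the running gp_bw, while a sample spanning half an srtt
 * replaces only 1/16 of it. The non-dynamic path uses a fixed 1/8
 * scaling but additionally never lets a single sample displace more
 * than half of the running total.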
4281 */ 4282 if (rack->rc_gp_dyn_mul == 0) { 4283 subpart = rack->r_ctl.gp_bw * utim; 4284 subpart /= (srtt * 8); 4285 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4286 /* 4287 * The b/w update takes no more 4288 * away then 1/2 our running total 4289 * so factor it in. 4290 */ 4291 addpart = bytes_ps * utim; 4292 addpart /= (srtt * 8); 4293 } else { 4294 /* 4295 * Don't allow a single measurement 4296 * to account for more than 1/2 of the 4297 * WMA. This could happen on a retransmission 4298 * where utim becomes huge compared to 4299 * srtt (multiple retransmissions when using 4300 * the sending rate which factors in all the 4301 * transmissions from the first one). 4302 */ 4303 subpart = rack->r_ctl.gp_bw / 2; 4304 addpart = bytes_ps / 2; 4305 } 4306 resid_bw = rack->r_ctl.gp_bw - subpart; 4307 rack->r_ctl.gp_bw = resid_bw + addpart; 4308 did_add = 1; 4309 } else { 4310 if ((utim / srtt) <= 1) { 4311 /* 4312 * The b/w update was over a small period 4313 * of time. The idea here is to prevent a small 4314 * measurement time period from counting 4315 * too much. So we scale it based on the 4316 * time so it attributes less than 1/rack_wma_divisor 4317 * of its measurement. 4318 */ 4319 subpart = rack->r_ctl.gp_bw * utim; 4320 subpart /= (srtt * rack_wma_divisor); 4321 addpart = bytes_ps * utim; 4322 addpart /= (srtt * rack_wma_divisor); 4323 } else { 4324 /* 4325 * The scaled measurement was long 4326 * enough so lets just add in the 4327 * portion of the measurement i.e. 1/rack_wma_divisor 4328 */ 4329 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4330 addpart = bytes_ps / rack_wma_divisor; 4331 } 4332 if ((rack->measure_saw_probe_rtt == 0) || 4333 (bytes_ps > rack->r_ctl.gp_bw)) { 4334 /* 4335 * For probe-rtt we only add it in 4336 * if its larger, all others we just 4337 * add in. 4338 */ 4339 did_add = 1; 4340 resid_bw = rack->r_ctl.gp_bw - subpart; 4341 rack->r_ctl.gp_bw = resid_bw + addpart; 4342 } 4343 } 4344 } 4345 if ((rack->gp_ready == 0) && 4346 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4347 /* We have enough measurements now */ 4348 rack->gp_ready = 1; 4349 rack_set_cc_pacing(rack); 4350 if (rack->defer_options) 4351 rack_apply_deferred_options(rack); 4352 } 4353 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4354 rack_get_bw(rack), 22, did_add, NULL, quality); 4355 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4356 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4357 rack_update_multiplier(rack, timely_says, bytes_ps, 4358 rack->r_ctl.rc_gp_srtt, 4359 rack->r_ctl.rc_rtt_diff); 4360 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4361 rack_get_bw(rack), 3, line, NULL, quality); 4362 /* reset the gp srtt and setup the new prev */ 4363 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4364 /* Record the lost count for the next measurement */ 4365 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4366 /* 4367 * We restart our diffs based on the gpsrtt in the 4368 * measurement window. 4369 */ 4370 rack->rc_gp_rtt_set = 0; 4371 rack->rc_gp_saw_rec = 0; 4372 rack->rc_gp_saw_ca = 0; 4373 rack->rc_gp_saw_ss = 0; 4374 rack->rc_dragged_bottom = 0; 4375 skip_measurement: 4376 4377 #ifdef STATS 4378 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4379 gput); 4380 /* 4381 * XXXLAS: This is a temporary hack, and should be 4382 * chained off VOI_TCP_GPUT when stats(9) grows an 4383 * API to deal with chained VOIs. 
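 * (The VOI_TCP_GPUT_ND update below records this sample's goodput
 * as a percentage change relative to the previous sample.)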
4384 */ 4385 if (tp->t_stats_gput_prev > 0) 4386 stats_voi_update_abs_s32(tp->t_stats, 4387 VOI_TCP_GPUT_ND, 4388 ((gput - tp->t_stats_gput_prev) * 100) / 4389 tp->t_stats_gput_prev); 4390 #endif 4391 tp->t_flags &= ~TF_GPUTINPROG; 4392 tp->t_stats_gput_prev = gput; 4393 /* 4394 * Now are we app limited now and there is space from where we 4395 * were to where we want to go? 4396 * 4397 * We don't do the other case i.e. non-applimited here since 4398 * the next send will trigger us picking up the missing data. 4399 */ 4400 if (rack->r_ctl.rc_first_appl && 4401 TCPS_HAVEESTABLISHED(tp->t_state) && 4402 rack->r_ctl.rc_app_limited_cnt && 4403 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4404 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4405 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4406 /* 4407 * Yep there is enough outstanding to make a measurement here. 4408 */ 4409 struct rack_sendmap *rsm, fe; 4410 4411 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4412 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4413 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4414 rack->app_limited_needs_set = 0; 4415 tp->gput_seq = th_ack; 4416 if (rack->in_probe_rtt) 4417 rack->measure_saw_probe_rtt = 1; 4418 else if ((rack->measure_saw_probe_rtt) && 4419 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4420 rack->measure_saw_probe_rtt = 0; 4421 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4422 /* There is a full window to gain info from */ 4423 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4424 } else { 4425 /* We can only measure up to the applimited point */ 4426 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4427 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4428 /* 4429 * We don't have enough to make a measurement. 4430 */ 4431 tp->t_flags &= ~TF_GPUTINPROG; 4432 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4433 0, 0, 0, 6, __LINE__, NULL, quality); 4434 return; 4435 } 4436 } 4437 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4438 /* 4439 * We will get no more data into the SB 4440 * this means we need to have the data available 4441 * before we start a measurement. 4442 */ 4443 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4444 /* Nope not enough data. */ 4445 return; 4446 } 4447 } 4448 tp->t_flags |= TF_GPUTINPROG; 4449 /* 4450 * Now we need to find the timestamp of the send at tp->gput_seq 4451 * for the send based measurement. 4452 */ 4453 fe.r_start = tp->gput_seq; 4454 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4455 if (rsm) { 4456 /* Ok send-based limit is set */ 4457 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4458 /* 4459 * Move back to include the earlier part 4460 * so our ack time lines up right (this may 4461 * make an overlapping measurement but thats 4462 * ok). 4463 */ 4464 tp->gput_seq = rsm->r_start; 4465 } 4466 if (rsm->r_flags & RACK_ACKED) 4467 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4468 else 4469 rack->app_limited_needs_set = 1; 4470 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4471 } else { 4472 /* 4473 * If we don't find the rsm due to some 4474 * send-limit set the current time, which 4475 * basically disables the send-limit. 
4476 */ 4477 struct timeval tv; 4478 4479 microuptime(&tv); 4480 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4481 } 4482 rack_log_pacing_delay_calc(rack, 4483 tp->gput_seq, 4484 tp->gput_ack, 4485 (uint64_t)rsm, 4486 tp->gput_ts, 4487 rack->r_ctl.rc_app_limited_cnt, 4488 9, 4489 __LINE__, NULL, quality); 4490 } 4491 } 4492 4493 /* 4494 * CC wrapper hook functions 4495 */ 4496 static void 4497 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4498 uint16_t type, int32_t recovery) 4499 { 4500 uint32_t prior_cwnd, acked; 4501 struct tcp_log_buffer *lgb = NULL; 4502 uint8_t labc_to_use, quality; 4503 4504 INP_WLOCK_ASSERT(tp->t_inpcb); 4505 tp->ccv->nsegs = nsegs; 4506 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4507 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4508 uint32_t max; 4509 4510 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4511 if (tp->ccv->bytes_this_ack > max) { 4512 tp->ccv->bytes_this_ack = max; 4513 } 4514 } 4515 #ifdef STATS 4516 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4517 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4518 #endif 4519 quality = RACK_QUALITY_NONE; 4520 if ((tp->t_flags & TF_GPUTINPROG) && 4521 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4522 /* Measure the Goodput */ 4523 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4524 #ifdef NETFLIX_PEAKRATE 4525 if ((type == CC_ACK) && 4526 (tp->t_maxpeakrate)) { 4527 /* 4528 * We update t_peakrate_thr. This gives us roughly 4529 * one update per round trip time. Note 4530 * it will only be used if pace_always is off i.e 4531 * we don't do this for paced flows. 4532 */ 4533 rack_update_peakrate_thr(tp); 4534 } 4535 #endif 4536 } 4537 /* Which way our we limited, if not cwnd limited no advance in CA */ 4538 if (tp->snd_cwnd <= tp->snd_wnd) 4539 tp->ccv->flags |= CCF_CWND_LIMITED; 4540 else 4541 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4542 if (tp->snd_cwnd > tp->snd_ssthresh) { 4543 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4544 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4545 /* For the setting of a window past use the actual scwnd we are using */ 4546 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4547 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4548 tp->ccv->flags |= CCF_ABC_SENTAWND; 4549 } 4550 } else { 4551 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4552 tp->t_bytes_acked = 0; 4553 } 4554 prior_cwnd = tp->snd_cwnd; 4555 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4556 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4557 labc_to_use = rack->rc_labc; 4558 else 4559 labc_to_use = rack_max_abc_post_recovery; 4560 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4561 union tcp_log_stackspecific log; 4562 struct timeval tv; 4563 4564 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4565 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4566 log.u_bbr.flex1 = th_ack; 4567 log.u_bbr.flex2 = tp->ccv->flags; 4568 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4569 log.u_bbr.flex4 = tp->ccv->nsegs; 4570 log.u_bbr.flex5 = labc_to_use; 4571 log.u_bbr.flex6 = prior_cwnd; 4572 log.u_bbr.flex7 = V_tcp_do_newsack; 4573 log.u_bbr.flex8 = 1; 4574 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4575 0, &log, false, NULL, NULL, 0, &tv); 4576 } 4577 if (CC_ALGO(tp)->ack_received != NULL) { 4578 /* XXXLAS: Find a way to live without this */ 4579 tp->ccv->curack = th_ack; 4580 
tp->ccv->labc = labc_to_use;
4581 tp->ccv->flags |= CCF_USE_LOCAL_ABC;
4582 CC_ALGO(tp)->ack_received(tp->ccv, type);
4583 }
4584 if (lgb) {
4585 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
4586 }
4587 if (rack->r_must_retran) {
4588 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
4589 /*
4590 * We are now beyond the rxt point so let's disable
4591 * the flag.
4592 */
4593 rack->r_ctl.rc_out_at_rto = 0;
4594 rack->r_must_retran = 0;
4595 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
4596 /*
4597 * Only decrement rc_out_at_rto if the cwnd advances
4598 * by at least a whole segment. Otherwise, the next time
4599 * the peer acks we won't be able to send; this generally
4600 * happens when we are in Congestion Avoidance.
4601 */
4602 if (acked <= rack->r_ctl.rc_out_at_rto){
4603 rack->r_ctl.rc_out_at_rto -= acked;
4604 } else {
4605 rack->r_ctl.rc_out_at_rto = 0;
4606 }
4607 }
4608 }
4609 #ifdef STATS
4610 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
4611 #endif
4612 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
4613 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
4614 }
4615 #ifdef NETFLIX_PEAKRATE
4616 /* we enforce the max peak rate if it is set and we are not pacing */
4617 if ((rack->rc_always_pace == 0) &&
4618 tp->t_peakrate_thr &&
4619 (tp->snd_cwnd > tp->t_peakrate_thr)) {
4620 tp->snd_cwnd = tp->t_peakrate_thr;
4621 }
4622 #endif
4623 }
4624
4625 static void
4626 tcp_rack_partialack(struct tcpcb *tp)
4627 {
4628 struct tcp_rack *rack;
4629
4630 rack = (struct tcp_rack *)tp->t_fb_ptr;
4631 INP_WLOCK_ASSERT(tp->t_inpcb);
4632 /*
4633 * If we are doing PRR and have enough
4634 * room to send <or> we are pacing and prr
4635 * is disabled we will want to see if we
4636 * can send data (by setting r_wanted_output to
4637 * true).
4638 */
4639 if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
4640 rack->rack_no_prr)
4641 rack->r_wanted_output = 1;
4642 }
4643
4644 static void
4645 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
4646 {
4647 struct tcp_rack *rack;
4648 uint32_t orig_cwnd;
4649
4650 orig_cwnd = tp->snd_cwnd;
4651 INP_WLOCK_ASSERT(tp->t_inpcb);
4652 rack = (struct tcp_rack *)tp->t_fb_ptr;
4653 /* only alert CC if we alerted when we entered */
4654 if (CC_ALGO(tp)->post_recovery != NULL) {
4655 tp->ccv->curack = th_ack;
4656 CC_ALGO(tp)->post_recovery(tp->ccv);
4657 if (tp->snd_cwnd < tp->snd_ssthresh) {
4658 /*
4659 * Rack has burst control and pacing,
4660 * so let's not set this any lower than
4661 * snd_ssthresh per RFC-6582 (option 2).
4662 */ 4663 tp->snd_cwnd = tp->snd_ssthresh; 4664 } 4665 } 4666 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4667 union tcp_log_stackspecific log; 4668 struct timeval tv; 4669 4670 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4671 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4672 log.u_bbr.flex1 = th_ack; 4673 log.u_bbr.flex2 = tp->ccv->flags; 4674 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4675 log.u_bbr.flex4 = tp->ccv->nsegs; 4676 log.u_bbr.flex5 = V_tcp_abc_l_var; 4677 log.u_bbr.flex6 = orig_cwnd; 4678 log.u_bbr.flex7 = V_tcp_do_newsack; 4679 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4680 log.u_bbr.flex8 = 2; 4681 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4682 0, &log, false, NULL, NULL, 0, &tv); 4683 } 4684 if ((rack->rack_no_prr == 0) && 4685 (rack->no_prr_addback == 0) && 4686 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4687 /* 4688 * Suck the next prr cnt back into cwnd, but 4689 * only do that if we are not application limited. 4690 */ 4691 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4692 /* 4693 * We are allowed to add back to the cwnd the amount we did 4694 * not get out if: 4695 * a) no_prr_addback is off. 4696 * b) we are not app limited 4697 * c) we are doing prr 4698 * <and> 4699 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4700 */ 4701 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4702 rack->r_ctl.rc_prr_sndcnt); 4703 } 4704 rack->r_ctl.rc_prr_sndcnt = 0; 4705 rack_log_to_prr(rack, 1, 0); 4706 } 4707 rack_log_to_prr(rack, 14, orig_cwnd); 4708 tp->snd_recover = tp->snd_una; 4709 if (rack->r_ctl.dsack_persist) { 4710 rack->r_ctl.dsack_persist--; 4711 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4712 rack->r_ctl.num_dsack = 0; 4713 } 4714 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4715 } 4716 EXIT_RECOVERY(tp->t_flags); 4717 } 4718 4719 static void 4720 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4721 { 4722 struct tcp_rack *rack; 4723 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4724 4725 INP_WLOCK_ASSERT(tp->t_inpcb); 4726 #ifdef STATS 4727 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4728 #endif 4729 if (IN_RECOVERY(tp->t_flags) == 0) { 4730 in_rec_at_entry = 0; 4731 ssthresh_enter = tp->snd_ssthresh; 4732 cwnd_enter = tp->snd_cwnd; 4733 } else 4734 in_rec_at_entry = 1; 4735 rack = (struct tcp_rack *)tp->t_fb_ptr; 4736 switch (type) { 4737 case CC_NDUPACK: 4738 tp->t_flags &= ~TF_WASFRECOVERY; 4739 tp->t_flags &= ~TF_WASCRECOVERY; 4740 if (!IN_FASTRECOVERY(tp->t_flags)) { 4741 rack->r_ctl.rc_prr_delivered = 0; 4742 rack->r_ctl.rc_prr_out = 0; 4743 if (rack->rack_no_prr == 0) { 4744 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4745 rack_log_to_prr(rack, 2, in_rec_at_entry); 4746 } 4747 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4748 tp->snd_recover = tp->snd_max; 4749 if (tp->t_flags2 & TF2_ECN_PERMIT) 4750 tp->t_flags2 |= TF2_ECN_SND_CWR; 4751 } 4752 break; 4753 case CC_ECN: 4754 if (!IN_CONGRECOVERY(tp->t_flags) || 4755 /* 4756 * Allow ECN reaction on ACK to CWR, if 4757 * that data segment was also CE marked. 
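 * (i.e. the ack is at or beyond snd_recover, so this looks like a new
 * congestion event rather than the one we are already reacting to.)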
4758 */ 4759 SEQ_GEQ(ack, tp->snd_recover)) { 4760 EXIT_CONGRECOVERY(tp->t_flags); 4761 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4762 tp->snd_recover = tp->snd_max + 1; 4763 if (tp->t_flags2 & TF2_ECN_PERMIT) 4764 tp->t_flags2 |= TF2_ECN_SND_CWR; 4765 } 4766 break; 4767 case CC_RTO: 4768 tp->t_dupacks = 0; 4769 tp->t_bytes_acked = 0; 4770 EXIT_RECOVERY(tp->t_flags); 4771 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4772 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4773 orig_cwnd = tp->snd_cwnd; 4774 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4775 rack_log_to_prr(rack, 16, orig_cwnd); 4776 if (tp->t_flags2 & TF2_ECN_PERMIT) 4777 tp->t_flags2 |= TF2_ECN_SND_CWR; 4778 break; 4779 case CC_RTO_ERR: 4780 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4781 /* RTO was unnecessary, so reset everything. */ 4782 tp->snd_cwnd = tp->snd_cwnd_prev; 4783 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4784 tp->snd_recover = tp->snd_recover_prev; 4785 if (tp->t_flags & TF_WASFRECOVERY) { 4786 ENTER_FASTRECOVERY(tp->t_flags); 4787 tp->t_flags &= ~TF_WASFRECOVERY; 4788 } 4789 if (tp->t_flags & TF_WASCRECOVERY) { 4790 ENTER_CONGRECOVERY(tp->t_flags); 4791 tp->t_flags &= ~TF_WASCRECOVERY; 4792 } 4793 tp->snd_nxt = tp->snd_max; 4794 tp->t_badrxtwin = 0; 4795 break; 4796 } 4797 if ((CC_ALGO(tp)->cong_signal != NULL) && 4798 (type != CC_RTO)){ 4799 tp->ccv->curack = ack; 4800 CC_ALGO(tp)->cong_signal(tp->ccv, type); 4801 } 4802 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4803 rack_log_to_prr(rack, 15, cwnd_enter); 4804 rack->r_ctl.dsack_byte_cnt = 0; 4805 rack->r_ctl.retran_during_recovery = 0; 4806 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4807 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4808 rack->r_ent_rec_ns = 1; 4809 } 4810 } 4811 4812 static inline void 4813 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4814 { 4815 uint32_t i_cwnd; 4816 4817 INP_WLOCK_ASSERT(tp->t_inpcb); 4818 4819 #ifdef NETFLIX_STATS 4820 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4821 if (tp->t_state == TCPS_ESTABLISHED) 4822 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4823 #endif 4824 if (CC_ALGO(tp)->after_idle != NULL) 4825 CC_ALGO(tp)->after_idle(tp->ccv); 4826 4827 if (tp->snd_cwnd == 1) 4828 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4829 else 4830 i_cwnd = rc_init_window(rack); 4831 4832 /* 4833 * Being idle is no different than the initial window. If the cc 4834 * clamps it down below the initial window raise it to the initial 4835 * window. 4836 */ 4837 if (tp->snd_cwnd < i_cwnd) { 4838 tp->snd_cwnd = i_cwnd; 4839 } 4840 } 4841 4842 /* 4843 * Indicate whether this ack should be delayed. We can delay the ack if 4844 * following conditions are met: 4845 * - There is no delayed ack timer in progress. 4846 * - Our last ack wasn't a 0-sized window. We never want to delay 4847 * the ack that opens up a 0-sized window. 4848 * - LRO wasn't used for this segment. We make sure by checking that the 4849 * segment size is not larger than the MSS. 4850 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4851 * connection. 4852 */ 4853 #define DELAY_ACK(tp, tlen) \ 4854 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4855 ((tp->t_flags & TF_DELACK) == 0) && \ 4856 (tlen <= tp->t_maxseg) && \ 4857 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4858 4859 static struct rack_sendmap * 4860 rack_find_lowest_rsm(struct tcp_rack *rack) 4861 { 4862 struct rack_sendmap *rsm; 4863 4864 /* 4865 * Walk the time-order transmitted list looking for an rsm that is 4866 * not acked. 
This will be the one that was sent the longest time
4867 * ago that is still outstanding.
4868 */
4869 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
4870 if (rsm->r_flags & RACK_ACKED) {
4871 continue;
4872 }
4873 goto finish;
4874 }
4875 finish:
4876 return (rsm);
4877 }
4878
4879 static struct rack_sendmap *
4880 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
4881 {
4882 struct rack_sendmap *prsm;
4883
4884 /*
4885 * Walk the sequence order list backward until we arrive at
4886 * the highest seq not acked. In theory when this is called it
4887 * should be the last segment (but it was not, hence the walk back).
4888 */
4889 prsm = rsm;
4890 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
4891 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
4892 continue;
4893 }
4894 return (prsm);
4895 }
4896 return (NULL);
4897 }
4898
4899 static uint32_t
4900 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
4901 {
4902 int32_t lro;
4903 uint32_t thresh;
4904
4905 /*
4906 * lro is the flag we use to determine if we have seen reordering.
4907 * If it gets set we have seen reordering. The reorder logic either
4908 * works in one of two ways:
4909 *
4910 * If reorder-fade is configured, then we track the last time we saw
4911 * re-ordering occur. If we reach the point where enough time has
4912 * passed we no longer consider reordering to be occurring.
4913 *
4914 * Or if reorder-fade is 0, then once we see reordering we consider
4915 * the connection to always be subject to reordering and just set lro
4916 * to 1.
4917 *
4918 * In the end if lro is non-zero we add the extra time for
4919 * reordering in.
4920 */
4921 if (srtt == 0)
4922 srtt = 1;
4923 if (rack->r_ctl.rc_reorder_ts) {
4924 if (rack->r_ctl.rc_reorder_fade) {
4925 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
4926 lro = cts - rack->r_ctl.rc_reorder_ts;
4927 if (lro == 0) {
4928 /*
4929 * No time has passed since the last
4930 * reorder, mark it as reordering.
4931 */
4932 lro = 1;
4933 }
4934 } else {
4935 /* Negative time? */
4936 lro = 0;
4937 }
4938 if (lro > rack->r_ctl.rc_reorder_fade) {
4939 /* Turn off reordering seen too */
4940 rack->r_ctl.rc_reorder_ts = 0;
4941 lro = 0;
4942 }
4943 } else {
4944 /* Reordering does not fade */
4945 lro = 1;
4946 }
4947 } else {
4948 lro = 0;
4949 }
4950 if (rack->rc_rack_tmr_std_based == 0) {
4951 thresh = srtt + rack->r_ctl.rc_pkt_delay;
4952 } else {
4953 /* Standards based pkt-delay is 1/4 srtt */
4954 thresh = srtt + (srtt >> 2);
4955 }
4956 if (lro && (rack->rc_rack_tmr_std_based == 0)) {
4957 /* It must be set, if not you get 1/4 rtt */
4958 if (rack->r_ctl.rc_reorder_shift)
4959 thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
4960 else
4961 thresh += (srtt >> 2);
4962 }
4963 if (rack->rc_rack_use_dsack &&
4964 lro &&
4965 (rack->r_ctl.num_dsack > 0)) {
4966 /*
4967 * We only increase the reordering window if we
4968 * have seen reordering <and> we have a DSACK count.
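 * Illustrative numbers: with srtt = 40000 usecs and num_dsack = 2 the line
 * below adds 2 * (srtt >> 2) = 20000 usecs to thresh, still subject to the
 * 2 * srtt and rack_rto_max ceilings that follow.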
4969 */ 4970 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 4971 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 4972 } 4973 /* SRTT * 2 is the ceiling */ 4974 if (thresh > (srtt * 2)) { 4975 thresh = srtt * 2; 4976 } 4977 /* And we don't want it above the RTO max either */ 4978 if (thresh > rack_rto_max) { 4979 thresh = rack_rto_max; 4980 } 4981 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 4982 return (thresh); 4983 } 4984 4985 static uint32_t 4986 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 4987 struct rack_sendmap *rsm, uint32_t srtt) 4988 { 4989 struct rack_sendmap *prsm; 4990 uint32_t thresh, len; 4991 int segsiz; 4992 4993 if (srtt == 0) 4994 srtt = 1; 4995 if (rack->r_ctl.rc_tlp_threshold) 4996 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 4997 else 4998 thresh = (srtt * 2); 4999 5000 /* Get the previous sent packet, if any */ 5001 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5002 len = rsm->r_end - rsm->r_start; 5003 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5004 /* Exactly like the ID */ 5005 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5006 uint32_t alt_thresh; 5007 /* 5008 * Compensate for delayed-ack with the d-ack time. 5009 */ 5010 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5011 if (alt_thresh > thresh) 5012 thresh = alt_thresh; 5013 } 5014 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5015 /* 2.1 behavior */ 5016 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5017 if (prsm && (len <= segsiz)) { 5018 /* 5019 * Two packets outstanding, thresh should be (2*srtt) + 5020 * possible inter-packet delay (if any). 5021 */ 5022 uint32_t inter_gap = 0; 5023 int idx, nidx; 5024 5025 idx = rsm->r_rtr_cnt - 1; 5026 nidx = prsm->r_rtr_cnt - 1; 5027 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5028 /* Yes it was sent later (or at the same time) */ 5029 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5030 } 5031 thresh += inter_gap; 5032 } else if (len <= segsiz) { 5033 /* 5034 * Possibly compensate for delayed-ack. 5035 */ 5036 uint32_t alt_thresh; 5037 5038 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5039 if (alt_thresh > thresh) 5040 thresh = alt_thresh; 5041 } 5042 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5043 /* 2.2 behavior */ 5044 if (len <= segsiz) { 5045 uint32_t alt_thresh; 5046 /* 5047 * Compensate for delayed-ack with the d-ack time. 5048 */ 5049 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5050 if (alt_thresh > thresh) 5051 thresh = alt_thresh; 5052 } 5053 } 5054 /* Not above an RTO */ 5055 if (thresh > tp->t_rxtcur) { 5056 thresh = tp->t_rxtcur; 5057 } 5058 /* Not above a RTO max */ 5059 if (thresh > rack_rto_max) { 5060 thresh = rack_rto_max; 5061 } 5062 /* Apply user supplied min TLP */ 5063 if (thresh < rack_tlp_min) { 5064 thresh = rack_tlp_min; 5065 } 5066 return (thresh); 5067 } 5068 5069 static uint32_t 5070 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5071 { 5072 /* 5073 * We want the rack_rtt which is the 5074 * last rtt we measured. However if that 5075 * does not exist we fallback to the srtt (which 5076 * we probably will never do) and then as a last 5077 * resort we use RACK_INITIAL_RTO if no srtt is 5078 * yet set. 
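 * Order of preference: rc_rack_rtt, then tp->t_srtt, then RACK_INITIAL_RTO.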
5079 */ 5080 if (rack->rc_rack_rtt) 5081 return (rack->rc_rack_rtt); 5082 else if (tp->t_srtt == 0) 5083 return (RACK_INITIAL_RTO); 5084 return (tp->t_srtt); 5085 } 5086 5087 static struct rack_sendmap * 5088 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5089 { 5090 /* 5091 * Check to see that we don't need to fall into recovery. We will 5092 * need to do so if our oldest transmit is past the time we should 5093 * have had an ack. 5094 */ 5095 struct tcp_rack *rack; 5096 struct rack_sendmap *rsm; 5097 int32_t idx; 5098 uint32_t srtt, thresh; 5099 5100 rack = (struct tcp_rack *)tp->t_fb_ptr; 5101 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5102 return (NULL); 5103 } 5104 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5105 if (rsm == NULL) 5106 return (NULL); 5107 5108 if (rsm->r_flags & RACK_ACKED) { 5109 rsm = rack_find_lowest_rsm(rack); 5110 if (rsm == NULL) 5111 return (NULL); 5112 } 5113 idx = rsm->r_rtr_cnt - 1; 5114 srtt = rack_grab_rtt(tp, rack); 5115 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5116 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5117 return (NULL); 5118 } 5119 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5120 return (NULL); 5121 } 5122 /* Ok if we reach here we are over-due and this guy can be sent */ 5123 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5124 return (rsm); 5125 } 5126 5127 static uint32_t 5128 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5129 { 5130 int32_t t; 5131 int32_t tt; 5132 uint32_t ret_val; 5133 5134 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5135 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5136 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5137 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5138 ret_val = (uint32_t)tt; 5139 return (ret_val); 5140 } 5141 5142 static uint32_t 5143 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5144 { 5145 /* 5146 * Start the FR timer, we do this based on getting the first one in 5147 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5148 * events we need to stop the running timer (if its running) before 5149 * starting the new one. 5150 */ 5151 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5152 uint32_t srtt_cur; 5153 int32_t idx; 5154 int32_t is_tlp_timer = 0; 5155 struct rack_sendmap *rsm; 5156 5157 if (rack->t_timers_stopped) { 5158 /* All timers have been stopped none are to run */ 5159 return (0); 5160 } 5161 if (rack->rc_in_persist) { 5162 /* We can't start any timer in persists */ 5163 return (rack_get_persists_timer_val(tp, rack)); 5164 } 5165 rack->rc_on_min_to = 0; 5166 if ((tp->t_state < TCPS_ESTABLISHED) || 5167 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5168 goto activate_rxt; 5169 } 5170 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5171 if ((rsm == NULL) || sup_rack) { 5172 /* Nothing on the send map or no rack */ 5173 activate_rxt: 5174 time_since_sent = 0; 5175 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5176 if (rsm) { 5177 /* 5178 * Should we discount the RTX timer any? 5179 * 5180 * We want to discount it the smallest amount. 5181 * If a timer (Rack/TLP or RXT) has gone off more 5182 * recently thats the discount we want to use (now - timer time). 5183 * If the retransmit of the oldest packet was more recent then 5184 * we want to use that (now - oldest-packet-last_transmit_time). 
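 * In other words time_since_sent = now - max(last timer firing, last
 * (re)transmit of the oldest rsm), and the RXT timeout below is shortened by
 * that amount (but never below rc_min_to).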
5185 * 5186 */ 5187 idx = rsm->r_rtr_cnt - 1; 5188 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5189 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5190 else 5191 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5192 if (TSTMP_GT(cts, tstmp_touse)) 5193 time_since_sent = cts - tstmp_touse; 5194 } 5195 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5196 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5197 to = tp->t_rxtcur; 5198 if (to > time_since_sent) 5199 to -= time_since_sent; 5200 else 5201 to = rack->r_ctl.rc_min_to; 5202 if (to == 0) 5203 to = 1; 5204 /* Special case for KEEPINIT */ 5205 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5206 (TP_KEEPINIT(tp) != 0) && 5207 rsm) { 5208 /* 5209 * We have to put a ceiling on the rxt timer 5210 * of the keep-init timeout. 5211 */ 5212 uint32_t max_time, red; 5213 5214 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5215 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5216 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5217 if (red < max_time) 5218 max_time -= red; 5219 else 5220 max_time = 1; 5221 } 5222 /* Reduce timeout to the keep value if needed */ 5223 if (max_time < to) 5224 to = max_time; 5225 } 5226 return (to); 5227 } 5228 return (0); 5229 } 5230 if (rsm->r_flags & RACK_ACKED) { 5231 rsm = rack_find_lowest_rsm(rack); 5232 if (rsm == NULL) { 5233 /* No lowest? */ 5234 goto activate_rxt; 5235 } 5236 } 5237 if (rack->sack_attack_disable) { 5238 /* 5239 * We don't want to do 5240 * any TLP's if you are an attacker. 5241 * Though if you are doing what 5242 * is expected you may still have 5243 * SACK-PASSED marks. 5244 */ 5245 goto activate_rxt; 5246 } 5247 /* Convert from ms to usecs */ 5248 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5249 if ((tp->t_flags & TF_SENTFIN) && 5250 ((tp->snd_max - tp->snd_una) == 1) && 5251 (rsm->r_flags & RACK_HAS_FIN)) { 5252 /* 5253 * We don't start a rack timer if all we have is a 5254 * FIN outstanding. 5255 */ 5256 goto activate_rxt; 5257 } 5258 if ((rack->use_rack_rr == 0) && 5259 (IN_FASTRECOVERY(tp->t_flags)) && 5260 (rack->rack_no_prr == 0) && 5261 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5262 /* 5263 * We are not cheating, in recovery and 5264 * not enough ack's to yet get our next 5265 * retransmission out. 5266 * 5267 * Note that classified attackers do not 5268 * get to use the rack-cheat. 5269 */ 5270 goto activate_tlp; 5271 } 5272 srtt = rack_grab_rtt(tp, rack); 5273 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5274 idx = rsm->r_rtr_cnt - 1; 5275 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5276 if (SEQ_GEQ(exp, cts)) { 5277 to = exp - cts; 5278 if (to < rack->r_ctl.rc_min_to) { 5279 to = rack->r_ctl.rc_min_to; 5280 if (rack->r_rr_config == 3) 5281 rack->rc_on_min_to = 1; 5282 } 5283 } else { 5284 to = rack->r_ctl.rc_min_to; 5285 if (rack->r_rr_config == 3) 5286 rack->rc_on_min_to = 1; 5287 } 5288 } else { 5289 /* Ok we need to do a TLP not RACK */ 5290 activate_tlp: 5291 if ((rack->rc_tlp_in_progress != 0) && 5292 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5293 /* 5294 * The previous send was a TLP and we have sent 5295 * N TLP's without sending new data. 5296 */ 5297 goto activate_rxt; 5298 } 5299 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5300 if (rsm == NULL) { 5301 /* We found no rsm to TLP with. 
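Fall back to arming the RXT timer instead.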
*/ 5302 goto activate_rxt; 5303 } 5304 if (rsm->r_flags & RACK_HAS_FIN) { 5305 /* If its a FIN we dont do TLP */ 5306 rsm = NULL; 5307 goto activate_rxt; 5308 } 5309 idx = rsm->r_rtr_cnt - 1; 5310 time_since_sent = 0; 5311 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5312 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5313 else 5314 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5315 if (TSTMP_GT(cts, tstmp_touse)) 5316 time_since_sent = cts - tstmp_touse; 5317 is_tlp_timer = 1; 5318 if (tp->t_srtt) { 5319 if ((rack->rc_srtt_measure_made == 0) && 5320 (tp->t_srtt == 1)) { 5321 /* 5322 * If another stack as run and set srtt to 1, 5323 * then the srtt was 0, so lets use the initial. 5324 */ 5325 srtt = RACK_INITIAL_RTO; 5326 } else { 5327 srtt_cur = tp->t_srtt; 5328 srtt = srtt_cur; 5329 } 5330 } else 5331 srtt = RACK_INITIAL_RTO; 5332 /* 5333 * If the SRTT is not keeping up and the 5334 * rack RTT has spiked we want to use 5335 * the last RTT not the smoothed one. 5336 */ 5337 if (rack_tlp_use_greater && 5338 tp->t_srtt && 5339 (srtt < rack_grab_rtt(tp, rack))) { 5340 srtt = rack_grab_rtt(tp, rack); 5341 } 5342 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5343 if (thresh > time_since_sent) { 5344 to = thresh - time_since_sent; 5345 } else { 5346 to = rack->r_ctl.rc_min_to; 5347 rack_log_alt_to_to_cancel(rack, 5348 thresh, /* flex1 */ 5349 time_since_sent, /* flex2 */ 5350 tstmp_touse, /* flex3 */ 5351 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5352 (uint32_t)rsm->r_tim_lastsent[idx], 5353 srtt, 5354 idx, 99); 5355 } 5356 if (to < rack_tlp_min) { 5357 to = rack_tlp_min; 5358 } 5359 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5360 /* 5361 * If the TLP time works out to larger than the max 5362 * RTO lets not do TLP.. just RTO. 5363 */ 5364 goto activate_rxt; 5365 } 5366 } 5367 if (is_tlp_timer == 0) { 5368 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5369 } else { 5370 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5371 } 5372 if (to == 0) 5373 to = 1; 5374 return (to); 5375 } 5376 5377 static void 5378 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5379 { 5380 if (rack->rc_in_persist == 0) { 5381 if (tp->t_flags & TF_GPUTINPROG) { 5382 /* 5383 * Stop the goodput now, the calling of the 5384 * measurement function clears the flag. 
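 * (rack_do_goodput_measurement() below is what clears TF_GPUTINPROG; here we
 * just tag the sample as persist quality.)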
5385 */ 5386 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5387 RACK_QUALITY_PERSIST); 5388 } 5389 #ifdef NETFLIX_SHARED_CWND 5390 if (rack->r_ctl.rc_scw) { 5391 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5392 rack->rack_scwnd_is_idle = 1; 5393 } 5394 #endif 5395 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5396 if (rack->r_ctl.rc_went_idle_time == 0) 5397 rack->r_ctl.rc_went_idle_time = 1; 5398 rack_timer_cancel(tp, rack, cts, __LINE__); 5399 rack->r_ctl.persist_lost_ends = 0; 5400 rack->probe_not_answered = 0; 5401 rack->forced_ack = 0; 5402 tp->t_rxtshift = 0; 5403 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5404 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5405 rack->rc_in_persist = 1; 5406 } 5407 } 5408 5409 static void 5410 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5411 { 5412 if (tcp_in_hpts(rack->rc_inp)) { 5413 tcp_hpts_remove(rack->rc_inp); 5414 rack->r_ctl.rc_hpts_flags = 0; 5415 } 5416 #ifdef NETFLIX_SHARED_CWND 5417 if (rack->r_ctl.rc_scw) { 5418 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5419 rack->rack_scwnd_is_idle = 0; 5420 } 5421 #endif 5422 if (rack->rc_gp_dyn_mul && 5423 (rack->use_fixed_rate == 0) && 5424 (rack->rc_always_pace)) { 5425 /* 5426 * Do we count this as if a probe-rtt just 5427 * finished? 5428 */ 5429 uint32_t time_idle, idle_min; 5430 5431 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5432 idle_min = rack_min_probertt_hold; 5433 if (rack_probertt_gpsrtt_cnt_div) { 5434 uint64_t extra; 5435 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5436 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5437 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5438 idle_min += (uint32_t)extra; 5439 } 5440 if (time_idle >= idle_min) { 5441 /* Yes, we count it as a probe-rtt. 
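Idle time reached idle_min, so treat the idle period as if a probe-rtt just
completed (or, if we were already in probe-rtt, exit it now).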
*/ 5442 uint32_t us_cts; 5443 5444 us_cts = tcp_get_usecs(NULL); 5445 if (rack->in_probe_rtt == 0) { 5446 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5447 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5448 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5449 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5450 } else { 5451 rack_exit_probertt(rack, us_cts); 5452 } 5453 } 5454 } 5455 rack->rc_in_persist = 0; 5456 rack->r_ctl.rc_went_idle_time = 0; 5457 tp->t_rxtshift = 0; 5458 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5459 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5460 rack->r_ctl.rc_agg_delayed = 0; 5461 rack->r_early = 0; 5462 rack->r_late = 0; 5463 rack->r_ctl.rc_agg_early = 0; 5464 } 5465 5466 static void 5467 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5468 struct hpts_diag *diag, struct timeval *tv) 5469 { 5470 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5471 union tcp_log_stackspecific log; 5472 5473 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5474 log.u_bbr.flex1 = diag->p_nxt_slot; 5475 log.u_bbr.flex2 = diag->p_cur_slot; 5476 log.u_bbr.flex3 = diag->slot_req; 5477 log.u_bbr.flex4 = diag->inp_hptsslot; 5478 log.u_bbr.flex5 = diag->slot_remaining; 5479 log.u_bbr.flex6 = diag->need_new_to; 5480 log.u_bbr.flex7 = diag->p_hpts_active; 5481 log.u_bbr.flex8 = diag->p_on_min_sleep; 5482 /* Hijack other fields as needed */ 5483 log.u_bbr.epoch = diag->have_slept; 5484 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5485 log.u_bbr.pkts_out = diag->co_ret; 5486 log.u_bbr.applimited = diag->hpts_sleep_time; 5487 log.u_bbr.delivered = diag->p_prev_slot; 5488 log.u_bbr.inflight = diag->p_runningslot; 5489 log.u_bbr.bw_inuse = diag->wheel_slot; 5490 log.u_bbr.rttProp = diag->wheel_cts; 5491 log.u_bbr.timeStamp = cts; 5492 log.u_bbr.delRate = diag->maxslots; 5493 log.u_bbr.cur_del_rate = diag->p_curtick; 5494 log.u_bbr.cur_del_rate <<= 32; 5495 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5496 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5497 &rack->rc_inp->inp_socket->so_rcv, 5498 &rack->rc_inp->inp_socket->so_snd, 5499 BBR_LOG_HPTSDIAG, 0, 5500 0, &log, false, tv); 5501 } 5502 5503 } 5504 5505 static void 5506 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5507 { 5508 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5509 union tcp_log_stackspecific log; 5510 struct timeval tv; 5511 5512 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5513 log.u_bbr.flex1 = sb->sb_flags; 5514 log.u_bbr.flex2 = len; 5515 log.u_bbr.flex3 = sb->sb_state; 5516 log.u_bbr.flex8 = type; 5517 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5518 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5519 &rack->rc_inp->inp_socket->so_rcv, 5520 &rack->rc_inp->inp_socket->so_snd, 5521 TCP_LOG_SB_WAKE, 0, 5522 len, &log, false, &tv); 5523 } 5524 } 5525 5526 static void 5527 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5528 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5529 { 5530 struct hpts_diag diag; 5531 struct inpcb *inp; 5532 struct timeval tv; 5533 uint32_t delayed_ack = 0; 5534 uint32_t hpts_timeout; 5535 uint32_t entry_slot = slot; 5536 uint8_t stopped; 5537 uint32_t left = 0; 5538 uint32_t us_cts; 5539 5540 inp = tp->t_inpcb; 5541 if ((tp->t_state == TCPS_CLOSED) || 5542 (tp->t_state == TCPS_LISTEN)) { 5543 return; 5544 } 5545 if (tcp_in_hpts(inp)) { 5546 /* Already on the pacer */ 5547 return; 5548 } 5549 stopped 
= rack->rc_tmr_stopped; 5550 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5551 left = rack->r_ctl.rc_timer_exp - cts; 5552 } 5553 rack->r_ctl.rc_timer_exp = 0; 5554 rack->r_ctl.rc_hpts_flags = 0; 5555 us_cts = tcp_get_usecs(&tv); 5556 /* Now early/late accounting */ 5557 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5558 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5559 /* 5560 * We have a early carry over set, 5561 * we can always add more time so we 5562 * can always make this compensation. 5563 * 5564 * Note if ack's are allowed to wake us do not 5565 * penalize the next timer for being awoke 5566 * by an ack aka the rc_agg_early (non-paced mode). 5567 */ 5568 slot += rack->r_ctl.rc_agg_early; 5569 rack->r_early = 0; 5570 rack->r_ctl.rc_agg_early = 0; 5571 } 5572 if (rack->r_late) { 5573 /* 5574 * This is harder, we can 5575 * compensate some but it 5576 * really depends on what 5577 * the current pacing time is. 5578 */ 5579 if (rack->r_ctl.rc_agg_delayed >= slot) { 5580 /* 5581 * We can't compensate for it all. 5582 * And we have to have some time 5583 * on the clock. We always have a min 5584 * 10 slots (10 x 10 i.e. 100 usecs). 5585 */ 5586 if (slot <= HPTS_TICKS_PER_SLOT) { 5587 /* We gain delay */ 5588 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5589 slot = HPTS_TICKS_PER_SLOT; 5590 } else { 5591 /* We take off some */ 5592 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5593 slot = HPTS_TICKS_PER_SLOT; 5594 } 5595 } else { 5596 slot -= rack->r_ctl.rc_agg_delayed; 5597 rack->r_ctl.rc_agg_delayed = 0; 5598 /* Make sure we have 100 useconds at minimum */ 5599 if (slot < HPTS_TICKS_PER_SLOT) { 5600 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5601 slot = HPTS_TICKS_PER_SLOT; 5602 } 5603 if (rack->r_ctl.rc_agg_delayed == 0) 5604 rack->r_late = 0; 5605 } 5606 } 5607 if (slot) { 5608 /* We are pacing too */ 5609 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5610 } 5611 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5612 #ifdef NETFLIX_EXP_DETECTION 5613 if (rack->sack_attack_disable && 5614 (slot < tcp_sad_pacing_interval)) { 5615 /* 5616 * We have a potential attacker on 5617 * the line. We have possibly some 5618 * (or now) pacing time set. We want to 5619 * slow down the processing of sacks by some 5620 * amount (if it is an attacker). Set the default 5621 * slot for attackers in place (unless the orginal 5622 * interval is longer). Its stored in 5623 * micro-seconds, so lets convert to msecs. 5624 */ 5625 slot = tcp_sad_pacing_interval; 5626 } 5627 #endif 5628 if (tp->t_flags & TF_DELACK) { 5629 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5630 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5631 } 5632 if (delayed_ack && ((hpts_timeout == 0) || 5633 (delayed_ack < hpts_timeout))) 5634 hpts_timeout = delayed_ack; 5635 else 5636 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5637 /* 5638 * If no timers are going to run and we will fall off the hptsi 5639 * wheel, we resort to a keep-alive timer if its configured. 5640 */ 5641 if ((hpts_timeout == 0) && 5642 (slot == 0)) { 5643 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5644 (tp->t_state <= TCPS_CLOSING)) { 5645 /* 5646 * Ok we have no timer (persists, rack, tlp, rxt or 5647 * del-ack), we don't have segments being paced. So 5648 * all that is left is the keepalive timer. 
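 * Established connections use TP_KEEPIDLE, otherwise TP_KEEPINIT; both are
 * converted from ticks to usecs below.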
5649 */ 5650 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5651 /* Get the established keep-alive time */ 5652 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5653 } else { 5654 /* 5655 * Get the initial setup keep-alive time, 5656 * note that this is probably not going to 5657 * happen, since rack will be running a rxt timer 5658 * if a SYN of some sort is outstanding. It is 5659 * actually handled in rack_timeout_rxt(). 5660 */ 5661 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5662 } 5663 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5664 if (rack->in_probe_rtt) { 5665 /* 5666 * We want to instead not wake up a long time from 5667 * now but to wake up about the time we would 5668 * exit probe-rtt and initiate a keep-alive ack. 5669 * This will get us out of probe-rtt and update 5670 * our min-rtt. 5671 */ 5672 hpts_timeout = rack_min_probertt_hold; 5673 } 5674 } 5675 } 5676 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5677 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5678 /* 5679 * RACK, TLP, persists and RXT timers all are restartable 5680 * based on actions input .. i.e we received a packet (ack 5681 * or sack) and that changes things (rw, or snd_una etc). 5682 * Thus we can restart them with a new value. For 5683 * keep-alive, delayed_ack we keep track of what was left 5684 * and restart the timer with a smaller value. 5685 */ 5686 if (left < hpts_timeout) 5687 hpts_timeout = left; 5688 } 5689 if (hpts_timeout) { 5690 /* 5691 * Hack alert for now we can't time-out over 2,147,483 5692 * seconds (a bit more than 596 hours), which is probably ok 5693 * :). 5694 */ 5695 if (hpts_timeout > 0x7ffffffe) 5696 hpts_timeout = 0x7ffffffe; 5697 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5698 } 5699 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5700 if ((rack->gp_ready == 0) && 5701 (rack->use_fixed_rate == 0) && 5702 (hpts_timeout < slot) && 5703 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5704 /* 5705 * We have no good estimate yet for the 5706 * old clunky burst mitigation or the 5707 * real pacing. And the tlp or rxt is smaller 5708 * than the pacing calculation. Lets not 5709 * pace that long since we know the calculation 5710 * so far is not accurate. 5711 */ 5712 slot = hpts_timeout; 5713 } 5714 /** 5715 * Turn off all the flags for queuing by default. The 5716 * flags have important meanings to what happens when 5717 * LRO interacts with the transport. Most likely (by default now) 5718 * mbuf_queueing and ack compression are on. So the transport 5719 * has a couple of flags that control what happens (if those 5720 * are not on then these flags won't have any effect since it 5721 * won't go through the queuing LRO path). 5722 * 5723 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5724 * pacing output, so don't disturb. But 5725 * it also means LRO can wake me if there 5726 * is a SACK arrival. 5727 * 5728 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5729 * with the above flag (QUEUE_READY) and 5730 * when present it says don't even wake me 5731 * if a SACK arrives. 5732 * 5733 * The idea behind these flags is that if we are pacing we 5734 * set the MBUF_QUEUE_READY and only get woken up if 5735 * a SACK arrives (which could change things) or if 5736 * our pacing timer expires. If, however, we have a rack 5737 * timer running, then we don't even want a sack to wake 5738 * us since the rack timer has to expire before we can send. 
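 * (r_rr_config == 3 is the exception handled below: with that setting a SACK
 * may still wake us even while a rack timer is armed.)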
5739 * 5740 * Other cases should usually have none of the flags set 5741 * so LRO can call into us. 5742 */ 5743 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5744 if (slot) { 5745 rack->r_ctl.rc_last_output_to = us_cts + slot; 5746 /* 5747 * A pacing timer (slot) is being set, in 5748 * such a case we cannot send (we are blocked by 5749 * the timer). So lets tell LRO that it should not 5750 * wake us unless there is a SACK. Note this only 5751 * will be effective if mbuf queueing is on or 5752 * compressed acks are being processed. 5753 */ 5754 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5755 /* 5756 * But wait if we have a Rack timer running 5757 * even a SACK should not disturb us (with 5758 * the exception of r_rr_config 3). 5759 */ 5760 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5761 (rack->r_rr_config != 3)) 5762 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5763 if (rack->rc_ack_can_sendout_data) { 5764 /* 5765 * Ahh but wait, this is that special case 5766 * where the pacing timer can be disturbed 5767 * backout the changes (used for non-paced 5768 * burst limiting). 5769 */ 5770 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5771 } 5772 if ((rack->use_rack_rr) && 5773 (rack->r_rr_config < 2) && 5774 ((hpts_timeout) && (hpts_timeout < slot))) { 5775 /* 5776 * Arrange for the hpts to kick back in after the 5777 * t-o if the t-o does not cause a send. 5778 */ 5779 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5780 __LINE__, &diag); 5781 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5782 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5783 } else { 5784 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 5785 __LINE__, &diag); 5786 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5787 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5788 } 5789 } else if (hpts_timeout) { 5790 /* 5791 * With respect to inp_flags2 here, lets let any new acks wake 5792 * us up here. Since we are not pacing (no pacing timer), output 5793 * can happen so we should let it. If its a Rack timer, then any inbound 5794 * packet probably won't change the sending (we will be blocked) 5795 * but it may change the prr stats so letting it in (the set defaults 5796 * at the start of this block) are good enough. 5797 */ 5798 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5799 __LINE__, &diag); 5800 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5801 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5802 } else { 5803 /* No timer starting */ 5804 #ifdef INVARIANTS 5805 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5806 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5807 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5808 } 5809 #endif 5810 } 5811 rack->rc_tmr_stopped = 0; 5812 if (slot) 5813 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5814 } 5815 5816 /* 5817 * RACK Timer, here we simply do logging and house keeping. 5818 * the normal rack_output() function will call the 5819 * appropriate thing to check if we need to do a RACK retransmit. 5820 * We return 1, saying don't proceed with rack_output only 5821 * when all timers have been stopped (destroyed PCB?). 5822 */ 5823 static int 5824 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5825 { 5826 /* 5827 * This timer simply provides an internal trigger to send out data. 
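 * No segment is sent directly from this context; rack_output() does the
 * actual sending.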
5828 * The check_recovery_mode call will see if there are needed 5829 * retransmissions, if so we will enter fast-recovery. The output 5830 * call may or may not do the same thing depending on sysctl 5831 * settings. 5832 */ 5833 struct rack_sendmap *rsm; 5834 5835 if (tp->t_timers->tt_flags & TT_STOPPED) { 5836 return (1); 5837 } 5838 counter_u64_add(rack_to_tot, 1); 5839 if (rack->r_state && (rack->r_state != tp->t_state)) 5840 rack_set_state(tp, rack); 5841 rack->rc_on_min_to = 0; 5842 rsm = rack_check_recovery_mode(tp, cts); 5843 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5844 if (rsm) { 5845 rack->r_ctl.rc_resend = rsm; 5846 rack->r_timer_override = 1; 5847 if (rack->use_rack_rr) { 5848 /* 5849 * Don't accumulate extra pacing delay 5850 * we are allowing the rack timer to 5851 * over-ride pacing i.e. rrr takes precedence 5852 * if the pacing interval is longer than the rrr 5853 * time (in other words we get the min pacing 5854 * time versus rrr pacing time). 5855 */ 5856 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5857 } 5858 } 5859 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5860 if (rsm == NULL) { 5861 /* restart a timer and return 1 */ 5862 rack_start_hpts_timer(rack, tp, cts, 5863 0, 0, 0); 5864 return (1); 5865 } 5866 return (0); 5867 } 5868 5869 static void 5870 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5871 { 5872 if (rsm->m->m_len > rsm->orig_m_len) { 5873 /* 5874 * Mbuf grew, caused by sbcompress, our offset does 5875 * not change. 5876 */ 5877 rsm->orig_m_len = rsm->m->m_len; 5878 } else if (rsm->m->m_len < rsm->orig_m_len) { 5879 /* 5880 * Mbuf shrank, trimmed off the top by an ack, our 5881 * offset changes. 5882 */ 5883 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5884 rsm->orig_m_len = rsm->m->m_len; 5885 } 5886 } 5887 5888 static void 5889 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5890 { 5891 struct mbuf *m; 5892 uint32_t soff; 5893 5894 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5895 /* Fix up the orig_m_len and possibly the mbuf offset */ 5896 rack_adjust_orig_mlen(src_rsm); 5897 } 5898 m = src_rsm->m; 5899 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5900 while (soff >= m->m_len) { 5901 /* Move out past this mbuf */ 5902 soff -= m->m_len; 5903 m = m->m_next; 5904 KASSERT((m != NULL), 5905 ("rsm:%p nrsm:%p hit at soff:%u null m", 5906 src_rsm, rsm, soff)); 5907 } 5908 rsm->m = m; 5909 rsm->soff = soff; 5910 rsm->orig_m_len = m->m_len; 5911 } 5912 5913 static __inline void 5914 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5915 struct rack_sendmap *rsm, uint32_t start) 5916 { 5917 int idx; 5918 5919 nrsm->r_start = start; 5920 nrsm->r_end = rsm->r_end; 5921 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 5922 nrsm->r_flags = rsm->r_flags; 5923 nrsm->r_dupack = rsm->r_dupack; 5924 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 5925 nrsm->r_rtr_bytes = 0; 5926 nrsm->r_fas = rsm->r_fas; 5927 rsm->r_end = nrsm->r_start; 5928 nrsm->r_just_ret = rsm->r_just_ret; 5929 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 5930 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 5931 } 5932 /* Now if we have SYN flag we keep it on the left edge */ 5933 if (nrsm->r_flags & RACK_HAS_SYN) 5934 nrsm->r_flags &= ~RACK_HAS_SYN; 5935 /* Now if we have a FIN flag we keep it on the right edge */ 5936 if (rsm->r_flags & RACK_HAS_FIN) 5937 rsm->r_flags &= ~RACK_HAS_FIN; 5938 /* Push bit must go to the right edge as well */ 5939 if (rsm->r_flags & RACK_HAD_PUSH) 5940 rsm->r_flags &= 
~RACK_HAD_PUSH; 5941 /* Clone over the state of the hw_tls flag */ 5942 nrsm->r_hw_tls = rsm->r_hw_tls; 5943 /* 5944 * Now we need to find nrsm's new location in the mbuf chain 5945 * we basically calculate a new offset, which is soff + 5946 * how much is left in original rsm. Then we walk out the mbuf 5947 * chain to find the righ postion, it may be the same mbuf 5948 * or maybe not. 5949 */ 5950 KASSERT(((rsm->m != NULL) || 5951 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 5952 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 5953 if (rsm->m) 5954 rack_setup_offset_for_rsm(rsm, nrsm); 5955 } 5956 5957 static struct rack_sendmap * 5958 rack_merge_rsm(struct tcp_rack *rack, 5959 struct rack_sendmap *l_rsm, 5960 struct rack_sendmap *r_rsm) 5961 { 5962 /* 5963 * We are merging two ack'd RSM's, 5964 * the l_rsm is on the left (lower seq 5965 * values) and the r_rsm is on the right 5966 * (higher seq value). The simplest way 5967 * to merge these is to move the right 5968 * one into the left. I don't think there 5969 * is any reason we need to try to find 5970 * the oldest (or last oldest retransmitted). 5971 */ 5972 #ifdef INVARIANTS 5973 struct rack_sendmap *rm; 5974 #endif 5975 rack_log_map_chg(rack->rc_tp, rack, NULL, 5976 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 5977 l_rsm->r_end = r_rsm->r_end; 5978 if (l_rsm->r_dupack < r_rsm->r_dupack) 5979 l_rsm->r_dupack = r_rsm->r_dupack; 5980 if (r_rsm->r_rtr_bytes) 5981 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 5982 if (r_rsm->r_in_tmap) { 5983 /* This really should not happen */ 5984 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 5985 r_rsm->r_in_tmap = 0; 5986 } 5987 5988 /* Now the flags */ 5989 if (r_rsm->r_flags & RACK_HAS_FIN) 5990 l_rsm->r_flags |= RACK_HAS_FIN; 5991 if (r_rsm->r_flags & RACK_TLP) 5992 l_rsm->r_flags |= RACK_TLP; 5993 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 5994 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 5995 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 5996 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 5997 /* 5998 * If both are app-limited then let the 5999 * free lower the count. If right is app 6000 * limited and left is not, transfer. 6001 */ 6002 l_rsm->r_flags |= RACK_APP_LIMITED; 6003 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6004 if (r_rsm == rack->r_ctl.rc_first_appl) 6005 rack->r_ctl.rc_first_appl = l_rsm; 6006 } 6007 #ifndef INVARIANTS 6008 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6009 #else 6010 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6011 if (rm != r_rsm) { 6012 panic("removing head in rack:%p rsm:%p rm:%p", 6013 rack, r_rsm, rm); 6014 } 6015 #endif 6016 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6017 /* Transfer the split limit to the map we free */ 6018 r_rsm->r_limit_type = l_rsm->r_limit_type; 6019 l_rsm->r_limit_type = 0; 6020 } 6021 rack_free(rack, r_rsm); 6022 return (l_rsm); 6023 } 6024 6025 /* 6026 * TLP Timer, here we simply setup what segment we want to 6027 * have the TLP expire on, the normal rack_output() will then 6028 * send it out. 6029 * 6030 * We return 1, saying don't proceed with rack_output only 6031 * when all timers have been stopped (destroyed PCB?). 6032 */ 6033 static int 6034 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6035 { 6036 /* 6037 * Tail Loss Probe. 
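 * The timer fired: either queue one new MSS of data if the peer's window and
 * the socket buffer allow it, or arrange to (re)send the last unacked segment,
 * splitting it down to a single MSS if it is larger.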
6038 */ 6039 struct rack_sendmap *rsm = NULL; 6040 #ifdef INVARIANTS 6041 struct rack_sendmap *insret; 6042 #endif 6043 struct socket *so; 6044 uint32_t amm; 6045 uint32_t out, avail; 6046 int collapsed_win = 0; 6047 6048 if (tp->t_timers->tt_flags & TT_STOPPED) { 6049 return (1); 6050 } 6051 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6052 /* Its not time yet */ 6053 return (0); 6054 } 6055 if (ctf_progress_timeout_check(tp, true)) { 6056 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6057 return (-ETIMEDOUT); /* tcp_drop() */ 6058 } 6059 /* 6060 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6061 * need to figure out how to force a full MSS segment out. 6062 */ 6063 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6064 rack->r_ctl.retran_during_recovery = 0; 6065 rack->r_ctl.dsack_byte_cnt = 0; 6066 counter_u64_add(rack_tlp_tot, 1); 6067 if (rack->r_state && (rack->r_state != tp->t_state)) 6068 rack_set_state(tp, rack); 6069 so = tp->t_inpcb->inp_socket; 6070 avail = sbavail(&so->so_snd); 6071 out = tp->snd_max - tp->snd_una; 6072 if (out > tp->snd_wnd) { 6073 /* special case, we need a retransmission */ 6074 collapsed_win = 1; 6075 goto need_retran; 6076 } 6077 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6078 rack->r_ctl.dsack_persist--; 6079 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6080 rack->r_ctl.num_dsack = 0; 6081 } 6082 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6083 } 6084 if ((tp->t_flags & TF_GPUTINPROG) && 6085 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6086 /* 6087 * If this is the second in a row 6088 * TLP and we are doing a measurement 6089 * its time to abandon the measurement. 6090 * Something is likely broken on 6091 * the clients network and measuring a 6092 * broken network does us no good. 6093 */ 6094 tp->t_flags &= ~TF_GPUTINPROG; 6095 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6096 rack->r_ctl.rc_gp_srtt /*flex1*/, 6097 tp->gput_seq, 6098 0, 0, 18, __LINE__, NULL, 0); 6099 } 6100 /* 6101 * Check our send oldest always settings, and if 6102 * there is an oldest to send jump to the need_retran. 6103 */ 6104 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6105 goto need_retran; 6106 6107 if (avail > out) { 6108 /* New data is available */ 6109 amm = avail - out; 6110 if (amm > ctf_fixed_maxseg(tp)) { 6111 amm = ctf_fixed_maxseg(tp); 6112 if ((amm + out) > tp->snd_wnd) { 6113 /* We are rwnd limited */ 6114 goto need_retran; 6115 } 6116 } else if (amm < ctf_fixed_maxseg(tp)) { 6117 /* not enough to fill a MTU */ 6118 goto need_retran; 6119 } 6120 if (IN_FASTRECOVERY(tp->t_flags)) { 6121 /* Unlikely */ 6122 if (rack->rack_no_prr == 0) { 6123 if (out + amm <= tp->snd_wnd) { 6124 rack->r_ctl.rc_prr_sndcnt = amm; 6125 rack->r_ctl.rc_tlp_new_data = amm; 6126 rack_log_to_prr(rack, 4, 0); 6127 } 6128 } else 6129 goto need_retran; 6130 } else { 6131 /* Set the send-new override */ 6132 if (out + amm <= tp->snd_wnd) 6133 rack->r_ctl.rc_tlp_new_data = amm; 6134 else 6135 goto need_retran; 6136 } 6137 rack->r_ctl.rc_tlpsend = NULL; 6138 counter_u64_add(rack_tlp_newdata, 1); 6139 goto send; 6140 } 6141 need_retran: 6142 /* 6143 * Ok we need to arrange the last un-acked segment to be re-sent, or 6144 * optionally the first un-acked segment. 
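 * If the peer collapsed its receive window we instead probe with the last
 * segment that still fit inside the advertised window.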
6145 */ 6146 if (collapsed_win == 0) { 6147 if (rack_always_send_oldest) 6148 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6149 else { 6150 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6151 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6152 rsm = rack_find_high_nonack(rack, rsm); 6153 } 6154 } 6155 if (rsm == NULL) { 6156 #ifdef TCP_BLACKBOX 6157 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6158 #endif 6159 goto out; 6160 } 6161 } else { 6162 /* 6163 * We must find the last segment 6164 * that was acceptable by the client. 6165 */ 6166 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6167 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6168 /* Found one */ 6169 break; 6170 } 6171 } 6172 if (rsm == NULL) { 6173 /* None? if so send the first */ 6174 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6175 if (rsm == NULL) { 6176 #ifdef TCP_BLACKBOX 6177 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6178 #endif 6179 goto out; 6180 } 6181 } 6182 } 6183 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6184 /* 6185 * We need to split this the last segment in two. 6186 */ 6187 struct rack_sendmap *nrsm; 6188 6189 nrsm = rack_alloc_full_limit(rack); 6190 if (nrsm == NULL) { 6191 /* 6192 * No memory to split, we will just exit and punt 6193 * off to the RXT timer. 6194 */ 6195 goto out; 6196 } 6197 rack_clone_rsm(rack, nrsm, rsm, 6198 (rsm->r_end - ctf_fixed_maxseg(tp))); 6199 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6200 #ifndef INVARIANTS 6201 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6202 #else 6203 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6204 if (insret != NULL) { 6205 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6206 nrsm, insret, rack, rsm); 6207 } 6208 #endif 6209 if (rsm->r_in_tmap) { 6210 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6211 nrsm->r_in_tmap = 1; 6212 } 6213 rsm = nrsm; 6214 } 6215 rack->r_ctl.rc_tlpsend = rsm; 6216 send: 6217 /* Make sure output path knows we are doing a TLP */ 6218 *doing_tlp = 1; 6219 rack->r_timer_override = 1; 6220 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6221 return (0); 6222 out: 6223 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6224 return (0); 6225 } 6226 6227 /* 6228 * Delayed ack Timer, here we simply need to setup the 6229 * ACK_NOW flag and remove the DELACK flag. From there 6230 * the output routine will send the ack out. 6231 * 6232 * We only return 1, saying don't proceed, if all timers 6233 * are stopped (destroyed PCB?). 6234 */ 6235 static int 6236 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6237 { 6238 if (tp->t_timers->tt_flags & TT_STOPPED) { 6239 return (1); 6240 } 6241 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6242 tp->t_flags &= ~TF_DELACK; 6243 tp->t_flags |= TF_ACKNOW; 6244 KMOD_TCPSTAT_INC(tcps_delack); 6245 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6246 return (0); 6247 } 6248 6249 /* 6250 * Persists timer, here we simply send the 6251 * same thing as a keepalive will. 6252 * the one byte send. 6253 * 6254 * We only return 1, saying don't proceed, if all timers 6255 * are stopped (destroyed PCB?). 
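 * The probe goes out via tcp_respond() with sequence snd_una - 1; if a prior
 * probe was never answered we count it toward persist_lost_ends.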
6256 */ 6257 static int 6258 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6259 { 6260 struct tcptemp *t_template; 6261 #ifdef INVARIANTS 6262 struct inpcb *inp = tp->t_inpcb; 6263 #endif 6264 int32_t retval = 1; 6265 6266 if (tp->t_timers->tt_flags & TT_STOPPED) { 6267 return (1); 6268 } 6269 if (rack->rc_in_persist == 0) 6270 return (0); 6271 if (ctf_progress_timeout_check(tp, false)) { 6272 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6273 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6274 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6275 return (-ETIMEDOUT); /* tcp_drop() */ 6276 } 6277 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6278 /* 6279 * Persistence timer into zero window. Force a byte to be output, if 6280 * possible. 6281 */ 6282 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6283 /* 6284 * Hack: if the peer is dead/unreachable, we do not time out if the 6285 * window is closed. After a full backoff, drop the connection if 6286 * the idle time (no responses to probes) reaches the maximum 6287 * backoff that we would use if retransmitting. 6288 */ 6289 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6290 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6291 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6292 KMOD_TCPSTAT_INC(tcps_persistdrop); 6293 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6294 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6295 retval = -ETIMEDOUT; /* tcp_drop() */ 6296 goto out; 6297 } 6298 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6299 tp->snd_una == tp->snd_max) 6300 rack_exit_persist(tp, rack, cts); 6301 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6302 /* 6303 * If the user has closed the socket then drop a persisting 6304 * connection after a much reduced timeout. 6305 */ 6306 if (tp->t_state > TCPS_CLOSE_WAIT && 6307 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6308 KMOD_TCPSTAT_INC(tcps_persistdrop); 6309 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6310 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6311 retval = -ETIMEDOUT; /* tcp_drop() */ 6312 goto out; 6313 } 6314 t_template = tcpip_maketemplate(rack->rc_inp); 6315 if (t_template) { 6316 /* only set it if we were answered */ 6317 if (rack->forced_ack == 0) { 6318 rack->forced_ack = 1; 6319 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6320 } else { 6321 rack->probe_not_answered = 1; 6322 counter_u64_add(rack_persists_loss, 1); 6323 rack->r_ctl.persist_lost_ends++; 6324 } 6325 counter_u64_add(rack_persists_sends, 1); 6326 tcp_respond(tp, t_template->tt_ipgen, 6327 &t_template->tt_t, (struct mbuf *)NULL, 6328 tp->rcv_nxt, tp->snd_una - 1, 0); 6329 /* This sends an ack */ 6330 if (tp->t_flags & TF_DELACK) 6331 tp->t_flags &= ~TF_DELACK; 6332 free(t_template, M_TEMP); 6333 } 6334 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6335 tp->t_rxtshift++; 6336 out: 6337 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6338 rack_start_hpts_timer(rack, tp, cts, 6339 0, 0, 0); 6340 return (retval); 6341 } 6342 6343 /* 6344 * If a keepalive goes off, we had no other timers 6345 * happening. We always return 1 here since this 6346 * routine either drops the connection or sends 6347 * out a segment with respond. 
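 * (Strictly, the drop path returns -ETIMEDOUT so the caller can tcp_drop()
 * the connection.)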
6348 */ 6349 static int 6350 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6351 { 6352 struct tcptemp *t_template; 6353 struct inpcb *inp; 6354 6355 if (tp->t_timers->tt_flags & TT_STOPPED) { 6356 return (1); 6357 } 6358 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6359 inp = tp->t_inpcb; 6360 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6361 /* 6362 * Keep-alive timer went off; send something or drop connection if 6363 * idle for too long. 6364 */ 6365 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6366 if (tp->t_state < TCPS_ESTABLISHED) 6367 goto dropit; 6368 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6369 tp->t_state <= TCPS_CLOSING) { 6370 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6371 goto dropit; 6372 /* 6373 * Send a packet designed to force a response if the peer is 6374 * up and reachable: either an ACK if the connection is 6375 * still alive, or an RST if the peer has closed the 6376 * connection due to timeout or reboot. Using sequence 6377 * number tp->snd_una-1 causes the transmitted zero-length 6378 * segment to lie outside the receive window; by the 6379 * protocol spec, this requires the correspondent TCP to 6380 * respond. 6381 */ 6382 KMOD_TCPSTAT_INC(tcps_keepprobe); 6383 t_template = tcpip_maketemplate(inp); 6384 if (t_template) { 6385 if (rack->forced_ack == 0) { 6386 rack->forced_ack = 1; 6387 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6388 } else { 6389 rack->probe_not_answered = 1; 6390 } 6391 tcp_respond(tp, t_template->tt_ipgen, 6392 &t_template->tt_t, (struct mbuf *)NULL, 6393 tp->rcv_nxt, tp->snd_una - 1, 0); 6394 free(t_template, M_TEMP); 6395 } 6396 } 6397 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6398 return (1); 6399 dropit: 6400 KMOD_TCPSTAT_INC(tcps_keepdrops); 6401 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6402 return (-ETIMEDOUT); /* tcp_drop() */ 6403 } 6404 6405 /* 6406 * Retransmit helper function, clear up all the ack 6407 * flags and take care of important book keeping. 6408 */ 6409 static void 6410 rack_remxt_tmr(struct tcpcb *tp) 6411 { 6412 /* 6413 * The retransmit timer went off, all sack'd blocks must be 6414 * un-acked. 6415 */ 6416 struct rack_sendmap *rsm, *trsm = NULL; 6417 struct tcp_rack *rack; 6418 6419 rack = (struct tcp_rack *)tp->t_fb_ptr; 6420 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6421 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6422 if (rack->r_state && (rack->r_state != tp->t_state)) 6423 rack_set_state(tp, rack); 6424 /* 6425 * Ideally we would like to be able to 6426 * mark SACK-PASS on anything not acked here. 6427 * 6428 * However, if we do that we would burst out 6429 * all that data 1ms apart. This would be unwise, 6430 * so for now we will just let the normal rxt timer 6431 * and tlp timer take care of it. 6432 * 6433 * Also we really need to stick them back in sequence 6434 * order. This way we send in the proper order and any 6435 * sacks that come floating in will "re-ack" the data. 6436 * To do this we zap the tmap with an INIT and then 6437 * walk through and place every rsm in the RB tree 6438 * back in its seq ordered place. 
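 *
 * For example, a scoreboard of [1, 1449), [1449, 2897), [2897, 4345)
 * ends up in the tmap head-to-tail in exactly that r_start order,
 * no matter in what order those pieces were last (re)sent.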
6439 */ 6440 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6441 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6442 rsm->r_dupack = 0; 6443 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6444 /* We must re-add it back to the tlist */ 6445 if (trsm == NULL) { 6446 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6447 } else { 6448 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6449 } 6450 rsm->r_in_tmap = 1; 6451 trsm = rsm; 6452 if (rsm->r_flags & RACK_ACKED) 6453 rsm->r_flags |= RACK_WAS_ACKED; 6454 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6455 rsm->r_flags |= RACK_MUST_RXT; 6456 } 6457 /* Clear the count (we just un-acked them) */ 6458 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6459 rack->r_ctl.rc_sacked = 0; 6460 rack->r_ctl.rc_sacklast = NULL; 6461 rack->r_ctl.rc_agg_delayed = 0; 6462 rack->r_early = 0; 6463 rack->r_ctl.rc_agg_early = 0; 6464 rack->r_late = 0; 6465 /* Clear the tlp rtx mark */ 6466 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6467 if (rack->r_ctl.rc_resend != NULL) 6468 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6469 rack->r_ctl.rc_prr_sndcnt = 0; 6470 rack_log_to_prr(rack, 6, 0); 6471 rack->r_timer_override = 1; 6472 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6473 #ifdef NETFLIX_EXP_DETECTION 6474 || (rack->sack_attack_disable != 0) 6475 #endif 6476 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6477 /* 6478 * For non-sack customers new data 6479 * needs to go out as retransmits until 6480 * we retransmit up to snd_max. 6481 */ 6482 rack->r_must_retran = 1; 6483 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6484 rack->r_ctl.rc_sacked); 6485 } 6486 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6487 } 6488 6489 static void 6490 rack_convert_rtts(struct tcpcb *tp) 6491 { 6492 if (tp->t_srtt > 1) { 6493 uint32_t val, frac; 6494 6495 val = tp->t_srtt >> TCP_RTT_SHIFT; 6496 frac = tp->t_srtt & 0x1f; 6497 tp->t_srtt = TICKS_2_USEC(val); 6498 /* 6499 * frac is the fractional part of the srtt (if any), 6500 * but it's in ticks and every unit represents 6501 * 1/32nd of a tick. 6502 */ 6503 if (frac) { 6504 if (hz == 1000) { 6505 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6506 } else { 6507 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6508 } 6509 tp->t_srtt += frac; 6510 } 6511 } 6512 if (tp->t_rttvar) { 6513 uint32_t val, frac; 6514 6515 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6516 frac = tp->t_rttvar & 0x1f; 6517 tp->t_rttvar = TICKS_2_USEC(val); 6518 /* 6519 * frac is the fractional part of the rttvar (if any), 6520 * but it's in ticks and every unit represents 6521 * 1/32nd of a tick.
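 * For example, with hz = 1000 (and the usual HPTS_USEC_IN_MSEC of 1000
 * and TCP_RTT_SCALE of 32) a frac of 16 works out to
 * (16 * 1000) / 32 = 500 usec, which is what gets added back in below.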
6522 */ 6523 if (frac) { 6524 if (hz == 1000) { 6525 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6526 } else { 6527 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6528 } 6529 tp->t_rttvar += frac; 6530 } 6531 } 6532 tp->t_rxtcur = RACK_REXMTVAL(tp); 6533 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6534 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6535 } 6536 if (tp->t_rxtcur > rack_rto_max) { 6537 tp->t_rxtcur = rack_rto_max; 6538 } 6539 } 6540 6541 static void 6542 rack_cc_conn_init(struct tcpcb *tp) 6543 { 6544 struct tcp_rack *rack; 6545 uint32_t srtt; 6546 6547 rack = (struct tcp_rack *)tp->t_fb_ptr; 6548 srtt = tp->t_srtt; 6549 cc_conn_init(tp); 6550 /* 6551 * Now convert to rack's internal format, 6552 * if required. 6553 */ 6554 if ((srtt == 0) && (tp->t_srtt != 0)) 6555 rack_convert_rtts(tp); 6556 /* 6557 * We want a chance to stay in slowstart as 6558 * we create a connection. TCP spec says that 6559 * initially ssthresh is infinite. For our 6560 * purposes that is the snd_wnd. 6561 */ 6562 if (tp->snd_ssthresh < tp->snd_wnd) { 6563 tp->snd_ssthresh = tp->snd_wnd; 6564 } 6565 /* 6566 * We also want to assure a IW worth of 6567 * data can get inflight. 6568 */ 6569 if (rc_init_window(rack) < tp->snd_cwnd) 6570 tp->snd_cwnd = rc_init_window(rack); 6571 } 6572 6573 /* 6574 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6575 * we will setup to retransmit the lowest seq number outstanding. 6576 */ 6577 static int 6578 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6579 { 6580 int32_t rexmt; 6581 int32_t retval = 0; 6582 bool isipv6; 6583 6584 if (tp->t_timers->tt_flags & TT_STOPPED) { 6585 return (1); 6586 } 6587 if ((tp->t_flags & TF_GPUTINPROG) && 6588 (tp->t_rxtshift)) { 6589 /* 6590 * We have had a second timeout 6591 * measurements on successive rxt's are not profitable. 6592 * It is unlikely to be of any use (the network is 6593 * broken or the client went away). 6594 */ 6595 tp->t_flags &= ~TF_GPUTINPROG; 6596 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6597 rack->r_ctl.rc_gp_srtt /*flex1*/, 6598 tp->gput_seq, 6599 0, 0, 18, __LINE__, NULL, 0); 6600 } 6601 if (ctf_progress_timeout_check(tp, false)) { 6602 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6603 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6604 return (-ETIMEDOUT); /* tcp_drop() */ 6605 } 6606 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6607 rack->r_ctl.retran_during_recovery = 0; 6608 rack->r_ctl.dsack_byte_cnt = 0; 6609 if (IN_FASTRECOVERY(tp->t_flags)) 6610 tp->t_flags |= TF_WASFRECOVERY; 6611 else 6612 tp->t_flags &= ~TF_WASFRECOVERY; 6613 if (IN_CONGRECOVERY(tp->t_flags)) 6614 tp->t_flags |= TF_WASCRECOVERY; 6615 else 6616 tp->t_flags &= ~TF_WASCRECOVERY; 6617 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6618 (tp->snd_una == tp->snd_max)) { 6619 /* Nothing outstanding .. nothing to do */ 6620 return (0); 6621 } 6622 if (rack->r_ctl.dsack_persist) { 6623 rack->r_ctl.dsack_persist--; 6624 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6625 rack->r_ctl.num_dsack = 0; 6626 } 6627 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6628 } 6629 /* 6630 * Rack can only run one timer at a time, so we cannot 6631 * run a KEEPINIT (gating SYN sending) and a retransmit 6632 * timer for the SYN. 
So if we are in a front state and 6633 * have a KEEPINIT timer we need to check the first transmit 6634 * against now to see if we have exceeded the KEEPINIT time 6635 * (if one is set). 6636 */ 6637 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6638 (TP_KEEPINIT(tp) != 0)) { 6639 struct rack_sendmap *rsm; 6640 6641 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6642 if (rsm) { 6643 /* Ok we have something outstanding to test keepinit with */ 6644 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6645 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6646 /* We have exceeded the KEEPINIT time */ 6647 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6648 goto drop_it; 6649 } 6650 } 6651 } 6652 /* 6653 * Retransmission timer went off. Message has not been acked within 6654 * retransmit interval. Back off to a longer retransmit interval 6655 * and retransmit one segment. 6656 */ 6657 rack_remxt_tmr(tp); 6658 if ((rack->r_ctl.rc_resend == NULL) || 6659 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6660 /* 6661 * If the rwnd collapsed on 6662 * the one we are retransmitting 6663 * it does not count against the 6664 * rxt count. 6665 */ 6666 tp->t_rxtshift++; 6667 } 6668 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6669 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6670 drop_it: 6671 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6672 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6673 /* XXXGL: previously t_softerror was casted to uint16_t */ 6674 MPASS(tp->t_softerror >= 0); 6675 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6676 goto out; /* tcp_drop() */ 6677 } 6678 if (tp->t_state == TCPS_SYN_SENT) { 6679 /* 6680 * If the SYN was retransmitted, indicate CWND to be limited 6681 * to 1 segment in cc_conn_init(). 6682 */ 6683 tp->snd_cwnd = 1; 6684 } else if (tp->t_rxtshift == 1) { 6685 /* 6686 * first retransmit; record ssthresh and cwnd so they can be 6687 * recovered if this turns out to be a "bad" retransmit. A 6688 * retransmit is considered "bad" if an ACK for this segment 6689 * is received within RTT/2 interval; the assumption here is 6690 * that the ACK was already in flight. See "On Estimating 6691 * End-to-End Network Path Properties" by Allman and Paxson 6692 * for more details. 6693 */ 6694 tp->snd_cwnd_prev = tp->snd_cwnd; 6695 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6696 tp->snd_recover_prev = tp->snd_recover; 6697 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6698 tp->t_flags |= TF_PREVVALID; 6699 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6700 tp->t_flags &= ~TF_PREVVALID; 6701 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6702 if ((tp->t_state == TCPS_SYN_SENT) || 6703 (tp->t_state == TCPS_SYN_RECEIVED)) 6704 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6705 else 6706 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6707 6708 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6709 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6710 /* 6711 * We enter the path for PLMTUD if connection is established or, if 6712 * connection is FIN_WAIT_1 status, reason for the last is that if 6713 * amount of data we send is very small, we could send it in couple 6714 * of packets and process straight to FIN. In that case we won't 6715 * catch ESTABLISHED state. 6716 */ 6717 #ifdef INET6 6718 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 
true : false; 6719 #else 6720 isipv6 = false; 6721 #endif 6722 if (((V_tcp_pmtud_blackhole_detect == 1) || 6723 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6724 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6725 ((tp->t_state == TCPS_ESTABLISHED) || 6726 (tp->t_state == TCPS_FIN_WAIT_1))) { 6727 /* 6728 * Idea here is that at each stage of mtu probe (usually, 6729 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6730 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6731 * should take care of that. 6732 */ 6733 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6734 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6735 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6736 tp->t_rxtshift % 2 == 0)) { 6737 /* 6738 * Enter Path MTU Black-hole Detection mechanism: - 6739 * Disable Path MTU Discovery (IP "DF" bit). - 6740 * Reduce MTU to lower value than what we negotiated 6741 * with peer. 6742 */ 6743 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6744 /* Record that we may have found a black hole. */ 6745 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6746 /* Keep track of previous MSS. */ 6747 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6748 } 6749 6750 /* 6751 * Reduce the MSS to blackhole value or to the 6752 * default in an attempt to retransmit. 6753 */ 6754 #ifdef INET6 6755 if (isipv6 && 6756 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6757 /* Use the sysctl tuneable blackhole MSS. */ 6758 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6759 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6760 } else if (isipv6) { 6761 /* Use the default MSS. */ 6762 tp->t_maxseg = V_tcp_v6mssdflt; 6763 /* 6764 * Disable Path MTU Discovery when we switch 6765 * to minmss. 6766 */ 6767 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6768 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6769 } 6770 #endif 6771 #if defined(INET6) && defined(INET) 6772 else 6773 #endif 6774 #ifdef INET 6775 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6776 /* Use the sysctl tuneable blackhole MSS. */ 6777 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6778 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6779 } else { 6780 /* Use the default MSS. */ 6781 tp->t_maxseg = V_tcp_mssdflt; 6782 /* 6783 * Disable Path MTU Discovery when we switch 6784 * to minmss. 6785 */ 6786 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6787 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6788 } 6789 #endif 6790 } else { 6791 /* 6792 * If further retransmissions are still unsuccessful 6793 * with a lowered MTU, maybe this isn't a blackhole 6794 * and we restore the previous MSS and blackhole 6795 * detection flags. The limit '6' is determined by 6796 * giving each probe stage (1448, 1188, 524) 2 6797 * chances to recover. 6798 */ 6799 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6800 (tp->t_rxtshift >= 6)) { 6801 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6802 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6803 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6804 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6805 } 6806 } 6807 } 6808 /* 6809 * Disable RFC1323 and SACK if we haven't got any response to 6810 * our third SYN to work-around some broken terminal servers 6811 * (most of which have hopefully been retired) that have bad VJ 6812 * header compression code which trashes TCP segments containing 6813 * unknown-to-them TCP options. 
6814 */ 6815 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6816 (tp->t_rxtshift == 3)) 6817 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6818 /* 6819 * If we backed off this far, our srtt estimate is probably bogus. 6820 * Clobber it so we'll take the next rtt measurement as our srtt; 6821 * move the current srtt into rttvar to keep the current retransmit 6822 * times until then. 6823 */ 6824 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6825 #ifdef INET6 6826 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 6827 in6_losing(tp->t_inpcb); 6828 else 6829 #endif 6830 in_losing(tp->t_inpcb); 6831 tp->t_rttvar += tp->t_srtt; 6832 tp->t_srtt = 0; 6833 } 6834 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6835 tp->snd_recover = tp->snd_max; 6836 tp->t_flags |= TF_ACKNOW; 6837 tp->t_rtttime = 0; 6838 rack_cong_signal(tp, CC_RTO, tp->snd_una); 6839 out: 6840 return (retval); 6841 } 6842 6843 static int 6844 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6845 { 6846 int32_t ret = 0; 6847 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6848 6849 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6850 (tp->t_flags & TF_GPUTINPROG)) { 6851 /* 6852 * We have a goodput in progress 6853 * and we have entered a late state. 6854 * Do we have enough data in the sb 6855 * to handle the GPUT request? 6856 */ 6857 uint32_t bytes; 6858 6859 bytes = tp->gput_ack - tp->gput_seq; 6860 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6861 bytes += tp->gput_seq - tp->snd_una; 6862 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 6863 /* 6864 * There are not enough bytes in the socket 6865 * buffer that have been sent to cover this 6866 * measurement. Cancel it. 6867 */ 6868 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6869 rack->r_ctl.rc_gp_srtt /*flex1*/, 6870 tp->gput_seq, 6871 0, 0, 18, __LINE__, NULL, 0); 6872 tp->t_flags &= ~TF_GPUTINPROG; 6873 } 6874 } 6875 if (timers == 0) { 6876 return (0); 6877 } 6878 if (tp->t_state == TCPS_LISTEN) { 6879 /* no timers on listen sockets */ 6880 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6881 return (0); 6882 return (1); 6883 } 6884 if ((timers & PACE_TMR_RACK) && 6885 rack->rc_on_min_to) { 6886 /* 6887 * For the rack timer when we 6888 * are on a min-timeout (which means rrr_conf = 3) 6889 * we don't want to check the timer. It may 6890 * be going off for a pace and thats ok we 6891 * want to send the retransmit (if its ready). 6892 * 6893 * If its on a normal rack timer (non-min) then 6894 * we will check if its expired. 6895 */ 6896 goto skip_time_check; 6897 } 6898 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6899 uint32_t left; 6900 6901 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6902 ret = -1; 6903 rack_log_to_processing(rack, cts, ret, 0); 6904 return (0); 6905 } 6906 if (hpts_calling == 0) { 6907 /* 6908 * A user send or queued mbuf (sack) has called us? We 6909 * return 0 and let the pacing guards 6910 * deal with it if they should or 6911 * should not cause a send. 6912 */ 6913 ret = -2; 6914 rack_log_to_processing(rack, cts, ret, 0); 6915 return (0); 6916 } 6917 /* 6918 * Ok our timer went off early and we are not paced false 6919 * alarm, go back to sleep. 
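 * The remaining time (rc_timer_exp - cts) is simply handed back to
 * hpts below, so we get re-queued and woken again at the real expiry.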
6920 */ 6921 ret = -3; 6922 left = rack->r_ctl.rc_timer_exp - cts; 6923 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 6924 rack_log_to_processing(rack, cts, ret, left); 6925 return (1); 6926 } 6927 skip_time_check: 6928 rack->rc_tmr_stopped = 0; 6929 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6930 if (timers & PACE_TMR_DELACK) { 6931 ret = rack_timeout_delack(tp, rack, cts); 6932 } else if (timers & PACE_TMR_RACK) { 6933 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6934 rack->r_fast_output = 0; 6935 ret = rack_timeout_rack(tp, rack, cts); 6936 } else if (timers & PACE_TMR_TLP) { 6937 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6938 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 6939 } else if (timers & PACE_TMR_RXT) { 6940 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6941 rack->r_fast_output = 0; 6942 ret = rack_timeout_rxt(tp, rack, cts); 6943 } else if (timers & PACE_TMR_PERSIT) { 6944 ret = rack_timeout_persist(tp, rack, cts); 6945 } else if (timers & PACE_TMR_KEEP) { 6946 ret = rack_timeout_keepalive(tp, rack, cts); 6947 } 6948 rack_log_to_processing(rack, cts, ret, timers); 6949 return (ret); 6950 } 6951 6952 static void 6953 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6954 { 6955 struct timeval tv; 6956 uint32_t us_cts, flags_on_entry; 6957 uint8_t hpts_removed = 0; 6958 6959 flags_on_entry = rack->r_ctl.rc_hpts_flags; 6960 us_cts = tcp_get_usecs(&tv); 6961 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 6962 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 6963 ((tp->snd_max - tp->snd_una) == 0))) { 6964 tcp_hpts_remove(rack->rc_inp); 6965 hpts_removed = 1; 6966 /* If we were not delayed cancel out the flag. */ 6967 if ((tp->snd_max - tp->snd_una) == 0) 6968 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6969 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6970 } 6971 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 6972 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6973 if (tcp_in_hpts(rack->rc_inp) && 6974 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 6975 /* 6976 * Canceling timer's when we have no output being 6977 * paced. We also must remove ourselves from the 6978 * hpts. 6979 */ 6980 tcp_hpts_remove(rack->rc_inp); 6981 hpts_removed = 1; 6982 } 6983 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 6984 } 6985 if (hpts_removed == 0) 6986 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6987 } 6988 6989 static void 6990 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 6991 { 6992 return; 6993 } 6994 6995 static int 6996 rack_stopall(struct tcpcb *tp) 6997 { 6998 struct tcp_rack *rack; 6999 rack = (struct tcp_rack *)tp->t_fb_ptr; 7000 rack->t_timers_stopped = 1; 7001 return (0); 7002 } 7003 7004 static void 7005 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7006 { 7007 return; 7008 } 7009 7010 static int 7011 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7012 { 7013 return (0); 7014 } 7015 7016 static void 7017 rack_stop_all_timers(struct tcpcb *tp) 7018 { 7019 struct tcp_rack *rack; 7020 7021 /* 7022 * Assure no timers are running. 
7023 */ 7024 if (tcp_timer_active(tp, TT_PERSIST)) { 7025 /* We enter in persists, set the flag appropriately */ 7026 rack = (struct tcp_rack *)tp->t_fb_ptr; 7027 rack->rc_in_persist = 1; 7028 } 7029 tcp_timer_suspend(tp, TT_PERSIST); 7030 tcp_timer_suspend(tp, TT_REXMT); 7031 tcp_timer_suspend(tp, TT_KEEP); 7032 tcp_timer_suspend(tp, TT_DELACK); 7033 } 7034 7035 static void 7036 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7037 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7038 { 7039 int32_t idx; 7040 7041 rsm->r_rtr_cnt++; 7042 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7043 rsm->r_dupack = 0; 7044 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7045 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7046 rsm->r_flags |= RACK_OVERMAX; 7047 } 7048 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7049 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7050 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7051 } 7052 idx = rsm->r_rtr_cnt - 1; 7053 rsm->r_tim_lastsent[idx] = ts; 7054 /* 7055 * Here we don't add in the len of send, since its already 7056 * in snduna <->snd_max. 7057 */ 7058 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7059 rack->r_ctl.rc_sacked); 7060 if (rsm->r_flags & RACK_ACKED) { 7061 /* Problably MTU discovery messing with us */ 7062 rsm->r_flags &= ~RACK_ACKED; 7063 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7064 } 7065 if (rsm->r_in_tmap) { 7066 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7067 rsm->r_in_tmap = 0; 7068 } 7069 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7070 rsm->r_in_tmap = 1; 7071 if (rsm->r_flags & RACK_SACK_PASSED) { 7072 /* We have retransmitted due to the SACK pass */ 7073 rsm->r_flags &= ~RACK_SACK_PASSED; 7074 rsm->r_flags |= RACK_WAS_SACKPASS; 7075 } 7076 } 7077 7078 static uint32_t 7079 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7080 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7081 { 7082 /* 7083 * We (re-)transmitted starting at rsm->r_start for some length 7084 * (possibly less than r_end. 7085 */ 7086 struct rack_sendmap *nrsm; 7087 #ifdef INVARIANTS 7088 struct rack_sendmap *insret; 7089 #endif 7090 uint32_t c_end; 7091 int32_t len; 7092 7093 len = *lenp; 7094 c_end = rsm->r_start + len; 7095 if (SEQ_GEQ(c_end, rsm->r_end)) { 7096 /* 7097 * We retransmitted the whole piece or more than the whole 7098 * slopping into the next rsm. 7099 */ 7100 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7101 if (c_end == rsm->r_end) { 7102 *lenp = 0; 7103 return (0); 7104 } else { 7105 int32_t act_len; 7106 7107 /* Hangs over the end return whats left */ 7108 act_len = rsm->r_end - rsm->r_start; 7109 *lenp = (len - act_len); 7110 return (rsm->r_end); 7111 } 7112 /* We don't get out of this block. */ 7113 } 7114 /* 7115 * Here we retransmitted less than the whole thing which means we 7116 * have to split this into what was transmitted and what was not. 7117 */ 7118 nrsm = rack_alloc_full_limit(rack); 7119 if (nrsm == NULL) { 7120 /* 7121 * We can't get memory, so lets not proceed. 7122 */ 7123 *lenp = 0; 7124 return (0); 7125 } 7126 /* 7127 * So here we are going to take the original rsm and make it what we 7128 * retransmitted. nrsm will be the tail portion we did not 7129 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7130 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7131 * 1, 6 and the new piece will be 6, 11. 
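 * In those terms the call below is rack_clone_rsm(rack, nrsm, rsm, 6)
 * (c_end = r_start + len = 1 + 5), after which rsm covers [1, 6) and
 * nrsm covers [6, 11); nrsm is then inserted into the tree and, if
 * rsm is in the tmap, placed right after it there as well.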
7132 */ 7133 rack_clone_rsm(rack, nrsm, rsm, c_end); 7134 nrsm->r_dupack = 0; 7135 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7136 #ifndef INVARIANTS 7137 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7138 #else 7139 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7140 if (insret != NULL) { 7141 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7142 nrsm, insret, rack, rsm); 7143 } 7144 #endif 7145 if (rsm->r_in_tmap) { 7146 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7147 nrsm->r_in_tmap = 1; 7148 } 7149 rsm->r_flags &= (~RACK_HAS_FIN); 7150 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7151 /* Log a split of rsm into rsm and nrsm */ 7152 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7153 *lenp = 0; 7154 return (0); 7155 } 7156 7157 static void 7158 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7159 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7160 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7161 { 7162 struct tcp_rack *rack; 7163 struct rack_sendmap *rsm, *nrsm, fe; 7164 #ifdef INVARIANTS 7165 struct rack_sendmap *insret; 7166 #endif 7167 register uint32_t snd_max, snd_una; 7168 7169 /* 7170 * Add to the RACK log of packets in flight or retransmitted. If 7171 * there is a TS option we will use the TS echoed, if not we will 7172 * grab a TS. 7173 * 7174 * Retransmissions will increment the count and move the ts to its 7175 * proper place. Note that if options do not include TS's then we 7176 * won't be able to effectively use the ACK for an RTT on a retran. 7177 * 7178 * Notes about r_start and r_end. Lets consider a send starting at 7179 * sequence 1 for 10 bytes. In such an example the r_start would be 7180 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7181 * This means that r_end is actually the first sequence for the next 7182 * slot (11). 7183 * 7184 */ 7185 /* 7186 * If err is set what do we do XXXrrs? should we not add the thing? 7187 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7188 * i.e. proceed with add ** do this for now. 7189 */ 7190 INP_WLOCK_ASSERT(tp->t_inpcb); 7191 if (err) 7192 /* 7193 * We don't log errors -- we could but snd_max does not 7194 * advance in this case either. 7195 */ 7196 return; 7197 7198 if (th_flags & TH_RST) { 7199 /* 7200 * We don't log resets and we return immediately from 7201 * sending 7202 */ 7203 return; 7204 } 7205 rack = (struct tcp_rack *)tp->t_fb_ptr; 7206 snd_una = tp->snd_una; 7207 snd_max = tp->snd_max; 7208 if (th_flags & (TH_SYN | TH_FIN)) { 7209 /* 7210 * The call to rack_log_output is made before bumping 7211 * snd_max. This means we can record one extra byte on a SYN 7212 * or FIN if seq_out is adding more on and a FIN is present 7213 * (and we are not resending). 7214 */ 7215 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7216 len++; 7217 if (th_flags & TH_FIN) 7218 len++; 7219 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7220 /* 7221 * The add/update as not been done for the FIN/SYN 7222 * yet. 7223 */ 7224 snd_max = tp->snd_nxt; 7225 } 7226 } 7227 if (SEQ_LEQ((seq_out + len), snd_una)) { 7228 /* Are sending an old segment to induce an ack (keep-alive)? */ 7229 return; 7230 } 7231 if (SEQ_LT(seq_out, snd_una)) { 7232 /* huh? should we panic? 
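 * For now we just clamp the range to start at snd_una; e.g. with
 * seq_out = 1000, len = 500 and snd_una = 1200 this becomes
 * seq_out = 1200, len = 300 (or len = 0 if it was all acked).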
*/ 7233 uint32_t end; 7234 7235 end = seq_out + len; 7236 seq_out = snd_una; 7237 if (SEQ_GEQ(end, seq_out)) 7238 len = end - seq_out; 7239 else 7240 len = 0; 7241 } 7242 if (len == 0) { 7243 /* We don't log zero window probes */ 7244 return; 7245 } 7246 if (IN_FASTRECOVERY(tp->t_flags)) { 7247 rack->r_ctl.rc_prr_out += len; 7248 } 7249 /* First question is it a retransmission or new? */ 7250 if (seq_out == snd_max) { 7251 /* Its new */ 7252 again: 7253 rsm = rack_alloc(rack); 7254 if (rsm == NULL) { 7255 /* 7256 * Hmm out of memory and the tcb got destroyed while 7257 * we tried to wait. 7258 */ 7259 return; 7260 } 7261 if (th_flags & TH_FIN) { 7262 rsm->r_flags = RACK_HAS_FIN|add_flag; 7263 } else { 7264 rsm->r_flags = add_flag; 7265 } 7266 if (hw_tls) 7267 rsm->r_hw_tls = 1; 7268 rsm->r_tim_lastsent[0] = cts; 7269 rsm->r_rtr_cnt = 1; 7270 rsm->r_rtr_bytes = 0; 7271 if (th_flags & TH_SYN) { 7272 /* The data space is one beyond snd_una */ 7273 rsm->r_flags |= RACK_HAS_SYN; 7274 } 7275 rsm->r_start = seq_out; 7276 rsm->r_end = rsm->r_start + len; 7277 rsm->r_dupack = 0; 7278 /* 7279 * save off the mbuf location that 7280 * sndmbuf_noadv returned (which is 7281 * where we started copying from).. 7282 */ 7283 rsm->m = s_mb; 7284 rsm->soff = s_moff; 7285 /* 7286 * Here we do add in the len of send, since its not yet 7287 * reflected in in snduna <->snd_max 7288 */ 7289 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7290 rack->r_ctl.rc_sacked) + 7291 (rsm->r_end - rsm->r_start)); 7292 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7293 if (rsm->m) { 7294 if (rsm->m->m_len <= rsm->soff) { 7295 /* 7296 * XXXrrs Question, will this happen? 7297 * 7298 * If sbsndptr is set at the correct place 7299 * then s_moff should always be somewhere 7300 * within rsm->m. But if the sbsndptr was 7301 * off then that won't be true. If it occurs 7302 * we need to walkout to the correct location. 7303 */ 7304 struct mbuf *lm; 7305 7306 lm = rsm->m; 7307 while (lm->m_len <= rsm->soff) { 7308 rsm->soff -= lm->m_len; 7309 lm = lm->m_next; 7310 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7311 __func__, rack, s_moff, s_mb, rsm->soff)); 7312 } 7313 rsm->m = lm; 7314 } 7315 rsm->orig_m_len = rsm->m->m_len; 7316 } else 7317 rsm->orig_m_len = 0; 7318 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7319 /* Log a new rsm */ 7320 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7321 #ifndef INVARIANTS 7322 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7323 #else 7324 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7325 if (insret != NULL) { 7326 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7327 nrsm, insret, rack, rsm); 7328 } 7329 #endif 7330 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7331 rsm->r_in_tmap = 1; 7332 /* 7333 * Special case detection, is there just a single 7334 * packet outstanding when we are not in recovery? 7335 * 7336 * If this is true mark it so. 7337 */ 7338 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7339 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7340 struct rack_sendmap *prsm; 7341 7342 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7343 if (prsm) 7344 prsm->r_one_out_nr = 1; 7345 } 7346 return; 7347 } 7348 /* 7349 * If we reach here its a retransmission and we need to find it. 
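 * Roughly: first try the hint the caller passed in (the rsm we last
 * worked on); if that does not start at seq_out, fall back to an
 * RB_FIND keyed on r_start, splitting the front off an rsm whenever
 * seq_out lands in the middle of it.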
7350 */ 7351 memset(&fe, 0, sizeof(fe)); 7352 more: 7353 if (hintrsm && (hintrsm->r_start == seq_out)) { 7354 rsm = hintrsm; 7355 hintrsm = NULL; 7356 } else { 7357 /* No hints sorry */ 7358 rsm = NULL; 7359 } 7360 if ((rsm) && (rsm->r_start == seq_out)) { 7361 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7362 if (len == 0) { 7363 return; 7364 } else { 7365 goto more; 7366 } 7367 } 7368 /* Ok it was not the last pointer go through it the hard way. */ 7369 refind: 7370 fe.r_start = seq_out; 7371 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7372 if (rsm) { 7373 if (rsm->r_start == seq_out) { 7374 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7375 if (len == 0) { 7376 return; 7377 } else { 7378 goto refind; 7379 } 7380 } 7381 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7382 /* Transmitted within this piece */ 7383 /* 7384 * Ok we must split off the front and then let the 7385 * update do the rest 7386 */ 7387 nrsm = rack_alloc_full_limit(rack); 7388 if (nrsm == NULL) { 7389 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7390 return; 7391 } 7392 /* 7393 * copy rsm to nrsm and then trim the front of rsm 7394 * to not include this part. 7395 */ 7396 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7397 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7398 #ifndef INVARIANTS 7399 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7400 #else 7401 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7402 if (insret != NULL) { 7403 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7404 nrsm, insret, rack, rsm); 7405 } 7406 #endif 7407 if (rsm->r_in_tmap) { 7408 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7409 nrsm->r_in_tmap = 1; 7410 } 7411 rsm->r_flags &= (~RACK_HAS_FIN); 7412 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7413 if (len == 0) { 7414 return; 7415 } else if (len > 0) 7416 goto refind; 7417 } 7418 } 7419 /* 7420 * Hmm not found in map did they retransmit both old and on into the 7421 * new? 7422 */ 7423 if (seq_out == tp->snd_max) { 7424 goto again; 7425 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7426 #ifdef INVARIANTS 7427 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7428 seq_out, len, tp->snd_una, tp->snd_max); 7429 printf("Starting Dump of all rack entries\n"); 7430 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7431 printf("rsm:%p start:%u end:%u\n", 7432 rsm, rsm->r_start, rsm->r_end); 7433 } 7434 printf("Dump complete\n"); 7435 panic("seq_out not found rack:%p tp:%p", 7436 rack, tp); 7437 #endif 7438 } else { 7439 #ifdef INVARIANTS 7440 /* 7441 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7442 * flag) 7443 */ 7444 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7445 seq_out, len, tp->snd_max, tp); 7446 #endif 7447 } 7448 } 7449 7450 /* 7451 * Record one of the RTT updates from an ack into 7452 * our sample structure. 
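 * The per-ack sample (rack_rs) keeps the lowest and highest rtt seen
 * in this ack, a running total and count for the average, and the
 * preferred microsecond rtt together with how confident we are in it
 * and the retransmit count of the rsm it came from.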
7453 */ 7454 7455 static void 7456 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7457 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7458 { 7459 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7460 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7461 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7462 } 7463 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7464 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7465 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7466 } 7467 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7468 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7469 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7470 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7471 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7472 } 7473 if ((confidence == 1) && 7474 ((rsm == NULL) || 7475 (rsm->r_just_ret) || 7476 (rsm->r_one_out_nr && 7477 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7478 /* 7479 * If the rsm was hit by a just-return 7480 * then we can't trust the 7481 * rtt measurement for buffer determination. 7482 * Note that a confidence of 2 indicates 7483 * SACK'd, which overrides the r_just_ret or 7484 * the r_one_out_nr. If it was a CUM-ACK and 7485 * we had only two outstanding but got an 7486 * ack for only one, then that also lowers our 7487 * confidence. 7488 */ 7489 confidence = 0; 7490 } 7491 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7492 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7493 if (rack->r_ctl.rack_rs.confidence == 0) { 7494 /* 7495 * We take anything with no current confidence 7496 * saved. 7497 */ 7498 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7499 rack->r_ctl.rack_rs.confidence = confidence; 7500 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7501 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7502 /* 7503 * Once we have a confident number, 7504 * we can update it with a smaller 7505 * value since this confident number 7506 * may include the DSACK time until 7507 * the next segment (the second one) arrived. 7508 */ 7509 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7510 rack->r_ctl.rack_rs.confidence = confidence; 7511 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7512 } 7513 } 7514 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7515 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7516 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7517 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7518 } 7519 7520 /* 7521 * Collect new round-trip time estimate 7522 * and update averages and current timeout.
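 * Which of those feeds the srtt update depends on
 * rc_rate_sample_method (lowest, highest or average of the ack), and
 * the running goodput srtt (rc_gp_srtt) is normally only folded
 * forward for confident samples (the very first one is taken
 * regardless).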
7523 */ 7524 static void 7525 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7526 { 7527 int32_t delta; 7528 int32_t rtt; 7529 7530 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7531 /* No valid sample */ 7532 return; 7533 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7534 /* We are to use the lowest RTT seen in a single ack */ 7535 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7536 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7537 /* We are to use the highest RTT seen in a single ack */ 7538 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7539 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7540 /* We are to use the average RTT seen in a single ack */ 7541 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7542 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7543 } else { 7544 #ifdef INVARIANTS 7545 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7546 #endif 7547 return; 7548 } 7549 if (rtt == 0) 7550 rtt = 1; 7551 if (rack->rc_gp_rtt_set == 0) { 7552 /* 7553 * With no RTT we have to accept 7554 * even one we are not confident of. 7555 */ 7556 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7557 rack->rc_gp_rtt_set = 1; 7558 } else if (rack->r_ctl.rack_rs.confidence) { 7559 /* update the running gp srtt */ 7560 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7561 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7562 } 7563 if (rack->r_ctl.rack_rs.confidence) { 7564 /* 7565 * record the low and high for highly buffered path computation, 7566 * we only do this if we are confident (not a retransmission). 7567 */ 7568 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7569 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7570 } 7571 if (rack->rc_highly_buffered == 0) { 7572 /* 7573 * Currently once we declare a path has 7574 * highly buffered there is no going 7575 * back, which may be a problem... 7576 */ 7577 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7578 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7579 rack->r_ctl.rc_highest_us_rtt, 7580 rack->r_ctl.rc_lowest_us_rtt, 7581 RACK_RTTS_SEEHBP); 7582 rack->rc_highly_buffered = 1; 7583 } 7584 } 7585 } 7586 if ((rack->r_ctl.rack_rs.confidence) || 7587 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7588 /* 7589 * If we are highly confident of it <or> it was 7590 * never retransmitted we accept it as the last us_rtt. 7591 */ 7592 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7593 /* The lowest rtt can be set if its was not retransmited */ 7594 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7595 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7596 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7597 rack->r_ctl.rc_lowest_us_rtt = 1; 7598 } 7599 } 7600 rack = (struct tcp_rack *)tp->t_fb_ptr; 7601 if (tp->t_srtt != 0) { 7602 /* 7603 * We keep a simple srtt in microseconds, like our rtt 7604 * measurement. We don't need to do any tricks with shifting 7605 * etc. Instead we just add in 1/8th of the new measurement 7606 * and subtract out 1/8 of the old srtt. We do the same with 7607 * the variance after finding the absolute value of the 7608 * difference between this sample and the current srtt. 
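 * For example, with srtt = 10000 usec and a new rtt of 12000 usec:
 *
 *	srtt   = 10000 - (10000 >> 3) + (12000 >> 3) = 10250
 *	delta  = |10000 - 12000| = 2000
 *	rttvar = rttvar - (rttvar >> 3) + (2000 >> 3)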
7609 */ 7610 delta = tp->t_srtt - rtt; 7611 /* Take off 1/8th of the current sRTT */ 7612 tp->t_srtt -= (tp->t_srtt >> 3); 7613 /* Add in 1/8th of the new RTT just measured */ 7614 tp->t_srtt += (rtt >> 3); 7615 if (tp->t_srtt <= 0) 7616 tp->t_srtt = 1; 7617 /* Now lets make the absolute value of the variance */ 7618 if (delta < 0) 7619 delta = -delta; 7620 /* Subtract out 1/8th */ 7621 tp->t_rttvar -= (tp->t_rttvar >> 3); 7622 /* Add in 1/8th of the new variance we just saw */ 7623 tp->t_rttvar += (delta >> 3); 7624 if (tp->t_rttvar <= 0) 7625 tp->t_rttvar = 1; 7626 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7627 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7628 } else { 7629 /* 7630 * No rtt measurement yet - use the unsmoothed rtt. Set the 7631 * variance to half the rtt (so our first retransmit happens 7632 * at 3*rtt). 7633 */ 7634 tp->t_srtt = rtt; 7635 tp->t_rttvar = rtt >> 1; 7636 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7637 } 7638 rack->rc_srtt_measure_made = 1; 7639 KMOD_TCPSTAT_INC(tcps_rttupdated); 7640 tp->t_rttupdated++; 7641 #ifdef STATS 7642 if (rack_stats_gets_ms_rtt == 0) { 7643 /* Send in the microsecond rtt used for rxt timeout purposes */ 7644 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7645 } else if (rack_stats_gets_ms_rtt == 1) { 7646 /* Send in the millisecond rtt used for rxt timeout purposes */ 7647 int32_t ms_rtt; 7648 7649 /* Round up */ 7650 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7651 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7652 } else if (rack_stats_gets_ms_rtt == 2) { 7653 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7654 int32_t ms_rtt; 7655 7656 /* Round up */ 7657 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7658 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7659 } else { 7660 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7661 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7662 } 7663 7664 #endif 7665 /* 7666 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7667 * way we do the smoothing, srtt and rttvar will each average +1/2 7668 * tick of bias. When we compute the retransmit timer, we want 1/2 7669 * tick of rounding and 1 extra tick because of +-1/2 tick 7670 * uncertainty in the firing of the timer. The bias will give us 7671 * exactly the 1.5 tick we need. But, because the bias is 7672 * statistical, we have to test that we don't drop below the minimum 7673 * feasible timer (which is 2 ticks). 7674 */ 7675 tp->t_rxtshift = 0; 7676 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7677 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7678 rack_log_rtt_sample(rack, rtt); 7679 tp->t_softerror = 0; 7680 } 7681 7682 7683 static void 7684 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7685 { 7686 /* 7687 * Apply to filter the inbound us-rtt at us_cts. 7688 */ 7689 uint32_t old_rtt; 7690 7691 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7692 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7693 us_rtt, us_cts); 7694 if (old_rtt > us_rtt) { 7695 /* We just hit a new lower rtt time */ 7696 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7697 __LINE__, RACK_RTTS_NEWRTT); 7698 /* 7699 * Only count it if its lower than what we saw within our 7700 * calculated range. 
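 * "Within our calculated range" means the drop has to exceed
 * rack_min_rtt_movement before rc_lower_rtt_us_cts is reset, and the
 * early probe-rtt entry below only triggers when we are already within
 * the last rack_probertt_lower_within percent of the normal probe-rtt
 * interval, on the assumption that a peer flow just ran its own probe.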
7701 */ 7702 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7703 if (rack_probertt_lower_within && 7704 rack->rc_gp_dyn_mul && 7705 (rack->use_fixed_rate == 0) && 7706 (rack->rc_always_pace)) { 7707 /* 7708 * We are seeing a new lower rtt very close 7709 * to the time that we would have entered probe-rtt. 7710 * This is probably due to the fact that a peer flow 7711 * has entered probe-rtt. Lets go in now too. 7712 */ 7713 uint32_t val; 7714 7715 val = rack_probertt_lower_within * rack_time_between_probertt; 7716 val /= 100; 7717 if ((rack->in_probe_rtt == 0) && 7718 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7719 rack_enter_probertt(rack, us_cts); 7720 } 7721 } 7722 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7723 } 7724 } 7725 } 7726 7727 static int 7728 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7729 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7730 { 7731 uint32_t us_rtt; 7732 int32_t i, all; 7733 uint32_t t, len_acked; 7734 7735 if ((rsm->r_flags & RACK_ACKED) || 7736 (rsm->r_flags & RACK_WAS_ACKED)) 7737 /* Already done */ 7738 return (0); 7739 if (rsm->r_no_rtt_allowed) { 7740 /* Not allowed */ 7741 return (0); 7742 } 7743 if (ack_type == CUM_ACKED) { 7744 if (SEQ_GT(th_ack, rsm->r_end)) { 7745 len_acked = rsm->r_end - rsm->r_start; 7746 all = 1; 7747 } else { 7748 len_acked = th_ack - rsm->r_start; 7749 all = 0; 7750 } 7751 } else { 7752 len_acked = rsm->r_end - rsm->r_start; 7753 all = 0; 7754 } 7755 if (rsm->r_rtr_cnt == 1) { 7756 7757 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7758 if ((int)t <= 0) 7759 t = 1; 7760 if (!tp->t_rttlow || tp->t_rttlow > t) 7761 tp->t_rttlow = t; 7762 if (!rack->r_ctl.rc_rack_min_rtt || 7763 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7764 rack->r_ctl.rc_rack_min_rtt = t; 7765 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7766 rack->r_ctl.rc_rack_min_rtt = 1; 7767 } 7768 } 7769 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7770 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7771 else 7772 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7773 if (us_rtt == 0) 7774 us_rtt = 1; 7775 if (CC_ALGO(tp)->rttsample != NULL) { 7776 /* Kick the RTT to the CC */ 7777 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7778 } 7779 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7780 if (ack_type == SACKED) { 7781 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7782 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7783 } else { 7784 /* 7785 * We need to setup what our confidence 7786 * is in this ack. 7787 * 7788 * If the rsm was app limited and it is 7789 * less than a mss in length (the end 7790 * of the send) then we have a gap. If we 7791 * were app limited but say we were sending 7792 * multiple MSS's then we are more confident 7793 * int it. 7794 * 7795 * When we are not app-limited then we see if 7796 * the rsm is being included in the current 7797 * measurement, we tell this by the app_limited_needs_set 7798 * flag. 7799 * 7800 * Note that being cwnd blocked is not applimited 7801 * as well as the pacing delay between packets which 7802 * are sending only 1 or 2 MSS's also will show up 7803 * in the RTT. 
We probably need to examine this algorithm 7804 * a bit more and enhance it to account for the delay 7805 * between rsm's. We could do that by saving off the 7806 * pacing delay of each rsm (in an rsm) and then 7807 * factoring that in somehow though for now I am 7808 * not sure how :) 7809 */ 7810 int calc_conf = 0; 7811 7812 if (rsm->r_flags & RACK_APP_LIMITED) { 7813 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7814 calc_conf = 0; 7815 else 7816 calc_conf = 1; 7817 } else if (rack->app_limited_needs_set == 0) { 7818 calc_conf = 1; 7819 } else { 7820 calc_conf = 0; 7821 } 7822 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7823 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7824 calc_conf, rsm, rsm->r_rtr_cnt); 7825 } 7826 if ((rsm->r_flags & RACK_TLP) && 7827 (!IN_FASTRECOVERY(tp->t_flags))) { 7828 /* Segment was a TLP and our retrans matched */ 7829 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7830 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 7831 } 7832 } 7833 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7834 /* New more recent rack_tmit_time */ 7835 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7836 rack->rc_rack_rtt = t; 7837 } 7838 return (1); 7839 } 7840 /* 7841 * We clear the soft/rxtshift since we got an ack. 7842 * There is no assurance we will call the commit() function 7843 * so we need to clear these to avoid incorrect handling. 7844 */ 7845 tp->t_rxtshift = 0; 7846 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7847 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7848 tp->t_softerror = 0; 7849 if (to && (to->to_flags & TOF_TS) && 7850 (ack_type == CUM_ACKED) && 7851 (to->to_tsecr) && 7852 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7853 /* 7854 * Now which timestamp does it match? In this block the ACK 7855 * must be coming from a previous transmission. 7856 */ 7857 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7858 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7859 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7860 if ((int)t <= 0) 7861 t = 1; 7862 if (CC_ALGO(tp)->rttsample != NULL) { 7863 /* 7864 * Kick the RTT to the CC, here 7865 * we lie a bit in that we know the 7866 * retransmission is correct even though 7867 * we retransmitted. This is because 7868 * we match the timestamps. 7869 */ 7870 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7871 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7872 else 7873 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7874 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7875 } 7876 if ((i + 1) < rsm->r_rtr_cnt) { 7877 /* 7878 * The peer ack'd from our previous 7879 * transmission. We have a spurious 7880 * retransmission and thus we dont 7881 * want to update our rack_rtt. 7882 * 7883 * Hmm should there be a CC revert here? 
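 * (The echoed timestamp matched transmission i, and (i + 1) being less
 * than r_rtr_cnt means a later retransmission exists, so the ack must
 * be for the earlier send and the retransmit was spurious.)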
7884 * 7885 */ 7886 return (0); 7887 } 7888 if (!tp->t_rttlow || tp->t_rttlow > t) 7889 tp->t_rttlow = t; 7890 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7891 rack->r_ctl.rc_rack_min_rtt = t; 7892 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7893 rack->r_ctl.rc_rack_min_rtt = 1; 7894 } 7895 } 7896 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7897 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7898 /* New more recent rack_tmit_time */ 7899 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7900 rack->rc_rack_rtt = t; 7901 } 7902 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7903 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7904 rsm->r_rtr_cnt); 7905 return (1); 7906 } 7907 } 7908 goto ts_not_found; 7909 } else { 7910 /* 7911 * Ok its a SACK block that we retransmitted. or a windows 7912 * machine without timestamps. We can tell nothing from the 7913 * time-stamp since its not there or the time the peer last 7914 * recieved a segment that moved forward its cum-ack point. 7915 */ 7916 ts_not_found: 7917 i = rsm->r_rtr_cnt - 1; 7918 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7919 if ((int)t <= 0) 7920 t = 1; 7921 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7922 /* 7923 * We retransmitted and the ack came back in less 7924 * than the smallest rtt we have observed. We most 7925 * likely did an improper retransmit as outlined in 7926 * 6.2 Step 2 point 2 in the rack-draft so we 7927 * don't want to update our rack_rtt. We in 7928 * theory (in future) might want to think about reverting our 7929 * cwnd state but we won't for now. 7930 */ 7931 return (0); 7932 } else if (rack->r_ctl.rc_rack_min_rtt) { 7933 /* 7934 * We retransmitted it and the retransmit did the 7935 * job. 7936 */ 7937 if (!rack->r_ctl.rc_rack_min_rtt || 7938 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7939 rack->r_ctl.rc_rack_min_rtt = t; 7940 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7941 rack->r_ctl.rc_rack_min_rtt = 1; 7942 } 7943 } 7944 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 7945 /* New more recent rack_tmit_time */ 7946 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 7947 rack->rc_rack_rtt = t; 7948 } 7949 return (1); 7950 } 7951 } 7952 return (0); 7953 } 7954 7955 /* 7956 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 7957 */ 7958 static void 7959 rack_log_sack_passed(struct tcpcb *tp, 7960 struct tcp_rack *rack, struct rack_sendmap *rsm) 7961 { 7962 struct rack_sendmap *nrsm; 7963 7964 nrsm = rsm; 7965 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 7966 rack_head, r_tnext) { 7967 if (nrsm == rsm) { 7968 /* Skip orginal segment he is acked */ 7969 continue; 7970 } 7971 if (nrsm->r_flags & RACK_ACKED) { 7972 /* 7973 * Skip ack'd segments, though we 7974 * should not see these, since tmap 7975 * should not have ack'd segments. 7976 */ 7977 continue; 7978 } 7979 if (nrsm->r_flags & RACK_SACK_PASSED) { 7980 /* 7981 * We found one that is already marked 7982 * passed, we have been here before and 7983 * so all others below this are marked. 
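 * That early break bounds the walk: once we hit an entry that already
 * has RACK_SACK_PASSED set, everything send-wise before it was taken
 * care of on an earlier pass.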
7984 */ 7985 break; 7986 } 7987 nrsm->r_flags |= RACK_SACK_PASSED; 7988 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 7989 } 7990 } 7991 7992 static void 7993 rack_need_set_test(struct tcpcb *tp, 7994 struct tcp_rack *rack, 7995 struct rack_sendmap *rsm, 7996 tcp_seq th_ack, 7997 int line, 7998 int use_which) 7999 { 8000 8001 if ((tp->t_flags & TF_GPUTINPROG) && 8002 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8003 /* 8004 * We were app limited, and this ack 8005 * butts up or goes beyond the point where we want 8006 * to start our next measurement. We need 8007 * to record the new gput_ts as here and 8008 * possibly update the start sequence. 8009 */ 8010 uint32_t seq, ts; 8011 8012 if (rsm->r_rtr_cnt > 1) { 8013 /* 8014 * This is a retransmit, can we 8015 * really make any assessment at this 8016 * point? We are not really sure of 8017 * the timestamp, is it this or the 8018 * previous transmission? 8019 * 8020 * Lets wait for something better that 8021 * is not retransmitted. 8022 */ 8023 return; 8024 } 8025 seq = tp->gput_seq; 8026 ts = tp->gput_ts; 8027 rack->app_limited_needs_set = 0; 8028 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8029 /* Do we start at a new end? */ 8030 if ((use_which == RACK_USE_BEG) && 8031 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8032 /* 8033 * When we get an ACK that just eats 8034 * up some of the rsm, we set RACK_USE_BEG 8035 * since whats at r_start (i.e. th_ack) 8036 * is left unacked and thats where the 8037 * measurement not starts. 8038 */ 8039 tp->gput_seq = rsm->r_start; 8040 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8041 } 8042 if ((use_which == RACK_USE_END) && 8043 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8044 /* 8045 * We use the end when the cumack 8046 * is moving forward and completely 8047 * deleting the rsm passed so basically 8048 * r_end holds th_ack. 8049 * 8050 * For SACK's we also want to use the end 8051 * since this piece just got sacked and 8052 * we want to target anything after that 8053 * in our measurement. 8054 */ 8055 tp->gput_seq = rsm->r_end; 8056 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8057 } 8058 if (use_which == RACK_USE_END_OR_THACK) { 8059 /* 8060 * special case for ack moving forward, 8061 * not a sack, we need to move all the 8062 * way up to where this ack cum-ack moves 8063 * to. 8064 */ 8065 if (SEQ_GT(th_ack, rsm->r_end)) 8066 tp->gput_seq = th_ack; 8067 else 8068 tp->gput_seq = rsm->r_end; 8069 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8070 } 8071 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8072 /* 8073 * We moved beyond this guy's range, re-calculate 8074 * the new end point. 8075 */ 8076 if (rack->rc_gp_filled == 0) { 8077 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8078 } else { 8079 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8080 } 8081 } 8082 /* 8083 * We are moving the goal post, we may be able to clear the 8084 * measure_saw_probe_rtt flag. 
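 * The flag can be dropped once gput_seq has moved past
 * rc_probertt_sndmax_atexit, i.e. the new measurement no longer
 * overlaps anything that was sent while we were in probe-rtt.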
8085 */ 8086 if ((rack->in_probe_rtt == 0) && 8087 (rack->measure_saw_probe_rtt) && 8088 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8089 rack->measure_saw_probe_rtt = 0; 8090 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8091 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8092 if (rack->rc_gp_filled && 8093 ((tp->gput_ack - tp->gput_seq) < 8094 max(rc_init_window(rack), (MIN_GP_WIN * 8095 ctf_fixed_maxseg(tp))))) { 8096 uint32_t ideal_amount; 8097 8098 ideal_amount = rack_get_measure_window(tp, rack); 8099 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8100 /* 8101 * There is no sense of continuing this measurement 8102 * because its too small to gain us anything we 8103 * trust. Skip it and that way we can start a new 8104 * measurement quicker. 8105 */ 8106 tp->t_flags &= ~TF_GPUTINPROG; 8107 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8108 0, 0, 0, 6, __LINE__, NULL, 0); 8109 } else { 8110 /* 8111 * Reset the window further out. 8112 */ 8113 tp->gput_ack = tp->gput_seq + ideal_amount; 8114 } 8115 } 8116 } 8117 } 8118 8119 static inline int 8120 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8121 { 8122 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8123 /* Behind our TLP definition or right at */ 8124 return (0); 8125 } 8126 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8127 /* The start is beyond or right at our end of TLP definition */ 8128 return (0); 8129 } 8130 /* It has to be a sub-part of the original TLP recorded */ 8131 return (1); 8132 } 8133 8134 8135 static uint32_t 8136 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8137 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8138 { 8139 uint32_t start, end, changed = 0; 8140 struct rack_sendmap stack_map; 8141 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8142 #ifdef INVARIANTS 8143 struct rack_sendmap *insret; 8144 #endif 8145 int32_t used_ref = 1; 8146 int moved = 0; 8147 8148 start = sack->start; 8149 end = sack->end; 8150 rsm = *prsm; 8151 memset(&fe, 0, sizeof(fe)); 8152 do_rest_ofb: 8153 if ((rsm == NULL) || 8154 (SEQ_LT(end, rsm->r_start)) || 8155 (SEQ_GEQ(start, rsm->r_end)) || 8156 (SEQ_LT(start, rsm->r_start))) { 8157 /* 8158 * We are not in the right spot, 8159 * find the correct spot in the tree. 8160 */ 8161 used_ref = 0; 8162 fe.r_start = start; 8163 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8164 moved++; 8165 } 8166 if (rsm == NULL) { 8167 /* TSNH */ 8168 goto out; 8169 } 8170 /* Ok we have an ACK for some piece of this rsm */ 8171 if (rsm->r_start != start) { 8172 if ((rsm->r_flags & RACK_ACKED) == 0) { 8173 /* 8174 * Before any splitting or hookery is 8175 * done is it a TLP of interest i.e. rxt? 8176 */ 8177 if ((rsm->r_flags & RACK_TLP) && 8178 (rsm->r_rtr_cnt > 1)) { 8179 /* 8180 * We are splitting a rxt TLP, check 8181 * if we need to save off the start/end 8182 */ 8183 if (rack->rc_last_tlp_acked_set && 8184 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8185 /* 8186 * We already turned this on since we are inside 8187 * the previous one was a partially sack now we 8188 * are getting another one (maybe all of it). 8189 * 8190 */ 8191 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8192 /* 8193 * Lets make sure we have all of it though. 
8194 */ 8195 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8196 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8197 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8198 rack->r_ctl.last_tlp_acked_end); 8199 } 8200 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8201 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8202 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8203 rack->r_ctl.last_tlp_acked_end); 8204 } 8205 } else { 8206 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8207 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8208 rack->rc_last_tlp_past_cumack = 0; 8209 rack->rc_last_tlp_acked_set = 1; 8210 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8211 } 8212 } 8213 /** 8214 * Need to split this in two pieces the before and after, 8215 * the before remains in the map, the after must be 8216 * added. In other words we have: 8217 * rsm |--------------| 8218 * sackblk |-------> 8219 * rsm will become 8220 * rsm |---| 8221 * and nrsm will be the sacked piece 8222 * nrsm |----------| 8223 * 8224 * But before we start down that path lets 8225 * see if the sack spans over on top of 8226 * the next guy and it is already sacked. 8227 * 8228 */ 8229 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8230 if (next && (next->r_flags & RACK_ACKED) && 8231 SEQ_GEQ(end, next->r_start)) { 8232 /** 8233 * So the next one is already acked, and 8234 * we can thus by hookery use our stack_map 8235 * to reflect the piece being sacked and 8236 * then adjust the two tree entries moving 8237 * the start and ends around. So we start like: 8238 * rsm |------------| (not-acked) 8239 * next |-----------| (acked) 8240 * sackblk |--------> 8241 * We want to end like so: 8242 * rsm |------| (not-acked) 8243 * next |-----------------| (acked) 8244 * nrsm |-----| 8245 * Where nrsm is a temporary stack piece we 8246 * use to update all the gizmos. 8247 */ 8248 /* Copy up our fudge block */ 8249 nrsm = &stack_map; 8250 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8251 /* Now adjust our tree blocks */ 8252 rsm->r_end = start; 8253 next->r_start = start; 8254 /* Now we must adjust back where next->m is */ 8255 rack_setup_offset_for_rsm(rsm, next); 8256 8257 /* We don't need to adjust rsm, it did not change */ 8258 /* Clear out the dup ack count of the remainder */ 8259 rsm->r_dupack = 0; 8260 rsm->r_just_ret = 0; 8261 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8262 /* Now lets make sure our fudge block is right */ 8263 nrsm->r_start = start; 8264 /* Now lets update all the stats and such */ 8265 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8266 if (rack->app_limited_needs_set) 8267 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8268 changed += (nrsm->r_end - nrsm->r_start); 8269 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8270 if (nrsm->r_flags & RACK_SACK_PASSED) { 8271 rack->r_ctl.rc_reorder_ts = cts; 8272 } 8273 /* 8274 * Now we want to go up from rsm (the 8275 * one left un-acked) to the next one 8276 * in the tmap. We do this so when 8277 * we walk backwards we include marking 8278 * sack-passed on rsm (The one passed in 8279 * is skipped since it is generally called 8280 * on something sacked before removing it 8281 * from the tmap). 8282 */ 8283 if (rsm->r_in_tmap) { 8284 nrsm = TAILQ_NEXT(rsm, r_tnext); 8285 /* 8286 * Now that we have the next 8287 * one walk backwards from there. 
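* Note that rack_log_sack_passed() stops as soon as it finds an entry
* already marked RACK_SACK_PASSED, so earlier parts of the tmap are not
* re-walked on every SACK.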
8288 */ 8289 if (nrsm && nrsm->r_in_tmap) 8290 rack_log_sack_passed(tp, rack, nrsm); 8291 } 8292 /* Now are we done? */ 8293 if (SEQ_LT(end, next->r_end) || 8294 (end == next->r_end)) { 8295 /* Done with block */ 8296 goto out; 8297 } 8298 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8299 counter_u64_add(rack_sack_used_next_merge, 1); 8300 /* Postion for the next block */ 8301 start = next->r_end; 8302 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8303 if (rsm == NULL) 8304 goto out; 8305 } else { 8306 /** 8307 * We can't use any hookery here, so we 8308 * need to split the map. We enter like 8309 * so: 8310 * rsm |--------| 8311 * sackblk |-----> 8312 * We will add the new block nrsm and 8313 * that will be the new portion, and then 8314 * fall through after reseting rsm. So we 8315 * split and look like this: 8316 * rsm |----| 8317 * sackblk |-----> 8318 * nrsm |---| 8319 * We then fall through reseting 8320 * rsm to nrsm, so the next block 8321 * picks it up. 8322 */ 8323 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8324 if (nrsm == NULL) { 8325 /* 8326 * failed XXXrrs what can we do but loose the sack 8327 * info? 8328 */ 8329 goto out; 8330 } 8331 counter_u64_add(rack_sack_splits, 1); 8332 rack_clone_rsm(rack, nrsm, rsm, start); 8333 rsm->r_just_ret = 0; 8334 #ifndef INVARIANTS 8335 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8336 #else 8337 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8338 if (insret != NULL) { 8339 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8340 nrsm, insret, rack, rsm); 8341 } 8342 #endif 8343 if (rsm->r_in_tmap) { 8344 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8345 nrsm->r_in_tmap = 1; 8346 } 8347 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8348 rsm->r_flags &= (~RACK_HAS_FIN); 8349 /* Position us to point to the new nrsm that starts the sack blk */ 8350 rsm = nrsm; 8351 } 8352 } else { 8353 /* Already sacked this piece */ 8354 counter_u64_add(rack_sack_skipped_acked, 1); 8355 moved++; 8356 if (end == rsm->r_end) { 8357 /* Done with block */ 8358 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8359 goto out; 8360 } else if (SEQ_LT(end, rsm->r_end)) { 8361 /* A partial sack to a already sacked block */ 8362 moved++; 8363 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8364 goto out; 8365 } else { 8366 /* 8367 * The end goes beyond this guy 8368 * repostion the start to the 8369 * next block. 8370 */ 8371 start = rsm->r_end; 8372 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8373 if (rsm == NULL) 8374 goto out; 8375 } 8376 } 8377 } 8378 if (SEQ_GEQ(end, rsm->r_end)) { 8379 /** 8380 * The end of this block is either beyond this guy or right 8381 * at this guy. I.e.: 8382 * rsm --- |-----| 8383 * end |-----| 8384 * <or> 8385 * end |---------| 8386 */ 8387 if ((rsm->r_flags & RACK_ACKED) == 0) { 8388 /* 8389 * Is it a TLP of interest? 8390 */ 8391 if ((rsm->r_flags & RACK_TLP) && 8392 (rsm->r_rtr_cnt > 1)) { 8393 /* 8394 * We are splitting a rxt TLP, check 8395 * if we need to save off the start/end 8396 */ 8397 if (rack->rc_last_tlp_acked_set && 8398 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8399 /* 8400 * We already turned this on since we are inside 8401 * the previous one was a partially sack now we 8402 * are getting another one (maybe all of it). 
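* (The rsm lies inside the TLP block we already recorded, so a later
* D-SACK for this range will not be treated as reordering.)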
8403 */ 8404 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8405 /* 8406 * Let's make sure we have all of it, though. 8407 */ 8408 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8409 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8410 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8411 rack->r_ctl.last_tlp_acked_end); 8412 } 8413 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8414 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8415 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8416 rack->r_ctl.last_tlp_acked_end); 8417 } 8418 } else { 8419 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8420 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8421 rack->rc_last_tlp_past_cumack = 0; 8422 rack->rc_last_tlp_acked_set = 1; 8423 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8424 } 8425 } 8426 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8427 changed += (rsm->r_end - rsm->r_start); 8428 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8429 if (rsm->r_in_tmap) /* should be true */ 8430 rack_log_sack_passed(tp, rack, rsm); 8431 /* Is reordering occurring? */ 8432 if (rsm->r_flags & RACK_SACK_PASSED) { 8433 rsm->r_flags &= ~RACK_SACK_PASSED; 8434 rack->r_ctl.rc_reorder_ts = cts; 8435 } 8436 if (rack->app_limited_needs_set) 8437 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8438 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8439 rsm->r_flags |= RACK_ACKED; 8440 if (rsm->r_in_tmap) { 8441 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8442 rsm->r_in_tmap = 0; 8443 } 8444 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8445 } else { 8446 counter_u64_add(rack_sack_skipped_acked, 1); 8447 moved++; 8448 } 8449 if (end == rsm->r_end) { 8450 /* This block only - done, setup for next */ 8451 goto out; 8452 } 8453 /* 8454 * There is more not covered by this rsm; move on 8455 * to the next block in the RB tree. 8456 */ 8457 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8458 start = rsm->r_end; 8459 rsm = nrsm; 8460 if (rsm == NULL) 8461 goto out; 8462 goto do_rest_ofb; 8463 } 8464 /** 8465 * The end of this sack block is smaller than 8466 * our rsm i.e.: 8467 * rsm --- |-----| 8468 * end |--| 8469 */ 8470 if ((rsm->r_flags & RACK_ACKED) == 0) { 8471 /* 8472 * Is it a TLP of interest? 8473 */ 8474 if ((rsm->r_flags & RACK_TLP) && 8475 (rsm->r_rtr_cnt > 1)) { 8476 /* 8477 * We are splitting a rxt TLP, check 8478 * if we need to save off the start/end 8479 */ 8480 if (rack->rc_last_tlp_acked_set && 8481 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8482 /* 8483 * We already turned this on, since we are inside 8484 * the previously recorded TLP block: it was partially 8485 * SACKed before and now we are getting another piece (maybe all of it). 8486 */ 8487 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8488 /* 8489 * Let's make sure we have all of it, though.
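* That is, widen last_tlp_acked_start/end below so the recorded TLP
* block covers this rsm completely.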
8490 */ 8491 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8492 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8493 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8494 rack->r_ctl.last_tlp_acked_end); 8495 } 8496 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8497 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8498 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8499 rack->r_ctl.last_tlp_acked_end); 8500 } 8501 } else { 8502 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8503 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8504 rack->rc_last_tlp_past_cumack = 0; 8505 rack->rc_last_tlp_acked_set = 1; 8506 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8507 } 8508 } 8509 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8510 if (prev && 8511 (prev->r_flags & RACK_ACKED)) { 8512 /** 8513 * Goal, we want the right remainder of rsm to shrink 8514 * in place and span from (rsm->r_start = end) to rsm->r_end. 8515 * We want to expand prev to go all the way 8516 * to prev->r_end <- end. 8517 * so in the tree we have before: 8518 * prev |--------| (acked) 8519 * rsm |-------| (non-acked) 8520 * sackblk |-| 8521 * We churn it so we end up with 8522 * prev |----------| (acked) 8523 * rsm |-----| (non-acked) 8524 * nrsm |-| (temporary) 8525 * 8526 * Note if either prev/rsm is a TLP we don't 8527 * do this. 8528 */ 8529 nrsm = &stack_map; 8530 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8531 prev->r_end = end; 8532 rsm->r_start = end; 8533 /* Now adjust nrsm (stack copy) to be 8534 * the one that is the small 8535 * piece that was "sacked". 8536 */ 8537 nrsm->r_end = end; 8538 rsm->r_dupack = 0; 8539 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8540 /* 8541 * Now that the rsm has had its start moved forward 8542 * lets go ahead and get its new place in the world. 8543 */ 8544 rack_setup_offset_for_rsm(prev, rsm); 8545 /* 8546 * Now nrsm is our new little piece 8547 * that is acked (which was merged 8548 * to prev). Update the rtt and changed 8549 * based on that. Also check for reordering. 8550 */ 8551 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8552 if (rack->app_limited_needs_set) 8553 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8554 changed += (nrsm->r_end - nrsm->r_start); 8555 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8556 if (nrsm->r_flags & RACK_SACK_PASSED) { 8557 rack->r_ctl.rc_reorder_ts = cts; 8558 } 8559 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8560 rsm = prev; 8561 counter_u64_add(rack_sack_used_prev_merge, 1); 8562 } else { 8563 /** 8564 * This is the case where our previous 8565 * block is not acked either, so we must 8566 * split the block in two. 8567 */ 8568 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8569 if (nrsm == NULL) { 8570 /* failed rrs what can we do but loose the sack info? */ 8571 goto out; 8572 } 8573 if ((rsm->r_flags & RACK_TLP) && 8574 (rsm->r_rtr_cnt > 1)) { 8575 /* 8576 * We are splitting a rxt TLP, check 8577 * if we need to save off the start/end 8578 */ 8579 if (rack->rc_last_tlp_acked_set && 8580 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8581 /* 8582 * We already turned this on since this block is inside 8583 * the previous one was a partially sack now we 8584 * are getting another one (maybe all of it). 8585 */ 8586 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8587 /* 8588 * Lets make sure we have all of it though. 
8589 */ 8590 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8591 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8592 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8593 rack->r_ctl.last_tlp_acked_end); 8594 } 8595 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8596 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8597 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8598 rack->r_ctl.last_tlp_acked_end); 8599 } 8600 } else { 8601 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8602 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8603 rack->rc_last_tlp_acked_set = 1; 8604 rack->rc_last_tlp_past_cumack = 0; 8605 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8606 } 8607 } 8608 /** 8609 * In this case nrsm becomes 8610 * nrsm->r_start = end; 8611 * nrsm->r_end = rsm->r_end; 8612 * which is un-acked. 8613 * <and> 8614 * rsm->r_end = nrsm->r_start; 8615 * i.e. the remaining un-acked 8616 * piece is left on the left 8617 * hand side. 8618 * 8619 * So we start like this 8620 * rsm |----------| (not acked) 8621 * sackblk |---| 8622 * build it so we have 8623 * rsm |---| (acked) 8624 * nrsm |------| (not acked) 8625 */ 8626 counter_u64_add(rack_sack_splits, 1); 8627 rack_clone_rsm(rack, nrsm, rsm, end); 8628 rsm->r_flags &= (~RACK_HAS_FIN); 8629 rsm->r_just_ret = 0; 8630 #ifndef INVARIANTS 8631 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8632 #else 8633 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8634 if (insret != NULL) { 8635 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8636 nrsm, insret, rack, rsm); 8637 } 8638 #endif 8639 if (rsm->r_in_tmap) { 8640 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8641 nrsm->r_in_tmap = 1; 8642 } 8643 nrsm->r_dupack = 0; 8644 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8645 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8646 changed += (rsm->r_end - rsm->r_start); 8647 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8648 if (rsm->r_in_tmap) /* should be true */ 8649 rack_log_sack_passed(tp, rack, rsm); 8650 /* Is Reordering occuring? */ 8651 if (rsm->r_flags & RACK_SACK_PASSED) { 8652 rsm->r_flags &= ~RACK_SACK_PASSED; 8653 rack->r_ctl.rc_reorder_ts = cts; 8654 } 8655 if (rack->app_limited_needs_set) 8656 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8657 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8658 rsm->r_flags |= RACK_ACKED; 8659 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8660 if (rsm->r_in_tmap) { 8661 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8662 rsm->r_in_tmap = 0; 8663 } 8664 } 8665 } else if (start != end){ 8666 /* 8667 * The block was already acked. 8668 */ 8669 counter_u64_add(rack_sack_skipped_acked, 1); 8670 moved++; 8671 } 8672 out: 8673 if (rsm && 8674 ((rsm->r_flags & RACK_TLP) == 0) && 8675 (rsm->r_flags & RACK_ACKED)) { 8676 /* 8677 * Now can we merge where we worked 8678 * with either the previous or 8679 * next block? 8680 */ 8681 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8682 while (next) { 8683 if (next->r_flags & RACK_TLP) 8684 break; 8685 if (next->r_flags & RACK_ACKED) { 8686 /* yep this and next can be merged */ 8687 rsm = rack_merge_rsm(rack, rsm, next); 8688 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8689 } else 8690 break; 8691 } 8692 /* Now what about the previous? 
*/ 8693 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8694 while (prev) { 8695 if (prev->r_flags & RACK_TLP) 8696 break; 8697 if (prev->r_flags & RACK_ACKED) { 8698 /* yep the previous and this can be merged */ 8699 rsm = rack_merge_rsm(rack, prev, rsm); 8700 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8701 } else 8702 break; 8703 } 8704 } 8705 if (used_ref == 0) { 8706 counter_u64_add(rack_sack_proc_all, 1); 8707 } else { 8708 counter_u64_add(rack_sack_proc_short, 1); 8709 } 8710 /* Save off the next one for quick reference. */ 8711 if (rsm) 8712 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8713 else 8714 nrsm = NULL; 8715 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8716 /* Pass back the moved. */ 8717 *moved_two = moved; 8718 return (changed); 8719 } 8720 8721 static void inline 8722 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8723 { 8724 struct rack_sendmap *tmap; 8725 8726 tmap = NULL; 8727 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8728 /* Its no longer sacked, mark it so */ 8729 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8730 #ifdef INVARIANTS 8731 if (rsm->r_in_tmap) { 8732 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8733 rack, rsm, rsm->r_flags); 8734 } 8735 #endif 8736 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8737 /* Rebuild it into our tmap */ 8738 if (tmap == NULL) { 8739 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8740 tmap = rsm; 8741 } else { 8742 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8743 tmap = rsm; 8744 } 8745 tmap->r_in_tmap = 1; 8746 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8747 } 8748 /* 8749 * Now lets possibly clear the sack filter so we start 8750 * recognizing sacks that cover this area. 8751 */ 8752 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8753 8754 } 8755 8756 static void 8757 rack_do_decay(struct tcp_rack *rack) 8758 { 8759 struct timeval res; 8760 8761 #define timersub(tvp, uvp, vvp) \ 8762 do { \ 8763 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8764 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8765 if ((vvp)->tv_usec < 0) { \ 8766 (vvp)->tv_sec--; \ 8767 (vvp)->tv_usec += 1000000; \ 8768 } \ 8769 } while (0) 8770 8771 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8772 #undef timersub 8773 8774 rack->r_ctl.input_pkt++; 8775 if ((rack->rc_in_persist) || 8776 (res.tv_sec >= 1) || 8777 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8778 /* 8779 * Check for decay of non-SAD, 8780 * we want all SAD detection metrics to 8781 * decay 1/4 per second (or more) passed. 8782 */ 8783 #ifdef NETFLIX_EXP_DETECTION 8784 uint32_t pkt_delta; 8785 8786 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8787 #endif 8788 /* Update our saved tracking values */ 8789 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8790 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8791 /* Now do we escape without decay? */ 8792 #ifdef NETFLIX_EXP_DETECTION 8793 if (rack->rc_in_persist || 8794 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8795 (pkt_delta < tcp_sad_low_pps)){ 8796 /* 8797 * We don't decay idle connections 8798 * or ones that have a low input pps. 
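* (When we do fall through, each SAD counter below is run through
* ctf_decay_count() so the ack/sack and move ratios stay weighted
* toward recent behaviour rather than the whole connection history.)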
8799 */ 8800 return; 8801 } 8802 /* Decay the counters */ 8803 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8804 tcp_sad_decay_val); 8805 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8806 tcp_sad_decay_val); 8807 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8808 tcp_sad_decay_val); 8809 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8810 tcp_sad_decay_val); 8811 #endif 8812 } 8813 } 8814 8815 static void 8816 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8817 { 8818 struct rack_sendmap *rsm; 8819 #ifdef INVARIANTS 8820 struct rack_sendmap *rm; 8821 #endif 8822 8823 /* 8824 * The ACK point is advancing to th_ack, we must drop off 8825 * the packets in the rack log and calculate any eligible 8826 * RTT's. 8827 */ 8828 rack->r_wanted_output = 1; 8829 8830 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */ 8831 if ((rack->rc_last_tlp_acked_set == 1) && 8832 (rack->rc_last_tlp_past_cumack == 1) && 8833 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8834 /* 8835 * We have reached the point where our last rack 8836 * tlp retransmit sequence is ahead of the cum-ack. 8837 * This can only happen when the cum-ack moves all 8838 * the way around (it's been a full 2^31 + 1 bytes 8839 * or more since we sent a retransmitted TLP). Let's 8840 * turn off the valid flag since it's not really valid. 8841 * 8842 * Note that since SACKs also turn on this event we have 8843 * a complication: we have to wait to age it out until 8844 * the cum-ack has gone by the TLP before checking, which is 8845 * what the next else clause does. 8846 */ 8847 rack_log_dsack_event(rack, 9, __LINE__, 8848 rack->r_ctl.last_tlp_acked_start, 8849 rack->r_ctl.last_tlp_acked_end); 8850 rack->rc_last_tlp_acked_set = 0; 8851 rack->rc_last_tlp_past_cumack = 0; 8852 } else if ((rack->rc_last_tlp_acked_set == 1) && 8853 (rack->rc_last_tlp_past_cumack == 0) && 8854 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8855 /* 8856 * It is safe to start aging TLP's out. 8857 */ 8858 rack->rc_last_tlp_past_cumack = 1; 8859 } 8860 /* We do the same for the tlp send seq as well */ 8861 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8862 (rack->rc_last_sent_tlp_past_cumack == 1) && 8863 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8864 rack_log_dsack_event(rack, 9, __LINE__, 8865 rack->r_ctl.last_sent_tlp_seq, 8866 (rack->r_ctl.last_sent_tlp_seq + 8867 rack->r_ctl.last_sent_tlp_len)); 8868 rack->rc_last_sent_tlp_seq_valid = 0; 8869 rack->rc_last_sent_tlp_past_cumack = 0; 8870 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8871 (rack->rc_last_sent_tlp_past_cumack == 0) && 8872 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8873 /* 8874 * It is safe to start aging the TLP send sequence out. 8875 */ 8876 rack->rc_last_sent_tlp_past_cumack = 1; 8877 } 8878 more: 8879 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8880 if (rsm == NULL) { 8881 if ((th_ack - 1) == tp->iss) { 8882 /* 8883 * For the SYN incoming case we will not 8884 * have called tcp_output for the sending of 8885 * the SYN, so there will be no map. All 8886 * other cases should probably be a panic.
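* (Under INVARIANTS the fall-through case below does panic; otherwise
* we simply return.)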
8887 */ 8888 return; 8889 } 8890 if (tp->t_flags & TF_SENTFIN) { 8891 /* if we sent a FIN we often will not have a map */ 8892 return; 8893 } 8894 #ifdef INVARIANTS 8895 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8896 tp, 8897 tp->t_state, th_ack, rack, 8898 tp->snd_una, tp->snd_max, tp->snd_nxt); 8899 #endif 8900 return; 8901 } 8902 if (SEQ_LT(th_ack, rsm->r_start)) { 8903 /* Huh, the map is missing this */ 8904 #ifdef INVARIANTS 8905 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 8906 rsm->r_start, 8907 th_ack, tp->t_state, rack->r_state); 8908 #endif 8909 return; 8910 } 8911 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 8912 8913 /* Now was it a retransmitted TLP? */ 8914 if ((rsm->r_flags & RACK_TLP) && 8915 (rsm->r_rtr_cnt > 1)) { 8916 /* 8917 * Yes, this rsm was a TLP and retransmitted, remember that 8918 * since if a DSACK comes back on this we don't want 8919 * to think of it as a reordered segment. This may 8920 * get updated again with possibly even other TLPs 8921 * in flight, but that's ok. Only when we don't send 8922 * a retransmitted TLP for 1/2 the sequence space 8923 * will it get turned off (above). 8924 */ 8925 if (rack->rc_last_tlp_acked_set && 8926 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8927 /* 8928 * We already turned this on since the end matches; 8929 * the previous one was partially acked and now we 8930 * are getting another piece (maybe all of it). 8931 */ 8932 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8933 /* 8934 * Let's make sure we have all of it, though. 8935 */ 8936 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8937 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8938 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8939 rack->r_ctl.last_tlp_acked_end); 8940 } 8941 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8942 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8943 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8944 rack->r_ctl.last_tlp_acked_end); 8945 } 8946 } else { 8947 rack->rc_last_tlp_past_cumack = 1; 8948 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8949 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8950 rack->rc_last_tlp_acked_set = 1; 8951 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8952 } 8953 } 8954 /* Now do we consume the whole thing? */ 8955 if (SEQ_GEQ(th_ack, rsm->r_end)) { 8956 /* It's all consumed.
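* th_ack is at or beyond r_end, so the whole entry is accounted for and
* freed below (after checking whether it was already SACKed, shows
* reordering via RACK_SACK_PASSED, or points at a spurious RTO).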
*/ 8957 uint32_t left; 8958 uint8_t newly_acked; 8959 8960 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8961 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8962 rsm->r_rtr_bytes = 0; 8963 /* Record the time of highest cumack sent */ 8964 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8965 #ifndef INVARIANTS 8966 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8967 #else 8968 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8969 if (rm != rsm) { 8970 panic("removing head in rack:%p rsm:%p rm:%p", 8971 rack, rsm, rm); 8972 } 8973 #endif 8974 if (rsm->r_in_tmap) { 8975 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8976 rsm->r_in_tmap = 0; 8977 } 8978 newly_acked = 1; 8979 if (rsm->r_flags & RACK_ACKED) { 8980 /* 8981 * It was acked on the scoreboard -- remove 8982 * it from total 8983 */ 8984 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8985 newly_acked = 0; 8986 } else if (rsm->r_flags & RACK_SACK_PASSED) { 8987 /* 8988 * There are segments ACKED on the 8989 * scoreboard further up. We are seeing 8990 * reordering. 8991 */ 8992 rsm->r_flags &= ~RACK_SACK_PASSED; 8993 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8994 rsm->r_flags |= RACK_ACKED; 8995 rack->r_ctl.rc_reorder_ts = cts; 8996 if (rack->r_ent_rec_ns) { 8997 /* 8998 * We have sent no more, and we saw an sack 8999 * then ack arrive. 9000 */ 9001 rack->r_might_revert = 1; 9002 } 9003 } 9004 if ((rsm->r_flags & RACK_TO_REXT) && 9005 (tp->t_flags & TF_RCVD_TSTMP) && 9006 (to->to_flags & TOF_TS) && 9007 (to->to_tsecr != 0) && 9008 (tp->t_flags & TF_PREVVALID)) { 9009 /* 9010 * We can use the timestamp to see 9011 * if this retransmission was from the 9012 * first transmit. If so we made a mistake. 9013 */ 9014 tp->t_flags &= ~TF_PREVVALID; 9015 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9016 /* The first transmit is what this ack is for */ 9017 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 9018 } 9019 } 9020 left = th_ack - rsm->r_end; 9021 if (rack->app_limited_needs_set && newly_acked) 9022 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9023 /* Free back to zone */ 9024 rack_free(rack, rsm); 9025 if (left) { 9026 goto more; 9027 } 9028 /* Check for reneging */ 9029 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9030 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9031 /* 9032 * The peer has moved snd_una up to 9033 * the edge of this send, i.e. one 9034 * that it had previously acked. The only 9035 * way that can be true if the peer threw 9036 * away data (space issues) that it had 9037 * previously sacked (else it would have 9038 * given us snd_una up to (rsm->r_end). 9039 * We need to undo the acked markings here. 9040 * 9041 * Note we have to look to make sure th_ack is 9042 * our rsm->r_start in case we get an old ack 9043 * where th_ack is behind snd_una. 9044 */ 9045 rack_peer_reneges(rack, rsm, th_ack); 9046 } 9047 return; 9048 } 9049 if (rsm->r_flags & RACK_ACKED) { 9050 /* 9051 * It was acked on the scoreboard -- remove it from 9052 * total for the part being cum-acked. 9053 */ 9054 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9055 } 9056 /* 9057 * Clear the dup ack count for 9058 * the piece that remains. 9059 */ 9060 rsm->r_dupack = 0; 9061 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9062 if (rsm->r_rtr_bytes) { 9063 /* 9064 * It was retransmitted adjust the 9065 * sack holes for what was acked. 
9066 */ 9067 int ack_am; 9068 9069 ack_am = (th_ack - rsm->r_start); 9070 if (ack_am >= rsm->r_rtr_bytes) { 9071 rack->r_ctl.rc_holes_rxt -= ack_am; 9072 rsm->r_rtr_bytes -= ack_am; 9073 } 9074 } 9075 /* 9076 * Update where the piece starts and record 9077 * the time of send of highest cumack sent. 9078 */ 9079 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9080 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9081 /* Now we need to move our offset forward too */ 9082 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9083 /* Fix up the orig_m_len and possibly the mbuf offset */ 9084 rack_adjust_orig_mlen(rsm); 9085 } 9086 rsm->soff += (th_ack - rsm->r_start); 9087 rsm->r_start = th_ack; 9088 /* Now do we need to move the mbuf fwd too? */ 9089 if (rsm->m) { 9090 while (rsm->soff >= rsm->m->m_len) { 9091 rsm->soff -= rsm->m->m_len; 9092 rsm->m = rsm->m->m_next; 9093 KASSERT((rsm->m != NULL), 9094 (" nrsm:%p hit at soff:%u null m", 9095 rsm, rsm->soff)); 9096 } 9097 rsm->orig_m_len = rsm->m->m_len; 9098 } 9099 if (rack->app_limited_needs_set) 9100 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9101 } 9102 9103 static void 9104 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9105 { 9106 struct rack_sendmap *rsm; 9107 int sack_pass_fnd = 0; 9108 9109 if (rack->r_might_revert) { 9110 /* 9111 * Ok we have reordering, have not sent anything, we 9112 * might want to revert the congestion state if nothing 9113 * further has SACK_PASSED on it. Lets check. 9114 * 9115 * We also get here when we have DSACKs come in for 9116 * all the data that we FR'd. Note that a rxt or tlp 9117 * timer clears this from happening. 9118 */ 9119 9120 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9121 if (rsm->r_flags & RACK_SACK_PASSED) { 9122 sack_pass_fnd = 1; 9123 break; 9124 } 9125 } 9126 if (sack_pass_fnd == 0) { 9127 /* 9128 * We went into recovery 9129 * incorrectly due to reordering! 9130 */ 9131 int orig_cwnd; 9132 9133 rack->r_ent_rec_ns = 0; 9134 orig_cwnd = tp->snd_cwnd; 9135 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9136 tp->snd_recover = tp->snd_una; 9137 rack_log_to_prr(rack, 14, orig_cwnd); 9138 EXIT_RECOVERY(tp->t_flags); 9139 } 9140 rack->r_might_revert = 0; 9141 } 9142 } 9143 9144 #ifdef NETFLIX_EXP_DETECTION 9145 static void 9146 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9147 { 9148 if ((rack->do_detection || tcp_force_detection) && 9149 tcp_sack_to_ack_thresh && 9150 tcp_sack_to_move_thresh && 9151 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9152 /* 9153 * We have thresholds set to find 9154 * possible attackers and disable sack. 9155 * Check them. 
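* Both ratios computed below are scaled by 1000: ackratio compares the
* running sack_count against ack_count, and moveratio compares the
* 'extra move' sack count against the total number of sack moves.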
9156 */ 9157 uint64_t ackratio, moveratio, movetotal; 9158 9159 /* Log detecting */ 9160 rack_log_sad(rack, 1); 9161 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9162 ackratio *= (uint64_t)(1000); 9163 if (rack->r_ctl.ack_count) 9164 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9165 else { 9166 /* We really should not hit here */ 9167 ackratio = 1000; 9168 } 9169 if ((rack->sack_attack_disable == 0) && 9170 (ackratio > rack_highest_sack_thresh_seen)) 9171 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9172 movetotal = rack->r_ctl.sack_moved_extra; 9173 movetotal += rack->r_ctl.sack_noextra_move; 9174 moveratio = rack->r_ctl.sack_moved_extra; 9175 moveratio *= (uint64_t)1000; 9176 if (movetotal) 9177 moveratio /= movetotal; 9178 else { 9179 /* No moves, thats pretty good */ 9180 moveratio = 0; 9181 } 9182 if ((rack->sack_attack_disable == 0) && 9183 (moveratio > rack_highest_move_thresh_seen)) 9184 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9185 if (rack->sack_attack_disable == 0) { 9186 if ((ackratio > tcp_sack_to_ack_thresh) && 9187 (moveratio > tcp_sack_to_move_thresh)) { 9188 /* Disable sack processing */ 9189 rack->sack_attack_disable = 1; 9190 if (rack->r_rep_attack == 0) { 9191 rack->r_rep_attack = 1; 9192 counter_u64_add(rack_sack_attacks_detected, 1); 9193 } 9194 if (tcp_attack_on_turns_on_logging) { 9195 /* 9196 * Turn on logging, used for debugging 9197 * false positives. 9198 */ 9199 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9200 } 9201 /* Clamp the cwnd at flight size */ 9202 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9203 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9204 rack_log_sad(rack, 2); 9205 } 9206 } else { 9207 /* We are sack-disabled check for false positives */ 9208 if ((ackratio <= tcp_restoral_thresh) || 9209 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9210 rack->sack_attack_disable = 0; 9211 rack_log_sad(rack, 3); 9212 /* Restart counting */ 9213 rack->r_ctl.sack_count = 0; 9214 rack->r_ctl.sack_moved_extra = 0; 9215 rack->r_ctl.sack_noextra_move = 1; 9216 rack->r_ctl.ack_count = max(1, 9217 (bytes_this_ack / segsiz)); 9218 9219 if (rack->r_rep_reverse == 0) { 9220 rack->r_rep_reverse = 1; 9221 counter_u64_add(rack_sack_attacks_reversed, 1); 9222 } 9223 /* Restore the cwnd */ 9224 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9225 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9226 } 9227 } 9228 } 9229 } 9230 #endif 9231 9232 static int 9233 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9234 { 9235 9236 uint32_t am, l_end; 9237 int was_tlp = 0; 9238 9239 if (SEQ_GT(end, start)) 9240 am = end - start; 9241 else 9242 am = 0; 9243 if ((rack->rc_last_tlp_acked_set ) && 9244 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9245 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9246 /* 9247 * The DSACK is because of a TLP which we don't 9248 * do anything with the reordering window over since 9249 * it was not reordering that caused the DSACK but 9250 * our previous retransmit TLP. 9251 */ 9252 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9253 was_tlp = 1; 9254 goto skip_dsack_round; 9255 } 9256 if (rack->rc_last_sent_tlp_seq_valid) { 9257 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9258 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9259 (SEQ_LEQ(end, l_end))) { 9260 /* 9261 * This dsack is from the last sent TLP, ignore it 9262 * for reordering purposes. 
9263 */ 9264 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9265 was_tlp = 1; 9266 goto skip_dsack_round; 9267 } 9268 } 9269 if (rack->rc_dsack_round_seen == 0) { 9270 rack->rc_dsack_round_seen = 1; 9271 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9272 rack->r_ctl.num_dsack++; 9273 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9274 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9275 } 9276 skip_dsack_round: 9277 /* 9278 * We keep track of how many DSACK blocks we get 9279 * after a recovery incident. 9280 */ 9281 rack->r_ctl.dsack_byte_cnt += am; 9282 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9283 rack->r_ctl.retran_during_recovery && 9284 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9285 /* 9286 * False recovery most likely culprit is reordering. If 9287 * nothing else is missing we need to revert. 9288 */ 9289 rack->r_might_revert = 1; 9290 rack_handle_might_revert(rack->rc_tp, rack); 9291 rack->r_might_revert = 0; 9292 rack->r_ctl.retran_during_recovery = 0; 9293 rack->r_ctl.dsack_byte_cnt = 0; 9294 } 9295 return (was_tlp); 9296 } 9297 9298 static void 9299 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9300 { 9301 /* Deal with changed and PRR here (in recovery only) */ 9302 uint32_t pipe, snd_una; 9303 9304 rack->r_ctl.rc_prr_delivered += changed; 9305 9306 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9307 /* 9308 * It is all outstanding, we are application limited 9309 * and thus we don't need more room to send anything. 9310 * Note we use tp->snd_una here and not th_ack because 9311 * the data as yet not been cut from the sb. 9312 */ 9313 rack->r_ctl.rc_prr_sndcnt = 0; 9314 return; 9315 } 9316 /* Compute prr_sndcnt */ 9317 if (SEQ_GT(tp->snd_una, th_ack)) { 9318 snd_una = tp->snd_una; 9319 } else { 9320 snd_una = th_ack; 9321 } 9322 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9323 if (pipe > tp->snd_ssthresh) { 9324 long sndcnt; 9325 9326 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9327 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9328 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9329 else { 9330 rack->r_ctl.rc_prr_sndcnt = 0; 9331 rack_log_to_prr(rack, 9, 0); 9332 sndcnt = 0; 9333 } 9334 sndcnt++; 9335 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9336 sndcnt -= rack->r_ctl.rc_prr_out; 9337 else 9338 sndcnt = 0; 9339 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9340 rack_log_to_prr(rack, 10, 0); 9341 } else { 9342 uint32_t limit; 9343 9344 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9345 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9346 else 9347 limit = 0; 9348 if (changed > limit) 9349 limit = changed; 9350 limit += ctf_fixed_maxseg(tp); 9351 if (tp->snd_ssthresh > pipe) { 9352 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9353 rack_log_to_prr(rack, 11, 0); 9354 } else { 9355 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9356 rack_log_to_prr(rack, 12, 0); 9357 } 9358 } 9359 } 9360 9361 static void 9362 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9363 { 9364 uint32_t changed; 9365 struct tcp_rack *rack; 9366 struct rack_sendmap *rsm; 9367 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9368 register uint32_t th_ack; 9369 int32_t i, j, k, num_sack_blks = 0; 9370 uint32_t cts, acked, ack_point; 9371 int loop_start = 0, moved_two = 0; 9372 uint32_t tsused; 9373 9374 9375 
INP_WLOCK_ASSERT(tp->t_inpcb); 9376 if (tcp_get_flags(th) & TH_RST) { 9377 /* We don't log resets */ 9378 return; 9379 } 9380 rack = (struct tcp_rack *)tp->t_fb_ptr; 9381 cts = tcp_get_usecs(NULL); 9382 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9383 changed = 0; 9384 th_ack = th->th_ack; 9385 if (rack->sack_attack_disable == 0) 9386 rack_do_decay(rack); 9387 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9388 /* 9389 * You only get credit for 9390 * MSS and greater (and you get extra 9391 * credit for larger cum-ack moves). 9392 */ 9393 int ac; 9394 9395 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9396 rack->r_ctl.ack_count += ac; 9397 counter_u64_add(rack_ack_total, ac); 9398 } 9399 if (rack->r_ctl.ack_count > 0xfff00000) { 9400 /* 9401 * reduce the number to keep us under 9402 * a uint32_t. 9403 */ 9404 rack->r_ctl.ack_count /= 2; 9405 rack->r_ctl.sack_count /= 2; 9406 } 9407 if (SEQ_GT(th_ack, tp->snd_una)) { 9408 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9409 tp->t_acktime = ticks; 9410 } 9411 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9412 changed = th_ack - rsm->r_start; 9413 if (changed) { 9414 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9415 } 9416 if ((to->to_flags & TOF_SACK) == 0) { 9417 /* We are done nothing left and no sack. */ 9418 rack_handle_might_revert(tp, rack); 9419 /* 9420 * For cases where we struck a dup-ack 9421 * with no SACK, add to the changes so 9422 * PRR will work right. 9423 */ 9424 if (dup_ack_struck && (changed == 0)) { 9425 changed += ctf_fixed_maxseg(rack->rc_tp); 9426 } 9427 goto out; 9428 } 9429 /* Sack block processing */ 9430 if (SEQ_GT(th_ack, tp->snd_una)) 9431 ack_point = th_ack; 9432 else 9433 ack_point = tp->snd_una; 9434 for (i = 0; i < to->to_nsacks; i++) { 9435 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9436 &sack, sizeof(sack)); 9437 sack.start = ntohl(sack.start); 9438 sack.end = ntohl(sack.end); 9439 if (SEQ_GT(sack.end, sack.start) && 9440 SEQ_GT(sack.start, ack_point) && 9441 SEQ_LT(sack.start, tp->snd_max) && 9442 SEQ_GT(sack.end, ack_point) && 9443 SEQ_LEQ(sack.end, tp->snd_max)) { 9444 sack_blocks[num_sack_blks] = sack; 9445 num_sack_blks++; 9446 } else if (SEQ_LEQ(sack.start, th_ack) && 9447 SEQ_LEQ(sack.end, th_ack)) { 9448 int was_tlp; 9449 9450 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9451 /* 9452 * Its a D-SACK block. 9453 */ 9454 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9455 } 9456 } 9457 if (rack->rc_dsack_round_seen) { 9458 /* Is the dsack roound over? */ 9459 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9460 /* Yes it is */ 9461 rack->rc_dsack_round_seen = 0; 9462 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9463 } 9464 } 9465 /* 9466 * Sort the SACK blocks so we can update the rack scoreboard with 9467 * just one pass. 9468 */ 9469 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9470 num_sack_blks, th->th_ack); 9471 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9472 if (num_sack_blks == 0) { 9473 /* Nothing to sack (DSACKs?) 
*/ 9474 goto out_with_totals; 9475 } 9476 if (num_sack_blks < 2) { 9477 /* Only one, we don't need to sort */ 9478 goto do_sack_work; 9479 } 9480 /* Sort the sacks */ 9481 for (i = 0; i < num_sack_blks; i++) { 9482 for (j = i + 1; j < num_sack_blks; j++) { 9483 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9484 sack = sack_blocks[i]; 9485 sack_blocks[i] = sack_blocks[j]; 9486 sack_blocks[j] = sack; 9487 } 9488 } 9489 } 9490 /* 9491 * Now are any of the sack block ends the same (yes some 9492 * implementations send these)? 9493 */ 9494 again: 9495 if (num_sack_blks == 0) 9496 goto out_with_totals; 9497 if (num_sack_blks > 1) { 9498 for (i = 0; i < num_sack_blks; i++) { 9499 for (j = i + 1; j < num_sack_blks; j++) { 9500 if (sack_blocks[i].end == sack_blocks[j].end) { 9501 /* 9502 * Ok these two have the same end we 9503 * want the smallest end and then 9504 * throw away the larger and start 9505 * again. 9506 */ 9507 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9508 /* 9509 * The second block covers 9510 * more area use that 9511 */ 9512 sack_blocks[i].start = sack_blocks[j].start; 9513 } 9514 /* 9515 * Now collapse out the dup-sack and 9516 * lower the count 9517 */ 9518 for (k = (j + 1); k < num_sack_blks; k++) { 9519 sack_blocks[j].start = sack_blocks[k].start; 9520 sack_blocks[j].end = sack_blocks[k].end; 9521 j++; 9522 } 9523 num_sack_blks--; 9524 goto again; 9525 } 9526 } 9527 } 9528 } 9529 do_sack_work: 9530 /* 9531 * First lets look to see if 9532 * we have retransmitted and 9533 * can use the transmit next? 9534 */ 9535 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9536 if (rsm && 9537 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9538 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9539 /* 9540 * We probably did the FR and the next 9541 * SACK in continues as we would expect. 9542 */ 9543 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9544 if (acked) { 9545 rack->r_wanted_output = 1; 9546 changed += acked; 9547 } 9548 if (num_sack_blks == 1) { 9549 /* 9550 * This is what we would expect from 9551 * a normal implementation to happen 9552 * after we have retransmitted the FR, 9553 * i.e the sack-filter pushes down 9554 * to 1 block and the next to be retransmitted 9555 * is the sequence in the sack block (has more 9556 * are acked). Count this as ACK'd data to boost 9557 * up the chances of recovering any false positives. 9558 */ 9559 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9560 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9561 counter_u64_add(rack_express_sack, 1); 9562 if (rack->r_ctl.ack_count > 0xfff00000) { 9563 /* 9564 * reduce the number to keep us under 9565 * a uint32_t. 9566 */ 9567 rack->r_ctl.ack_count /= 2; 9568 rack->r_ctl.sack_count /= 2; 9569 } 9570 goto out_with_totals; 9571 } else { 9572 /* 9573 * Start the loop through the 9574 * rest of blocks, past the first block. 9575 */ 9576 moved_two = 0; 9577 loop_start = 1; 9578 } 9579 } 9580 /* Its a sack of some sort */ 9581 rack->r_ctl.sack_count++; 9582 if (rack->r_ctl.sack_count > 0xfff00000) { 9583 /* 9584 * reduce the number to keep us under 9585 * a uint32_t. 
9586 */ 9587 rack->r_ctl.ack_count /= 2; 9588 rack->r_ctl.sack_count /= 2; 9589 } 9590 counter_u64_add(rack_sack_total, 1); 9591 if (rack->sack_attack_disable) { 9592 /* An attacker disablement is in place */ 9593 if (num_sack_blks > 1) { 9594 rack->r_ctl.sack_count += (num_sack_blks - 1); 9595 rack->r_ctl.sack_moved_extra++; 9596 counter_u64_add(rack_move_some, 1); 9597 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9598 rack->r_ctl.sack_moved_extra /= 2; 9599 rack->r_ctl.sack_noextra_move /= 2; 9600 } 9601 } 9602 goto out; 9603 } 9604 rsm = rack->r_ctl.rc_sacklast; 9605 for (i = loop_start; i < num_sack_blks; i++) { 9606 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9607 if (acked) { 9608 rack->r_wanted_output = 1; 9609 changed += acked; 9610 } 9611 if (moved_two) { 9612 /* 9613 * If we did not get a SACK for at least a MSS and 9614 * had to move at all, or if we moved more than our 9615 * threshold, it counts against the "extra" move. 9616 */ 9617 rack->r_ctl.sack_moved_extra += moved_two; 9618 counter_u64_add(rack_move_some, 1); 9619 } else { 9620 /* 9621 * else we did not have to move 9622 * any more than we would expect. 9623 */ 9624 rack->r_ctl.sack_noextra_move++; 9625 counter_u64_add(rack_move_none, 1); 9626 } 9627 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9628 /* 9629 * If the SACK was not a full MSS then 9630 * we add to sack_count the number of 9631 * MSS's (or possibly more than 9632 * a MSS if its a TSO send) we had to skip by. 9633 */ 9634 rack->r_ctl.sack_count += moved_two; 9635 counter_u64_add(rack_sack_total, moved_two); 9636 } 9637 /* 9638 * Now we need to setup for the next 9639 * round. First we make sure we won't 9640 * exceed the size of our uint32_t on 9641 * the various counts, and then clear out 9642 * moved_two. 9643 */ 9644 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9645 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9646 rack->r_ctl.sack_moved_extra /= 2; 9647 rack->r_ctl.sack_noextra_move /= 2; 9648 } 9649 if (rack->r_ctl.sack_count > 0xfff00000) { 9650 rack->r_ctl.ack_count /= 2; 9651 rack->r_ctl.sack_count /= 2; 9652 } 9653 moved_two = 0; 9654 } 9655 out_with_totals: 9656 if (num_sack_blks > 1) { 9657 /* 9658 * You get an extra stroke if 9659 * you have more than one sack-blk, this 9660 * could be where we are skipping forward 9661 * and the sack-filter is still working, or 9662 * it could be an attacker constantly 9663 * moving us. 9664 */ 9665 rack->r_ctl.sack_moved_extra++; 9666 counter_u64_add(rack_move_some, 1); 9667 } 9668 out: 9669 #ifdef NETFLIX_EXP_DETECTION 9670 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9671 #endif 9672 if (changed) { 9673 /* Something changed cancel the rack timer */ 9674 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9675 } 9676 tsused = tcp_get_usecs(NULL); 9677 rsm = tcp_rack_output(tp, rack, tsused); 9678 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9679 rsm) { 9680 /* Enter recovery */ 9681 entered_recovery = 1; 9682 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9683 /* 9684 * When we enter recovery we need to assure we send 9685 * one packet. 
9686 */ 9687 if (rack->rack_no_prr == 0) { 9688 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9689 rack_log_to_prr(rack, 8, 0); 9690 } 9691 rack->r_timer_override = 1; 9692 rack->r_early = 0; 9693 rack->r_ctl.rc_agg_early = 0; 9694 } else if (IN_FASTRECOVERY(tp->t_flags) && 9695 rsm && 9696 (rack->r_rr_config == 3)) { 9697 /* 9698 * Assure we can output and we get no 9699 * remembered pace time except the retransmit. 9700 */ 9701 rack->r_timer_override = 1; 9702 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9703 rack->r_ctl.rc_resend = rsm; 9704 } 9705 if (IN_FASTRECOVERY(tp->t_flags) && 9706 (rack->rack_no_prr == 0) && 9707 (entered_recovery == 0)) { 9708 rack_update_prr(tp, rack, changed, th_ack); 9709 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9710 ((tcp_in_hpts(rack->rc_inp) == 0) && 9711 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9712 /* 9713 * If you are pacing output you don't want 9714 * to override. 9715 */ 9716 rack->r_early = 0; 9717 rack->r_ctl.rc_agg_early = 0; 9718 rack->r_timer_override = 1; 9719 } 9720 } 9721 } 9722 9723 static void 9724 rack_strike_dupack(struct tcp_rack *rack) 9725 { 9726 struct rack_sendmap *rsm; 9727 9728 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9729 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9730 rsm = TAILQ_NEXT(rsm, r_tnext); 9731 } 9732 if (rsm && (rsm->r_dupack < 0xff)) { 9733 rsm->r_dupack++; 9734 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9735 struct timeval tv; 9736 uint32_t cts; 9737 /* 9738 * Here we see if we need to retransmit. For 9739 * a SACK type connection if enough time has passed 9740 * we will get a return of the rsm. For a non-sack 9741 * connection we will get the rsm returned if the 9742 * dupack value is 3 or more. 9743 */ 9744 cts = tcp_get_usecs(&tv); 9745 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9746 if (rack->r_ctl.rc_resend != NULL) { 9747 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9748 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9749 rack->rc_tp->snd_una); 9750 } 9751 rack->r_wanted_output = 1; 9752 rack->r_timer_override = 1; 9753 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9754 } 9755 } else { 9756 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9757 } 9758 } 9759 } 9760 9761 static void 9762 rack_check_bottom_drag(struct tcpcb *tp, 9763 struct tcp_rack *rack, 9764 struct socket *so, int32_t acked) 9765 { 9766 uint32_t segsiz, minseg; 9767 9768 segsiz = ctf_fixed_maxseg(tp); 9769 minseg = segsiz; 9770 9771 if (tp->snd_max == tp->snd_una) { 9772 /* 9773 * We are doing dynamic pacing and we are way 9774 * under. Basically everything got acked while 9775 * we were still waiting on the pacer to expire. 9776 * 9777 * This means we need to boost the b/w in 9778 * addition to any earlier boosting of 9779 * the multipler. 9780 */ 9781 rack->rc_dragged_bottom = 1; 9782 rack_validate_multipliers_at_or_above100(rack); 9783 /* 9784 * Lets use the segment bytes acked plus 9785 * the lowest RTT seen as the basis to 9786 * form a b/w estimate. This will be off 9787 * due to the fact that the true estimate 9788 * should be around 1/2 the time of the RTT 9789 * but we can settle for that. 9790 */ 9791 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9792 acked) { 9793 uint64_t bw, calc_bw, rtt; 9794 9795 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9796 if (rtt == 0) { 9797 /* no us sample is there a ms one? 
*/ 9798 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9799 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9800 } else { 9801 goto no_measurement; 9802 } 9803 } 9804 bw = acked; 9805 calc_bw = bw * 1000000; 9806 calc_bw /= rtt; 9807 if (rack->r_ctl.last_max_bw && 9808 (rack->r_ctl.last_max_bw < calc_bw)) { 9809 /* 9810 * If we have a last calculated max bw 9811 * enforce it. 9812 */ 9813 calc_bw = rack->r_ctl.last_max_bw; 9814 } 9815 /* now plop it in */ 9816 if (rack->rc_gp_filled == 0) { 9817 if (calc_bw > ONE_POINT_TWO_MEG) { 9818 /* 9819 * If we have no measurement 9820 * don't let us set in more than 9821 * 1.2Mbps. If we are still too 9822 * low after pacing with this we 9823 * will hopefully have a max b/w 9824 * available to sanity check things. 9825 */ 9826 calc_bw = ONE_POINT_TWO_MEG; 9827 } 9828 rack->r_ctl.rc_rtt_diff = 0; 9829 rack->r_ctl.gp_bw = calc_bw; 9830 rack->rc_gp_filled = 1; 9831 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9832 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9833 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9834 } else if (calc_bw > rack->r_ctl.gp_bw) { 9835 rack->r_ctl.rc_rtt_diff = 0; 9836 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9837 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9838 rack->r_ctl.gp_bw = calc_bw; 9839 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9840 } else 9841 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9842 if ((rack->gp_ready == 0) && 9843 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9844 /* We have enough measurements now */ 9845 rack->gp_ready = 1; 9846 rack_set_cc_pacing(rack); 9847 if (rack->defer_options) 9848 rack_apply_deferred_options(rack); 9849 } 9850 /* 9851 * For acks over 1mss we do a extra boost to simulate 9852 * where we would get 2 acks (we want 110 for the mul). 9853 */ 9854 if (acked > segsiz) 9855 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9856 } else { 9857 /* 9858 * zero rtt possibly?, settle for just an old increase. 9859 */ 9860 no_measurement: 9861 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9862 } 9863 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9864 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9865 minseg)) && 9866 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9867 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9868 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9869 (segsiz * rack_req_segs))) { 9870 /* 9871 * We are doing dynamic GP pacing and 9872 * we have everything except 1MSS or less 9873 * bytes left out. We are still pacing away. 9874 * And there is data that could be sent, This 9875 * means we are inserting delayed ack time in 9876 * our measurements because we are pacing too slow. 9877 */ 9878 rack_validate_multipliers_at_or_above100(rack); 9879 rack->rc_dragged_bottom = 1; 9880 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9881 } 9882 } 9883 9884 9885 9886 static void 9887 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9888 { 9889 /* 9890 * The fast output path is enabled and we 9891 * have moved the cumack forward. Lets see if 9892 * we can expand forward the fast path length by 9893 * that amount. What we would ideally like to 9894 * do is increase the number of bytes in the 9895 * fast path block (left_to_send) by the 9896 * acked amount. However we have to gate that 9897 * by two factors: 9898 * 1) The amount outstanding and the rwnd of the peer 9899 * (i.e. we don't want to exceed the rwnd of the peer). 
9900 * <and> 9901 * 2) The amount of data left in the socket buffer (i.e. 9902 * we can't send beyond what is in the buffer). 9903 * 9904 * Note that this does not take into account any increase 9905 * in the cwnd. We will only extend the fast path by 9906 * what was acked. 9907 */ 9908 uint32_t new_total, gating_val; 9909 9910 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 9911 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 9912 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 9913 if (new_total <= gating_val) { 9914 /* We can increase left_to_send by the acked amount */ 9915 counter_u64_add(rack_extended_rfo, 1); 9916 rack->r_ctl.fsb.left_to_send = new_total; 9917 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 9918 ("rack:%p left_to_send:%u sbavail:%u out:%u", 9919 rack, rack->r_ctl.fsb.left_to_send, 9920 sbavail(&rack->rc_inp->inp_socket->so_snd), 9921 (tp->snd_max - tp->snd_una))); 9922 9923 } 9924 } 9925 9926 static void 9927 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 9928 { 9929 /* 9930 * Here any sendmap entry that points to the 9931 * beginning mbuf must be adjusted to the correct 9932 * offset. This must be called with: 9933 * 1) The socket buffer locked 9934 * 2) snd_una adjusted to its new position. 9935 * 9936 * Note that (2) implies rack_ack_received has also 9937 * been called. 9938 * 9939 * We grab the first mbuf in the socket buffer and 9940 * then go through the front of the sendmap, recalculating 9941 * the stored offset for any sendmap entry that has 9942 * that mbuf. We must use the sb functions to do this 9943 * since it's possible an add was done as well as 9944 * the subtraction we may have just completed. This should 9945 * not be a penalty though, since we just referenced the sb 9946 * to go in and trim off the mbufs that we freed (of course 9947 * there will be a penalty for the sendmap references though). 9948 */ 9949 struct mbuf *m; 9950 struct rack_sendmap *rsm; 9951 9952 SOCKBUF_LOCK_ASSERT(sb); 9953 m = sb->sb_mb; 9954 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9955 if ((rsm == NULL) || (m == NULL)) { 9956 /* Nothing outstanding */ 9957 return; 9958 } 9959 while (rsm->m && (rsm->m == m)) { 9960 /* one to adjust */ 9961 #ifdef INVARIANTS 9962 struct mbuf *tm; 9963 uint32_t soff; 9964 9965 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 9966 if (rsm->orig_m_len != m->m_len) { 9967 rack_adjust_orig_mlen(rsm); 9968 } 9969 if (rsm->soff != soff) { 9970 /* 9971 * This is not a fatal error; we anticipate it 9972 * might happen (the else code), so we count it here 9973 * so that under INVARIANTS we can see that it really 9974 * does happen. 9975 */ 9976 counter_u64_add(rack_adjust_map_bw, 1); 9977 } 9978 rsm->m = tm; 9979 rsm->soff = soff; 9980 if (tm) 9981 rsm->orig_m_len = rsm->m->m_len; 9982 else 9983 rsm->orig_m_len = 0; 9984 #else 9985 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 9986 if (rsm->m) 9987 rsm->orig_m_len = rsm->m->m_len; 9988 else 9989 rsm->orig_m_len = 0; 9990 #endif 9991 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 9992 rsm); 9993 if (rsm == NULL) 9994 break; 9995 } 9996 } 9997 9998 /* 9999 * Return value of 1, we do not need to call rack_process_data(). 10000 * Return value of 0, rack_process_data can be called. 10001 * For ret_val, if it's 0 the TCP is locked; if it's non-zero 10002 * it's unlocked and probably unsafe to touch the TCB.
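* Callers that see a return of 1 simply hand back ret_val (see e.g. rack_do_established) and must not touch the tcpcb again when ret_val is non-zero.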
10003 */ 10004 static int 10005 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10006 struct tcpcb *tp, struct tcpopt *to, 10007 uint32_t tiwin, int32_t tlen, 10008 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10009 { 10010 int32_t ourfinisacked = 0; 10011 int32_t nsegs, acked_amount; 10012 int32_t acked; 10013 struct mbuf *mfree; 10014 struct tcp_rack *rack; 10015 int32_t under_pacing = 0; 10016 int32_t recovery = 0; 10017 10018 rack = (struct tcp_rack *)tp->t_fb_ptr; 10019 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10020 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10021 &rack->r_ctl.challenge_ack_ts, 10022 &rack->r_ctl.challenge_ack_cnt); 10023 rack->r_wanted_output = 1; 10024 return (1); 10025 } 10026 if (rack->gp_ready && 10027 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10028 under_pacing = 1; 10029 } 10030 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10031 int in_rec, dup_ack_struck = 0; 10032 10033 in_rec = IN_FASTRECOVERY(tp->t_flags); 10034 if (rack->rc_in_persist) { 10035 tp->t_rxtshift = 0; 10036 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10037 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10038 } 10039 if ((th->th_ack == tp->snd_una) && 10040 (tiwin == tp->snd_wnd) && 10041 ((to->to_flags & TOF_SACK) == 0)) { 10042 rack_strike_dupack(rack); 10043 dup_ack_struck = 1; 10044 } 10045 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10046 } 10047 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10048 /* 10049 * Old ack, behind (or duplicate to) the last one rcv'd 10050 * Note: We mark reordering is occuring if its 10051 * less than and we have not closed our window. 10052 */ 10053 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10054 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10055 } 10056 return (0); 10057 } 10058 /* 10059 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10060 * something we sent. 10061 */ 10062 if (tp->t_flags & TF_NEEDSYN) { 10063 /* 10064 * T/TCP: Connection was half-synchronized, and our SYN has 10065 * been ACK'd (so connection is now fully synchronized). Go 10066 * to non-starred state, increment snd_una for ACK of SYN, 10067 * and check if we can do window scaling. 10068 */ 10069 tp->t_flags &= ~TF_NEEDSYN; 10070 tp->snd_una++; 10071 /* Do window scaling? */ 10072 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10073 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10074 tp->rcv_scale = tp->request_r_scale; 10075 /* Send window already scaled. */ 10076 } 10077 } 10078 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10079 INP_WLOCK_ASSERT(tp->t_inpcb); 10080 10081 acked = BYTES_THIS_ACK(tp, th); 10082 if (acked) { 10083 /* 10084 * Any time we move the cum-ack forward clear 10085 * keep-alive tied probe-not-answered. The 10086 * persists clears its own on entry. 10087 */ 10088 rack->probe_not_answered = 0; 10089 } 10090 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10091 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10092 /* 10093 * If we just performed our first retransmit, and the ACK arrives 10094 * within our recovery window, then it was a mistake to do the 10095 * retransmit in the first place. Recover our original cwnd and 10096 * ssthresh, and proceed to transmit where we left off. 
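* The recovery window here is t_badrxtwin: if only one RTO has fired (t_rxtshift == 1) and this ACK arrives while (ticks - t_badrxtwin) is still negative, the retransmit is judged spurious and CC_RTO_ERR below undoes the cwnd/ssthresh reduction.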
10097 */ 10098 if ((tp->t_flags & TF_PREVVALID) && 10099 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10100 tp->t_flags &= ~TF_PREVVALID; 10101 if (tp->t_rxtshift == 1 && 10102 (int)(ticks - tp->t_badrxtwin) < 0) 10103 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10104 } 10105 if (acked) { 10106 /* assure we are not backed off */ 10107 tp->t_rxtshift = 0; 10108 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10109 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10110 rack->rc_tlp_in_progress = 0; 10111 rack->r_ctl.rc_tlp_cnt_out = 0; 10112 /* 10113 * If it is the RXT timer we want to 10114 * stop it, so we can restart a TLP. 10115 */ 10116 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10117 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10118 #ifdef NETFLIX_HTTP_LOGGING 10119 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10120 #endif 10121 } 10122 /* 10123 * If we have a timestamp reply, update smoothed round trip time. If 10124 * no timestamp is present but transmit timer is running and timed 10125 * sequence number was acked, update smoothed round trip time. Since 10126 * we now have an rtt measurement, cancel the timer backoff (cf., 10127 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10128 * timer. 10129 * 10130 * Some boxes send broken timestamp replies during the SYN+ACK 10131 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10132 * and blow up the retransmit timer. 10133 */ 10134 /* 10135 * If all outstanding data is acked, stop retransmit timer and 10136 * remember to restart (more output or persist). If there is more 10137 * data to be acked, restart retransmit timer, using current 10138 * (possibly backed-off) value. 10139 */ 10140 if (acked == 0) { 10141 if (ofia) 10142 *ofia = ourfinisacked; 10143 return (0); 10144 } 10145 if (IN_RECOVERY(tp->t_flags)) { 10146 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10147 (SEQ_LT(th->th_ack, tp->snd_max))) { 10148 tcp_rack_partialack(tp); 10149 } else { 10150 rack_post_recovery(tp, th->th_ack); 10151 recovery = 1; 10152 } 10153 } 10154 /* 10155 * Let the congestion control algorithm update congestion control 10156 * related information. This typically means increasing the 10157 * congestion window. 10158 */ 10159 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10160 SOCKBUF_LOCK(&so->so_snd); 10161 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10162 tp->snd_wnd -= acked_amount; 10163 mfree = sbcut_locked(&so->so_snd, acked_amount); 10164 if ((sbused(&so->so_snd) == 0) && 10165 (acked > acked_amount) && 10166 (tp->t_state >= TCPS_FIN_WAIT_1) && 10167 (tp->t_flags & TF_SENTFIN)) { 10168 /* 10169 * We must be sure our fin 10170 * was sent and acked (we can be 10171 * in FIN_WAIT_1 without having 10172 * sent the fin). 10173 */ 10174 ourfinisacked = 1; 10175 } 10176 tp->snd_una = th->th_ack; 10177 if (acked_amount && sbavail(&so->so_snd)) 10178 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10179 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10180 /* NB: sowwakeup_locked() does an implicit unlock. 
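The sockbuf lock taken by SOCKBUF_LOCK() above is released here, which is why no explicit SOCKBUF_UNLOCK() follows.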
*/ 10181 sowwakeup_locked(so); 10182 m_freem(mfree); 10183 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10184 tp->snd_recover = tp->snd_una; 10185 10186 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10187 tp->snd_nxt = tp->snd_una; 10188 } 10189 if (under_pacing && 10190 (rack->use_fixed_rate == 0) && 10191 (rack->in_probe_rtt == 0) && 10192 rack->rc_gp_dyn_mul && 10193 rack->rc_always_pace) { 10194 /* Check if we are dragging bottom */ 10195 rack_check_bottom_drag(tp, rack, so, acked); 10196 } 10197 if (tp->snd_una == tp->snd_max) { 10198 /* Nothing left outstanding */ 10199 tp->t_flags &= ~TF_PREVVALID; 10200 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10201 rack->r_ctl.retran_during_recovery = 0; 10202 rack->r_ctl.dsack_byte_cnt = 0; 10203 if (rack->r_ctl.rc_went_idle_time == 0) 10204 rack->r_ctl.rc_went_idle_time = 1; 10205 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10206 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10207 tp->t_acktime = 0; 10208 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10209 /* Set need output so persist might get set */ 10210 rack->r_wanted_output = 1; 10211 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10212 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10213 (sbavail(&so->so_snd) == 0) && 10214 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10215 /* 10216 * The socket was gone and the 10217 * peer sent data (now or in the past), time to 10218 * reset the peer. 10219 */ 10220 *ret_val = 1; 10221 /* tcp_close will kill the inp; pre-log the Reset */ 10222 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10223 tp = tcp_close(tp); 10224 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10225 return (1); 10226 } 10227 } 10228 if (ofia) 10229 *ofia = ourfinisacked; 10230 return (0); 10231 } 10232 10233 static void 10234 rack_collapsed_window(struct tcp_rack *rack) 10235 { 10236 /* 10237 * Now we must walk the 10238 * send map and divide the 10239 * ones left stranded. These 10240 * guys can't cause us to abort 10241 * the connection and are really 10242 * "unsent". However, if a buggy 10243 * client actually did keep some 10244 * of the data (i.e. collapsed the win, 10245 * refused to ack, and then opened 10246 * the win and acked that data), we would 10247 * get into an ack war, so the simpler 10248 * method of just pretending we 10249 * did not send those segments 10250 * won't work. 10251 */ 10252 struct rack_sendmap *rsm, *nrsm, fe; 10253 #ifdef INVARIANTS 10254 struct rack_sendmap *insret; 10255 #endif 10256 tcp_seq max_seq; 10257 10258 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10259 memset(&fe, 0, sizeof(fe)); 10260 fe.r_start = max_seq; 10261 /* Find the first seq past or at maxseq */ 10262 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10263 if (rsm == NULL) { 10264 /* Nothing to do, strange */ 10265 rack->rc_has_collapsed = 0; 10266 return; 10267 } 10268 /* 10269 * Now do we need to split at 10270 * the collapse point? 10271 */ 10272 if (SEQ_GT(max_seq, rsm->r_start)) { 10273 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10274 if (nrsm == NULL) { 10275 /* We can't get an rsm; mark all?
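If the allocation fails we skip the split and simply mark everything from this rsm onward as collapsed.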
*/ 10276 nrsm = rsm; 10277 goto no_split; 10278 } 10279 /* Clone it */ 10280 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10281 #ifndef INVARIANTS 10282 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10283 #else 10284 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10285 if (insret != NULL) { 10286 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10287 nrsm, insret, rack, rsm); 10288 } 10289 #endif 10290 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10291 if (rsm->r_in_tmap) { 10292 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10293 nrsm->r_in_tmap = 1; 10294 } 10295 /* 10296 * Set in the new RSM as the 10297 * collapsed starting point 10298 */ 10299 rsm = nrsm; 10300 } 10301 no_split: 10302 counter_u64_add(rack_collapsed_win, 1); 10303 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10304 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10305 } 10306 rack->rc_has_collapsed = 1; 10307 } 10308 10309 static void 10310 rack_un_collapse_window(struct tcp_rack *rack) 10311 { 10312 struct rack_sendmap *rsm; 10313 10314 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10315 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10316 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10317 else 10318 break; 10319 } 10320 rack->rc_has_collapsed = 0; 10321 } 10322 10323 static void 10324 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10325 int32_t tlen, int32_t tfo_syn) 10326 { 10327 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10328 if (rack->rc_dack_mode && 10329 (tlen > 500) && 10330 (rack->rc_dack_toggle == 1)) { 10331 goto no_delayed_ack; 10332 } 10333 rack_timer_cancel(tp, rack, 10334 rack->r_ctl.rc_rcvtime, __LINE__); 10335 tp->t_flags |= TF_DELACK; 10336 } else { 10337 no_delayed_ack: 10338 rack->r_wanted_output = 1; 10339 tp->t_flags |= TF_ACKNOW; 10340 if (rack->rc_dack_mode) { 10341 if (tp->t_flags & TF_DELACK) 10342 rack->rc_dack_toggle = 1; 10343 else 10344 rack->rc_dack_toggle = 0; 10345 } 10346 } 10347 } 10348 10349 static void 10350 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10351 { 10352 /* 10353 * If fast output is in progress, lets validate that 10354 * the new window did not shrink on us and make it 10355 * so fast output should end. 10356 */ 10357 if (rack->r_fast_output) { 10358 uint32_t out; 10359 10360 /* 10361 * Calculate what we will send if left as is 10362 * and compare that to our send window. 10363 */ 10364 out = ctf_outstanding(tp); 10365 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10366 /* ok we have an issue */ 10367 if (out >= tp->snd_wnd) { 10368 /* Turn off fast output the window is met or collapsed */ 10369 rack->r_fast_output = 0; 10370 } else { 10371 /* we have some room left */ 10372 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10373 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10374 /* If not at least 1 full segment never mind */ 10375 rack->r_fast_output = 0; 10376 } 10377 } 10378 } 10379 } 10380 } 10381 10382 10383 /* 10384 * Return value of 1, the TCB is unlocked and most 10385 * likely gone, return value of 0, the TCP is still 10386 * locked. 10387 */ 10388 static int 10389 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10390 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10391 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10392 { 10393 /* 10394 * Update window information. Don't look at window if no ACK: TAC's 10395 * send garbage on first SYN. 
10396 */ 10397 int32_t nsegs; 10398 int32_t tfo_syn; 10399 struct tcp_rack *rack; 10400 10401 rack = (struct tcp_rack *)tp->t_fb_ptr; 10402 INP_WLOCK_ASSERT(tp->t_inpcb); 10403 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10404 if ((thflags & TH_ACK) && 10405 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10406 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10407 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10408 /* keep track of pure window updates */ 10409 if (tlen == 0 && 10410 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10411 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10412 tp->snd_wnd = tiwin; 10413 rack_validate_fo_sendwin_up(tp, rack); 10414 tp->snd_wl1 = th->th_seq; 10415 tp->snd_wl2 = th->th_ack; 10416 if (tp->snd_wnd > tp->max_sndwnd) 10417 tp->max_sndwnd = tp->snd_wnd; 10418 rack->r_wanted_output = 1; 10419 } else if (thflags & TH_ACK) { 10420 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10421 tp->snd_wnd = tiwin; 10422 rack_validate_fo_sendwin_up(tp, rack); 10423 tp->snd_wl1 = th->th_seq; 10424 tp->snd_wl2 = th->th_ack; 10425 } 10426 } 10427 if (tp->snd_wnd < ctf_outstanding(tp)) 10428 /* The peer collapsed the window */ 10429 rack_collapsed_window(rack); 10430 else if (rack->rc_has_collapsed) 10431 rack_un_collapse_window(rack); 10432 /* Was persist timer active and now we have window space? */ 10433 if ((rack->rc_in_persist != 0) && 10434 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10435 rack->r_ctl.rc_pace_min_segs))) { 10436 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10437 tp->snd_nxt = tp->snd_max; 10438 /* Make sure we output to start the timer */ 10439 rack->r_wanted_output = 1; 10440 } 10441 /* Do we enter persists? */ 10442 if ((rack->rc_in_persist == 0) && 10443 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10444 TCPS_HAVEESTABLISHED(tp->t_state) && 10445 (tp->snd_max == tp->snd_una) && 10446 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10447 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10448 /* 10449 * Here the rwnd is less than 10450 * the pacing size, we are established, 10451 * nothing is outstanding, and there is 10452 * data to send. Enter persists. 10453 */ 10454 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10455 } 10456 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10457 m_freem(m); 10458 return (0); 10459 } 10460 /* 10461 * don't process the URG bit, ignore them drag 10462 * along the up. 10463 */ 10464 tp->rcv_up = tp->rcv_nxt; 10465 INP_WLOCK_ASSERT(tp->t_inpcb); 10466 10467 /* 10468 * Process the segment text, merging it into the TCP sequencing 10469 * queue, and arranging for acknowledgment of receipt if necessary. 10470 * This process logically involves adjusting tp->rcv_wnd as data is 10471 * presented to the user (this happens in tcp_usrreq.c, case 10472 * PRU_RCVD). If a FIN has already been received on this connection 10473 * then we just ignore the text. 10474 */ 10475 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10476 IS_FASTOPEN(tp->t_flags)); 10477 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10478 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10479 tcp_seq save_start = th->th_seq; 10480 tcp_seq save_rnxt = tp->rcv_nxt; 10481 int save_tlen = tlen; 10482 10483 m_adj(m, drop_hdrlen); /* delayed header drop */ 10484 /* 10485 * Insert segment which includes th into TCP reassembly 10486 * queue with control block tp. Set thflags to whether 10487 * reassembly now includes a segment with FIN. 
This handles 10488 * the common case inline (segment is the next to be 10489 * received on an established connection, and the queue is 10490 * empty), avoiding linkage into and removal from the queue 10491 * and repetition of various conversions. Set DELACK for 10492 * segments received in order, but ack immediately when 10493 * segments are out of order (so fast retransmit can work). 10494 */ 10495 if (th->th_seq == tp->rcv_nxt && 10496 SEGQ_EMPTY(tp) && 10497 (TCPS_HAVEESTABLISHED(tp->t_state) || 10498 tfo_syn)) { 10499 #ifdef NETFLIX_SB_LIMITS 10500 u_int mcnt, appended; 10501 10502 if (so->so_rcv.sb_shlim) { 10503 mcnt = m_memcnt(m); 10504 appended = 0; 10505 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10506 CFO_NOSLEEP, NULL) == false) { 10507 counter_u64_add(tcp_sb_shlim_fails, 1); 10508 m_freem(m); 10509 return (0); 10510 } 10511 } 10512 #endif 10513 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10514 tp->rcv_nxt += tlen; 10515 if (tlen && 10516 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10517 (tp->t_fbyte_in == 0)) { 10518 tp->t_fbyte_in = ticks; 10519 if (tp->t_fbyte_in == 0) 10520 tp->t_fbyte_in = 1; 10521 if (tp->t_fbyte_out && tp->t_fbyte_in) 10522 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10523 } 10524 thflags = tcp_get_flags(th) & TH_FIN; 10525 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10526 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10527 SOCKBUF_LOCK(&so->so_rcv); 10528 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10529 m_freem(m); 10530 } else 10531 #ifdef NETFLIX_SB_LIMITS 10532 appended = 10533 #endif 10534 sbappendstream_locked(&so->so_rcv, m, 0); 10535 10536 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10537 /* NB: sorwakeup_locked() does an implicit unlock. */ 10538 sorwakeup_locked(so); 10539 #ifdef NETFLIX_SB_LIMITS 10540 if (so->so_rcv.sb_shlim && appended != mcnt) 10541 counter_fo_release(so->so_rcv.sb_shlim, 10542 mcnt - appended); 10543 #endif 10544 } else { 10545 /* 10546 * XXX: Due to the header drop above "th" is 10547 * theoretically invalid by now. Fortunately 10548 * m_adj() doesn't actually frees any mbufs when 10549 * trimming from the head. 10550 */ 10551 tcp_seq temp = save_start; 10552 10553 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10554 tp->t_flags |= TF_ACKNOW; 10555 if (tp->t_flags & TF_WAKESOR) { 10556 tp->t_flags &= ~TF_WAKESOR; 10557 /* NB: sorwakeup_locked() does an implicit unlock. */ 10558 sorwakeup_locked(so); 10559 } 10560 } 10561 if ((tp->t_flags & TF_SACK_PERMIT) && 10562 (save_tlen > 0) && 10563 TCPS_HAVEESTABLISHED(tp->t_state)) { 10564 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10565 /* 10566 * DSACK actually handled in the fastpath 10567 * above. 10568 */ 10569 RACK_OPTS_INC(tcp_sack_path_1); 10570 tcp_update_sack_list(tp, save_start, 10571 save_start + save_tlen); 10572 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10573 if ((tp->rcv_numsacks >= 1) && 10574 (tp->sackblks[0].end == save_start)) { 10575 /* 10576 * Partial overlap, recorded at todrop 10577 * above. 10578 */ 10579 RACK_OPTS_INC(tcp_sack_path_2a); 10580 tcp_update_sack_list(tp, 10581 tp->sackblks[0].start, 10582 tp->sackblks[0].end); 10583 } else { 10584 RACK_OPTS_INC(tcp_sack_path_2b); 10585 tcp_update_dsack_list(tp, save_start, 10586 save_start + save_tlen); 10587 } 10588 } else if (tlen >= save_tlen) { 10589 /* Update of sackblks. 
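The segment was a full duplicate (tlen >= save_tlen), so the entire original range is reported via tcp_update_dsack_list() below.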
*/ 10590 RACK_OPTS_INC(tcp_sack_path_3); 10591 tcp_update_dsack_list(tp, save_start, 10592 save_start + save_tlen); 10593 } else if (tlen > 0) { 10594 RACK_OPTS_INC(tcp_sack_path_4); 10595 tcp_update_dsack_list(tp, save_start, 10596 save_start + tlen); 10597 } 10598 } 10599 } else { 10600 m_freem(m); 10601 thflags &= ~TH_FIN; 10602 } 10603 10604 /* 10605 * If FIN is received ACK the FIN and let the user know that the 10606 * connection is closing. 10607 */ 10608 if (thflags & TH_FIN) { 10609 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10610 /* The socket upcall is handled by socantrcvmore. */ 10611 socantrcvmore(so); 10612 /* 10613 * If connection is half-synchronized (ie NEEDSYN 10614 * flag on) then delay ACK, so it may be piggybacked 10615 * when SYN is sent. Otherwise, since we received a 10616 * FIN then no more input can be expected, send ACK 10617 * now. 10618 */ 10619 if (tp->t_flags & TF_NEEDSYN) { 10620 rack_timer_cancel(tp, rack, 10621 rack->r_ctl.rc_rcvtime, __LINE__); 10622 tp->t_flags |= TF_DELACK; 10623 } else { 10624 tp->t_flags |= TF_ACKNOW; 10625 } 10626 tp->rcv_nxt++; 10627 } 10628 switch (tp->t_state) { 10629 /* 10630 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10631 * CLOSE_WAIT state. 10632 */ 10633 case TCPS_SYN_RECEIVED: 10634 tp->t_starttime = ticks; 10635 /* FALLTHROUGH */ 10636 case TCPS_ESTABLISHED: 10637 rack_timer_cancel(tp, rack, 10638 rack->r_ctl.rc_rcvtime, __LINE__); 10639 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10640 break; 10641 10642 /* 10643 * If still in FIN_WAIT_1 STATE FIN has not been 10644 * acked so enter the CLOSING state. 10645 */ 10646 case TCPS_FIN_WAIT_1: 10647 rack_timer_cancel(tp, rack, 10648 rack->r_ctl.rc_rcvtime, __LINE__); 10649 tcp_state_change(tp, TCPS_CLOSING); 10650 break; 10651 10652 /* 10653 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10654 * starting the time-wait timer, turning off the 10655 * other standard timers. 10656 */ 10657 case TCPS_FIN_WAIT_2: 10658 rack_timer_cancel(tp, rack, 10659 rack->r_ctl.rc_rcvtime, __LINE__); 10660 tcp_twstart(tp); 10661 return (1); 10662 } 10663 } 10664 /* 10665 * Return any desired output. 10666 */ 10667 if ((tp->t_flags & TF_ACKNOW) || 10668 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10669 rack->r_wanted_output = 1; 10670 } 10671 INP_WLOCK_ASSERT(tp->t_inpcb); 10672 return (0); 10673 } 10674 10675 /* 10676 * Here nothing is really faster, its just that we 10677 * have broken out the fast-data path also just like 10678 * the fast-ack. 10679 */ 10680 static int 10681 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10682 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10683 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10684 { 10685 int32_t nsegs; 10686 int32_t newsize = 0; /* automatic sockbuf scaling */ 10687 struct tcp_rack *rack; 10688 #ifdef NETFLIX_SB_LIMITS 10689 u_int mcnt, appended; 10690 #endif 10691 #ifdef TCPDEBUG 10692 /* 10693 * The size of tcp_saveipgen must be the size of the max ip header, 10694 * now IPv6. 10695 */ 10696 u_char tcp_saveipgen[IP6_HDR_LEN]; 10697 struct tcphdr tcp_savetcp; 10698 short ostate = 0; 10699 10700 #endif 10701 /* 10702 * If last ACK falls within this segment's sequence numbers, record 10703 * the timestamp. NOTE that the test is modified according to the 10704 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
10705 */ 10706 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10707 return (0); 10708 } 10709 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10710 return (0); 10711 } 10712 if (tiwin && tiwin != tp->snd_wnd) { 10713 return (0); 10714 } 10715 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10716 return (0); 10717 } 10718 if (__predict_false((to->to_flags & TOF_TS) && 10719 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10720 return (0); 10721 } 10722 if (__predict_false((th->th_ack != tp->snd_una))) { 10723 return (0); 10724 } 10725 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10726 return (0); 10727 } 10728 if ((to->to_flags & TOF_TS) != 0 && 10729 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10730 tp->ts_recent_age = tcp_ts_getticks(); 10731 tp->ts_recent = to->to_tsval; 10732 } 10733 rack = (struct tcp_rack *)tp->t_fb_ptr; 10734 /* 10735 * This is a pure, in-sequence data packet with nothing on the 10736 * reassembly queue and we have enough buffer space to take it. 10737 */ 10738 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10739 10740 #ifdef NETFLIX_SB_LIMITS 10741 if (so->so_rcv.sb_shlim) { 10742 mcnt = m_memcnt(m); 10743 appended = 0; 10744 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10745 CFO_NOSLEEP, NULL) == false) { 10746 counter_u64_add(tcp_sb_shlim_fails, 1); 10747 m_freem(m); 10748 return (1); 10749 } 10750 } 10751 #endif 10752 /* Clean receiver SACK report if present */ 10753 if (tp->rcv_numsacks) 10754 tcp_clean_sackreport(tp); 10755 KMOD_TCPSTAT_INC(tcps_preddat); 10756 tp->rcv_nxt += tlen; 10757 if (tlen && 10758 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10759 (tp->t_fbyte_in == 0)) { 10760 tp->t_fbyte_in = ticks; 10761 if (tp->t_fbyte_in == 0) 10762 tp->t_fbyte_in = 1; 10763 if (tp->t_fbyte_out && tp->t_fbyte_in) 10764 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10765 } 10766 /* 10767 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10768 */ 10769 tp->snd_wl1 = th->th_seq; 10770 /* 10771 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10772 */ 10773 tp->rcv_up = tp->rcv_nxt; 10774 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10775 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10776 #ifdef TCPDEBUG 10777 if (so->so_options & SO_DEBUG) 10778 tcp_trace(TA_INPUT, ostate, tp, 10779 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10780 #endif 10781 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10782 10783 /* Add data to socket buffer. */ 10784 SOCKBUF_LOCK(&so->so_rcv); 10785 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10786 m_freem(m); 10787 } else { 10788 /* 10789 * Set new socket buffer size. Give up when limit is 10790 * reached. 10791 */ 10792 if (newsize) 10793 if (!sbreserve_locked(&so->so_rcv, 10794 newsize, so, NULL)) 10795 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10796 m_adj(m, drop_hdrlen); /* delayed header drop */ 10797 #ifdef NETFLIX_SB_LIMITS 10798 appended = 10799 #endif 10800 sbappendstream_locked(&so->so_rcv, m, 0); 10801 ctf_calc_rwin(so, tp); 10802 } 10803 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10804 /* NB: sorwakeup_locked() does an implicit unlock. */ 10805 sorwakeup_locked(so); 10806 #ifdef NETFLIX_SB_LIMITS 10807 if (so->so_rcv.sb_shlim && mcnt != appended) 10808 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10809 #endif 10810 rack_handle_delayed_ack(tp, rack, tlen, 0); 10811 if (tp->snd_una == tp->snd_max) 10812 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10813 return (1); 10814 } 10815 10816 /* 10817 * This subfunction is used to try to highly optimize the 10818 * fast path. 
We again allow window updates that are 10819 * in sequence to remain in the fast-path. We also add 10820 * in the __predict's to attempt to help the compiler. 10821 * Note that if we return a 0, then we can *not* process 10822 * it and the caller should push the packet into the 10823 * slow-path. 10824 */ 10825 static int 10826 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10827 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10828 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10829 { 10830 int32_t acked; 10831 int32_t nsegs; 10832 #ifdef TCPDEBUG 10833 /* 10834 * The size of tcp_saveipgen must be the size of the max ip header, 10835 * now IPv6. 10836 */ 10837 u_char tcp_saveipgen[IP6_HDR_LEN]; 10838 struct tcphdr tcp_savetcp; 10839 short ostate = 0; 10840 #endif 10841 int32_t under_pacing = 0; 10842 struct tcp_rack *rack; 10843 10844 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10845 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10846 return (0); 10847 } 10848 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10849 /* Above what we have sent? */ 10850 return (0); 10851 } 10852 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10853 /* We are retransmitting */ 10854 return (0); 10855 } 10856 if (__predict_false(tiwin == 0)) { 10857 /* zero window */ 10858 return (0); 10859 } 10860 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10861 /* We need a SYN or a FIN, unlikely.. */ 10862 return (0); 10863 } 10864 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10865 /* Timestamp is behind .. old ack with seq wrap? */ 10866 return (0); 10867 } 10868 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10869 /* Still recovering */ 10870 return (0); 10871 } 10872 rack = (struct tcp_rack *)tp->t_fb_ptr; 10873 if (rack->r_ctl.rc_sacked) { 10874 /* We have sack holes on our scoreboard */ 10875 return (0); 10876 } 10877 /* Ok if we reach here, we can process a fast-ack */ 10878 if (rack->gp_ready && 10879 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10880 under_pacing = 1; 10881 } 10882 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10883 rack_log_ack(tp, to, th, 0, 0); 10884 /* Did the window get updated? */ 10885 if (tiwin != tp->snd_wnd) { 10886 tp->snd_wnd = tiwin; 10887 rack_validate_fo_sendwin_up(tp, rack); 10888 tp->snd_wl1 = th->th_seq; 10889 if (tp->snd_wnd > tp->max_sndwnd) 10890 tp->max_sndwnd = tp->snd_wnd; 10891 } 10892 /* Do we exit persists? */ 10893 if ((rack->rc_in_persist != 0) && 10894 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10895 rack->r_ctl.rc_pace_min_segs))) { 10896 rack_exit_persist(tp, rack, cts); 10897 } 10898 /* Do we enter persists? */ 10899 if ((rack->rc_in_persist == 0) && 10900 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10901 TCPS_HAVEESTABLISHED(tp->t_state) && 10902 (tp->snd_max == tp->snd_una) && 10903 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10904 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10905 /* 10906 * Here the rwnd is less than 10907 * the pacing size, we are established, 10908 * nothing is outstanding, and there is 10909 * data to send. Enter persists. 10910 */ 10911 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10912 } 10913 /* 10914 * If last ACK falls within this segment's sequence numbers, record 10915 * the timestamp. NOTE that the test is modified according to the 10916 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
10917 */ 10918 if ((to->to_flags & TOF_TS) != 0 && 10919 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10920 tp->ts_recent_age = tcp_ts_getticks(); 10921 tp->ts_recent = to->to_tsval; 10922 } 10923 /* 10924 * This is a pure ack for outstanding data. 10925 */ 10926 KMOD_TCPSTAT_INC(tcps_predack); 10927 10928 /* 10929 * "bad retransmit" recovery. 10930 */ 10931 if ((tp->t_flags & TF_PREVVALID) && 10932 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10933 tp->t_flags &= ~TF_PREVVALID; 10934 if (tp->t_rxtshift == 1 && 10935 (int)(ticks - tp->t_badrxtwin) < 0) 10936 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10937 } 10938 /* 10939 * Recalculate the transmit timer / rtt. 10940 * 10941 * Some boxes send broken timestamp replies during the SYN+ACK 10942 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10943 * and blow up the retransmit timer. 10944 */ 10945 acked = BYTES_THIS_ACK(tp, th); 10946 10947 #ifdef TCP_HHOOK 10948 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 10949 hhook_run_tcp_est_in(tp, th, to); 10950 #endif 10951 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10952 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10953 if (acked) { 10954 struct mbuf *mfree; 10955 10956 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 10957 SOCKBUF_LOCK(&so->so_snd); 10958 mfree = sbcut_locked(&so->so_snd, acked); 10959 tp->snd_una = th->th_ack; 10960 /* Note we want to hold the sb lock through the sendmap adjust */ 10961 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10962 /* Wake up the socket if we have room to write more */ 10963 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10964 sowwakeup_locked(so); 10965 m_freem(mfree); 10966 tp->t_rxtshift = 0; 10967 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10968 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10969 rack->rc_tlp_in_progress = 0; 10970 rack->r_ctl.rc_tlp_cnt_out = 0; 10971 /* 10972 * If it is the RXT timer we want to 10973 * stop it, so we can restart a TLP. 10974 */ 10975 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10976 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10977 #ifdef NETFLIX_HTTP_LOGGING 10978 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10979 #endif 10980 } 10981 /* 10982 * Let the congestion control algorithm update congestion control 10983 * related information. This typically means increasing the 10984 * congestion window. 10985 */ 10986 if (tp->snd_wnd < ctf_outstanding(tp)) { 10987 /* The peer collapsed the window */ 10988 rack_collapsed_window(rack); 10989 } else if (rack->rc_has_collapsed) 10990 rack_un_collapse_window(rack); 10991 10992 /* 10993 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 10994 */ 10995 tp->snd_wl2 = th->th_ack; 10996 tp->t_dupacks = 0; 10997 m_freem(m); 10998 /* ND6_HINT(tp); *//* Some progress has been made. */ 10999 11000 /* 11001 * If all outstanding data are acked, stop retransmit timer, 11002 * otherwise restart timer using current (possibly backed-off) 11003 * value. If process is waiting for space, wakeup/selwakeup/signal. 11004 * If data are ready to send, let tcp_output decide between more 11005 * output or persist. 
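* In this fast-ack path that happens just below: when snd_una == snd_max the timers are cancelled and the idle time is stamped, and if the send buffer still holds data r_wanted_output is set so output will be attempted.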
11006 */ 11007 #ifdef TCPDEBUG 11008 if (so->so_options & SO_DEBUG) 11009 tcp_trace(TA_INPUT, ostate, tp, 11010 (void *)tcp_saveipgen, 11011 &tcp_savetcp, 0); 11012 #endif 11013 if (under_pacing && 11014 (rack->use_fixed_rate == 0) && 11015 (rack->in_probe_rtt == 0) && 11016 rack->rc_gp_dyn_mul && 11017 rack->rc_always_pace) { 11018 /* Check if we are dragging bottom */ 11019 rack_check_bottom_drag(tp, rack, so, acked); 11020 } 11021 if (tp->snd_una == tp->snd_max) { 11022 tp->t_flags &= ~TF_PREVVALID; 11023 rack->r_ctl.retran_during_recovery = 0; 11024 rack->r_ctl.dsack_byte_cnt = 0; 11025 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11026 if (rack->r_ctl.rc_went_idle_time == 0) 11027 rack->r_ctl.rc_went_idle_time = 1; 11028 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11029 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11030 tp->t_acktime = 0; 11031 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11032 } 11033 if (acked && rack->r_fast_output) 11034 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11035 if (sbavail(&so->so_snd)) { 11036 rack->r_wanted_output = 1; 11037 } 11038 return (1); 11039 } 11040 11041 /* 11042 * Return value of 1, the TCB is unlocked and most 11043 * likely gone, return value of 0, the TCP is still 11044 * locked. 11045 */ 11046 static int 11047 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11048 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11049 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11050 { 11051 int32_t ret_val = 0; 11052 int32_t todrop; 11053 int32_t ourfinisacked = 0; 11054 struct tcp_rack *rack; 11055 11056 ctf_calc_rwin(so, tp); 11057 /* 11058 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11059 * SYN, drop the input. if seg contains a RST, then drop the 11060 * connection. if seg does not contain SYN, then drop it. Otherwise 11061 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11062 * tp->irs if seg contains ack then advance tp->snd_una if seg 11063 * contains an ECE and ECN support is enabled, the stream is ECN 11064 * capable. if SYN has been acked change to ESTABLISHED else 11065 * SYN_RCVD state arrange for segment to be acked (eventually) 11066 * continue processing rest of data/controls. 11067 */ 11068 if ((thflags & TH_ACK) && 11069 (SEQ_LEQ(th->th_ack, tp->iss) || 11070 SEQ_GT(th->th_ack, tp->snd_max))) { 11071 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11072 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11073 return (1); 11074 } 11075 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11076 TCP_PROBE5(connect__refused, NULL, tp, 11077 mtod(m, const char *), tp, th); 11078 tp = tcp_drop(tp, ECONNREFUSED); 11079 ctf_do_drop(m, tp); 11080 return (1); 11081 } 11082 if (thflags & TH_RST) { 11083 ctf_do_drop(m, tp); 11084 return (1); 11085 } 11086 if (!(thflags & TH_SYN)) { 11087 ctf_do_drop(m, tp); 11088 return (1); 11089 } 11090 tp->irs = th->th_seq; 11091 tcp_rcvseqinit(tp); 11092 rack = (struct tcp_rack *)tp->t_fb_ptr; 11093 if (thflags & TH_ACK) { 11094 int tfo_partial = 0; 11095 11096 KMOD_TCPSTAT_INC(tcps_connects); 11097 soisconnected(so); 11098 #ifdef MAC 11099 mac_socketpeer_set_from_mbuf(m, so); 11100 #endif 11101 /* Do window scaling on this connection? 
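Only when both TF_RCVD_SCALE and TF_REQ_SCALE are set, i.e. both ends offered the option on their SYNs.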
*/ 11102 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11103 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11104 tp->rcv_scale = tp->request_r_scale; 11105 } 11106 tp->rcv_adv += min(tp->rcv_wnd, 11107 TCP_MAXWIN << tp->rcv_scale); 11108 /* 11109 * If not all the data that was sent in the TFO SYN 11110 * has been acked, resend the remainder right away. 11111 */ 11112 if (IS_FASTOPEN(tp->t_flags) && 11113 (tp->snd_una != tp->snd_max)) { 11114 tp->snd_nxt = th->th_ack; 11115 tfo_partial = 1; 11116 } 11117 /* 11118 * If there's data, delay ACK; if there's also a FIN ACKNOW 11119 * will be turned on later. 11120 */ 11121 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11122 rack_timer_cancel(tp, rack, 11123 rack->r_ctl.rc_rcvtime, __LINE__); 11124 tp->t_flags |= TF_DELACK; 11125 } else { 11126 rack->r_wanted_output = 1; 11127 tp->t_flags |= TF_ACKNOW; 11128 rack->rc_dack_toggle = 0; 11129 } 11130 11131 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11132 11133 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11134 /* 11135 * We advance snd_una for the 11136 * fast open case. If th_ack is 11137 * acknowledging data beyond 11138 * snd_una we can't just call 11139 * ack-processing since the 11140 * data stream in our send-map 11141 * will start at snd_una + 1 (one 11142 * beyond the SYN). If its just 11143 * equal we don't need to do that 11144 * and there is no send_map. 11145 */ 11146 tp->snd_una++; 11147 } 11148 /* 11149 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11150 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11151 */ 11152 tp->t_starttime = ticks; 11153 if (tp->t_flags & TF_NEEDFIN) { 11154 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11155 tp->t_flags &= ~TF_NEEDFIN; 11156 thflags &= ~TH_SYN; 11157 } else { 11158 tcp_state_change(tp, TCPS_ESTABLISHED); 11159 TCP_PROBE5(connect__established, NULL, tp, 11160 mtod(m, const char *), tp, th); 11161 rack_cc_conn_init(tp); 11162 } 11163 } else { 11164 /* 11165 * Received initial SYN in SYN-SENT[*] state => simultaneous 11166 * open. If segment contains CC option and there is a 11167 * cached CC, apply TAO test. If it succeeds, connection is * 11168 * half-synchronized. Otherwise, do 3-way handshake: 11169 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11170 * there was no CC option, clear cached CC value. 11171 */ 11172 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11173 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11174 } 11175 INP_WLOCK_ASSERT(tp->t_inpcb); 11176 /* 11177 * Advance th->th_seq to correspond to first data byte. If data, 11178 * trim to stay within window, dropping FIN if necessary. 11179 */ 11180 th->th_seq++; 11181 if (tlen > tp->rcv_wnd) { 11182 todrop = tlen - tp->rcv_wnd; 11183 m_adj(m, -todrop); 11184 tlen = tp->rcv_wnd; 11185 thflags &= ~TH_FIN; 11186 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11187 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11188 } 11189 tp->snd_wl1 = th->th_seq - 1; 11190 tp->rcv_up = th->th_seq; 11191 /* 11192 * Client side of transaction: already sent SYN and data. If the 11193 * remote host used T/TCP to validate the SYN, our data will be 11194 * ACK'd; if so, enter normal data segment processing in the middle 11195 * of step 5, ack processing. Otherwise, goto step 6. 
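* Note that before the ack processing below we may also take an RTT sample from the timestamp echo (to_tsecr) and feed it to the rack RTT/xmit timer logic (tcp_rack_xmit_timer).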
11196 */ 11197 if (thflags & TH_ACK) { 11198 /* For syn-sent we need to possibly update the rtt */ 11199 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11200 uint32_t t, mcts; 11201 11202 mcts = tcp_ts_getticks(); 11203 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11204 if (!tp->t_rttlow || tp->t_rttlow > t) 11205 tp->t_rttlow = t; 11206 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11207 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11208 tcp_rack_xmit_timer_commit(rack, tp); 11209 } 11210 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11211 return (ret_val); 11212 /* We may have changed to FIN_WAIT_1 above */ 11213 if (tp->t_state == TCPS_FIN_WAIT_1) { 11214 /* 11215 * In FIN_WAIT_1 STATE in addition to the processing 11216 * for the ESTABLISHED state if our FIN is now 11217 * acknowledged then enter FIN_WAIT_2. 11218 */ 11219 if (ourfinisacked) { 11220 /* 11221 * If we can't receive any more data, then 11222 * closing user can proceed. Starting the 11223 * timer is contrary to the specification, 11224 * but if we don't get a FIN we'll hang 11225 * forever. 11226 * 11227 * XXXjl: we should release the tp also, and 11228 * use a compressed state. 11229 */ 11230 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11231 soisdisconnected(so); 11232 tcp_timer_activate(tp, TT_2MSL, 11233 (tcp_fast_finwait2_recycle ? 11234 tcp_finwait2_timeout : 11235 TP_MAXIDLE(tp))); 11236 } 11237 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11238 } 11239 } 11240 } 11241 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11242 tiwin, thflags, nxt_pkt)); 11243 } 11244 11245 /* 11246 * Return value of 1, the TCB is unlocked and most 11247 * likely gone, return value of 0, the TCP is still 11248 * locked. 11249 */ 11250 static int 11251 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11252 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11253 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11254 { 11255 struct tcp_rack *rack; 11256 int32_t ret_val = 0; 11257 int32_t ourfinisacked = 0; 11258 11259 ctf_calc_rwin(so, tp); 11260 if ((thflags & TH_ACK) && 11261 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11262 SEQ_GT(th->th_ack, tp->snd_max))) { 11263 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11264 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11265 return (1); 11266 } 11267 rack = (struct tcp_rack *)tp->t_fb_ptr; 11268 if (IS_FASTOPEN(tp->t_flags)) { 11269 /* 11270 * When a TFO connection is in SYN_RECEIVED, the 11271 * only valid packets are the initial SYN, a 11272 * retransmit/copy of the initial SYN (possibly with 11273 * a subset of the original data), a valid ACK, a 11274 * FIN, or a RST. 
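* A segment carrying both SYN and ACK is none of these and is answered with a reset below.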
11275 */ 11276 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11277 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11278 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11279 return (1); 11280 } else if (thflags & TH_SYN) { 11281 /* non-initial SYN is ignored */ 11282 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11283 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11284 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11285 ctf_do_drop(m, NULL); 11286 return (0); 11287 } 11288 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11289 ctf_do_drop(m, NULL); 11290 return (0); 11291 } 11292 } 11293 11294 if ((thflags & TH_RST) || 11295 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11296 return (__ctf_process_rst(m, th, so, tp, 11297 &rack->r_ctl.challenge_ack_ts, 11298 &rack->r_ctl.challenge_ack_cnt)); 11299 /* 11300 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11301 * it's less than ts_recent, drop it. 11302 */ 11303 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11304 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11305 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11306 return (ret_val); 11307 } 11308 /* 11309 * In the SYN-RECEIVED state, validate that the packet belongs to 11310 * this connection before trimming the data to fit the receive 11311 * window. Check the sequence number versus IRS since we know the 11312 * sequence numbers haven't wrapped. This is a partial fix for the 11313 * "LAND" DoS attack. 11314 */ 11315 if (SEQ_LT(th->th_seq, tp->irs)) { 11316 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11317 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11318 return (1); 11319 } 11320 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11321 &rack->r_ctl.challenge_ack_ts, 11322 &rack->r_ctl.challenge_ack_cnt)) { 11323 return (ret_val); 11324 } 11325 /* 11326 * If last ACK falls within this segment's sequence numbers, record 11327 * its timestamp. NOTE: 1) That the test incorporates suggestions 11328 * from the latest proposal of the tcplw@cray.com list (Braden 11329 * 1993/04/26). 2) That updating only on newer timestamps interferes 11330 * with our earlier PAWS tests, so this check should be solely 11331 * predicated on the sequence space of this segment. 3) That we 11332 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11333 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11334 * SEG.Len, This modified check allows us to overcome RFC1323's 11335 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11336 * p.869. In such cases, we can still calculate the RTT correctly 11337 * when RCV.NXT == Last.ACK.Sent. 11338 */ 11339 if ((to->to_flags & TOF_TS) != 0 && 11340 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11341 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11342 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11343 tp->ts_recent_age = tcp_ts_getticks(); 11344 tp->ts_recent = to->to_tsval; 11345 } 11346 tp->snd_wnd = tiwin; 11347 rack_validate_fo_sendwin_up(tp, rack); 11348 /* 11349 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11350 * is on (half-synchronized state), then queue data for later 11351 * processing; else drop segment and return. 
11352 */ 11353 if ((thflags & TH_ACK) == 0) { 11354 if (IS_FASTOPEN(tp->t_flags)) { 11355 rack_cc_conn_init(tp); 11356 } 11357 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11358 tiwin, thflags, nxt_pkt)); 11359 } 11360 KMOD_TCPSTAT_INC(tcps_connects); 11361 soisconnected(so); 11362 /* Do window scaling? */ 11363 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11364 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11365 tp->rcv_scale = tp->request_r_scale; 11366 } 11367 /* 11368 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11369 * FIN-WAIT-1 11370 */ 11371 tp->t_starttime = ticks; 11372 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11373 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11374 tp->t_tfo_pending = NULL; 11375 } 11376 if (tp->t_flags & TF_NEEDFIN) { 11377 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11378 tp->t_flags &= ~TF_NEEDFIN; 11379 } else { 11380 tcp_state_change(tp, TCPS_ESTABLISHED); 11381 TCP_PROBE5(accept__established, NULL, tp, 11382 mtod(m, const char *), tp, th); 11383 /* 11384 * TFO connections call cc_conn_init() during SYN 11385 * processing. Calling it again here for such connections 11386 * is not harmless as it would undo the snd_cwnd reduction 11387 * that occurs when a TFO SYN|ACK is retransmitted. 11388 */ 11389 if (!IS_FASTOPEN(tp->t_flags)) 11390 rack_cc_conn_init(tp); 11391 } 11392 /* 11393 * Account for the ACK of our SYN prior to 11394 * regular ACK processing below, except for 11395 * simultaneous SYN, which is handled later. 11396 */ 11397 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11398 tp->snd_una++; 11399 /* 11400 * If segment contains data or ACK, will call tcp_reass() later; if 11401 * not, do so now to pass queued data to user. 11402 */ 11403 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11404 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11405 (struct mbuf *)0); 11406 if (tp->t_flags & TF_WAKESOR) { 11407 tp->t_flags &= ~TF_WAKESOR; 11408 /* NB: sorwakeup_locked() does an implicit unlock. */ 11409 sorwakeup_locked(so); 11410 } 11411 } 11412 tp->snd_wl1 = th->th_seq - 1; 11413 /* For syn-recv we need to possibly update the rtt */ 11414 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11415 uint32_t t, mcts; 11416 11417 mcts = tcp_ts_getticks(); 11418 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11419 if (!tp->t_rttlow || tp->t_rttlow > t) 11420 tp->t_rttlow = t; 11421 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11422 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11423 tcp_rack_xmit_timer_commit(rack, tp); 11424 } 11425 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11426 return (ret_val); 11427 } 11428 if (tp->t_state == TCPS_FIN_WAIT_1) { 11429 /* We could have went to FIN_WAIT_1 (or EST) above */ 11430 /* 11431 * In FIN_WAIT_1 STATE in addition to the processing for the 11432 * ESTABLISHED state if our FIN is now acknowledged then 11433 * enter FIN_WAIT_2. 11434 */ 11435 if (ourfinisacked) { 11436 /* 11437 * If we can't receive any more data, then closing 11438 * user can proceed. Starting the timer is contrary 11439 * to the specification, but if we don't get a FIN 11440 * we'll hang forever. 11441 * 11442 * XXXjl: we should release the tp also, and use a 11443 * compressed state. 11444 */ 11445 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11446 soisdisconnected(so); 11447 tcp_timer_activate(tp, TT_2MSL, 11448 (tcp_fast_finwait2_recycle ? 
11449 tcp_finwait2_timeout : 11450 TP_MAXIDLE(tp))); 11451 } 11452 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11453 } 11454 } 11455 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11456 tiwin, thflags, nxt_pkt)); 11457 } 11458 11459 /* 11460 * Return value of 1, the TCB is unlocked and most 11461 * likely gone, return value of 0, the TCP is still 11462 * locked. 11463 */ 11464 static int 11465 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11466 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11467 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11468 { 11469 int32_t ret_val = 0; 11470 struct tcp_rack *rack; 11471 11472 /* 11473 * Header prediction: check for the two common cases of a 11474 * uni-directional data xfer. If the packet has no control flags, 11475 * is in-sequence, the window didn't change and we're not 11476 * retransmitting, it's a candidate. If the length is zero and the 11477 * ack moved forward, we're the sender side of the xfer. Just free 11478 * the data acked & wake any higher level process that was blocked 11479 * waiting for space. If the length is non-zero and the ack didn't 11480 * move, we're the receiver side. If we're getting packets in-order 11481 * (the reassembly queue is empty), add the data toc The socket 11482 * buffer and note that we need a delayed ack. Make sure that the 11483 * hidden state-flags are also off. Since we check for 11484 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11485 */ 11486 rack = (struct tcp_rack *)tp->t_fb_ptr; 11487 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11488 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11489 __predict_true(SEGQ_EMPTY(tp)) && 11490 __predict_true(th->th_seq == tp->rcv_nxt)) { 11491 if (tlen == 0) { 11492 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11493 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11494 return (0); 11495 } 11496 } else { 11497 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11498 tiwin, nxt_pkt, iptos)) { 11499 return (0); 11500 } 11501 } 11502 } 11503 ctf_calc_rwin(so, tp); 11504 11505 if ((thflags & TH_RST) || 11506 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11507 return (__ctf_process_rst(m, th, so, tp, 11508 &rack->r_ctl.challenge_ack_ts, 11509 &rack->r_ctl.challenge_ack_cnt)); 11510 11511 /* 11512 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11513 * synchronized state. 11514 */ 11515 if (thflags & TH_SYN) { 11516 ctf_challenge_ack(m, th, tp, &ret_val); 11517 return (ret_val); 11518 } 11519 /* 11520 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11521 * it's less than ts_recent, drop it. 11522 */ 11523 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11524 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11525 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11526 return (ret_val); 11527 } 11528 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11529 &rack->r_ctl.challenge_ack_ts, 11530 &rack->r_ctl.challenge_ack_cnt)) { 11531 return (ret_val); 11532 } 11533 /* 11534 * If last ACK falls within this segment's sequence numbers, record 11535 * its timestamp. NOTE: 1) That the test incorporates suggestions 11536 * from the latest proposal of the tcplw@cray.com list (Braden 11537 * 1993/04/26). 2) That updating only on newer timestamps interferes 11538 * with our earlier PAWS tests, so this check should be solely 11539 * predicated on the sequence space of this segment. 
3) That we 11540 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11541 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11542 * SEG.Len, This modified check allows us to overcome RFC1323's 11543 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11544 * p.869. In such cases, we can still calculate the RTT correctly 11545 * when RCV.NXT == Last.ACK.Sent. 11546 */ 11547 if ((to->to_flags & TOF_TS) != 0 && 11548 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11549 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11550 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11551 tp->ts_recent_age = tcp_ts_getticks(); 11552 tp->ts_recent = to->to_tsval; 11553 } 11554 /* 11555 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11556 * is on (half-synchronized state), then queue data for later 11557 * processing; else drop segment and return. 11558 */ 11559 if ((thflags & TH_ACK) == 0) { 11560 if (tp->t_flags & TF_NEEDSYN) { 11561 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11562 tiwin, thflags, nxt_pkt)); 11563 11564 } else if (tp->t_flags & TF_ACKNOW) { 11565 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11566 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11567 return (ret_val); 11568 } else { 11569 ctf_do_drop(m, NULL); 11570 return (0); 11571 } 11572 } 11573 /* 11574 * Ack processing. 11575 */ 11576 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11577 return (ret_val); 11578 } 11579 if (sbavail(&so->so_snd)) { 11580 if (ctf_progress_timeout_check(tp, true)) { 11581 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11582 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11583 return (1); 11584 } 11585 } 11586 /* State changes only happen in rack_process_data() */ 11587 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11588 tiwin, thflags, nxt_pkt)); 11589 } 11590 11591 /* 11592 * Return value of 1, the TCB is unlocked and most 11593 * likely gone, return value of 0, the TCP is still 11594 * locked. 11595 */ 11596 static int 11597 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11598 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11599 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11600 { 11601 int32_t ret_val = 0; 11602 struct tcp_rack *rack; 11603 11604 rack = (struct tcp_rack *)tp->t_fb_ptr; 11605 ctf_calc_rwin(so, tp); 11606 if ((thflags & TH_RST) || 11607 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11608 return (__ctf_process_rst(m, th, so, tp, 11609 &rack->r_ctl.challenge_ack_ts, 11610 &rack->r_ctl.challenge_ack_cnt)); 11611 /* 11612 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11613 * synchronized state. 11614 */ 11615 if (thflags & TH_SYN) { 11616 ctf_challenge_ack(m, th, tp, &ret_val); 11617 return (ret_val); 11618 } 11619 /* 11620 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11621 * it's less than ts_recent, drop it. 11622 */ 11623 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11624 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11625 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11626 return (ret_val); 11627 } 11628 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11629 &rack->r_ctl.challenge_ack_ts, 11630 &rack->r_ctl.challenge_ack_cnt)) { 11631 return (ret_val); 11632 } 11633 /* 11634 * If last ACK falls within this segment's sequence numbers, record 11635 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 11636 * from the latest proposal of the tcplw@cray.com list (Braden 11637 * 1993/04/26). 2) That updating only on newer timestamps interferes 11638 * with our earlier PAWS tests, so this check should be solely 11639 * predicated on the sequence space of this segment. 3) That we 11640 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11641 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11642 * SEG.Len, This modified check allows us to overcome RFC1323's 11643 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11644 * p.869. In such cases, we can still calculate the RTT correctly 11645 * when RCV.NXT == Last.ACK.Sent. 11646 */ 11647 if ((to->to_flags & TOF_TS) != 0 && 11648 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11649 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11650 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11651 tp->ts_recent_age = tcp_ts_getticks(); 11652 tp->ts_recent = to->to_tsval; 11653 } 11654 /* 11655 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11656 * is on (half-synchronized state), then queue data for later 11657 * processing; else drop segment and return. 11658 */ 11659 if ((thflags & TH_ACK) == 0) { 11660 if (tp->t_flags & TF_NEEDSYN) { 11661 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11662 tiwin, thflags, nxt_pkt)); 11663 11664 } else if (tp->t_flags & TF_ACKNOW) { 11665 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11666 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11667 return (ret_val); 11668 } else { 11669 ctf_do_drop(m, NULL); 11670 return (0); 11671 } 11672 } 11673 /* 11674 * Ack processing. 11675 */ 11676 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11677 return (ret_val); 11678 } 11679 if (sbavail(&so->so_snd)) { 11680 if (ctf_progress_timeout_check(tp, true)) { 11681 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11682 tp, tick, PROGRESS_DROP, __LINE__); 11683 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11684 return (1); 11685 } 11686 } 11687 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11688 tiwin, thflags, nxt_pkt)); 11689 } 11690 11691 static int 11692 rack_check_data_after_close(struct mbuf *m, 11693 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11694 { 11695 struct tcp_rack *rack; 11696 11697 rack = (struct tcp_rack *)tp->t_fb_ptr; 11698 if (rack->rc_allow_data_af_clo == 0) { 11699 close_now: 11700 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11701 /* tcp_close will kill the inp pre-log the Reset */ 11702 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11703 tp = tcp_close(tp); 11704 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11705 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11706 return (1); 11707 } 11708 if (sbavail(&so->so_snd) == 0) 11709 goto close_now; 11710 /* Ok we allow data that is ignored and a followup reset */ 11711 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11712 tp->rcv_nxt = th->th_seq + *tlen; 11713 tp->t_flags2 |= TF2_DROP_AF_DATA; 11714 rack->r_wanted_output = 1; 11715 *tlen = 0; 11716 return (0); 11717 } 11718 11719 /* 11720 * Return value of 1, the TCB is unlocked and most 11721 * likely gone, return value of 0, the TCP is still 11722 * locked. 
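 *
 * An illustrative sketch (not verbatim from this file) of how a caller
 * is expected to honor that convention; the per-state dispatch through
 * rack->r_substate looks roughly like:
 *
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos);
 *	if (retval == 0) {
 *		// 0: tp is still valid and the inp is still locked
 *		INP_WLOCK_ASSERT(tp->t_inpcb);
 *		...
 *	}
 *	// 1: the TCB may already be gone, tp must not be touched again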
11723 */ 11724 static int 11725 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11726 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11727 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11728 { 11729 int32_t ret_val = 0; 11730 int32_t ourfinisacked = 0; 11731 struct tcp_rack *rack; 11732 11733 rack = (struct tcp_rack *)tp->t_fb_ptr; 11734 ctf_calc_rwin(so, tp); 11735 11736 if ((thflags & TH_RST) || 11737 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11738 return (__ctf_process_rst(m, th, so, tp, 11739 &rack->r_ctl.challenge_ack_ts, 11740 &rack->r_ctl.challenge_ack_cnt)); 11741 /* 11742 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11743 * synchronized state. 11744 */ 11745 if (thflags & TH_SYN) { 11746 ctf_challenge_ack(m, th, tp, &ret_val); 11747 return (ret_val); 11748 } 11749 /* 11750 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11751 * it's less than ts_recent, drop it. 11752 */ 11753 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11754 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11755 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11756 return (ret_val); 11757 } 11758 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11759 &rack->r_ctl.challenge_ack_ts, 11760 &rack->r_ctl.challenge_ack_cnt)) { 11761 return (ret_val); 11762 } 11763 /* 11764 * If new data are received on a connection after the user processes 11765 * are gone, then RST the other end. 11766 */ 11767 if ((so->so_state & SS_NOFDREF) && tlen) { 11768 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11769 return (1); 11770 } 11771 /* 11772 * If last ACK falls within this segment's sequence numbers, record 11773 * its timestamp. NOTE: 1) That the test incorporates suggestions 11774 * from the latest proposal of the tcplw@cray.com list (Braden 11775 * 1993/04/26). 2) That updating only on newer timestamps interferes 11776 * with our earlier PAWS tests, so this check should be solely 11777 * predicated on the sequence space of this segment. 3) That we 11778 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11779 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11780 * SEG.Len, This modified check allows us to overcome RFC1323's 11781 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11782 * p.869. In such cases, we can still calculate the RTT correctly 11783 * when RCV.NXT == Last.ACK.Sent. 11784 */ 11785 if ((to->to_flags & TOF_TS) != 0 && 11786 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11787 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11788 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11789 tp->ts_recent_age = tcp_ts_getticks(); 11790 tp->ts_recent = to->to_tsval; 11791 } 11792 /* 11793 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11794 * is on (half-synchronized state), then queue data for later 11795 * processing; else drop segment and return. 11796 */ 11797 if ((thflags & TH_ACK) == 0) { 11798 if (tp->t_flags & TF_NEEDSYN) { 11799 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11800 tiwin, thflags, nxt_pkt)); 11801 } else if (tp->t_flags & TF_ACKNOW) { 11802 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11803 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11804 return (ret_val); 11805 } else { 11806 ctf_do_drop(m, NULL); 11807 return (0); 11808 } 11809 } 11810 /* 11811 * Ack processing. 
11812 */ 11813 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11814 return (ret_val); 11815 } 11816 if (ourfinisacked) { 11817 /* 11818 * If we can't receive any more data, then closing user can 11819 * proceed. Starting the timer is contrary to the 11820 * specification, but if we don't get a FIN we'll hang 11821 * forever. 11822 * 11823 * XXXjl: we should release the tp also, and use a 11824 * compressed state. 11825 */ 11826 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11827 soisdisconnected(so); 11828 tcp_timer_activate(tp, TT_2MSL, 11829 (tcp_fast_finwait2_recycle ? 11830 tcp_finwait2_timeout : 11831 TP_MAXIDLE(tp))); 11832 } 11833 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11834 } 11835 if (sbavail(&so->so_snd)) { 11836 if (ctf_progress_timeout_check(tp, true)) { 11837 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11838 tp, tick, PROGRESS_DROP, __LINE__); 11839 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11840 return (1); 11841 } 11842 } 11843 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11844 tiwin, thflags, nxt_pkt)); 11845 } 11846 11847 /* 11848 * Return value of 1, the TCB is unlocked and most 11849 * likely gone, return value of 0, the TCP is still 11850 * locked. 11851 */ 11852 static int 11853 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11854 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11855 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11856 { 11857 int32_t ret_val = 0; 11858 int32_t ourfinisacked = 0; 11859 struct tcp_rack *rack; 11860 11861 rack = (struct tcp_rack *)tp->t_fb_ptr; 11862 ctf_calc_rwin(so, tp); 11863 11864 if ((thflags & TH_RST) || 11865 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11866 return (__ctf_process_rst(m, th, so, tp, 11867 &rack->r_ctl.challenge_ack_ts, 11868 &rack->r_ctl.challenge_ack_cnt)); 11869 /* 11870 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11871 * synchronized state. 11872 */ 11873 if (thflags & TH_SYN) { 11874 ctf_challenge_ack(m, th, tp, &ret_val); 11875 return (ret_val); 11876 } 11877 /* 11878 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11879 * it's less than ts_recent, drop it. 11880 */ 11881 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11882 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11883 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11884 return (ret_val); 11885 } 11886 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11887 &rack->r_ctl.challenge_ack_ts, 11888 &rack->r_ctl.challenge_ack_cnt)) { 11889 return (ret_val); 11890 } 11891 /* 11892 * If new data are received on a connection after the user processes 11893 * are gone, then RST the other end. 11894 */ 11895 if ((so->so_state & SS_NOFDREF) && tlen) { 11896 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11897 return (1); 11898 } 11899 /* 11900 * If last ACK falls within this segment's sequence numbers, record 11901 * its timestamp. NOTE: 1) That the test incorporates suggestions 11902 * from the latest proposal of the tcplw@cray.com list (Braden 11903 * 1993/04/26). 2) That updating only on newer timestamps interferes 11904 * with our earlier PAWS tests, so this check should be solely 11905 * predicated on the sequence space of this segment. 
3) That we 11906 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11907 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11908 * SEG.Len, This modified check allows us to overcome RFC1323's 11909 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11910 * p.869. In such cases, we can still calculate the RTT correctly 11911 * when RCV.NXT == Last.ACK.Sent. 11912 */ 11913 if ((to->to_flags & TOF_TS) != 0 && 11914 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11915 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11916 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11917 tp->ts_recent_age = tcp_ts_getticks(); 11918 tp->ts_recent = to->to_tsval; 11919 } 11920 /* 11921 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11922 * is on (half-synchronized state), then queue data for later 11923 * processing; else drop segment and return. 11924 */ 11925 if ((thflags & TH_ACK) == 0) { 11926 if (tp->t_flags & TF_NEEDSYN) { 11927 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11928 tiwin, thflags, nxt_pkt)); 11929 } else if (tp->t_flags & TF_ACKNOW) { 11930 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11931 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11932 return (ret_val); 11933 } else { 11934 ctf_do_drop(m, NULL); 11935 return (0); 11936 } 11937 } 11938 /* 11939 * Ack processing. 11940 */ 11941 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11942 return (ret_val); 11943 } 11944 if (ourfinisacked) { 11945 tcp_twstart(tp); 11946 m_freem(m); 11947 return (1); 11948 } 11949 if (sbavail(&so->so_snd)) { 11950 if (ctf_progress_timeout_check(tp, true)) { 11951 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11952 tp, tick, PROGRESS_DROP, __LINE__); 11953 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11954 return (1); 11955 } 11956 } 11957 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11958 tiwin, thflags, nxt_pkt)); 11959 } 11960 11961 /* 11962 * Return value of 1, the TCB is unlocked and most 11963 * likely gone, return value of 0, the TCP is still 11964 * locked. 11965 */ 11966 static int 11967 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11968 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11969 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11970 { 11971 int32_t ret_val = 0; 11972 int32_t ourfinisacked = 0; 11973 struct tcp_rack *rack; 11974 11975 rack = (struct tcp_rack *)tp->t_fb_ptr; 11976 ctf_calc_rwin(so, tp); 11977 11978 if ((thflags & TH_RST) || 11979 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11980 return (__ctf_process_rst(m, th, so, tp, 11981 &rack->r_ctl.challenge_ack_ts, 11982 &rack->r_ctl.challenge_ack_cnt)); 11983 /* 11984 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11985 * synchronized state. 11986 */ 11987 if (thflags & TH_SYN) { 11988 ctf_challenge_ack(m, th, tp, &ret_val); 11989 return (ret_val); 11990 } 11991 /* 11992 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11993 * it's less than ts_recent, drop it. 
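 *
 * Illustrative example (no additional logic): with ts_recent at
 * 0x00001000, an arriving TSval of 0x00000ffa satisfies
 * TSTMP_LT(to->to_tsval, tp->ts_recent) under the modulo-2^32
 * comparison, so the segment predates one we already accepted and
 * ctf_ts_check() below is expected to drop (and ack) it, unless
 * ts_recent has idled past the PAWS window (roughly 24 days), in
 * which case the stale timestamp state is ignored instead.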
11994 */ 11995 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11996 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11997 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11998 return (ret_val); 11999 } 12000 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12001 &rack->r_ctl.challenge_ack_ts, 12002 &rack->r_ctl.challenge_ack_cnt)) { 12003 return (ret_val); 12004 } 12005 /* 12006 * If new data are received on a connection after the user processes 12007 * are gone, then RST the other end. 12008 */ 12009 if ((so->so_state & SS_NOFDREF) && tlen) { 12010 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12011 return (1); 12012 } 12013 /* 12014 * If last ACK falls within this segment's sequence numbers, record 12015 * its timestamp. NOTE: 1) That the test incorporates suggestions 12016 * from the latest proposal of the tcplw@cray.com list (Braden 12017 * 1993/04/26). 2) That updating only on newer timestamps interferes 12018 * with our earlier PAWS tests, so this check should be solely 12019 * predicated on the sequence space of this segment. 3) That we 12020 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12021 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12022 * SEG.Len, This modified check allows us to overcome RFC1323's 12023 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12024 * p.869. In such cases, we can still calculate the RTT correctly 12025 * when RCV.NXT == Last.ACK.Sent. 12026 */ 12027 if ((to->to_flags & TOF_TS) != 0 && 12028 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12029 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12030 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12031 tp->ts_recent_age = tcp_ts_getticks(); 12032 tp->ts_recent = to->to_tsval; 12033 } 12034 /* 12035 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12036 * is on (half-synchronized state), then queue data for later 12037 * processing; else drop segment and return. 12038 */ 12039 if ((thflags & TH_ACK) == 0) { 12040 if (tp->t_flags & TF_NEEDSYN) { 12041 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12042 tiwin, thflags, nxt_pkt)); 12043 } else if (tp->t_flags & TF_ACKNOW) { 12044 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12045 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12046 return (ret_val); 12047 } else { 12048 ctf_do_drop(m, NULL); 12049 return (0); 12050 } 12051 } 12052 /* 12053 * case TCPS_LAST_ACK: Ack processing. 12054 */ 12055 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12056 return (ret_val); 12057 } 12058 if (ourfinisacked) { 12059 tp = tcp_close(tp); 12060 ctf_do_drop(m, tp); 12061 return (1); 12062 } 12063 if (sbavail(&so->so_snd)) { 12064 if (ctf_progress_timeout_check(tp, true)) { 12065 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12066 tp, tick, PROGRESS_DROP, __LINE__); 12067 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12068 return (1); 12069 } 12070 } 12071 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12072 tiwin, thflags, nxt_pkt)); 12073 } 12074 12075 /* 12076 * Return value of 1, the TCB is unlocked and most 12077 * likely gone, return value of 0, the TCP is still 12078 * locked. 
12079 */ 12080 static int 12081 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12082 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12083 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12084 { 12085 int32_t ret_val = 0; 12086 int32_t ourfinisacked = 0; 12087 struct tcp_rack *rack; 12088 12089 rack = (struct tcp_rack *)tp->t_fb_ptr; 12090 ctf_calc_rwin(so, tp); 12091 12092 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12093 if ((thflags & TH_RST) || 12094 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12095 return (__ctf_process_rst(m, th, so, tp, 12096 &rack->r_ctl.challenge_ack_ts, 12097 &rack->r_ctl.challenge_ack_cnt)); 12098 /* 12099 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12100 * synchronized state. 12101 */ 12102 if (thflags & TH_SYN) { 12103 ctf_challenge_ack(m, th, tp, &ret_val); 12104 return (ret_val); 12105 } 12106 /* 12107 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12108 * it's less than ts_recent, drop it. 12109 */ 12110 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12111 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12112 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12113 return (ret_val); 12114 } 12115 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12116 &rack->r_ctl.challenge_ack_ts, 12117 &rack->r_ctl.challenge_ack_cnt)) { 12118 return (ret_val); 12119 } 12120 /* 12121 * If new data are received on a connection after the user processes 12122 * are gone, then RST the other end. 12123 */ 12124 if ((so->so_state & SS_NOFDREF) && 12125 tlen) { 12126 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12127 return (1); 12128 } 12129 /* 12130 * If last ACK falls within this segment's sequence numbers, record 12131 * its timestamp. NOTE: 1) That the test incorporates suggestions 12132 * from the latest proposal of the tcplw@cray.com list (Braden 12133 * 1993/04/26). 2) That updating only on newer timestamps interferes 12134 * with our earlier PAWS tests, so this check should be solely 12135 * predicated on the sequence space of this segment. 3) That we 12136 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12137 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12138 * SEG.Len, This modified check allows us to overcome RFC1323's 12139 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12140 * p.869. In such cases, we can still calculate the RTT correctly 12141 * when RCV.NXT == Last.ACK.Sent. 12142 */ 12143 if ((to->to_flags & TOF_TS) != 0 && 12144 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12145 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12146 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12147 tp->ts_recent_age = tcp_ts_getticks(); 12148 tp->ts_recent = to->to_tsval; 12149 } 12150 /* 12151 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12152 * is on (half-synchronized state), then queue data for later 12153 * processing; else drop segment and return. 12154 */ 12155 if ((thflags & TH_ACK) == 0) { 12156 if (tp->t_flags & TF_NEEDSYN) { 12157 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12158 tiwin, thflags, nxt_pkt)); 12159 } else if (tp->t_flags & TF_ACKNOW) { 12160 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12161 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12162 return (ret_val); 12163 } else { 12164 ctf_do_drop(m, NULL); 12165 return (0); 12166 } 12167 } 12168 /* 12169 * Ack processing. 
12170 */ 12171 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12172 return (ret_val); 12173 } 12174 if (sbavail(&so->so_snd)) { 12175 if (ctf_progress_timeout_check(tp, true)) { 12176 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12177 tp, tick, PROGRESS_DROP, __LINE__); 12178 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12179 return (1); 12180 } 12181 } 12182 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12183 tiwin, thflags, nxt_pkt)); 12184 } 12185 12186 static void inline 12187 rack_clear_rate_sample(struct tcp_rack *rack) 12188 { 12189 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12190 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12191 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12192 } 12193 12194 static void 12195 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12196 { 12197 uint64_t bw_est, rate_wanted; 12198 int chged = 0; 12199 uint32_t user_max, orig_min, orig_max; 12200 12201 orig_min = rack->r_ctl.rc_pace_min_segs; 12202 orig_max = rack->r_ctl.rc_pace_max_segs; 12203 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12204 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12205 chged = 1; 12206 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12207 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12208 if (user_max != rack->r_ctl.rc_pace_max_segs) 12209 chged = 1; 12210 } 12211 if (rack->rc_force_max_seg) { 12212 rack->r_ctl.rc_pace_max_segs = user_max; 12213 } else if (rack->use_fixed_rate) { 12214 bw_est = rack_get_bw(rack); 12215 if ((rack->r_ctl.crte == NULL) || 12216 (bw_est != rack->r_ctl.crte->rate)) { 12217 rack->r_ctl.rc_pace_max_segs = user_max; 12218 } else { 12219 /* We are pacing right at the hardware rate */ 12220 uint32_t segsiz; 12221 12222 segsiz = min(ctf_fixed_maxseg(tp), 12223 rack->r_ctl.rc_pace_min_segs); 12224 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12225 tp, bw_est, segsiz, 0, 12226 rack->r_ctl.crte, NULL); 12227 } 12228 } else if (rack->rc_always_pace) { 12229 if (rack->r_ctl.gp_bw || 12230 #ifdef NETFLIX_PEAKRATE 12231 rack->rc_tp->t_maxpeakrate || 12232 #endif 12233 rack->r_ctl.init_rate) { 12234 /* We have a rate of some sort set */ 12235 uint32_t orig; 12236 12237 bw_est = rack_get_bw(rack); 12238 orig = rack->r_ctl.rc_pace_max_segs; 12239 if (fill_override) 12240 rate_wanted = *fill_override; 12241 else 12242 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12243 if (rate_wanted) { 12244 /* We have something */ 12245 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12246 rate_wanted, 12247 ctf_fixed_maxseg(rack->rc_tp)); 12248 } else 12249 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12250 if (orig != rack->r_ctl.rc_pace_max_segs) 12251 chged = 1; 12252 } else if ((rack->r_ctl.gp_bw == 0) && 12253 (rack->r_ctl.rc_pace_max_segs == 0)) { 12254 /* 12255 * If we have nothing limit us to bursting 12256 * out IW sized pieces. 
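 *
 * For example (illustrative numbers, assuming a 1448 byte MSS and a
 * 10 segment initial window): rc_init_window() would cap a pacing
 * burst at roughly 14480 bytes here, until a goodput estimate or a
 * configured rate gives us something better to size bursts from.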
12257 */ 12258 chged = 1; 12259 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12260 } 12261 } 12262 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12263 chged = 1; 12264 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12265 } 12266 if (chged) 12267 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12268 } 12269 12270 12271 static void 12272 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12273 { 12274 #ifdef INET6 12275 struct ip6_hdr *ip6 = NULL; 12276 #endif 12277 #ifdef INET 12278 struct ip *ip = NULL; 12279 #endif 12280 struct udphdr *udp = NULL; 12281 12282 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12283 #ifdef INET6 12284 if (rack->r_is_v6) { 12285 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12286 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12287 if (tp->t_port) { 12288 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12289 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12290 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12291 udp->uh_dport = tp->t_port; 12292 rack->r_ctl.fsb.udp = udp; 12293 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12294 } else 12295 { 12296 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12297 rack->r_ctl.fsb.udp = NULL; 12298 } 12299 tcpip_fillheaders(rack->rc_inp, 12300 tp->t_port, 12301 ip6, rack->r_ctl.fsb.th); 12302 } else 12303 #endif /* INET6 */ 12304 { 12305 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12306 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12307 if (tp->t_port) { 12308 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12309 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12310 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12311 udp->uh_dport = tp->t_port; 12312 rack->r_ctl.fsb.udp = udp; 12313 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12314 } else 12315 { 12316 rack->r_ctl.fsb.udp = NULL; 12317 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12318 } 12319 tcpip_fillheaders(rack->rc_inp, 12320 tp->t_port, 12321 ip, rack->r_ctl.fsb.th); 12322 } 12323 rack->r_fsb_inited = 1; 12324 } 12325 12326 static int 12327 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12328 { 12329 /* 12330 * Allocate the larger of spaces V6 if available else just 12331 * V4 and include udphdr (overbook) 12332 */ 12333 #ifdef INET6 12334 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12335 #else 12336 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12337 #endif 12338 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12339 M_TCPFSB, M_NOWAIT|M_ZERO); 12340 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12341 return (ENOMEM); 12342 } 12343 rack->r_fsb_inited = 0; 12344 return (0); 12345 } 12346 12347 static int 12348 rack_init(struct tcpcb *tp) 12349 { 12350 struct tcp_rack *rack = NULL; 12351 #ifdef INVARIANTS 12352 struct rack_sendmap *insret; 12353 #endif 12354 uint32_t iwin, snt, us_cts; 12355 int err; 12356 12357 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12358 if (tp->t_fb_ptr == NULL) { 12359 /* 12360 * We need to allocate memory but cant. The INP and INP_INFO 12361 * locks and they are recusive (happens during setup. 
So a 12362 * scheme to drop the locks fails :( 12363 * 12364 */ 12365 return (ENOMEM); 12366 } 12367 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12368 12369 rack = (struct tcp_rack *)tp->t_fb_ptr; 12370 RB_INIT(&rack->r_ctl.rc_mtree); 12371 TAILQ_INIT(&rack->r_ctl.rc_free); 12372 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12373 rack->rc_tp = tp; 12374 rack->rc_inp = tp->t_inpcb; 12375 /* Set the flag */ 12376 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12377 /* Probably not needed but lets be sure */ 12378 rack_clear_rate_sample(rack); 12379 /* 12380 * Save off the default values, socket options will poke 12381 * at these if pacing is not on or we have not yet 12382 * reached where pacing is on (gp_ready/fixed enabled). 12383 * When they get set into the CC module (when gp_ready 12384 * is enabled or we enable fixed) then we will set these 12385 * values into the CC and place in here the old values 12386 * so we have a restoral. Then we will set the flag 12387 * rc_pacing_cc_set. That way whenever we turn off pacing 12388 * or switch off this stack, we will know to go restore 12389 * the saved values. 12390 */ 12391 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12392 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12393 /* We want abe like behavior as well */ 12394 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12395 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12396 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12397 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12398 rack->r_ctl.roundends = tp->snd_max; 12399 if (use_rack_rr) 12400 rack->use_rack_rr = 1; 12401 if (V_tcp_delack_enabled) 12402 tp->t_delayed_ack = 1; 12403 else 12404 tp->t_delayed_ack = 0; 12405 #ifdef TCP_ACCOUNTING 12406 if (rack_tcp_accounting) { 12407 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12408 } 12409 #endif 12410 if (rack_enable_shared_cwnd) 12411 rack->rack_enable_scwnd = 1; 12412 rack->rc_user_set_max_segs = rack_hptsi_segments; 12413 rack->rc_force_max_seg = 0; 12414 if (rack_use_imac_dack) 12415 rack->rc_dack_mode = 1; 12416 TAILQ_INIT(&rack->r_ctl.opt_list); 12417 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12418 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12419 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12420 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12421 rack->r_ctl.rc_highest_us_rtt = 0; 12422 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12423 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12424 if (rack_use_cmp_acks) 12425 rack->r_use_cmp_ack = 1; 12426 if (rack_disable_prr) 12427 rack->rack_no_prr = 1; 12428 if (rack_gp_no_rec_chg) 12429 rack->rc_gp_no_rec_chg = 1; 12430 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12431 rack->rc_always_pace = 1; 12432 if (rack->use_fixed_rate || rack->gp_ready) 12433 rack_set_cc_pacing(rack); 12434 } else 12435 rack->rc_always_pace = 0; 12436 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12437 rack->r_mbuf_queue = 1; 12438 else 12439 rack->r_mbuf_queue = 0; 12440 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12441 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12442 else 12443 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12444 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12445 if (rack_limits_scwnd) 12446 rack->r_limit_scw = 1; 12447 else 12448 rack->r_limit_scw = 0; 12449 rack->rc_labc = V_tcp_abc_l_var; 12450 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12451 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12452 
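	/*
	 * Illustrative recap (no new logic) of the mbuf-queueing choice
	 * made a few lines above; the LRO/driver input path keys off
	 * this flag:
	 *
	 *	r_mbuf_queue || rc_always_pace || r_use_cmp_ack
	 *	    true  -> inp_flags2 |=  INP_SUPPORTS_MBUFQ
	 *	    false -> inp_flags2 &= ~INP_SUPPORTS_MBUFQ
	 */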
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12453 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12454 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12455 rack->r_ctl.rc_min_to = rack_min_to; 12456 microuptime(&rack->r_ctl.act_rcv_time); 12457 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12458 rack->rc_init_win = rack_default_init_window; 12459 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12460 if (rack_hw_up_only) 12461 rack->r_up_only = 1; 12462 if (rack_do_dyn_mul) { 12463 /* When dynamic adjustment is on CA needs to start at 100% */ 12464 rack->rc_gp_dyn_mul = 1; 12465 if (rack_do_dyn_mul >= 100) 12466 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12467 } else 12468 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12469 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12470 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12471 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12472 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12473 rack_probertt_filter_life); 12474 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12475 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12476 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12477 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12478 rack->r_ctl.rc_time_probertt_starts = 0; 12479 if (rack_dsack_std_based & 0x1) { 12480 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12481 rack->rc_rack_tmr_std_based = 1; 12482 } 12483 if (rack_dsack_std_based & 0x2) { 12484 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12485 rack->rc_rack_use_dsack = 1; 12486 } 12487 /* We require at least one measurement, even if the sysctl is 0 */ 12488 if (rack_req_measurements) 12489 rack->r_ctl.req_measurements = rack_req_measurements; 12490 else 12491 rack->r_ctl.req_measurements = 1; 12492 if (rack_enable_hw_pacing) 12493 rack->rack_hdw_pace_ena = 1; 12494 if (rack_hw_rate_caps) 12495 rack->r_rack_hw_rate_caps = 1; 12496 /* Do we force on detection? 
*/ 12497 #ifdef NETFLIX_EXP_DETECTION 12498 if (tcp_force_detection) 12499 rack->do_detection = 1; 12500 else 12501 #endif 12502 rack->do_detection = 0; 12503 if (rack_non_rxt_use_cr) 12504 rack->rack_rec_nonrxt_use_cr = 1; 12505 err = rack_init_fsb(tp, rack); 12506 if (err) { 12507 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12508 tp->t_fb_ptr = NULL; 12509 return (err); 12510 } 12511 if (tp->snd_una != tp->snd_max) { 12512 /* Create a send map for the current outstanding data */ 12513 struct rack_sendmap *rsm; 12514 12515 rsm = rack_alloc(rack); 12516 if (rsm == NULL) { 12517 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12518 tp->t_fb_ptr = NULL; 12519 return (ENOMEM); 12520 } 12521 rsm->r_no_rtt_allowed = 1; 12522 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12523 rsm->r_rtr_cnt = 1; 12524 rsm->r_rtr_bytes = 0; 12525 if (tp->t_flags & TF_SENTFIN) { 12526 rsm->r_end = tp->snd_max - 1; 12527 rsm->r_flags |= RACK_HAS_FIN; 12528 } else { 12529 rsm->r_end = tp->snd_max; 12530 } 12531 if (tp->snd_una == tp->iss) { 12532 /* The data space is one beyond snd_una */ 12533 rsm->r_flags |= RACK_HAS_SYN; 12534 rsm->r_start = tp->iss; 12535 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12536 } else 12537 rsm->r_start = tp->snd_una; 12538 rsm->r_dupack = 0; 12539 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12540 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12541 if (rsm->m) 12542 rsm->orig_m_len = rsm->m->m_len; 12543 else 12544 rsm->orig_m_len = 0; 12545 } else { 12546 /* 12547 * This can happen if we have a stand-alone FIN or 12548 * SYN. 12549 */ 12550 rsm->m = NULL; 12551 rsm->orig_m_len = 0; 12552 rsm->soff = 0; 12553 } 12554 #ifndef INVARIANTS 12555 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12556 #else 12557 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12558 if (insret != NULL) { 12559 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12560 insret, rack, rsm); 12561 } 12562 #endif 12563 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12564 rsm->r_in_tmap = 1; 12565 } 12566 /* 12567 * Timers in Rack are kept in microseconds so lets 12568 * convert any initial incoming variables 12569 * from ticks into usecs. Note that we 12570 * also change the values of t_srtt and t_rttvar, if 12571 * they are non-zero. They are kept with a 5 12572 * bit decimal so we have to carefully convert 12573 * these to get the full precision. 12574 */ 12575 rack_convert_rtts(tp); 12576 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12577 if (rack_do_hystart) { 12578 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 12579 if (rack_do_hystart > 1) 12580 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 12581 if (rack_do_hystart > 2) 12582 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 12583 } 12584 if (rack_def_profile) 12585 rack_set_profile(rack, rack_def_profile); 12586 /* Cancel the GP measurement in progress */ 12587 tp->t_flags &= ~TF_GPUTINPROG; 12588 if (SEQ_GT(tp->snd_max, tp->iss)) 12589 snt = tp->snd_max - tp->iss; 12590 else 12591 snt = 0; 12592 iwin = rc_init_window(rack); 12593 if (snt < iwin) { 12594 /* We are not past the initial window 12595 * so we need to make sure cwnd is 12596 * correct. 12597 */ 12598 if (tp->snd_cwnd < iwin) 12599 tp->snd_cwnd = iwin; 12600 /* 12601 * If we are within the initial window 12602 * we want ssthresh to be unlimited. 
Setting 12603 * it to the rwnd (which the default stack does 12604 * and older racks) is not really a good idea 12605 * since we want to be in SS and grow both the 12606 * cwnd and the rwnd (via dynamic rwnd growth). If 12607 * we set it to the rwnd then as the peer grows its 12608 * rwnd we will be stuck in CA and never hit SS. 12609 * 12610 * Its far better to raise it up high (this takes the 12611 * risk that there as been a loss already, probably 12612 * we should have an indicator in all stacks of loss 12613 * but we don't), but considering the normal use this 12614 * is a risk worth taking. The consequences of not 12615 * hitting SS are far worse than going one more time 12616 * into it early on (before we have sent even a IW). 12617 * It is highly unlikely that we will have had a loss 12618 * before getting the IW out. 12619 */ 12620 tp->snd_ssthresh = 0xffffffff; 12621 } 12622 rack_stop_all_timers(tp); 12623 /* Lets setup the fsb block */ 12624 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12625 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12626 __LINE__, RACK_RTTS_INIT); 12627 return (0); 12628 } 12629 12630 static int 12631 rack_handoff_ok(struct tcpcb *tp) 12632 { 12633 if ((tp->t_state == TCPS_CLOSED) || 12634 (tp->t_state == TCPS_LISTEN)) { 12635 /* Sure no problem though it may not stick */ 12636 return (0); 12637 } 12638 if ((tp->t_state == TCPS_SYN_SENT) || 12639 (tp->t_state == TCPS_SYN_RECEIVED)) { 12640 /* 12641 * We really don't know if you support sack, 12642 * you have to get to ESTAB or beyond to tell. 12643 */ 12644 return (EAGAIN); 12645 } 12646 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12647 /* 12648 * Rack will only send a FIN after all data is acknowledged. 12649 * So in this case we have more data outstanding. We can't 12650 * switch stacks until either all data and only the FIN 12651 * is left (in which case rack_init() now knows how 12652 * to deal with that) <or> all is acknowledged and we 12653 * are only left with incoming data, though why you 12654 * would want to switch to rack after all data is acknowledged 12655 * I have no idea (rrs)! 12656 */ 12657 return (EAGAIN); 12658 } 12659 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12660 return (0); 12661 } 12662 /* 12663 * If we reach here we don't do SACK on this connection so we can 12664 * never do rack. 12665 */ 12666 return (EINVAL); 12667 } 12668 12669 12670 static void 12671 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12672 { 12673 if (tp->t_fb_ptr) { 12674 struct tcp_rack *rack; 12675 struct rack_sendmap *rsm, *nrsm; 12676 #ifdef INVARIANTS 12677 struct rack_sendmap *rm; 12678 #endif 12679 12680 rack = (struct tcp_rack *)tp->t_fb_ptr; 12681 if (tp->t_in_pkt) { 12682 /* 12683 * It is unsafe to process the packets since a 12684 * reset may be lurking in them (its rare but it 12685 * can occur). If we were to find a RST, then we 12686 * would end up dropping the connection and the 12687 * INP lock, so when we return the caller (tcp_usrreq) 12688 * will blow up when it trys to unlock the inp. 
12689 */ 12690 struct mbuf *save, *m; 12691 12692 m = tp->t_in_pkt; 12693 tp->t_in_pkt = NULL; 12694 tp->t_tail_pkt = NULL; 12695 while (m) { 12696 save = m->m_nextpkt; 12697 m->m_nextpkt = NULL; 12698 m_freem(m); 12699 m = save; 12700 } 12701 } 12702 tp->t_flags &= ~TF_FORCEDATA; 12703 #ifdef NETFLIX_SHARED_CWND 12704 if (rack->r_ctl.rc_scw) { 12705 uint32_t limit; 12706 12707 if (rack->r_limit_scw) 12708 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12709 else 12710 limit = 0; 12711 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12712 rack->r_ctl.rc_scw_index, 12713 limit); 12714 rack->r_ctl.rc_scw = NULL; 12715 } 12716 #endif 12717 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12718 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12719 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12720 rack->r_ctl.fsb.th = NULL; 12721 } 12722 /* Convert back to ticks, carrying the leftover usecs as the fraction. */ 12723 if (tp->t_srtt > 1) { 12724 uint32_t val, frac; 12725 12726 val = USEC_2_TICKS(tp->t_srtt); 12727 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12728 tp->t_srtt = val << TCP_RTT_SHIFT; 12729 /* 12730 * frac is the fractional part that is left 12731 * over from converting to hz and shifting. 12732 * We need to convert this to the 5 bit 12733 * remainder. 12734 */ 12735 if (frac) { 12736 if (hz == 1000) { 12737 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12738 } else { 12739 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12740 } 12741 tp->t_srtt += frac; 12742 } 12743 } 12744 if (tp->t_rttvar) { 12745 uint32_t val, frac; 12746 12747 val = USEC_2_TICKS(tp->t_rttvar); 12748 frac = tp->t_rttvar % (HPTS_USEC_IN_SEC / hz); 12749 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12750 /* 12751 * frac is the fractional part that is left 12752 * over from converting to hz and shifting. 12753 * We need to convert this to the 5 bit 12754 * remainder.
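 *
 * Worked example of this conversion scheme (illustrative, assuming
 * hz = 1000, shown for the t_srtt case above): a smoothed RTT of
 * 30500 usec gives val = USEC_2_TICKS(30500) = 30 ticks with
 * frac = 30500 % 1000 = 500 usec left over; val << TCP_RTT_SHIFT is
 * 960 and the remainder scales to
 * (500 * TCP_RTT_SCALE) / HPTS_USEC_IN_MSEC = (500 * 32) / 1000 = 16,
 * so the stored fixed point value is 976, i.e. 30.5 ticks.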
12755 */ 12756 if (frac) { 12757 if (hz == 1000) { 12758 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12759 } else { 12760 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12761 } 12762 tp->t_rttvar += frac; 12763 } 12764 } 12765 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12766 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12767 if (rack->rc_always_pace) { 12768 tcp_decrement_paced_conn(); 12769 rack_undo_cc_pacing(rack); 12770 rack->rc_always_pace = 0; 12771 } 12772 /* Clean up any options if they were not applied */ 12773 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12774 struct deferred_opt_list *dol; 12775 12776 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12777 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12778 free(dol, M_TCPDO); 12779 } 12780 /* rack does not use force data but other stacks may clear it */ 12781 if (rack->r_ctl.crte != NULL) { 12782 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12783 rack->rack_hdrw_pacing = 0; 12784 rack->r_ctl.crte = NULL; 12785 } 12786 #ifdef TCP_BLACKBOX 12787 tcp_log_flowend(tp); 12788 #endif 12789 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12790 #ifndef INVARIANTS 12791 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12792 #else 12793 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12794 if (rm != rsm) { 12795 panic("At fini, rack:%p rsm:%p rm:%p", 12796 rack, rsm, rm); 12797 } 12798 #endif 12799 uma_zfree(rack_zone, rsm); 12800 } 12801 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12802 while (rsm) { 12803 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12804 uma_zfree(rack_zone, rsm); 12805 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12806 } 12807 rack->rc_free_cnt = 0; 12808 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12809 tp->t_fb_ptr = NULL; 12810 } 12811 if (tp->t_inpcb) { 12812 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12813 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12814 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12815 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 12816 /* Cancel the GP measurement in progress */ 12817 tp->t_flags &= ~TF_GPUTINPROG; 12818 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 12819 } 12820 /* Make sure snd_nxt is correctly set */ 12821 tp->snd_nxt = tp->snd_max; 12822 } 12823 12824 static void 12825 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12826 { 12827 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12828 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12829 } 12830 switch (tp->t_state) { 12831 case TCPS_SYN_SENT: 12832 rack->r_state = TCPS_SYN_SENT; 12833 rack->r_substate = rack_do_syn_sent; 12834 break; 12835 case TCPS_SYN_RECEIVED: 12836 rack->r_state = TCPS_SYN_RECEIVED; 12837 rack->r_substate = rack_do_syn_recv; 12838 break; 12839 case TCPS_ESTABLISHED: 12840 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12841 rack->r_state = TCPS_ESTABLISHED; 12842 rack->r_substate = rack_do_established; 12843 break; 12844 case TCPS_CLOSE_WAIT: 12845 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12846 rack->r_state = TCPS_CLOSE_WAIT; 12847 rack->r_substate = rack_do_close_wait; 12848 break; 12849 case TCPS_FIN_WAIT_1: 12850 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12851 rack->r_state = TCPS_FIN_WAIT_1; 12852 rack->r_substate = rack_do_fin_wait_1; 12853 break; 12854 case TCPS_CLOSING: 12855 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12856 rack->r_state = TCPS_CLOSING; 12857 rack->r_substate = 
rack_do_closing; 12858 break; 12859 case TCPS_LAST_ACK: 12860 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12861 rack->r_state = TCPS_LAST_ACK; 12862 rack->r_substate = rack_do_lastack; 12863 break; 12864 case TCPS_FIN_WAIT_2: 12865 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12866 rack->r_state = TCPS_FIN_WAIT_2; 12867 rack->r_substate = rack_do_fin_wait_2; 12868 break; 12869 case TCPS_LISTEN: 12870 case TCPS_CLOSED: 12871 case TCPS_TIME_WAIT: 12872 default: 12873 break; 12874 }; 12875 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12876 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12877 12878 } 12879 12880 static void 12881 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12882 { 12883 /* 12884 * We received an ack, and then did not 12885 * call send or were bounced out due to the 12886 * hpts was running. Now a timer is up as well, is 12887 * it the right timer? 12888 */ 12889 struct rack_sendmap *rsm; 12890 int tmr_up; 12891 12892 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12893 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12894 return; 12895 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12896 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12897 (tmr_up == PACE_TMR_RXT)) { 12898 /* Should be an RXT */ 12899 return; 12900 } 12901 if (rsm == NULL) { 12902 /* Nothing outstanding? */ 12903 if (tp->t_flags & TF_DELACK) { 12904 if (tmr_up == PACE_TMR_DELACK) 12905 /* We are supposed to have delayed ack up and we do */ 12906 return; 12907 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12908 /* 12909 * if we hit enobufs then we would expect the possiblity 12910 * of nothing outstanding and the RXT up (and the hptsi timer). 12911 */ 12912 return; 12913 } else if (((V_tcp_always_keepalive || 12914 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12915 (tp->t_state <= TCPS_CLOSING)) && 12916 (tmr_up == PACE_TMR_KEEP) && 12917 (tp->snd_max == tp->snd_una)) { 12918 /* We should have keep alive up and we do */ 12919 return; 12920 } 12921 } 12922 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12923 ((tmr_up == PACE_TMR_TLP) || 12924 (tmr_up == PACE_TMR_RACK) || 12925 (tmr_up == PACE_TMR_RXT))) { 12926 /* 12927 * Either a Rack, TLP or RXT is fine if we 12928 * have outstanding data. 12929 */ 12930 return; 12931 } else if (tmr_up == PACE_TMR_DELACK) { 12932 /* 12933 * If the delayed ack was going to go off 12934 * before the rtx/tlp/rack timer were going to 12935 * expire, then that would be the timer in control. 12936 * Note we don't check the time here trusting the 12937 * code is correct. 12938 */ 12939 return; 12940 } 12941 /* 12942 * Ok the timer originally started is not what we want now. 12943 * We will force the hpts to be stopped if any, and restart 12944 * with the slot set to what was in the saved slot. 
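 *
 * For example (illustrative): if only a delayed-ack timer is pending
 * but data is now outstanding, the hpts slot below is torn down (any
 * early-output time is credited to rc_agg_early) and
 * rack_start_hpts_timer() is re-armed so a RACK/TLP/RXT timer gets
 * chosen instead.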
12945 */ 12946 if (tcp_in_hpts(rack->rc_inp)) { 12947 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12948 uint32_t us_cts; 12949 12950 us_cts = tcp_get_usecs(NULL); 12951 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12952 rack->r_early = 1; 12953 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12954 } 12955 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 12956 } 12957 tcp_hpts_remove(tp->t_inpcb); 12958 } 12959 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12960 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12961 } 12962 12963 12964 static void 12965 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 12966 { 12967 if ((SEQ_LT(tp->snd_wl1, seq) || 12968 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 12969 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 12970 /* keep track of pure window updates */ 12971 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 12972 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 12973 tp->snd_wnd = tiwin; 12974 rack_validate_fo_sendwin_up(tp, rack); 12975 tp->snd_wl1 = seq; 12976 tp->snd_wl2 = ack; 12977 if (tp->snd_wnd > tp->max_sndwnd) 12978 tp->max_sndwnd = tp->snd_wnd; 12979 rack->r_wanted_output = 1; 12980 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 12981 tp->snd_wnd = tiwin; 12982 rack_validate_fo_sendwin_up(tp, rack); 12983 tp->snd_wl1 = seq; 12984 tp->snd_wl2 = ack; 12985 } else { 12986 /* Not a valid win update */ 12987 return; 12988 } 12989 if (tp->snd_wnd > tp->max_sndwnd) 12990 tp->max_sndwnd = tp->snd_wnd; 12991 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 12992 /* The peer collapsed the window */ 12993 rack_collapsed_window(rack); 12994 } else if (rack->rc_has_collapsed) 12995 rack_un_collapse_window(rack); 12996 /* Do we exit persists? */ 12997 if ((rack->rc_in_persist != 0) && 12998 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12999 rack->r_ctl.rc_pace_min_segs))) { 13000 rack_exit_persist(tp, rack, cts); 13001 } 13002 /* Do we enter persists? */ 13003 if ((rack->rc_in_persist == 0) && 13004 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13005 TCPS_HAVEESTABLISHED(tp->t_state) && 13006 (tp->snd_max == tp->snd_una) && 13007 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13008 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13009 /* 13010 * Here the rwnd is less than 13011 * the pacing size, we are established, 13012 * nothing is outstanding, and there is 13013 * data to send. Enter persists. 
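 *
 * Illustrative example (numbers only for the sake of the example):
 * with a pacing minimum of one 1448 byte segment, a peer that shrinks
 * its advertised window below that (and below half the largest window
 * we have seen) while 64k of unsent data sits in so_snd lands here,
 * and the persist timer then takes over probing for the window to
 * reopen.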
13014 */ 13015 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13016 } 13017 } 13018 13019 static void 13020 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13021 { 13022 13023 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13024 union tcp_log_stackspecific log; 13025 struct timeval ltv; 13026 char tcp_hdr_buf[60]; 13027 struct tcphdr *th; 13028 struct timespec ts; 13029 uint32_t orig_snd_una; 13030 uint8_t xx = 0; 13031 13032 #ifdef NETFLIX_HTTP_LOGGING 13033 struct http_sendfile_track *http_req; 13034 13035 if (SEQ_GT(ae->ack, tp->snd_una)) { 13036 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13037 } else { 13038 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13039 } 13040 #endif 13041 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13042 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13043 if (rack->rack_no_prr == 0) 13044 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13045 else 13046 log.u_bbr.flex1 = 0; 13047 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13048 log.u_bbr.use_lt_bw <<= 1; 13049 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13050 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13051 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13052 log.u_bbr.pkts_out = tp->t_maxseg; 13053 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13054 log.u_bbr.flex7 = 1; 13055 log.u_bbr.lost = ae->flags; 13056 log.u_bbr.cwnd_gain = ackval; 13057 log.u_bbr.pacing_gain = 0x2; 13058 if (ae->flags & TSTMP_HDWR) { 13059 /* Record the hardware timestamp if present */ 13060 log.u_bbr.flex3 = M_TSTMP; 13061 ts.tv_sec = ae->timestamp / 1000000000; 13062 ts.tv_nsec = ae->timestamp % 1000000000; 13063 ltv.tv_sec = ts.tv_sec; 13064 ltv.tv_usec = ts.tv_nsec / 1000; 13065 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13066 } else if (ae->flags & TSTMP_LRO) { 13067 /* Record the LRO the arrival timestamp */ 13068 log.u_bbr.flex3 = M_TSTMP_LRO; 13069 ts.tv_sec = ae->timestamp / 1000000000; 13070 ts.tv_nsec = ae->timestamp % 1000000000; 13071 ltv.tv_sec = ts.tv_sec; 13072 ltv.tv_usec = ts.tv_nsec / 1000; 13073 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13074 } 13075 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13076 /* Log the rcv time */ 13077 log.u_bbr.delRate = ae->timestamp; 13078 #ifdef NETFLIX_HTTP_LOGGING 13079 log.u_bbr.applimited = tp->t_http_closed; 13080 log.u_bbr.applimited <<= 8; 13081 log.u_bbr.applimited |= tp->t_http_open; 13082 log.u_bbr.applimited <<= 8; 13083 log.u_bbr.applimited |= tp->t_http_req; 13084 if (http_req) { 13085 /* Copy out any client req info */ 13086 /* seconds */ 13087 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13088 /* useconds */ 13089 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13090 log.u_bbr.rttProp = http_req->timestamp; 13091 log.u_bbr.cur_del_rate = http_req->start; 13092 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13093 log.u_bbr.flex8 |= 1; 13094 } else { 13095 log.u_bbr.flex8 |= 2; 13096 log.u_bbr.bw_inuse = http_req->end; 13097 } 13098 log.u_bbr.flex6 = http_req->start_seq; 13099 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13100 log.u_bbr.flex8 |= 4; 13101 log.u_bbr.epoch = http_req->end_seq; 13102 } 13103 } 13104 #endif 13105 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13106 th = (struct tcphdr *)tcp_hdr_buf; 13107 th->th_seq = ae->seq; 13108 th->th_ack = ae->ack; 13109 th->th_win = ae->win; 13110 /* Now fill in the ports */ 13111 th->th_sport = tp->t_inpcb->inp_fport; 13112 th->th_dport = tp->t_inpcb->inp_lport; 13113 
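	/*
	 * Illustrative note (no new logic): when the ack entry carried a
	 * timestamp, the code below rebuilds the usual appendix-A style
	 * option block so the logged header matches what a real segment
	 * would have carried:
	 *
	 *	NOP(1) NOP(1) TIMESTAMP(8) LEN(10) TSval[4] TSecr[4]
	 *
	 * which is TCPOLEN_TSTAMP_APPA (12) bytes and yields
	 *	th_off = (sizeof(struct tcphdr) + 12) >> 2 = 8
	 */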
tcp_set_flags(th, ae->flags); 13114 /* Now do we have a timestamp option? */ 13115 if (ae->flags & HAS_TSTMP) { 13116 u_char *cp; 13117 uint32_t val; 13118 13119 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13120 cp = (u_char *)(th + 1); 13121 *cp = TCPOPT_NOP; 13122 cp++; 13123 *cp = TCPOPT_NOP; 13124 cp++; 13125 *cp = TCPOPT_TIMESTAMP; 13126 cp++; 13127 *cp = TCPOLEN_TIMESTAMP; 13128 cp++; 13129 val = htonl(ae->ts_value); 13130 bcopy((char *)&val, 13131 (char *)cp, sizeof(uint32_t)); 13132 val = htonl(ae->ts_echo); 13133 bcopy((char *)&val, 13134 (char *)(cp + 4), sizeof(uint32_t)); 13135 } else 13136 th->th_off = (sizeof(struct tcphdr) >> 2); 13137 13138 /* 13139 * For sane logging we need to play a little trick. 13140 * If the ack were fully processed we would have moved 13141 * snd_una to high_seq, but since compressed acks are 13142 * processed in two phases, at this point (logging) snd_una 13143 * won't be advanced. So we would see multiple acks showing 13144 * the advancement. We can prevent that by "pretending" that 13145 * snd_una was advanced and then un-advancing it so that the 13146 * logging code has the right value for tlb_snd_una. 13147 */ 13148 if (tp->snd_una != high_seq) { 13149 orig_snd_una = tp->snd_una; 13150 tp->snd_una = high_seq; 13151 xx = 1; 13152 } else 13153 xx = 0; 13154 TCP_LOG_EVENTP(tp, th, 13155 &tp->t_inpcb->inp_socket->so_rcv, 13156 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13157 0, &log, true, <v); 13158 if (xx) { 13159 tp->snd_una = orig_snd_una; 13160 } 13161 } 13162 13163 } 13164 13165 static void 13166 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13167 { 13168 uint32_t us_rtt; 13169 /* 13170 * A persist or keep-alive was forced out, update our 13171 * min rtt time. Note now worry about lost responses. 13172 * When a subsequent keep-alive or persist times out 13173 * and forced_ack is still on, then the last probe 13174 * was not responded to. In such cases we have a 13175 * sysctl that controls the behavior. Either we apply 13176 * the rtt but with reduced confidence (0). Or we just 13177 * plain don't apply the rtt estimate. Having data flow 13178 * will clear the probe_not_answered flag i.e. cum-ack 13179 * move forward <or> exiting and reentering persists. 13180 */ 13181 13182 rack->forced_ack = 0; 13183 rack->rc_tp->t_rxtshift = 0; 13184 if ((rack->rc_in_persist && 13185 (tiwin == rack->rc_tp->snd_wnd)) || 13186 (rack->rc_in_persist == 0)) { 13187 /* 13188 * In persists only apply the RTT update if this is 13189 * a response to our window probe. And that 13190 * means the rwnd sent must match the current 13191 * snd_wnd. If it does not, then we got a 13192 * window update ack instead. For keepalive 13193 * we allow the answer no matter what the window. 13194 * 13195 * Note that if the probe_not_answered is set then 13196 * the forced_ack_ts is the oldest one i.e. the first 13197 * probe sent that might have been lost. This assures 13198 * us that if we do calculate an RTT it is longer not 13199 * some short thing. 
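 *
 * Illustrative example: if the first window probe left at
 * forced_ack_ts = 1000000 usec, went unanswered, and a later probe is
 * finally acked at us_cts = 6040000 usec, the computed us_rtt of
 * about 5.04 seconds is only an upper bound on the path RTT; with
 * probe_not_answered set it is therefore either discarded or fed to
 * the RTT machinery with confidence 0, depending on
 * rack_apply_rtt_with_reduced_conf.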
13200 */ 13201 if (rack->rc_in_persist) 13202 counter_u64_add(rack_persists_acks, 1); 13203 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13204 if (us_rtt == 0) 13205 us_rtt = 1; 13206 if (rack->probe_not_answered == 0) { 13207 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13208 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13209 } else { 13210 /* We have a retransmitted probe here too */ 13211 if (rack_apply_rtt_with_reduced_conf) { 13212 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13213 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13214 } 13215 } 13216 } 13217 } 13218 13219 static int 13220 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13221 { 13222 /* 13223 * Handle a "special" compressed ack mbuf. Each incoming 13224 * ack has only four possible dispositions: 13225 * 13226 * A) It moves the cum-ack forward 13227 * B) It is behind the cum-ack. 13228 * C) It is a window-update ack. 13229 * D) It is a dup-ack. 13230 * 13231 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13232 * in the incoming mbuf. We also need to still pay attention 13233 * to nxt_pkt since there may be another packet after this 13234 * one. 13235 */ 13236 #ifdef TCP_ACCOUNTING 13237 uint64_t ts_val; 13238 uint64_t rdstc; 13239 #endif 13240 int segsiz; 13241 struct timespec ts; 13242 struct tcp_rack *rack; 13243 struct tcp_ackent *ae; 13244 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13245 int cnt, i, did_out, ourfinisacked = 0; 13246 struct tcpopt to_holder, *to = NULL; 13247 #ifdef TCP_ACCOUNTING 13248 int win_up_req = 0; 13249 #endif 13250 int nsegs = 0; 13251 int under_pacing = 1; 13252 int recovery = 0; 13253 #ifdef TCP_ACCOUNTING 13254 sched_pin(); 13255 #endif 13256 rack = (struct tcp_rack *)tp->t_fb_ptr; 13257 if (rack->gp_ready && 13258 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13259 under_pacing = 0; 13260 else 13261 under_pacing = 1; 13262 13263 if (rack->r_state != tp->t_state) 13264 rack_set_state(tp, rack); 13265 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13266 (tp->t_flags & TF_GPUTINPROG)) { 13267 /* 13268 * We have a goodput in progress 13269 * and we have entered a late state. 13270 * Do we have enough data in the sb 13271 * to handle the GPUT request? 13272 */ 13273 uint32_t bytes; 13274 13275 bytes = tp->gput_ack - tp->gput_seq; 13276 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13277 bytes += tp->gput_seq - tp->snd_una; 13278 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13279 /* 13280 * There are not enough bytes in the socket 13281 * buffer that have been sent to cover this 13282 * measurement. Cancel it. 
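 *
 * Illustrative example: a measurement armed to span 256k of sequence
 * space from gput_seq can no longer complete once the application has
 * left only 64k in so_snd, so it is logged (reason 18 below) and
 * TF_GPUTINPROG is cleared rather than reporting a bogus goodput.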
13283 */ 13284 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13285 rack->r_ctl.rc_gp_srtt /*flex1*/, 13286 tp->gput_seq, 13287 0, 0, 18, __LINE__, NULL, 0); 13288 tp->t_flags &= ~TF_GPUTINPROG; 13289 } 13290 } 13291 to = &to_holder; 13292 to->to_flags = 0; 13293 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13294 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13295 cnt = m->m_len / sizeof(struct tcp_ackent); 13296 counter_u64_add(rack_multi_single_eq, cnt); 13297 high_seq = tp->snd_una; 13298 the_win = tp->snd_wnd; 13299 win_seq = tp->snd_wl1; 13300 win_upd_ack = tp->snd_wl2; 13301 cts = tcp_tv_to_usectick(tv); 13302 ms_cts = tcp_tv_to_mssectick(tv); 13303 rack->r_ctl.rc_rcvtime = cts; 13304 segsiz = ctf_fixed_maxseg(tp); 13305 if ((rack->rc_gp_dyn_mul) && 13306 (rack->use_fixed_rate == 0) && 13307 (rack->rc_always_pace)) { 13308 /* Check in on probertt */ 13309 rack_check_probe_rtt(rack, cts); 13310 } 13311 for (i = 0; i < cnt; i++) { 13312 #ifdef TCP_ACCOUNTING 13313 ts_val = get_cyclecount(); 13314 #endif 13315 rack_clear_rate_sample(rack); 13316 ae = ((mtod(m, struct tcp_ackent *)) + i); 13317 /* Setup the window */ 13318 tiwin = ae->win << tp->snd_scale; 13319 if (tiwin > rack->r_ctl.rc_high_rwnd) 13320 rack->r_ctl.rc_high_rwnd = tiwin; 13321 /* figure out the type of ack */ 13322 if (SEQ_LT(ae->ack, high_seq)) { 13323 /* Case B*/ 13324 ae->ack_val_set = ACK_BEHIND; 13325 } else if (SEQ_GT(ae->ack, high_seq)) { 13326 /* Case A */ 13327 ae->ack_val_set = ACK_CUMACK; 13328 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13329 /* Case D */ 13330 ae->ack_val_set = ACK_DUPACK; 13331 } else { 13332 /* Case C */ 13333 ae->ack_val_set = ACK_RWND; 13334 } 13335 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13336 /* Validate timestamp */ 13337 if (ae->flags & HAS_TSTMP) { 13338 /* Setup for a timestamp */ 13339 to->to_flags = TOF_TS; 13340 ae->ts_echo -= tp->ts_offset; 13341 to->to_tsecr = ae->ts_echo; 13342 to->to_tsval = ae->ts_value; 13343 /* 13344 * If echoed timestamp is later than the current time, fall back to 13345 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13346 * were used when this connection was established. 13347 */ 13348 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13349 to->to_tsecr = 0; 13350 if (tp->ts_recent && 13351 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13352 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13353 #ifdef TCP_ACCOUNTING 13354 rdstc = get_cyclecount(); 13355 if (rdstc > ts_val) { 13356 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13357 (rdstc - ts_val)); 13358 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13359 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13360 } 13361 } 13362 #endif 13363 continue; 13364 } 13365 } 13366 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13367 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13368 tp->ts_recent_age = tcp_ts_getticks(); 13369 tp->ts_recent = ae->ts_value; 13370 } 13371 } else { 13372 /* Setup for a no options */ 13373 to->to_flags = 0; 13374 } 13375 /* Update the rcv time and perform idle reduction possibly */ 13376 if (tp->t_idle_reduce && 13377 (tp->snd_max == tp->snd_una) && 13378 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13379 counter_u64_add(rack_input_idle_reduces, 1); 13380 rack_cc_after_idle(rack, tp); 13381 } 13382 tp->t_rcvtime = ticks; 13383 /* Now what about ECN? 
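* tcp_ecn_input_segment() returns non-zero when the segment carries a mark that requires a congestion response, in which case we signal CC_ECN below.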
*/ 13384 if (tcp_ecn_input_segment(tp, ae->flags, ae->codepoint)) 13385 rack_cong_signal(tp, CC_ECN, ae->ack); 13386 #ifdef TCP_ACCOUNTING 13387 /* Count for the specific type of ack in */ 13388 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13389 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13390 tp->tcp_cnt_counters[ae->ack_val_set]++; 13391 } 13392 #endif 13393 /* 13394 * Note how we could move up these in the determination 13395 * above, but we don't so that way the timestamp checks (and ECN) 13396 * is done first before we do any processing on the ACK. 13397 * The non-compressed path through the code has this 13398 * weakness (noted by @jtl) that it actually does some 13399 * processing before verifying the timestamp information. 13400 * We don't take that path here which is why we set 13401 * the ack_val_set first, do the timestamp and ecn 13402 * processing, and then look at what we have setup. 13403 */ 13404 if (ae->ack_val_set == ACK_BEHIND) { 13405 /* 13406 * Case B flag reordering, if window is not closed 13407 * or it could be a keep-alive or persists 13408 */ 13409 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13410 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13411 } 13412 } else if (ae->ack_val_set == ACK_DUPACK) { 13413 /* Case D */ 13414 rack_strike_dupack(rack); 13415 } else if (ae->ack_val_set == ACK_RWND) { 13416 /* Case C */ 13417 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13418 ts.tv_sec = ae->timestamp / 1000000000; 13419 ts.tv_nsec = ae->timestamp % 1000000000; 13420 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13421 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13422 } else { 13423 rack->r_ctl.act_rcv_time = *tv; 13424 } 13425 if (rack->forced_ack) { 13426 rack_handle_probe_response(rack, tiwin, 13427 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13428 } 13429 #ifdef TCP_ACCOUNTING 13430 win_up_req = 1; 13431 #endif 13432 win_upd_ack = ae->ack; 13433 win_seq = ae->seq; 13434 the_win = tiwin; 13435 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13436 } else { 13437 /* Case A */ 13438 if (SEQ_GT(ae->ack, tp->snd_max)) { 13439 /* 13440 * We just send an ack since the incoming 13441 * ack is beyond the largest seq we sent. 
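* ctf_ack_war_checks() rate limits these challenge ACKs; only if it leaves TF_ACKNOW set do we ask for output.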
13442 */ 13443 if ((tp->t_flags & TF_ACKNOW) == 0) { 13444 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13445 if (tp->t_flags & TF_ACKNOW) 13446 rack->r_wanted_output = 1; 13447 } 13448 } else { 13449 nsegs++; 13450 /* If the window changed setup to update */ 13451 if (tiwin != tp->snd_wnd) { 13452 win_upd_ack = ae->ack; 13453 win_seq = ae->seq; 13454 the_win = tiwin; 13455 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13456 } 13457 #ifdef TCP_ACCOUNTING 13458 /* Account for the acks */ 13459 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13460 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13461 } 13462 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13463 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13464 #endif 13465 high_seq = ae->ack; 13466 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13467 union tcp_log_stackspecific log; 13468 struct timeval tv; 13469 13470 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13471 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13472 log.u_bbr.flex1 = high_seq; 13473 log.u_bbr.flex2 = rack->r_ctl.roundends; 13474 log.u_bbr.flex3 = rack->r_ctl.current_round; 13475 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13476 log.u_bbr.flex8 = 8; 13477 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13478 0, &log, false, NULL, NULL, 0, &tv); 13479 } 13480 /* 13481 * The draft (v3) calls for us to use SEQ_GEQ, but that 13482 * causes issues when we are just going app limited. Lets 13483 * instead use SEQ_GT <or> where its equal but more data 13484 * is outstanding. 13485 */ 13486 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13487 ((high_seq == rack->r_ctl.roundends) && 13488 SEQ_GT(tp->snd_max, tp->snd_una))) { 13489 rack->r_ctl.current_round++; 13490 rack->r_ctl.roundends = tp->snd_max; 13491 if (CC_ALGO(tp)->newround != NULL) { 13492 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13493 } 13494 } 13495 /* Setup our act_rcv_time */ 13496 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13497 ts.tv_sec = ae->timestamp / 1000000000; 13498 ts.tv_nsec = ae->timestamp % 1000000000; 13499 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13500 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13501 } else { 13502 rack->r_ctl.act_rcv_time = *tv; 13503 } 13504 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13505 if (rack->rc_dsack_round_seen) { 13506 /* Is the dsack round over? */ 13507 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13508 /* Yes it is */ 13509 rack->rc_dsack_round_seen = 0; 13510 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13511 } 13512 } 13513 } 13514 } 13515 /* And lets be sure to commit the rtt measurements for this ack */ 13516 tcp_rack_xmit_timer_commit(rack, tp); 13517 #ifdef TCP_ACCOUNTING 13518 rdstc = get_cyclecount(); 13519 if (rdstc > ts_val) { 13520 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13521 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13522 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13523 if (ae->ack_val_set == ACK_CUMACK) 13524 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13525 } 13526 } 13527 #endif 13528 } 13529 #ifdef TCP_ACCOUNTING 13530 ts_val = get_cyclecount(); 13531 #endif 13532 acked_amount = acked = (high_seq - tp->snd_una); 13533 if (acked) { 13534 /* 13535 * Clear the probe not answered flag 13536 * since cum-ack moved forward.
13537 */ 13538 rack->probe_not_answered = 0; 13539 if (rack->sack_attack_disable == 0) 13540 rack_do_decay(rack); 13541 if (acked >= segsiz) { 13542 /* 13543 * You only get credit for 13544 * MSS and greater (and you get extra 13545 * credit for larger cum-ack moves). 13546 */ 13547 int ac; 13548 13549 ac = acked / segsiz; 13550 rack->r_ctl.ack_count += ac; 13551 counter_u64_add(rack_ack_total, ac); 13552 } 13553 if (rack->r_ctl.ack_count > 0xfff00000) { 13554 /* 13555 * reduce the number to keep us under 13556 * a uint32_t. 13557 */ 13558 rack->r_ctl.ack_count /= 2; 13559 rack->r_ctl.sack_count /= 2; 13560 } 13561 if (tp->t_flags & TF_NEEDSYN) { 13562 /* 13563 * T/TCP: Connection was half-synchronized, and our SYN has 13564 * been ACK'd (so connection is now fully synchronized). Go 13565 * to non-starred state, increment snd_una for ACK of SYN, 13566 * and check if we can do window scaling. 13567 */ 13568 tp->t_flags &= ~TF_NEEDSYN; 13569 tp->snd_una++; 13570 acked_amount = acked = (high_seq - tp->snd_una); 13571 } 13572 if (acked > sbavail(&so->so_snd)) 13573 acked_amount = sbavail(&so->so_snd); 13574 #ifdef NETFLIX_EXP_DETECTION 13575 /* 13576 * We only care on a cum-ack move if we are in a sack-disabled 13577 * state. We have already added in to the ack_count, and we never 13578 * would disable on a cum-ack move, so we only care to do the 13579 * detection if it may "undo" it, i.e. we were in disabled already. 13580 */ 13581 if (rack->sack_attack_disable) 13582 rack_do_detection(tp, rack, acked_amount, segsiz); 13583 #endif 13584 if (IN_FASTRECOVERY(tp->t_flags) && 13585 (rack->rack_no_prr == 0)) 13586 rack_update_prr(tp, rack, acked_amount, high_seq); 13587 if (IN_RECOVERY(tp->t_flags)) { 13588 if (SEQ_LT(high_seq, tp->snd_recover) && 13589 (SEQ_LT(high_seq, tp->snd_max))) { 13590 tcp_rack_partialack(tp); 13591 } else { 13592 rack_post_recovery(tp, high_seq); 13593 recovery = 1; 13594 } 13595 } 13596 /* Handle the rack-log-ack part (sendmap) */ 13597 if ((sbused(&so->so_snd) == 0) && 13598 (acked > acked_amount) && 13599 (tp->t_state >= TCPS_FIN_WAIT_1) && 13600 (tp->t_flags & TF_SENTFIN)) { 13601 /* 13602 * We must be sure our fin 13603 * was sent and acked (we can be 13604 * in FIN_WAIT_1 without having 13605 * sent the fin). 13606 */ 13607 ourfinisacked = 1; 13608 /* 13609 * Lets make sure snd_una is updated 13610 * since most likely acked_amount = 0 (it 13611 * should be). 13612 */ 13613 tp->snd_una = high_seq; 13614 } 13615 /* Did we make a RTO error? 
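* If this ack lands within t_badrxtwin of our first retransmission the earlier RTO was likely spurious and we signal CC_RTO_ERR below.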
*/ 13616 if ((tp->t_flags & TF_PREVVALID) && 13617 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13618 tp->t_flags &= ~TF_PREVVALID; 13619 if (tp->t_rxtshift == 1 && 13620 (int)(ticks - tp->t_badrxtwin) < 0) 13621 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13622 } 13623 /* Handle the data in the socket buffer */ 13624 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13625 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13626 if (acked_amount > 0) { 13627 struct mbuf *mfree; 13628 13629 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13630 SOCKBUF_LOCK(&so->so_snd); 13631 mfree = sbcut_locked(&so->so_snd, acked_amount); 13632 tp->snd_una = high_seq; 13633 /* Note we want to hold the sb lock through the sendmap adjust */ 13634 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13635 /* Wake up the socket if we have room to write more */ 13636 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13637 sowwakeup_locked(so); 13638 m_freem(mfree); 13639 } 13640 /* update progress */ 13641 tp->t_acktime = ticks; 13642 rack_log_progress_event(rack, tp, tp->t_acktime, 13643 PROGRESS_UPDATE, __LINE__); 13644 /* Clear out shifts and such */ 13645 tp->t_rxtshift = 0; 13646 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13647 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13648 rack->rc_tlp_in_progress = 0; 13649 rack->r_ctl.rc_tlp_cnt_out = 0; 13650 /* Send recover and snd_nxt must be dragged along */ 13651 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13652 tp->snd_recover = tp->snd_una; 13653 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13654 tp->snd_nxt = tp->snd_una; 13655 /* 13656 * If the RXT timer is running we want to 13657 * stop it, so we can restart a TLP (or new RXT). 13658 */ 13659 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13660 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13661 #ifdef NETFLIX_HTTP_LOGGING 13662 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13663 #endif 13664 tp->snd_wl2 = high_seq; 13665 tp->t_dupacks = 0; 13666 if (under_pacing && 13667 (rack->use_fixed_rate == 0) && 13668 (rack->in_probe_rtt == 0) && 13669 rack->rc_gp_dyn_mul && 13670 rack->rc_always_pace) { 13671 /* Check if we are dragging bottom */ 13672 rack_check_bottom_drag(tp, rack, so, acked); 13673 } 13674 if (tp->snd_una == tp->snd_max) { 13675 tp->t_flags &= ~TF_PREVVALID; 13676 rack->r_ctl.retran_during_recovery = 0; 13677 rack->r_ctl.dsack_byte_cnt = 0; 13678 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13679 if (rack->r_ctl.rc_went_idle_time == 0) 13680 rack->r_ctl.rc_went_idle_time = 1; 13681 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13682 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13683 tp->t_acktime = 0; 13684 /* Set so we might enter persists... */ 13685 rack->r_wanted_output = 1; 13686 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13687 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13688 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13689 (sbavail(&so->so_snd) == 0) && 13690 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13691 /* 13692 * The socket was gone and the 13693 * peer sent data (not now in the past), time to 13694 * reset him. 
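* We log TCP_EI_STATUS_SERVER_RST, close the tcb and then jump to send_out_a_rst so that tcp_output(), called from the closed state, generates the reset.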
13695 */ 13696 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13697 /* tcp_close will kill the inp pre-log the Reset */ 13698 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13699 #ifdef TCP_ACCOUNTING 13700 rdstc = get_cyclecount(); 13701 if (rdstc > ts_val) { 13702 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13703 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13704 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13705 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13706 } 13707 } 13708 #endif 13709 m_freem(m); 13710 tp = tcp_close(tp); 13711 if (tp == NULL) { 13712 #ifdef TCP_ACCOUNTING 13713 sched_unpin(); 13714 #endif 13715 return (1); 13716 } 13717 /* 13718 * We would normally do drop-with-reset which would 13719 * send back a reset. We can't since we don't have 13720 * all the needed bits. Instead lets arrange for 13721 * a call to tcp_output(). That way since we 13722 * are in the closed state we will generate a reset. 13723 * 13724 * Note if tcp_accounting is on we don't unpin since 13725 * we do that after the goto label. 13726 */ 13727 goto send_out_a_rst; 13728 } 13729 if ((sbused(&so->so_snd) == 0) && 13730 (tp->t_state >= TCPS_FIN_WAIT_1) && 13731 (tp->t_flags & TF_SENTFIN)) { 13732 /* 13733 * If we can't receive any more data, then closing user can 13734 * proceed. Starting the timer is contrary to the 13735 * specification, but if we don't get a FIN we'll hang 13736 * forever. 13737 * 13738 */ 13739 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13740 soisdisconnected(so); 13741 tcp_timer_activate(tp, TT_2MSL, 13742 (tcp_fast_finwait2_recycle ? 13743 tcp_finwait2_timeout : 13744 TP_MAXIDLE(tp))); 13745 } 13746 if (ourfinisacked == 0) { 13747 /* 13748 * We don't change to fin-wait-2 if we have our fin acked 13749 * which means we are probably in TCPS_CLOSING. 13750 */ 13751 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13752 } 13753 } 13754 } 13755 /* Wake up the socket if we have room to write more */ 13756 if (sbavail(&so->so_snd)) { 13757 rack->r_wanted_output = 1; 13758 if (ctf_progress_timeout_check(tp, true)) { 13759 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13760 tp, tick, PROGRESS_DROP, __LINE__); 13761 /* 13762 * We cheat here and don't send a RST, we should send one 13763 * when the pacer drops the connection. 
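* Instead we just drop the connection with ETIMEDOUT via tcp_drop() below and free the mbuf.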
13764 */ 13765 #ifdef TCP_ACCOUNTING 13766 rdstc = get_cyclecount(); 13767 if (rdstc > ts_val) { 13768 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13769 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13770 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13771 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13772 } 13773 } 13774 sched_unpin(); 13775 #endif 13776 (void)tcp_drop(tp, ETIMEDOUT); 13777 m_freem(m); 13778 return (1); 13779 } 13780 } 13781 if (ourfinisacked) { 13782 switch(tp->t_state) { 13783 case TCPS_CLOSING: 13784 #ifdef TCP_ACCOUNTING 13785 rdstc = get_cyclecount(); 13786 if (rdstc > ts_val) { 13787 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13788 (rdstc - ts_val)); 13789 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13790 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13791 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13792 } 13793 } 13794 sched_unpin(); 13795 #endif 13796 tcp_twstart(tp); 13797 m_freem(m); 13798 return (1); 13799 break; 13800 case TCPS_LAST_ACK: 13801 #ifdef TCP_ACCOUNTING 13802 rdstc = get_cyclecount(); 13803 if (rdstc > ts_val) { 13804 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13805 (rdstc - ts_val)); 13806 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13807 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13808 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13809 } 13810 } 13811 sched_unpin(); 13812 #endif 13813 tp = tcp_close(tp); 13814 ctf_do_drop(m, tp); 13815 return (1); 13816 break; 13817 case TCPS_FIN_WAIT_1: 13818 #ifdef TCP_ACCOUNTING 13819 rdstc = get_cyclecount(); 13820 if (rdstc > ts_val) { 13821 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13822 (rdstc - ts_val)); 13823 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13824 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13825 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13826 } 13827 } 13828 #endif 13829 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13830 soisdisconnected(so); 13831 tcp_timer_activate(tp, TT_2MSL, 13832 (tcp_fast_finwait2_recycle ? 13833 tcp_finwait2_timeout : 13834 TP_MAXIDLE(tp))); 13835 } 13836 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13837 break; 13838 default: 13839 break; 13840 } 13841 } 13842 if (rack->r_fast_output) { 13843 /* 13844 * We re doing fast output.. can we expand that? 
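* rack_gain_for_fastoutput() credits the newly acked bytes so the pending fast-send block can be extended.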
13845 */ 13846 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13847 } 13848 #ifdef TCP_ACCOUNTING 13849 rdstc = get_cyclecount(); 13850 if (rdstc > ts_val) { 13851 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13852 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13853 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13854 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13855 } 13856 } 13857 13858 } else if (win_up_req) { 13859 rdstc = get_cyclecount(); 13860 if (rdstc > ts_val) { 13861 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13862 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13863 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13864 } 13865 } 13866 #endif 13867 } 13868 /* Now is there a next packet, if so we are done */ 13869 m_freem(m); 13870 did_out = 0; 13871 if (nxt_pkt) { 13872 #ifdef TCP_ACCOUNTING 13873 sched_unpin(); 13874 #endif 13875 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13876 return (0); 13877 } 13878 rack_handle_might_revert(tp, rack); 13879 ctf_calc_rwin(so, tp); 13880 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13881 send_out_a_rst: 13882 if (tcp_output(tp) < 0) { 13883 #ifdef TCP_ACCOUNTING 13884 sched_unpin(); 13885 #endif 13886 return (1); 13887 } 13888 did_out = 1; 13889 } 13890 rack_free_trim(rack); 13891 #ifdef TCP_ACCOUNTING 13892 sched_unpin(); 13893 #endif 13894 rack_timer_audit(tp, rack, &so->so_snd); 13895 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13896 return (0); 13897 } 13898 13899 13900 static int 13901 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13902 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13903 int32_t nxt_pkt, struct timeval *tv) 13904 { 13905 #ifdef TCP_ACCOUNTING 13906 uint64_t ts_val; 13907 #endif 13908 int32_t thflags, retval, did_out = 0; 13909 int32_t way_out = 0; 13910 /* 13911 * cts - is the current time from tv (caller gets ts) in microseconds. 13912 * ms_cts - is the current time from tv in milliseconds. 13913 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13914 */ 13915 uint32_t cts, us_cts, ms_cts; 13916 uint32_t tiwin, high_seq; 13917 struct timespec ts; 13918 struct tcpopt to; 13919 struct tcp_rack *rack; 13920 struct rack_sendmap *rsm; 13921 int32_t prev_state = 0; 13922 #ifdef TCP_ACCOUNTING 13923 int ack_val_set = 0xf; 13924 #endif 13925 int nsegs; 13926 /* 13927 * tv passed from common code is from either M_TSTMP_LRO or 13928 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13929 */ 13930 rack = (struct tcp_rack *)tp->t_fb_ptr; 13931 if (m->m_flags & M_ACKCMP) { 13932 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13933 } 13934 if (m->m_flags & M_ACKCMP) { 13935 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 13936 } 13937 cts = tcp_tv_to_usectick(tv); 13938 ms_cts = tcp_tv_to_mssectick(tv); 13939 nsegs = m->m_pkthdr.lro_nsegs; 13940 counter_u64_add(rack_proc_non_comp_ack, 1); 13941 thflags = tcp_get_flags(th); 13942 #ifdef TCP_ACCOUNTING 13943 sched_pin(); 13944 if (thflags & TH_ACK) 13945 ts_val = get_cyclecount(); 13946 #endif 13947 if ((m->m_flags & M_TSTMP) || 13948 (m->m_flags & M_TSTMP_LRO)) { 13949 mbuf_tstmp2timespec(m, &ts); 13950 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13951 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13952 } else 13953 rack->r_ctl.act_rcv_time = *tv; 13954 kern_prefetch(rack, &prev_state); 13955 prev_state = 0; 13956 /* 13957 * Unscale the window into a 32-bit value. For the SYN_SENT state 13958 * the scale is zero. 13959 */ 13960 tiwin = th->th_win << tp->snd_scale; 13961 #ifdef TCP_ACCOUNTING 13962 if (thflags & TH_ACK) { 13963 /* 13964 * We have a tradeoff here. We can either do what we are 13965 * doing i.e. pinning to this CPU and then doing the accounting 13966 * <or> we could do a critical enter, setup the rdtsc and cpu 13967 * as in below, and then validate we are on the same CPU on 13968 * exit. I have choosen to not do the critical enter since 13969 * that often will gain you a context switch, and instead lock 13970 * us (line above this if) to the same CPU with sched_pin(). This 13971 * means we may be context switched out for a higher priority 13972 * interupt but we won't be moved to another CPU. 13973 * 13974 * If this occurs (which it won't very often since we most likely 13975 * are running this code in interupt context and only a higher 13976 * priority will bump us ... clock?) we will falsely add in 13977 * to the time the interupt processing time plus the ack processing 13978 * time. This is ok since its a rare event. 13979 */ 13980 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 13981 ctf_fixed_maxseg(tp)); 13982 } 13983 #endif 13984 /* 13985 * Parse options on any incoming segment. 13986 */ 13987 memset(&to, 0, sizeof(to)); 13988 tcp_dooptions(&to, (u_char *)(th + 1), 13989 (th->th_off << 2) - sizeof(struct tcphdr), 13990 (thflags & TH_SYN) ? TO_SYN : 0); 13991 NET_EPOCH_ASSERT(); 13992 INP_WLOCK_ASSERT(tp->t_inpcb); 13993 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 13994 __func__)); 13995 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 13996 __func__)); 13997 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13998 (tp->t_flags & TF_GPUTINPROG)) { 13999 /* 14000 * We have a goodput in progress 14001 * and we have entered a late state. 14002 * Do we have enough data in the sb 14003 * to handle the GPUT request? 14004 */ 14005 uint32_t bytes; 14006 14007 bytes = tp->gput_ack - tp->gput_seq; 14008 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14009 bytes += tp->gput_seq - tp->snd_una; 14010 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14011 /* 14012 * There are not enough bytes in the socket 14013 * buffer that have been sent to cover this 14014 * measurement. Cancel it. 
14015 */ 14016 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14017 rack->r_ctl.rc_gp_srtt /*flex1*/, 14018 tp->gput_seq, 14019 0, 0, 18, __LINE__, NULL, 0); 14020 tp->t_flags &= ~TF_GPUTINPROG; 14021 } 14022 } 14023 high_seq = th->th_ack; 14024 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14025 union tcp_log_stackspecific log; 14026 struct timeval ltv; 14027 #ifdef NETFLIX_HTTP_LOGGING 14028 struct http_sendfile_track *http_req; 14029 14030 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14031 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14032 } else { 14033 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14034 } 14035 #endif 14036 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14037 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14038 if (rack->rack_no_prr == 0) 14039 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14040 else 14041 log.u_bbr.flex1 = 0; 14042 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14043 log.u_bbr.use_lt_bw <<= 1; 14044 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14045 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14046 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14047 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14048 log.u_bbr.flex3 = m->m_flags; 14049 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14050 log.u_bbr.lost = thflags; 14051 log.u_bbr.pacing_gain = 0x1; 14052 #ifdef TCP_ACCOUNTING 14053 log.u_bbr.cwnd_gain = ack_val_set; 14054 #endif 14055 log.u_bbr.flex7 = 2; 14056 if (m->m_flags & M_TSTMP) { 14057 /* Record the hardware timestamp if present */ 14058 mbuf_tstmp2timespec(m, &ts); 14059 ltv.tv_sec = ts.tv_sec; 14060 ltv.tv_usec = ts.tv_nsec / 1000; 14061 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 14062 } else if (m->m_flags & M_TSTMP_LRO) { 14063 /* Record the LRO arrival timestamp */ 14064 mbuf_tstmp2timespec(m, &ts); 14065 ltv.tv_sec = ts.tv_sec; 14066 ltv.tv_usec = ts.tv_nsec / 1000; 14067 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 14068 } 14069 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 14070 /* Log the rcv time */ 14071 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14072 #ifdef NETFLIX_HTTP_LOGGING 14073 log.u_bbr.applimited = tp->t_http_closed; 14074 log.u_bbr.applimited <<= 8; 14075 log.u_bbr.applimited |= tp->t_http_open; 14076 log.u_bbr.applimited <<= 8; 14077 log.u_bbr.applimited |= tp->t_http_req; 14078 if (http_req) { 14079 /* Copy out any client req info */ 14080 /* seconds */ 14081 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14082 /* useconds */ 14083 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14084 log.u_bbr.rttProp = http_req->timestamp; 14085 log.u_bbr.cur_del_rate = http_req->start; 14086 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14087 log.u_bbr.flex8 |= 1; 14088 } else { 14089 log.u_bbr.flex8 |= 2; 14090 log.u_bbr.bw_inuse = http_req->end; 14091 } 14092 log.u_bbr.flex6 = http_req->start_seq; 14093 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14094 log.u_bbr.flex8 |= 4; 14095 log.u_bbr.epoch = http_req->end_seq; 14096 } 14097 } 14098 #endif 14099 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14100 tlen, &log, true, &ltv); 14101 } 14102 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14103 way_out = 4; 14104 retval = 0; 14105 m_freem(m); 14106 goto done_with_input; 14107 } 14108 /* 14109 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14110 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
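* An ACK at or below our iss, or above snd_max, is answered with a reset via ctf_do_dropwithreset() below and we return 1.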
14111 */ 14112 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14113 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14114 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14115 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14116 #ifdef TCP_ACCOUNTING 14117 sched_unpin(); 14118 #endif 14119 return (1); 14120 } 14121 /* 14122 * If timestamps were negotiated during SYN/ACK and a 14123 * segment without a timestamp is received, silently drop 14124 * the segment, unless it is a RST segment or missing timestamps are 14125 * tolerated. 14126 * See section 3.2 of RFC 7323. 14127 */ 14128 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14129 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14130 way_out = 5; 14131 retval = 0; 14132 m_freem(m); 14133 goto done_with_input; 14134 } 14135 14136 /* 14137 * Segment received on connection. Reset idle time and keep-alive 14138 * timer. XXX: This should be done after segment validation to 14139 * ignore broken/spoofed segs. 14140 */ 14141 if (tp->t_idle_reduce && 14142 (tp->snd_max == tp->snd_una) && 14143 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14144 counter_u64_add(rack_input_idle_reduces, 1); 14145 rack_cc_after_idle(rack, tp); 14146 } 14147 tp->t_rcvtime = ticks; 14148 #ifdef STATS 14149 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14150 #endif 14151 if (tiwin > rack->r_ctl.rc_high_rwnd) 14152 rack->r_ctl.rc_high_rwnd = tiwin; 14153 /* 14154 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14155 * this to occur after we've validated the segment. 14156 */ 14157 if (tcp_ecn_input_segment(tp, thflags, iptos)) 14158 rack_cong_signal(tp, CC_ECN, th->th_ack); 14159 14160 /* 14161 * If echoed timestamp is later than the current time, fall back to 14162 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14163 * were used when this connection was established. 14164 */ 14165 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14166 to.to_tsecr -= tp->ts_offset; 14167 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14168 to.to_tsecr = 0; 14169 } 14170 14171 /* 14172 * If its the first time in we need to take care of options and 14173 * verify we can do SACK for rack! 14174 */ 14175 if (rack->r_state == 0) { 14176 /* Should be init'd by rack_init() */ 14177 KASSERT(rack->rc_inp != NULL, 14178 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14179 if (rack->rc_inp == NULL) { 14180 rack->rc_inp = tp->t_inpcb; 14181 } 14182 14183 /* 14184 * Process options only when we get SYN/ACK back. The SYN 14185 * case for incoming connections is handled in tcp_syncache. 14186 * According to RFC1323 the window field in a SYN (i.e., a 14187 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14188 * this is traditional behavior, may need to be cleaned up. 14189 */ 14190 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14191 /* Handle parallel SYN for ECN */ 14192 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14193 if ((to.to_flags & TOF_SCALE) && 14194 (tp->t_flags & TF_REQ_SCALE)) { 14195 tp->t_flags |= TF_RCVD_SCALE; 14196 tp->snd_scale = to.to_wscale; 14197 } else 14198 tp->t_flags &= ~TF_REQ_SCALE; 14199 /* 14200 * Initial send window. It will be updated with the 14201 * next incoming segment to the scaled value. 
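* (A window carried on a SYN is never scaled, hence the raw th_win assignment below.)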
14202 */ 14203 tp->snd_wnd = th->th_win; 14204 rack_validate_fo_sendwin_up(tp, rack); 14205 if ((to.to_flags & TOF_TS) && 14206 (tp->t_flags & TF_REQ_TSTMP)) { 14207 tp->t_flags |= TF_RCVD_TSTMP; 14208 tp->ts_recent = to.to_tsval; 14209 tp->ts_recent_age = cts; 14210 } else 14211 tp->t_flags &= ~TF_REQ_TSTMP; 14212 if (to.to_flags & TOF_MSS) { 14213 tcp_mss(tp, to.to_mss); 14214 } 14215 if ((tp->t_flags & TF_SACK_PERMIT) && 14216 (to.to_flags & TOF_SACKPERM) == 0) 14217 tp->t_flags &= ~TF_SACK_PERMIT; 14218 if (IS_FASTOPEN(tp->t_flags)) { 14219 if (to.to_flags & TOF_FASTOPEN) { 14220 uint16_t mss; 14221 14222 if (to.to_flags & TOF_MSS) 14223 mss = to.to_mss; 14224 else 14225 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14226 mss = TCP6_MSS; 14227 else 14228 mss = TCP_MSS; 14229 tcp_fastopen_update_cache(tp, mss, 14230 to.to_tfo_len, to.to_tfo_cookie); 14231 } else 14232 tcp_fastopen_disable_path(tp); 14233 } 14234 } 14235 /* 14236 * At this point we are at the initial call. Here we decide 14237 * if we are doing RACK or not. We do this by seeing if 14238 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14239 * The code now does do dup-ack counting so if you don't 14240 * switch back you won't get rack & TLP, but you will still 14241 * get this stack. 14242 */ 14243 14244 if ((rack_sack_not_required == 0) && 14245 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14246 tcp_switch_back_to_default(tp); 14247 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14248 tlen, iptos); 14249 #ifdef TCP_ACCOUNTING 14250 sched_unpin(); 14251 #endif 14252 return (1); 14253 } 14254 tcp_set_hpts(tp->t_inpcb); 14255 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14256 } 14257 if (thflags & TH_FIN) 14258 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14259 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14260 if ((rack->rc_gp_dyn_mul) && 14261 (rack->use_fixed_rate == 0) && 14262 (rack->rc_always_pace)) { 14263 /* Check in on probertt */ 14264 rack_check_probe_rtt(rack, us_cts); 14265 } 14266 rack_clear_rate_sample(rack); 14267 if ((rack->forced_ack) && 14268 ((tcp_get_flags(th) & TH_RST) == 0)) { 14269 rack_handle_probe_response(rack, tiwin, us_cts); 14270 } 14271 /* 14272 * This is the one exception case where we set the rack state 14273 * always. All other times (timers etc) we must have a rack-state 14274 * set (so we assure we have done the checks above for SACK). 14275 */ 14276 rack->r_ctl.rc_rcvtime = cts; 14277 if (rack->r_state != tp->t_state) 14278 rack_set_state(tp, rack); 14279 if (SEQ_GT(th->th_ack, tp->snd_una) && 14280 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14281 kern_prefetch(rsm, &prev_state); 14282 prev_state = rack->r_state; 14283 retval = (*rack->r_substate) (m, th, so, 14284 tp, &to, drop_hdrlen, 14285 tlen, tiwin, thflags, nxt_pkt, iptos); 14286 #ifdef INVARIANTS 14287 if ((retval == 0) && 14288 (tp->t_inpcb == NULL)) { 14289 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14290 retval, tp, prev_state); 14291 } 14292 #endif 14293 if (retval == 0) { 14294 /* 14295 * If retval is 1 the tcb is unlocked and most likely the tp 14296 * is gone. 14297 */ 14298 INP_WLOCK_ASSERT(tp->t_inpcb); 14299 if ((rack->rc_gp_dyn_mul) && 14300 (rack->rc_always_pace) && 14301 (rack->use_fixed_rate == 0) && 14302 rack->in_probe_rtt && 14303 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14304 /* 14305 * If we are going for target, lets recheck before 14306 * we output. 
14307 */ 14308 rack_check_probe_rtt(rack, us_cts); 14309 } 14310 if (rack->set_pacing_done_a_iw == 0) { 14311 /* How much has been acked? */ 14312 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14313 /* We have enough to set in the pacing segment size */ 14314 rack->set_pacing_done_a_iw = 1; 14315 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14316 } 14317 } 14318 tcp_rack_xmit_timer_commit(rack, tp); 14319 #ifdef TCP_ACCOUNTING 14320 /* 14321 * If we set the ack_val_set to what ack processing we are doing 14322 * we also want to track how many cycles we burned. Note 14323 * the bits after tcp_output we let be "free". This is because 14324 * we are also tracking the tcp_output times as well. Note the 14325 * use of 0xf here since we only have 11 counters (0 - 0xa) and 14326 * 0xf cannot be returned and is what we initialize it to, to 14327 * indicate we are not doing the tabulations. 14328 */ 14329 if (ack_val_set != 0xf) { 14330 uint64_t crtsc; 14331 14332 crtsc = get_cyclecount(); 14333 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14334 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14335 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14336 } 14337 } 14338 #endif 14339 if (nxt_pkt == 0) { 14340 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14341 do_output_now: 14342 if (tcp_output(tp) < 0) 14343 return (1); 14344 did_out = 1; 14345 } 14346 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14347 rack_free_trim(rack); 14348 } 14349 /* Update any rounds needed */ 14350 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14351 union tcp_log_stackspecific log; 14352 struct timeval tv; 14353 14354 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14355 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14356 log.u_bbr.flex1 = high_seq; 14357 log.u_bbr.flex2 = rack->r_ctl.roundends; 14358 log.u_bbr.flex3 = rack->r_ctl.current_round; 14359 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14360 log.u_bbr.flex8 = 9; 14361 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14362 0, &log, false, NULL, NULL, 0, &tv); 14363 } 14364 /* 14365 * The draft (v3) calls for us to use SEQ_GEQ, but that 14366 * causes issues when we are just going app limited. Lets 14367 * instead use SEQ_GT <or> where its equal but more data 14368 * is outstanding. 14369 */ 14370 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14371 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14372 rack->r_ctl.current_round++; 14373 rack->r_ctl.roundends = tp->snd_max; 14374 if (CC_ALGO(tp)->newround != NULL) { 14375 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14376 } 14377 } 14378 if ((nxt_pkt == 0) && 14379 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14380 (SEQ_GT(tp->snd_max, tp->snd_una) || 14381 (tp->t_flags & TF_DELACK) || 14382 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14383 (tp->t_state <= TCPS_CLOSING)))) { 14384 /* We could not send (probably in the hpts but stopped the timer earlier)?
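* If we are already queued in hpts for packet output we leave things alone; otherwise we pull out of hpts, account for being early or late, possibly do the deferred output, and restart the hpts timer.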
*/ 14385 if ((tp->snd_max == tp->snd_una) && 14386 ((tp->t_flags & TF_DELACK) == 0) && 14387 (tcp_in_hpts(rack->rc_inp)) && 14388 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14389 /* keep alive not needed if we are hptsi output yet */ 14390 ; 14391 } else { 14392 int late = 0; 14393 if (tcp_in_hpts(rack->rc_inp)) { 14394 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14395 us_cts = tcp_get_usecs(NULL); 14396 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14397 rack->r_early = 1; 14398 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14399 } else 14400 late = 1; 14401 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14402 } 14403 tcp_hpts_remove(tp->t_inpcb); 14404 } 14405 if (late && (did_out == 0)) { 14406 /* 14407 * We are late in the sending 14408 * and we did not call the output 14409 * (this probably should not happen). 14410 */ 14411 goto do_output_now; 14412 } 14413 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14414 } 14415 way_out = 1; 14416 } else if (nxt_pkt == 0) { 14417 /* Do we have the correct timer running? */ 14418 rack_timer_audit(tp, rack, &so->so_snd); 14419 way_out = 2; 14420 } 14421 done_with_input: 14422 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14423 if (did_out) 14424 rack->r_wanted_output = 0; 14425 #ifdef INVARIANTS 14426 if (tp->t_inpcb == NULL) { 14427 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14428 did_out, 14429 retval, tp, prev_state); 14430 } 14431 #endif 14432 #ifdef TCP_ACCOUNTING 14433 } else { 14434 /* 14435 * Track the time (see above). 14436 */ 14437 if (ack_val_set != 0xf) { 14438 uint64_t crtsc; 14439 14440 crtsc = get_cyclecount(); 14441 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14442 /* 14443 * Note we *DO NOT* increment the per-tcb counters since 14444 * in the else the TP may be gone!! 14445 */ 14446 } 14447 #endif 14448 } 14449 #ifdef TCP_ACCOUNTING 14450 sched_unpin(); 14451 #endif 14452 return (retval); 14453 } 14454 14455 void 14456 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14457 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14458 { 14459 struct timeval tv; 14460 14461 /* First lets see if we have old packets */ 14462 if (tp->t_in_pkt) { 14463 if (ctf_do_queued_segments(so, tp, 1)) { 14464 m_freem(m); 14465 return; 14466 } 14467 } 14468 if (m->m_flags & M_TSTMP_LRO) { 14469 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14470 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14471 } else { 14472 /* Should not be should we kassert instead? */ 14473 tcp_get_usecs(&tv); 14474 } 14475 if (rack_do_segment_nounlock(m, th, so, tp, 14476 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14477 INP_WUNLOCK(tp->t_inpcb); 14478 } 14479 } 14480 14481 struct rack_sendmap * 14482 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14483 { 14484 struct rack_sendmap *rsm = NULL; 14485 int32_t idx; 14486 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14487 14488 /* Return the next guy to be re-transmitted */ 14489 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14490 return (NULL); 14491 } 14492 if (tp->t_flags & TF_SENTFIN) { 14493 /* retran the end FIN? 
*/ 14494 return (NULL); 14495 } 14496 /* ok lets look at this one */ 14497 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14498 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14499 goto check_it; 14500 } 14501 rsm = rack_find_lowest_rsm(rack); 14502 if (rsm == NULL) { 14503 return (NULL); 14504 } 14505 check_it: 14506 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14507 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14508 /* 14509 * No sack so we automatically do the 3 strikes and 14510 * retransmit (no rack timer would be started). 14511 */ 14512 14513 return (rsm); 14514 } 14515 if (rsm->r_flags & RACK_ACKED) { 14516 return (NULL); 14517 } 14518 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14519 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14520 /* Its not yet ready */ 14521 return (NULL); 14522 } 14523 srtt = rack_grab_rtt(tp, rack); 14524 idx = rsm->r_rtr_cnt - 1; 14525 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14526 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14527 if ((tsused == ts_low) || 14528 (TSTMP_LT(tsused, ts_low))) { 14529 /* No time since sending */ 14530 return (NULL); 14531 } 14532 if ((tsused - ts_low) < thresh) { 14533 /* It has not been long enough yet */ 14534 return (NULL); 14535 } 14536 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14537 ((rsm->r_flags & RACK_SACK_PASSED) && 14538 (rack->sack_attack_disable == 0))) { 14539 /* 14540 * We have passed the dup-ack threshold <or> 14541 * a SACK has indicated this is missing. 14542 * Note that if you are a declared attacker 14543 * it is only the dup-ack threshold that 14544 * will cause retransmits. 14545 */ 14546 /* log retransmit reason */ 14547 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14548 rack->r_fast_output = 0; 14549 return (rsm); 14550 } 14551 return (NULL); 14552 } 14553 14554 static void 14555 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14556 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14557 int line, struct rack_sendmap *rsm, uint8_t quality) 14558 { 14559 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14560 union tcp_log_stackspecific log; 14561 struct timeval tv; 14562 14563 memset(&log, 0, sizeof(log)); 14564 log.u_bbr.flex1 = slot; 14565 log.u_bbr.flex2 = len; 14566 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14567 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14568 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14569 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14570 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14571 log.u_bbr.use_lt_bw <<= 1; 14572 log.u_bbr.use_lt_bw |= rack->r_late; 14573 log.u_bbr.use_lt_bw <<= 1; 14574 log.u_bbr.use_lt_bw |= rack->r_early; 14575 log.u_bbr.use_lt_bw <<= 1; 14576 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14577 log.u_bbr.use_lt_bw <<= 1; 14578 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14579 log.u_bbr.use_lt_bw <<= 1; 14580 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14581 log.u_bbr.use_lt_bw <<= 1; 14582 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14583 log.u_bbr.use_lt_bw <<= 1; 14584 log.u_bbr.use_lt_bw |= rack->gp_ready; 14585 log.u_bbr.pkt_epoch = line; 14586 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14587 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14588 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14589 log.u_bbr.bw_inuse = bw_est; 14590 log.u_bbr.delRate = bw; 14591 if (rack->r_ctl.gp_bw == 0) 14592 log.u_bbr.cur_del_rate = 0; 14593 else 14594 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14595 log.u_bbr.rttProp = len_time; 14596 
log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14597 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14598 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14599 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14600 /* We are in slow start */ 14601 log.u_bbr.flex7 = 1; 14602 } else { 14603 /* we are on congestion avoidance */ 14604 log.u_bbr.flex7 = 0; 14605 } 14606 log.u_bbr.flex8 = method; 14607 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14608 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14609 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14610 log.u_bbr.cwnd_gain <<= 1; 14611 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14612 log.u_bbr.cwnd_gain <<= 1; 14613 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14614 log.u_bbr.bbr_substate = quality; 14615 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14616 &rack->rc_inp->inp_socket->so_rcv, 14617 &rack->rc_inp->inp_socket->so_snd, 14618 BBR_LOG_HPTSI_CALC, 0, 14619 0, &log, false, &tv); 14620 } 14621 } 14622 14623 static uint32_t 14624 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14625 { 14626 uint32_t new_tso, user_max; 14627 14628 user_max = rack->rc_user_set_max_segs * mss; 14629 if (rack->rc_force_max_seg) { 14630 return (user_max); 14631 } 14632 if (rack->use_fixed_rate && 14633 ((rack->r_ctl.crte == NULL) || 14634 (bw != rack->r_ctl.crte->rate))) { 14635 /* Use the user mss since we are not exactly matched */ 14636 return (user_max); 14637 } 14638 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14639 if (new_tso > user_max) 14640 new_tso = user_max; 14641 return (new_tso); 14642 } 14643 14644 static int32_t 14645 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14646 { 14647 uint64_t lentim, fill_bw; 14648 14649 /* Lets first see if we are full, if so continue with normal rate */ 14650 rack->r_via_fill_cw = 0; 14651 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14652 return (slot); 14653 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14654 return (slot); 14655 if (rack->r_ctl.rc_last_us_rtt == 0) 14656 return (slot); 14657 if (rack->rc_pace_fill_if_rttin_range && 14658 (rack->r_ctl.rc_last_us_rtt >= 14659 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14660 /* The rtt is huge, N * smallest, lets not fill */ 14661 return (slot); 14662 } 14663 /* 14664 * first lets calculate the b/w based on the last us-rtt 14665 * and the sndwnd. 14666 */ 14667 fill_bw = rack->r_ctl.cwnd_to_use; 14668 /* Take the rwnd if its smaller */ 14669 if (fill_bw > rack->rc_tp->snd_wnd) 14670 fill_bw = rack->rc_tp->snd_wnd; 14671 if (rack->r_fill_less_agg) { 14672 /* 14673 * Now take away the inflight (this will reduce our 14674 * aggressiveness and yeah, if we get that much out in 1RTT 14675 * we will have had acks come back and still be behind). 
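* Either way the window is then turned into a rate below: with illustrative numbers, a 100000 byte window and a rc_last_us_rtt of 20000 usec give fill_bw = 100000 * 1000000 / 20000 = 5000000 bytes/sec, and a 14480 byte send would need lentim = 14480 * 1000000 / 5000000 = 2896 usec.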
14676 */ 14677 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14678 } 14679 /* Now lets make it into a b/w */ 14680 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14681 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14682 /* We are below the min b/w */ 14683 if (non_paced) 14684 *rate_wanted = fill_bw; 14685 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14686 return (slot); 14687 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14688 fill_bw = rack->r_ctl.bw_rate_cap; 14689 rack->r_via_fill_cw = 1; 14690 if (rack->r_rack_hw_rate_caps && 14691 (rack->r_ctl.crte != NULL)) { 14692 uint64_t high_rate; 14693 14694 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14695 if (fill_bw > high_rate) { 14696 /* We are capping bw at the highest rate table entry */ 14697 if (*rate_wanted > high_rate) { 14698 /* The original rate was also capped */ 14699 rack->r_via_fill_cw = 0; 14700 } 14701 rack_log_hdwr_pacing(rack, 14702 fill_bw, high_rate, __LINE__, 14703 0, 3); 14704 fill_bw = high_rate; 14705 if (capped) 14706 *capped = 1; 14707 } 14708 } else if ((rack->r_ctl.crte == NULL) && 14709 (rack->rack_hdrw_pacing == 0) && 14710 (rack->rack_hdw_pace_ena) && 14711 rack->r_rack_hw_rate_caps && 14712 (rack->rack_attempt_hdwr_pace == 0) && 14713 (rack->rc_inp->inp_route.ro_nh != NULL) && 14714 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14715 /* 14716 * Ok, we may have a first attempt that is greater than our top rate; 14717 * lets check. 14718 */ 14719 uint64_t high_rate; 14720 14721 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14722 if (high_rate) { 14723 if (fill_bw > high_rate) { 14724 fill_bw = high_rate; 14725 if (capped) 14726 *capped = 1; 14727 } 14728 } 14729 } 14730 /* 14731 * Ok fill_bw holds our mythical b/w to fill the cwnd 14732 * in a rtt, what does that equate to time wise? 14733 */ 14734 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14735 lentim /= fill_bw; 14736 *rate_wanted = fill_bw; 14737 if (non_paced || (lentim < slot)) { 14738 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14739 0, lentim, 12, __LINE__, NULL, 0); 14740 return ((int32_t)lentim); 14741 } else 14742 return (slot); 14743 } 14744 14745 static int32_t 14746 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14747 { 14748 uint64_t srtt; 14749 int32_t slot = 0; 14750 int can_start_hw_pacing = 1; 14751 int err; 14752 14753 if (rack->rc_always_pace == 0) { 14754 /* 14755 * We use the most optimistic possible cwnd/srtt for 14756 * sending calculations. This will make our 14757 * calculation anticipate getting more through 14758 * quicker than possible. But that's ok, we don't want 14759 * the peer to have a gap in data sending. 14760 */ 14761 uint64_t cwnd, tr_perms = 0; 14762 int32_t reduce = 0; 14763 14764 old_method: 14765 /* 14766 * We keep no precise pacing with the old method; 14767 * instead we use the pacer to mitigate bursts.
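* With illustrative numbers: cwnd = 144800 bytes and srtt = 20000 usec give tr_perms = 144800 * 1000 / 20000 = 7240 bytes per msec, so a 28960 byte send computes slot = 4 msec before the rack_slot_reduction and the conversion to usecs below.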
14768 */ 14769 if (rack->r_ctl.rc_rack_min_rtt) 14770 srtt = rack->r_ctl.rc_rack_min_rtt; 14771 else 14772 srtt = max(tp->t_srtt, 1); 14773 if (rack->r_ctl.rc_rack_largest_cwnd) 14774 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14775 else 14776 cwnd = rack->r_ctl.cwnd_to_use; 14777 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14778 tr_perms = (cwnd * 1000) / srtt; 14779 if (tr_perms == 0) { 14780 tr_perms = ctf_fixed_maxseg(tp); 14781 } 14782 /* 14783 * Calculate how long this will take to drain, if 14784 * the calculation comes out to zero, that's ok we 14785 * will use send_a_lot to possibly spin around for 14786 * more increasing tot_len_this_send to the point 14787 * that its going to require a pace, or we hit the 14788 * cwnd. In that case we are just waiting for 14789 * an ACK. 14790 */ 14791 slot = len / tr_perms; 14792 /* Now do we reduce the time so we don't run dry? */ 14793 if (slot && rack_slot_reduction) { 14794 reduce = (slot / rack_slot_reduction); 14795 if (reduce < slot) { 14796 slot -= reduce; 14797 } else 14798 slot = 0; 14799 } 14800 slot *= HPTS_USEC_IN_MSEC; 14801 if (rack->rc_pace_to_cwnd) { 14802 uint64_t rate_wanted = 0; 14803 14804 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14805 rack->rc_ack_can_sendout_data = 1; 14806 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14807 } else 14808 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14809 } else { 14810 uint64_t bw_est, res, lentim, rate_wanted; 14811 uint32_t orig_val, segs, oh; 14812 int capped = 0; 14813 int prev_fill; 14814 14815 if ((rack->r_rr_config == 1) && rsm) { 14816 return (rack->r_ctl.rc_min_to); 14817 } 14818 if (rack->use_fixed_rate) { 14819 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14820 } else if ((rack->r_ctl.init_rate == 0) && 14821 #ifdef NETFLIX_PEAKRATE 14822 (rack->rc_tp->t_maxpeakrate == 0) && 14823 #endif 14824 (rack->r_ctl.gp_bw == 0)) { 14825 /* no way to yet do an estimate */ 14826 bw_est = rate_wanted = 0; 14827 } else { 14828 bw_est = rack_get_bw(rack); 14829 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14830 } 14831 if ((bw_est == 0) || (rate_wanted == 0) || 14832 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14833 /* 14834 * No way yet to make a b/w estimate or 14835 * our rate is set incorrectly. 14836 */ 14837 goto old_method; 14838 } 14839 /* We need to account for all the overheads */ 14840 segs = (len + segsiz - 1) / segsiz; 14841 /* 14842 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14843 * and how much data we put in each packet. Yes this 14844 * means we may be off if we are larger than 1500 bytes 14845 * or smaller. But this just makes us more conservative. 14846 */ 14847 if (rack_hw_rate_min && 14848 (bw_est < rack_hw_rate_min)) 14849 can_start_hw_pacing = 0; 14850 if (ETHERNET_SEGMENT_SIZE > segsiz) 14851 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14852 else 14853 oh = 0; 14854 segs *= oh; 14855 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14856 res = lentim / rate_wanted; 14857 slot = (uint32_t)res; 14858 orig_val = rack->r_ctl.rc_pace_max_segs; 14859 if (rack->r_ctl.crte == NULL) { 14860 /* 14861 * Only do this if we are not hardware pacing 14862 * since if we are doing hw-pacing below we will 14863 * make a call after setting up or changing 14864 * the rate.
14865 */ 14866 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14867 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14868 /* 14869 * We lost our rate somehow, this can happen 14870 * if the interface changed underneath us. 14871 */ 14872 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14873 rack->r_ctl.crte = NULL; 14874 /* Lets re-allow attempting to setup pacing */ 14875 rack->rack_hdrw_pacing = 0; 14876 rack->rack_attempt_hdwr_pace = 0; 14877 rack_log_hdwr_pacing(rack, 14878 rate_wanted, bw_est, __LINE__, 14879 0, 6); 14880 } 14881 /* Did we change the TSO size, if so log it */ 14882 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14883 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14884 prev_fill = rack->r_via_fill_cw; 14885 if ((rack->rc_pace_to_cwnd) && 14886 (capped == 0) && 14887 (rack->use_fixed_rate == 0) && 14888 (rack->in_probe_rtt == 0) && 14889 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14890 /* 14891 * We want to pace at our rate *or* faster to 14892 * fill the cwnd to the max if its not full. 14893 */ 14894 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14895 } 14896 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14897 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14898 if ((rack->rack_hdw_pace_ena) && 14899 (can_start_hw_pacing > 0) && 14900 (rack->rack_hdrw_pacing == 0) && 14901 (rack->rack_attempt_hdwr_pace == 0)) { 14902 /* 14903 * Lets attempt to turn on hardware pacing 14904 * if we can. 14905 */ 14906 rack->rack_attempt_hdwr_pace = 1; 14907 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14908 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14909 rate_wanted, 14910 RS_PACING_GEQ, 14911 &err, &rack->r_ctl.crte_prev_rate); 14912 if (rack->r_ctl.crte) { 14913 rack->rack_hdrw_pacing = 1; 14914 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14915 0, rack->r_ctl.crte, 14916 NULL); 14917 rack_log_hdwr_pacing(rack, 14918 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14919 err, 0); 14920 rack->r_ctl.last_hw_bw_req = rate_wanted; 14921 } else { 14922 counter_u64_add(rack_hw_pace_init_fail, 1); 14923 } 14924 } else if (rack->rack_hdrw_pacing && 14925 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14926 /* Do we need to adjust our rate? */ 14927 const struct tcp_hwrate_limit_table *nrte; 14928 14929 if (rack->r_up_only && 14930 (rate_wanted < rack->r_ctl.crte->rate)) { 14931 /** 14932 * We have four possible states here 14933 * having to do with the previous time 14934 * and this time. 14935 * previous | this-time 14936 * A) 0 | 0 -- fill_cw not in the picture 14937 * B) 1 | 0 -- we were doing a fill-cw but now are not 14938 * C) 1 | 1 -- all rates from fill_cw 14939 * D) 0 | 1 -- we were doing non-fill and now we are filling 14940 * 14941 * For case A, C and D we don't allow a drop. But for 14942 * case B, where we are now on our steady rate, we do 14943 * allow a drop. 14944 * 14945 */ 14946 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14947 goto done_w_hdwr; 14948 } 14949 if ((rate_wanted > rack->r_ctl.crte->rate) || 14950 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 14951 if (rack_hw_rate_to_low && 14952 (bw_est < rack_hw_rate_to_low)) { 14953 /* 14954 * The pacing rate is too low for hardware, but 14955 * we do allow hardware pacing to be restarted.
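* We release the rate entry and clear rack_attempt_hdwr_pace so a later, larger bandwidth estimate can try hardware pacing again.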
14956 */ 14957 rack_log_hdwr_pacing(rack, 14958 bw_est, rack->r_ctl.crte->rate, __LINE__, 14959 0, 5); 14960 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14961 rack->r_ctl.crte = NULL; 14962 rack->rack_attempt_hdwr_pace = 0; 14963 rack->rack_hdrw_pacing = 0; 14964 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14965 goto done_w_hdwr; 14966 } 14967 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 14968 rack->rc_tp, 14969 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14970 rate_wanted, 14971 RS_PACING_GEQ, 14972 &err, &rack->r_ctl.crte_prev_rate); 14973 if (nrte == NULL) { 14974 /* Lost the rate */ 14975 rack->rack_hdrw_pacing = 0; 14976 rack->r_ctl.crte = NULL; 14977 rack_log_hdwr_pacing(rack, 14978 rate_wanted, 0, __LINE__, 14979 err, 1); 14980 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14981 counter_u64_add(rack_hw_pace_lost, 1); 14982 } else if (nrte != rack->r_ctl.crte) { 14983 rack->r_ctl.crte = nrte; 14984 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 14985 segsiz, 0, 14986 rack->r_ctl.crte, 14987 NULL); 14988 rack_log_hdwr_pacing(rack, 14989 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14990 err, 2); 14991 rack->r_ctl.last_hw_bw_req = rate_wanted; 14992 } 14993 } else { 14994 /* We just need to adjust the segment size */ 14995 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14996 rack_log_hdwr_pacing(rack, 14997 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14998 0, 4); 14999 rack->r_ctl.last_hw_bw_req = rate_wanted; 15000 } 15001 } 15002 } 15003 if ((rack->r_ctl.crte != NULL) && 15004 (rack->r_ctl.crte->rate == rate_wanted)) { 15005 /* 15006 * We need to add a extra if the rates 15007 * are exactly matched. The idea is 15008 * we want the software to make sure the 15009 * queue is empty before adding more, this 15010 * gives us N MSS extra pace times where 15011 * N is our sysctl 15012 */ 15013 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15014 } 15015 done_w_hdwr: 15016 if (rack_limit_time_with_srtt && 15017 (rack->use_fixed_rate == 0) && 15018 #ifdef NETFLIX_PEAKRATE 15019 (rack->rc_tp->t_maxpeakrate == 0) && 15020 #endif 15021 (rack->rack_hdrw_pacing == 0)) { 15022 /* 15023 * Sanity check, we do not allow the pacing delay 15024 * to be longer than the SRTT of the path. If it is 15025 * a slow path, then adding a packet should increase 15026 * the RTT and compensate for this i.e. the srtt will 15027 * be greater so the allowed pacing time will be greater. 15028 * 15029 * Note this restriction is not for where a peak rate 15030 * is set, we are doing fixed pacing or hardware pacing. 15031 */ 15032 if (rack->rc_tp->t_srtt) 15033 srtt = rack->rc_tp->t_srtt; 15034 else 15035 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15036 if (srtt < (uint64_t)slot) { 15037 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15038 slot = srtt; 15039 } 15040 } 15041 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15042 } 15043 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15044 /* 15045 * If this rate is seeing enobufs when it 15046 * goes to send then either the nic is out 15047 * of gas or we are mis-estimating the time 15048 * somehow and not letting the queue empty 15049 * completely. Lets add to the pacing time. 
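 * The boost below is the rate entry's time_between scaled by
 * rack_enobuf_hw_boost_mult and then clamped to the
 * [rack_enobuf_hw_min, rack_enobuf_hw_max] range.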
15050 */ 15051 int hw_boost_delay; 15052 15053 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15054 if (hw_boost_delay > rack_enobuf_hw_max) 15055 hw_boost_delay = rack_enobuf_hw_max; 15056 else if (hw_boost_delay < rack_enobuf_hw_min) 15057 hw_boost_delay = rack_enobuf_hw_min; 15058 slot += hw_boost_delay; 15059 } 15060 return (slot); 15061 } 15062 15063 static void 15064 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15065 tcp_seq startseq, uint32_t sb_offset) 15066 { 15067 struct rack_sendmap *my_rsm = NULL; 15068 struct rack_sendmap fe; 15069 15070 if (tp->t_state < TCPS_ESTABLISHED) { 15071 /* 15072 * We don't start any measurements if we are 15073 * not at least established. 15074 */ 15075 return; 15076 } 15077 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15078 /* 15079 * We will get no more data into the SB 15080 * this means we need to have the data available 15081 * before we start a measurement. 15082 */ 15083 15084 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15085 max(rc_init_window(rack), 15086 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15087 /* Nope not enough data */ 15088 return; 15089 } 15090 } 15091 tp->t_flags |= TF_GPUTINPROG; 15092 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15093 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15094 tp->gput_seq = startseq; 15095 rack->app_limited_needs_set = 0; 15096 if (rack->in_probe_rtt) 15097 rack->measure_saw_probe_rtt = 1; 15098 else if ((rack->measure_saw_probe_rtt) && 15099 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15100 rack->measure_saw_probe_rtt = 0; 15101 if (rack->rc_gp_filled) 15102 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15103 else { 15104 /* Special case initial measurement */ 15105 struct timeval tv; 15106 15107 tp->gput_ts = tcp_get_usecs(&tv); 15108 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15109 } 15110 /* 15111 * We take a guess out into the future, 15112 * if we have no measurement and no 15113 * initial rate, we measure the first 15114 * initial-windows worth of data to 15115 * speed up getting some GP measurement and 15116 * thus start pacing. 15117 */ 15118 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15119 rack->app_limited_needs_set = 1; 15120 tp->gput_ack = startseq + max(rc_init_window(rack), 15121 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15122 rack_log_pacing_delay_calc(rack, 15123 tp->gput_seq, 15124 tp->gput_ack, 15125 0, 15126 tp->gput_ts, 15127 rack->r_ctl.rc_app_limited_cnt, 15128 9, 15129 __LINE__, NULL, 0); 15130 return; 15131 } 15132 if (sb_offset) { 15133 /* 15134 * We are out somewhere in the sb 15135 * can we use the already outstanding data? 15136 */ 15137 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15138 /* 15139 * Yes first one is good and in this case 15140 * the tp->gput_ts is correctly set based on 15141 * the last ack that arrived (no need to 15142 * set things up when an ack comes in). 15143 */ 15144 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15145 if ((my_rsm == NULL) || 15146 (my_rsm->r_rtr_cnt != 1)) { 15147 /* retransmission? */ 15148 goto use_latest; 15149 } 15150 } else { 15151 if (rack->r_ctl.rc_first_appl == NULL) { 15152 /* 15153 * If rc_first_appl is NULL 15154 * then the cnt should be 0. 15155 * This is probably an error, maybe 15156 * a KASSERT would be approprate. 
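 * (If it does happen we simply fall back to the use_latest handling below.)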
15157 */ 15158 goto use_latest; 15159 } 15160 /* 15161 * If we have a marker pointer to the last one that is 15162 * app limited we can use that, but we need to set 15163 * things up so that when it gets ack'ed we record 15164 * the ack time (if its not already acked). 15165 */ 15166 rack->app_limited_needs_set = 1; 15167 /* 15168 * We want to get to the rsm that is either 15169 * next with space i.e. over 1 MSS or the one 15170 * after that (after the app-limited). 15171 */ 15172 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15173 rack->r_ctl.rc_first_appl); 15174 if (my_rsm) { 15175 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15176 /* Have to use the next one */ 15177 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15178 my_rsm); 15179 else { 15180 /* Use after the first MSS of it is acked */ 15181 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15182 goto start_set; 15183 } 15184 } 15185 if ((my_rsm == NULL) || 15186 (my_rsm->r_rtr_cnt != 1)) { 15187 /* 15188 * Either its a retransmit or 15189 * the last is the app-limited one. 15190 */ 15191 goto use_latest; 15192 } 15193 } 15194 tp->gput_seq = my_rsm->r_start; 15195 start_set: 15196 if (my_rsm->r_flags & RACK_ACKED) { 15197 /* 15198 * This one has been acked use the arrival ack time 15199 */ 15200 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15201 rack->app_limited_needs_set = 0; 15202 } 15203 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15204 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15205 rack_log_pacing_delay_calc(rack, 15206 tp->gput_seq, 15207 tp->gput_ack, 15208 (uint64_t)my_rsm, 15209 tp->gput_ts, 15210 rack->r_ctl.rc_app_limited_cnt, 15211 9, 15212 __LINE__, NULL, 0); 15213 return; 15214 } 15215 15216 use_latest: 15217 /* 15218 * We don't know how long we may have been 15219 * idle or if this is the first-send. Lets 15220 * setup the flag so we will trim off 15221 * the first ack'd data so we get a true 15222 * measurement. 15223 */ 15224 rack->app_limited_needs_set = 1; 15225 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15226 /* Find this guy so we can pull the send time */ 15227 fe.r_start = startseq; 15228 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15229 if (my_rsm) { 15230 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15231 if (my_rsm->r_flags & RACK_ACKED) { 15232 /* 15233 * Unlikely since its probably what was 15234 * just transmitted (but I am paranoid). 15235 */ 15236 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15237 rack->app_limited_needs_set = 0; 15238 } 15239 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15240 /* This also is unlikely */ 15241 tp->gput_seq = my_rsm->r_start; 15242 } 15243 } else { 15244 /* 15245 * TSNH unless we have some send-map limit, 15246 * and even at that it should not be hitting 15247 * that limit (we should have stopped sending). 
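 * (TSNH: this should not happen. If it does, we just stamp the goodput
 * output time from microuptime() below and carry on.)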
15248 */ 15249 struct timeval tv; 15250 15251 microuptime(&tv); 15252 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15253 } 15254 rack_log_pacing_delay_calc(rack, 15255 tp->gput_seq, 15256 tp->gput_ack, 15257 (uint64_t)my_rsm, 15258 tp->gput_ts, 15259 rack->r_ctl.rc_app_limited_cnt, 15260 9, __LINE__, NULL, 0); 15261 } 15262 15263 static inline uint32_t 15264 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15265 uint32_t avail, int32_t sb_offset) 15266 { 15267 uint32_t len; 15268 uint32_t sendwin; 15269 15270 if (tp->snd_wnd > cwnd_to_use) 15271 sendwin = cwnd_to_use; 15272 else 15273 sendwin = tp->snd_wnd; 15274 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15275 /* We never want to go over our peers rcv-window */ 15276 len = 0; 15277 } else { 15278 uint32_t flight; 15279 15280 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15281 if (flight >= sendwin) { 15282 /* 15283 * We have in flight what we are allowed by cwnd (if 15284 * it was rwnd blocking it would have hit above out 15285 * >= tp->snd_wnd). 15286 */ 15287 return (0); 15288 } 15289 len = sendwin - flight; 15290 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15291 /* We would send too much (beyond the rwnd) */ 15292 len = tp->snd_wnd - ctf_outstanding(tp); 15293 } 15294 if ((len + sb_offset) > avail) { 15295 /* 15296 * We don't have that much in the SB, how much is 15297 * there? 15298 */ 15299 len = avail - sb_offset; 15300 } 15301 } 15302 return (len); 15303 } 15304 15305 static void 15306 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15307 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15308 int rsm_is_null, int optlen, int line, uint16_t mode) 15309 { 15310 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15311 union tcp_log_stackspecific log; 15312 struct timeval tv; 15313 15314 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15315 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15316 log.u_bbr.flex1 = error; 15317 log.u_bbr.flex2 = flags; 15318 log.u_bbr.flex3 = rsm_is_null; 15319 log.u_bbr.flex4 = ipoptlen; 15320 log.u_bbr.flex5 = tp->rcv_numsacks; 15321 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15322 log.u_bbr.flex7 = optlen; 15323 log.u_bbr.flex8 = rack->r_fsb_inited; 15324 log.u_bbr.applimited = rack->r_fast_output; 15325 log.u_bbr.bw_inuse = rack_get_bw(rack); 15326 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15327 log.u_bbr.cwnd_gain = mode; 15328 log.u_bbr.pkts_out = orig_len; 15329 log.u_bbr.lt_epoch = len; 15330 log.u_bbr.delivered = line; 15331 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15332 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15333 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15334 len, &log, false, NULL, NULL, 0, &tv); 15335 } 15336 } 15337 15338 15339 static struct mbuf * 15340 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15341 struct rack_fast_send_blk *fsb, 15342 int32_t seglimit, int32_t segsize, int hw_tls) 15343 { 15344 #ifdef KERN_TLS 15345 struct ktls_session *tls, *ntls; 15346 #ifdef INVARIANTS 15347 struct mbuf *start; 15348 #endif 15349 #endif 15350 struct mbuf *m, *n, **np, *smb; 15351 struct mbuf *top; 15352 int32_t off, soff; 15353 int32_t len = *plen; 15354 int32_t fragsize; 15355 int32_t len_cp = 0; 15356 uint32_t mlen, frags; 15357 15358 soff = off = the_off; 15359 smb = m = the_m; 15360 np = ⊤ 15361 top = NULL; 15362 #ifdef KERN_TLS 15363 if (hw_tls && (m->m_flags & M_EXTPG)) 15364 tls = m->m_epg_tls; 
15365 else 15366 tls = NULL; 15367 #ifdef INVARIANTS 15368 start = m; 15369 #endif 15370 #endif 15371 while (len > 0) { 15372 if (m == NULL) { 15373 *plen = len_cp; 15374 break; 15375 } 15376 #ifdef KERN_TLS 15377 if (hw_tls) { 15378 if (m->m_flags & M_EXTPG) 15379 ntls = m->m_epg_tls; 15380 else 15381 ntls = NULL; 15382 15383 /* 15384 * Avoid mixing TLS records with handshake 15385 * data or TLS records from different 15386 * sessions. 15387 */ 15388 if (tls != ntls) { 15389 MPASS(m != start); 15390 *plen = len_cp; 15391 break; 15392 } 15393 } 15394 #endif 15395 mlen = min(len, m->m_len - off); 15396 if (seglimit) { 15397 /* 15398 * For M_EXTPG mbufs, add 3 segments 15399 * + 1 in case we are crossing page boundaries 15400 * + 2 in case the TLS hdr/trailer are used 15401 * It is cheaper to just add the segments 15402 * than it is to take the cache miss to look 15403 * at the mbuf ext_pgs state in detail. 15404 */ 15405 if (m->m_flags & M_EXTPG) { 15406 fragsize = min(segsize, PAGE_SIZE); 15407 frags = 3; 15408 } else { 15409 fragsize = segsize; 15410 frags = 0; 15411 } 15412 15413 /* Break if we really can't fit anymore. */ 15414 if ((frags + 1) >= seglimit) { 15415 *plen = len_cp; 15416 break; 15417 } 15418 15419 /* 15420 * Reduce size if you can't copy the whole 15421 * mbuf. If we can't copy the whole mbuf, also 15422 * adjust len so the loop will end after this 15423 * mbuf. 15424 */ 15425 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15426 mlen = (seglimit - frags - 1) * fragsize; 15427 len = mlen; 15428 *plen = len_cp + len; 15429 } 15430 frags += howmany(mlen, fragsize); 15431 if (frags == 0) 15432 frags++; 15433 seglimit -= frags; 15434 KASSERT(seglimit > 0, 15435 ("%s: seglimit went too low", __func__)); 15436 } 15437 n = m_get(M_NOWAIT, m->m_type); 15438 *np = n; 15439 if (n == NULL) 15440 goto nospace; 15441 n->m_len = mlen; 15442 soff += mlen; 15443 len_cp += n->m_len; 15444 if (m->m_flags & (M_EXT|M_EXTPG)) { 15445 n->m_data = m->m_data + off; 15446 mb_dupcl(n, m); 15447 } else { 15448 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15449 (u_int)n->m_len); 15450 } 15451 len -= n->m_len; 15452 off = 0; 15453 m = m->m_next; 15454 np = &n->m_next; 15455 if (len || (soff == smb->m_len)) { 15456 /* 15457 * We have more so we move forward or 15458 * we have consumed the entire mbuf and 15459 * len has fell to 0. 15460 */ 15461 soff = 0; 15462 smb = m; 15463 } 15464 15465 } 15466 if (fsb != NULL) { 15467 fsb->m = smb; 15468 fsb->off = soff; 15469 if (smb) { 15470 /* 15471 * Save off the size of the mbuf. We do 15472 * this so that we can recognize when it 15473 * has been trimmed by sbcut() as acks 15474 * come in. 15475 */ 15476 fsb->o_m_len = smb->m_len; 15477 } else { 15478 /* 15479 * This is the case where the next mbuf went to NULL. This 15480 * means with this copy we have sent everything in the sb. 15481 * In theory we could clear the fast_output flag, but lets 15482 * not since its possible that we could get more added 15483 * and acks that call the extend function which would let 15484 * us send more. 15485 */ 15486 fsb->o_m_len = 0; 15487 } 15488 } 15489 return (top); 15490 nospace: 15491 if (top) 15492 m_freem(top); 15493 return (NULL); 15494 15495 } 15496 15497 /* 15498 * This is a copy of m_copym(), taking the TSO segment size/limit 15499 * constraints into account, and advancing the sndptr as it goes. 
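 * The fast-send block also remembers the mbuf and offset where the copy
 * stopped, along with o_m_len, so a later call can detect that sbcut() has
 * trimmed the front of that mbuf and re-adjust the offset accordingly.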
15500 */ 15501 static struct mbuf * 15502 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15503 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15504 { 15505 struct mbuf *m, *n; 15506 int32_t soff; 15507 15508 soff = rack->r_ctl.fsb.off; 15509 m = rack->r_ctl.fsb.m; 15510 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15511 /* 15512 * The mbuf had the front of it chopped off by an ack; 15513 * we need to adjust the soff/off by that difference. 15514 */ 15515 uint32_t delta; 15516 15517 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15518 soff -= delta; 15519 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15520 /* 15521 * The mbuf was expanded, probably by 15522 * an m_compress. Just update o_m_len. 15523 */ 15524 rack->r_ctl.fsb.o_m_len = m->m_len; 15525 } 15526 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15527 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15528 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15529 __FUNCTION__, 15530 rack, *plen, m, m->m_len)); 15531 /* Save off the right location before we copy and advance */ 15532 *s_soff = soff; 15533 *s_mb = rack->r_ctl.fsb.m; 15534 n = rack_fo_base_copym(m, soff, plen, 15535 &rack->r_ctl.fsb, 15536 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15537 return (n); 15538 } 15539 15540 static int 15541 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15542 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15543 { 15544 /* 15545 * Enter the fast retransmit path. We are given that a sched_pin is 15546 * in place (if accounting is compiled in) and the cycle count taken 15547 * at the entry is in ts_val. The concept here is that the rsm 15548 * now holds the mbuf offsets and such so we can directly transmit 15549 * without a lot of overhead; the len field is already set for 15550 * us to prohibit us from sending too much (usually it's 1 MSS).
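 * If any of the sanity checks below fail we exit through the failed label
 * and return -1, which sends the caller back to the normal rack_output()
 * path for this retransmission.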
15551 */ 15552 struct ip *ip = NULL; 15553 struct udphdr *udp = NULL; 15554 struct tcphdr *th = NULL; 15555 struct mbuf *m = NULL; 15556 struct inpcb *inp; 15557 uint8_t *cpto; 15558 struct tcp_log_buffer *lgb; 15559 #ifdef TCP_ACCOUNTING 15560 uint64_t crtsc; 15561 int cnt_thru = 1; 15562 #endif 15563 struct tcpopt to; 15564 u_char opt[TCP_MAXOLEN]; 15565 uint32_t hdrlen, optlen; 15566 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15567 uint16_t flags; 15568 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15569 uint32_t if_hw_tsomaxsegsize; 15570 15571 #ifdef INET6 15572 struct ip6_hdr *ip6 = NULL; 15573 15574 if (rack->r_is_v6) { 15575 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15576 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15577 } else 15578 #endif /* INET6 */ 15579 { 15580 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15581 hdrlen = sizeof(struct tcpiphdr); 15582 } 15583 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15584 goto failed; 15585 } 15586 if (doing_tlp) { 15587 /* Its a TLP add the flag, it may already be there but be sure */ 15588 rsm->r_flags |= RACK_TLP; 15589 } else { 15590 /* If it was a TLP it is not not on this retransmit */ 15591 rsm->r_flags &= ~RACK_TLP; 15592 } 15593 startseq = rsm->r_start; 15594 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15595 inp = rack->rc_inp; 15596 to.to_flags = 0; 15597 flags = tcp_outflags[tp->t_state]; 15598 if (flags & (TH_SYN|TH_RST)) { 15599 goto failed; 15600 } 15601 if (rsm->r_flags & RACK_HAS_FIN) { 15602 /* We can't send a FIN here */ 15603 goto failed; 15604 } 15605 if (flags & TH_FIN) { 15606 /* We never send a FIN */ 15607 flags &= ~TH_FIN; 15608 } 15609 if (tp->t_flags & TF_RCVD_TSTMP) { 15610 to.to_tsval = ms_cts + tp->ts_offset; 15611 to.to_tsecr = tp->ts_recent; 15612 to.to_flags = TOF_TS; 15613 } 15614 optlen = tcp_addoptions(&to, opt); 15615 hdrlen += optlen; 15616 udp = rack->r_ctl.fsb.udp; 15617 if (udp) 15618 hdrlen += sizeof(struct udphdr); 15619 if (rack->r_ctl.rc_pace_max_segs) 15620 max_val = rack->r_ctl.rc_pace_max_segs; 15621 else if (rack->rc_user_set_max_segs) 15622 max_val = rack->rc_user_set_max_segs * segsiz; 15623 else 15624 max_val = len; 15625 if ((tp->t_flags & TF_TSO) && 15626 V_tcp_do_tso && 15627 (len > segsiz) && 15628 (tp->t_port == 0)) 15629 tso = 1; 15630 #ifdef INET6 15631 if (MHLEN < hdrlen + max_linkhdr) 15632 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15633 else 15634 #endif 15635 m = m_gethdr(M_NOWAIT, MT_DATA); 15636 if (m == NULL) 15637 goto failed; 15638 m->m_data += max_linkhdr; 15639 m->m_len = hdrlen; 15640 th = rack->r_ctl.fsb.th; 15641 /* Establish the len to send */ 15642 if (len > max_val) 15643 len = max_val; 15644 if ((tso) && (len + optlen > tp->t_maxseg)) { 15645 uint32_t if_hw_tsomax; 15646 int32_t max_len; 15647 15648 /* extract TSO information */ 15649 if_hw_tsomax = tp->t_tsomax; 15650 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15651 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15652 /* 15653 * Check if we should limit by maximum payload 15654 * length: 15655 */ 15656 if (if_hw_tsomax != 0) { 15657 /* compute maximum TSO length */ 15658 max_len = (if_hw_tsomax - hdrlen - 15659 max_linkhdr); 15660 if (max_len <= 0) { 15661 goto failed; 15662 } else if (len > max_len) { 15663 len = max_len; 15664 } 15665 } 15666 if (len <= segsiz) { 15667 /* 15668 * In case there are too many small fragments don't 15669 * use TSO: 15670 */ 15671 tso = 0; 15672 } 15673 } else { 15674 tso = 0; 15675 } 15676 if ((tso == 0) && (len > 
segsiz)) 15677 len = segsiz; 15678 if ((len == 0) || 15679 (len <= MHLEN - hdrlen - max_linkhdr)) { 15680 goto failed; 15681 } 15682 th->th_seq = htonl(rsm->r_start); 15683 th->th_ack = htonl(tp->rcv_nxt); 15684 /* 15685 * The PUSH bit should only be applied 15686 * if the full retransmission is made. If 15687 * we are sending less than this is the 15688 * left hand edge and should not have 15689 * the PUSH bit. 15690 */ 15691 if ((rsm->r_flags & RACK_HAD_PUSH) && 15692 (len == (rsm->r_end - rsm->r_start))) 15693 flags |= TH_PUSH; 15694 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15695 if (th->th_win == 0) { 15696 tp->t_sndzerowin++; 15697 tp->t_flags |= TF_RXWIN0SENT; 15698 } else 15699 tp->t_flags &= ~TF_RXWIN0SENT; 15700 if (rsm->r_flags & RACK_TLP) { 15701 /* 15702 * TLP should not count in retran count, but 15703 * in its own bin 15704 */ 15705 counter_u64_add(rack_tlp_retran, 1); 15706 counter_u64_add(rack_tlp_retran_bytes, len); 15707 } else { 15708 tp->t_sndrexmitpack++; 15709 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15710 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15711 } 15712 #ifdef STATS 15713 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15714 len); 15715 #endif 15716 if (rsm->m == NULL) 15717 goto failed; 15718 if (rsm->orig_m_len != rsm->m->m_len) { 15719 /* Fix up the orig_m_len and possibly the mbuf offset */ 15720 rack_adjust_orig_mlen(rsm); 15721 } 15722 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15723 if (len <= segsiz) { 15724 /* 15725 * Must have ran out of mbufs for the copy 15726 * shorten it to no longer need tso. Lets 15727 * not put on sendalot since we are low on 15728 * mbufs. 15729 */ 15730 tso = 0; 15731 } 15732 if ((m->m_next == NULL) || (len <= 0)){ 15733 goto failed; 15734 } 15735 if (udp) { 15736 if (rack->r_is_v6) 15737 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15738 else 15739 ulen = hdrlen + len - sizeof(struct ip); 15740 udp->uh_ulen = htons(ulen); 15741 } 15742 m->m_pkthdr.rcvif = (struct ifnet *)0; 15743 if (TCPS_HAVERCVDSYN(tp->t_state) && 15744 (tp->t_flags2 & TF2_ECN_PERMIT)) { 15745 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15746 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15747 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15748 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15749 #ifdef INET6 15750 if (rack->r_is_v6) { 15751 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15752 ip6->ip6_flow |= htonl(ect << 20); 15753 } 15754 else 15755 #endif 15756 { 15757 ip->ip_tos &= ~IPTOS_ECN_MASK; 15758 ip->ip_tos |= ect; 15759 } 15760 } 15761 tcp_set_flags(th, flags); 15762 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15763 #ifdef INET6 15764 if (rack->r_is_v6) { 15765 if (tp->t_port) { 15766 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15767 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15768 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15769 th->th_sum = htons(0); 15770 UDPSTAT_INC(udps_opackets); 15771 } else { 15772 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15773 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15774 th->th_sum = in6_cksum_pseudo(ip6, 15775 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15776 0); 15777 } 15778 } 15779 #endif 15780 #if defined(INET6) && defined(INET) 15781 else 15782 #endif 15783 #ifdef INET 15784 { 15785 if (tp->t_port) { 15786 m->m_pkthdr.csum_flags = CSUM_UDP; 15787 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15788 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15789 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15790 th->th_sum = htons(0); 15791 UDPSTAT_INC(udps_opackets); 15792 } else { 15793 m->m_pkthdr.csum_flags = CSUM_TCP; 15794 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15795 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15796 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15797 IPPROTO_TCP + len + optlen)); 15798 } 15799 /* IP version must be set here for ipv4/ipv6 checking later */ 15800 KASSERT(ip->ip_v == IPVERSION, 15801 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15802 } 15803 #endif 15804 if (tso) { 15805 KASSERT(len > tp->t_maxseg - optlen, 15806 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15807 m->m_pkthdr.csum_flags |= CSUM_TSO; 15808 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15809 } 15810 #ifdef INET6 15811 if (rack->r_is_v6) { 15812 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15813 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15814 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15815 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15816 else 15817 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15818 } 15819 #endif 15820 #if defined(INET) && defined(INET6) 15821 else 15822 #endif 15823 #ifdef INET 15824 { 15825 ip->ip_len = htons(m->m_pkthdr.len); 15826 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15827 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15828 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15829 if (tp->t_port == 0 || len < V_tcp_minmss) { 15830 ip->ip_off |= htons(IP_DF); 15831 } 15832 } else { 15833 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15834 } 15835 } 15836 #endif 15837 /* Time to copy in our header */ 15838 cpto = mtod(m, uint8_t *); 15839 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15840 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15841 if (optlen) { 15842 bcopy(opt, th + 1, optlen); 15843 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15844 } else { 15845 th->th_off = sizeof(struct tcphdr) >> 2; 15846 } 15847 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15848 union tcp_log_stackspecific log; 15849 15850 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15851 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15852 if (rack->rack_no_prr) 15853 log.u_bbr.flex1 = 0; 15854 else 15855 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15856 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15857 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15858 log.u_bbr.flex4 = max_val; 15859 log.u_bbr.flex5 = 0; 15860 /* Save off the early/late values */ 15861 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15862 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15863 log.u_bbr.bw_inuse = rack_get_bw(rack); 15864 if (doing_tlp == 0) 15865 log.u_bbr.flex8 = 1; 15866 else 15867 log.u_bbr.flex8 = 2; 15868 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15869 log.u_bbr.flex7 = 55; 15870 log.u_bbr.pkts_out = tp->t_maxseg; 15871 log.u_bbr.timeStamp = cts; 15872 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15873 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15874 log.u_bbr.delivered = 0; 15875 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15876 len, &log, false, NULL, NULL, 0, tv); 15877 } else 15878 lgb = NULL; 15879 #ifdef INET6 15880 if (rack->r_is_v6) { 15881 error = ip6_output(m, NULL, 15882 &inp->inp_route6, 15883 0, NULL, NULL, inp); 15884 } 15885 #endif 15886 #if defined(INET) && defined(INET6) 15887 else 15888 #endif 15889 #ifdef INET 15890 { 15891 error = ip_output(m, 
NULL, 15892 &inp->inp_route, 15893 0, 0, inp); 15894 } 15895 #endif 15896 m = NULL; 15897 if (lgb) { 15898 lgb->tlb_errno = error; 15899 lgb = NULL; 15900 } 15901 if (error) { 15902 goto failed; 15903 } 15904 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15905 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15906 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15907 rack->rc_tlp_in_progress = 1; 15908 rack->r_ctl.rc_tlp_cnt_out++; 15909 } 15910 if (error == 0) { 15911 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15912 if (doing_tlp) { 15913 rack->rc_last_sent_tlp_past_cumack = 0; 15914 rack->rc_last_sent_tlp_seq_valid = 1; 15915 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15916 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15917 } 15918 } 15919 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15920 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15921 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15922 rack->r_ctl.retran_during_recovery += len; 15923 { 15924 int idx; 15925 15926 idx = (len / segsiz) + 3; 15927 if (idx >= TCP_MSS_ACCT_ATIMER) 15928 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15929 else 15930 counter_u64_add(rack_out_size[idx], 1); 15931 } 15932 if (tp->t_rtttime == 0) { 15933 tp->t_rtttime = ticks; 15934 tp->t_rtseq = startseq; 15935 KMOD_TCPSTAT_INC(tcps_segstimed); 15936 } 15937 counter_u64_add(rack_fto_rsm_send, 1); 15938 if (error && (error == ENOBUFS)) { 15939 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15940 if (rack->rc_enobuf < 0x7f) 15941 rack->rc_enobuf++; 15942 if (slot < (10 * HPTS_USEC_IN_MSEC)) 15943 slot = 10 * HPTS_USEC_IN_MSEC; 15944 } else 15945 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 15946 if ((slot == 0) || 15947 (rack->rc_always_pace == 0) || 15948 (rack->r_rr_config == 1)) { 15949 /* 15950 * We have no pacing set or we 15951 * are using old-style rack or 15952 * we are overriden to use the old 1ms pacing. 15953 */ 15954 slot = rack->r_ctl.rc_min_to; 15955 } 15956 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 15957 if (rack->r_must_retran) { 15958 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 15959 if ((SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) || 15960 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 15961 /* 15962 * We have retransmitted all we need. If 15963 * RACK_MUST_RXT is not set then we need to 15964 * not retransmit this guy. 
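 * (If the flag is not set we bail to the failed label so the normal path
 * takes over; otherwise the flag is cleared below.)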
15965 */ 15966 rack->r_must_retran = 0; 15967 rack->r_ctl.rc_out_at_rto = 0; 15968 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 15969 /* Not one we should rxt */ 15970 goto failed; 15971 } else { 15972 /* Clear the flag */ 15973 rsm->r_flags &= ~RACK_MUST_RXT; 15974 } 15975 } else { 15976 /* Remove the flag */ 15977 rsm->r_flags &= ~RACK_MUST_RXT; 15978 } 15979 } 15980 #ifdef TCP_ACCOUNTING 15981 crtsc = get_cyclecount(); 15982 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15983 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 15984 } 15985 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 15986 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15987 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 15988 } 15989 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 15990 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15991 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 15992 } 15993 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 15994 sched_unpin(); 15995 #endif 15996 return (0); 15997 failed: 15998 if (m) 15999 m_free(m); 16000 return (-1); 16001 } 16002 16003 static void 16004 rack_sndbuf_autoscale(struct tcp_rack *rack) 16005 { 16006 /* 16007 * Automatic sizing of send socket buffer. Often the send buffer 16008 * size is not optimally adjusted to the actual network conditions 16009 * at hand (delay bandwidth product). Setting the buffer size too 16010 * small limits throughput on links with high bandwidth and high 16011 * delay (eg. trans-continental/oceanic links). Setting the 16012 * buffer size too big consumes too much real kernel memory, 16013 * especially with many connections on busy servers. 16014 * 16015 * The criteria to step up the send buffer one notch are: 16016 * 1. receive window of remote host is larger than send buffer 16017 * (with a fudge factor of 5/4th); 16018 * 2. send buffer is filled to 7/8th with data (so we actually 16019 * have data to make use of it); 16020 * 3. send buffer fill has not hit maximal automatic size; 16021 * 4. our send window (slow start and cogestion controlled) is 16022 * larger than sent but unacknowledged data in send buffer. 16023 * 16024 * Note that the rack version moves things much faster since 16025 * we want to avoid hitting cache lines in the rack_fast_output() 16026 * path so this is called much less often and thus moves 16027 * the SB forward by a percentage. 
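 * As a rough illustration, with assumed tunables of rack_autosndbuf_inc at
 * 20 (percent) and a 64 kB sb_hiwat, scaleup becomes about 13 kB plus the
 * current hiwat, i.e. roughly 77 kB, and is then capped at
 * V_tcp_autosndbuf_max.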
16028 */ 16029 struct socket *so; 16030 struct tcpcb *tp; 16031 uint32_t sendwin, scaleup; 16032 16033 tp = rack->rc_tp; 16034 so = rack->rc_inp->inp_socket; 16035 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16036 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16037 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16038 sbused(&so->so_snd) >= 16039 (so->so_snd.sb_hiwat / 8 * 7) && 16040 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16041 sendwin >= (sbused(&so->so_snd) - 16042 (tp->snd_nxt - tp->snd_una))) { 16043 if (rack_autosndbuf_inc) 16044 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16045 else 16046 scaleup = V_tcp_autosndbuf_inc; 16047 if (scaleup < V_tcp_autosndbuf_inc) 16048 scaleup = V_tcp_autosndbuf_inc; 16049 scaleup += so->so_snd.sb_hiwat; 16050 if (scaleup > V_tcp_autosndbuf_max) 16051 scaleup = V_tcp_autosndbuf_max; 16052 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 16053 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16054 } 16055 } 16056 } 16057 16058 static int 16059 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16060 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16061 { 16062 /* 16063 * Enter to do fast output. We are given that the sched_pin is 16064 * in place (if accounting is compiled in) and the cycle count taken 16065 * at entry is in place in ts_val. The idea here is that 16066 * we know how many more bytes needs to be sent (presumably either 16067 * during pacing or to fill the cwnd and that was greater than 16068 * the max-burst). We have how much to send and all the info we 16069 * need to just send. 16070 */ 16071 struct ip *ip = NULL; 16072 struct udphdr *udp = NULL; 16073 struct tcphdr *th = NULL; 16074 struct mbuf *m, *s_mb; 16075 struct inpcb *inp; 16076 uint8_t *cpto; 16077 struct tcp_log_buffer *lgb; 16078 #ifdef TCP_ACCOUNTING 16079 uint64_t crtsc; 16080 #endif 16081 struct tcpopt to; 16082 u_char opt[TCP_MAXOLEN]; 16083 uint32_t hdrlen, optlen; 16084 int cnt_thru = 1; 16085 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16086 uint16_t flags; 16087 uint32_t s_soff; 16088 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16089 uint32_t if_hw_tsomaxsegsize; 16090 uint16_t add_flag = RACK_SENT_FP; 16091 #ifdef INET6 16092 struct ip6_hdr *ip6 = NULL; 16093 16094 if (rack->r_is_v6) { 16095 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16096 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16097 } else 16098 #endif /* INET6 */ 16099 { 16100 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16101 hdrlen = sizeof(struct tcpiphdr); 16102 } 16103 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16104 m = NULL; 16105 goto failed; 16106 } 16107 startseq = tp->snd_max; 16108 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16109 inp = rack->rc_inp; 16110 len = rack->r_ctl.fsb.left_to_send; 16111 to.to_flags = 0; 16112 flags = rack->r_ctl.fsb.tcp_flags; 16113 if (tp->t_flags & TF_RCVD_TSTMP) { 16114 to.to_tsval = ms_cts + tp->ts_offset; 16115 to.to_tsecr = tp->ts_recent; 16116 to.to_flags = TOF_TS; 16117 } 16118 optlen = tcp_addoptions(&to, opt); 16119 hdrlen += optlen; 16120 udp = rack->r_ctl.fsb.udp; 16121 if (udp) 16122 hdrlen += sizeof(struct udphdr); 16123 if (rack->r_ctl.rc_pace_max_segs) 16124 max_val = rack->r_ctl.rc_pace_max_segs; 16125 else if (rack->rc_user_set_max_segs) 16126 max_val = rack->rc_user_set_max_segs * segsiz; 16127 else 16128 max_val = len; 16129 if ((tp->t_flags & TF_TSO) && 
16130 V_tcp_do_tso && 16131 (len > segsiz) && 16132 (tp->t_port == 0)) 16133 tso = 1; 16134 again: 16135 #ifdef INET6 16136 if (MHLEN < hdrlen + max_linkhdr) 16137 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16138 else 16139 #endif 16140 m = m_gethdr(M_NOWAIT, MT_DATA); 16141 if (m == NULL) 16142 goto failed; 16143 m->m_data += max_linkhdr; 16144 m->m_len = hdrlen; 16145 th = rack->r_ctl.fsb.th; 16146 /* Establish the len to send */ 16147 if (len > max_val) 16148 len = max_val; 16149 if ((tso) && (len + optlen > tp->t_maxseg)) { 16150 uint32_t if_hw_tsomax; 16151 int32_t max_len; 16152 16153 /* extract TSO information */ 16154 if_hw_tsomax = tp->t_tsomax; 16155 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16156 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16157 /* 16158 * Check if we should limit by maximum payload 16159 * length: 16160 */ 16161 if (if_hw_tsomax != 0) { 16162 /* compute maximum TSO length */ 16163 max_len = (if_hw_tsomax - hdrlen - 16164 max_linkhdr); 16165 if (max_len <= 0) { 16166 goto failed; 16167 } else if (len > max_len) { 16168 len = max_len; 16169 } 16170 } 16171 if (len <= segsiz) { 16172 /* 16173 * In case there are too many small fragments don't 16174 * use TSO: 16175 */ 16176 tso = 0; 16177 } 16178 } else { 16179 tso = 0; 16180 } 16181 if ((tso == 0) && (len > segsiz)) 16182 len = segsiz; 16183 if ((len == 0) || 16184 (len <= MHLEN - hdrlen - max_linkhdr)) { 16185 goto failed; 16186 } 16187 sb_offset = tp->snd_max - tp->snd_una; 16188 th->th_seq = htonl(tp->snd_max); 16189 th->th_ack = htonl(tp->rcv_nxt); 16190 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16191 if (th->th_win == 0) { 16192 tp->t_sndzerowin++; 16193 tp->t_flags |= TF_RXWIN0SENT; 16194 } else 16195 tp->t_flags &= ~TF_RXWIN0SENT; 16196 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16197 KMOD_TCPSTAT_INC(tcps_sndpack); 16198 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16199 #ifdef STATS 16200 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16201 len); 16202 #endif 16203 if (rack->r_ctl.fsb.m == NULL) 16204 goto failed; 16205 16206 /* s_mb and s_soff are saved for rack_log_output */ 16207 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16208 &s_mb, &s_soff); 16209 if (len <= segsiz) { 16210 /* 16211 * Must have ran out of mbufs for the copy 16212 * shorten it to no longer need tso. Lets 16213 * not put on sendalot since we are low on 16214 * mbufs. 
16215 */ 16216 tso = 0; 16217 } 16218 if (rack->r_ctl.fsb.rfo_apply_push && 16219 (len == rack->r_ctl.fsb.left_to_send)) { 16220 flags |= TH_PUSH; 16221 add_flag |= RACK_HAD_PUSH; 16222 } 16223 if ((m->m_next == NULL) || (len <= 0)){ 16224 goto failed; 16225 } 16226 if (udp) { 16227 if (rack->r_is_v6) 16228 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16229 else 16230 ulen = hdrlen + len - sizeof(struct ip); 16231 udp->uh_ulen = htons(ulen); 16232 } 16233 m->m_pkthdr.rcvif = (struct ifnet *)0; 16234 if (TCPS_HAVERCVDSYN(tp->t_state) && 16235 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16236 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16237 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16238 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16239 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16240 #ifdef INET6 16241 if (rack->r_is_v6) { 16242 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16243 ip6->ip6_flow |= htonl(ect << 20); 16244 } 16245 else 16246 #endif 16247 { 16248 ip->ip_tos &= ~IPTOS_ECN_MASK; 16249 ip->ip_tos |= ect; 16250 } 16251 } 16252 tcp_set_flags(th, flags); 16253 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16254 #ifdef INET6 16255 if (rack->r_is_v6) { 16256 if (tp->t_port) { 16257 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16258 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16259 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16260 th->th_sum = htons(0); 16261 UDPSTAT_INC(udps_opackets); 16262 } else { 16263 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16264 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16265 th->th_sum = in6_cksum_pseudo(ip6, 16266 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16267 0); 16268 } 16269 } 16270 #endif 16271 #if defined(INET6) && defined(INET) 16272 else 16273 #endif 16274 #ifdef INET 16275 { 16276 if (tp->t_port) { 16277 m->m_pkthdr.csum_flags = CSUM_UDP; 16278 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16279 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16280 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16281 th->th_sum = htons(0); 16282 UDPSTAT_INC(udps_opackets); 16283 } else { 16284 m->m_pkthdr.csum_flags = CSUM_TCP; 16285 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16286 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16287 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16288 IPPROTO_TCP + len + optlen)); 16289 } 16290 /* IP version must be set here for ipv4/ipv6 checking later */ 16291 KASSERT(ip->ip_v == IPVERSION, 16292 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16293 } 16294 #endif 16295 if (tso) { 16296 KASSERT(len > tp->t_maxseg - optlen, 16297 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16298 m->m_pkthdr.csum_flags |= CSUM_TSO; 16299 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16300 } 16301 #ifdef INET6 16302 if (rack->r_is_v6) { 16303 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16304 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16305 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16306 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16307 else 16308 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16309 } 16310 #endif 16311 #if defined(INET) && defined(INET6) 16312 else 16313 #endif 16314 #ifdef INET 16315 { 16316 ip->ip_len = htons(m->m_pkthdr.len); 16317 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16318 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16319 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16320 if (tp->t_port == 0 || len < V_tcp_minmss) { 16321 ip->ip_off |= htons(IP_DF); 16322 } 16323 } else { 16324 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16325 } 16326 } 16327 #endif 
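/*
 * Note: when TSO is not in use, this fast path loops back to the again
 * label after the send below to peel off additional segsiz-sized chunks
 * until left_to_send or max_val is exhausted; cnt_thru counts those passes
 * for the accounting code.
 */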
16328 /* Time to copy in our header */ 16329 cpto = mtod(m, uint8_t *); 16330 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16331 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16332 if (optlen) { 16333 bcopy(opt, th + 1, optlen); 16334 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16335 } else { 16336 th->th_off = sizeof(struct tcphdr) >> 2; 16337 } 16338 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16339 union tcp_log_stackspecific log; 16340 16341 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16342 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16343 if (rack->rack_no_prr) 16344 log.u_bbr.flex1 = 0; 16345 else 16346 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16347 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16348 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16349 log.u_bbr.flex4 = max_val; 16350 log.u_bbr.flex5 = 0; 16351 /* Save off the early/late values */ 16352 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16353 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16354 log.u_bbr.bw_inuse = rack_get_bw(rack); 16355 log.u_bbr.flex8 = 0; 16356 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16357 log.u_bbr.flex7 = 44; 16358 log.u_bbr.pkts_out = tp->t_maxseg; 16359 log.u_bbr.timeStamp = cts; 16360 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16361 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16362 log.u_bbr.delivered = 0; 16363 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16364 len, &log, false, NULL, NULL, 0, tv); 16365 } else 16366 lgb = NULL; 16367 #ifdef INET6 16368 if (rack->r_is_v6) { 16369 error = ip6_output(m, NULL, 16370 &inp->inp_route6, 16371 0, NULL, NULL, inp); 16372 } 16373 #endif 16374 #if defined(INET) && defined(INET6) 16375 else 16376 #endif 16377 #ifdef INET 16378 { 16379 error = ip_output(m, NULL, 16380 &inp->inp_route, 16381 0, 0, inp); 16382 } 16383 #endif 16384 if (lgb) { 16385 lgb->tlb_errno = error; 16386 lgb = NULL; 16387 } 16388 if (error) { 16389 *send_err = error; 16390 m = NULL; 16391 goto failed; 16392 } 16393 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16394 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16395 m = NULL; 16396 if (tp->snd_una == tp->snd_max) { 16397 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16398 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16399 tp->t_acktime = ticks; 16400 } 16401 if (error == 0) 16402 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16403 16404 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16405 tot_len += len; 16406 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16407 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16408 tp->snd_max += len; 16409 tp->snd_nxt = tp->snd_max; 16410 { 16411 int idx; 16412 16413 idx = (len / segsiz) + 3; 16414 if (idx >= TCP_MSS_ACCT_ATIMER) 16415 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16416 else 16417 counter_u64_add(rack_out_size[idx], 1); 16418 } 16419 if (len <= rack->r_ctl.fsb.left_to_send) 16420 rack->r_ctl.fsb.left_to_send -= len; 16421 else 16422 rack->r_ctl.fsb.left_to_send = 0; 16423 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16424 rack->r_fast_output = 0; 16425 rack->r_ctl.fsb.left_to_send = 0; 16426 /* At the end of fast_output scale up the sb */ 16427 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16428 rack_sndbuf_autoscale(rack); 16429 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16430 } 16431 if (tp->t_rtttime 
== 0) { 16432 tp->t_rtttime = ticks; 16433 tp->t_rtseq = startseq; 16434 KMOD_TCPSTAT_INC(tcps_segstimed); 16435 } 16436 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16437 (max_val > len) && 16438 (tso == 0)) { 16439 max_val -= len; 16440 len = segsiz; 16441 th = rack->r_ctl.fsb.th; 16442 cnt_thru++; 16443 goto again; 16444 } 16445 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16446 counter_u64_add(rack_fto_send, 1); 16447 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16448 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16449 #ifdef TCP_ACCOUNTING 16450 crtsc = get_cyclecount(); 16451 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16452 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16453 } 16454 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16455 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16456 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16457 } 16458 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16459 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16460 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16461 } 16462 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16463 sched_unpin(); 16464 #endif 16465 return (0); 16466 failed: 16467 if (m) 16468 m_free(m); 16469 rack->r_fast_output = 0; 16470 return (-1); 16471 } 16472 16473 static int 16474 rack_output(struct tcpcb *tp) 16475 { 16476 struct socket *so; 16477 uint32_t recwin; 16478 uint32_t sb_offset, s_moff = 0; 16479 int32_t len, error = 0; 16480 uint16_t flags; 16481 struct mbuf *m, *s_mb = NULL; 16482 struct mbuf *mb; 16483 uint32_t if_hw_tsomaxsegcount = 0; 16484 uint32_t if_hw_tsomaxsegsize; 16485 int32_t segsiz, minseg; 16486 long tot_len_this_send = 0; 16487 #ifdef INET 16488 struct ip *ip = NULL; 16489 #endif 16490 #ifdef TCPDEBUG 16491 struct ipovly *ipov = NULL; 16492 #endif 16493 struct udphdr *udp = NULL; 16494 struct tcp_rack *rack; 16495 struct tcphdr *th; 16496 uint8_t pass = 0; 16497 uint8_t mark = 0; 16498 uint8_t wanted_cookie = 0; 16499 u_char opt[TCP_MAXOLEN]; 16500 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16501 uint32_t rack_seq; 16502 16503 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16504 unsigned ipsec_optlen = 0; 16505 16506 #endif 16507 int32_t idle, sendalot; 16508 int32_t sub_from_prr = 0; 16509 volatile int32_t sack_rxmit; 16510 struct rack_sendmap *rsm = NULL; 16511 int32_t tso, mtu; 16512 struct tcpopt to; 16513 int32_t slot = 0; 16514 int32_t sup_rack = 0; 16515 uint32_t cts, ms_cts, delayed, early; 16516 uint16_t add_flag = RACK_SENT_SP; 16517 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16518 uint8_t hpts_calling, doing_tlp = 0; 16519 uint32_t cwnd_to_use, pace_max_seg; 16520 int32_t do_a_prefetch = 0; 16521 int32_t prefetch_rsm = 0; 16522 int32_t orig_len = 0; 16523 struct timeval tv; 16524 int32_t prefetch_so_done = 0; 16525 struct tcp_log_buffer *lgb; 16526 struct inpcb *inp; 16527 struct sockbuf *sb; 16528 uint64_t ts_val = 0; 16529 #ifdef TCP_ACCOUNTING 16530 uint64_t crtsc; 16531 #endif 16532 #ifdef INET6 16533 struct ip6_hdr *ip6 = NULL; 16534 int32_t isipv6; 16535 #endif 16536 uint8_t filled_all = 0; 16537 bool hw_tls = false; 16538 16539 /* setup and take the cache hits here */ 16540 rack = (struct tcp_rack *)tp->t_fb_ptr; 16541 #ifdef TCP_ACCOUNTING 16542 sched_pin(); 16543 ts_val = get_cyclecount(); 16544 #endif 16545 hpts_calling = rack->rc_inp->inp_hpts_calls; 16546 NET_EPOCH_ASSERT(); 16547 INP_WLOCK_ASSERT(rack->rc_inp); 16548 #ifdef TCP_OFFLOAD 
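/* If the connection is offloaded (TOE), hand output to the offload path. */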
16549 if (tp->t_flags & TF_TOE) { 16550 #ifdef TCP_ACCOUNTING 16551 sched_unpin(); 16552 #endif 16553 return (tcp_offload_output(tp)); 16554 } 16555 #endif 16556 /* 16557 * For TFO connections in SYN_RECEIVED, only allow the initial 16558 * SYN|ACK and those sent by the retransmit timer. 16559 */ 16560 if (IS_FASTOPEN(tp->t_flags) && 16561 (tp->t_state == TCPS_SYN_RECEIVED) && 16562 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16563 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16564 #ifdef TCP_ACCOUNTING 16565 sched_unpin(); 16566 #endif 16567 return (0); 16568 } 16569 #ifdef INET6 16570 if (rack->r_state) { 16571 /* Use the cache line loaded if possible */ 16572 isipv6 = rack->r_is_v6; 16573 } else { 16574 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16575 } 16576 #endif 16577 early = 0; 16578 cts = tcp_get_usecs(&tv); 16579 ms_cts = tcp_tv_to_mssectick(&tv); 16580 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16581 tcp_in_hpts(rack->rc_inp)) { 16582 /* 16583 * We are on the hpts for some timer but not hptsi output. 16584 * Remove from the hpts unconditionally. 16585 */ 16586 rack_timer_cancel(tp, rack, cts, __LINE__); 16587 } 16588 /* Are we pacing and late? */ 16589 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16590 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16591 /* We are delayed */ 16592 delayed = cts - rack->r_ctl.rc_last_output_to; 16593 } else { 16594 delayed = 0; 16595 } 16596 /* Do the timers, which may override the pacer */ 16597 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16598 int retval; 16599 16600 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16601 &doing_tlp); 16602 if (retval != 0) { 16603 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16604 #ifdef TCP_ACCOUNTING 16605 sched_unpin(); 16606 #endif 16607 /* 16608 * If timers want tcp_drop(), then pass error out, 16609 * otherwise suppress it. 16610 */ 16611 return (retval < 0 ? retval : 0); 16612 } 16613 } 16614 if (rack->rc_in_persist) { 16615 if (tcp_in_hpts(rack->rc_inp) == 0) { 16616 /* Timer is not running */ 16617 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16618 } 16619 #ifdef TCP_ACCOUNTING 16620 sched_unpin(); 16621 #endif 16622 return (0); 16623 } 16624 if ((rack->r_timer_override) || 16625 (rack->rc_ack_can_sendout_data) || 16626 (delayed) || 16627 (tp->t_state < TCPS_ESTABLISHED)) { 16628 rack->rc_ack_can_sendout_data = 0; 16629 if (tcp_in_hpts(rack->rc_inp)) 16630 tcp_hpts_remove(rack->rc_inp); 16631 } else if (tcp_in_hpts(rack->rc_inp)) { 16632 /* 16633 * On the hpts you can't pass even if ACKNOW is on, we will 16634 * when the hpts fires. 
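 * We only charge the blocked time to the SND_BLOCKED accounting bucket
 * below and return; the hpts will call us back at the scheduled slot.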
16635 */ 16636 #ifdef TCP_ACCOUNTING 16637 crtsc = get_cyclecount(); 16638 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16639 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16640 } 16641 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16642 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16643 tp->tcp_cnt_counters[SND_BLOCKED]++; 16644 } 16645 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16646 sched_unpin(); 16647 #endif 16648 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16649 return (0); 16650 } 16651 rack->rc_inp->inp_hpts_calls = 0; 16652 /* Finish out both pacing early and late accounting */ 16653 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16654 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16655 early = rack->r_ctl.rc_last_output_to - cts; 16656 } else 16657 early = 0; 16658 if (delayed) { 16659 rack->r_ctl.rc_agg_delayed += delayed; 16660 rack->r_late = 1; 16661 } else if (early) { 16662 rack->r_ctl.rc_agg_early += early; 16663 rack->r_early = 1; 16664 } 16665 /* Now that early/late accounting is done turn off the flag */ 16666 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16667 rack->r_wanted_output = 0; 16668 rack->r_timer_override = 0; 16669 if ((tp->t_state != rack->r_state) && 16670 TCPS_HAVEESTABLISHED(tp->t_state)) { 16671 rack_set_state(tp, rack); 16672 } 16673 if ((rack->r_fast_output) && 16674 (doing_tlp == 0) && 16675 (tp->rcv_numsacks == 0)) { 16676 int ret; 16677 16678 error = 0; 16679 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16680 if (ret >= 0) 16681 return(ret); 16682 else if (error) { 16683 inp = rack->rc_inp; 16684 so = inp->inp_socket; 16685 sb = &so->so_snd; 16686 goto nomore; 16687 } 16688 } 16689 inp = rack->rc_inp; 16690 /* 16691 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16692 * only allow the initial SYN or SYN|ACK and those sent 16693 * by the retransmit timer. 16694 */ 16695 if (IS_FASTOPEN(tp->t_flags) && 16696 ((tp->t_state == TCPS_SYN_RECEIVED) || 16697 (tp->t_state == TCPS_SYN_SENT)) && 16698 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16699 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16700 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16701 so = inp->inp_socket; 16702 sb = &so->so_snd; 16703 goto just_return_nolock; 16704 } 16705 /* 16706 * Determine length of data that should be transmitted, and flags 16707 * that will be used. If there is some data or critical controls 16708 * (SYN, RST) to send, then transmit; otherwise, investigate 16709 * further. 
16710 */ 16711 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16712 if (tp->t_idle_reduce) { 16713 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16714 rack_cc_after_idle(rack, tp); 16715 } 16716 tp->t_flags &= ~TF_LASTIDLE; 16717 if (idle) { 16718 if (tp->t_flags & TF_MORETOCOME) { 16719 tp->t_flags |= TF_LASTIDLE; 16720 idle = 0; 16721 } 16722 } 16723 if ((tp->snd_una == tp->snd_max) && 16724 rack->r_ctl.rc_went_idle_time && 16725 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16726 idle = cts - rack->r_ctl.rc_went_idle_time; 16727 if (idle > rack_min_probertt_hold) { 16728 /* Count as a probe rtt */ 16729 if (rack->in_probe_rtt == 0) { 16730 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16731 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16732 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16733 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16734 } else { 16735 rack_exit_probertt(rack, cts); 16736 } 16737 } 16738 idle = 0; 16739 } 16740 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16741 rack_init_fsb_block(tp, rack); 16742 again: 16743 /* 16744 * If we've recently taken a timeout, snd_max will be greater than 16745 * snd_nxt. There may be SACK information that allows us to avoid 16746 * resending already delivered data. Adjust snd_nxt accordingly. 16747 */ 16748 sendalot = 0; 16749 cts = tcp_get_usecs(&tv); 16750 ms_cts = tcp_tv_to_mssectick(&tv); 16751 tso = 0; 16752 mtu = 0; 16753 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16754 minseg = segsiz; 16755 if (rack->r_ctl.rc_pace_max_segs == 0) 16756 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16757 else 16758 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16759 sb_offset = tp->snd_max - tp->snd_una; 16760 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16761 flags = tcp_outflags[tp->t_state]; 16762 while (rack->rc_free_cnt < rack_free_cache) { 16763 rsm = rack_alloc(rack); 16764 if (rsm == NULL) { 16765 if (inp->inp_hpts_calls) 16766 /* Retry in a ms */ 16767 slot = (1 * HPTS_USEC_IN_MSEC); 16768 so = inp->inp_socket; 16769 sb = &so->so_snd; 16770 goto just_return_nolock; 16771 } 16772 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16773 rack->rc_free_cnt++; 16774 rsm = NULL; 16775 } 16776 if (inp->inp_hpts_calls) 16777 inp->inp_hpts_calls = 0; 16778 sack_rxmit = 0; 16779 len = 0; 16780 rsm = NULL; 16781 if (flags & TH_RST) { 16782 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16783 so = inp->inp_socket; 16784 sb = &so->so_snd; 16785 goto send; 16786 } 16787 if (rack->r_ctl.rc_resend) { 16788 /* Retransmit timer */ 16789 rsm = rack->r_ctl.rc_resend; 16790 rack->r_ctl.rc_resend = NULL; 16791 len = rsm->r_end - rsm->r_start; 16792 sack_rxmit = 1; 16793 sendalot = 0; 16794 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16795 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16796 __func__, __LINE__, 16797 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16798 sb_offset = rsm->r_start - tp->snd_una; 16799 if (len >= segsiz) 16800 len = segsiz; 16801 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16802 /* We have a retransmit that takes precedence */ 16803 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16804 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16805 /* Enter recovery if not induced by a time-out */ 16806 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 16807 } 16808 #ifdef INVARIANTS 16809 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16810 panic("Huh, tp:%p rack:%p 
rsm:%p start:%u < snd_una:%u\n", 16811 tp, rack, rsm, rsm->r_start, tp->snd_una); 16812 } 16813 #endif 16814 len = rsm->r_end - rsm->r_start; 16815 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16816 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16817 __func__, __LINE__, 16818 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16819 sb_offset = rsm->r_start - tp->snd_una; 16820 sendalot = 0; 16821 if (len >= segsiz) 16822 len = segsiz; 16823 if (len > 0) { 16824 sack_rxmit = 1; 16825 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16826 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16827 min(len, segsiz)); 16828 } 16829 } else if (rack->r_ctl.rc_tlpsend) { 16830 /* Tail loss probe */ 16831 long cwin; 16832 long tlen; 16833 16834 /* 16835 * Check if we can do a TLP with a RACK'd packet 16836 * this can happen if we are not doing the rack 16837 * cheat and we skipped to a TLP and it 16838 * went off. 16839 */ 16840 rsm = rack->r_ctl.rc_tlpsend; 16841 /* We are doing a TLP make sure the flag is preent */ 16842 rsm->r_flags |= RACK_TLP; 16843 rack->r_ctl.rc_tlpsend = NULL; 16844 sack_rxmit = 1; 16845 tlen = rsm->r_end - rsm->r_start; 16846 if (tlen > segsiz) 16847 tlen = segsiz; 16848 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16849 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16850 __func__, __LINE__, 16851 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16852 sb_offset = rsm->r_start - tp->snd_una; 16853 cwin = min(tp->snd_wnd, tlen); 16854 len = cwin; 16855 } 16856 if (rack->r_must_retran && 16857 (doing_tlp == 0) && 16858 (rsm == NULL)) { 16859 /* 16860 * Non-Sack and we had a RTO or Sack/non-Sack and a 16861 * MTU change, we need to retransmit until we reach 16862 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto). 16863 */ 16864 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 16865 int sendwin, flight; 16866 16867 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 16868 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 16869 if (flight >= sendwin) { 16870 so = inp->inp_socket; 16871 sb = &so->so_snd; 16872 goto just_return_nolock; 16873 } 16874 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16875 if (rsm == NULL) { 16876 /* TSNH */ 16877 rack->r_must_retran = 0; 16878 rack->r_ctl.rc_out_at_rto = 0; 16879 rack->r_must_retran = 0; 16880 so = inp->inp_socket; 16881 sb = &so->so_snd; 16882 goto just_return_nolock; 16883 } 16884 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 16885 /* It does not have the flag, we are done */ 16886 rack->r_must_retran = 0; 16887 rack->r_ctl.rc_out_at_rto = 0; 16888 } else { 16889 sack_rxmit = 1; 16890 len = rsm->r_end - rsm->r_start; 16891 sendalot = 0; 16892 sb_offset = rsm->r_start - tp->snd_una; 16893 if (len >= segsiz) 16894 len = segsiz; 16895 /* 16896 * Delay removing the flag RACK_MUST_RXT so 16897 * that the fastpath for retransmit will 16898 * work with this rsm. 16899 */ 16900 16901 } 16902 } else { 16903 /* We must be done if there is nothing outstanding */ 16904 rack->r_must_retran = 0; 16905 rack->r_ctl.rc_out_at_rto = 0; 16906 } 16907 } 16908 /* 16909 * Enforce a connection sendmap count limit if set 16910 * as long as we are not retransmiting. 
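 * The check below compares rc_num_maps_alloced against
 * V_tcp_map_entries_limit; when the limit is hit we bump the counters
 * (once per connection for the conns counter) and take the just_return
 * path, since sending new data would require allocating more sendmap
 * entries. Retransmits (rsm != NULL) reuse existing entries and bypass
 * the check.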
16911 */ 16912 if ((rsm == NULL) && 16913 (rack->do_detection == 0) && 16914 (V_tcp_map_entries_limit > 0) && 16915 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 16916 counter_u64_add(rack_to_alloc_limited, 1); 16917 if (!rack->alloc_limit_reported) { 16918 rack->alloc_limit_reported = 1; 16919 counter_u64_add(rack_alloc_limited_conns, 1); 16920 } 16921 so = inp->inp_socket; 16922 sb = &so->so_snd; 16923 goto just_return_nolock; 16924 } 16925 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 16926 /* we are retransmitting the fin */ 16927 len--; 16928 if (len) { 16929 /* 16930 * When retransmitting data do *not* include the 16931 * FIN. This could happen from a TLP probe. 16932 */ 16933 flags &= ~TH_FIN; 16934 } 16935 } 16936 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 16937 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 16938 int ret; 16939 16940 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 16941 if (ret == 0) 16942 return (0); 16943 } 16944 if (rsm && (rsm->r_flags & RACK_MUST_RXT)) { 16945 /* 16946 * Clear the flag in prep for the send 16947 * note that if we can't get an mbuf 16948 * and fail, we won't retransmit this 16949 * rsm but that should be ok (its rare). 16950 */ 16951 rsm->r_flags &= ~RACK_MUST_RXT; 16952 } 16953 so = inp->inp_socket; 16954 sb = &so->so_snd; 16955 if (do_a_prefetch == 0) { 16956 kern_prefetch(sb, &do_a_prefetch); 16957 do_a_prefetch = 1; 16958 } 16959 #ifdef NETFLIX_SHARED_CWND 16960 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 16961 rack->rack_enable_scwnd) { 16962 /* We are doing cwnd sharing */ 16963 if (rack->gp_ready && 16964 (rack->rack_attempted_scwnd == 0) && 16965 (rack->r_ctl.rc_scw == NULL) && 16966 tp->t_lib) { 16967 /* The pcbid is in, lets make an attempt */ 16968 counter_u64_add(rack_try_scwnd, 1); 16969 rack->rack_attempted_scwnd = 1; 16970 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 16971 &rack->r_ctl.rc_scw_index, 16972 segsiz); 16973 } 16974 if (rack->r_ctl.rc_scw && 16975 (rack->rack_scwnd_is_idle == 1) && 16976 sbavail(&so->so_snd)) { 16977 /* we are no longer out of data */ 16978 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 16979 rack->rack_scwnd_is_idle = 0; 16980 } 16981 if (rack->r_ctl.rc_scw) { 16982 /* First lets update and get the cwnd */ 16983 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 16984 rack->r_ctl.rc_scw_index, 16985 tp->snd_cwnd, tp->snd_wnd, segsiz); 16986 } 16987 } 16988 #endif 16989 /* 16990 * Get standard flags, and add SYN or FIN if requested by 'hidden' 16991 * state flags. 16992 */ 16993 if (tp->t_flags & TF_NEEDFIN) 16994 flags |= TH_FIN; 16995 if (tp->t_flags & TF_NEEDSYN) 16996 flags |= TH_SYN; 16997 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 16998 void *end_rsm; 16999 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17000 if (end_rsm) 17001 kern_prefetch(end_rsm, &prefetch_rsm); 17002 prefetch_rsm = 1; 17003 } 17004 SOCKBUF_LOCK(sb); 17005 /* 17006 * If snd_nxt == snd_max and we have transmitted a FIN, the 17007 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17008 * negative length. This can also occur when TCP opens up its 17009 * congestion window while receiving additional duplicate acks after 17010 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17011 * the fast-retransmit. 
17012 * 17013 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17014 * set to snd_una, the sb_offset will be 0, and the length may wind 17015 * up 0. 17016 * 17017 * If sack_rxmit is true we are retransmitting from the scoreboard 17018 * in which case len is already set. 17019 */ 17020 if ((sack_rxmit == 0) && 17021 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17022 uint32_t avail; 17023 17024 avail = sbavail(sb); 17025 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17026 sb_offset = tp->snd_nxt - tp->snd_una; 17027 else 17028 sb_offset = 0; 17029 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17030 if (rack->r_ctl.rc_tlp_new_data) { 17031 /* TLP is forcing out new data */ 17032 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17033 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17034 } 17035 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17036 if (tp->snd_wnd > sb_offset) 17037 len = tp->snd_wnd - sb_offset; 17038 else 17039 len = 0; 17040 } else { 17041 len = rack->r_ctl.rc_tlp_new_data; 17042 } 17043 rack->r_ctl.rc_tlp_new_data = 0; 17044 } else { 17045 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17046 } 17047 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17048 /* 17049 * For prr=off, we need to send only 1 MSS 17050 * at a time. We do this because another sack could 17051 * be arriving that causes us to send retransmits and 17052 * we don't want to be on a long pace due to a larger send 17053 * that keeps us from sending out the retransmit. 17054 */ 17055 len = segsiz; 17056 } 17057 } else { 17058 uint32_t outstanding; 17059 /* 17060 * We are inside of a Fast recovery episode, this 17061 * is caused by a SACK or 3 dup acks. At this point 17062 * we have sent all the retransmissions and we rely 17063 * on PRR to dictate what we will send in the form of 17064 * new data. 17065 */ 17066 17067 outstanding = tp->snd_max - tp->snd_una; 17068 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17069 if (tp->snd_wnd > outstanding) { 17070 len = tp->snd_wnd - outstanding; 17071 /* Check to see if we have the data */ 17072 if ((sb_offset + len) > avail) { 17073 /* It does not all fit */ 17074 if (avail > sb_offset) 17075 len = avail - sb_offset; 17076 else 17077 len = 0; 17078 } 17079 } else { 17080 len = 0; 17081 } 17082 } else if (avail > sb_offset) { 17083 len = avail - sb_offset; 17084 } else { 17085 len = 0; 17086 } 17087 if (len > 0) { 17088 if (len > rack->r_ctl.rc_prr_sndcnt) { 17089 len = rack->r_ctl.rc_prr_sndcnt; 17090 } 17091 if (len > 0) { 17092 sub_from_prr = 1; 17093 } 17094 } 17095 if (len > segsiz) { 17096 /* 17097 * We should never send more than a MSS when 17098 * retransmitting or sending new data in prr 17099 * mode unless the override flag is on. Most 17100 * likely the PRR algorithm is not going to 17101 * let us send a lot as well :-) 17102 */ 17103 if (rack->r_ctl.rc_prr_sendalot == 0) { 17104 len = segsiz; 17105 } 17106 } else if (len < segsiz) { 17107 /* 17108 * Do we send any? The idea here is if the 17109 * send empty's the socket buffer we want to 17110 * do it. However if not then lets just wait 17111 * for our prr_sndcnt to get bigger. 
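 * As an illustration (numbers are made up): with sbavail(sb) = 10000,
 * sb_offset = 9000 and a PRR-limited len of 800, leftinsb is 1000 which
 * is greater than len, so we send nothing and wait for more PRR credit;
 * had len been 1000 the send would empty the buffer, so the sub-MSS
 * segment is allowed out.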
17112 */ 17113 long leftinsb; 17114 17115 leftinsb = sbavail(sb) - sb_offset; 17116 if (leftinsb > len) { 17117 /* This send does not empty the sb */ 17118 len = 0; 17119 } 17120 } 17121 } 17122 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17123 /* 17124 * If you have not established 17125 * and are not doing FAST OPEN 17126 * no data please. 17127 */ 17128 if ((sack_rxmit == 0) && 17129 (!IS_FASTOPEN(tp->t_flags))){ 17130 len = 0; 17131 sb_offset = 0; 17132 } 17133 } 17134 if (prefetch_so_done == 0) { 17135 kern_prefetch(so, &prefetch_so_done); 17136 prefetch_so_done = 1; 17137 } 17138 /* 17139 * Lop off SYN bit if it has already been sent. However, if this is 17140 * SYN-SENT state and if segment contains data and if we don't know 17141 * that foreign host supports TAO, suppress sending segment. 17142 */ 17143 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17144 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17145 /* 17146 * When sending additional segments following a TFO SYN|ACK, 17147 * do not include the SYN bit. 17148 */ 17149 if (IS_FASTOPEN(tp->t_flags) && 17150 (tp->t_state == TCPS_SYN_RECEIVED)) 17151 flags &= ~TH_SYN; 17152 } 17153 /* 17154 * Be careful not to send data and/or FIN on SYN segments. This 17155 * measure is needed to prevent interoperability problems with not 17156 * fully conformant TCP implementations. 17157 */ 17158 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17159 len = 0; 17160 flags &= ~TH_FIN; 17161 } 17162 /* 17163 * On TFO sockets, ensure no data is sent in the following cases: 17164 * 17165 * - When retransmitting SYN|ACK on a passively-created socket 17166 * 17167 * - When retransmitting SYN on an actively created socket 17168 * 17169 * - When sending a zero-length cookie (cookie request) on an 17170 * actively created socket 17171 * 17172 * - When the socket is in the CLOSED state (RST is being sent) 17173 */ 17174 if (IS_FASTOPEN(tp->t_flags) && 17175 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17176 ((tp->t_state == TCPS_SYN_SENT) && 17177 (tp->t_tfo_client_cookie_len == 0)) || 17178 (flags & TH_RST))) { 17179 sack_rxmit = 0; 17180 len = 0; 17181 } 17182 /* Without fast-open there should never be data sent on a SYN */ 17183 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17184 tp->snd_nxt = tp->iss; 17185 len = 0; 17186 } 17187 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17188 /* We only send 1 MSS if we have a DSACK block */ 17189 add_flag |= RACK_SENT_W_DSACK; 17190 len = segsiz; 17191 } 17192 orig_len = len; 17193 if (len <= 0) { 17194 /* 17195 * If FIN has been sent but not acked, but we haven't been 17196 * called to retransmit, len will be < 0. Otherwise, window 17197 * shrank after we sent into it. If window shrank to 0, 17198 * cancel pending retransmit, pull snd_nxt back to (closed) 17199 * window, and set the persist timer if it isn't already 17200 * going. If the window didn't close completely, just wait 17201 * for an ACK. 17202 * 17203 * We also do a general check here to ensure that we will 17204 * set the persist timer when we have data to send, but a 17205 * 0-byte window. This makes sure the persist timer is set 17206 * even if the packet hits one of the "goto send" lines 17207 * below. 
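 * Concretely, the check below enters persists only when the peer's
 * window is zero, the connection is established, nothing is in flight
 * (snd_una == snd_max) and there is still data queued beyond sb_offset.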
17208 */ 17209 len = 0; 17210 if ((tp->snd_wnd == 0) && 17211 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17212 (tp->snd_una == tp->snd_max) && 17213 (sb_offset < (int)sbavail(sb))) { 17214 rack_enter_persist(tp, rack, cts); 17215 } 17216 } else if ((rsm == NULL) && 17217 (doing_tlp == 0) && 17218 (len < pace_max_seg)) { 17219 /* 17220 * We are not sending a maximum sized segment for 17221 * some reason. Should we not send anything (think 17222 * sws or persists)? 17223 */ 17224 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17225 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17226 (len < minseg) && 17227 (len < (int)(sbavail(sb) - sb_offset))) { 17228 /* 17229 * Here the rwnd is less than 17230 * the minimum pacing size, this is not a retransmit, 17231 * we are established and 17232 * the send is not the last in the socket buffer 17233 * we send nothing, and we may enter persists 17234 * if nothing is outstanding. 17235 */ 17236 len = 0; 17237 if (tp->snd_max == tp->snd_una) { 17238 /* 17239 * Nothing out we can 17240 * go into persists. 17241 */ 17242 rack_enter_persist(tp, rack, cts); 17243 } 17244 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17245 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17246 (len < (int)(sbavail(sb) - sb_offset)) && 17247 (len < minseg)) { 17248 /* 17249 * Here we are not retransmitting, and 17250 * the cwnd is not so small that we could 17251 * not send at least a min size (rxt timer 17252 * not having gone off), We have 2 segments or 17253 * more already in flight, its not the tail end 17254 * of the socket buffer and the cwnd is blocking 17255 * us from sending out a minimum pacing segment size. 17256 * Lets not send anything. 17257 */ 17258 len = 0; 17259 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17260 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17261 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17262 (len < (int)(sbavail(sb) - sb_offset)) && 17263 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17264 /* 17265 * Here we have a send window but we have 17266 * filled it up and we can't send another pacing segment. 17267 * We also have in flight more than 2 segments 17268 * and we are not completing the sb i.e. we allow 17269 * the last bytes of the sb to go out even if 17270 * its not a full pacing segment. 17271 */ 17272 len = 0; 17273 } else if ((rack->r_ctl.crte != NULL) && 17274 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17275 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17276 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17277 (len < (int)(sbavail(sb) - sb_offset))) { 17278 /* 17279 * Here we are doing hardware pacing, this is not a TLP, 17280 * we are not sending a pace max segment size, there is rwnd 17281 * room to send at least N pace_max_seg, the cwnd is greater 17282 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17283 * more segments in flight and its not the tail of the socket buffer. 17284 * 17285 * We don't want to send instead we need to get more ack's in to 17286 * allow us to send a full pacing segment. Normally, if we are pacing 17287 * about the right speed, we should have finished our pacing 17288 * send as most of the acks have come back if we are at the 17289 * right rate. This is a bit fuzzy since return path delay 17290 * can delay the acks, which is why we want to make sure we 17291 * have cwnd space to have a bit more than a max pace segments in flight. 
17292 * 17293 * If we have not gotten our acks back we are pacing at too high a 17294 * rate delaying will not hurt and will bring our GP estimate down by 17295 * injecting the delay. If we don't do this we will send 17296 * 2 MSS out in response to the acks being clocked in which 17297 * defeats the point of hw-pacing (i.e. to help us get 17298 * larger TSO's out). 17299 */ 17300 len = 0; 17301 17302 } 17303 17304 } 17305 /* len will be >= 0 after this point. */ 17306 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17307 rack_sndbuf_autoscale(rack); 17308 /* 17309 * Decide if we can use TCP Segmentation Offloading (if supported by 17310 * hardware). 17311 * 17312 * TSO may only be used if we are in a pure bulk sending state. The 17313 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17314 * options prevent using TSO. With TSO the TCP header is the same 17315 * (except for the sequence number) for all generated packets. This 17316 * makes it impossible to transmit any options which vary per 17317 * generated segment or packet. 17318 * 17319 * IPv4 handling has a clear separation of ip options and ip header 17320 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17321 * the right thing below to provide length of just ip options and thus 17322 * checking for ipoptlen is enough to decide if ip options are present. 17323 */ 17324 ipoptlen = 0; 17325 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17326 /* 17327 * Pre-calculate here as we save another lookup into the darknesses 17328 * of IPsec that way and can actually decide if TSO is ok. 17329 */ 17330 #ifdef INET6 17331 if (isipv6 && IPSEC_ENABLED(ipv6)) 17332 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17333 #ifdef INET 17334 else 17335 #endif 17336 #endif /* INET6 */ 17337 #ifdef INET 17338 if (IPSEC_ENABLED(ipv4)) 17339 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17340 #endif /* INET */ 17341 #endif 17342 17343 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17344 ipoptlen += ipsec_optlen; 17345 #endif 17346 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17347 (tp->t_port == 0) && 17348 ((tp->t_flags & TF_SIGNATURE) == 0) && 17349 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17350 ipoptlen == 0) 17351 tso = 1; 17352 { 17353 uint32_t outstanding; 17354 17355 outstanding = tp->snd_max - tp->snd_una; 17356 if (tp->t_flags & TF_SENTFIN) { 17357 /* 17358 * If we sent a fin, snd_max is 1 higher than 17359 * snd_una 17360 */ 17361 outstanding--; 17362 } 17363 if (sack_rxmit) { 17364 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17365 flags &= ~TH_FIN; 17366 } else { 17367 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17368 sbused(sb))) 17369 flags &= ~TH_FIN; 17370 } 17371 } 17372 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17373 (long)TCP_MAXWIN << tp->rcv_scale); 17374 17375 /* 17376 * Sender silly window avoidance. We transmit under the following 17377 * conditions when len is non-zero: 17378 * 17379 * - We have a full segment (or more with TSO) - This is the last 17380 * buffer in a write()/send() and we are either idle or running 17381 * NODELAY - we've timed out (e.g. persist timer) - we have more 17382 * then 1/2 the maximum send window's worth of data (receiver may be 17383 * limited the window size) - we need to retransmit 17384 */ 17385 if (len) { 17386 if (len >= segsiz) { 17387 goto send; 17388 } 17389 /* 17390 * NOTE! 
on localhost connections an 'ack' from the remote 17391 * end may occur synchronously with the output and cause us 17392 * to flush a buffer queued with moretocome. XXX 17393 * 17394 */ 17395 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17396 (idle || (tp->t_flags & TF_NODELAY)) && 17397 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17398 (tp->t_flags & TF_NOPUSH) == 0) { 17399 pass = 2; 17400 goto send; 17401 } 17402 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17403 pass = 22; 17404 goto send; 17405 } 17406 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17407 pass = 4; 17408 goto send; 17409 } 17410 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17411 pass = 5; 17412 goto send; 17413 } 17414 if (sack_rxmit) { 17415 pass = 6; 17416 goto send; 17417 } 17418 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17419 (ctf_outstanding(tp) < (segsiz * 2))) { 17420 /* 17421 * We have less than two MSS outstanding (delayed ack) 17422 * and our rwnd will not let us send a full sized 17423 * MSS. Lets go ahead and let this small segment 17424 * out because we want to try to have at least two 17425 * packets inflight to not be caught by delayed ack. 17426 */ 17427 pass = 12; 17428 goto send; 17429 } 17430 } 17431 /* 17432 * Sending of standalone window updates. 17433 * 17434 * Window updates are important when we close our window due to a 17435 * full socket buffer and are opening it again after the application 17436 * reads data from it. Once the window has opened again and the 17437 * remote end starts to send again the ACK clock takes over and 17438 * provides the most current window information. 17439 * 17440 * We must avoid the silly window syndrome whereas every read from 17441 * the receive buffer, no matter how small, causes a window update 17442 * to be sent. We also should avoid sending a flurry of window 17443 * updates when the socket buffer had queued a lot of data and the 17444 * application is doing small reads. 17445 * 17446 * Prevent a flurry of pointless window updates by only sending an 17447 * update when we can increase the advertized window by more than 17448 * 1/4th of the socket buffer capacity. When the buffer is getting 17449 * full or is very small be more aggressive and send an update 17450 * whenever we can increase by two mss sized segments. In all other 17451 * situations the ACK's to new incoming data will carry further 17452 * window increases. 17453 * 17454 * Don't send an independent window update if a delayed ACK is 17455 * pending (it will get piggy-backed on it) or the remote side 17456 * already has done a half-close and won't send more data. Skip 17457 * this if the connection is in T/TCP half-open state. 17458 */ 17459 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17460 !(tp->t_flags & TF_DELACK) && 17461 !TCPS_HAVERCVDFIN(tp->t_state)) { 17462 /* 17463 * "adv" is the amount we could increase the window, taking 17464 * into account that we are limited by TCP_MAXWIN << 17465 * tp->rcv_scale. 17466 */ 17467 int32_t adv; 17468 int oldwin; 17469 17470 adv = recwin; 17471 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17472 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17473 if (adv > oldwin) 17474 adv -= oldwin; 17475 else { 17476 /* We can't increase the window */ 17477 adv = 0; 17478 } 17479 } else 17480 oldwin = 0; 17481 17482 /* 17483 * If the new window size ends up being the same as or less 17484 * than the old size when it is scaled, then don't force 17485 * a window update. 
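 * As an illustration (numbers are made up): with so_rcv.sb_hiwat =
 * 65536 and segsiz = 1460, the tests below send a standalone update
 * only if the window can open by at least 2 * segsiz (2920 bytes) and
 * either the opening is >= 16384 (hiwat / 4), the advertised window has
 * fallen to <= 8192 (hiwat / 8), or the buffer is no larger than eight
 * segments; a second test also fires when the opening alone is at least
 * half the buffer.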
17486 */ 17487 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17488 goto dontupdate; 17489 17490 if (adv >= (int32_t)(2 * segsiz) && 17491 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17492 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17493 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17494 pass = 7; 17495 goto send; 17496 } 17497 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17498 pass = 23; 17499 goto send; 17500 } 17501 } 17502 dontupdate: 17503 17504 /* 17505 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17506 * is also a catch-all for the retransmit timer timeout case. 17507 */ 17508 if (tp->t_flags & TF_ACKNOW) { 17509 pass = 8; 17510 goto send; 17511 } 17512 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17513 pass = 9; 17514 goto send; 17515 } 17516 /* 17517 * If our state indicates that FIN should be sent and we have not 17518 * yet done so, then we need to send. 17519 */ 17520 if ((flags & TH_FIN) && 17521 (tp->snd_nxt == tp->snd_una)) { 17522 pass = 11; 17523 goto send; 17524 } 17525 /* 17526 * No reason to send a segment, just return. 17527 */ 17528 just_return: 17529 SOCKBUF_UNLOCK(sb); 17530 just_return_nolock: 17531 { 17532 int app_limited = CTF_JR_SENT_DATA; 17533 17534 if (tot_len_this_send > 0) { 17535 /* Make sure snd_nxt is up to max */ 17536 rack->r_ctl.fsb.recwin = recwin; 17537 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17538 if ((error == 0) && 17539 rack_use_rfo && 17540 ((flags & (TH_SYN|TH_FIN)) == 0) && 17541 (ipoptlen == 0) && 17542 (tp->snd_nxt == tp->snd_max) && 17543 (tp->rcv_numsacks == 0) && 17544 rack->r_fsb_inited && 17545 TCPS_HAVEESTABLISHED(tp->t_state) && 17546 (rack->r_must_retran == 0) && 17547 ((tp->t_flags & TF_NEEDFIN) == 0) && 17548 (len > 0) && (orig_len > 0) && 17549 (orig_len > len) && 17550 ((orig_len - len) >= segsiz) && 17551 ((optlen == 0) || 17552 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17553 /* We can send at least one more MSS using our fsb */ 17554 17555 rack->r_fast_output = 1; 17556 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17557 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17558 rack->r_ctl.fsb.tcp_flags = flags; 17559 rack->r_ctl.fsb.left_to_send = orig_len - len; 17560 if (hw_tls) 17561 rack->r_ctl.fsb.hw_tls = 1; 17562 else 17563 rack->r_ctl.fsb.hw_tls = 0; 17564 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17565 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17566 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17567 (tp->snd_max - tp->snd_una))); 17568 if (rack->r_ctl.fsb.left_to_send < segsiz) 17569 rack->r_fast_output = 0; 17570 else { 17571 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17572 rack->r_ctl.fsb.rfo_apply_push = 1; 17573 else 17574 rack->r_ctl.fsb.rfo_apply_push = 0; 17575 } 17576 } else 17577 rack->r_fast_output = 0; 17578 17579 17580 rack_log_fsb(rack, tp, so, flags, 17581 ipoptlen, orig_len, len, 0, 17582 1, optlen, __LINE__, 1); 17583 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17584 tp->snd_nxt = tp->snd_max; 17585 } else { 17586 int end_window = 0; 17587 uint32_t seq = tp->gput_ack; 17588 17589 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17590 if (rsm) { 17591 /* 17592 * Mark the last sent that we just-returned (hinting 17593 * that delayed ack may play a role in any rtt measurement). 
17594 */ 17595 rsm->r_just_ret = 1; 17596 } 17597 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17598 rack->r_ctl.rc_agg_delayed = 0; 17599 rack->r_early = 0; 17600 rack->r_late = 0; 17601 rack->r_ctl.rc_agg_early = 0; 17602 if ((ctf_outstanding(tp) + 17603 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17604 minseg)) >= tp->snd_wnd) { 17605 /* We are limited by the rwnd */ 17606 app_limited = CTF_JR_RWND_LIMITED; 17607 if (IN_FASTRECOVERY(tp->t_flags)) 17608 rack->r_ctl.rc_prr_sndcnt = 0; 17609 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17610 /* We are limited by whats available -- app limited */ 17611 app_limited = CTF_JR_APP_LIMITED; 17612 if (IN_FASTRECOVERY(tp->t_flags)) 17613 rack->r_ctl.rc_prr_sndcnt = 0; 17614 } else if ((idle == 0) && 17615 ((tp->t_flags & TF_NODELAY) == 0) && 17616 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17617 (len < segsiz)) { 17618 /* 17619 * No delay is not on and the 17620 * user is sending less than 1MSS. This 17621 * brings out SWS avoidance so we 17622 * don't send. Another app-limited case. 17623 */ 17624 app_limited = CTF_JR_APP_LIMITED; 17625 } else if (tp->t_flags & TF_NOPUSH) { 17626 /* 17627 * The user has requested no push of 17628 * the last segment and we are 17629 * at the last segment. Another app 17630 * limited case. 17631 */ 17632 app_limited = CTF_JR_APP_LIMITED; 17633 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17634 /* Its the cwnd */ 17635 app_limited = CTF_JR_CWND_LIMITED; 17636 } else if (IN_FASTRECOVERY(tp->t_flags) && 17637 (rack->rack_no_prr == 0) && 17638 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17639 app_limited = CTF_JR_PRR; 17640 } else { 17641 /* Now why here are we not sending? */ 17642 #ifdef NOW 17643 #ifdef INVARIANTS 17644 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17645 #endif 17646 #endif 17647 app_limited = CTF_JR_ASSESSING; 17648 } 17649 /* 17650 * App limited in some fashion, for our pacing GP 17651 * measurements we don't want any gap (even cwnd). 17652 * Close down the measurement window. 17653 */ 17654 if (rack_cwnd_block_ends_measure && 17655 ((app_limited == CTF_JR_CWND_LIMITED) || 17656 (app_limited == CTF_JR_PRR))) { 17657 /* 17658 * The reason we are not sending is 17659 * the cwnd (or prr). We have been configured 17660 * to end the measurement window in 17661 * this case. 17662 */ 17663 end_window = 1; 17664 } else if (rack_rwnd_block_ends_measure && 17665 (app_limited == CTF_JR_RWND_LIMITED)) { 17666 /* 17667 * We are rwnd limited and have been 17668 * configured to end the measurement 17669 * window in this case. 17670 */ 17671 end_window = 1; 17672 } else if (app_limited == CTF_JR_APP_LIMITED) { 17673 /* 17674 * A true application limited period, we have 17675 * ran out of data. 17676 */ 17677 end_window = 1; 17678 } else if (app_limited == CTF_JR_ASSESSING) { 17679 /* 17680 * In the assessing case we hit the end of 17681 * the if/else and had no known reason 17682 * This will panic us under invariants.. 17683 * 17684 * If we get this out in logs we need to 17685 * investagate which reason we missed. 17686 */ 17687 end_window = 1; 17688 } 17689 if (end_window) { 17690 uint8_t log = 0; 17691 17692 /* Adjust the Gput measurement */ 17693 if ((tp->t_flags & TF_GPUTINPROG) && 17694 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17695 tp->gput_ack = tp->snd_max; 17696 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17697 /* 17698 * There is not enough to measure. 
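 * That is, fewer than MIN_GP_WIN segments remain between gput_seq and
 * the clipped gput_ack, so TF_GPUTINPROG is cleared below and the
 * aborted measurement is only logged; otherwise the shortened window is
 * kept and logged once the last sent map entry has been marked
 * app-limited.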
17699 */ 17700 tp->t_flags &= ~TF_GPUTINPROG; 17701 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17702 rack->r_ctl.rc_gp_srtt /*flex1*/, 17703 tp->gput_seq, 17704 0, 0, 18, __LINE__, NULL, 0); 17705 } else 17706 log = 1; 17707 } 17708 /* Mark the last packet has app limited */ 17709 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17710 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17711 if (rack->r_ctl.rc_app_limited_cnt == 0) 17712 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17713 else { 17714 /* 17715 * Go out to the end app limited and mark 17716 * this new one as next and move the end_appl up 17717 * to this guy. 17718 */ 17719 if (rack->r_ctl.rc_end_appl) 17720 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17721 rack->r_ctl.rc_end_appl = rsm; 17722 } 17723 rsm->r_flags |= RACK_APP_LIMITED; 17724 rack->r_ctl.rc_app_limited_cnt++; 17725 } 17726 if (log) 17727 rack_log_pacing_delay_calc(rack, 17728 rack->r_ctl.rc_app_limited_cnt, seq, 17729 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17730 } 17731 } 17732 /* Check if we need to go into persists or not */ 17733 if ((tp->snd_max == tp->snd_una) && 17734 TCPS_HAVEESTABLISHED(tp->t_state) && 17735 sbavail(sb) && 17736 (sbavail(sb) > tp->snd_wnd) && 17737 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17738 /* Yes lets make sure to move to persist before timer-start */ 17739 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17740 } 17741 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17742 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17743 } 17744 #ifdef NETFLIX_SHARED_CWND 17745 if ((sbavail(sb) == 0) && 17746 rack->r_ctl.rc_scw) { 17747 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17748 rack->rack_scwnd_is_idle = 1; 17749 } 17750 #endif 17751 #ifdef TCP_ACCOUNTING 17752 if (tot_len_this_send > 0) { 17753 crtsc = get_cyclecount(); 17754 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17755 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17756 } 17757 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17758 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17759 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17760 } 17761 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17762 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17763 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17764 } 17765 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17766 } else { 17767 crtsc = get_cyclecount(); 17768 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17769 tp->tcp_cnt_counters[SND_LIMITED]++; 17770 } 17771 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17772 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17773 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17774 } 17775 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17776 } 17777 sched_unpin(); 17778 #endif 17779 return (0); 17780 17781 send: 17782 if (rsm || sack_rxmit) 17783 counter_u64_add(rack_nfto_resend, 1); 17784 else 17785 counter_u64_add(rack_non_fto_send, 1); 17786 if ((flags & TH_FIN) && 17787 sbavail(sb)) { 17788 /* 17789 * We do not transmit a FIN 17790 * with data outstanding. We 17791 * need to make it so all data 17792 * is acked first. 
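 * ("Data outstanding" here means bytes still sitting in the send
 * buffer, i.e. sbavail(sb) is non-zero; the FIN bit is stripped for
 * this send and will go out once the buffer has drained.)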
17793 */ 17794 flags &= ~TH_FIN; 17795 } 17796 /* Enforce stack imposed max seg size if we have one */ 17797 if (rack->r_ctl.rc_pace_max_segs && 17798 (len > rack->r_ctl.rc_pace_max_segs)) { 17799 mark = 1; 17800 len = rack->r_ctl.rc_pace_max_segs; 17801 } 17802 SOCKBUF_LOCK_ASSERT(sb); 17803 if (len > 0) { 17804 if (len >= segsiz) 17805 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17806 else 17807 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17808 } 17809 /* 17810 * Before ESTABLISHED, force sending of initial options unless TCP 17811 * set not to do any options. NOTE: we assume that the IP/TCP header 17812 * plus TCP options always fit in a single mbuf, leaving room for a 17813 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17814 * + optlen <= MCLBYTES 17815 */ 17816 optlen = 0; 17817 #ifdef INET6 17818 if (isipv6) 17819 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17820 else 17821 #endif 17822 hdrlen = sizeof(struct tcpiphdr); 17823 17824 /* 17825 * Compute options for segment. We only have to care about SYN and 17826 * established connection segments. Options for SYN-ACK segments 17827 * are handled in TCP syncache. 17828 */ 17829 to.to_flags = 0; 17830 if ((tp->t_flags & TF_NOOPT) == 0) { 17831 /* Maximum segment size. */ 17832 if (flags & TH_SYN) { 17833 tp->snd_nxt = tp->iss; 17834 to.to_mss = tcp_mssopt(&inp->inp_inc); 17835 if (tp->t_port) 17836 to.to_mss -= V_tcp_udp_tunneling_overhead; 17837 to.to_flags |= TOF_MSS; 17838 17839 /* 17840 * On SYN or SYN|ACK transmits on TFO connections, 17841 * only include the TFO option if it is not a 17842 * retransmit, as the presence of the TFO option may 17843 * have caused the original SYN or SYN|ACK to have 17844 * been dropped by a middlebox. 17845 */ 17846 if (IS_FASTOPEN(tp->t_flags) && 17847 (tp->t_rxtshift == 0)) { 17848 if (tp->t_state == TCPS_SYN_RECEIVED) { 17849 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17850 to.to_tfo_cookie = 17851 (u_int8_t *)&tp->t_tfo_cookie.server; 17852 to.to_flags |= TOF_FASTOPEN; 17853 wanted_cookie = 1; 17854 } else if (tp->t_state == TCPS_SYN_SENT) { 17855 to.to_tfo_len = 17856 tp->t_tfo_client_cookie_len; 17857 to.to_tfo_cookie = 17858 tp->t_tfo_cookie.client; 17859 to.to_flags |= TOF_FASTOPEN; 17860 wanted_cookie = 1; 17861 /* 17862 * If we wind up having more data to 17863 * send with the SYN than can fit in 17864 * one segment, don't send any more 17865 * until the SYN|ACK comes back from 17866 * the other end. 17867 */ 17868 sendalot = 0; 17869 } 17870 } 17871 } 17872 /* Window scaling. */ 17873 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 17874 to.to_wscale = tp->request_r_scale; 17875 to.to_flags |= TOF_SCALE; 17876 } 17877 /* Timestamps. */ 17878 if ((tp->t_flags & TF_RCVD_TSTMP) || 17879 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 17880 to.to_tsval = ms_cts + tp->ts_offset; 17881 to.to_tsecr = tp->ts_recent; 17882 to.to_flags |= TOF_TS; 17883 } 17884 /* Set receive buffer autosizing timestamp. */ 17885 if (tp->rfbuf_ts == 0 && 17886 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 17887 tp->rfbuf_ts = tcp_ts_getticks(); 17888 /* Selective ACK's. */ 17889 if (tp->t_flags & TF_SACK_PERMIT) { 17890 if (flags & TH_SYN) 17891 to.to_flags |= TOF_SACKPERM; 17892 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 17893 tp->rcv_numsacks > 0) { 17894 to.to_flags |= TOF_SACK; 17895 to.to_nsacks = tp->rcv_numsacks; 17896 to.to_sacks = (u_char *)tp->sackblks; 17897 } 17898 } 17899 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17900 /* TCP-MD5 (RFC2385). 
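 * Only the option is requested here (TOF_SIGNATURE); the digest itself
 * is computed further below, once the header and options have been
 * copied into the mbuf.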
*/ 17901 if (tp->t_flags & TF_SIGNATURE) 17902 to.to_flags |= TOF_SIGNATURE; 17903 #endif /* TCP_SIGNATURE */ 17904 17905 /* Processing the options. */ 17906 hdrlen += optlen = tcp_addoptions(&to, opt); 17907 /* 17908 * If we wanted a TFO option to be added, but it was unable 17909 * to fit, ensure no data is sent. 17910 */ 17911 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 17912 !(to.to_flags & TOF_FASTOPEN)) 17913 len = 0; 17914 } 17915 if (tp->t_port) { 17916 if (V_tcp_udp_tunneling_port == 0) { 17917 /* The port was removed?? */ 17918 SOCKBUF_UNLOCK(&so->so_snd); 17919 #ifdef TCP_ACCOUNTING 17920 crtsc = get_cyclecount(); 17921 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17922 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 17923 } 17924 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 17925 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17926 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 17927 } 17928 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 17929 sched_unpin(); 17930 #endif 17931 return (EHOSTUNREACH); 17932 } 17933 hdrlen += sizeof(struct udphdr); 17934 } 17935 #ifdef INET6 17936 if (isipv6) 17937 ipoptlen = ip6_optlen(tp->t_inpcb); 17938 else 17939 #endif 17940 if (tp->t_inpcb->inp_options) 17941 ipoptlen = tp->t_inpcb->inp_options->m_len - 17942 offsetof(struct ipoption, ipopt_list); 17943 else 17944 ipoptlen = 0; 17945 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17946 ipoptlen += ipsec_optlen; 17947 #endif 17948 17949 /* 17950 * Adjust data length if insertion of options will bump the packet 17951 * length beyond the t_maxseg length. Clear the FIN bit because we 17952 * cut off the tail of the segment. 17953 */ 17954 if (len + optlen + ipoptlen > tp->t_maxseg) { 17955 if (tso) { 17956 uint32_t if_hw_tsomax; 17957 uint32_t moff; 17958 int32_t max_len; 17959 17960 /* extract TSO information */ 17961 if_hw_tsomax = tp->t_tsomax; 17962 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 17963 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 17964 KASSERT(ipoptlen == 0, 17965 ("%s: TSO can't do IP options", __func__)); 17966 17967 /* 17968 * Check if we should limit by maximum payload 17969 * length: 17970 */ 17971 if (if_hw_tsomax != 0) { 17972 /* compute maximum TSO length */ 17973 max_len = (if_hw_tsomax - hdrlen - 17974 max_linkhdr); 17975 if (max_len <= 0) { 17976 len = 0; 17977 } else if (len > max_len) { 17978 sendalot = 1; 17979 len = max_len; 17980 mark = 2; 17981 } 17982 } 17983 /* 17984 * Prevent the last segment from being fractional 17985 * unless the send sockbuf can be emptied: 17986 */ 17987 max_len = (tp->t_maxseg - optlen); 17988 if ((sb_offset + len) < sbavail(sb)) { 17989 moff = len % (u_int)max_len; 17990 if (moff != 0) { 17991 mark = 3; 17992 len -= moff; 17993 } 17994 } 17995 /* 17996 * In case there are too many small fragments don't 17997 * use TSO: 17998 */ 17999 if (len <= segsiz) { 18000 mark = 4; 18001 tso = 0; 18002 } 18003 /* 18004 * Send the FIN in a separate segment after the bulk 18005 * sending is done. We don't trust the TSO 18006 * implementations to clear the FIN flag on all but 18007 * the last segment. 18008 */ 18009 if (tp->t_flags & TF_NEEDFIN) { 18010 sendalot = 4; 18011 } 18012 } else { 18013 mark = 5; 18014 if (optlen + ipoptlen >= tp->t_maxseg) { 18015 /* 18016 * Since we don't have enough space to put 18017 * the IP header chain and the TCP header in 18018 * one packet as required by RFC 7112, don't 18019 * send it. Also ensure that at least one 18020 * byte of the payload can be put into the 18021 * TCP segment. 
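 * (The send is failed below with EMSGSIZE after dropping the
 * socket-buffer lock.)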
18022 */ 18023 SOCKBUF_UNLOCK(&so->so_snd); 18024 error = EMSGSIZE; 18025 sack_rxmit = 0; 18026 goto out; 18027 } 18028 len = tp->t_maxseg - optlen - ipoptlen; 18029 sendalot = 5; 18030 } 18031 } else { 18032 tso = 0; 18033 mark = 6; 18034 } 18035 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18036 ("%s: len > IP_MAXPACKET", __func__)); 18037 #ifdef DIAGNOSTIC 18038 #ifdef INET6 18039 if (max_linkhdr + hdrlen > MCLBYTES) 18040 #else 18041 if (max_linkhdr + hdrlen > MHLEN) 18042 #endif 18043 panic("tcphdr too big"); 18044 #endif 18045 18046 /* 18047 * This KASSERT is here to catch edge cases at a well defined place. 18048 * Before, those had triggered (random) panic conditions further 18049 * down. 18050 */ 18051 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18052 if ((len == 0) && 18053 (flags & TH_FIN) && 18054 (sbused(sb))) { 18055 /* 18056 * We have outstanding data, don't send a fin by itself!. 18057 */ 18058 goto just_return; 18059 } 18060 /* 18061 * Grab a header mbuf, attaching a copy of data to be transmitted, 18062 * and initialize the header from the template for sends on this 18063 * connection. 18064 */ 18065 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18066 if (len) { 18067 uint32_t max_val; 18068 uint32_t moff; 18069 18070 if (rack->r_ctl.rc_pace_max_segs) 18071 max_val = rack->r_ctl.rc_pace_max_segs; 18072 else if (rack->rc_user_set_max_segs) 18073 max_val = rack->rc_user_set_max_segs * segsiz; 18074 else 18075 max_val = len; 18076 /* 18077 * We allow a limit on sending with hptsi. 18078 */ 18079 if (len > max_val) { 18080 mark = 7; 18081 len = max_val; 18082 } 18083 #ifdef INET6 18084 if (MHLEN < hdrlen + max_linkhdr) 18085 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18086 else 18087 #endif 18088 m = m_gethdr(M_NOWAIT, MT_DATA); 18089 18090 if (m == NULL) { 18091 SOCKBUF_UNLOCK(sb); 18092 error = ENOBUFS; 18093 sack_rxmit = 0; 18094 goto out; 18095 } 18096 m->m_data += max_linkhdr; 18097 m->m_len = hdrlen; 18098 18099 /* 18100 * Start the m_copy functions from the closest mbuf to the 18101 * sb_offset in the socket buffer chain. 18102 */ 18103 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18104 s_mb = mb; 18105 s_moff = moff; 18106 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18107 m_copydata(mb, moff, (int)len, 18108 mtod(m, caddr_t)+hdrlen); 18109 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18110 sbsndptr_adv(sb, mb, len); 18111 m->m_len += len; 18112 } else { 18113 struct sockbuf *msb; 18114 18115 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18116 msb = NULL; 18117 else 18118 msb = sb; 18119 m->m_next = tcp_m_copym( 18120 mb, moff, &len, 18121 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18122 ((rsm == NULL) ? hw_tls : 0) 18123 #ifdef NETFLIX_COPY_ARGS 18124 , &filled_all 18125 #endif 18126 ); 18127 if (len <= (tp->t_maxseg - optlen)) { 18128 /* 18129 * Must have ran out of mbufs for the copy 18130 * shorten it to no longer need tso. Lets 18131 * not put on sendalot since we are low on 18132 * mbufs. 
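 * (tcp_m_copym() trimmed len to no more than t_maxseg - optlen, so the
 * remaining single segment no longer needs TSO and tso is cleared
 * below.)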
18133 */ 18134 tso = 0; 18135 } 18136 if (m->m_next == NULL) { 18137 SOCKBUF_UNLOCK(sb); 18138 (void)m_free(m); 18139 error = ENOBUFS; 18140 sack_rxmit = 0; 18141 goto out; 18142 } 18143 } 18144 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18145 if (rsm && (rsm->r_flags & RACK_TLP)) { 18146 /* 18147 * TLP should not count in retran count, but 18148 * in its own bin 18149 */ 18150 counter_u64_add(rack_tlp_retran, 1); 18151 counter_u64_add(rack_tlp_retran_bytes, len); 18152 } else { 18153 tp->t_sndrexmitpack++; 18154 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18155 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18156 } 18157 #ifdef STATS 18158 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18159 len); 18160 #endif 18161 } else { 18162 KMOD_TCPSTAT_INC(tcps_sndpack); 18163 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18164 #ifdef STATS 18165 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18166 len); 18167 #endif 18168 } 18169 /* 18170 * If we're sending everything we've got, set PUSH. (This 18171 * will keep happy those implementations which only give 18172 * data to the user when a buffer fills or a PUSH comes in.) 18173 */ 18174 if (sb_offset + len == sbused(sb) && 18175 sbused(sb) && 18176 !(flags & TH_SYN)) { 18177 flags |= TH_PUSH; 18178 add_flag |= RACK_HAD_PUSH; 18179 } 18180 18181 SOCKBUF_UNLOCK(sb); 18182 } else { 18183 SOCKBUF_UNLOCK(sb); 18184 if (tp->t_flags & TF_ACKNOW) 18185 KMOD_TCPSTAT_INC(tcps_sndacks); 18186 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18187 KMOD_TCPSTAT_INC(tcps_sndctrl); 18188 else 18189 KMOD_TCPSTAT_INC(tcps_sndwinup); 18190 18191 m = m_gethdr(M_NOWAIT, MT_DATA); 18192 if (m == NULL) { 18193 error = ENOBUFS; 18194 sack_rxmit = 0; 18195 goto out; 18196 } 18197 #ifdef INET6 18198 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18199 MHLEN >= hdrlen) { 18200 M_ALIGN(m, hdrlen); 18201 } else 18202 #endif 18203 m->m_data += max_linkhdr; 18204 m->m_len = hdrlen; 18205 } 18206 SOCKBUF_UNLOCK_ASSERT(sb); 18207 m->m_pkthdr.rcvif = (struct ifnet *)0; 18208 #ifdef MAC 18209 mac_inpcb_create_mbuf(inp, m); 18210 #endif 18211 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18212 #ifdef INET6 18213 if (isipv6) 18214 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18215 else 18216 #endif /* INET6 */ 18217 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18218 th = rack->r_ctl.fsb.th; 18219 udp = rack->r_ctl.fsb.udp; 18220 if (udp) { 18221 #ifdef INET6 18222 if (isipv6) 18223 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18224 else 18225 #endif /* INET6 */ 18226 ulen = hdrlen + len - sizeof(struct ip); 18227 udp->uh_ulen = htons(ulen); 18228 } 18229 } else { 18230 #ifdef INET6 18231 if (isipv6) { 18232 ip6 = mtod(m, struct ip6_hdr *); 18233 if (tp->t_port) { 18234 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18235 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18236 udp->uh_dport = tp->t_port; 18237 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18238 udp->uh_ulen = htons(ulen); 18239 th = (struct tcphdr *)(udp + 1); 18240 } else 18241 th = (struct tcphdr *)(ip6 + 1); 18242 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18243 } else 18244 #endif /* INET6 */ 18245 { 18246 ip = mtod(m, struct ip *); 18247 #ifdef TCPDEBUG 18248 ipov = (struct ipovly *)ip; 18249 #endif 18250 if (tp->t_port) { 18251 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18252 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18253 udp->uh_dport = tp->t_port; 18254 ulen = hdrlen + len - sizeof(struct ip); 18255 udp->uh_ulen = htons(ulen); 
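/* Tunneled layout is IP/UDP/TCP: the TCP header starts right after the UDP header. */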
18256 th = (struct tcphdr *)(udp + 1); 18257 } else 18258 th = (struct tcphdr *)(ip + 1); 18259 tcpip_fillheaders(inp, tp->t_port, ip, th); 18260 } 18261 } 18262 /* 18263 * Fill in fields, remembering maximum advertised window for use in 18264 * delaying messages about window sizes. If resending a FIN, be sure 18265 * not to use a new sequence number. 18266 */ 18267 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18268 tp->snd_nxt == tp->snd_max) 18269 tp->snd_nxt--; 18270 /* 18271 * If we are starting a connection, send ECN setup SYN packet. If we 18272 * are on a retransmit, we may resend those bits a number of times 18273 * as per RFC 3168. 18274 */ 18275 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18276 flags |= tcp_ecn_output_syn_sent(tp); 18277 } 18278 /* Also handle parallel SYN for ECN */ 18279 if (TCPS_HAVERCVDSYN(tp->t_state) && 18280 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18281 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18282 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18283 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18284 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18285 #ifdef INET6 18286 if (isipv6) { 18287 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18288 ip6->ip6_flow |= htonl(ect << 20); 18289 } 18290 else 18291 #endif 18292 { 18293 ip->ip_tos &= ~IPTOS_ECN_MASK; 18294 ip->ip_tos |= ect; 18295 } 18296 } 18297 /* 18298 * If we are doing retransmissions, then snd_nxt will not reflect 18299 * the first unsent octet. For ACK only packets, we do not want the 18300 * sequence number of the retransmitted packet, we want the sequence 18301 * number of the next unsent octet. So, if there is no data (and no 18302 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18303 * ti_seq. But if we are in persist state, snd_max might reflect 18304 * one byte beyond the right edge of the window, so use snd_nxt in 18305 * that case, since we know we aren't doing a retransmission. 18306 * (retransmit and persist are mutually exclusive...) 18307 */ 18308 if (sack_rxmit == 0) { 18309 if (len || (flags & (TH_SYN | TH_FIN))) { 18310 th->th_seq = htonl(tp->snd_nxt); 18311 rack_seq = tp->snd_nxt; 18312 } else { 18313 th->th_seq = htonl(tp->snd_max); 18314 rack_seq = tp->snd_max; 18315 } 18316 } else { 18317 th->th_seq = htonl(rsm->r_start); 18318 rack_seq = rsm->r_start; 18319 } 18320 th->th_ack = htonl(tp->rcv_nxt); 18321 tcp_set_flags(th, flags); 18322 /* 18323 * Calculate receive window. Don't shrink window, but avoid silly 18324 * window syndrome. 18325 * If a RST segment is sent, advertise a window of zero. 18326 */ 18327 if (flags & TH_RST) { 18328 recwin = 0; 18329 } else { 18330 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18331 recwin < (long)segsiz) { 18332 recwin = 0; 18333 } 18334 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18335 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18336 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18337 } 18338 18339 /* 18340 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18341 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18342 * handled in syncache. 18343 */ 18344 if (flags & TH_SYN) 18345 th->th_win = htons((u_short) 18346 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18347 else { 18348 /* Avoid shrinking window with window scaling. */ 18349 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18350 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18351 } 18352 /* 18353 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18354 * window. This may cause the remote transmitter to stall. 
This 18355 * flag tells soreceive() to disable delayed acknowledgements when 18356 * draining the buffer. This can occur if the receiver is 18357 * attempting to read more data than can be buffered prior to 18358 * transmitting on the connection. 18359 */ 18360 if (th->th_win == 0) { 18361 tp->t_sndzerowin++; 18362 tp->t_flags |= TF_RXWIN0SENT; 18363 } else 18364 tp->t_flags &= ~TF_RXWIN0SENT; 18365 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18366 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18367 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18368 uint8_t *cpto; 18369 18370 cpto = mtod(m, uint8_t *); 18371 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18372 /* 18373 * We have just copied in: 18374 * IP/IP6 18375 * <optional udphdr> 18376 * tcphdr (no options) 18377 * 18378 * We need to grab the correct pointers into the mbuf 18379 * for both the tcp header, and possibly the udp header (if tunneling). 18380 * We do this by using the offset in the copy buffer and adding it 18381 * to the mbuf base pointer (cpto). 18382 */ 18383 #ifdef INET6 18384 if (isipv6) 18385 ip6 = mtod(m, struct ip6_hdr *); 18386 else 18387 #endif /* INET6 */ 18388 ip = mtod(m, struct ip *); 18389 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18390 /* If we have a udp header lets set it into the mbuf as well */ 18391 if (udp) 18392 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18393 } 18394 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18395 if (to.to_flags & TOF_SIGNATURE) { 18396 /* 18397 * Calculate MD5 signature and put it into the place 18398 * determined before. 18399 * NOTE: since TCP options buffer doesn't point into 18400 * mbuf's data, calculate offset and use it. 18401 */ 18402 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18403 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18404 /* 18405 * Do not send segment if the calculation of MD5 18406 * digest has failed. 18407 */ 18408 goto out; 18409 } 18410 } 18411 #endif 18412 if (optlen) { 18413 bcopy(opt, th + 1, optlen); 18414 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18415 } 18416 /* 18417 * Put TCP length in extended header, and then checksum extended 18418 * header and data. 18419 */ 18420 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18421 #ifdef INET6 18422 if (isipv6) { 18423 /* 18424 * ip6_plen is not need to be filled now, and will be filled 18425 * in ip6_output. 
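 * For the tunneled case below, uh_ulen was set to cover the UDP header
 * plus the TCP header, options and payload; only the pseudo-header
 * checksums are computed here, with csum_flags/csum_data telling the
 * lower layers where to finish the real checksum.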
18426 */ 18427 if (tp->t_port) { 18428 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18429 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18430 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18431 th->th_sum = htons(0); 18432 UDPSTAT_INC(udps_opackets); 18433 } else { 18434 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18435 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18436 th->th_sum = in6_cksum_pseudo(ip6, 18437 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18438 0); 18439 } 18440 } 18441 #endif 18442 #if defined(INET6) && defined(INET) 18443 else 18444 #endif 18445 #ifdef INET 18446 { 18447 if (tp->t_port) { 18448 m->m_pkthdr.csum_flags = CSUM_UDP; 18449 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18450 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18451 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18452 th->th_sum = htons(0); 18453 UDPSTAT_INC(udps_opackets); 18454 } else { 18455 m->m_pkthdr.csum_flags = CSUM_TCP; 18456 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18457 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18458 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18459 IPPROTO_TCP + len + optlen)); 18460 } 18461 /* IP version must be set here for ipv4/ipv6 checking later */ 18462 KASSERT(ip->ip_v == IPVERSION, 18463 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18464 } 18465 #endif 18466 /* 18467 * Enable TSO and specify the size of the segments. The TCP pseudo 18468 * header checksum is always provided. XXX: Fixme: This is currently 18469 * not the case for IPv6. 18470 */ 18471 if (tso) { 18472 KASSERT(len > tp->t_maxseg - optlen, 18473 ("%s: len <= tso_segsz", __func__)); 18474 m->m_pkthdr.csum_flags |= CSUM_TSO; 18475 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18476 } 18477 KASSERT(len + hdrlen == m_length(m, NULL), 18478 ("%s: mbuf chain different than expected: %d + %u != %u", 18479 __func__, len, hdrlen, m_length(m, NULL))); 18480 18481 #ifdef TCP_HHOOK 18482 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18483 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18484 #endif 18485 /* We're getting ready to send; log now. 
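 * The TCP_LOG_OUT record below captures the PRR send count, the pacing
 * min/max segment sizes, the original length, the aggregate early/late
 * pacing error, the current bandwidth estimate, and (in flex7/flex8)
 * the mark/pass trace points and whether this is a retransmit and/or a
 * TLP.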
*/ 18486 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18487 union tcp_log_stackspecific log; 18488 18489 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18490 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18491 if (rack->rack_no_prr) 18492 log.u_bbr.flex1 = 0; 18493 else 18494 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18495 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18496 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18497 log.u_bbr.flex4 = orig_len; 18498 if (filled_all) 18499 log.u_bbr.flex5 = 0x80000000; 18500 else 18501 log.u_bbr.flex5 = 0; 18502 /* Save off the early/late values */ 18503 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18504 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18505 log.u_bbr.bw_inuse = rack_get_bw(rack); 18506 if (rsm || sack_rxmit) { 18507 if (doing_tlp) 18508 log.u_bbr.flex8 = 2; 18509 else 18510 log.u_bbr.flex8 = 1; 18511 } else { 18512 if (doing_tlp) 18513 log.u_bbr.flex8 = 3; 18514 else 18515 log.u_bbr.flex8 = 0; 18516 } 18517 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18518 log.u_bbr.flex7 = mark; 18519 log.u_bbr.flex7 <<= 8; 18520 log.u_bbr.flex7 |= pass; 18521 log.u_bbr.pkts_out = tp->t_maxseg; 18522 log.u_bbr.timeStamp = cts; 18523 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18524 log.u_bbr.lt_epoch = cwnd_to_use; 18525 log.u_bbr.delivered = sendalot; 18526 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18527 len, &log, false, NULL, NULL, 0, &tv); 18528 } else 18529 lgb = NULL; 18530 18531 /* 18532 * Fill in IP length and desired time to live and send to IP level. 18533 * There should be a better way to handle ttl and tos; we could keep 18534 * them in the template, but need a way to checksum without them. 18535 */ 18536 /* 18537 * m->m_pkthdr.len should have been set before cksum calcuration, 18538 * because in6_cksum() need it. 18539 */ 18540 #ifdef INET6 18541 if (isipv6) { 18542 /* 18543 * we separately set hoplimit for every segment, since the 18544 * user might want to change the value via setsockopt. Also, 18545 * desired default hop limit might be changed via Neighbor 18546 * Discovery. 18547 */ 18548 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18549 18550 /* 18551 * Set the packet size here for the benefit of DTrace 18552 * probes. ip6_output() will set it properly; it's supposed 18553 * to include the option header lengths as well. 18554 */ 18555 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18556 18557 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18558 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18559 else 18560 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18561 18562 if (tp->t_state == TCPS_SYN_SENT) 18563 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18564 18565 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18566 /* TODO: IPv6 IP6TOS_ECT bit on */ 18567 error = ip6_output(m, 18568 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18569 inp->in6p_outputopts, 18570 #else 18571 NULL, 18572 #endif 18573 &inp->inp_route6, 18574 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18575 NULL, NULL, inp); 18576 18577 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18578 mtu = inp->inp_route6.ro_nh->nh_mtu; 18579 } 18580 #endif /* INET6 */ 18581 #if defined(INET) && defined(INET6) 18582 else 18583 #endif 18584 #ifdef INET 18585 { 18586 ip->ip_len = htons(m->m_pkthdr.len); 18587 #ifdef INET6 18588 if (inp->inp_vflag & INP_IPV6PROTO) 18589 ip->ip_ttl = in6_selecthlim(inp, NULL); 18590 #endif /* INET6 */ 18591 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18592 /* 18593 * If we do path MTU discovery, then we set DF on every 18594 * packet. This might not be the best thing to do according 18595 * to RFC3390 Section 2. However the tcp hostcache migitates 18596 * the problem so it affects only the first tcp connection 18597 * with a host. 18598 * 18599 * NB: Don't set DF on small MTU/MSS to have a safe 18600 * fallback. 18601 */ 18602 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18603 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18604 if (tp->t_port == 0 || len < V_tcp_minmss) { 18605 ip->ip_off |= htons(IP_DF); 18606 } 18607 } else { 18608 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18609 } 18610 18611 if (tp->t_state == TCPS_SYN_SENT) 18612 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18613 18614 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18615 18616 error = ip_output(m, 18617 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18618 inp->inp_options, 18619 #else 18620 NULL, 18621 #endif 18622 &inp->inp_route, 18623 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18624 inp); 18625 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18626 mtu = inp->inp_route.ro_nh->nh_mtu; 18627 } 18628 #endif /* INET */ 18629 18630 out: 18631 if (lgb) { 18632 lgb->tlb_errno = error; 18633 lgb = NULL; 18634 } 18635 /* 18636 * In transmit state, time the transmission and arrange for the 18637 * retransmit. In persist state, just set snd_max. 
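 * On a successful send we first do the bookkeeping below: account for
 * what was sent (including TLP state and whether it went out in slow
 * start, congestion avoidance or recovery), reduce the PRR send count
 * by what went out, clean any stale DSACK blocks and bump the
 * rack_out_size[] buckets. Only then do we advance snd_nxt (and
 * possibly snd_max) and possibly start a goodput measurement if none
 * is in progress.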
18638 */ 18639 if (error == 0) { 18640 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18641 if (rsm && doing_tlp) { 18642 rack->rc_last_sent_tlp_past_cumack = 0; 18643 rack->rc_last_sent_tlp_seq_valid = 1; 18644 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18645 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18646 } 18647 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18648 if (rsm && (doing_tlp == 0)) { 18649 /* Set we retransmitted */ 18650 rack->rc_gp_saw_rec = 1; 18651 } else { 18652 if (cwnd_to_use > tp->snd_ssthresh) { 18653 /* Set we sent in CA */ 18654 rack->rc_gp_saw_ca = 1; 18655 } else { 18656 /* Set we sent in SS */ 18657 rack->rc_gp_saw_ss = 1; 18658 } 18659 } 18660 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18661 (tp->t_flags & TF_SACK_PERMIT) && 18662 tp->rcv_numsacks > 0) 18663 tcp_clean_dsack_blocks(tp); 18664 tot_len_this_send += len; 18665 if (len == 0) 18666 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18667 else if (len == 1) { 18668 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18669 } else if (len > 1) { 18670 int idx; 18671 18672 idx = (len / segsiz) + 3; 18673 if (idx >= TCP_MSS_ACCT_ATIMER) 18674 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18675 else 18676 counter_u64_add(rack_out_size[idx], 1); 18677 } 18678 } 18679 if ((rack->rack_no_prr == 0) && 18680 sub_from_prr && 18681 (error == 0)) { 18682 if (rack->r_ctl.rc_prr_sndcnt >= len) 18683 rack->r_ctl.rc_prr_sndcnt -= len; 18684 else 18685 rack->r_ctl.rc_prr_sndcnt = 0; 18686 } 18687 sub_from_prr = 0; 18688 if (doing_tlp) { 18689 /* Make sure the TLP is added */ 18690 add_flag |= RACK_TLP; 18691 } else if (rsm) { 18692 /* If its a resend without TLP then it must not have the flag */ 18693 rsm->r_flags &= ~RACK_TLP; 18694 } 18695 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18696 rack_to_usec_ts(&tv), 18697 rsm, add_flag, s_mb, s_moff, hw_tls); 18698 18699 18700 if ((error == 0) && 18701 (len > 0) && 18702 (tp->snd_una == tp->snd_max)) 18703 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18704 { 18705 tcp_seq startseq = tp->snd_nxt; 18706 18707 /* Track our lost count */ 18708 if (rsm && (doing_tlp == 0)) 18709 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18710 /* 18711 * Advance snd_nxt over sequence space of this segment. 18712 */ 18713 if (error) 18714 /* We don't log or do anything with errors */ 18715 goto nomore; 18716 if (doing_tlp == 0) { 18717 if (rsm == NULL) { 18718 /* 18719 * Not a retransmission of some 18720 * sort, new data is going out so 18721 * clear our TLP count and flag. 18722 */ 18723 rack->rc_tlp_in_progress = 0; 18724 rack->r_ctl.rc_tlp_cnt_out = 0; 18725 } 18726 } else { 18727 /* 18728 * We have just sent a TLP, mark that it is true 18729 * and make sure our in progress is set so we 18730 * continue to check the count. 18731 */ 18732 rack->rc_tlp_in_progress = 1; 18733 rack->r_ctl.rc_tlp_cnt_out++; 18734 } 18735 if (flags & (TH_SYN | TH_FIN)) { 18736 if (flags & TH_SYN) 18737 tp->snd_nxt++; 18738 if (flags & TH_FIN) { 18739 tp->snd_nxt++; 18740 tp->t_flags |= TF_SENTFIN; 18741 } 18742 } 18743 /* In the ENOBUFS case we do *not* update snd_max */ 18744 if (sack_rxmit) 18745 goto nomore; 18746 18747 tp->snd_nxt += len; 18748 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18749 if (tp->snd_una == tp->snd_max) { 18750 /* 18751 * Update the time we just added data since 18752 * none was outstanding. 
18753 */ 18754 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18755 tp->t_acktime = ticks; 18756 } 18757 tp->snd_max = tp->snd_nxt; 18758 /* 18759 * Time this transmission if not a retransmission and 18760 * not currently timing anything. 18761 * This is only relevant in case of switching back to 18762 * the base stack. 18763 */ 18764 if (tp->t_rtttime == 0) { 18765 tp->t_rtttime = ticks; 18766 tp->t_rtseq = startseq; 18767 KMOD_TCPSTAT_INC(tcps_segstimed); 18768 } 18769 if (len && 18770 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18771 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18772 } 18773 /* 18774 * If we are doing FO we need to update the mbuf position and subtract 18775 * this happens when the peer sends us duplicate information and 18776 * we thus want to send a DSACK. 18777 * 18778 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18779 * turned off? If not then we are going to echo multiple DSACK blocks 18780 * out (with the TSO), which we should not be doing. 18781 */ 18782 if (rack->r_fast_output && len) { 18783 if (rack->r_ctl.fsb.left_to_send > len) 18784 rack->r_ctl.fsb.left_to_send -= len; 18785 else 18786 rack->r_ctl.fsb.left_to_send = 0; 18787 if (rack->r_ctl.fsb.left_to_send < segsiz) 18788 rack->r_fast_output = 0; 18789 if (rack->r_fast_output) { 18790 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18791 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18792 } 18793 } 18794 } 18795 nomore: 18796 if (error) { 18797 rack->r_ctl.rc_agg_delayed = 0; 18798 rack->r_early = 0; 18799 rack->r_late = 0; 18800 rack->r_ctl.rc_agg_early = 0; 18801 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18802 /* 18803 * Failures do not advance the seq counter above. For the 18804 * case of ENOBUFS we will fall out and retry in 1ms with 18805 * the hpts. Everything else will just have to retransmit 18806 * with the timer. 18807 * 18808 * In any case, we do not want to loop around for another 18809 * send without a good reason. 18810 */ 18811 sendalot = 0; 18812 switch (error) { 18813 case EPERM: 18814 tp->t_softerror = error; 18815 #ifdef TCP_ACCOUNTING 18816 crtsc = get_cyclecount(); 18817 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18818 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18819 } 18820 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18821 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18822 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18823 } 18824 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18825 sched_unpin(); 18826 #endif 18827 return (error); 18828 case ENOBUFS: 18829 /* 18830 * Pace us right away to retry in a some 18831 * time 18832 */ 18833 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18834 if (rack->rc_enobuf < 0x7f) 18835 rack->rc_enobuf++; 18836 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18837 slot = 10 * HPTS_USEC_IN_MSEC; 18838 if (rack->r_ctl.crte != NULL) { 18839 counter_u64_add(rack_saw_enobuf_hw, 1); 18840 tcp_rl_log_enobuf(rack->r_ctl.crte); 18841 } 18842 counter_u64_add(rack_saw_enobuf, 1); 18843 goto enobufs; 18844 case EMSGSIZE: 18845 /* 18846 * For some reason the interface we used initially 18847 * to send segments changed to another or lowered 18848 * its MTU. If TSO was active we either got an 18849 * interface without TSO capabilits or TSO was 18850 * turned off. If we obtained mtu from ip_output() 18851 * then update it and try again. 
18852 */ 18853 if (tso) 18854 tp->t_flags &= ~TF_TSO; 18855 if (mtu != 0) { 18856 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18857 goto again; 18858 } 18859 slot = 10 * HPTS_USEC_IN_MSEC; 18860 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18861 #ifdef TCP_ACCOUNTING 18862 crtsc = get_cyclecount(); 18863 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18864 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18865 } 18866 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18867 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18868 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18869 } 18870 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18871 sched_unpin(); 18872 #endif 18873 return (error); 18874 case ENETUNREACH: 18875 counter_u64_add(rack_saw_enetunreach, 1); 18876 case EHOSTDOWN: 18877 case EHOSTUNREACH: 18878 case ENETDOWN: 18879 if (TCPS_HAVERCVDSYN(tp->t_state)) { 18880 tp->t_softerror = error; 18881 } 18882 /* FALLTHROUGH */ 18883 default: 18884 slot = 10 * HPTS_USEC_IN_MSEC; 18885 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18886 #ifdef TCP_ACCOUNTING 18887 crtsc = get_cyclecount(); 18888 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18889 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18890 } 18891 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18892 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18893 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18894 } 18895 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18896 sched_unpin(); 18897 #endif 18898 return (error); 18899 } 18900 } else { 18901 rack->rc_enobuf = 0; 18902 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18903 rack->r_ctl.retran_during_recovery += len; 18904 } 18905 KMOD_TCPSTAT_INC(tcps_sndtotal); 18906 18907 /* 18908 * Data sent (as far as we can tell). If this advertises a larger 18909 * window than any other segment, then remember the size of the 18910 * advertised window. Any pending ACK has now been sent. 18911 */ 18912 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 18913 tp->rcv_adv = tp->rcv_nxt + recwin; 18914 18915 tp->last_ack_sent = tp->rcv_nxt; 18916 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18917 enobufs: 18918 if (sendalot) { 18919 /* Do we need to turn off sendalot? */ 18920 if (rack->r_ctl.rc_pace_max_segs && 18921 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 18922 /* We hit our max. */ 18923 sendalot = 0; 18924 } else if ((rack->rc_user_set_max_segs) && 18925 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 18926 /* We hit the user defined max */ 18927 sendalot = 0; 18928 } 18929 } 18930 if ((error == 0) && (flags & TH_FIN)) 18931 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 18932 if (flags & TH_RST) { 18933 /* 18934 * We don't send again after sending a RST. 18935 */ 18936 slot = 0; 18937 sendalot = 0; 18938 if (error == 0) 18939 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 18940 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 18941 /* 18942 * Get our pacing rate, if an error 18943 * occurred in sending (ENOBUF) we would 18944 * hit the else if with slot preset. Other 18945 * errors return. 18946 */ 18947 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 18948 } 18949 if (rsm && 18950 (rsm->r_flags & RACK_HAS_SYN) == 0 && 18951 rack->use_rack_rr) { 18952 /* Its a retransmit and we use the rack cheat? 
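 * The "rack cheat" (use_rack_rr) means a retransmission is not paced by
 * the normal pacing-delay calculation: if no slot was computed, pacing
 * is not enabled, or r_rr_config is 1, we simply space it by the
 * minimum rack timeout (rc_min_to) instead.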
*/ 18953 if ((slot == 0) || 18954 (rack->rc_always_pace == 0) || 18955 (rack->r_rr_config == 1)) { 18956 /* 18957 * We have no pacing set or we 18958 * are using old-style rack or 18959 * we are overriden to use the old 1ms pacing. 18960 */ 18961 slot = rack->r_ctl.rc_min_to; 18962 } 18963 } 18964 /* We have sent clear the flag */ 18965 rack->r_ent_rec_ns = 0; 18966 if (rack->r_must_retran) { 18967 if (rsm) { 18968 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 18969 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 18970 /* 18971 * We have retransmitted all. 18972 */ 18973 rack->r_must_retran = 0; 18974 rack->r_ctl.rc_out_at_rto = 0; 18975 } 18976 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 18977 /* 18978 * Sending new data will also kill 18979 * the loop. 18980 */ 18981 rack->r_must_retran = 0; 18982 rack->r_ctl.rc_out_at_rto = 0; 18983 } 18984 } 18985 rack->r_ctl.fsb.recwin = recwin; 18986 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 18987 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 18988 /* 18989 * We hit an RTO and now have past snd_max at the RTO 18990 * clear all the WAS flags. 18991 */ 18992 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 18993 } 18994 if (slot) { 18995 /* set the rack tcb into the slot N */ 18996 if ((error == 0) && 18997 rack_use_rfo && 18998 ((flags & (TH_SYN|TH_FIN)) == 0) && 18999 (rsm == NULL) && 19000 (tp->snd_nxt == tp->snd_max) && 19001 (ipoptlen == 0) && 19002 (tp->rcv_numsacks == 0) && 19003 rack->r_fsb_inited && 19004 TCPS_HAVEESTABLISHED(tp->t_state) && 19005 (rack->r_must_retran == 0) && 19006 ((tp->t_flags & TF_NEEDFIN) == 0) && 19007 (len > 0) && (orig_len > 0) && 19008 (orig_len > len) && 19009 ((orig_len - len) >= segsiz) && 19010 ((optlen == 0) || 19011 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19012 /* We can send at least one more MSS using our fsb */ 19013 19014 rack->r_fast_output = 1; 19015 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19016 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19017 rack->r_ctl.fsb.tcp_flags = flags; 19018 rack->r_ctl.fsb.left_to_send = orig_len - len; 19019 if (hw_tls) 19020 rack->r_ctl.fsb.hw_tls = 1; 19021 else 19022 rack->r_ctl.fsb.hw_tls = 0; 19023 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19024 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19025 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19026 (tp->snd_max - tp->snd_una))); 19027 if (rack->r_ctl.fsb.left_to_send < segsiz) 19028 rack->r_fast_output = 0; 19029 else { 19030 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19031 rack->r_ctl.fsb.rfo_apply_push = 1; 19032 else 19033 rack->r_ctl.fsb.rfo_apply_push = 0; 19034 } 19035 } else 19036 rack->r_fast_output = 0; 19037 rack_log_fsb(rack, tp, so, flags, 19038 ipoptlen, orig_len, len, error, 19039 (rsm == NULL), optlen, __LINE__, 2); 19040 } else if (sendalot) { 19041 int ret; 19042 19043 sack_rxmit = 0; 19044 if ((error == 0) && 19045 rack_use_rfo && 19046 ((flags & (TH_SYN|TH_FIN)) == 0) && 19047 (rsm == NULL) && 19048 (ipoptlen == 0) && 19049 (tp->rcv_numsacks == 0) && 19050 (tp->snd_nxt == tp->snd_max) && 19051 (rack->r_must_retran == 0) && 19052 rack->r_fsb_inited && 19053 TCPS_HAVEESTABLISHED(tp->t_state) && 19054 ((tp->t_flags & TF_NEEDFIN) == 0) && 19055 (len > 0) && (orig_len > 0) && 19056 (orig_len > len) && 19057 ((orig_len - len) >= segsiz) && 19058 ((optlen == 0) || 19059 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19060 /* we can use fast_output for more */ 19061 19062 rack->r_fast_output = 1; 19063 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19064 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19065 rack->r_ctl.fsb.tcp_flags = flags; 19066 rack->r_ctl.fsb.left_to_send = orig_len - len; 19067 if (hw_tls) 19068 rack->r_ctl.fsb.hw_tls = 1; 19069 else 19070 rack->r_ctl.fsb.hw_tls = 0; 19071 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19072 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19073 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19074 (tp->snd_max - tp->snd_una))); 19075 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19076 rack->r_fast_output = 0; 19077 } 19078 if (rack->r_fast_output) { 19079 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19080 rack->r_ctl.fsb.rfo_apply_push = 1; 19081 else 19082 rack->r_ctl.fsb.rfo_apply_push = 0; 19083 rack_log_fsb(rack, tp, so, flags, 19084 ipoptlen, orig_len, len, error, 19085 (rsm == NULL), optlen, __LINE__, 3); 19086 error = 0; 19087 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19088 if (ret >= 0) 19089 return (ret); 19090 else if (error) 19091 goto nomore; 19092 19093 } 19094 } 19095 goto again; 19096 } 19097 /* Assure when we leave that snd_nxt will point to top */ 19098 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19099 tp->snd_nxt = tp->snd_max; 19100 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19101 #ifdef TCP_ACCOUNTING 19102 crtsc = get_cyclecount() - ts_val; 19103 if (tot_len_this_send) { 19104 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19105 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19106 } 19107 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19108 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19109 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19110 } 19111 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19112 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19113 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19114 } 19115 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19116 } else { 19117 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19118 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19119 } 19120 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19121 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19122 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19123 } 19124 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19125 } 19126 sched_unpin(); 19127 #endif 19128 if (error == ENOBUFS) 19129 error = 0; 19130 return (error); 19131 } 19132 19133 static void 19134 rack_update_seg(struct tcp_rack *rack) 19135 { 19136 uint32_t orig_val; 19137 19138 orig_val = rack->r_ctl.rc_pace_max_segs; 19139 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19140 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19141 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19142 } 19143 19144 static void 19145 rack_mtu_change(struct tcpcb *tp) 19146 { 19147 /* 19148 * The MSS may have changed 19149 */ 19150 struct tcp_rack *rack; 19151 struct rack_sendmap *rsm; 19152 19153 rack = (struct tcp_rack *)tp->t_fb_ptr; 19154 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19155 /* 19156 * The MTU has changed we need to resend everything 19157 * since all we have sent is lost. We first fix 19158 * up the mtu though. 
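 * After re-deriving the pacing segment sizes we treat this like a
 * retransmit timeout, but without the cwnd/ssthresh reduction: we flag
 * r_must_retran, note how much was in flight at that point, and tag
 * every rsm still on the transmit map with RACK_MUST_RXT so the whole
 * outstanding window is resent at the new, smaller MSS.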
19159 */ 19160 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19161 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19162 rack_remxt_tmr(tp); 19163 rack->r_fast_output = 0; 19164 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19165 rack->r_ctl.rc_sacked); 19166 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19167 rack->r_must_retran = 1; 19168 /* Mark all inflight to needing to be rxt'd */ 19169 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19170 rsm->r_flags |= RACK_MUST_RXT; 19171 } 19172 } 19173 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19174 /* We don't use snd_nxt to retransmit */ 19175 tp->snd_nxt = tp->snd_max; 19176 } 19177 19178 static int 19179 rack_set_profile(struct tcp_rack *rack, int prof) 19180 { 19181 int err = EINVAL; 19182 if (prof == 1) { 19183 /* pace_always=1 */ 19184 if (rack->rc_always_pace == 0) { 19185 if (tcp_can_enable_pacing() == 0) 19186 return (EBUSY); 19187 } 19188 rack->rc_always_pace = 1; 19189 if (rack->use_fixed_rate || rack->gp_ready) 19190 rack_set_cc_pacing(rack); 19191 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19192 rack->rack_attempt_hdwr_pace = 0; 19193 /* cmpack=1 */ 19194 if (rack_use_cmp_acks) 19195 rack->r_use_cmp_ack = 1; 19196 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19197 rack->r_use_cmp_ack) 19198 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19199 /* scwnd=1 */ 19200 rack->rack_enable_scwnd = 1; 19201 /* dynamic=100 */ 19202 rack->rc_gp_dyn_mul = 1; 19203 /* gp_inc_ca */ 19204 rack->r_ctl.rack_per_of_gp_ca = 100; 19205 /* rrr_conf=3 */ 19206 rack->r_rr_config = 3; 19207 /* npush=2 */ 19208 rack->r_ctl.rc_no_push_at_mrtt = 2; 19209 /* fillcw=1 */ 19210 rack->rc_pace_to_cwnd = 1; 19211 rack->rc_pace_fill_if_rttin_range = 0; 19212 rack->rtt_limit_mul = 0; 19213 /* noprr=1 */ 19214 rack->rack_no_prr = 1; 19215 /* lscwnd=1 */ 19216 rack->r_limit_scw = 1; 19217 /* gp_inc_rec */ 19218 rack->r_ctl.rack_per_of_gp_rec = 90; 19219 err = 0; 19220 19221 } else if (prof == 3) { 19222 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19223 /* pace_always=1 */ 19224 if (rack->rc_always_pace == 0) { 19225 if (tcp_can_enable_pacing() == 0) 19226 return (EBUSY); 19227 } 19228 rack->rc_always_pace = 1; 19229 if (rack->use_fixed_rate || rack->gp_ready) 19230 rack_set_cc_pacing(rack); 19231 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19232 rack->rack_attempt_hdwr_pace = 0; 19233 /* cmpack=1 */ 19234 if (rack_use_cmp_acks) 19235 rack->r_use_cmp_ack = 1; 19236 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19237 rack->r_use_cmp_ack) 19238 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19239 /* scwnd=1 */ 19240 rack->rack_enable_scwnd = 1; 19241 /* dynamic=100 */ 19242 rack->rc_gp_dyn_mul = 1; 19243 /* gp_inc_ca */ 19244 rack->r_ctl.rack_per_of_gp_ca = 100; 19245 /* rrr_conf=3 */ 19246 rack->r_rr_config = 3; 19247 /* npush=2 */ 19248 rack->r_ctl.rc_no_push_at_mrtt = 2; 19249 /* fillcw=2 */ 19250 rack->rc_pace_to_cwnd = 1; 19251 rack->r_fill_less_agg = 1; 19252 rack->rc_pace_fill_if_rttin_range = 0; 19253 rack->rtt_limit_mul = 0; 19254 /* noprr=1 */ 19255 rack->rack_no_prr = 1; 19256 /* lscwnd=1 */ 19257 rack->r_limit_scw = 1; 19258 /* gp_inc_rec */ 19259 rack->r_ctl.rack_per_of_gp_rec = 90; 19260 err = 0; 19261 19262 19263 } else if (prof == 2) { 19264 /* cmpack=1 */ 19265 if (rack->rc_always_pace == 0) { 19266 if (tcp_can_enable_pacing() == 0) 19267 return (EBUSY); 19268 } 19269 rack->rc_always_pace = 1; 19270 if (rack->use_fixed_rate || rack->gp_ready) 19271 
rack_set_cc_pacing(rack); 19272 rack->r_use_cmp_ack = 1; 19273 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19274 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19275 /* pace_always=1 */ 19276 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19277 /* scwnd=1 */ 19278 rack->rack_enable_scwnd = 1; 19279 /* dynamic=100 */ 19280 rack->rc_gp_dyn_mul = 1; 19281 rack->r_ctl.rack_per_of_gp_ca = 100; 19282 /* rrr_conf=3 */ 19283 rack->r_rr_config = 3; 19284 /* npush=2 */ 19285 rack->r_ctl.rc_no_push_at_mrtt = 2; 19286 /* fillcw=1 */ 19287 rack->rc_pace_to_cwnd = 1; 19288 rack->rc_pace_fill_if_rttin_range = 0; 19289 rack->rtt_limit_mul = 0; 19290 /* noprr=1 */ 19291 rack->rack_no_prr = 1; 19292 /* lscwnd=0 */ 19293 rack->r_limit_scw = 0; 19294 err = 0; 19295 } else if (prof == 0) { 19296 /* This changes things back to the default settings */ 19297 err = 0; 19298 if (rack->rc_always_pace) { 19299 tcp_decrement_paced_conn(); 19300 rack_undo_cc_pacing(rack); 19301 rack->rc_always_pace = 0; 19302 } 19303 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19304 rack->rc_always_pace = 1; 19305 if (rack->use_fixed_rate || rack->gp_ready) 19306 rack_set_cc_pacing(rack); 19307 } else 19308 rack->rc_always_pace = 0; 19309 if (rack_dsack_std_based & 0x1) { 19310 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19311 rack->rc_rack_tmr_std_based = 1; 19312 } 19313 if (rack_dsack_std_based & 0x2) { 19314 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19315 rack->rc_rack_use_dsack = 1; 19316 } 19317 if (rack_use_cmp_acks) 19318 rack->r_use_cmp_ack = 1; 19319 else 19320 rack->r_use_cmp_ack = 0; 19321 if (rack_disable_prr) 19322 rack->rack_no_prr = 1; 19323 else 19324 rack->rack_no_prr = 0; 19325 if (rack_gp_no_rec_chg) 19326 rack->rc_gp_no_rec_chg = 1; 19327 else 19328 rack->rc_gp_no_rec_chg = 0; 19329 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19330 rack->r_mbuf_queue = 1; 19331 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19332 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19333 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19334 } else { 19335 rack->r_mbuf_queue = 0; 19336 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19337 } 19338 if (rack_enable_shared_cwnd) 19339 rack->rack_enable_scwnd = 1; 19340 else 19341 rack->rack_enable_scwnd = 0; 19342 if (rack_do_dyn_mul) { 19343 /* When dynamic adjustment is on CA needs to start at 100% */ 19344 rack->rc_gp_dyn_mul = 1; 19345 if (rack_do_dyn_mul >= 100) 19346 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19347 } else { 19348 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19349 rack->rc_gp_dyn_mul = 0; 19350 } 19351 rack->r_rr_config = 0; 19352 rack->r_ctl.rc_no_push_at_mrtt = 0; 19353 rack->rc_pace_to_cwnd = 0; 19354 rack->rc_pace_fill_if_rttin_range = 0; 19355 rack->rtt_limit_mul = 0; 19356 19357 if (rack_enable_hw_pacing) 19358 rack->rack_hdw_pace_ena = 1; 19359 else 19360 rack->rack_hdw_pace_ena = 0; 19361 if (rack_disable_prr) 19362 rack->rack_no_prr = 1; 19363 else 19364 rack->rack_no_prr = 0; 19365 if (rack_limits_scwnd) 19366 rack->r_limit_scw = 1; 19367 else 19368 rack->r_limit_scw = 0; 19369 err = 0; 19370 } 19371 return (err); 19372 } 19373 19374 static int 19375 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19376 { 19377 struct deferred_opt_list *dol; 19378 19379 dol = malloc(sizeof(struct deferred_opt_list), 19380 M_TCPFSB, M_NOWAIT|M_ZERO); 19381 if (dol == NULL) { 19382 /* 19383 * No space yikes -- fail out.. 
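 * Returning 0 tells the caller the option could not be queued;
 * rack_set_sockopt() turns that into ENOMEM for the user.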
19384 */ 19385 return (0); 19386 } 19387 dol->optname = sopt_name; 19388 dol->optval = loptval; 19389 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19390 return (1); 19391 } 19392 19393 static int 19394 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19395 uint32_t optval, uint64_t loptval) 19396 { 19397 struct epoch_tracker et; 19398 struct sockopt sopt; 19399 struct cc_newreno_opts opt; 19400 uint64_t val; 19401 int error = 0; 19402 uint16_t ca, ss; 19403 19404 switch (sopt_name) { 19405 19406 case TCP_RACK_DSACK_OPT: 19407 RACK_OPTS_INC(tcp_rack_dsack_opt); 19408 if (optval & 0x1) { 19409 rack->rc_rack_tmr_std_based = 1; 19410 } else { 19411 rack->rc_rack_tmr_std_based = 0; 19412 } 19413 if (optval & 0x2) { 19414 rack->rc_rack_use_dsack = 1; 19415 } else { 19416 rack->rc_rack_use_dsack = 0; 19417 } 19418 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19419 break; 19420 case TCP_RACK_PACING_BETA: 19421 RACK_OPTS_INC(tcp_rack_beta); 19422 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19423 /* This only works for newreno. */ 19424 error = EINVAL; 19425 break; 19426 } 19427 if (rack->rc_pacing_cc_set) { 19428 /* 19429 * Set them into the real CC module 19430 * whats in the rack pcb is the old values 19431 * to be used on restoral/ 19432 */ 19433 sopt.sopt_dir = SOPT_SET; 19434 opt.name = CC_NEWRENO_BETA; 19435 opt.val = optval; 19436 if (CC_ALGO(tp)->ctl_output != NULL) 19437 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19438 else { 19439 error = ENOENT; 19440 break; 19441 } 19442 } else { 19443 /* 19444 * Not pacing yet so set it into our local 19445 * rack pcb storage. 19446 */ 19447 rack->r_ctl.rc_saved_beta.beta = optval; 19448 } 19449 break; 19450 case TCP_RACK_TIMER_SLOP: 19451 RACK_OPTS_INC(tcp_rack_timer_slop); 19452 rack->r_ctl.timer_slop = optval; 19453 if (rack->rc_tp->t_srtt) { 19454 /* 19455 * If we have an SRTT lets update t_rxtcur 19456 * to have the new slop. 19457 */ 19458 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19459 rack_rto_min, rack_rto_max, 19460 rack->r_ctl.timer_slop); 19461 } 19462 break; 19463 case TCP_RACK_PACING_BETA_ECN: 19464 RACK_OPTS_INC(tcp_rack_beta_ecn); 19465 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19466 /* This only works for newreno. */ 19467 error = EINVAL; 19468 break; 19469 } 19470 if (rack->rc_pacing_cc_set) { 19471 /* 19472 * Set them into the real CC module 19473 * whats in the rack pcb is the old values 19474 * to be used on restoral/ 19475 */ 19476 sopt.sopt_dir = SOPT_SET; 19477 opt.name = CC_NEWRENO_BETA_ECN; 19478 opt.val = optval; 19479 if (CC_ALGO(tp)->ctl_output != NULL) 19480 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19481 else 19482 error = ENOENT; 19483 } else { 19484 /* 19485 * Not pacing yet so set it into our local 19486 * rack pcb storage. 
19487 */ 19488 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19489 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19490 } 19491 break; 19492 case TCP_DEFER_OPTIONS: 19493 RACK_OPTS_INC(tcp_defer_opt); 19494 if (optval) { 19495 if (rack->gp_ready) { 19496 /* Too late */ 19497 error = EINVAL; 19498 break; 19499 } 19500 rack->defer_options = 1; 19501 } else 19502 rack->defer_options = 0; 19503 break; 19504 case TCP_RACK_MEASURE_CNT: 19505 RACK_OPTS_INC(tcp_rack_measure_cnt); 19506 if (optval && (optval <= 0xff)) { 19507 rack->r_ctl.req_measurements = optval; 19508 } else 19509 error = EINVAL; 19510 break; 19511 case TCP_REC_ABC_VAL: 19512 RACK_OPTS_INC(tcp_rec_abc_val); 19513 if (optval > 0) 19514 rack->r_use_labc_for_rec = 1; 19515 else 19516 rack->r_use_labc_for_rec = 0; 19517 break; 19518 case TCP_RACK_ABC_VAL: 19519 RACK_OPTS_INC(tcp_rack_abc_val); 19520 if ((optval > 0) && (optval < 255)) 19521 rack->rc_labc = optval; 19522 else 19523 error = EINVAL; 19524 break; 19525 case TCP_HDWR_UP_ONLY: 19526 RACK_OPTS_INC(tcp_pacing_up_only); 19527 if (optval) 19528 rack->r_up_only = 1; 19529 else 19530 rack->r_up_only = 0; 19531 break; 19532 case TCP_PACING_RATE_CAP: 19533 RACK_OPTS_INC(tcp_pacing_rate_cap); 19534 rack->r_ctl.bw_rate_cap = loptval; 19535 break; 19536 case TCP_RACK_PROFILE: 19537 RACK_OPTS_INC(tcp_profile); 19538 error = rack_set_profile(rack, optval); 19539 break; 19540 case TCP_USE_CMP_ACKS: 19541 RACK_OPTS_INC(tcp_use_cmp_acks); 19542 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19543 /* You can't turn it off once its on! */ 19544 error = EINVAL; 19545 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19546 rack->r_use_cmp_ack = 1; 19547 rack->r_mbuf_queue = 1; 19548 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19549 } 19550 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19551 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19552 break; 19553 case TCP_SHARED_CWND_TIME_LIMIT: 19554 RACK_OPTS_INC(tcp_lscwnd); 19555 if (optval) 19556 rack->r_limit_scw = 1; 19557 else 19558 rack->r_limit_scw = 0; 19559 break; 19560 case TCP_RACK_PACE_TO_FILL: 19561 RACK_OPTS_INC(tcp_fillcw); 19562 if (optval == 0) 19563 rack->rc_pace_to_cwnd = 0; 19564 else { 19565 rack->rc_pace_to_cwnd = 1; 19566 if (optval > 1) 19567 rack->r_fill_less_agg = 1; 19568 } 19569 if ((optval >= rack_gp_rtt_maxmul) && 19570 rack_gp_rtt_maxmul && 19571 (optval < 0xf)) { 19572 rack->rc_pace_fill_if_rttin_range = 1; 19573 rack->rtt_limit_mul = optval; 19574 } else { 19575 rack->rc_pace_fill_if_rttin_range = 0; 19576 rack->rtt_limit_mul = 0; 19577 } 19578 break; 19579 case TCP_RACK_NO_PUSH_AT_MAX: 19580 RACK_OPTS_INC(tcp_npush); 19581 if (optval == 0) 19582 rack->r_ctl.rc_no_push_at_mrtt = 0; 19583 else if (optval < 0xff) 19584 rack->r_ctl.rc_no_push_at_mrtt = optval; 19585 else 19586 error = EINVAL; 19587 break; 19588 case TCP_SHARED_CWND_ENABLE: 19589 RACK_OPTS_INC(tcp_rack_scwnd); 19590 if (optval == 0) 19591 rack->rack_enable_scwnd = 0; 19592 else 19593 rack->rack_enable_scwnd = 1; 19594 break; 19595 case TCP_RACK_MBUF_QUEUE: 19596 /* Now do we use the LRO mbuf-queue feature */ 19597 RACK_OPTS_INC(tcp_rack_mbufq); 19598 if (optval || rack->r_use_cmp_ack) 19599 rack->r_mbuf_queue = 1; 19600 else 19601 rack->r_mbuf_queue = 0; 19602 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19603 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19604 else 19605 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19606 break; 19607 case 
TCP_RACK_NONRXT_CFG_RATE: 19608 RACK_OPTS_INC(tcp_rack_cfg_rate); 19609 if (optval == 0) 19610 rack->rack_rec_nonrxt_use_cr = 0; 19611 else 19612 rack->rack_rec_nonrxt_use_cr = 1; 19613 break; 19614 case TCP_NO_PRR: 19615 RACK_OPTS_INC(tcp_rack_noprr); 19616 if (optval == 0) 19617 rack->rack_no_prr = 0; 19618 else if (optval == 1) 19619 rack->rack_no_prr = 1; 19620 else if (optval == 2) 19621 rack->no_prr_addback = 1; 19622 else 19623 error = EINVAL; 19624 break; 19625 case TCP_TIMELY_DYN_ADJ: 19626 RACK_OPTS_INC(tcp_timely_dyn); 19627 if (optval == 0) 19628 rack->rc_gp_dyn_mul = 0; 19629 else { 19630 rack->rc_gp_dyn_mul = 1; 19631 if (optval >= 100) { 19632 /* 19633 * If the user sets something 100 or more 19634 * its the gp_ca value. 19635 */ 19636 rack->r_ctl.rack_per_of_gp_ca = optval; 19637 } 19638 } 19639 break; 19640 case TCP_RACK_DO_DETECTION: 19641 RACK_OPTS_INC(tcp_rack_do_detection); 19642 if (optval == 0) 19643 rack->do_detection = 0; 19644 else 19645 rack->do_detection = 1; 19646 break; 19647 case TCP_RACK_TLP_USE: 19648 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19649 error = EINVAL; 19650 break; 19651 } 19652 RACK_OPTS_INC(tcp_tlp_use); 19653 rack->rack_tlp_threshold_use = optval; 19654 break; 19655 case TCP_RACK_TLP_REDUCE: 19656 /* RACK TLP cwnd reduction (bool) */ 19657 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19658 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19659 break; 19660 /* Pacing related ones */ 19661 case TCP_RACK_PACE_ALWAYS: 19662 /* 19663 * zero is old rack method, 1 is new 19664 * method using a pacing rate. 19665 */ 19666 RACK_OPTS_INC(tcp_rack_pace_always); 19667 if (optval > 0) { 19668 if (rack->rc_always_pace) { 19669 error = EALREADY; 19670 break; 19671 } else if (tcp_can_enable_pacing()) { 19672 rack->rc_always_pace = 1; 19673 if (rack->use_fixed_rate || rack->gp_ready) 19674 rack_set_cc_pacing(rack); 19675 } 19676 else { 19677 error = ENOSPC; 19678 break; 19679 } 19680 } else { 19681 if (rack->rc_always_pace) { 19682 tcp_decrement_paced_conn(); 19683 rack->rc_always_pace = 0; 19684 rack_undo_cc_pacing(rack); 19685 } 19686 } 19687 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19688 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19689 else 19690 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19691 /* A rate may be set irate or other, if so set seg size */ 19692 rack_update_seg(rack); 19693 break; 19694 case TCP_BBR_RACK_INIT_RATE: 19695 RACK_OPTS_INC(tcp_initial_rate); 19696 val = optval; 19697 /* Change from kbits per second to bytes per second */ 19698 val *= 1000; 19699 val /= 8; 19700 rack->r_ctl.init_rate = val; 19701 if (rack->rc_init_win != rack_default_init_window) { 19702 uint32_t win, snt; 19703 19704 /* 19705 * Options don't always get applied 19706 * in the order you think. So in order 19707 * to assure we update a cwnd we need 19708 * to check and see if we are still 19709 * where we should raise the cwnd. 
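 * As an example of the conversion above, an optval of 10000 (10 Mbit/s)
 * becomes 10000 * 1000 / 8 = 1,250,000 bytes per second in init_rate.
 * The check below only raises snd_cwnd to the configured initial window
 * when we have not yet sent a full window's worth of data and the
 * current cwnd is still below that window.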
19710 */ 19711 win = rc_init_window(rack); 19712 if (SEQ_GT(tp->snd_max, tp->iss)) 19713 snt = tp->snd_max - tp->iss; 19714 else 19715 snt = 0; 19716 if ((snt < win) && 19717 (tp->snd_cwnd < win)) 19718 tp->snd_cwnd = win; 19719 } 19720 if (rack->rc_always_pace) 19721 rack_update_seg(rack); 19722 break; 19723 case TCP_BBR_IWINTSO: 19724 RACK_OPTS_INC(tcp_initial_win); 19725 if (optval && (optval <= 0xff)) { 19726 uint32_t win, snt; 19727 19728 rack->rc_init_win = optval; 19729 win = rc_init_window(rack); 19730 if (SEQ_GT(tp->snd_max, tp->iss)) 19731 snt = tp->snd_max - tp->iss; 19732 else 19733 snt = 0; 19734 if ((snt < win) && 19735 (tp->t_srtt | 19736 #ifdef NETFLIX_PEAKRATE 19737 tp->t_maxpeakrate | 19738 #endif 19739 rack->r_ctl.init_rate)) { 19740 /* 19741 * We are not past the initial window 19742 * and we have some bases for pacing, 19743 * so we need to possibly adjust up 19744 * the cwnd. Note even if we don't set 19745 * the cwnd, its still ok to raise the rc_init_win 19746 * which can be used coming out of idle when we 19747 * would have a rate. 19748 */ 19749 if (tp->snd_cwnd < win) 19750 tp->snd_cwnd = win; 19751 } 19752 if (rack->rc_always_pace) 19753 rack_update_seg(rack); 19754 } else 19755 error = EINVAL; 19756 break; 19757 case TCP_RACK_FORCE_MSEG: 19758 RACK_OPTS_INC(tcp_rack_force_max_seg); 19759 if (optval) 19760 rack->rc_force_max_seg = 1; 19761 else 19762 rack->rc_force_max_seg = 0; 19763 break; 19764 case TCP_RACK_PACE_MAX_SEG: 19765 /* Max segments size in a pace in bytes */ 19766 RACK_OPTS_INC(tcp_rack_max_seg); 19767 rack->rc_user_set_max_segs = optval; 19768 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19769 break; 19770 case TCP_RACK_PACE_RATE_REC: 19771 /* Set the fixed pacing rate in Bytes per second ca */ 19772 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19773 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19774 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19775 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19776 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19777 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19778 rack->use_fixed_rate = 1; 19779 if (rack->rc_always_pace) 19780 rack_set_cc_pacing(rack); 19781 rack_log_pacing_delay_calc(rack, 19782 rack->r_ctl.rc_fixed_pacing_rate_ss, 19783 rack->r_ctl.rc_fixed_pacing_rate_ca, 19784 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19785 __LINE__, NULL,0); 19786 break; 19787 19788 case TCP_RACK_PACE_RATE_SS: 19789 /* Set the fixed pacing rate in Bytes per second ca */ 19790 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19791 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19792 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19793 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19794 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19795 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19796 rack->use_fixed_rate = 1; 19797 if (rack->rc_always_pace) 19798 rack_set_cc_pacing(rack); 19799 rack_log_pacing_delay_calc(rack, 19800 rack->r_ctl.rc_fixed_pacing_rate_ss, 19801 rack->r_ctl.rc_fixed_pacing_rate_ca, 19802 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19803 __LINE__, NULL, 0); 19804 break; 19805 19806 case TCP_RACK_PACE_RATE_CA: 19807 /* Set the fixed pacing rate in Bytes per second ca */ 19808 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19809 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19810 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19811 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19812 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19813 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19814 rack->use_fixed_rate = 1; 19815 
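/*
 * Setting any one of the fixed pacing rates (CA here, SS and REC in the
 * cases above) seeds the other two if they are still zero and switches
 * the connection to fixed-rate pacing. A minimal userspace sketch of
 * driving this option (illustrative only, not part of this file; rate
 * is in bytes per second) looks like:
 *
 *	uint32_t rate = 1000000;
 *	if (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_RATE_CA,
 *	    &rate, sizeof(rate)) == -1)
 *		err(1, "TCP_RACK_PACE_RATE_CA");
 *
 * where 's' is a TCP socket already attached to the rack stack.
 */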
if (rack->rc_always_pace) 19816 rack_set_cc_pacing(rack); 19817 rack_log_pacing_delay_calc(rack, 19818 rack->r_ctl.rc_fixed_pacing_rate_ss, 19819 rack->r_ctl.rc_fixed_pacing_rate_ca, 19820 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19821 __LINE__, NULL, 0); 19822 break; 19823 case TCP_RACK_GP_INCREASE_REC: 19824 RACK_OPTS_INC(tcp_gp_inc_rec); 19825 rack->r_ctl.rack_per_of_gp_rec = optval; 19826 rack_log_pacing_delay_calc(rack, 19827 rack->r_ctl.rack_per_of_gp_ss, 19828 rack->r_ctl.rack_per_of_gp_ca, 19829 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19830 __LINE__, NULL, 0); 19831 break; 19832 case TCP_RACK_GP_INCREASE_CA: 19833 RACK_OPTS_INC(tcp_gp_inc_ca); 19834 ca = optval; 19835 if (ca < 100) { 19836 /* 19837 * We don't allow any reduction 19838 * over the GP b/w. 19839 */ 19840 error = EINVAL; 19841 break; 19842 } 19843 rack->r_ctl.rack_per_of_gp_ca = ca; 19844 rack_log_pacing_delay_calc(rack, 19845 rack->r_ctl.rack_per_of_gp_ss, 19846 rack->r_ctl.rack_per_of_gp_ca, 19847 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19848 __LINE__, NULL, 0); 19849 break; 19850 case TCP_RACK_GP_INCREASE_SS: 19851 RACK_OPTS_INC(tcp_gp_inc_ss); 19852 ss = optval; 19853 if (ss < 100) { 19854 /* 19855 * We don't allow any reduction 19856 * over the GP b/w. 19857 */ 19858 error = EINVAL; 19859 break; 19860 } 19861 rack->r_ctl.rack_per_of_gp_ss = ss; 19862 rack_log_pacing_delay_calc(rack, 19863 rack->r_ctl.rack_per_of_gp_ss, 19864 rack->r_ctl.rack_per_of_gp_ca, 19865 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19866 __LINE__, NULL, 0); 19867 break; 19868 case TCP_RACK_RR_CONF: 19869 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 19870 if (optval && optval <= 3) 19871 rack->r_rr_config = optval; 19872 else 19873 rack->r_rr_config = 0; 19874 break; 19875 case TCP_HDWR_RATE_CAP: 19876 RACK_OPTS_INC(tcp_hdwr_rate_cap); 19877 if (optval) { 19878 if (rack->r_rack_hw_rate_caps == 0) 19879 rack->r_rack_hw_rate_caps = 1; 19880 else 19881 error = EALREADY; 19882 } else { 19883 rack->r_rack_hw_rate_caps = 0; 19884 } 19885 break; 19886 case TCP_BBR_HDWR_PACE: 19887 RACK_OPTS_INC(tcp_hdwr_pacing); 19888 if (optval){ 19889 if (rack->rack_hdrw_pacing == 0) { 19890 rack->rack_hdw_pace_ena = 1; 19891 rack->rack_attempt_hdwr_pace = 0; 19892 } else 19893 error = EALREADY; 19894 } else { 19895 rack->rack_hdw_pace_ena = 0; 19896 #ifdef RATELIMIT 19897 if (rack->r_ctl.crte != NULL) { 19898 rack->rack_hdrw_pacing = 0; 19899 rack->rack_attempt_hdwr_pace = 0; 19900 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 19901 rack->r_ctl.crte = NULL; 19902 } 19903 #endif 19904 } 19905 break; 19906 /* End Pacing related ones */ 19907 case TCP_RACK_PRR_SENDALOT: 19908 /* Allow PRR to send more than one seg */ 19909 RACK_OPTS_INC(tcp_rack_prr_sendalot); 19910 rack->r_ctl.rc_prr_sendalot = optval; 19911 break; 19912 case TCP_RACK_MIN_TO: 19913 /* Minimum time between rack t-o's in ms */ 19914 RACK_OPTS_INC(tcp_rack_min_to); 19915 rack->r_ctl.rc_min_to = optval; 19916 break; 19917 case TCP_RACK_EARLY_SEG: 19918 /* If early recovery max segments */ 19919 RACK_OPTS_INC(tcp_rack_early_seg); 19920 rack->r_ctl.rc_early_recovery_segs = optval; 19921 break; 19922 case TCP_RACK_ENABLE_HYSTART: 19923 { 19924 if (optval) { 19925 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 19926 if (rack_do_hystart > RACK_HYSTART_ON) 19927 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 19928 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 19929 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 19930 } else { 19931 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 19932 } 
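/*
 * Note that how much of hystart is granted above is gated by the global
 * rack_do_hystart setting: CCF_HYSTART_ALLOWED is set whenever the
 * option value is non-zero, CCF_HYSTART_CAN_SH_CWND only when
 * rack_do_hystart is above RACK_HYSTART_ON, and CCF_HYSTART_CONS_SSTH
 * only when it is above RACK_HYSTART_ON_W_SC. The getsockopt side
 * reports the resulting level back as RACK_HYSTART_OFF, RACK_HYSTART_ON,
 * RACK_HYSTART_ON_W_SC or RACK_HYSTART_ON_W_SC_C.
 */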
19933 } 19934 break; 19935 case TCP_RACK_REORD_THRESH: 19936 /* RACK reorder threshold (shift amount) */ 19937 RACK_OPTS_INC(tcp_rack_reord_thresh); 19938 if ((optval > 0) && (optval < 31)) 19939 rack->r_ctl.rc_reorder_shift = optval; 19940 else 19941 error = EINVAL; 19942 break; 19943 case TCP_RACK_REORD_FADE: 19944 /* Does reordering fade after ms time */ 19945 RACK_OPTS_INC(tcp_rack_reord_fade); 19946 rack->r_ctl.rc_reorder_fade = optval; 19947 break; 19948 case TCP_RACK_TLP_THRESH: 19949 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 19950 RACK_OPTS_INC(tcp_rack_tlp_thresh); 19951 if (optval) 19952 rack->r_ctl.rc_tlp_threshold = optval; 19953 else 19954 error = EINVAL; 19955 break; 19956 case TCP_BBR_USE_RACK_RR: 19957 RACK_OPTS_INC(tcp_rack_rr); 19958 if (optval) 19959 rack->use_rack_rr = 1; 19960 else 19961 rack->use_rack_rr = 0; 19962 break; 19963 case TCP_FAST_RSM_HACK: 19964 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 19965 if (optval) 19966 rack->fast_rsm_hack = 1; 19967 else 19968 rack->fast_rsm_hack = 0; 19969 break; 19970 case TCP_RACK_PKT_DELAY: 19971 /* RACK added ms i.e. rack-rtt + reord + N */ 19972 RACK_OPTS_INC(tcp_rack_pkt_delay); 19973 rack->r_ctl.rc_pkt_delay = optval; 19974 break; 19975 case TCP_DELACK: 19976 RACK_OPTS_INC(tcp_rack_delayed_ack); 19977 if (optval == 0) 19978 tp->t_delayed_ack = 0; 19979 else 19980 tp->t_delayed_ack = 1; 19981 if (tp->t_flags & TF_DELACK) { 19982 tp->t_flags &= ~TF_DELACK; 19983 tp->t_flags |= TF_ACKNOW; 19984 NET_EPOCH_ENTER(et); 19985 rack_output(tp); 19986 NET_EPOCH_EXIT(et); 19987 } 19988 break; 19989 19990 case TCP_BBR_RACK_RTT_USE: 19991 RACK_OPTS_INC(tcp_rack_rtt_use); 19992 if ((optval != USE_RTT_HIGH) && 19993 (optval != USE_RTT_LOW) && 19994 (optval != USE_RTT_AVG)) 19995 error = EINVAL; 19996 else 19997 rack->r_ctl.rc_rate_sample_method = optval; 19998 break; 19999 case TCP_DATA_AFTER_CLOSE: 20000 RACK_OPTS_INC(tcp_data_after_close); 20001 if (optval) 20002 rack->rc_allow_data_af_clo = 1; 20003 else 20004 rack->rc_allow_data_af_clo = 0; 20005 break; 20006 default: 20007 break; 20008 } 20009 #ifdef NETFLIX_STATS 20010 tcp_log_socket_option(tp, sopt_name, optval, error); 20011 #endif 20012 return (error); 20013 } 20014 20015 20016 static void 20017 rack_apply_deferred_options(struct tcp_rack *rack) 20018 { 20019 struct deferred_opt_list *dol, *sdol; 20020 uint32_t s_optval; 20021 20022 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20023 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20024 /* Disadvantage of deferal is you loose the error return */ 20025 s_optval = (uint32_t)dol->optval; 20026 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20027 free(dol, M_TCPDO); 20028 } 20029 } 20030 20031 static void 20032 rack_hw_tls_change(struct tcpcb *tp, int chg) 20033 { 20034 /* 20035 * HW tls state has changed.. fix all 20036 * rsm's in flight. 
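 * Every rack_sendmap entry on the RB tree has its r_hw_tls bit updated
 * so later retransmissions of those segments reflect the new state, and
 * the fast-send-block template (fsb.hw_tls) is refreshed as well so
 * future fast-output sends pick it up.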
20037 */ 20038 struct tcp_rack *rack; 20039 struct rack_sendmap *rsm; 20040 20041 rack = (struct tcp_rack *)tp->t_fb_ptr; 20042 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20043 if (chg) 20044 rsm->r_hw_tls = 1; 20045 else 20046 rsm->r_hw_tls = 0; 20047 } 20048 if (chg) 20049 rack->r_ctl.fsb.hw_tls = 1; 20050 else 20051 rack->r_ctl.fsb.hw_tls = 0; 20052 } 20053 20054 static int 20055 rack_pru_options(struct tcpcb *tp, int flags) 20056 { 20057 if (flags & PRUS_OOB) 20058 return (EOPNOTSUPP); 20059 return (0); 20060 } 20061 20062 static struct tcp_function_block __tcp_rack = { 20063 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20064 .tfb_tcp_output = rack_output, 20065 .tfb_do_queued_segments = ctf_do_queued_segments, 20066 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20067 .tfb_tcp_do_segment = rack_do_segment, 20068 .tfb_tcp_ctloutput = rack_ctloutput, 20069 .tfb_tcp_fb_init = rack_init, 20070 .tfb_tcp_fb_fini = rack_fini, 20071 .tfb_tcp_timer_stop_all = rack_stopall, 20072 .tfb_tcp_timer_activate = rack_timer_activate, 20073 .tfb_tcp_timer_active = rack_timer_active, 20074 .tfb_tcp_timer_stop = rack_timer_stop, 20075 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20076 .tfb_tcp_handoff_ok = rack_handoff_ok, 20077 .tfb_tcp_mtu_chg = rack_mtu_change, 20078 .tfb_pru_options = rack_pru_options, 20079 .tfb_hwtls_change = rack_hw_tls_change, 20080 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 20081 }; 20082 20083 /* 20084 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20085 * socket option arguments. When it re-acquires the lock after the copy, it 20086 * has to revalidate that the connection is still valid for the socket 20087 * option. 20088 */ 20089 static int 20090 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt) 20091 { 20092 #ifdef INET6 20093 struct ip6_hdr *ip6; 20094 #endif 20095 #ifdef INET 20096 struct ip *ip; 20097 #endif 20098 struct tcpcb *tp; 20099 struct tcp_rack *rack; 20100 uint64_t loptval; 20101 int32_t error = 0, optval; 20102 20103 tp = intotcpcb(inp); 20104 rack = (struct tcp_rack *)tp->t_fb_ptr; 20105 if (rack == NULL) { 20106 INP_WUNLOCK(inp); 20107 return (EINVAL); 20108 } 20109 #ifdef INET6 20110 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20111 #endif 20112 #ifdef INET 20113 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20114 #endif 20115 20116 switch (sopt->sopt_level) { 20117 #ifdef INET6 20118 case IPPROTO_IPV6: 20119 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20120 switch (sopt->sopt_name) { 20121 case IPV6_USE_MIN_MTU: 20122 tcp6_use_min_mtu(tp); 20123 break; 20124 case IPV6_TCLASS: 20125 /* 20126 * The DSCP codepoint has changed, update the fsb. 20127 */ 20128 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20129 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20130 break; 20131 } 20132 INP_WUNLOCK(inp); 20133 return (0); 20134 #endif 20135 #ifdef INET 20136 case IPPROTO_IP: 20137 switch (sopt->sopt_name) { 20138 case IP_TOS: 20139 /* 20140 * The DSCP codepoint has changed, update the fsb. 20141 */ 20142 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20143 break; 20144 case IP_TTL: 20145 /* 20146 * The TTL has changed, update the fsb. 
20147 */ 20148 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20149 break; 20150 } 20151 INP_WUNLOCK(inp); 20152 return (0); 20153 #endif 20154 } 20155 20156 switch (sopt->sopt_name) { 20157 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20158 /* Pacing related ones */ 20159 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20160 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20161 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20162 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20163 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20164 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20165 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20166 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20167 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20168 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20169 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20170 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20171 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20172 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20173 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20174 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20175 /* End pacing related */ 20176 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20177 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20178 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20179 case TCP_RACK_MIN_TO: /* URL:min_to */ 20180 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20181 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20182 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20183 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20184 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20185 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20186 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20187 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20188 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20189 case TCP_NO_PRR: /* URL:noprr */ 20190 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20191 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20192 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20193 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20194 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20195 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20196 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20197 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20198 case TCP_RACK_PROFILE: /* URL:profile */ 20199 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20200 case TCP_RACK_ABC_VAL: /* URL:labc */ 20201 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20202 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20203 case TCP_DEFER_OPTIONS: /* URL:defer */ 20204 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20205 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20206 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20207 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20208 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20209 break; 20210 default: 20211 /* Filter off all unknown options to the base stack */ 20212 return (tcp_default_ctloutput(inp, sopt)); 20213 break; 20214 } 20215 INP_WUNLOCK(inp); 20216 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20217 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20218 /* 20219 * We truncate it down to 32 bits for the socket-option trace this 20220 * means rates > 34Gbps won't show right, but thats probably ok. 
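 * (2^32 bytes per second is roughly 4.29e9 * 8 = about 34.4 Gbit/s,
 * which is where that figure comes from. The full 64-bit loptval is
 * still what gets applied; only the traced 32-bit copy is truncated.)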
20221 */ 20222 optval = (uint32_t)loptval; 20223 } else { 20224 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20225 /* Save it in 64 bit form too */ 20226 loptval = optval; 20227 } 20228 if (error) 20229 return (error); 20230 INP_WLOCK(inp); 20231 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20232 INP_WUNLOCK(inp); 20233 return (ECONNRESET); 20234 } 20235 if (tp->t_fb != &__tcp_rack) { 20236 INP_WUNLOCK(inp); 20237 return (ENOPROTOOPT); 20238 } 20239 if (rack->defer_options && (rack->gp_ready == 0) && 20240 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20241 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20242 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20243 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20244 /* Options are beind deferred */ 20245 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20246 INP_WUNLOCK(inp); 20247 return (0); 20248 } else { 20249 /* No memory to defer, fail */ 20250 INP_WUNLOCK(inp); 20251 return (ENOMEM); 20252 } 20253 } 20254 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20255 INP_WUNLOCK(inp); 20256 return (error); 20257 } 20258 20259 static void 20260 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20261 { 20262 20263 INP_WLOCK_ASSERT(tp->t_inpcb); 20264 bzero(ti, sizeof(*ti)); 20265 20266 ti->tcpi_state = tp->t_state; 20267 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20268 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20269 if (tp->t_flags & TF_SACK_PERMIT) 20270 ti->tcpi_options |= TCPI_OPT_SACK; 20271 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20272 ti->tcpi_options |= TCPI_OPT_WSCALE; 20273 ti->tcpi_snd_wscale = tp->snd_scale; 20274 ti->tcpi_rcv_wscale = tp->rcv_scale; 20275 } 20276 if (tp->t_flags2 & TF2_ECN_PERMIT) 20277 ti->tcpi_options |= TCPI_OPT_ECN; 20278 if (tp->t_flags & TF_FASTOPEN) 20279 ti->tcpi_options |= TCPI_OPT_TFO; 20280 /* still kept in ticks is t_rcvtime */ 20281 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20282 /* Since we hold everything in precise useconds this is easy */ 20283 ti->tcpi_rtt = tp->t_srtt; 20284 ti->tcpi_rttvar = tp->t_rttvar; 20285 ti->tcpi_rto = tp->t_rxtcur; 20286 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20287 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20288 /* 20289 * FreeBSD-specific extension fields for tcp_info. 20290 */ 20291 ti->tcpi_rcv_space = tp->rcv_wnd; 20292 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20293 ti->tcpi_snd_wnd = tp->snd_wnd; 20294 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 20295 ti->tcpi_snd_nxt = tp->snd_nxt; 20296 ti->tcpi_snd_mss = tp->t_maxseg; 20297 ti->tcpi_rcv_mss = tp->t_maxseg; 20298 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 20299 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 20300 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 20301 #ifdef NETFLIX_STATS 20302 ti->tcpi_total_tlp = tp->t_sndtlppack; 20303 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 20304 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 20305 #endif 20306 #ifdef TCP_OFFLOAD 20307 if (tp->t_flags & TF_TOE) { 20308 ti->tcpi_options |= TCPI_OPT_TOE; 20309 tcp_offload_tcp_info(tp, ti); 20310 } 20311 #endif 20312 } 20313 20314 static int 20315 rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt) 20316 { 20317 struct tcpcb *tp; 20318 struct tcp_rack *rack; 20319 int32_t error, optval; 20320 uint64_t val, loptval; 20321 struct tcp_info ti; 20322 /* 20323 * Because all our options are either boolean or an int, we can just 20324 * pull everything into optval and then unlock and copy. If we ever 20325 * add a option that is not a int, then this will have quite an 20326 * impact to this routine. 20327 */ 20328 error = 0; 20329 tp = intotcpcb(inp); 20330 rack = (struct tcp_rack *)tp->t_fb_ptr; 20331 if (rack == NULL) { 20332 INP_WUNLOCK(inp); 20333 return (EINVAL); 20334 } 20335 switch (sopt->sopt_name) { 20336 case TCP_INFO: 20337 /* First get the info filled */ 20338 rack_fill_info(tp, &ti); 20339 /* Fix up the rtt related fields if needed */ 20340 INP_WUNLOCK(inp); 20341 error = sooptcopyout(sopt, &ti, sizeof ti); 20342 return (error); 20343 /* 20344 * Beta is the congestion control value for NewReno that influences how 20345 * much of a backoff happens when loss is detected. It is normally set 20346 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 20347 * when you exit recovery. 20348 */ 20349 case TCP_RACK_PACING_BETA: 20350 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 20351 error = EINVAL; 20352 else if (rack->rc_pacing_cc_set == 0) 20353 optval = rack->r_ctl.rc_saved_beta.beta; 20354 else { 20355 /* 20356 * Reach out into the CC data and report back what 20357 * I have previously set. Yeah it looks hackish but 20358 * we don't want to report the saved values. 20359 */ 20360 if (tp->ccv->cc_data) 20361 optval = ((struct newreno *)tp->ccv->cc_data)->beta; 20362 else 20363 error = EINVAL; 20364 } 20365 break; 20366 /* 20367 * Beta_ecn is the congestion control value for NewReno that influences how 20368 * much of a backoff happens when a ECN mark is detected. It is normally set 20369 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 20370 * you exit recovery. Note that classic ECN has a beta of 50, it is only 20371 * ABE Ecn that uses this "less" value, but we do too with pacing :) 20372 */ 20373 20374 case TCP_RACK_PACING_BETA_ECN: 20375 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 20376 error = EINVAL; 20377 else if (rack->rc_pacing_cc_set == 0) 20378 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 20379 else { 20380 /* 20381 * Reach out into the CC data and report back what 20382 * I have previously set. Yeah it looks hackish but 20383 * we don't want to report the saved values. 

static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN has a
	 * beta of 50; it is only ABE ECN that uses this lower value, but we do
	 * too with pacing :)
	 */

	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->ccv->flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
		break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* Max segments used in early recovery */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}
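
/*
 * Illustrative sketch (not part of this file): the read side above is
 * reached through getsockopt(2).  Everything except TCP_PACING_RATE_CAP
 * comes back as a 32-bit int; the rate cap is 64 bits, and the pacing
 * beta options only succeed when NewReno is the congestion control
 * algorithm (checked above).  A beta of 50 means cwnd is cut to 50% of
 * its prior value on exiting recovery.  Hypothetical snippet, socket
 * setup assumed:
 *
 *	int beta;
 *	uint64_t cap;
 *	socklen_t len;
 *
 *	len = sizeof(beta);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_RACK_PACING_BETA, &beta,
 *	    &len) == 0)
 *		printf("pacing beta %d%%\n", beta);
 *	len = sizeof(cap);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_PACING_RATE_CAP, &cap,
 *	    &len) == 0)
 *		printf("pacing rate cap %ju\n", (uintmax_t)cap);
 */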

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
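
/*
 * Illustrative sketch (not part of this file): once this module is
 * loaded it registers under the names in rack_stack_names[] (normally
 * "rack").  A userland application can then select the stack on a
 * per-socket basis with the TCP_FUNCTION_BLK socket option, e.g.:
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack",
 *	    sizeof(tfs.function_set_name));
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs,
 *	    sizeof(tfs)) == -1)
 *		err(1, "TCP_FUNCTION_BLK");
 *
 * Alternatively the stack can be made the system-wide default through
 * the net.inet.tcp.functions_default sysctl.  The snippet assumes an
 * already-created TCP socket descriptor fd.
 */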