1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 #include "opt_inet6.h" 32 #include "opt_ipsec.h" 33 #include "opt_tcpdebug.h" 34 #include "opt_ratelimit.h" 35 #include "opt_kern_tls.h" 36 #include <sys/param.h> 37 #include <sys/arb.h> 38 #include <sys/module.h> 39 #include <sys/kernel.h> 40 #ifdef TCP_HHOOK 41 #include <sys/hhook.h> 42 #endif 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/mbuf.h> 48 #include <sys/proc.h> /* for proc0 declaration */ 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sysctl.h> 52 #include <sys/systm.h> 53 #ifdef STATS 54 #include <sys/qmath.h> 55 #include <sys/tree.h> 56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 57 #else 58 #include <sys/tree.h> 59 #endif 60 #include <sys/refcount.h> 61 #include <sys/queue.h> 62 #include <sys/tim_filter.h> 63 #include <sys/smp.h> 64 #include <sys/kthread.h> 65 #include <sys/kern_prefetch.h> 66 #include <sys/protosw.h> 67 #ifdef TCP_ACCOUNTING 68 #include <sys/sched.h> 69 #include <machine/cpu.h> 70 #endif 71 #include <vm/uma.h> 72 73 #include <net/route.h> 74 #include <net/route/nhop.h> 75 #include <net/vnet.h> 76 77 #define TCPSTATES /* for logging */ 78 79 #include <netinet/in.h> 80 #include <netinet/in_kdtrace.h> 81 #include <netinet/in_pcb.h> 82 #include <netinet/ip.h> 83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 85 #include <netinet/ip_var.h> 86 #include <netinet/ip6.h> 87 #include <netinet6/in6_pcb.h> 88 #include <netinet6/ip6_var.h> 89 #include <netinet/tcp.h> 90 #define TCPOUTFLAGS 91 #include <netinet/tcp_fsm.h> 92 #include <netinet/tcp_log_buf.h> 93 #include <netinet/tcp_seq.h> 94 #include <netinet/tcp_timer.h> 95 #include <netinet/tcp_var.h> 96 #include <netinet/tcp_syncache.h> 97 #include <netinet/tcp_hpts.h> 98 #include <netinet/tcp_ratelimit.h> 99 #include <netinet/tcp_accounting.h> 100 #include <netinet/tcpip.h> 101 #include <netinet/cc/cc.h> 102 #include <netinet/cc/cc_newreno.h> 103 #include <netinet/tcp_fastopen.h> 104 #include 
<netinet/tcp_lro.h> 105 #ifdef NETFLIX_SHARED_CWND 106 #include <netinet/tcp_shared_cwnd.h> 107 #endif 108 #ifdef TCPDEBUG 109 #include <netinet/tcp_debug.h> 110 #endif /* TCPDEBUG */ 111 #ifdef TCP_OFFLOAD 112 #include <netinet/tcp_offload.h> 113 #endif 114 #ifdef INET6 115 #include <netinet6/tcp6_var.h> 116 #endif 117 #include <netinet/tcp_ecn.h> 118 119 #include <netipsec/ipsec_support.h> 120 121 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 122 #include <netipsec/ipsec.h> 123 #include <netipsec/ipsec6.h> 124 #endif /* IPSEC */ 125 126 #include <netinet/udp.h> 127 #include <netinet/udp_var.h> 128 #include <machine/in_cksum.h> 129 130 #ifdef MAC 131 #include <security/mac/mac_framework.h> 132 #endif 133 #include "sack_filter.h" 134 #include "tcp_rack.h" 135 #include "rack_bbr_common.h" 136 137 uma_zone_t rack_zone; 138 uma_zone_t rack_pcb_zone; 139 140 #ifndef TICKS2SBT 141 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 142 #endif 143 144 VNET_DECLARE(uint32_t, newreno_beta); 145 VNET_DECLARE(uint32_t, newreno_beta_ecn); 146 #define V_newreno_beta VNET(newreno_beta) 147 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 148 149 150 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block"); 151 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options"); 152 153 struct sysctl_ctx_list rack_sysctl_ctx; 154 struct sysctl_oid *rack_sysctl_root; 155 156 #define CUM_ACKED 1 157 #define SACKED 2 158 159 /* 160 * The RACK module incorporates a number of 161 * TCP ideas that have been put out into the IETF 162 * over the last few years: 163 * - Matt Mathis's Rate Halving which slowly drops 164 * the congestion window so that the ack clock can 165 * be maintained during a recovery. 166 * - Yuchung Cheng's RACK TCP (for which its named) that 167 * will stop us using the number of dup acks and instead 168 * use time as the gage of when we retransmit. 169 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 170 * of Dukkipati et.al. 171 * RACK depends on SACK, so if an endpoint arrives that 172 * cannot do SACK the state machine below will shuttle the 173 * connection back to using the "default" TCP stack that is 174 * in FreeBSD. 175 * 176 * To implement RACK the original TCP stack was first decomposed 177 * into a functional state machine with individual states 178 * for each of the possible TCP connection states. The do_segment 179 * functions role in life is to mandate the connection supports SACK 180 * initially and then assure that the RACK state matches the conenction 181 * state before calling the states do_segment function. Each 182 * state is simplified due to the fact that the original do_segment 183 * has been decomposed and we *know* what state we are in (no 184 * switches on the state) and all tests for SACK are gone. This 185 * greatly simplifies what each state does. 186 * 187 * TCP output is also over-written with a new version since it 188 * must maintain the new rack scoreboard. 
189 * 190 */ 191 static int32_t rack_tlp_thresh = 1; 192 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 193 static int32_t rack_tlp_use_greater = 1; 194 static int32_t rack_reorder_thresh = 2; 195 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 196 * - 60 seconds */ 197 static uint8_t rack_req_measurements = 1; 198 /* Attack threshold detections */ 199 static uint32_t rack_highest_sack_thresh_seen = 0; 200 static uint32_t rack_highest_move_thresh_seen = 0; 201 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 202 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */ 203 static int32_t rack_hw_rate_caps = 1; /* 1; */ 204 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 205 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 206 static int32_t rack_hw_up_only = 1; 207 static int32_t rack_stats_gets_ms_rtt = 1; 208 static int32_t rack_prr_addbackmax = 2; 209 static int32_t rack_do_hystart = 0; 210 static int32_t rack_apply_rtt_with_reduced_conf = 0; 211 212 static int32_t rack_pkt_delay = 1000; 213 static int32_t rack_send_a_lot_in_prr = 1; 214 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 215 static int32_t rack_verbose_logging = 0; 216 static int32_t rack_ignore_data_after_close = 1; 217 static int32_t rack_enable_shared_cwnd = 1; 218 static int32_t rack_use_cmp_acks = 1; 219 static int32_t rack_use_fsb = 1; 220 static int32_t rack_use_rfo = 1; 221 static int32_t rack_use_rsm_rfo = 1; 222 static int32_t rack_max_abc_post_recovery = 2; 223 static int32_t rack_client_low_buf = 0; 224 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 225 #ifdef TCP_ACCOUNTING 226 static int32_t rack_tcp_accounting = 0; 227 #endif 228 static int32_t rack_limits_scwnd = 1; 229 static int32_t rack_enable_mqueue_for_nonpaced = 0; 230 static int32_t rack_disable_prr = 0; 231 static int32_t use_rack_rr = 1; 232 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 233 static int32_t rack_persist_min = 250000; /* 250usec */ 234 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 235 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 236 static int32_t rack_default_init_window = 0; /* Use system default */ 237 static int32_t rack_limit_time_with_srtt = 0; 238 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 239 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */ 240 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 241 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 242 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 243 /* 244 * Currently regular tcp has a rto_min of 30ms 245 * the backoff goes 12 times so that ends up 246 * being a total of 122.850 seconds before a 247 * connection is killed. 
248 */ 249 static uint32_t rack_def_data_window = 20; 250 static uint32_t rack_goal_bdp = 2; 251 static uint32_t rack_min_srtts = 1; 252 static uint32_t rack_min_measure_usec = 0; 253 static int32_t rack_tlp_min = 10000; /* 10ms */ 254 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 255 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 256 static const int32_t rack_free_cache = 2; 257 static int32_t rack_hptsi_segments = 40; 258 static int32_t rack_rate_sample_method = USE_RTT_LOW; 259 static int32_t rack_pace_every_seg = 0; 260 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 261 static int32_t rack_slot_reduction = 4; 262 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 263 static int32_t rack_cwnd_block_ends_measure = 0; 264 static int32_t rack_rwnd_block_ends_measure = 0; 265 static int32_t rack_def_profile = 0; 266 267 static int32_t rack_lower_cwnd_at_tlp = 0; 268 static int32_t rack_limited_retran = 0; 269 static int32_t rack_always_send_oldest = 0; 270 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 271 272 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 273 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 274 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 275 276 /* Probertt */ 277 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 278 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 279 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 280 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 281 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 282 283 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 284 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 285 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 286 static uint32_t rack_probertt_use_min_rtt_exit = 0; 287 static uint32_t rack_probe_rtt_sets_cwnd = 0; 288 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 289 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 290 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 291 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 292 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 293 static uint32_t rack_probertt_filter_life = 10000000; 294 static uint32_t rack_probertt_lower_within = 10; 295 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 296 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 297 static int32_t rack_probertt_clear_is = 1; 298 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 299 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 300 301 /* Part of pacing */ 302 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 303 304 /* Timely information */ 305 /* Combine these two gives the range of 'no change' to bw */ 306 /* ie the up/down provide the upper and lower bound */ 307 static int32_t 
rack_gp_per_bw_mul_up = 2; /* 2% */ 308 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 309 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 310 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 311 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 312 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multipler */ 313 static int32_t rack_gp_increase_per = 2; /* 2% increase in multipler */ 314 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 315 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 316 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 317 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 318 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 319 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 320 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 321 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 322 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 323 static int32_t rack_use_max_for_nobackoff = 0; 324 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 325 static int32_t rack_timely_no_stopping = 0; 326 static int32_t rack_down_raise_thresh = 100; 327 static int32_t rack_req_segs = 1; 328 static uint64_t rack_bw_rate_cap = 0; 329 330 /* Weird delayed ack mode */ 331 static int32_t rack_use_imac_dack = 0; 332 /* Rack specific counters */ 333 counter_u64_t rack_saw_enobuf; 334 counter_u64_t rack_saw_enobuf_hw; 335 counter_u64_t rack_saw_enetunreach; 336 counter_u64_t rack_persists_sends; 337 counter_u64_t rack_persists_acks; 338 counter_u64_t rack_persists_loss; 339 counter_u64_t rack_persists_lost_ends; 340 #ifdef INVARIANTS 341 counter_u64_t rack_adjust_map_bw; 342 #endif 343 /* Tail loss probe counters */ 344 counter_u64_t rack_tlp_tot; 345 counter_u64_t rack_tlp_newdata; 346 counter_u64_t rack_tlp_retran; 347 counter_u64_t rack_tlp_retran_bytes; 348 counter_u64_t rack_to_tot; 349 counter_u64_t rack_hot_alloc; 350 counter_u64_t rack_to_alloc; 351 counter_u64_t rack_to_alloc_hard; 352 counter_u64_t rack_to_alloc_emerg; 353 counter_u64_t rack_to_alloc_limited; 354 counter_u64_t rack_alloc_limited_conns; 355 counter_u64_t rack_split_limited; 356 357 counter_u64_t rack_multi_single_eq; 358 counter_u64_t rack_proc_non_comp_ack; 359 360 counter_u64_t rack_fto_send; 361 counter_u64_t rack_fto_rsm_send; 362 counter_u64_t rack_nfto_resend; 363 counter_u64_t rack_non_fto_send; 364 counter_u64_t rack_extended_rfo; 365 366 counter_u64_t rack_sack_proc_all; 367 counter_u64_t rack_sack_proc_short; 368 counter_u64_t rack_sack_proc_restart; 369 counter_u64_t rack_sack_attacks_detected; 370 counter_u64_t rack_sack_attacks_reversed; 371 counter_u64_t rack_sack_used_next_merge; 372 counter_u64_t rack_sack_splits; 373 counter_u64_t rack_sack_used_prev_merge; 374 counter_u64_t rack_sack_skipped_acked; 375 counter_u64_t rack_ack_total; 376 counter_u64_t rack_express_sack; 377 counter_u64_t rack_sack_total; 378 counter_u64_t rack_move_none; 379 counter_u64_t rack_move_some; 380 381 counter_u64_t rack_input_idle_reduces; 382 counter_u64_t rack_collapsed_win; 383 counter_u64_t rack_try_scwnd; 384 counter_u64_t rack_hw_pace_init_fail; 385 counter_u64_t 
rack_hw_pace_lost; 386 387 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 388 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 389 390 391 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 392 393 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 394 (tv) = (value) + slop; \ 395 if ((u_long)(tv) < (u_long)(tvmin)) \ 396 (tv) = (tvmin); \ 397 if ((u_long)(tv) > (u_long)(tvmax)) \ 398 (tv) = (tvmax); \ 399 } while (0) 400 401 static void 402 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 403 404 static int 405 rack_process_ack(struct mbuf *m, struct tcphdr *th, 406 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 407 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 408 static int 409 rack_process_data(struct mbuf *m, struct tcphdr *th, 410 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 411 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 412 static void 413 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 414 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 415 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 416 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 417 uint8_t limit_type); 418 static struct rack_sendmap * 419 rack_check_recovery_mode(struct tcpcb *tp, 420 uint32_t tsused); 421 static void 422 rack_cong_signal(struct tcpcb *tp, 423 uint32_t type, uint32_t ack, int ); 424 static void rack_counter_destroy(void); 425 static int 426 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt); 427 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 428 static void 429 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 430 static void 431 rack_do_segment(struct mbuf *m, struct tcphdr *th, 432 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 433 uint8_t iptos); 434 static void rack_dtor(void *mem, int32_t size, void *arg); 435 static void 436 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 437 uint32_t flex1, uint32_t flex2, 438 uint32_t flex3, uint32_t flex4, 439 uint32_t flex5, uint32_t flex6, 440 uint16_t flex7, uint8_t mod); 441 442 static void 443 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 444 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 445 struct rack_sendmap *rsm, uint8_t quality); 446 static struct rack_sendmap * 447 rack_find_high_nonack(struct tcp_rack *rack, 448 struct rack_sendmap *rsm); 449 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 450 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 451 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 452 static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt); 453 static void 454 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 455 tcp_seq th_ack, int line, uint8_t quality); 456 static uint32_t 457 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 458 static int32_t rack_handoff_ok(struct tcpcb *tp); 459 static int32_t rack_init(struct tcpcb *tp); 460 static void rack_init_sysctls(void); 461 static void 462 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 463 struct tcphdr *th, int entered_rec, int dup_ack_struck); 464 static void 465 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 466 uint32_t seq_out, uint16_t 
th_flags, int32_t err, uint64_t ts, 467 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 468 469 static void 470 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 471 struct rack_sendmap *rsm); 472 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 473 static int32_t rack_output(struct tcpcb *tp); 474 475 static uint32_t 476 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 477 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 478 uint32_t cts, int *moved_two); 479 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 480 static void rack_remxt_tmr(struct tcpcb *tp); 481 static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt); 482 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 483 static int32_t rack_stopall(struct tcpcb *tp); 484 static void 485 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 486 uint32_t delta); 487 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 488 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 489 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 490 static uint32_t 491 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 492 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 493 static void 494 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 495 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 496 static int 497 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 498 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 499 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 500 static int 501 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 502 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 503 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 504 static int 505 rack_do_closing(struct mbuf *m, struct tcphdr *th, 506 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 507 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 508 static int 509 rack_do_established(struct mbuf *m, struct tcphdr *th, 510 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 511 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 512 static int 513 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 514 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 515 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 516 static int 517 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 518 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 519 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 520 static int 521 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 522 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 523 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 524 static int 525 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 526 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 527 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 528 static int 529 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 530 struct socket 
*so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 531 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 532 static int 533 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 534 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 535 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 536 struct rack_sendmap * 537 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 538 uint32_t tsused); 539 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 540 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 541 static void 542 tcp_rack_partialack(struct tcpcb *tp); 543 static int 544 rack_set_profile(struct tcp_rack *rack, int prof); 545 static void 546 rack_apply_deferred_options(struct tcp_rack *rack); 547 548 int32_t rack_clear_counter=0; 549 550 static void 551 rack_set_cc_pacing(struct tcp_rack *rack) 552 { 553 struct sockopt sopt; 554 struct cc_newreno_opts opt; 555 struct newreno old, *ptr; 556 struct tcpcb *tp; 557 int error; 558 559 if (rack->rc_pacing_cc_set) 560 return; 561 562 tp = rack->rc_tp; 563 if (tp->cc_algo == NULL) { 564 /* Tcb is leaving */ 565 return; 566 } 567 rack->rc_pacing_cc_set = 1; 568 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 569 /* Not new-reno we can't play games with beta! */ 570 goto out; 571 } 572 ptr = ((struct newreno *)tp->ccv->cc_data); 573 if (CC_ALGO(tp)->ctl_output == NULL) { 574 /* Huh, why does new_reno no longer have a set function? */ 575 goto out; 576 } 577 if (ptr == NULL) { 578 /* Just the default values */ 579 old.beta = V_newreno_beta_ecn; 580 old.beta_ecn = V_newreno_beta_ecn; 581 old.newreno_flags = 0; 582 } else { 583 old.beta = ptr->beta; 584 old.beta_ecn = ptr->beta_ecn; 585 old.newreno_flags = ptr->newreno_flags; 586 } 587 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 588 sopt.sopt_dir = SOPT_SET; 589 opt.name = CC_NEWRENO_BETA; 590 opt.val = rack->r_ctl.rc_saved_beta.beta; 591 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 592 if (error) { 593 goto out; 594 } 595 /* 596 * Hack alert we need to set in our newreno_flags 597 * so that Abe behavior is also applied. 
598 */ 599 ((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 600 opt.name = CC_NEWRENO_BETA_ECN; 601 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 602 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 603 if (error) { 604 goto out; 605 } 606 /* Save off the original values for restoral */ 607 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 608 out: 609 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 610 union tcp_log_stackspecific log; 611 struct timeval tv; 612 613 ptr = ((struct newreno *)tp->ccv->cc_data); 614 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 615 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 616 if (ptr) { 617 log.u_bbr.flex1 = ptr->beta; 618 log.u_bbr.flex2 = ptr->beta_ecn; 619 log.u_bbr.flex3 = ptr->newreno_flags; 620 } 621 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 622 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 623 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 624 log.u_bbr.flex7 = rack->gp_ready; 625 log.u_bbr.flex7 <<= 1; 626 log.u_bbr.flex7 |= rack->use_fixed_rate; 627 log.u_bbr.flex7 <<= 1; 628 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 629 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 630 log.u_bbr.flex8 = 3; 631 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 632 0, &log, false, NULL, NULL, 0, &tv); 633 } 634 } 635 636 static void 637 rack_undo_cc_pacing(struct tcp_rack *rack) 638 { 639 struct newreno old, *ptr; 640 struct tcpcb *tp; 641 642 if (rack->rc_pacing_cc_set == 0) 643 return; 644 tp = rack->rc_tp; 645 rack->rc_pacing_cc_set = 0; 646 if (tp->cc_algo == NULL) 647 /* Tcb is leaving */ 648 return; 649 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 650 /* Not new-reno nothing to do! */ 651 return; 652 } 653 ptr = ((struct newreno *)tp->ccv->cc_data); 654 if (ptr == NULL) { 655 /* 656 * This happens at rack_fini() if the 657 * cc module gets freed on us. In that 658 * case we loose our "new" settings but 659 * thats ok, since the tcb is going away anyway. 660 */ 661 return; 662 } 663 /* Grab out our set values */ 664 memcpy(&old, ptr, sizeof(struct newreno)); 665 /* Copy back in the original values */ 666 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 667 /* Now save back the values we had set in (for when pacing is restored) */ 668 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 669 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 670 union tcp_log_stackspecific log; 671 struct timeval tv; 672 673 ptr = ((struct newreno *)tp->ccv->cc_data); 674 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 675 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 676 log.u_bbr.flex1 = ptr->beta; 677 log.u_bbr.flex2 = ptr->beta_ecn; 678 log.u_bbr.flex3 = ptr->newreno_flags; 679 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 680 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 681 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 682 log.u_bbr.flex7 = rack->gp_ready; 683 log.u_bbr.flex7 <<= 1; 684 log.u_bbr.flex7 |= rack->use_fixed_rate; 685 log.u_bbr.flex7 <<= 1; 686 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 687 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 688 log.u_bbr.flex8 = 4; 689 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 690 0, &log, false, NULL, NULL, 0, &tv); 691 } 692 } 693 694 #ifdef NETFLIX_PEAKRATE 695 static inline void 696 rack_update_peakrate_thr(struct tcpcb *tp) 697 { 698 /* Keep in mind that t_maxpeakrate is in B/s. 
*/ 699 uint64_t peak; 700 peak = uqmax((tp->t_maxseg * 2), 701 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 702 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 703 } 704 #endif 705 706 static int 707 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 708 { 709 uint32_t stat; 710 int32_t error; 711 712 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 713 if (error || req->newptr == NULL) 714 return error; 715 716 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 717 if (error) 718 return (error); 719 if (stat == 1) { 720 #ifdef INVARIANTS 721 printf("Clearing RACK counters\n"); 722 #endif 723 counter_u64_zero(rack_tlp_tot); 724 counter_u64_zero(rack_tlp_newdata); 725 counter_u64_zero(rack_tlp_retran); 726 counter_u64_zero(rack_tlp_retran_bytes); 727 counter_u64_zero(rack_to_tot); 728 counter_u64_zero(rack_saw_enobuf); 729 counter_u64_zero(rack_saw_enobuf_hw); 730 counter_u64_zero(rack_saw_enetunreach); 731 counter_u64_zero(rack_persists_sends); 732 counter_u64_zero(rack_persists_acks); 733 counter_u64_zero(rack_persists_loss); 734 counter_u64_zero(rack_persists_lost_ends); 735 #ifdef INVARIANTS 736 counter_u64_zero(rack_adjust_map_bw); 737 #endif 738 counter_u64_zero(rack_to_alloc_hard); 739 counter_u64_zero(rack_to_alloc_emerg); 740 counter_u64_zero(rack_sack_proc_all); 741 counter_u64_zero(rack_fto_send); 742 counter_u64_zero(rack_fto_rsm_send); 743 counter_u64_zero(rack_extended_rfo); 744 counter_u64_zero(rack_hw_pace_init_fail); 745 counter_u64_zero(rack_hw_pace_lost); 746 counter_u64_zero(rack_non_fto_send); 747 counter_u64_zero(rack_nfto_resend); 748 counter_u64_zero(rack_sack_proc_short); 749 counter_u64_zero(rack_sack_proc_restart); 750 counter_u64_zero(rack_to_alloc); 751 counter_u64_zero(rack_to_alloc_limited); 752 counter_u64_zero(rack_alloc_limited_conns); 753 counter_u64_zero(rack_split_limited); 754 counter_u64_zero(rack_multi_single_eq); 755 counter_u64_zero(rack_proc_non_comp_ack); 756 counter_u64_zero(rack_sack_attacks_detected); 757 counter_u64_zero(rack_sack_attacks_reversed); 758 counter_u64_zero(rack_sack_used_next_merge); 759 counter_u64_zero(rack_sack_used_prev_merge); 760 counter_u64_zero(rack_sack_splits); 761 counter_u64_zero(rack_sack_skipped_acked); 762 counter_u64_zero(rack_ack_total); 763 counter_u64_zero(rack_express_sack); 764 counter_u64_zero(rack_sack_total); 765 counter_u64_zero(rack_move_none); 766 counter_u64_zero(rack_move_some); 767 counter_u64_zero(rack_try_scwnd); 768 counter_u64_zero(rack_collapsed_win); 769 } 770 rack_clear_counter = 0; 771 return (0); 772 } 773 774 static void 775 rack_init_sysctls(void) 776 { 777 struct sysctl_oid *rack_counters; 778 struct sysctl_oid *rack_attack; 779 struct sysctl_oid *rack_pacing; 780 struct sysctl_oid *rack_timely; 781 struct sysctl_oid *rack_timers; 782 struct sysctl_oid *rack_tlp; 783 struct sysctl_oid *rack_misc; 784 struct sysctl_oid *rack_features; 785 struct sysctl_oid *rack_measure; 786 struct sysctl_oid *rack_probertt; 787 struct sysctl_oid *rack_hw_pacing; 788 789 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 790 SYSCTL_CHILDREN(rack_sysctl_root), 791 OID_AUTO, 792 "sack_attack", 793 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 794 "Rack Sack Attack Counters and Controls"); 795 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 796 SYSCTL_CHILDREN(rack_sysctl_root), 797 OID_AUTO, 798 "stats", 799 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 800 "Rack Counters"); 801 SYSCTL_ADD_S32(&rack_sysctl_ctx, 802 SYSCTL_CHILDREN(rack_sysctl_root), 803 OID_AUTO, "rate_sample_method", 
CTLFLAG_RW, 804 &rack_rate_sample_method , USE_RTT_LOW, 805 "What method should we use for rate sampling 0=high, 1=low "); 806 /* Probe rtt related controls */ 807 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 808 SYSCTL_CHILDREN(rack_sysctl_root), 809 OID_AUTO, 810 "probertt", 811 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 812 "ProbeRTT related Controls"); 813 SYSCTL_ADD_U16(&rack_sysctl_ctx, 814 SYSCTL_CHILDREN(rack_probertt), 815 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 816 &rack_atexit_prtt_hbp, 130, 817 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 818 SYSCTL_ADD_U16(&rack_sysctl_ctx, 819 SYSCTL_CHILDREN(rack_probertt), 820 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 821 &rack_atexit_prtt, 130, 822 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 823 SYSCTL_ADD_U16(&rack_sysctl_ctx, 824 SYSCTL_CHILDREN(rack_probertt), 825 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 826 &rack_per_of_gp_probertt, 60, 827 "What percentage of goodput do we pace at in probertt"); 828 SYSCTL_ADD_U16(&rack_sysctl_ctx, 829 SYSCTL_CHILDREN(rack_probertt), 830 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 831 &rack_per_of_gp_probertt_reduce, 10, 832 "What percentage of goodput do we reduce every gp_srtt"); 833 SYSCTL_ADD_U16(&rack_sysctl_ctx, 834 SYSCTL_CHILDREN(rack_probertt), 835 OID_AUTO, "gp_per_low", CTLFLAG_RW, 836 &rack_per_of_gp_lowthresh, 40, 837 "What percentage of goodput do we allow the multiplier to fall to"); 838 SYSCTL_ADD_U32(&rack_sysctl_ctx, 839 SYSCTL_CHILDREN(rack_probertt), 840 OID_AUTO, "time_between", CTLFLAG_RW, 841 & rack_time_between_probertt, 96000000, 842 "How many useconds between the lowest rtt falling must past before we enter probertt"); 843 SYSCTL_ADD_U32(&rack_sysctl_ctx, 844 SYSCTL_CHILDREN(rack_probertt), 845 OID_AUTO, "safety", CTLFLAG_RW, 846 &rack_probe_rtt_safety_val, 2000000, 847 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 848 SYSCTL_ADD_U32(&rack_sysctl_ctx, 849 SYSCTL_CHILDREN(rack_probertt), 850 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 851 &rack_probe_rtt_sets_cwnd, 0, 852 "Do we set the cwnd too (if always_lower is on)"); 853 SYSCTL_ADD_U32(&rack_sysctl_ctx, 854 SYSCTL_CHILDREN(rack_probertt), 855 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 856 &rack_max_drain_wait, 2, 857 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 858 SYSCTL_ADD_U32(&rack_sysctl_ctx, 859 SYSCTL_CHILDREN(rack_probertt), 860 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 861 &rack_must_drain, 1, 862 "We must drain this many gp_srtt's waiting for flight to reach goal"); 863 SYSCTL_ADD_U32(&rack_sysctl_ctx, 864 SYSCTL_CHILDREN(rack_probertt), 865 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 866 &rack_probertt_use_min_rtt_entry, 1, 867 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 868 SYSCTL_ADD_U32(&rack_sysctl_ctx, 869 SYSCTL_CHILDREN(rack_probertt), 870 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 871 &rack_probertt_use_min_rtt_exit, 0, 872 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 873 SYSCTL_ADD_U32(&rack_sysctl_ctx, 874 SYSCTL_CHILDREN(rack_probertt), 875 OID_AUTO, "length_div", CTLFLAG_RW, 876 &rack_probertt_gpsrtt_cnt_div, 0, 877 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 878 SYSCTL_ADD_U32(&rack_sysctl_ctx, 879 SYSCTL_CHILDREN(rack_probertt), 880 OID_AUTO, "length_mul", CTLFLAG_RW, 881 &rack_probertt_gpsrtt_cnt_mul, 0, 882 "How many recent 
goodput srtt periods plus hold tim does probertt last (top of fraction)"); 883 SYSCTL_ADD_U32(&rack_sysctl_ctx, 884 SYSCTL_CHILDREN(rack_probertt), 885 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 886 &rack_min_probertt_hold, 200000, 887 "What is the minimum time we hold probertt at target"); 888 SYSCTL_ADD_U32(&rack_sysctl_ctx, 889 SYSCTL_CHILDREN(rack_probertt), 890 OID_AUTO, "filter_life", CTLFLAG_RW, 891 &rack_probertt_filter_life, 10000000, 892 "What is the time for the filters life in useconds"); 893 SYSCTL_ADD_U32(&rack_sysctl_ctx, 894 SYSCTL_CHILDREN(rack_probertt), 895 OID_AUTO, "lower_within", CTLFLAG_RW, 896 &rack_probertt_lower_within, 10, 897 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 898 SYSCTL_ADD_U32(&rack_sysctl_ctx, 899 SYSCTL_CHILDREN(rack_probertt), 900 OID_AUTO, "must_move", CTLFLAG_RW, 901 &rack_min_rtt_movement, 250, 902 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 903 SYSCTL_ADD_U32(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_probertt), 905 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 906 &rack_probertt_clear_is, 1, 907 "Do we clear I/S counts on exiting probe-rtt"); 908 SYSCTL_ADD_S32(&rack_sysctl_ctx, 909 SYSCTL_CHILDREN(rack_probertt), 910 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 911 &rack_max_drain_hbp, 1, 912 "How many extra drain gpsrtt's do we get in highly buffered paths"); 913 SYSCTL_ADD_S32(&rack_sysctl_ctx, 914 SYSCTL_CHILDREN(rack_probertt), 915 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 916 &rack_hbp_thresh, 3, 917 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 918 /* Pacing related sysctls */ 919 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 920 SYSCTL_CHILDREN(rack_sysctl_root), 921 OID_AUTO, 922 "pacing", 923 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 924 "Pacing related Controls"); 925 SYSCTL_ADD_S32(&rack_sysctl_ctx, 926 SYSCTL_CHILDREN(rack_pacing), 927 OID_AUTO, "max_pace_over", CTLFLAG_RW, 928 &rack_max_per_above, 30, 929 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 930 SYSCTL_ADD_S32(&rack_sysctl_ctx, 931 SYSCTL_CHILDREN(rack_pacing), 932 OID_AUTO, "pace_to_one", CTLFLAG_RW, 933 &rack_pace_one_seg, 0, 934 "Do we allow low b/w pacing of 1MSS instead of two"); 935 SYSCTL_ADD_S32(&rack_sysctl_ctx, 936 SYSCTL_CHILDREN(rack_pacing), 937 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 938 &rack_limit_time_with_srtt, 0, 939 "Do we limit pacing time based on srtt"); 940 SYSCTL_ADD_S32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_pacing), 942 OID_AUTO, "init_win", CTLFLAG_RW, 943 &rack_default_init_window, 0, 944 "Do we have a rack initial window 0 = system default"); 945 SYSCTL_ADD_U16(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_pacing), 947 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 948 &rack_per_of_gp_ss, 250, 949 "If non zero, what percentage of goodput to pace at in slow start"); 950 SYSCTL_ADD_U16(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_pacing), 952 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 953 &rack_per_of_gp_ca, 150, 954 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 955 SYSCTL_ADD_U16(&rack_sysctl_ctx, 956 SYSCTL_CHILDREN(rack_pacing), 957 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 958 &rack_per_of_gp_rec, 200, 959 "If non zero, what percentage of goodput to pace at in recovery"); 960 SYSCTL_ADD_S32(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_pacing), 962 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 963 &rack_hptsi_segments, 40, 964 "What size is the max for TSO segments in pacing and burst mitigation"); 965 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_pacing), 967 OID_AUTO, "burst_reduces", CTLFLAG_RW, 968 &rack_slot_reduction, 4, 969 "When doing only burst mitigation what is the reduce divisor"); 970 SYSCTL_ADD_S32(&rack_sysctl_ctx, 971 SYSCTL_CHILDREN(rack_sysctl_root), 972 OID_AUTO, "use_pacing", CTLFLAG_RW, 973 &rack_pace_every_seg, 0, 974 "If set we use pacing, if clear we use only the original burst mitigation"); 975 SYSCTL_ADD_U64(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_pacing), 977 OID_AUTO, "rate_cap", CTLFLAG_RW, 978 &rack_bw_rate_cap, 0, 979 "If set we apply this value to the absolute rate cap used by pacing"); 980 SYSCTL_ADD_U8(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_sysctl_root), 982 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 983 &rack_req_measurements, 1, 984 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 985 /* Hardware pacing */ 986 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 987 SYSCTL_CHILDREN(rack_sysctl_root), 988 OID_AUTO, 989 "hdwr_pacing", 990 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 991 "Pacing related Controls"); 992 SYSCTL_ADD_S32(&rack_sysctl_ctx, 993 SYSCTL_CHILDREN(rack_hw_pacing), 994 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 995 &rack_hw_rwnd_factor, 2, 996 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 997 SYSCTL_ADD_S32(&rack_sysctl_ctx, 998 SYSCTL_CHILDREN(rack_hw_pacing), 999 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1000 &rack_enobuf_hw_boost_mult, 2, 1001 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1002 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1003 SYSCTL_CHILDREN(rack_hw_pacing), 1004 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1005 &rack_enobuf_hw_max, 2, 1006 "What is the max boost the pacing time if we see a ENOBUFS?"); 1007 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_hw_pacing), 1009 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1010 &rack_enobuf_hw_min, 2, 1011 "What is the min boost the pacing time if we see a ENOBUFS?"); 1012 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_hw_pacing), 1014 OID_AUTO, "enable", CTLFLAG_RW, 1015 &rack_enable_hw_pacing, 0, 1016 "Should RACK attempt to use hw pacing?"); 1017 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_hw_pacing), 1019 OID_AUTO, "rate_cap", CTLFLAG_RW, 1020 &rack_hw_rate_caps, 1, 1021 "Does the highest hardware pacing rate cap the rate we will send at??"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_hw_pacing), 1024 OID_AUTO, "rate_min", CTLFLAG_RW, 1025 &rack_hw_rate_min, 0, 1026 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_hw_pacing), 1029 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1030 &rack_hw_rate_to_low, 0, 1031 "If we fall below this rate, dis-engage hw pacing?"); 1032 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_hw_pacing), 1034 OID_AUTO, "up_only", CTLFLAG_RW, 1035 &rack_hw_up_only, 1, 1036 "Do we allow hw pacing to lower the rate selected?"); 1037 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1038 SYSCTL_CHILDREN(rack_hw_pacing), 1039 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1040 &rack_hw_pace_extra_slots, 2, 1041 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1042 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1043 SYSCTL_CHILDREN(rack_sysctl_root), 1044 OID_AUTO, 1045 "timely", 1046 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1047 "Rack Timely RTT 
Controls"); 1048 /* Timely based GP dynmics */ 1049 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1050 SYSCTL_CHILDREN(rack_timely), 1051 OID_AUTO, "upper", CTLFLAG_RW, 1052 &rack_gp_per_bw_mul_up, 2, 1053 "Rack timely upper range for equal b/w (in percentage)"); 1054 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1055 SYSCTL_CHILDREN(rack_timely), 1056 OID_AUTO, "lower", CTLFLAG_RW, 1057 &rack_gp_per_bw_mul_down, 4, 1058 "Rack timely lower range for equal b/w (in percentage)"); 1059 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1060 SYSCTL_CHILDREN(rack_timely), 1061 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1062 &rack_gp_rtt_maxmul, 3, 1063 "Rack timely multipler of lowest rtt for rtt_max"); 1064 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1065 SYSCTL_CHILDREN(rack_timely), 1066 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1067 &rack_gp_rtt_mindiv, 4, 1068 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1069 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1070 SYSCTL_CHILDREN(rack_timely), 1071 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1072 &rack_gp_rtt_minmul, 1, 1073 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1074 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1075 SYSCTL_CHILDREN(rack_timely), 1076 OID_AUTO, "decrease", CTLFLAG_RW, 1077 &rack_gp_decrease_per, 20, 1078 "Rack timely decrease percentage of our GP multiplication factor"); 1079 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1080 SYSCTL_CHILDREN(rack_timely), 1081 OID_AUTO, "increase", CTLFLAG_RW, 1082 &rack_gp_increase_per, 2, 1083 "Rack timely increase perentage of our GP multiplication factor"); 1084 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1085 SYSCTL_CHILDREN(rack_timely), 1086 OID_AUTO, "lowerbound", CTLFLAG_RW, 1087 &rack_per_lower_bound, 50, 1088 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1089 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1090 SYSCTL_CHILDREN(rack_timely), 1091 OID_AUTO, "upperboundss", CTLFLAG_RW, 1092 &rack_per_upper_bound_ss, 0, 1093 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1094 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1095 SYSCTL_CHILDREN(rack_timely), 1096 OID_AUTO, "upperboundca", CTLFLAG_RW, 1097 &rack_per_upper_bound_ca, 0, 1098 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1099 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1100 SYSCTL_CHILDREN(rack_timely), 1101 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1102 &rack_do_dyn_mul, 0, 1103 "Rack timely do we enable dynmaic timely goodput by default"); 1104 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1105 SYSCTL_CHILDREN(rack_timely), 1106 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1107 &rack_gp_no_rec_chg, 1, 1108 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1109 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1110 SYSCTL_CHILDREN(rack_timely), 1111 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1112 &rack_timely_dec_clear, 6, 1113 "Rack timely what threshold do we count to before another boost during b/w decent"); 1114 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1115 SYSCTL_CHILDREN(rack_timely), 1116 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1117 &rack_timely_max_push_rise, 3, 1118 "Rack timely how many times do we push up with b/w increase"); 1119 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_timely), 1121 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1122 &rack_timely_max_push_drop, 3, 1123 "Rack timely how many times do we push back on b/w decent"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_timely), 1126 OID_AUTO, "min_segs", CTLFLAG_RW, 1127 &rack_timely_min_segs, 4, 1128 "Rack timely when 
setting the cwnd what is the min num segments"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_timely), 1131 OID_AUTO, "noback_max", CTLFLAG_RW, 1132 &rack_use_max_for_nobackoff, 0, 1133 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_timely), 1136 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1137 &rack_timely_int_timely_only, 0, 1138 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1139 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_timely), 1141 OID_AUTO, "nonstop", CTLFLAG_RW, 1142 &rack_timely_no_stopping, 0, 1143 "Rack timely don't stop increase"); 1144 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_timely), 1146 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1147 &rack_down_raise_thresh, 100, 1148 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_timely), 1151 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1152 &rack_req_segs, 1, 1153 "Bottom dragging if not these many segments outstanding and room"); 1154 1155 /* TLP and Rack related parameters */ 1156 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1157 SYSCTL_CHILDREN(rack_sysctl_root), 1158 OID_AUTO, 1159 "tlp", 1160 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1161 "TLP and Rack related Controls"); 1162 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1163 SYSCTL_CHILDREN(rack_tlp), 1164 OID_AUTO, "use_rrr", CTLFLAG_RW, 1165 &use_rack_rr, 1, 1166 "Do we use Rack Rapid Recovery"); 1167 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1168 SYSCTL_CHILDREN(rack_tlp), 1169 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1170 &rack_max_abc_post_recovery, 2, 1171 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1172 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1173 SYSCTL_CHILDREN(rack_tlp), 1174 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1175 &rack_non_rxt_use_cr, 0, 1176 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1177 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1178 SYSCTL_CHILDREN(rack_tlp), 1179 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1180 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1181 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1182 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1183 SYSCTL_CHILDREN(rack_tlp), 1184 OID_AUTO, "limit", CTLFLAG_RW, 1185 &rack_tlp_limit, 2, 1186 "How many TLP's can be sent without sending new data"); 1187 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1188 SYSCTL_CHILDREN(rack_tlp), 1189 OID_AUTO, "use_greater", CTLFLAG_RW, 1190 &rack_tlp_use_greater, 1, 1191 "Should we use the rack_rtt time if its greater than srtt"); 1192 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1193 SYSCTL_CHILDREN(rack_tlp), 1194 OID_AUTO, "tlpminto", CTLFLAG_RW, 1195 &rack_tlp_min, 10000, 1196 "TLP minimum timeout per the specification (in microseconds)"); 1197 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1198 SYSCTL_CHILDREN(rack_tlp), 1199 OID_AUTO, "send_oldest", CTLFLAG_RW, 1200 &rack_always_send_oldest, 0, 1201 "Should we always send the oldest TLP and RACK-TLP"); 1202 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1203 SYSCTL_CHILDREN(rack_tlp), 1204 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1205 &rack_limited_retran, 0, 1206 "How many times can a rack timeout drive out sends"); 1207 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1208 SYSCTL_CHILDREN(rack_tlp), 1209 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1210 &rack_lower_cwnd_at_tlp, 0, 1211 "When a TLP completes a retran should we enter recovery"); 1212 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1213 SYSCTL_CHILDREN(rack_tlp), 1214 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1215 &rack_reorder_thresh, 2, 1216 "What factor for rack will be added when seeing reordering (shift right)"); 1217 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1218 SYSCTL_CHILDREN(rack_tlp), 1219 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1220 &rack_tlp_thresh, 1, 1221 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1222 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1223 SYSCTL_CHILDREN(rack_tlp), 1224 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1225 &rack_reorder_fade, 60000000, 1226 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1227 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1228 SYSCTL_CHILDREN(rack_tlp), 1229 OID_AUTO, "pktdelay", CTLFLAG_RW, 1230 &rack_pkt_delay, 1000, 1231 "Extra RACK time (in microseconds) besides reordering thresh"); 1232 1233 /* Timer related controls */ 1234 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1235 SYSCTL_CHILDREN(rack_sysctl_root), 1236 OID_AUTO, 1237 "timers", 1238 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1239 "Timer related controls"); 1240 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1241 SYSCTL_CHILDREN(rack_timers), 1242 OID_AUTO, "persmin", CTLFLAG_RW, 1243 &rack_persist_min, 250000, 1244 "What is the minimum time in microseconds between persists"); 1245 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1246 SYSCTL_CHILDREN(rack_timers), 1247 OID_AUTO, "persmax", CTLFLAG_RW, 1248 &rack_persist_max, 2000000, 1249 "What is the largest delay in microseconds between persists"); 1250 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1251 SYSCTL_CHILDREN(rack_timers), 1252 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1253 &rack_delayed_ack_time, 40000, 1254 "Delayed ack time (40ms in microseconds)"); 1255 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_timers), 1257 OID_AUTO, "minrto", CTLFLAG_RW, 1258 &rack_rto_min, 30000, 1259 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1260 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1261 SYSCTL_CHILDREN(rack_timers), 1262 OID_AUTO, "maxrto", CTLFLAG_RW, 1263 &rack_rto_max, 4000000, 1264 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1265 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1266 SYSCTL_CHILDREN(rack_timers), 1267 OID_AUTO, "minto", CTLFLAG_RW, 1268 &rack_min_to, 1000, 1269 "Minimum rack timeout in microseconds"); 1270 /* Measure controls */ 1271 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1272 SYSCTL_CHILDREN(rack_sysctl_root), 1273 OID_AUTO, 1274 "measure", 1275 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1276 "Measure related controls"); 1277 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1278 SYSCTL_CHILDREN(rack_measure), 1279 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1280 &rack_wma_divisor, 8, 1281 "When doing b/w calculation what is the divisor for the WMA"); 1282 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1283 SYSCTL_CHILDREN(rack_measure), 1284 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1285 &rack_cwnd_block_ends_measure, 0, 1286 "Does a cwnd just-return end the measurement window (app limited)"); 1287 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_measure), 1289 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1290 &rack_rwnd_block_ends_measure, 0, 1291 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1292 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_measure), 1294 OID_AUTO, "min_target", CTLFLAG_RW, 1295 &rack_def_data_window, 20, 1296 "What is the minimum target window (in mss) for a GP measurements"); 1297 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_measure), 1299 
OID_AUTO, "goal_bdp", CTLFLAG_RW, 1300 &rack_goal_bdp, 2, 1301 "What is the goal BDP to measure"); 1302 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1303 SYSCTL_CHILDREN(rack_measure), 1304 OID_AUTO, "min_srtts", CTLFLAG_RW, 1305 &rack_min_srtts, 1, 1306 "What is the goal BDP to measure"); 1307 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_measure), 1309 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1310 &rack_min_measure_usec, 0, 1311 "What is the Minimum time time for a measurement if 0, this is off"); 1312 /* Features */ 1313 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1314 SYSCTL_CHILDREN(rack_sysctl_root), 1315 OID_AUTO, 1316 "features", 1317 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1318 "Feature controls"); 1319 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1320 SYSCTL_CHILDREN(rack_features), 1321 OID_AUTO, "cmpack", CTLFLAG_RW, 1322 &rack_use_cmp_acks, 1, 1323 "Should RACK have LRO send compressed acks"); 1324 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1325 SYSCTL_CHILDREN(rack_features), 1326 OID_AUTO, "fsb", CTLFLAG_RW, 1327 &rack_use_fsb, 1, 1328 "Should RACK use the fast send block?"); 1329 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1330 SYSCTL_CHILDREN(rack_features), 1331 OID_AUTO, "rfo", CTLFLAG_RW, 1332 &rack_use_rfo, 1, 1333 "Should RACK use rack_fast_output()?"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_features), 1336 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1337 &rack_use_rsm_rfo, 1, 1338 "Should RACK use rack_fast_rsm_output()?"); 1339 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1340 SYSCTL_CHILDREN(rack_features), 1341 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1342 &rack_enable_mqueue_for_nonpaced, 0, 1343 "Should RACK use mbuf queuing for non-paced connections"); 1344 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1345 SYSCTL_CHILDREN(rack_features), 1346 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1347 &rack_do_hystart, 0, 1348 "Should RACK enable HyStart++ on connections?"); 1349 /* Misc rack controls */ 1350 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_sysctl_root), 1352 OID_AUTO, 1353 "misc", 1354 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1355 "Misc related controls"); 1356 #ifdef TCP_ACCOUNTING 1357 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1358 SYSCTL_CHILDREN(rack_misc), 1359 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1360 &rack_tcp_accounting, 0, 1361 "Should we turn on TCP accounting for all rack sessions?"); 1362 #endif 1363 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1364 SYSCTL_CHILDREN(rack_misc), 1365 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1366 &rack_apply_rtt_with_reduced_conf, 0, 1367 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1368 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1369 SYSCTL_CHILDREN(rack_misc), 1370 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1371 &rack_dsack_std_based, 3, 1372 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1373 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1374 SYSCTL_CHILDREN(rack_misc), 1375 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1376 &rack_prr_addbackmax, 2, 1377 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1378 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1379 SYSCTL_CHILDREN(rack_misc), 1380 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1381 &rack_stats_gets_ms_rtt, 1, 1382 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1383 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1384 SYSCTL_CHILDREN(rack_misc), 1385 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1386 &rack_client_low_buf, 0, 1387 "Client low buffer level (below this 
we are more aggressive in DGP exiting recovery (0 = off)?"); 1388 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1389 SYSCTL_CHILDREN(rack_misc), 1390 OID_AUTO, "defprofile", CTLFLAG_RW, 1391 &rack_def_profile, 0, 1392 "Should RACK use a default profile (0=no, num == profile num)?"); 1393 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1394 SYSCTL_CHILDREN(rack_misc), 1395 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1396 &rack_enable_shared_cwnd, 1, 1397 "Should RACK try to use the shared cwnd on connections where allowed"); 1398 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1399 SYSCTL_CHILDREN(rack_misc), 1400 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1401 &rack_limits_scwnd, 1, 1402 "Should RACK place low end time limits on the shared cwnd feature"); 1403 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1404 SYSCTL_CHILDREN(rack_misc), 1405 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1406 &rack_use_imac_dack, 0, 1407 "Should RACK try to emulate iMac delayed ack"); 1408 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1409 SYSCTL_CHILDREN(rack_misc), 1410 OID_AUTO, "no_prr", CTLFLAG_RW, 1411 &rack_disable_prr, 0, 1412 "Should RACK not use prr and only pace (must have pacing on)"); 1413 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1414 SYSCTL_CHILDREN(rack_misc), 1415 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1416 &rack_verbose_logging, 0, 1417 "Should RACK black box logging be verbose"); 1418 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1419 SYSCTL_CHILDREN(rack_misc), 1420 OID_AUTO, "data_after_close", CTLFLAG_RW, 1421 &rack_ignore_data_after_close, 1, 1422 "Do we hold off sending a RST until all pending data is ack'd"); 1423 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1424 SYSCTL_CHILDREN(rack_misc), 1425 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1426 &rack_sack_not_required, 1, 1427 "Do we allow rack to run on connections not supporting SACK"); 1428 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1429 SYSCTL_CHILDREN(rack_misc), 1430 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1431 &rack_send_a_lot_in_prr, 1, 1432 "Send a lot in prr"); 1433 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_misc), 1435 OID_AUTO, "autoscale", CTLFLAG_RW, 1436 &rack_autosndbuf_inc, 20, 1437 "What percentage should rack scale up its snd buffer by?"); 1438 /* Sack Attacker detection stuff */ 1439 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1440 SYSCTL_CHILDREN(rack_attack), 1441 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1442 &rack_highest_sack_thresh_seen, 0, 1443 "Highest sack to ack ratio seen"); 1444 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1445 SYSCTL_CHILDREN(rack_attack), 1446 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1447 &rack_highest_move_thresh_seen, 0, 1448 "Highest move to non-move ratio seen"); 1449 rack_ack_total = counter_u64_alloc(M_WAITOK); 1450 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1451 SYSCTL_CHILDREN(rack_attack), 1452 OID_AUTO, "acktotal", CTLFLAG_RD, 1453 &rack_ack_total, 1454 "Total number of Ack's"); 1455 rack_express_sack = counter_u64_alloc(M_WAITOK); 1456 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1457 SYSCTL_CHILDREN(rack_attack), 1458 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1459 &rack_express_sack, 1460 "Total expresss number of Sack's"); 1461 rack_sack_total = counter_u64_alloc(M_WAITOK); 1462 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1463 SYSCTL_CHILDREN(rack_attack), 1464 OID_AUTO, "sacktotal", CTLFLAG_RD, 1465 &rack_sack_total, 1466 "Total number of SACKs"); 1467 rack_move_none = counter_u64_alloc(M_WAITOK); 1468 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1469 SYSCTL_CHILDREN(rack_attack), 1470 OID_AUTO, "move_none", CTLFLAG_RD, 1471 &rack_move_none, 1472 "Total number of SACK index reuse of postions under threshold"); 1473 
rack_move_some = counter_u64_alloc(M_WAITOK); 1474 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1475 SYSCTL_CHILDREN(rack_attack), 1476 OID_AUTO, "move_some", CTLFLAG_RD, 1477 &rack_move_some, 1478 "Total number of SACK index reuse of positions over threshold"); 1479 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1480 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1481 SYSCTL_CHILDREN(rack_attack), 1482 OID_AUTO, "attacks", CTLFLAG_RD, 1483 &rack_sack_attacks_detected, 1484 "Total number of SACK attackers that had sack disabled"); 1485 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1486 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_attack), 1488 OID_AUTO, "reversed", CTLFLAG_RD, 1489 &rack_sack_attacks_reversed, 1490 "Total number of SACK attackers that were later determined false positive"); 1491 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1492 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_attack), 1494 OID_AUTO, "nextmerge", CTLFLAG_RD, 1495 &rack_sack_used_next_merge, 1496 "Total number of times we used the next merge"); 1497 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1498 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1499 SYSCTL_CHILDREN(rack_attack), 1500 OID_AUTO, "prevmerge", CTLFLAG_RD, 1501 &rack_sack_used_prev_merge, 1502 "Total number of times we used the prev merge"); 1503 /* Counters */ 1504 rack_fto_send = counter_u64_alloc(M_WAITOK); 1505 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1506 SYSCTL_CHILDREN(rack_counters), 1507 OID_AUTO, "fto_send", CTLFLAG_RD, 1508 &rack_fto_send, "Total number of rack_fast_output sends"); 1509 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1510 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1511 SYSCTL_CHILDREN(rack_counters), 1512 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1513 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1514 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1516 SYSCTL_CHILDREN(rack_counters), 1517 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1518 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1519 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1520 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1521 SYSCTL_CHILDREN(rack_counters), 1522 OID_AUTO, "nfto_send", CTLFLAG_RD, 1523 &rack_non_fto_send, "Total number of rack_output first sends"); 1524 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1525 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1526 SYSCTL_CHILDREN(rack_counters), 1527 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1528 &rack_extended_rfo, "Total number of times we extended rfo"); 1529 1530 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1531 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_counters), 1533 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1534 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1535 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1536 1537 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1538 SYSCTL_CHILDREN(rack_counters), 1539 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1540 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it was initialized"); 1541 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1542 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1543 SYSCTL_CHILDREN(rack_counters), 1544 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1545 &rack_tlp_tot, 1546 "Total number of tail loss probe expirations"); 1547 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1548 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1549
SYSCTL_CHILDREN(rack_counters), 1550 OID_AUTO, "tlp_new", CTLFLAG_RD, 1551 &rack_tlp_newdata, 1552 "Total number of tail loss probe sending new data"); 1553 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1554 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1555 SYSCTL_CHILDREN(rack_counters), 1556 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1557 &rack_tlp_retran, 1558 "Total number of tail loss probe sending retransmitted data"); 1559 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1560 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1561 SYSCTL_CHILDREN(rack_counters), 1562 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1563 &rack_tlp_retran_bytes, 1564 "Total bytes of tail loss probe sending retransmitted data"); 1565 rack_to_tot = counter_u64_alloc(M_WAITOK); 1566 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1567 SYSCTL_CHILDREN(rack_counters), 1568 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1569 &rack_to_tot, 1570 "Total number of times the rack to expired"); 1571 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1572 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1573 SYSCTL_CHILDREN(rack_counters), 1574 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1575 &rack_saw_enobuf, 1576 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1577 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1581 &rack_saw_enobuf_hw, 1582 "Total number of times a send returned enobuf for hdwr paced connections"); 1583 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_counters), 1586 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1587 &rack_saw_enetunreach, 1588 "Total number of times a send received a enetunreachable"); 1589 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1590 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1591 SYSCTL_CHILDREN(rack_counters), 1592 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1593 &rack_hot_alloc, 1594 "Total allocations from the top of our list"); 1595 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1596 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1597 SYSCTL_CHILDREN(rack_counters), 1598 OID_AUTO, "allocs", CTLFLAG_RD, 1599 &rack_to_alloc, 1600 "Total allocations of tracking structures"); 1601 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1602 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1603 SYSCTL_CHILDREN(rack_counters), 1604 OID_AUTO, "allochard", CTLFLAG_RD, 1605 &rack_to_alloc_hard, 1606 "Total allocations done with sleeping the hard way"); 1607 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1608 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1609 SYSCTL_CHILDREN(rack_counters), 1610 OID_AUTO, "allocemerg", CTLFLAG_RD, 1611 &rack_to_alloc_emerg, 1612 "Total allocations done from emergency cache"); 1613 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1614 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1615 SYSCTL_CHILDREN(rack_counters), 1616 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1617 &rack_to_alloc_limited, 1618 "Total allocations dropped due to limit"); 1619 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1620 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1621 SYSCTL_CHILDREN(rack_counters), 1622 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1623 &rack_alloc_limited_conns, 1624 "Connections with allocations dropped due to limit"); 1625 rack_split_limited = counter_u64_alloc(M_WAITOK); 1626 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1627 SYSCTL_CHILDREN(rack_counters), 1628 OID_AUTO, "split_limited", CTLFLAG_RD, 1629 
&rack_split_limited, 1630 "Split allocations dropped due to limit"); 1631 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1632 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1633 SYSCTL_CHILDREN(rack_counters), 1634 OID_AUTO, "persist_sends", CTLFLAG_RD, 1635 &rack_persists_sends, 1636 "Number of times we sent a persist probe"); 1637 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1638 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1639 SYSCTL_CHILDREN(rack_counters), 1640 OID_AUTO, "persist_acks", CTLFLAG_RD, 1641 &rack_persists_acks, 1642 "Number of times a persist probe was acked"); 1643 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1644 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1645 SYSCTL_CHILDREN(rack_counters), 1646 OID_AUTO, "persist_loss", CTLFLAG_RD, 1647 &rack_persists_loss, 1648 "Number of times we detected a lost persist probe (no ack)"); 1649 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1650 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1651 SYSCTL_CHILDREN(rack_counters), 1652 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1653 &rack_persists_lost_ends, 1654 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1655 #ifdef INVARIANTS 1656 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1657 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1658 SYSCTL_CHILDREN(rack_counters), 1659 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1660 &rack_adjust_map_bw, 1661 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1662 #endif 1663 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1664 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1665 SYSCTL_CHILDREN(rack_counters), 1666 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1667 &rack_multi_single_eq, 1668 "Number of compressed acks total represented"); 1669 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1670 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1671 SYSCTL_CHILDREN(rack_counters), 1672 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1673 &rack_proc_non_comp_ack, 1674 "Number of non compresseds acks that we processed"); 1675 1676 1677 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1678 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1679 SYSCTL_CHILDREN(rack_counters), 1680 OID_AUTO, "sack_long", CTLFLAG_RD, 1681 &rack_sack_proc_all, 1682 "Total times we had to walk whole list for sack processing"); 1683 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1684 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1685 SYSCTL_CHILDREN(rack_counters), 1686 OID_AUTO, "sack_restart", CTLFLAG_RD, 1687 &rack_sack_proc_restart, 1688 "Total times we had to walk whole list due to a restart"); 1689 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1690 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1691 SYSCTL_CHILDREN(rack_counters), 1692 OID_AUTO, "sack_short", CTLFLAG_RD, 1693 &rack_sack_proc_short, 1694 "Total times we took shortcut for sack processing"); 1695 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1696 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1697 SYSCTL_CHILDREN(rack_attack), 1698 OID_AUTO, "skipacked", CTLFLAG_RD, 1699 &rack_sack_skipped_acked, 1700 "Total number of times we skipped previously sacked"); 1701 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1702 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1703 SYSCTL_CHILDREN(rack_attack), 1704 OID_AUTO, "ofsplit", CTLFLAG_RD, 1705 &rack_sack_splits, 1706 "Total number of times we did the old fashion tree split"); 1707 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1708 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1709 SYSCTL_CHILDREN(rack_counters), 1710 
OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1711 &rack_input_idle_reduces, 1712 "Total number of idle reductions on input"); 1713 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1714 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1715 SYSCTL_CHILDREN(rack_counters), 1716 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1717 &rack_collapsed_win, 1718 "Total number of collapsed windows"); 1719 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1720 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1721 SYSCTL_CHILDREN(rack_counters), 1722 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1723 &rack_try_scwnd, 1724 "Total number of scwnd attempts"); 1725 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1726 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1727 OID_AUTO, "outsize", CTLFLAG_RD, 1728 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1729 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1730 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1731 OID_AUTO, "opts", CTLFLAG_RD, 1732 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1733 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1734 SYSCTL_CHILDREN(rack_sysctl_root), 1735 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1736 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1737 } 1738 1739 static __inline int 1740 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1741 { 1742 if (SEQ_GEQ(b->r_start, a->r_start) && 1743 SEQ_LT(b->r_start, a->r_end)) { 1744 /* 1745 * The entry b is within the 1746 * block a. i.e.: 1747 * a -- |-------------| 1748 * b -- |----| 1749 * <or> 1750 * b -- |------| 1751 * <or> 1752 * b -- |-----------| 1753 */ 1754 return (0); 1755 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1756 /* 1757 * b falls as either the next 1758 * sequence block after a so a 1759 * is said to be smaller than b. 1760 * i.e: 1761 * a -- |------| 1762 * b -- |--------| 1763 * or 1764 * b -- |-----| 1765 */ 1766 return (1); 1767 } 1768 /* 1769 * Whats left is where a is 1770 * larger than b. i.e: 1771 * a -- |-------| 1772 * b -- |---| 1773 * or even possibly 1774 * b -- |--------------| 1775 */ 1776 return (-1); 1777 } 1778 1779 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1780 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1781 1782 static uint32_t 1783 rc_init_window(struct tcp_rack *rack) 1784 { 1785 uint32_t win; 1786 1787 if (rack->rc_init_win == 0) { 1788 /* 1789 * Nothing set by the user, use the system stack 1790 * default. 1791 */ 1792 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1793 } 1794 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1795 return (win); 1796 } 1797 1798 static uint64_t 1799 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1800 { 1801 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1802 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1803 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1804 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1805 else 1806 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1807 } 1808 1809 static uint64_t 1810 rack_get_bw(struct tcp_rack *rack) 1811 { 1812 if (rack->use_fixed_rate) { 1813 /* Return the fixed pacing rate */ 1814 return (rack_get_fixed_pacing_bw(rack)); 1815 } 1816 if (rack->r_ctl.gp_bw == 0) { 1817 /* 1818 * We have yet no b/w measurement, 1819 * if we have a user set initial bw 1820 * return it. 
If we don't have that and 1821 * we have an srtt, use the tcp IW (10) to 1822 * calculate a fictional b/w over the SRTT 1823 * which is more or less a guess. Note 1824 * we don't use our IW from rack on purpose 1825 * so if we have like IW=30, we are not 1826 * calculating a "huge" b/w. 1827 */ 1828 uint64_t bw, srtt; 1829 if (rack->r_ctl.init_rate) 1830 return (rack->r_ctl.init_rate); 1831 1832 /* Has the user set a max peak rate? */ 1833 #ifdef NETFLIX_PEAKRATE 1834 if (rack->rc_tp->t_maxpeakrate) 1835 return (rack->rc_tp->t_maxpeakrate); 1836 #endif 1837 /* Ok lets come up with the IW guess, if we have a srtt */ 1838 if (rack->rc_tp->t_srtt == 0) { 1839 /* 1840 * Go with old pacing method 1841 * i.e. burst mitigation only. 1842 */ 1843 return (0); 1844 } 1845 /* Ok lets get the initial TCP win (not racks) */ 1846 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1847 srtt = (uint64_t)rack->rc_tp->t_srtt; 1848 bw *= (uint64_t)USECS_IN_SECOND; 1849 bw /= srtt; 1850 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1851 bw = rack->r_ctl.bw_rate_cap; 1852 return (bw); 1853 } else { 1854 uint64_t bw; 1855 1856 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1857 /* Averaging is done, we can return the value */ 1858 bw = rack->r_ctl.gp_bw; 1859 } else { 1860 /* Still doing initial average must calculate */ 1861 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1862 } 1863 #ifdef NETFLIX_PEAKRATE 1864 if ((rack->rc_tp->t_maxpeakrate) && 1865 (bw > rack->rc_tp->t_maxpeakrate)) { 1866 /* The user has set a peak rate to pace at 1867 * don't allow us to pace faster than that. 1868 */ 1869 return (rack->rc_tp->t_maxpeakrate); 1870 } 1871 #endif 1872 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1873 bw = rack->r_ctl.bw_rate_cap; 1874 return (bw); 1875 } 1876 } 1877 1878 static uint16_t 1879 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1880 { 1881 if (rack->use_fixed_rate) { 1882 return (100); 1883 } else if (rack->in_probe_rtt && (rsm == NULL)) 1884 return (rack->r_ctl.rack_per_of_gp_probertt); 1885 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1886 rack->r_ctl.rack_per_of_gp_rec)) { 1887 if (rsm) { 1888 /* a retransmission always use the recovery rate */ 1889 return (rack->r_ctl.rack_per_of_gp_rec); 1890 } else if (rack->rack_rec_nonrxt_use_cr) { 1891 /* Directed to use the configured rate */ 1892 goto configured_rate; 1893 } else if (rack->rack_no_prr && 1894 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1895 /* No PRR, lets just use the b/w estimate only */ 1896 return (100); 1897 } else { 1898 /* 1899 * Here we may have a non-retransmit but we 1900 * have no overrides, so just use the recovery 1901 * rate (prr is in effect). 1902 */ 1903 return (rack->r_ctl.rack_per_of_gp_rec); 1904 } 1905 } 1906 configured_rate: 1907 /* For the configured rate we look at our cwnd vs the ssthresh */ 1908 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1909 return (rack->r_ctl.rack_per_of_gp_ss); 1910 else 1911 return (rack->r_ctl.rack_per_of_gp_ca); 1912 } 1913 1914 static void 1915 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1916 { 1917 /* 1918 * Types of logs (mod value) 1919 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1920 * 2 = a dsack round begins, persist is reset to 16. 
1921 * 3 = a dsack round ends 1922 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1923 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1924 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 1925 */ 1926 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1927 union tcp_log_stackspecific log; 1928 struct timeval tv; 1929 1930 memset(&log, 0, sizeof(log)); 1931 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 1932 log.u_bbr.flex1 <<= 1; 1933 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 1934 log.u_bbr.flex1 <<= 1; 1935 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 1936 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 1937 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 1938 log.u_bbr.flex4 = flex4; 1939 log.u_bbr.flex5 = flex5; 1940 log.u_bbr.flex6 = flex6; 1941 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 1942 log.u_bbr.flex8 = mod; 1943 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1944 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1945 &rack->rc_inp->inp_socket->so_rcv, 1946 &rack->rc_inp->inp_socket->so_snd, 1947 RACK_DSACK_HANDLING, 0, 1948 0, &log, false, &tv); 1949 } 1950 } 1951 1952 static void 1953 rack_log_hdwr_pacing(struct tcp_rack *rack, 1954 uint64_t rate, uint64_t hw_rate, int line, 1955 int error, uint16_t mod) 1956 { 1957 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1958 union tcp_log_stackspecific log; 1959 struct timeval tv; 1960 const struct ifnet *ifp; 1961 1962 memset(&log, 0, sizeof(log)); 1963 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 1964 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 1965 if (rack->r_ctl.crte) { 1966 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 1967 } else if (rack->rc_inp->inp_route.ro_nh && 1968 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 1969 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 1970 } else 1971 ifp = NULL; 1972 if (ifp) { 1973 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 1974 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 1975 } 1976 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1977 log.u_bbr.bw_inuse = rate; 1978 log.u_bbr.flex5 = line; 1979 log.u_bbr.flex6 = error; 1980 log.u_bbr.flex7 = mod; 1981 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 1982 log.u_bbr.flex8 = rack->use_fixed_rate; 1983 log.u_bbr.flex8 <<= 1; 1984 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 1985 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 1986 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 1987 if (rack->r_ctl.crte) 1988 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 1989 else 1990 log.u_bbr.cur_del_rate = 0; 1991 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 1992 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1993 &rack->rc_inp->inp_socket->so_rcv, 1994 &rack->rc_inp->inp_socket->so_snd, 1995 BBR_LOG_HDWR_PACE, 0, 1996 0, &log, false, &tv); 1997 } 1998 } 1999 2000 static uint64_t 2001 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2002 { 2003 /* 2004 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
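* For example, a gain of 150 applied to a bw of 1,000,000 bytes/sec gives bw_est = 1,500,000 bytes/sec, which is then floored at RACK_MIN_BW and may be capped to the highest hardware rate table entry by the checks below.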
2005 */ 2006 uint64_t bw_est, high_rate; 2007 uint64_t gain; 2008 2009 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2010 bw_est = bw * gain; 2011 bw_est /= (uint64_t)100; 2012 /* Never fall below the minimum (def 64kbps) */ 2013 if (bw_est < RACK_MIN_BW) 2014 bw_est = RACK_MIN_BW; 2015 if (rack->r_rack_hw_rate_caps) { 2016 /* Rate caps are in place */ 2017 if (rack->r_ctl.crte != NULL) { 2018 /* We have a hdwr rate already */ 2019 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2020 if (bw_est >= high_rate) { 2021 /* We are capping bw at the highest rate table entry */ 2022 rack_log_hdwr_pacing(rack, 2023 bw_est, high_rate, __LINE__, 2024 0, 3); 2025 bw_est = high_rate; 2026 if (capped) 2027 *capped = 1; 2028 } 2029 } else if ((rack->rack_hdrw_pacing == 0) && 2030 (rack->rack_hdw_pace_ena) && 2031 (rack->rack_attempt_hdwr_pace == 0) && 2032 (rack->rc_inp->inp_route.ro_nh != NULL) && 2033 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2034 /* 2035 * Special case, we have not yet attempted hardware 2036 * pacing, and yet we may, when we do, find out if we are 2037 * above the highest rate. We need to know the maxbw for the interface 2038 * in question (if it supports ratelimiting). We get back 2039 * a 0, if the interface is not found in the RL lists. 2040 */ 2041 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2042 if (high_rate) { 2043 /* Yep, we have a rate is it above this rate? */ 2044 if (bw_est > high_rate) { 2045 bw_est = high_rate; 2046 if (capped) 2047 *capped = 1; 2048 } 2049 } 2050 } 2051 } 2052 return (bw_est); 2053 } 2054 2055 static void 2056 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2057 { 2058 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2059 union tcp_log_stackspecific log; 2060 struct timeval tv; 2061 2062 if ((mod != 1) && (rack_verbose_logging == 0)) { 2063 /* 2064 * We get 3 values currently for mod 2065 * 1 - We are retransmitting and this tells the reason. 2066 * 2 - We are clearing a dup-ack count. 2067 * 3 - We are incrementing a dup-ack count. 2068 * 2069 * The clear/increment are only logged 2070 * if you have BBverbose on. 
2071 */ 2072 return; 2073 } 2074 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2075 log.u_bbr.flex1 = tsused; 2076 log.u_bbr.flex2 = thresh; 2077 log.u_bbr.flex3 = rsm->r_flags; 2078 log.u_bbr.flex4 = rsm->r_dupack; 2079 log.u_bbr.flex5 = rsm->r_start; 2080 log.u_bbr.flex6 = rsm->r_end; 2081 log.u_bbr.flex8 = mod; 2082 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2083 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2084 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2085 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2086 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2087 log.u_bbr.pacing_gain = rack->r_must_retran; 2088 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2089 &rack->rc_inp->inp_socket->so_rcv, 2090 &rack->rc_inp->inp_socket->so_snd, 2091 BBR_LOG_SETTINGS_CHG, 0, 2092 0, &log, false, &tv); 2093 } 2094 } 2095 2096 static void 2097 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2098 { 2099 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2100 union tcp_log_stackspecific log; 2101 struct timeval tv; 2102 2103 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2104 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2105 log.u_bbr.flex2 = to; 2106 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2107 log.u_bbr.flex4 = slot; 2108 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2109 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2110 log.u_bbr.flex7 = rack->rc_in_persist; 2111 log.u_bbr.flex8 = which; 2112 if (rack->rack_no_prr) 2113 log.u_bbr.pkts_out = 0; 2114 else 2115 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2116 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2117 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2118 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2119 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2120 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2121 log.u_bbr.pacing_gain = rack->r_must_retran; 2122 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2123 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2124 log.u_bbr.lost = rack_rto_min; 2125 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2126 &rack->rc_inp->inp_socket->so_rcv, 2127 &rack->rc_inp->inp_socket->so_snd, 2128 BBR_LOG_TIMERSTAR, 0, 2129 0, &log, false, &tv); 2130 } 2131 } 2132 2133 static void 2134 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2135 { 2136 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2137 union tcp_log_stackspecific log; 2138 struct timeval tv; 2139 2140 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2141 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2142 log.u_bbr.flex8 = to_num; 2143 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2144 log.u_bbr.flex2 = rack->rc_rack_rtt; 2145 if (rsm == NULL) 2146 log.u_bbr.flex3 = 0; 2147 else 2148 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2149 if (rack->rack_no_prr) 2150 log.u_bbr.flex5 = 0; 2151 else 2152 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2153 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2154 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2155 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2156 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2157 log.u_bbr.pacing_gain = rack->r_must_retran; 2158 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2159 &rack->rc_inp->inp_socket->so_rcv, 2160 &rack->rc_inp->inp_socket->so_snd, 2161 BBR_LOG_RTO, 0, 2162 0, &log, false, &tv); 2163 } 2164 } 2165 2166 static void 2167 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2168 struct rack_sendmap *prev, 2169 struct rack_sendmap *rsm, 2170 
struct rack_sendmap *next, 2171 int flag, uint32_t th_ack, int line) 2172 { 2173 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2174 union tcp_log_stackspecific log; 2175 struct timeval tv; 2176 2177 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2178 log.u_bbr.flex8 = flag; 2179 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2180 log.u_bbr.cur_del_rate = (uint64_t)prev; 2181 log.u_bbr.delRate = (uint64_t)rsm; 2182 log.u_bbr.rttProp = (uint64_t)next; 2183 log.u_bbr.flex7 = 0; 2184 if (prev) { 2185 log.u_bbr.flex1 = prev->r_start; 2186 log.u_bbr.flex2 = prev->r_end; 2187 log.u_bbr.flex7 |= 0x4; 2188 } 2189 if (rsm) { 2190 log.u_bbr.flex3 = rsm->r_start; 2191 log.u_bbr.flex4 = rsm->r_end; 2192 log.u_bbr.flex7 |= 0x2; 2193 } 2194 if (next) { 2195 log.u_bbr.flex5 = next->r_start; 2196 log.u_bbr.flex6 = next->r_end; 2197 log.u_bbr.flex7 |= 0x1; 2198 } 2199 log.u_bbr.applimited = line; 2200 log.u_bbr.pkts_out = th_ack; 2201 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2203 if (rack->rack_no_prr) 2204 log.u_bbr.lost = 0; 2205 else 2206 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2207 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2208 &rack->rc_inp->inp_socket->so_rcv, 2209 &rack->rc_inp->inp_socket->so_snd, 2210 TCP_LOG_MAPCHG, 0, 2211 0, &log, false, &tv); 2212 } 2213 } 2214 2215 static void 2216 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2217 struct rack_sendmap *rsm, int conf) 2218 { 2219 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2220 union tcp_log_stackspecific log; 2221 struct timeval tv; 2222 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2223 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2224 log.u_bbr.flex1 = t; 2225 log.u_bbr.flex2 = len; 2226 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2227 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2228 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2229 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2230 log.u_bbr.flex7 = conf; 2231 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2232 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2233 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2234 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2235 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2236 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2237 if (rsm) { 2238 log.u_bbr.pkt_epoch = rsm->r_start; 2239 log.u_bbr.lost = rsm->r_end; 2240 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2241 /* We loose any upper of the 24 bits */ 2242 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2243 } else { 2244 /* Its a SYN */ 2245 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2246 log.u_bbr.lost = 0; 2247 log.u_bbr.cwnd_gain = 0; 2248 log.u_bbr.pacing_gain = 0; 2249 } 2250 /* Write out general bits of interest rrs here */ 2251 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2252 log.u_bbr.use_lt_bw <<= 1; 2253 log.u_bbr.use_lt_bw |= rack->forced_ack; 2254 log.u_bbr.use_lt_bw <<= 1; 2255 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2256 log.u_bbr.use_lt_bw <<= 1; 2257 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2258 log.u_bbr.use_lt_bw <<= 1; 2259 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2260 log.u_bbr.use_lt_bw <<= 1; 2261 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2262 log.u_bbr.use_lt_bw <<= 1; 2263 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2264 log.u_bbr.use_lt_bw <<= 1; 2265 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2266 log.u_bbr.applimited = 
rack->r_ctl.rc_target_probertt_flight; 2267 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2268 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2269 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2270 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2271 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2272 log.u_bbr.bw_inuse <<= 32; 2273 if (rsm) 2274 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2275 TCP_LOG_EVENTP(tp, NULL, 2276 &rack->rc_inp->inp_socket->so_rcv, 2277 &rack->rc_inp->inp_socket->so_snd, 2278 BBR_LOG_BBRRTT, 0, 2279 0, &log, false, &tv); 2280 2281 2282 } 2283 } 2284 2285 static void 2286 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2287 { 2288 /* 2289 * Log the rtt sample we are 2290 * applying to the srtt algorithm in 2291 * useconds. 2292 */ 2293 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2294 union tcp_log_stackspecific log; 2295 struct timeval tv; 2296 2297 /* Convert our ms to a microsecond */ 2298 memset(&log, 0, sizeof(log)); 2299 log.u_bbr.flex1 = rtt; 2300 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2301 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2302 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2303 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2304 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2305 log.u_bbr.flex7 = 1; 2306 log.u_bbr.flex8 = rack->sack_attack_disable; 2307 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2308 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2309 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2310 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2311 log.u_bbr.pacing_gain = rack->r_must_retran; 2312 /* 2313 * We capture in delRate the upper 32 bits as 2314 * the confidence level we had declared, and the 2315 * lower 32 bits as the actual RTT using the arrival 2316 * timestamp. 
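* (A consumer of this log can therefore recover them as confidence = delRate >> 32 and rtt_us = delRate & 0xffffffff.)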
2317 */ 2318 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2319 log.u_bbr.delRate <<= 32; 2320 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2321 /* Lets capture all the things that make up t_rtxcur */ 2322 log.u_bbr.applimited = rack_rto_min; 2323 log.u_bbr.epoch = rack_rto_max; 2324 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2325 log.u_bbr.lost = rack_rto_min; 2326 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2327 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2328 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2329 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2330 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2331 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2332 &rack->rc_inp->inp_socket->so_rcv, 2333 &rack->rc_inp->inp_socket->so_snd, 2334 TCP_LOG_RTT, 0, 2335 0, &log, false, &tv); 2336 } 2337 } 2338 2339 static void 2340 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2341 { 2342 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2343 union tcp_log_stackspecific log; 2344 struct timeval tv; 2345 2346 /* Convert our ms to a microsecond */ 2347 memset(&log, 0, sizeof(log)); 2348 log.u_bbr.flex1 = rtt; 2349 log.u_bbr.flex2 = send_time; 2350 log.u_bbr.flex3 = ack_time; 2351 log.u_bbr.flex4 = where; 2352 log.u_bbr.flex7 = 2; 2353 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2354 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2355 &rack->rc_inp->inp_socket->so_rcv, 2356 &rack->rc_inp->inp_socket->so_snd, 2357 TCP_LOG_RTT, 0, 2358 0, &log, false, &tv); 2359 } 2360 } 2361 2362 2363 2364 static inline void 2365 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2366 { 2367 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2368 union tcp_log_stackspecific log; 2369 struct timeval tv; 2370 2371 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2372 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2373 log.u_bbr.flex1 = line; 2374 log.u_bbr.flex2 = tick; 2375 log.u_bbr.flex3 = tp->t_maxunacktime; 2376 log.u_bbr.flex4 = tp->t_acktime; 2377 log.u_bbr.flex8 = event; 2378 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2380 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2381 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2382 log.u_bbr.pacing_gain = rack->r_must_retran; 2383 TCP_LOG_EVENTP(tp, NULL, 2384 &rack->rc_inp->inp_socket->so_rcv, 2385 &rack->rc_inp->inp_socket->so_snd, 2386 BBR_LOG_PROGRESS, 0, 2387 0, &log, false, &tv); 2388 } 2389 } 2390 2391 static void 2392 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2393 { 2394 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2395 union tcp_log_stackspecific log; 2396 2397 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2398 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2399 log.u_bbr.flex1 = slot; 2400 if (rack->rack_no_prr) 2401 log.u_bbr.flex2 = 0; 2402 else 2403 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2404 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2405 log.u_bbr.flex8 = rack->rc_in_persist; 2406 log.u_bbr.timeStamp = cts; 2407 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2408 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2409 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2410 log.u_bbr.pacing_gain = rack->r_must_retran; 2411 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2412 &rack->rc_inp->inp_socket->so_rcv, 
2413 &rack->rc_inp->inp_socket->so_snd, 2414 BBR_LOG_BBRSND, 0, 2415 0, &log, false, tv); 2416 } 2417 } 2418 2419 static void 2420 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2421 { 2422 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2423 union tcp_log_stackspecific log; 2424 struct timeval tv; 2425 2426 memset(&log, 0, sizeof(log)); 2427 log.u_bbr.flex1 = did_out; 2428 log.u_bbr.flex2 = nxt_pkt; 2429 log.u_bbr.flex3 = way_out; 2430 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2431 if (rack->rack_no_prr) 2432 log.u_bbr.flex5 = 0; 2433 else 2434 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2435 log.u_bbr.flex6 = nsegs; 2436 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2437 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2438 log.u_bbr.flex7 <<= 1; 2439 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2440 log.u_bbr.flex7 <<= 1; 2441 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2442 log.u_bbr.flex8 = rack->rc_in_persist; 2443 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2444 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2445 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2446 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2447 log.u_bbr.use_lt_bw <<= 1; 2448 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2449 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2450 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2451 log.u_bbr.pacing_gain = rack->r_must_retran; 2452 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2453 &rack->rc_inp->inp_socket->so_rcv, 2454 &rack->rc_inp->inp_socket->so_snd, 2455 BBR_LOG_DOSEG_DONE, 0, 2456 0, &log, false, &tv); 2457 } 2458 } 2459 2460 static void 2461 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2462 { 2463 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2464 union tcp_log_stackspecific log; 2465 struct timeval tv; 2466 2467 memset(&log, 0, sizeof(log)); 2468 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2469 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2470 log.u_bbr.flex4 = arg1; 2471 log.u_bbr.flex5 = arg2; 2472 log.u_bbr.flex6 = arg3; 2473 log.u_bbr.flex8 = frm; 2474 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2475 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2476 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2477 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2478 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2479 log.u_bbr.pacing_gain = rack->r_must_retran; 2480 TCP_LOG_EVENTP(tp, NULL, 2481 &tp->t_inpcb->inp_socket->so_rcv, 2482 &tp->t_inpcb->inp_socket->so_snd, 2483 TCP_HDWR_PACE_SIZE, 0, 2484 0, &log, false, &tv); 2485 } 2486 } 2487 2488 static void 2489 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2490 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2491 { 2492 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2493 union tcp_log_stackspecific log; 2494 struct timeval tv; 2495 2496 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2497 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2498 log.u_bbr.flex1 = slot; 2499 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2500 log.u_bbr.flex4 = reason; 2501 if (rack->rack_no_prr) 2502 log.u_bbr.flex5 = 0; 2503 else 2504 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2505 log.u_bbr.flex7 = hpts_calling; 2506 log.u_bbr.flex8 = rack->rc_in_persist; 2507 log.u_bbr.lt_epoch = cwnd_to_use; 2508 
log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2509 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2510 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2511 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2512 log.u_bbr.pacing_gain = rack->r_must_retran; 2513 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2514 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2515 &rack->rc_inp->inp_socket->so_rcv, 2516 &rack->rc_inp->inp_socket->so_snd, 2517 BBR_LOG_JUSTRET, 0, 2518 tlen, &log, false, &tv); 2519 } 2520 } 2521 2522 static void 2523 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2524 struct timeval *tv, uint32_t flags_on_entry) 2525 { 2526 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2527 union tcp_log_stackspecific log; 2528 2529 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2530 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2531 log.u_bbr.flex1 = line; 2532 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2533 log.u_bbr.flex3 = flags_on_entry; 2534 log.u_bbr.flex4 = us_cts; 2535 if (rack->rack_no_prr) 2536 log.u_bbr.flex5 = 0; 2537 else 2538 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2539 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2540 log.u_bbr.flex7 = hpts_removed; 2541 log.u_bbr.flex8 = 1; 2542 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2543 log.u_bbr.timeStamp = us_cts; 2544 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2545 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2546 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2547 log.u_bbr.pacing_gain = rack->r_must_retran; 2548 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2549 &rack->rc_inp->inp_socket->so_rcv, 2550 &rack->rc_inp->inp_socket->so_snd, 2551 BBR_LOG_TIMERCANC, 0, 2552 0, &log, false, tv); 2553 } 2554 } 2555 2556 static void 2557 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2558 uint32_t flex1, uint32_t flex2, 2559 uint32_t flex3, uint32_t flex4, 2560 uint32_t flex5, uint32_t flex6, 2561 uint16_t flex7, uint8_t mod) 2562 { 2563 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2564 union tcp_log_stackspecific log; 2565 struct timeval tv; 2566 2567 if (mod == 1) { 2568 /* No you can't use 1, its for the real to cancel */ 2569 return; 2570 } 2571 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2572 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2573 log.u_bbr.flex1 = flex1; 2574 log.u_bbr.flex2 = flex2; 2575 log.u_bbr.flex3 = flex3; 2576 log.u_bbr.flex4 = flex4; 2577 log.u_bbr.flex5 = flex5; 2578 log.u_bbr.flex6 = flex6; 2579 log.u_bbr.flex7 = flex7; 2580 log.u_bbr.flex8 = mod; 2581 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2582 &rack->rc_inp->inp_socket->so_rcv, 2583 &rack->rc_inp->inp_socket->so_snd, 2584 BBR_LOG_TIMERCANC, 0, 2585 0, &log, false, &tv); 2586 } 2587 } 2588 2589 static void 2590 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2591 { 2592 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2593 union tcp_log_stackspecific log; 2594 struct timeval tv; 2595 2596 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2597 log.u_bbr.flex1 = timers; 2598 log.u_bbr.flex2 = ret; 2599 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2600 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2601 log.u_bbr.flex5 = cts; 2602 if (rack->rack_no_prr) 2603 log.u_bbr.flex6 = 0; 2604 else 2605 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2606 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2607 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2608 log.u_bbr.pacing_gain = rack->r_must_retran; 2609 log.u_bbr.timeStamp = 
tcp_get_usecs(&tv); 2610 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2611 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2612 &rack->rc_inp->inp_socket->so_rcv, 2613 &rack->rc_inp->inp_socket->so_snd, 2614 BBR_LOG_TO_PROCESS, 0, 2615 0, &log, false, &tv); 2616 } 2617 } 2618 2619 static void 2620 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2621 { 2622 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2623 union tcp_log_stackspecific log; 2624 struct timeval tv; 2625 2626 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2627 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2628 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2629 if (rack->rack_no_prr) 2630 log.u_bbr.flex3 = 0; 2631 else 2632 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2633 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2634 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2635 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2636 log.u_bbr.flex7 = line; 2637 log.u_bbr.flex8 = frm; 2638 log.u_bbr.pkts_out = orig_cwnd; 2639 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2640 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2641 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2642 log.u_bbr.use_lt_bw <<= 1; 2643 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2644 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2645 &rack->rc_inp->inp_socket->so_rcv, 2646 &rack->rc_inp->inp_socket->so_snd, 2647 BBR_LOG_BBRUPD, 0, 2648 0, &log, false, &tv); 2649 } 2650 } 2651 2652 #ifdef NETFLIX_EXP_DETECTION 2653 static void 2654 rack_log_sad(struct tcp_rack *rack, int event) 2655 { 2656 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2657 union tcp_log_stackspecific log; 2658 struct timeval tv; 2659 2660 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2661 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2662 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2663 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2664 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2665 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2666 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2667 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2668 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2669 log.u_bbr.lt_epoch |= rack->do_detection; 2670 log.u_bbr.applimited = tcp_map_minimum; 2671 log.u_bbr.flex7 = rack->sack_attack_disable; 2672 log.u_bbr.flex8 = event; 2673 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2674 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2675 log.u_bbr.delivered = tcp_sad_decay_val; 2676 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2677 &rack->rc_inp->inp_socket->so_rcv, 2678 &rack->rc_inp->inp_socket->so_snd, 2679 TCP_SAD_DETECTION, 0, 2680 0, &log, false, &tv); 2681 } 2682 } 2683 #endif 2684 2685 static void 2686 rack_counter_destroy(void) 2687 { 2688 counter_u64_free(rack_fto_send); 2689 counter_u64_free(rack_fto_rsm_send); 2690 counter_u64_free(rack_nfto_resend); 2691 counter_u64_free(rack_hw_pace_init_fail); 2692 counter_u64_free(rack_hw_pace_lost); 2693 counter_u64_free(rack_non_fto_send); 2694 counter_u64_free(rack_extended_rfo); 2695 counter_u64_free(rack_ack_total); 2696 counter_u64_free(rack_express_sack); 2697 counter_u64_free(rack_sack_total); 2698 counter_u64_free(rack_move_none); 2699 counter_u64_free(rack_move_some); 2700 counter_u64_free(rack_sack_attacks_detected); 2701 counter_u64_free(rack_sack_attacks_reversed); 2702 counter_u64_free(rack_sack_used_next_merge); 2703 counter_u64_free(rack_sack_used_prev_merge); 2704 counter_u64_free(rack_tlp_tot); 2705 counter_u64_free(rack_tlp_newdata); 2706 
counter_u64_free(rack_tlp_retran); 2707 counter_u64_free(rack_tlp_retran_bytes); 2708 counter_u64_free(rack_to_tot); 2709 counter_u64_free(rack_saw_enobuf); 2710 counter_u64_free(rack_saw_enobuf_hw); 2711 counter_u64_free(rack_saw_enetunreach); 2712 counter_u64_free(rack_hot_alloc); 2713 counter_u64_free(rack_to_alloc); 2714 counter_u64_free(rack_to_alloc_hard); 2715 counter_u64_free(rack_to_alloc_emerg); 2716 counter_u64_free(rack_to_alloc_limited); 2717 counter_u64_free(rack_alloc_limited_conns); 2718 counter_u64_free(rack_split_limited); 2719 counter_u64_free(rack_multi_single_eq); 2720 counter_u64_free(rack_proc_non_comp_ack); 2721 counter_u64_free(rack_sack_proc_all); 2722 counter_u64_free(rack_sack_proc_restart); 2723 counter_u64_free(rack_sack_proc_short); 2724 counter_u64_free(rack_sack_skipped_acked); 2725 counter_u64_free(rack_sack_splits); 2726 counter_u64_free(rack_input_idle_reduces); 2727 counter_u64_free(rack_collapsed_win); 2728 counter_u64_free(rack_try_scwnd); 2729 counter_u64_free(rack_persists_sends); 2730 counter_u64_free(rack_persists_acks); 2731 counter_u64_free(rack_persists_loss); 2732 counter_u64_free(rack_persists_lost_ends); 2733 #ifdef INVARIANTS 2734 counter_u64_free(rack_adjust_map_bw); 2735 #endif 2736 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2737 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2738 } 2739 2740 static struct rack_sendmap * 2741 rack_alloc(struct tcp_rack *rack) 2742 { 2743 struct rack_sendmap *rsm; 2744 2745 /* 2746 * First get the top of the list it in 2747 * theory is the "hottest" rsm we have, 2748 * possibly just freed by ack processing. 2749 */ 2750 if (rack->rc_free_cnt > rack_free_cache) { 2751 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2752 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2753 counter_u64_add(rack_hot_alloc, 1); 2754 rack->rc_free_cnt--; 2755 return (rsm); 2756 } 2757 /* 2758 * Once we get under our free cache we probably 2759 * no longer have a "hot" one available. Lets 2760 * get one from UMA. 2761 */ 2762 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2763 if (rsm) { 2764 rack->r_ctl.rc_num_maps_alloced++; 2765 counter_u64_add(rack_to_alloc, 1); 2766 return (rsm); 2767 } 2768 /* 2769 * Dig in to our aux rsm's (the last two) since 2770 * UMA failed to get us one. 
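* (The free list is trimmed down to, but not below, rack_free_cache entries elsewhere, so a couple of spares normally remain for this M_NOWAIT failure path.)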
2771 */ 2772 if (rack->rc_free_cnt) { 2773 counter_u64_add(rack_to_alloc_emerg, 1); 2774 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2775 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2776 rack->rc_free_cnt--; 2777 return (rsm); 2778 } 2779 return (NULL); 2780 } 2781 2782 static struct rack_sendmap * 2783 rack_alloc_full_limit(struct tcp_rack *rack) 2784 { 2785 if ((V_tcp_map_entries_limit > 0) && 2786 (rack->do_detection == 0) && 2787 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2788 counter_u64_add(rack_to_alloc_limited, 1); 2789 if (!rack->alloc_limit_reported) { 2790 rack->alloc_limit_reported = 1; 2791 counter_u64_add(rack_alloc_limited_conns, 1); 2792 } 2793 return (NULL); 2794 } 2795 return (rack_alloc(rack)); 2796 } 2797 2798 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2799 static struct rack_sendmap * 2800 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2801 { 2802 struct rack_sendmap *rsm; 2803 2804 if (limit_type) { 2805 /* currently there is only one limit type */ 2806 if (V_tcp_map_split_limit > 0 && 2807 (rack->do_detection == 0) && 2808 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2809 counter_u64_add(rack_split_limited, 1); 2810 if (!rack->alloc_limit_reported) { 2811 rack->alloc_limit_reported = 1; 2812 counter_u64_add(rack_alloc_limited_conns, 1); 2813 } 2814 return (NULL); 2815 } 2816 } 2817 2818 /* allocate and mark in the limit type, if set */ 2819 rsm = rack_alloc(rack); 2820 if (rsm != NULL && limit_type) { 2821 rsm->r_limit_type = limit_type; 2822 rack->r_ctl.rc_num_split_allocs++; 2823 } 2824 return (rsm); 2825 } 2826 2827 static void 2828 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2829 { 2830 if (rsm->r_flags & RACK_APP_LIMITED) { 2831 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2832 rack->r_ctl.rc_app_limited_cnt--; 2833 } 2834 } 2835 if (rsm->r_limit_type) { 2836 /* currently there is only one limit type */ 2837 rack->r_ctl.rc_num_split_allocs--; 2838 } 2839 if (rsm == rack->r_ctl.rc_first_appl) { 2840 if (rack->r_ctl.rc_app_limited_cnt == 0) 2841 rack->r_ctl.rc_first_appl = NULL; 2842 else { 2843 /* Follow the next one out */ 2844 struct rack_sendmap fe; 2845 2846 fe.r_start = rsm->r_nseq_appl; 2847 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2848 } 2849 } 2850 if (rsm == rack->r_ctl.rc_resend) 2851 rack->r_ctl.rc_resend = NULL; 2852 if (rsm == rack->r_ctl.rc_end_appl) 2853 rack->r_ctl.rc_end_appl = NULL; 2854 if (rack->r_ctl.rc_tlpsend == rsm) 2855 rack->r_ctl.rc_tlpsend = NULL; 2856 if (rack->r_ctl.rc_sacklast == rsm) 2857 rack->r_ctl.rc_sacklast = NULL; 2858 memset(rsm, 0, sizeof(struct rack_sendmap)); 2859 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2860 rack->rc_free_cnt++; 2861 } 2862 2863 static void 2864 rack_free_trim(struct tcp_rack *rack) 2865 { 2866 struct rack_sendmap *rsm; 2867 2868 /* 2869 * Free up all the tail entries until 2870 * we get our list down to the limit. 
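* (i.e. down to rack_free_cache entries; anything beyond that is handed back to UMA.)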
2871 */ 2872 while (rack->rc_free_cnt > rack_free_cache) { 2873 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2874 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2875 rack->rc_free_cnt--; 2876 uma_zfree(rack_zone, rsm); 2877 } 2878 } 2879 2880 2881 static uint32_t 2882 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2883 { 2884 uint64_t srtt, bw, len, tim; 2885 uint32_t segsiz, def_len, minl; 2886 2887 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2888 def_len = rack_def_data_window * segsiz; 2889 if (rack->rc_gp_filled == 0) { 2890 /* 2891 * We have no measurement (IW is in flight?) so 2892 * we can only guess using our data_window sysctl 2893 * value (usually 20MSS). 2894 */ 2895 return (def_len); 2896 } 2897 /* 2898 * Now we have a number of factors to consider. 2899 * 2900 * 1) We have a desired BDP which is usually 2901 * at least 2. 2902 * 2) We have a minimum number of rtt's, usually 1 SRTT, 2903 * but we allow it to be more. 2904 * 3) We want to make sure a measurement lasts N useconds (if 2905 * we have set rack_min_measure_usec). 2906 * 2907 * We handle the first concern here by trying to create a data 2908 * window of max(rack_def_data_window, DesiredBDP). The 2909 * second concern we handle in not letting the measurement 2910 * window end normally until at least the required SRTT's 2911 * have gone by, which is done further below in 2912 * rack_enough_for_measurement(). Finally the third concern 2913 * we also handle here by calculating how long that time 2914 * would take at the current BW and then return the 2915 * max of our first calculation and that length. Note 2916 * that if rack_min_measure_usec is 0, we don't deal 2917 * with concern 3. Also, for both concerns 1 and 3, an 2918 * application limited period could end the measurement 2919 * earlier. 2920 * 2921 * So let's calculate the BDP with the "known" b/w using 2922 * the SRTT as our rtt and then multiply it by the 2923 * goal. 2924 */ 2925 bw = rack_get_bw(rack); 2926 srtt = (uint64_t)tp->t_srtt; 2927 len = bw * srtt; 2928 len /= (uint64_t)HPTS_USEC_IN_SEC; 2929 len *= max(1, rack_goal_bdp); 2930 /* Now we need to round up to the nearest MSS */ 2931 len = roundup(len, segsiz); 2932 if (rack_min_measure_usec) { 2933 /* Now calculate our min length for this b/w */ 2934 tim = rack_min_measure_usec; 2935 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 2936 if (minl == 0) 2937 minl = 1; 2938 minl = roundup(minl, segsiz); 2939 if (len < minl) 2940 len = minl; 2941 } 2942 /* 2943 * Now if we have a very small window we want 2944 * to attempt to get the window that is 2945 * as small as possible. This happens on 2946 * low b/w connections and we don't want to 2947 * span huge numbers of rtt's between measurements. 2948 * 2949 * We basically include 2 over our "MIN window" so 2950 * that the measurement can be shortened (possibly) by 2951 * an ack'ed packet. 2952 */ 2953 if (len < def_len) 2954 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 2955 else 2956 return (max((uint32_t)len, def_len)); 2957 2958 } 2959 2960 static int 2961 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 2962 { 2963 uint32_t tim, srtts, segsiz; 2964 2965 /* 2966 * Has enough time passed for the GP measurement to be valid?
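* We say yes when everything outstanding has been acked, when the ack reaches the app limited point, or when enough bytes past gput_seq are covered and at least rack_min_srtts worth of the goodput SRTT has elapsed.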
2967 */ 2968 if ((tp->snd_max == tp->snd_una) || 2969 (th_ack == tp->snd_max)){ 2970 /* All is acked */ 2971 *quality = RACK_QUALITY_ALLACKED; 2972 return (1); 2973 } 2974 if (SEQ_LT(th_ack, tp->gput_seq)) { 2975 /* Not enough bytes yet */ 2976 return (0); 2977 } 2978 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2979 if (SEQ_LT(th_ack, tp->gput_ack) && 2980 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 2981 /* Not enough bytes yet */ 2982 return (0); 2983 } 2984 if (rack->r_ctl.rc_first_appl && 2985 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 2986 /* 2987 * We are up to the app limited send point 2988 * we have to measure irrespective of the time.. 2989 */ 2990 *quality = RACK_QUALITY_APPLIMITED; 2991 return (1); 2992 } 2993 /* Now what about time? */ 2994 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 2995 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 2996 if (tim >= srtts) { 2997 *quality = RACK_QUALITY_HIGH; 2998 return (1); 2999 } 3000 /* Nope not even a full SRTT has passed */ 3001 return (0); 3002 } 3003 3004 static void 3005 rack_log_timely(struct tcp_rack *rack, 3006 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3007 uint64_t up_bnd, int line, uint8_t method) 3008 { 3009 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3010 union tcp_log_stackspecific log; 3011 struct timeval tv; 3012 3013 memset(&log, 0, sizeof(log)); 3014 log.u_bbr.flex1 = logged; 3015 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3016 log.u_bbr.flex2 <<= 4; 3017 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3018 log.u_bbr.flex2 <<= 4; 3019 log.u_bbr.flex2 |= rack->rc_gp_incr; 3020 log.u_bbr.flex2 <<= 4; 3021 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3022 log.u_bbr.flex3 = rack->rc_gp_incr; 3023 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3024 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3025 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3026 log.u_bbr.flex7 = rack->rc_gp_bwred; 3027 log.u_bbr.flex8 = method; 3028 log.u_bbr.cur_del_rate = cur_bw; 3029 log.u_bbr.delRate = low_bnd; 3030 log.u_bbr.bw_inuse = up_bnd; 3031 log.u_bbr.rttProp = rack_get_bw(rack); 3032 log.u_bbr.pkt_epoch = line; 3033 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3034 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3035 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3036 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3037 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3038 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3039 log.u_bbr.cwnd_gain <<= 1; 3040 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3041 log.u_bbr.cwnd_gain <<= 1; 3042 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3043 log.u_bbr.cwnd_gain <<= 1; 3044 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3045 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3046 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3047 &rack->rc_inp->inp_socket->so_rcv, 3048 &rack->rc_inp->inp_socket->so_snd, 3049 TCP_TIMELY_WORK, 0, 3050 0, &log, false, &tv); 3051 } 3052 } 3053 3054 static int 3055 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3056 { 3057 /* 3058 * Before we increase we need to know if 3059 * the estimate just made was less than 3060 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3061 * 3062 * If we already are pacing at a fast enough 3063 * rate to push us faster there is no sense of 3064 * increasing. 3065 * 3066 * We first caculate our actual pacing rate (ss or ca multipler 3067 * times our cur_bw). 
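	 *
	 * Worked example (illustrative numbers only): with
	 * cur_bw = 10,000,000 bytes/sec, mult = 150,
	 * last_bw_est = 9,000,000 bytes/sec and rack_max_per_above
	 * at 10, act_rate = 10,000,000 * 150 / 100 = 15,000,000 and
	 * max_allow_rate = 9,000,000 * 110 / 100 = 9,900,000. Since
	 * act_rate >= max_allow_rate we are already pacing well above
	 * what is coming back, so we return 0 and hold steady.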
3068 * 3069 * Then we take the last measured rate and multipy by our 3070 * maximum pacing overage to give us a max allowable rate. 3071 * 3072 * If our act_rate is smaller than our max_allowable rate 3073 * then we should increase. Else we should hold steady. 3074 * 3075 */ 3076 uint64_t act_rate, max_allow_rate; 3077 3078 if (rack_timely_no_stopping) 3079 return (1); 3080 3081 if ((cur_bw == 0) || (last_bw_est == 0)) { 3082 /* 3083 * Initial startup case or 3084 * everything is acked case. 3085 */ 3086 rack_log_timely(rack, mult, cur_bw, 0, 0, 3087 __LINE__, 9); 3088 return (1); 3089 } 3090 if (mult <= 100) { 3091 /* 3092 * We can always pace at or slightly above our rate. 3093 */ 3094 rack_log_timely(rack, mult, cur_bw, 0, 0, 3095 __LINE__, 9); 3096 return (1); 3097 } 3098 act_rate = cur_bw * (uint64_t)mult; 3099 act_rate /= 100; 3100 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3101 max_allow_rate /= 100; 3102 if (act_rate < max_allow_rate) { 3103 /* 3104 * Here the rate we are actually pacing at 3105 * is smaller than 10% above our last measurement. 3106 * This means we are pacing below what we would 3107 * like to try to achieve (plus some wiggle room). 3108 */ 3109 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3110 __LINE__, 9); 3111 return (1); 3112 } else { 3113 /* 3114 * Here we are already pacing at least rack_max_per_above(10%) 3115 * what we are getting back. This indicates most likely 3116 * that we are being limited (cwnd/rwnd/app) and can't 3117 * get any more b/w. There is no sense of trying to 3118 * raise up the pacing rate its not speeding us up 3119 * and we already are pacing faster than we are getting. 3120 */ 3121 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3122 __LINE__, 8); 3123 return (0); 3124 } 3125 } 3126 3127 static void 3128 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3129 { 3130 /* 3131 * When we drag bottom, we want to assure 3132 * that no multiplier is below 1.0, if so 3133 * we want to restore it to at least that. 3134 */ 3135 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3136 /* This is unlikely we usually do not touch recovery */ 3137 rack->r_ctl.rack_per_of_gp_rec = 100; 3138 } 3139 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3140 rack->r_ctl.rack_per_of_gp_ca = 100; 3141 } 3142 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3143 rack->r_ctl.rack_per_of_gp_ss = 100; 3144 } 3145 } 3146 3147 static void 3148 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3149 { 3150 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3151 rack->r_ctl.rack_per_of_gp_ca = 100; 3152 } 3153 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3154 rack->r_ctl.rack_per_of_gp_ss = 100; 3155 } 3156 } 3157 3158 static void 3159 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3160 { 3161 int32_t calc, logged, plus; 3162 3163 logged = 0; 3164 3165 if (override) { 3166 /* 3167 * override is passed when we are 3168 * loosing b/w and making one last 3169 * gasp at trying to not loose out 3170 * to a new-reno flow. 3171 */ 3172 goto extra_boost; 3173 } 3174 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3175 if (rack->rc_gp_incr && 3176 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3177 /* 3178 * Reset and get 5 strokes more before the boost. Note 3179 * that the count is 0 based so we have to add one. 
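		 *
		 * Worked example (illustrative value): if
		 * rack_gp_increase_per were 2, the normal step below is
		 * plus = 2, but after RACK_TIMELY_CNT_BOOST increases in
		 * a row (or on an override) the extra_boost path applies
		 * plus = 2 * RACK_TIMELY_CNT_BOOST = 10 percentage points
		 * in one shot and restarts the count.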
3180 */ 3181 extra_boost: 3182 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3183 rack->rc_gp_timely_inc_cnt = 0; 3184 } else 3185 plus = (uint32_t)rack_gp_increase_per; 3186 /* Must be at least 1% increase for true timely increases */ 3187 if ((plus < 1) && 3188 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3189 plus = 1; 3190 if (rack->rc_gp_saw_rec && 3191 (rack->rc_gp_no_rec_chg == 0) && 3192 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3193 rack->r_ctl.rack_per_of_gp_rec)) { 3194 /* We have been in recovery ding it too */ 3195 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3196 if (calc > 0xffff) 3197 calc = 0xffff; 3198 logged |= 1; 3199 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3200 if (rack_per_upper_bound_ss && 3201 (rack->rc_dragged_bottom == 0) && 3202 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3203 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3204 } 3205 if (rack->rc_gp_saw_ca && 3206 (rack->rc_gp_saw_ss == 0) && 3207 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3208 rack->r_ctl.rack_per_of_gp_ca)) { 3209 /* In CA */ 3210 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3211 if (calc > 0xffff) 3212 calc = 0xffff; 3213 logged |= 2; 3214 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3215 if (rack_per_upper_bound_ca && 3216 (rack->rc_dragged_bottom == 0) && 3217 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3218 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3219 } 3220 if (rack->rc_gp_saw_ss && 3221 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3222 rack->r_ctl.rack_per_of_gp_ss)) { 3223 /* In SS */ 3224 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3225 if (calc > 0xffff) 3226 calc = 0xffff; 3227 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3228 if (rack_per_upper_bound_ss && 3229 (rack->rc_dragged_bottom == 0) && 3230 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3231 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3232 logged |= 4; 3233 } 3234 if (logged && 3235 (rack->rc_gp_incr == 0)){ 3236 /* Go into increment mode */ 3237 rack->rc_gp_incr = 1; 3238 rack->rc_gp_timely_inc_cnt = 0; 3239 } 3240 if (rack->rc_gp_incr && 3241 logged && 3242 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3243 rack->rc_gp_timely_inc_cnt++; 3244 } 3245 rack_log_timely(rack, logged, plus, 0, 0, 3246 __LINE__, 1); 3247 } 3248 3249 static uint32_t 3250 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3251 { 3252 /* 3253 * norm_grad = rtt_diff / minrtt; 3254 * new_per = curper * (1 - B * norm_grad) 3255 * 3256 * B = rack_gp_decrease_per (default 10%) 3257 * rtt_dif = input var current rtt-diff 3258 * curper = input var current percentage 3259 * minrtt = from rack filter 3260 * 3261 */ 3262 uint64_t perf; 3263 3264 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3265 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3266 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3267 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3268 (uint64_t)1000000)) / 3269 (uint64_t)1000000); 3270 if (perf > curper) { 3271 /* TSNH */ 3272 perf = curper - 1; 3273 } 3274 return ((uint32_t)perf); 3275 } 3276 3277 static uint32_t 3278 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3279 { 3280 /* 3281 * highrttthresh 3282 * result = curper * (1 - (B * ( 1 - ------ )) 3283 * gp_srtt 3284 * 3285 * B = rack_gp_decrease_per (default 10%) 3286 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3287 */ 3288 uint64_t perf; 3289 uint32_t 
highrttthresh; 3290 3291 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3292 3293 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3294 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3295 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3296 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3297 return (perf); 3298 } 3299 3300 static void 3301 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3302 { 3303 uint64_t logvar, logvar2, logvar3; 3304 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3305 3306 if (rack->rc_gp_incr) { 3307 /* Turn off increment counting */ 3308 rack->rc_gp_incr = 0; 3309 rack->rc_gp_timely_inc_cnt = 0; 3310 } 3311 ss_red = ca_red = rec_red = 0; 3312 logged = 0; 3313 /* Calculate the reduction value */ 3314 if (rtt_diff < 0) { 3315 rtt_diff *= -1; 3316 } 3317 /* Must be at least 1% reduction */ 3318 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3319 /* We have been in recovery ding it too */ 3320 if (timely_says == 2) { 3321 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3322 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3323 if (alt < new_per) 3324 val = alt; 3325 else 3326 val = new_per; 3327 } else 3328 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3329 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3330 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3331 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3332 } else { 3333 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3334 rec_red = 0; 3335 } 3336 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3337 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3338 logged |= 1; 3339 } 3340 if (rack->rc_gp_saw_ss) { 3341 /* Sent in SS */ 3342 if (timely_says == 2) { 3343 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3344 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3345 if (alt < new_per) 3346 val = alt; 3347 else 3348 val = new_per; 3349 } else 3350 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3351 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3352 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3353 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3354 } else { 3355 ss_red = new_per; 3356 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3357 logvar = new_per; 3358 logvar <<= 32; 3359 logvar |= alt; 3360 logvar2 = (uint32_t)rtt; 3361 logvar2 <<= 32; 3362 logvar2 |= (uint32_t)rtt_diff; 3363 logvar3 = rack_gp_rtt_maxmul; 3364 logvar3 <<= 32; 3365 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3366 rack_log_timely(rack, timely_says, 3367 logvar2, logvar3, 3368 logvar, __LINE__, 10); 3369 } 3370 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3371 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3372 logged |= 4; 3373 } else if (rack->rc_gp_saw_ca) { 3374 /* Sent in CA */ 3375 if (timely_says == 2) { 3376 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3377 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3378 if (alt < new_per) 3379 val = alt; 3380 else 3381 val = new_per; 3382 } else 3383 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3384 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3385 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3386 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3387 } else { 3388 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3389 ca_red = 0; 3390 logvar = new_per; 3391 logvar <<= 32; 3392 logvar |= alt; 3393 logvar2 = (uint32_t)rtt; 3394 logvar2 <<= 32; 3395 logvar2 |= (uint32_t)rtt_diff; 3396 logvar3 = rack_gp_rtt_maxmul; 3397 logvar3 <<= 32; 3398 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3399 rack_log_timely(rack, timely_says, 3400 logvar2, logvar3, 3401 logvar, __LINE__, 10); 3402 } 3403 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3404 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3405 logged |= 2; 3406 } 3407 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3408 rack->rc_gp_timely_dec_cnt++; 3409 if (rack_timely_dec_clear && 3410 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3411 rack->rc_gp_timely_dec_cnt = 0; 3412 } 3413 logvar = ss_red; 3414 logvar <<= 32; 3415 logvar |= ca_red; 3416 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3417 __LINE__, 2); 3418 } 3419 3420 static void 3421 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3422 uint32_t rtt, uint32_t line, uint8_t reas) 3423 { 3424 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3425 union tcp_log_stackspecific log; 3426 struct timeval tv; 3427 3428 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3429 log.u_bbr.flex1 = line; 3430 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3431 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3432 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3433 log.u_bbr.flex5 = rtt; 3434 log.u_bbr.flex6 = rack->rc_highly_buffered; 3435 log.u_bbr.flex6 <<= 1; 3436 log.u_bbr.flex6 |= rack->forced_ack; 3437 log.u_bbr.flex6 <<= 1; 3438 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3439 log.u_bbr.flex6 <<= 1; 3440 log.u_bbr.flex6 |= rack->in_probe_rtt; 3441 log.u_bbr.flex6 <<= 1; 3442 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3443 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3444 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3445 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3446 log.u_bbr.flex8 = reas; 3447 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3448 log.u_bbr.delRate = rack_get_bw(rack); 3449 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3450 log.u_bbr.cur_del_rate <<= 32; 3451 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3452 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3453 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3454 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3455 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3456 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3457 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3458 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3459 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3460 log.u_bbr.rttProp = us_cts; 3461 log.u_bbr.rttProp <<= 32; 3462 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3463 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3464 &rack->rc_inp->inp_socket->so_rcv, 3465 &rack->rc_inp->inp_socket->so_snd, 3466 BBR_LOG_RTT_SHRINKS, 0, 3467 0, &log, false, &rack->r_ctl.act_rcv_time); 3468 } 3469 } 3470 3471 static void 3472 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3473 { 3474 uint64_t bwdp; 3475 3476 bwdp = rack_get_bw(rack); 3477 bwdp *= (uint64_t)rtt; 3478 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3479 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3480 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3481 /* 3482 * A window protocol must be able to have 4 packets 3483 * outstanding as the floor in order to function 3484 * (especially considering delayed ack :D). 3485 */ 3486 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3487 } 3488 } 3489 3490 static void 3491 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3492 { 3493 /** 3494 * ProbeRTT is a bit different in rack_pacing than in 3495 * BBR. It is like BBR in that it uses the lowering of 3496 * the RTT as a signal that we saw something new and 3497 * counts from there for how long between. But it is 3498 * different in that its quite simple. It does not 3499 * play with the cwnd and wait until we get down 3500 * to N segments outstanding and hold that for 3501 * 200ms. Instead it just sets the pacing reduction 3502 * rate to a set percentage (70 by default) and hold 3503 * that for a number of recent GP Srtt's. 3504 */ 3505 uint32_t segsiz; 3506 3507 if (rack->rc_gp_dyn_mul == 0) 3508 return; 3509 3510 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3511 /* We are idle */ 3512 return; 3513 } 3514 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3515 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3516 /* 3517 * Stop the goodput now, the idea here is 3518 * that future measurements with in_probe_rtt 3519 * won't register if they are not greater so 3520 * we want to get what info (if any) is available 3521 * now. 3522 */ 3523 rack_do_goodput_measurement(rack->rc_tp, rack, 3524 rack->rc_tp->snd_una, __LINE__, 3525 RACK_QUALITY_PROBERTT); 3526 } 3527 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3528 rack->r_ctl.rc_time_probertt_entered = us_cts; 3529 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3530 rack->r_ctl.rc_pace_min_segs); 3531 rack->in_probe_rtt = 1; 3532 rack->measure_saw_probe_rtt = 1; 3533 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3534 rack->r_ctl.rc_time_probertt_starts = 0; 3535 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3536 if (rack_probertt_use_min_rtt_entry) 3537 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3538 else 3539 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3540 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3541 __LINE__, RACK_RTTS_ENTERPROBE); 3542 } 3543 3544 static void 3545 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3546 { 3547 struct rack_sendmap *rsm; 3548 uint32_t segsiz; 3549 3550 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3551 rack->r_ctl.rc_pace_min_segs); 3552 rack->in_probe_rtt = 0; 3553 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3554 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3555 /* 3556 * Stop the goodput now, the idea here is 3557 * that future measurements with in_probe_rtt 3558 * won't register if they are not greater so 3559 * we want to get what info (if any) is available 3560 * now. 3561 */ 3562 rack_do_goodput_measurement(rack->rc_tp, rack, 3563 rack->rc_tp->snd_una, __LINE__, 3564 RACK_QUALITY_PROBERTT); 3565 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3566 /* 3567 * We don't have enough data to make a measurement. 3568 * So lets just stop and start here after exiting 3569 * probe-rtt. We probably are not interested in 3570 * the results anyway. 3571 */ 3572 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3573 } 3574 /* 3575 * Measurements through the current snd_max are going 3576 * to be limited by the slower pacing rate. 
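	 *
	 * For example, if probe-rtt paced those sends at 70% of the
	 * estimated rate, a goodput sample spanning them would read
	 * roughly 30% low for reasons that have nothing to do with
	 * the path.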
3577 * 3578 * We need to mark these as app-limited so we 3579 * don't collapse the b/w. 3580 */ 3581 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3582 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3583 if (rack->r_ctl.rc_app_limited_cnt == 0) 3584 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3585 else { 3586 /* 3587 * Go out to the end app limited and mark 3588 * this new one as next and move the end_appl up 3589 * to this guy. 3590 */ 3591 if (rack->r_ctl.rc_end_appl) 3592 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3593 rack->r_ctl.rc_end_appl = rsm; 3594 } 3595 rsm->r_flags |= RACK_APP_LIMITED; 3596 rack->r_ctl.rc_app_limited_cnt++; 3597 } 3598 /* 3599 * Now, we need to examine our pacing rate multipliers. 3600 * If its under 100%, we need to kick it back up to 3601 * 100%. We also don't let it be over our "max" above 3602 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3603 * Note setting clamp_atexit_prtt to 0 has the effect 3604 * of setting CA/SS to 100% always at exit (which is 3605 * the default behavior). 3606 */ 3607 if (rack_probertt_clear_is) { 3608 rack->rc_gp_incr = 0; 3609 rack->rc_gp_bwred = 0; 3610 rack->rc_gp_timely_inc_cnt = 0; 3611 rack->rc_gp_timely_dec_cnt = 0; 3612 } 3613 /* Do we do any clamping at exit? */ 3614 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3615 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3616 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3617 } 3618 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3619 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3620 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3621 } 3622 /* 3623 * Lets set rtt_diff to 0, so that we will get a "boost" 3624 * after exiting. 3625 */ 3626 rack->r_ctl.rc_rtt_diff = 0; 3627 3628 /* Clear all flags so we start fresh */ 3629 rack->rc_tp->t_bytes_acked = 0; 3630 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3631 /* 3632 * If configured to, set the cwnd and ssthresh to 3633 * our targets. 3634 */ 3635 if (rack_probe_rtt_sets_cwnd) { 3636 uint64_t ebdp; 3637 uint32_t setto; 3638 3639 /* Set ssthresh so we get into CA once we hit our target */ 3640 if (rack_probertt_use_min_rtt_exit == 1) { 3641 /* Set to min rtt */ 3642 rack_set_prtt_target(rack, segsiz, 3643 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3644 } else if (rack_probertt_use_min_rtt_exit == 2) { 3645 /* Set to current gp rtt */ 3646 rack_set_prtt_target(rack, segsiz, 3647 rack->r_ctl.rc_gp_srtt); 3648 } else if (rack_probertt_use_min_rtt_exit == 3) { 3649 /* Set to entry gp rtt */ 3650 rack_set_prtt_target(rack, segsiz, 3651 rack->r_ctl.rc_entry_gp_rtt); 3652 } else { 3653 uint64_t sum; 3654 uint32_t setval; 3655 3656 sum = rack->r_ctl.rc_entry_gp_rtt; 3657 sum *= 10; 3658 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3659 if (sum >= 20) { 3660 /* 3661 * A highly buffered path needs 3662 * cwnd space for timely to work. 3663 * Lets set things up as if 3664 * we are heading back here again. 3665 */ 3666 setval = rack->r_ctl.rc_entry_gp_rtt; 3667 } else if (sum >= 15) { 3668 /* 3669 * Lets take the smaller of the 3670 * two since we are just somewhat 3671 * buffered. 3672 */ 3673 setval = rack->r_ctl.rc_gp_srtt; 3674 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3675 setval = rack->r_ctl.rc_entry_gp_rtt; 3676 } else { 3677 /* 3678 * Here we are not highly buffered 3679 * and should pick the min we can to 3680 * keep from causing loss. 
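				 *
				 * Worked example: with rc_entry_gp_rtt at
				 * 200000 usec and rc_gp_srtt at 80000 usec,
				 * sum = 200000 * 10 / 80000 = 25, so the
				 * (sum >= 20) branch above treats the path
				 * as highly buffered and targets the entry
				 * RTT; an entry RTT under 1.5x the current
				 * gp_srtt lands here and we fall back to
				 * the filtered min RTT.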
3681 */ 3682 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3683 } 3684 rack_set_prtt_target(rack, segsiz, 3685 setval); 3686 } 3687 if (rack_probe_rtt_sets_cwnd > 1) { 3688 /* There is a percentage here to boost */ 3689 ebdp = rack->r_ctl.rc_target_probertt_flight; 3690 ebdp *= rack_probe_rtt_sets_cwnd; 3691 ebdp /= 100; 3692 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3693 } else 3694 setto = rack->r_ctl.rc_target_probertt_flight; 3695 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3696 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3697 /* Enforce a min */ 3698 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3699 } 3700 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3701 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3702 } 3703 rack_log_rtt_shrinks(rack, us_cts, 3704 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3705 __LINE__, RACK_RTTS_EXITPROBE); 3706 /* Clear times last so log has all the info */ 3707 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3708 rack->r_ctl.rc_time_probertt_entered = us_cts; 3709 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3710 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3711 } 3712 3713 static void 3714 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3715 { 3716 /* Check in on probe-rtt */ 3717 if (rack->rc_gp_filled == 0) { 3718 /* We do not do p-rtt unless we have gp measurements */ 3719 return; 3720 } 3721 if (rack->in_probe_rtt) { 3722 uint64_t no_overflow; 3723 uint32_t endtime, must_stay; 3724 3725 if (rack->r_ctl.rc_went_idle_time && 3726 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3727 /* 3728 * We went idle during prtt, just exit now. 3729 */ 3730 rack_exit_probertt(rack, us_cts); 3731 } else if (rack_probe_rtt_safety_val && 3732 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3733 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3734 /* 3735 * Probe RTT safety value triggered! 3736 */ 3737 rack_log_rtt_shrinks(rack, us_cts, 3738 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3739 __LINE__, RACK_RTTS_SAFETY); 3740 rack_exit_probertt(rack, us_cts); 3741 } 3742 /* Calculate the max we will wait */ 3743 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3744 if (rack->rc_highly_buffered) 3745 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3746 /* Calculate the min we must wait */ 3747 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3748 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3749 TSTMP_LT(us_cts, endtime)) { 3750 uint32_t calc; 3751 /* Do we lower more? 
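			 *
			 * Sketch of the reduction below (illustrative
			 * values): with rack_per_of_gp_probertt at 70,
			 * rack_per_of_gp_probertt_reduce at 10 and two
			 * gp_srtt's elapsed since entry, calc = 2 * 10 = 20
			 * and we pace probe-rtt at 70 - 20 = 50 percent,
			 * never dropping below rack_per_of_gp_lowthresh.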
*/ 3752 no_exit: 3753 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3754 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3755 else 3756 calc = 0; 3757 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3758 if (calc) { 3759 /* Maybe */ 3760 calc *= rack_per_of_gp_probertt_reduce; 3761 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3762 /* Limit it too */ 3763 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3764 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3765 } 3766 /* We must reach target or the time set */ 3767 return; 3768 } 3769 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3770 if ((TSTMP_LT(us_cts, must_stay) && 3771 rack->rc_highly_buffered) || 3772 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3773 rack->r_ctl.rc_target_probertt_flight)) { 3774 /* We are not past the must_stay time */ 3775 goto no_exit; 3776 } 3777 rack_log_rtt_shrinks(rack, us_cts, 3778 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3779 __LINE__, RACK_RTTS_REACHTARGET); 3780 rack->r_ctl.rc_time_probertt_starts = us_cts; 3781 if (rack->r_ctl.rc_time_probertt_starts == 0) 3782 rack->r_ctl.rc_time_probertt_starts = 1; 3783 /* Restore back to our rate we want to pace at in prtt */ 3784 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3785 } 3786 /* 3787 * Setup our end time, some number of gp_srtts plus 200ms. 3788 */ 3789 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3790 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3791 if (rack_probertt_gpsrtt_cnt_div) 3792 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3793 else 3794 endtime = 0; 3795 endtime += rack_min_probertt_hold; 3796 endtime += rack->r_ctl.rc_time_probertt_starts; 3797 if (TSTMP_GEQ(us_cts, endtime)) { 3798 /* yes, exit probertt */ 3799 rack_exit_probertt(rack, us_cts); 3800 } 3801 3802 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3803 /* Go into probertt, its been too long since we went lower */ 3804 rack_enter_probertt(rack, us_cts); 3805 } 3806 } 3807 3808 static void 3809 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3810 uint32_t rtt, int32_t rtt_diff) 3811 { 3812 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3813 uint32_t losses; 3814 3815 if ((rack->rc_gp_dyn_mul == 0) || 3816 (rack->use_fixed_rate) || 3817 (rack->in_probe_rtt) || 3818 (rack->rc_always_pace == 0)) { 3819 /* No dynamic GP multipler in play */ 3820 return; 3821 } 3822 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3823 cur_bw = rack_get_bw(rack); 3824 /* Calculate our up and down range */ 3825 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3826 up_bnd /= 100; 3827 up_bnd += rack->r_ctl.last_gp_comp_bw; 3828 3829 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3830 subfr /= 100; 3831 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3832 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3833 /* 3834 * This is the case where our RTT is above 3835 * the max target and we have been configured 3836 * to just do timely no bonus up stuff in that case. 3837 * 3838 * There are two configurations, set to 1, and we 3839 * just do timely if we are over our max. If its 3840 * set above 1 then we slam the multipliers down 3841 * to 100 and then decrement per timely. 
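		 *
		 * In other words, rc_no_push_at_mrtt == 1 only skips the
		 * bandwidth-probing raise and applies the normal timely
		 * decrease, while a value above 1 first clamps the SS/CA
		 * multipliers back to 100% via
		 * rack_validate_multipliers_at_or_below_100() and then
		 * applies the timely decrease on top of that.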
3842 */ 3843 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3844 __LINE__, 3); 3845 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3846 rack_validate_multipliers_at_or_below_100(rack); 3847 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3848 } else if ((last_bw_est < low_bnd) && !losses) { 3849 /* 3850 * We are decreasing this is a bit complicated this 3851 * means we are loosing ground. This could be 3852 * because another flow entered and we are competing 3853 * for b/w with it. This will push the RTT up which 3854 * makes timely unusable unless we want to get shoved 3855 * into a corner and just be backed off (the age 3856 * old problem with delay based CC). 3857 * 3858 * On the other hand if it was a route change we 3859 * would like to stay somewhat contained and not 3860 * blow out the buffers. 3861 */ 3862 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3863 __LINE__, 3); 3864 rack->r_ctl.last_gp_comp_bw = cur_bw; 3865 if (rack->rc_gp_bwred == 0) { 3866 /* Go into reduction counting */ 3867 rack->rc_gp_bwred = 1; 3868 rack->rc_gp_timely_dec_cnt = 0; 3869 } 3870 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3871 (timely_says == 0)) { 3872 /* 3873 * Push another time with a faster pacing 3874 * to try to gain back (we include override to 3875 * get a full raise factor). 3876 */ 3877 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3878 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3879 (timely_says == 0) || 3880 (rack_down_raise_thresh == 0)) { 3881 /* 3882 * Do an override up in b/w if we were 3883 * below the threshold or if the threshold 3884 * is zero we always do the raise. 3885 */ 3886 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3887 } else { 3888 /* Log it stays the same */ 3889 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3890 __LINE__, 11); 3891 } 3892 rack->rc_gp_timely_dec_cnt++; 3893 /* We are not incrementing really no-count */ 3894 rack->rc_gp_incr = 0; 3895 rack->rc_gp_timely_inc_cnt = 0; 3896 } else { 3897 /* 3898 * Lets just use the RTT 3899 * information and give up 3900 * pushing. 3901 */ 3902 goto use_timely; 3903 } 3904 } else if ((timely_says != 2) && 3905 !losses && 3906 (last_bw_est > up_bnd)) { 3907 /* 3908 * We are increasing b/w lets keep going, updating 3909 * our b/w and ignoring any timely input, unless 3910 * of course we are at our max raise (if there is one). 3911 */ 3912 3913 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3914 __LINE__, 3); 3915 rack->r_ctl.last_gp_comp_bw = cur_bw; 3916 if (rack->rc_gp_saw_ss && 3917 rack_per_upper_bound_ss && 3918 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3919 /* 3920 * In cases where we can't go higher 3921 * we should just use timely. 3922 */ 3923 goto use_timely; 3924 } 3925 if (rack->rc_gp_saw_ca && 3926 rack_per_upper_bound_ca && 3927 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 3928 /* 3929 * In cases where we can't go higher 3930 * we should just use timely. 
			 */
			goto use_timely;
		}
		rack->rc_gp_bwred = 0;
		rack->rc_gp_timely_dec_cnt = 0;
		/* You get a set number of pushes if timely is trying to reduce */
		if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		} else {
			/* Log it stays the same */
			rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
			    __LINE__, 12);
		}
		return;
	} else {
		/*
		 * We are staying between the lower and upper range bounds
		 * so use timely to decide.
		 */
		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
		    __LINE__, 3);
use_timely:
		if (timely_says) {
			rack->rc_gp_incr = 0;
			rack->rc_gp_timely_inc_cnt = 0;
			if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
			    !losses &&
			    (last_bw_est < low_bnd)) {
				/* We are losing ground */
				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
				rack->rc_gp_timely_dec_cnt++;
				/* We are not incrementing really no-count */
				rack->rc_gp_incr = 0;
				rack->rc_gp_timely_inc_cnt = 0;
			} else
				rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
		} else {
			rack->rc_gp_bwred = 0;
			rack->rc_gp_timely_dec_cnt = 0;
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		}
	}
}

static int32_t
rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
{
	int32_t timely_says;
	uint64_t log_mult, log_rtt_a_diff;

	log_rtt_a_diff = rtt;
	log_rtt_a_diff <<= 32;
	log_rtt_a_diff |= (uint32_t)rtt_diff;
	if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
	    rack_gp_rtt_maxmul)) {
		/* Reduce the b/w multiplier */
		timely_says = 2;
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
		log_mult <<= 32;
		log_mult |= prev_rtt;
		rack_log_timely(rack, timely_says, log_mult,
		    get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
		    log_rtt_a_diff, __LINE__, 4);
	} else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
	    ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
	    max(rack_gp_rtt_mindiv, 1)))) {
		/* Increase the b/w multiplier */
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
		    ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
		    max(rack_gp_rtt_mindiv, 1));
		log_mult <<= 32;
		log_mult |= prev_rtt;
		timely_says = 0;
		rack_log_timely(rack, timely_says, log_mult,
		    get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
		    log_rtt_a_diff, __LINE__, 5);
	} else {
		/*
		 * Use a gradient to decide; the timely gradient
		 * is:
		 *    grad = rc_rtt_diff / min_rtt;
		 *
		 * anything below or equal to 0 will be
		 * an increase indication. Anything above
		 * zero is a decrease. Note we take care
		 * of the actual gradient calculation
		 * in the reduction (it's not needed for
		 * an increase).
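		 *
		 * Worked example (illustrative numbers): with a filtered
		 * min_rtt of 20000 usec, a smoothed rc_rtt_diff of
		 * +3000 usec is a positive gradient, so timely_says is
		 * set to 1 (reduce); a diff of 0 or -1000 usec would
		 * instead return 0 (ok to increase).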
4019 */ 4020 log_mult = prev_rtt; 4021 if (rtt_diff <= 0) { 4022 /* 4023 * Rttdiff is less than zero, increase the 4024 * b/w multipler (its 0 or negative) 4025 */ 4026 timely_says = 0; 4027 rack_log_timely(rack, timely_says, log_mult, 4028 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4029 } else { 4030 /* Reduce the b/w multipler */ 4031 timely_says = 1; 4032 rack_log_timely(rack, timely_says, log_mult, 4033 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4034 } 4035 } 4036 return (timely_says); 4037 } 4038 4039 static void 4040 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4041 tcp_seq th_ack, int line, uint8_t quality) 4042 { 4043 uint64_t tim, bytes_ps, ltim, stim, utim; 4044 uint32_t segsiz, bytes, reqbytes, us_cts; 4045 int32_t gput, new_rtt_diff, timely_says; 4046 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4047 int did_add = 0; 4048 4049 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4050 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4051 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4052 tim = us_cts - tp->gput_ts; 4053 else 4054 tim = 0; 4055 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4056 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4057 else 4058 stim = 0; 4059 /* 4060 * Use the larger of the send time or ack time. This prevents us 4061 * from being influenced by ack artifacts to come up with too 4062 * high of measurement. Note that since we are spanning over many more 4063 * bytes in most of our measurements hopefully that is less likely to 4064 * occur. 4065 */ 4066 if (tim > stim) 4067 utim = max(tim, 1); 4068 else 4069 utim = max(stim, 1); 4070 /* Lets get a msec time ltim too for the old stuff */ 4071 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4072 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4073 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4074 if ((tim == 0) && (stim == 0)) { 4075 /* 4076 * Invalid measurement time, maybe 4077 * all on one ack/one send? 4078 */ 4079 bytes = 0; 4080 bytes_ps = 0; 4081 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4082 0, 0, 0, 10, __LINE__, NULL, quality); 4083 goto skip_measurement; 4084 } 4085 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4086 /* We never made a us_rtt measurement? */ 4087 bytes = 0; 4088 bytes_ps = 0; 4089 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4090 0, 0, 0, 10, __LINE__, NULL, quality); 4091 goto skip_measurement; 4092 } 4093 /* 4094 * Calculate the maximum possible b/w this connection 4095 * could have. We base our calculation on the lowest 4096 * rtt we have seen during the measurement and the 4097 * largest rwnd the client has given us in that time. This 4098 * forms a BDP that is the maximum that we could ever 4099 * get to the client. Anything larger is not valid. 4100 * 4101 * I originally had code here that rejected measurements 4102 * where the time was less than 1/2 the latest us_rtt. 4103 * But after thinking on that I realized its wrong since 4104 * say you had a 150Mbps or even 1Gbps link, and you 4105 * were a long way away.. example I am in Europe (100ms rtt) 4106 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4107 * bytes my time would be 1.2ms, and yet my rtt would say 4108 * the measurement was invalid the time was < 50ms. The 4109 * same thing is true for 150Mb (8ms of time). 
4110 * 4111 * A better way I realized is to look at what the maximum 4112 * the connection could possibly do. This is gated on 4113 * the lowest RTT we have seen and the highest rwnd. 4114 * We should in theory never exceed that, if we are 4115 * then something on the path is storing up packets 4116 * and then feeding them all at once to our endpoint 4117 * messing up our measurement. 4118 */ 4119 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4120 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4121 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4122 if (SEQ_LT(th_ack, tp->gput_seq)) { 4123 /* No measurement can be made */ 4124 bytes = 0; 4125 bytes_ps = 0; 4126 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4127 0, 0, 0, 10, __LINE__, NULL, quality); 4128 goto skip_measurement; 4129 } else 4130 bytes = (th_ack - tp->gput_seq); 4131 bytes_ps = (uint64_t)bytes; 4132 /* 4133 * Don't measure a b/w for pacing unless we have gotten at least 4134 * an initial windows worth of data in this measurement interval. 4135 * 4136 * Small numbers of bytes get badly influenced by delayed ack and 4137 * other artifacts. Note we take the initial window or our 4138 * defined minimum GP (defaulting to 10 which hopefully is the 4139 * IW). 4140 */ 4141 if (rack->rc_gp_filled == 0) { 4142 /* 4143 * The initial estimate is special. We 4144 * have blasted out an IW worth of packets 4145 * without a real valid ack ts results. We 4146 * then setup the app_limited_needs_set flag, 4147 * this should get the first ack in (probably 2 4148 * MSS worth) to be recorded as the timestamp. 4149 * We thus allow a smaller number of bytes i.e. 4150 * IW - 2MSS. 4151 */ 4152 reqbytes -= (2 * segsiz); 4153 /* Also lets fill previous for our first measurement to be neutral */ 4154 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4155 } 4156 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4157 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4158 rack->r_ctl.rc_app_limited_cnt, 4159 0, 0, 10, __LINE__, NULL, quality); 4160 goto skip_measurement; 4161 } 4162 /* 4163 * We now need to calculate the Timely like status so 4164 * we can update (possibly) the b/w multipliers. 4165 */ 4166 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4167 if (rack->rc_gp_filled == 0) { 4168 /* No previous reading */ 4169 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4170 } else { 4171 if (rack->measure_saw_probe_rtt == 0) { 4172 /* 4173 * We don't want a probertt to be counted 4174 * since it will be negative incorrectly. We 4175 * expect to be reducing the RTT when we 4176 * pace at a slower rate. 4177 */ 4178 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4179 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4180 } 4181 } 4182 timely_says = rack_make_timely_judgement(rack, 4183 rack->r_ctl.rc_gp_srtt, 4184 rack->r_ctl.rc_rtt_diff, 4185 rack->r_ctl.rc_prev_gp_srtt 4186 ); 4187 bytes_ps *= HPTS_USEC_IN_SEC; 4188 bytes_ps /= utim; 4189 if (bytes_ps > rack->r_ctl.last_max_bw) { 4190 /* 4191 * Something is on path playing 4192 * since this b/w is not possible based 4193 * on our BDP (highest rwnd and lowest rtt 4194 * we saw in the measurement window). 4195 * 4196 * Another option here would be to 4197 * instead skip the measurement. 
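		 *
		 * Worked example (illustrative numbers): with a largest
		 * rwnd of 1,000,000 bytes and a lowest us_rtt of
		 * 100,000 usec, last_max_bw is 10,000,000 bytes/sec; if
		 * compressed acks make bytes_ps come out at 25,000,000
		 * bytes/sec we clamp the sample back to 10,000,000
		 * before folding it into gp_bw.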
		 */
		rack_log_pacing_delay_calc(rack, bytes, reqbytes,
					   bytes_ps, rack->r_ctl.last_max_bw, 0,
					   11, __LINE__, NULL, quality);
		bytes_ps = rack->r_ctl.last_max_bw;
	}
	/* We store gp for b/w in bytes per second */
	if (rack->rc_gp_filled == 0) {
		/* Initial measurement */
		if (bytes_ps) {
			rack->r_ctl.gp_bw = bytes_ps;
			rack->rc_gp_filled = 1;
			rack->r_ctl.num_measurements = 1;
			rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
		} else {
			rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes,
						   rack->r_ctl.rc_app_limited_cnt,
						   0, 0, 10, __LINE__, NULL, quality);
		}
		if (tcp_in_hpts(rack->rc_inp) &&
		    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
			/*
			 * Ok, we can't trust the pacer in this case
			 * where we transition from un-paced to paced.
			 * Or for that matter when the burst mitigation
			 * was making a wild guess and got it wrong.
			 * Stop the pacer and clear up all the aggregate
			 * delays etc.
			 */
			tcp_hpts_remove(rack->rc_inp);
			rack->r_ctl.rc_hpts_flags = 0;
			rack->r_ctl.rc_last_output_to = 0;
		}
		did_add = 2;
	} else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) {
		/* Still a small number, run an average */
		rack->r_ctl.gp_bw += bytes_ps;
		addpart = rack->r_ctl.num_measurements;
		rack->r_ctl.num_measurements++;
		if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) {
			/* We have collected enough to move forward */
			rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements;
		}
		did_add = 3;
	} else {
		/*
		 * We want to take 1/wma of the goodput and add in to 7/8th
		 * of the old value weighted by the srtt. So if your measurement
		 * period is say 2 SRTT's long you would get 1/4 as the
		 * value, if it was like 1/2 SRTT then you would get 1/16th.
		 *
		 * But we must be careful not to take too much i.e. if the
		 * srtt is say 20ms and the measurement is taken over
		 * 400ms our weight would be 400/20 i.e. 20. On the
		 * other hand if we get a measurement over 1ms with a
		 * 10ms rtt we only want to take a much smaller portion.
		 */
		if (rack->r_ctl.num_measurements < 0xff) {
			rack->r_ctl.num_measurements++;
		}
		srtt = (uint64_t)tp->t_srtt;
		if (srtt == 0) {
			/*
			 * Strange, why did t_srtt go back to zero?
			 */
			if (rack->r_ctl.rc_rack_min_rtt)
				srtt = rack->r_ctl.rc_rack_min_rtt;
			else
				srtt = HPTS_USEC_IN_MSEC;
		}
		/*
		 * XXXrrs: Note for reviewers, in playing with
		 * dynamic pacing I discovered this GP calculation
		 * as done originally leads to some undesired results.
		 * Basically you can get longer measurements contributing
		 * too much to the WMA. Thus I changed it so that, if you
		 * are doing dynamic adjustments, we only do the apportioned
		 * adjustment if we have a very small (time wise) measurement.
		 * Longer measurements just get their weight (defaulting to
		 * 1/8) added to the WMA. We may want to think about changing
		 * this to always do that for both sides i.e. dynamic
		 * and non-dynamic... but considering lots of folks
		 * were playing with this I did not want to change the
		 * calculation per se without your thoughts.. Lawrence?
		 * Peter??
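		 *
		 * Worked example of the weighting below (illustrative
		 * numbers): non-dynamic with srtt = 10000 usec and a
		 * measurement spanning utim = 20000 usec takes
		 * subpart = gp_bw * 20000 / 80000 = gp_bw / 4 out of the
		 * WMA and adds bytes_ps / 4 back in; in dynamic mode a
		 * measurement longer than one srtt instead contributes
		 * the flat 1/rack_wma_divisor (1/8 by default) portion.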
4283 */ 4284 if (rack->rc_gp_dyn_mul == 0) { 4285 subpart = rack->r_ctl.gp_bw * utim; 4286 subpart /= (srtt * 8); 4287 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4288 /* 4289 * The b/w update takes no more 4290 * away then 1/2 our running total 4291 * so factor it in. 4292 */ 4293 addpart = bytes_ps * utim; 4294 addpart /= (srtt * 8); 4295 } else { 4296 /* 4297 * Don't allow a single measurement 4298 * to account for more than 1/2 of the 4299 * WMA. This could happen on a retransmission 4300 * where utim becomes huge compared to 4301 * srtt (multiple retransmissions when using 4302 * the sending rate which factors in all the 4303 * transmissions from the first one). 4304 */ 4305 subpart = rack->r_ctl.gp_bw / 2; 4306 addpart = bytes_ps / 2; 4307 } 4308 resid_bw = rack->r_ctl.gp_bw - subpart; 4309 rack->r_ctl.gp_bw = resid_bw + addpart; 4310 did_add = 1; 4311 } else { 4312 if ((utim / srtt) <= 1) { 4313 /* 4314 * The b/w update was over a small period 4315 * of time. The idea here is to prevent a small 4316 * measurement time period from counting 4317 * too much. So we scale it based on the 4318 * time so it attributes less than 1/rack_wma_divisor 4319 * of its measurement. 4320 */ 4321 subpart = rack->r_ctl.gp_bw * utim; 4322 subpart /= (srtt * rack_wma_divisor); 4323 addpart = bytes_ps * utim; 4324 addpart /= (srtt * rack_wma_divisor); 4325 } else { 4326 /* 4327 * The scaled measurement was long 4328 * enough so lets just add in the 4329 * portion of the measurement i.e. 1/rack_wma_divisor 4330 */ 4331 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4332 addpart = bytes_ps / rack_wma_divisor; 4333 } 4334 if ((rack->measure_saw_probe_rtt == 0) || 4335 (bytes_ps > rack->r_ctl.gp_bw)) { 4336 /* 4337 * For probe-rtt we only add it in 4338 * if its larger, all others we just 4339 * add in. 4340 */ 4341 did_add = 1; 4342 resid_bw = rack->r_ctl.gp_bw - subpart; 4343 rack->r_ctl.gp_bw = resid_bw + addpart; 4344 } 4345 } 4346 } 4347 if ((rack->gp_ready == 0) && 4348 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4349 /* We have enough measurements now */ 4350 rack->gp_ready = 1; 4351 rack_set_cc_pacing(rack); 4352 if (rack->defer_options) 4353 rack_apply_deferred_options(rack); 4354 } 4355 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4356 rack_get_bw(rack), 22, did_add, NULL, quality); 4357 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4358 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4359 rack_update_multiplier(rack, timely_says, bytes_ps, 4360 rack->r_ctl.rc_gp_srtt, 4361 rack->r_ctl.rc_rtt_diff); 4362 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4363 rack_get_bw(rack), 3, line, NULL, quality); 4364 /* reset the gp srtt and setup the new prev */ 4365 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4366 /* Record the lost count for the next measurement */ 4367 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4368 /* 4369 * We restart our diffs based on the gpsrtt in the 4370 * measurement window. 4371 */ 4372 rack->rc_gp_rtt_set = 0; 4373 rack->rc_gp_saw_rec = 0; 4374 rack->rc_gp_saw_ca = 0; 4375 rack->rc_gp_saw_ss = 0; 4376 rack->rc_dragged_bottom = 0; 4377 skip_measurement: 4378 4379 #ifdef STATS 4380 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4381 gput); 4382 /* 4383 * XXXLAS: This is a temporary hack, and should be 4384 * chained off VOI_TCP_GPUT when stats(9) grows an 4385 * API to deal with chained VOIs. 
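	 *
	 * The normalized delta recorded below is just the percent
	 * change versus the previous sample, e.g. (illustrative
	 * numbers) a prior gput of 1000 and a new gput of 1200 logs
	 * ((1200 - 1000) * 100) / 1000 = +20.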
4386 */ 4387 if (tp->t_stats_gput_prev > 0) 4388 stats_voi_update_abs_s32(tp->t_stats, 4389 VOI_TCP_GPUT_ND, 4390 ((gput - tp->t_stats_gput_prev) * 100) / 4391 tp->t_stats_gput_prev); 4392 #endif 4393 tp->t_flags &= ~TF_GPUTINPROG; 4394 tp->t_stats_gput_prev = gput; 4395 /* 4396 * Now are we app limited now and there is space from where we 4397 * were to where we want to go? 4398 * 4399 * We don't do the other case i.e. non-applimited here since 4400 * the next send will trigger us picking up the missing data. 4401 */ 4402 if (rack->r_ctl.rc_first_appl && 4403 TCPS_HAVEESTABLISHED(tp->t_state) && 4404 rack->r_ctl.rc_app_limited_cnt && 4405 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4406 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4407 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4408 /* 4409 * Yep there is enough outstanding to make a measurement here. 4410 */ 4411 struct rack_sendmap *rsm, fe; 4412 4413 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4414 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4415 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4416 rack->app_limited_needs_set = 0; 4417 tp->gput_seq = th_ack; 4418 if (rack->in_probe_rtt) 4419 rack->measure_saw_probe_rtt = 1; 4420 else if ((rack->measure_saw_probe_rtt) && 4421 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4422 rack->measure_saw_probe_rtt = 0; 4423 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4424 /* There is a full window to gain info from */ 4425 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4426 } else { 4427 /* We can only measure up to the applimited point */ 4428 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4429 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4430 /* 4431 * We don't have enough to make a measurement. 4432 */ 4433 tp->t_flags &= ~TF_GPUTINPROG; 4434 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4435 0, 0, 0, 6, __LINE__, NULL, quality); 4436 return; 4437 } 4438 } 4439 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4440 /* 4441 * We will get no more data into the SB 4442 * this means we need to have the data available 4443 * before we start a measurement. 4444 */ 4445 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4446 /* Nope not enough data. */ 4447 return; 4448 } 4449 } 4450 tp->t_flags |= TF_GPUTINPROG; 4451 /* 4452 * Now we need to find the timestamp of the send at tp->gput_seq 4453 * for the send based measurement. 4454 */ 4455 fe.r_start = tp->gput_seq; 4456 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4457 if (rsm) { 4458 /* Ok send-based limit is set */ 4459 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4460 /* 4461 * Move back to include the earlier part 4462 * so our ack time lines up right (this may 4463 * make an overlapping measurement but thats 4464 * ok). 4465 */ 4466 tp->gput_seq = rsm->r_start; 4467 } 4468 if (rsm->r_flags & RACK_ACKED) 4469 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4470 else 4471 rack->app_limited_needs_set = 1; 4472 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4473 } else { 4474 /* 4475 * If we don't find the rsm due to some 4476 * send-limit set the current time, which 4477 * basically disables the send-limit. 
4478 */ 4479 struct timeval tv; 4480 4481 microuptime(&tv); 4482 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4483 } 4484 rack_log_pacing_delay_calc(rack, 4485 tp->gput_seq, 4486 tp->gput_ack, 4487 (uint64_t)rsm, 4488 tp->gput_ts, 4489 rack->r_ctl.rc_app_limited_cnt, 4490 9, 4491 __LINE__, NULL, quality); 4492 } 4493 } 4494 4495 /* 4496 * CC wrapper hook functions 4497 */ 4498 static void 4499 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4500 uint16_t type, int32_t recovery) 4501 { 4502 uint32_t prior_cwnd, acked; 4503 struct tcp_log_buffer *lgb = NULL; 4504 uint8_t labc_to_use, quality; 4505 4506 INP_WLOCK_ASSERT(tp->t_inpcb); 4507 tp->ccv->nsegs = nsegs; 4508 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4509 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4510 uint32_t max; 4511 4512 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4513 if (tp->ccv->bytes_this_ack > max) { 4514 tp->ccv->bytes_this_ack = max; 4515 } 4516 } 4517 #ifdef STATS 4518 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4519 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4520 #endif 4521 quality = RACK_QUALITY_NONE; 4522 if ((tp->t_flags & TF_GPUTINPROG) && 4523 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4524 /* Measure the Goodput */ 4525 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4526 #ifdef NETFLIX_PEAKRATE 4527 if ((type == CC_ACK) && 4528 (tp->t_maxpeakrate)) { 4529 /* 4530 * We update t_peakrate_thr. This gives us roughly 4531 * one update per round trip time. Note 4532 * it will only be used if pace_always is off i.e 4533 * we don't do this for paced flows. 4534 */ 4535 rack_update_peakrate_thr(tp); 4536 } 4537 #endif 4538 } 4539 /* Which way our we limited, if not cwnd limited no advance in CA */ 4540 if (tp->snd_cwnd <= tp->snd_wnd) 4541 tp->ccv->flags |= CCF_CWND_LIMITED; 4542 else 4543 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4544 if (tp->snd_cwnd > tp->snd_ssthresh) { 4545 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4546 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4547 /* For the setting of a window past use the actual scwnd we are using */ 4548 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4549 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4550 tp->ccv->flags |= CCF_ABC_SENTAWND; 4551 } 4552 } else { 4553 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4554 tp->t_bytes_acked = 0; 4555 } 4556 prior_cwnd = tp->snd_cwnd; 4557 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4558 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4559 labc_to_use = rack->rc_labc; 4560 else 4561 labc_to_use = rack_max_abc_post_recovery; 4562 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4563 union tcp_log_stackspecific log; 4564 struct timeval tv; 4565 4566 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4567 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4568 log.u_bbr.flex1 = th_ack; 4569 log.u_bbr.flex2 = tp->ccv->flags; 4570 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4571 log.u_bbr.flex4 = tp->ccv->nsegs; 4572 log.u_bbr.flex5 = labc_to_use; 4573 log.u_bbr.flex6 = prior_cwnd; 4574 log.u_bbr.flex7 = V_tcp_do_newsack; 4575 log.u_bbr.flex8 = 1; 4576 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4577 0, &log, false, NULL, NULL, 0, &tv); 4578 } 4579 if (CC_ALGO(tp)->ack_received != NULL) { 4580 /* XXXLAS: Find a way to live without this */ 4581 tp->ccv->curack = th_ack; 4582 
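		/*
		 * Hand the CC module our local ABC limit; labc_to_use was
		 * chosen above (the stack's rc_labc, or
		 * rack_max_abc_post_recovery when coming out of recovery)
		 * and CCF_USE_LOCAL_ABC tells the algorithm to honor it
		 * for this ack instead of the global setting.
		 */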
tp->ccv->labc = labc_to_use; 4583 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4584 CC_ALGO(tp)->ack_received(tp->ccv, type); 4585 } 4586 if (lgb) { 4587 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4588 } 4589 if (rack->r_must_retran) { 4590 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4591 /* 4592 * We now are beyond the rxt point so let's disable 4593 * the flag. 4594 */ 4595 rack->r_ctl.rc_out_at_rto = 0; 4596 rack->r_must_retran = 0; 4597 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4598 /* 4599 * Only decrement the rc_out_at_rto if the cwnd advances 4600 * at least a whole segment. Otherwise the next time the peer 4601 * acks we won't be able to send; this generally happens 4602 * when we are in Congestion Avoidance. 4603 */ 4604 if (acked <= rack->r_ctl.rc_out_at_rto){ 4605 rack->r_ctl.rc_out_at_rto -= acked; 4606 } else { 4607 rack->r_ctl.rc_out_at_rto = 0; 4608 } 4609 } 4610 } 4611 #ifdef STATS 4612 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4613 #endif 4614 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4615 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4616 } 4617 #ifdef NETFLIX_PEAKRATE 4618 /* we enforce max peak rate if it is set and we are not pacing */ 4619 if ((rack->rc_always_pace == 0) && 4620 tp->t_peakrate_thr && 4621 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4622 tp->snd_cwnd = tp->t_peakrate_thr; 4623 } 4624 #endif 4625 } 4626 4627 static void 4628 tcp_rack_partialack(struct tcpcb *tp) 4629 { 4630 struct tcp_rack *rack; 4631 4632 rack = (struct tcp_rack *)tp->t_fb_ptr; 4633 INP_WLOCK_ASSERT(tp->t_inpcb); 4634 /* 4635 * If we are doing PRR and have enough 4636 * room to send, <or> we are pacing and prr 4637 * is disabled, we will want to see if we 4638 * can send data (by setting r_wanted_output to 4639 * true). 4640 */ 4641 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4642 rack->rack_no_prr) 4643 rack->r_wanted_output = 1; 4644 } 4645 4646 static void 4647 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4648 { 4649 struct tcp_rack *rack; 4650 uint32_t orig_cwnd; 4651 4652 orig_cwnd = tp->snd_cwnd; 4653 INP_WLOCK_ASSERT(tp->t_inpcb); 4654 rack = (struct tcp_rack *)tp->t_fb_ptr; 4655 /* only alert CC if we alerted when we entered */ 4656 if (CC_ALGO(tp)->post_recovery != NULL) { 4657 tp->ccv->curack = th_ack; 4658 CC_ALGO(tp)->post_recovery(tp->ccv); 4659 if (tp->snd_cwnd < tp->snd_ssthresh) { 4660 /* 4661 * Rack has burst control and pacing 4662 * so let's not set this any lower than 4663 * snd_ssthresh per RFC-6582 (option 2).
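 * (For reference: RFC 6582, section 3.2, allows two ways to deflate
 * cwnd when leaving fast recovery; option 2 is simply cwnd = ssthresh.
 * Pacing and burst control keep this from turning into a line-rate
 * burst, which is why we don't deflate below ssthresh here.)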
4664 */ 4665 tp->snd_cwnd = tp->snd_ssthresh; 4666 } 4667 } 4668 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4669 union tcp_log_stackspecific log; 4670 struct timeval tv; 4671 4672 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4673 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4674 log.u_bbr.flex1 = th_ack; 4675 log.u_bbr.flex2 = tp->ccv->flags; 4676 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4677 log.u_bbr.flex4 = tp->ccv->nsegs; 4678 log.u_bbr.flex5 = V_tcp_abc_l_var; 4679 log.u_bbr.flex6 = orig_cwnd; 4680 log.u_bbr.flex7 = V_tcp_do_newsack; 4681 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4682 log.u_bbr.flex8 = 2; 4683 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4684 0, &log, false, NULL, NULL, 0, &tv); 4685 } 4686 if ((rack->rack_no_prr == 0) && 4687 (rack->no_prr_addback == 0) && 4688 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4689 /* 4690 * Suck the next prr cnt back into cwnd, but 4691 * only do that if we are not application limited. 4692 */ 4693 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4694 /* 4695 * We are allowed to add back to the cwnd the amount we did 4696 * not get out if: 4697 * a) no_prr_addback is off. 4698 * b) we are not app limited 4699 * c) we are doing prr 4700 * <and> 4701 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4702 */ 4703 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4704 rack->r_ctl.rc_prr_sndcnt); 4705 } 4706 rack->r_ctl.rc_prr_sndcnt = 0; 4707 rack_log_to_prr(rack, 1, 0, __LINE__); 4708 } 4709 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4710 tp->snd_recover = tp->snd_una; 4711 if (rack->r_ctl.dsack_persist) { 4712 rack->r_ctl.dsack_persist--; 4713 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4714 rack->r_ctl.num_dsack = 0; 4715 } 4716 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4717 } 4718 EXIT_RECOVERY(tp->t_flags); 4719 } 4720 4721 static void 4722 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4723 { 4724 struct tcp_rack *rack; 4725 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4726 4727 INP_WLOCK_ASSERT(tp->t_inpcb); 4728 #ifdef STATS 4729 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4730 #endif 4731 if (IN_RECOVERY(tp->t_flags) == 0) { 4732 in_rec_at_entry = 0; 4733 ssthresh_enter = tp->snd_ssthresh; 4734 cwnd_enter = tp->snd_cwnd; 4735 } else 4736 in_rec_at_entry = 1; 4737 rack = (struct tcp_rack *)tp->t_fb_ptr; 4738 switch (type) { 4739 case CC_NDUPACK: 4740 tp->t_flags &= ~TF_WASFRECOVERY; 4741 tp->t_flags &= ~TF_WASCRECOVERY; 4742 if (!IN_FASTRECOVERY(tp->t_flags)) { 4743 rack->r_ctl.rc_prr_delivered = 0; 4744 rack->r_ctl.rc_prr_out = 0; 4745 if (rack->rack_no_prr == 0) { 4746 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4747 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4748 } 4749 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4750 tp->snd_recover = tp->snd_max; 4751 if (tp->t_flags2 & TF2_ECN_PERMIT) 4752 tp->t_flags2 |= TF2_ECN_SND_CWR; 4753 } 4754 break; 4755 case CC_ECN: 4756 if (!IN_CONGRECOVERY(tp->t_flags) || 4757 /* 4758 * Allow ECN reaction on ACK to CWR, if 4759 * that data segment was also CE marked. 
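 * (In other words: while we are already in congestion recovery,
 * further ECE marks are ignored until the ACK reaches snd_recover,
 * so cwnd is reduced at most once per window of data, as RFC 3168
 * intends.)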
4760 */ 4761 SEQ_GEQ(ack, tp->snd_recover)) { 4762 EXIT_CONGRECOVERY(tp->t_flags); 4763 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4764 tp->snd_recover = tp->snd_max + 1; 4765 if (tp->t_flags2 & TF2_ECN_PERMIT) 4766 tp->t_flags2 |= TF2_ECN_SND_CWR; 4767 } 4768 break; 4769 case CC_RTO: 4770 tp->t_dupacks = 0; 4771 tp->t_bytes_acked = 0; 4772 EXIT_RECOVERY(tp->t_flags); 4773 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4774 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4775 orig_cwnd = tp->snd_cwnd; 4776 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4777 rack_log_to_prr(rack, 16, orig_cwnd, line); 4778 if (tp->t_flags2 & TF2_ECN_PERMIT) 4779 tp->t_flags2 |= TF2_ECN_SND_CWR; 4780 break; 4781 case CC_RTO_ERR: 4782 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4783 /* RTO was unnecessary, so reset everything. */ 4784 tp->snd_cwnd = tp->snd_cwnd_prev; 4785 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4786 tp->snd_recover = tp->snd_recover_prev; 4787 if (tp->t_flags & TF_WASFRECOVERY) { 4788 ENTER_FASTRECOVERY(tp->t_flags); 4789 tp->t_flags &= ~TF_WASFRECOVERY; 4790 } 4791 if (tp->t_flags & TF_WASCRECOVERY) { 4792 ENTER_CONGRECOVERY(tp->t_flags); 4793 tp->t_flags &= ~TF_WASCRECOVERY; 4794 } 4795 tp->snd_nxt = tp->snd_max; 4796 tp->t_badrxtwin = 0; 4797 break; 4798 } 4799 if ((CC_ALGO(tp)->cong_signal != NULL) && 4800 (type != CC_RTO)){ 4801 tp->ccv->curack = ack; 4802 CC_ALGO(tp)->cong_signal(tp->ccv, type); 4803 } 4804 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4805 rack_log_to_prr(rack, 15, cwnd_enter, line); 4806 rack->r_ctl.dsack_byte_cnt = 0; 4807 rack->r_ctl.retran_during_recovery = 0; 4808 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4809 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4810 rack->r_ent_rec_ns = 1; 4811 } 4812 } 4813 4814 static inline void 4815 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4816 { 4817 uint32_t i_cwnd; 4818 4819 INP_WLOCK_ASSERT(tp->t_inpcb); 4820 4821 #ifdef NETFLIX_STATS 4822 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4823 if (tp->t_state == TCPS_ESTABLISHED) 4824 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4825 #endif 4826 if (CC_ALGO(tp)->after_idle != NULL) 4827 CC_ALGO(tp)->after_idle(tp->ccv); 4828 4829 if (tp->snd_cwnd == 1) 4830 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4831 else 4832 i_cwnd = rc_init_window(rack); 4833 4834 /* 4835 * Being idle is no different than the initial window. If the cc 4836 * clamps it down below the initial window raise it to the initial 4837 * window. 4838 */ 4839 if (tp->snd_cwnd < i_cwnd) { 4840 tp->snd_cwnd = i_cwnd; 4841 } 4842 } 4843 4844 /* 4845 * Indicate whether this ack should be delayed. We can delay the ack if 4846 * following conditions are met: 4847 * - There is no delayed ack timer in progress. 4848 * - Our last ack wasn't a 0-sized window. We never want to delay 4849 * the ack that opens up a 0-sized window. 4850 * - LRO wasn't used for this segment. We make sure by checking that the 4851 * segment size is not larger than the MSS. 4852 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4853 * connection. 4854 */ 4855 #define DELAY_ACK(tp, tlen) \ 4856 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4857 ((tp->t_flags & TF_DELACK) == 0) && \ 4858 (tlen <= tp->t_maxseg) && \ 4859 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4860 4861 static struct rack_sendmap * 4862 rack_find_lowest_rsm(struct tcp_rack *rack) 4863 { 4864 struct rack_sendmap *rsm; 4865 4866 /* 4867 * Walk the time-order transmitted list looking for an rsm that is 4868 * not acked. 
This will be the one that was sent the longest time 4869 * ago that is still outstanding. 4870 */ 4871 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4872 if (rsm->r_flags & RACK_ACKED) { 4873 continue; 4874 } 4875 goto finish; 4876 } 4877 finish: 4878 return (rsm); 4879 } 4880 4881 static struct rack_sendmap * 4882 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 4883 { 4884 struct rack_sendmap *prsm; 4885 4886 /* 4887 * Walk the sequence-ordered list backward until we arrive at 4888 * the highest seq not acked. In theory the last segment would be 4889 * the one to use, but when this is called it was not (it was acked or a FIN). 4890 */ 4891 prsm = rsm; 4892 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 4893 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 4894 continue; 4895 } 4896 return (prsm); 4897 } 4898 return (NULL); 4899 } 4900 4901 static uint32_t 4902 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 4903 { 4904 int32_t lro; 4905 uint32_t thresh; 4906 4907 /* 4908 * lro is the flag we use to determine if we have seen reordering. 4909 * If it gets set we have seen reordering. The reorder logic either 4910 * works in one of two ways: 4911 * 4912 * If reorder-fade is configured, then we track the last time we saw 4913 * re-ordering occur. If we reach the point where enough time has 4914 * passed we no longer consider reordering to be occurring. 4915 * 4916 * Or if reorder-fade is 0, then once we see reordering we consider 4917 * the connection to always be subject to reordering and just set lro 4918 * to 1. 4919 * 4920 * In the end if lro is non-zero we add the extra time for 4921 * reordering in. 4922 */ 4923 if (srtt == 0) 4924 srtt = 1; 4925 if (rack->r_ctl.rc_reorder_ts) { 4926 if (rack->r_ctl.rc_reorder_fade) { 4927 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 4928 lro = cts - rack->r_ctl.rc_reorder_ts; 4929 if (lro == 0) { 4930 /* 4931 * No time has passed since the last 4932 * reorder, mark it as reordering. 4933 */ 4934 lro = 1; 4935 } 4936 } else { 4937 /* Negative time? */ 4938 lro = 0; 4939 } 4940 if (lro > rack->r_ctl.rc_reorder_fade) { 4941 /* Turn off reordering seen too */ 4942 rack->r_ctl.rc_reorder_ts = 0; 4943 lro = 0; 4944 } 4945 } else { 4946 /* Reordering does not fade */ 4947 lro = 1; 4948 } 4949 } else { 4950 lro = 0; 4951 } 4952 if (rack->rc_rack_tmr_std_based == 0) { 4953 thresh = srtt + rack->r_ctl.rc_pkt_delay; 4954 } else { 4955 /* Standards based pkt-delay is 1/4 srtt */ 4956 thresh = srtt + (srtt >> 2); 4957 } 4958 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 4959 /* It must be set, if not you get 1/4 rtt */ 4960 if (rack->r_ctl.rc_reorder_shift) 4961 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 4962 else 4963 thresh += (srtt >> 2); 4964 } 4965 if (rack->rc_rack_use_dsack && 4966 lro && 4967 (rack->r_ctl.num_dsack > 0)) { 4968 /* 4969 * We only increase the reordering window if we 4970 * have seen reordering <and> we have a DSACK count.
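 *
 * A worked example with illustrative (not default) numbers: for a
 * non standards-based timer with srtt = 40000 usec, rc_pkt_delay =
 * 1000, rc_reorder_shift = 2 and one DSACK noted while reordering
 * is active, thresh = 40000 + 1000 + (40000 >> 2) + 1 * (40000 >> 2)
 * = 61000 usec, which still sits below the 2 * srtt ceiling (80000
 * usec) and the rack_rto_max cap applied just below.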
4971 */ 4972 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 4973 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 4974 } 4975 /* SRTT * 2 is the ceiling */ 4976 if (thresh > (srtt * 2)) { 4977 thresh = srtt * 2; 4978 } 4979 /* And we don't want it above the RTO max either */ 4980 if (thresh > rack_rto_max) { 4981 thresh = rack_rto_max; 4982 } 4983 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 4984 return (thresh); 4985 } 4986 4987 static uint32_t 4988 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 4989 struct rack_sendmap *rsm, uint32_t srtt) 4990 { 4991 struct rack_sendmap *prsm; 4992 uint32_t thresh, len; 4993 int segsiz; 4994 4995 if (srtt == 0) 4996 srtt = 1; 4997 if (rack->r_ctl.rc_tlp_threshold) 4998 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 4999 else 5000 thresh = (srtt * 2); 5001 5002 /* Get the previous sent packet, if any */ 5003 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5004 len = rsm->r_end - rsm->r_start; 5005 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5006 /* Exactly like the ID */ 5007 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5008 uint32_t alt_thresh; 5009 /* 5010 * Compensate for delayed-ack with the d-ack time. 5011 */ 5012 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5013 if (alt_thresh > thresh) 5014 thresh = alt_thresh; 5015 } 5016 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5017 /* 2.1 behavior */ 5018 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5019 if (prsm && (len <= segsiz)) { 5020 /* 5021 * Two packets outstanding, thresh should be (2*srtt) + 5022 * possible inter-packet delay (if any). 5023 */ 5024 uint32_t inter_gap = 0; 5025 int idx, nidx; 5026 5027 idx = rsm->r_rtr_cnt - 1; 5028 nidx = prsm->r_rtr_cnt - 1; 5029 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5030 /* Yes it was sent later (or at the same time) */ 5031 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5032 } 5033 thresh += inter_gap; 5034 } else if (len <= segsiz) { 5035 /* 5036 * Possibly compensate for delayed-ack. 5037 */ 5038 uint32_t alt_thresh; 5039 5040 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5041 if (alt_thresh > thresh) 5042 thresh = alt_thresh; 5043 } 5044 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5045 /* 2.2 behavior */ 5046 if (len <= segsiz) { 5047 uint32_t alt_thresh; 5048 /* 5049 * Compensate for delayed-ack with the d-ack time. 5050 */ 5051 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5052 if (alt_thresh > thresh) 5053 thresh = alt_thresh; 5054 } 5055 } 5056 /* Not above an RTO */ 5057 if (thresh > tp->t_rxtcur) { 5058 thresh = tp->t_rxtcur; 5059 } 5060 /* Not above a RTO max */ 5061 if (thresh > rack_rto_max) { 5062 thresh = rack_rto_max; 5063 } 5064 /* Apply user supplied min TLP */ 5065 if (thresh < rack_tlp_min) { 5066 thresh = rack_tlp_min; 5067 } 5068 return (thresh); 5069 } 5070 5071 static uint32_t 5072 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5073 { 5074 /* 5075 * We want the rack_rtt which is the 5076 * last rtt we measured. However if that 5077 * does not exist we fallback to the srtt (which 5078 * we probably will never do) and then as a last 5079 * resort we use RACK_INITIAL_RTO if no srtt is 5080 * yet set. 
5081 */ 5082 if (rack->rc_rack_rtt) 5083 return (rack->rc_rack_rtt); 5084 else if (tp->t_srtt == 0) 5085 return (RACK_INITIAL_RTO); 5086 return (tp->t_srtt); 5087 } 5088 5089 static struct rack_sendmap * 5090 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5091 { 5092 /* 5093 * Check to see that we don't need to fall into recovery. We will 5094 * need to do so if our oldest transmit is past the time we should 5095 * have had an ack. 5096 */ 5097 struct tcp_rack *rack; 5098 struct rack_sendmap *rsm; 5099 int32_t idx; 5100 uint32_t srtt, thresh; 5101 5102 rack = (struct tcp_rack *)tp->t_fb_ptr; 5103 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5104 return (NULL); 5105 } 5106 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5107 if (rsm == NULL) 5108 return (NULL); 5109 5110 5111 if (rsm->r_flags & RACK_ACKED) { 5112 rsm = rack_find_lowest_rsm(rack); 5113 if (rsm == NULL) 5114 return (NULL); 5115 } 5116 idx = rsm->r_rtr_cnt - 1; 5117 srtt = rack_grab_rtt(tp, rack); 5118 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5119 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5120 return (NULL); 5121 } 5122 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5123 return (NULL); 5124 } 5125 /* Ok if we reach here we are over-due and this guy can be sent */ 5126 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5127 return (rsm); 5128 } 5129 5130 static uint32_t 5131 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5132 { 5133 int32_t t; 5134 int32_t tt; 5135 uint32_t ret_val; 5136 5137 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5138 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5139 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5140 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5141 ret_val = (uint32_t)tt; 5142 return (ret_val); 5143 } 5144 5145 static uint32_t 5146 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5147 { 5148 /* 5149 * Start the FR timer, we do this based on getting the first one in 5150 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5151 * events we need to stop the running timer (if its running) before 5152 * starting the new one. 5153 */ 5154 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5155 uint32_t srtt_cur; 5156 int32_t idx; 5157 int32_t is_tlp_timer = 0; 5158 struct rack_sendmap *rsm; 5159 5160 if (rack->t_timers_stopped) { 5161 /* All timers have been stopped none are to run */ 5162 return (0); 5163 } 5164 if (rack->rc_in_persist) { 5165 /* We can't start any timer in persists */ 5166 return (rack_get_persists_timer_val(tp, rack)); 5167 } 5168 rack->rc_on_min_to = 0; 5169 if ((tp->t_state < TCPS_ESTABLISHED) || 5170 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5171 goto activate_rxt; 5172 } 5173 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5174 if ((rsm == NULL) || sup_rack) { 5175 /* Nothing on the send map or no rack */ 5176 activate_rxt: 5177 time_since_sent = 0; 5178 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5179 if (rsm) { 5180 /* 5181 * Should we discount the RTX timer any? 5182 * 5183 * We want to discount it the smallest amount. 5184 * If a timer (Rack/TLP or RXT) has gone off more 5185 * recently thats the discount we want to use (now - timer time). 5186 * If the retransmit of the oldest packet was more recent then 5187 * we want to use that (now - oldest-packet-last_transmit_time). 
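 * For example (illustrative numbers): if t_rxtcur is 400000 usec and
 * the more recent of those two timestamps is 150000 usec old, we arm
 * the RXT timer for 250000 usec, so the total elapsed time at expiry
 * still approximates t_rxtcur.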
5188 * 5189 */ 5190 idx = rsm->r_rtr_cnt - 1; 5191 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5192 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5193 else 5194 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5195 if (TSTMP_GT(cts, tstmp_touse)) 5196 time_since_sent = cts - tstmp_touse; 5197 } 5198 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5199 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5200 to = tp->t_rxtcur; 5201 if (to > time_since_sent) 5202 to -= time_since_sent; 5203 else 5204 to = rack->r_ctl.rc_min_to; 5205 if (to == 0) 5206 to = 1; 5207 /* Special case for KEEPINIT */ 5208 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5209 (TP_KEEPINIT(tp) != 0) && 5210 rsm) { 5211 /* 5212 * We have to put a ceiling on the rxt timer 5213 * of the keep-init timeout. 5214 */ 5215 uint32_t max_time, red; 5216 5217 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5218 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5219 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5220 if (red < max_time) 5221 max_time -= red; 5222 else 5223 max_time = 1; 5224 } 5225 /* Reduce timeout to the keep value if needed */ 5226 if (max_time < to) 5227 to = max_time; 5228 } 5229 return (to); 5230 } 5231 return (0); 5232 } 5233 if (rsm->r_flags & RACK_ACKED) { 5234 rsm = rack_find_lowest_rsm(rack); 5235 if (rsm == NULL) { 5236 /* No lowest? */ 5237 goto activate_rxt; 5238 } 5239 } 5240 if (rack->sack_attack_disable) { 5241 /* 5242 * We don't want to do 5243 * any TLP's if you are an attacker. 5244 * Though if you are doing what 5245 * is expected you may still have 5246 * SACK-PASSED marks. 5247 */ 5248 goto activate_rxt; 5249 } 5250 /* Convert from ms to usecs */ 5251 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5252 if ((tp->t_flags & TF_SENTFIN) && 5253 ((tp->snd_max - tp->snd_una) == 1) && 5254 (rsm->r_flags & RACK_HAS_FIN)) { 5255 /* 5256 * We don't start a rack timer if all we have is a 5257 * FIN outstanding. 5258 */ 5259 goto activate_rxt; 5260 } 5261 if ((rack->use_rack_rr == 0) && 5262 (IN_FASTRECOVERY(tp->t_flags)) && 5263 (rack->rack_no_prr == 0) && 5264 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5265 /* 5266 * We are not cheating, in recovery and 5267 * not enough ack's to yet get our next 5268 * retransmission out. 5269 * 5270 * Note that classified attackers do not 5271 * get to use the rack-cheat. 5272 */ 5273 goto activate_tlp; 5274 } 5275 srtt = rack_grab_rtt(tp, rack); 5276 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5277 idx = rsm->r_rtr_cnt - 1; 5278 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5279 if (SEQ_GEQ(exp, cts)) { 5280 to = exp - cts; 5281 if (to < rack->r_ctl.rc_min_to) { 5282 to = rack->r_ctl.rc_min_to; 5283 if (rack->r_rr_config == 3) 5284 rack->rc_on_min_to = 1; 5285 } 5286 } else { 5287 to = rack->r_ctl.rc_min_to; 5288 if (rack->r_rr_config == 3) 5289 rack->rc_on_min_to = 1; 5290 } 5291 } else { 5292 /* Ok we need to do a TLP not RACK */ 5293 activate_tlp: 5294 if ((rack->rc_tlp_in_progress != 0) && 5295 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5296 /* 5297 * The previous send was a TLP and we have sent 5298 * N TLP's without sending new data. 5299 */ 5300 goto activate_rxt; 5301 } 5302 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5303 if (rsm == NULL) { 5304 /* We found no rsm to TLP with. 
*/ 5305 goto activate_rxt; 5306 } 5307 if (rsm->r_flags & RACK_HAS_FIN) { 5308 /* If it's a FIN we don't do TLP */ 5309 rsm = NULL; 5310 goto activate_rxt; 5311 } 5312 idx = rsm->r_rtr_cnt - 1; 5313 time_since_sent = 0; 5314 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5315 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5316 else 5317 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5318 if (TSTMP_GT(cts, tstmp_touse)) 5319 time_since_sent = cts - tstmp_touse; 5320 is_tlp_timer = 1; 5321 if (tp->t_srtt) { 5322 if ((rack->rc_srtt_measure_made == 0) && 5323 (tp->t_srtt == 1)) { 5324 /* 5325 * If another stack has run and set srtt to 1, 5326 * then the srtt was 0, so let's use the initial. 5327 */ 5328 srtt = RACK_INITIAL_RTO; 5329 } else { 5330 srtt_cur = tp->t_srtt; 5331 srtt = srtt_cur; 5332 } 5333 } else 5334 srtt = RACK_INITIAL_RTO; 5335 /* 5336 * If the SRTT is not keeping up and the 5337 * rack RTT has spiked we want to use 5338 * the last RTT not the smoothed one. 5339 */ 5340 if (rack_tlp_use_greater && 5341 tp->t_srtt && 5342 (srtt < rack_grab_rtt(tp, rack))) { 5343 srtt = rack_grab_rtt(tp, rack); 5344 } 5345 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5346 if (thresh > time_since_sent) { 5347 to = thresh - time_since_sent; 5348 } else { 5349 to = rack->r_ctl.rc_min_to; 5350 rack_log_alt_to_to_cancel(rack, 5351 thresh, /* flex1 */ 5352 time_since_sent, /* flex2 */ 5353 tstmp_touse, /* flex3 */ 5354 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5355 (uint32_t)rsm->r_tim_lastsent[idx], 5356 srtt, 5357 idx, 99); 5358 } 5359 if (to < rack_tlp_min) { 5360 to = rack_tlp_min; 5361 } 5362 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5363 /* 5364 * If the TLP time works out to be larger than the max 5365 * RTO, let's not do TLP... just RTO. 5366 */ 5367 goto activate_rxt; 5368 } 5369 } 5370 if (is_tlp_timer == 0) { 5371 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5372 } else { 5373 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5374 } 5375 if (to == 0) 5376 to = 1; 5377 return (to); 5378 } 5379 5380 static void 5381 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5382 { 5383 if (rack->rc_in_persist == 0) { 5384 if (tp->t_flags & TF_GPUTINPROG) { 5385 /* 5386 * Stop the goodput measurement now; calling the 5387 * measurement function clears the flag.
5388 */ 5389 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5390 RACK_QUALITY_PERSIST); 5391 } 5392 #ifdef NETFLIX_SHARED_CWND 5393 if (rack->r_ctl.rc_scw) { 5394 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5395 rack->rack_scwnd_is_idle = 1; 5396 } 5397 #endif 5398 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5399 if (rack->r_ctl.rc_went_idle_time == 0) 5400 rack->r_ctl.rc_went_idle_time = 1; 5401 rack_timer_cancel(tp, rack, cts, __LINE__); 5402 rack->r_ctl.persist_lost_ends = 0; 5403 rack->probe_not_answered = 0; 5404 rack->forced_ack = 0; 5405 tp->t_rxtshift = 0; 5406 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5407 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5408 rack->rc_in_persist = 1; 5409 } 5410 } 5411 5412 static void 5413 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5414 { 5415 if (tcp_in_hpts(rack->rc_inp)) { 5416 tcp_hpts_remove(rack->rc_inp); 5417 rack->r_ctl.rc_hpts_flags = 0; 5418 } 5419 #ifdef NETFLIX_SHARED_CWND 5420 if (rack->r_ctl.rc_scw) { 5421 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5422 rack->rack_scwnd_is_idle = 0; 5423 } 5424 #endif 5425 if (rack->rc_gp_dyn_mul && 5426 (rack->use_fixed_rate == 0) && 5427 (rack->rc_always_pace)) { 5428 /* 5429 * Do we count this as if a probe-rtt just 5430 * finished? 5431 */ 5432 uint32_t time_idle, idle_min; 5433 5434 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5435 idle_min = rack_min_probertt_hold; 5436 if (rack_probertt_gpsrtt_cnt_div) { 5437 uint64_t extra; 5438 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5439 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5440 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5441 idle_min += (uint32_t)extra; 5442 } 5443 if (time_idle >= idle_min) { 5444 /* Yes, we count it as a probe-rtt. 
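 * If we were not already in probe-rtt, the probe-rtt bookkeeping
 * timestamps are simply re-seeded to now; if we were, the idle time
 * in effect stands in for the probe-rtt hold and we exit probe-rtt
 * right away.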
*/ 5445 uint32_t us_cts; 5446 5447 us_cts = tcp_get_usecs(NULL); 5448 if (rack->in_probe_rtt == 0) { 5449 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5450 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5451 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5452 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5453 } else { 5454 rack_exit_probertt(rack, us_cts); 5455 } 5456 } 5457 } 5458 rack->rc_in_persist = 0; 5459 rack->r_ctl.rc_went_idle_time = 0; 5460 tp->t_rxtshift = 0; 5461 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5462 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5463 rack->r_ctl.rc_agg_delayed = 0; 5464 rack->r_early = 0; 5465 rack->r_late = 0; 5466 rack->r_ctl.rc_agg_early = 0; 5467 } 5468 5469 static void 5470 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5471 struct hpts_diag *diag, struct timeval *tv) 5472 { 5473 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5474 union tcp_log_stackspecific log; 5475 5476 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5477 log.u_bbr.flex1 = diag->p_nxt_slot; 5478 log.u_bbr.flex2 = diag->p_cur_slot; 5479 log.u_bbr.flex3 = diag->slot_req; 5480 log.u_bbr.flex4 = diag->inp_hptsslot; 5481 log.u_bbr.flex5 = diag->slot_remaining; 5482 log.u_bbr.flex6 = diag->need_new_to; 5483 log.u_bbr.flex7 = diag->p_hpts_active; 5484 log.u_bbr.flex8 = diag->p_on_min_sleep; 5485 /* Hijack other fields as needed */ 5486 log.u_bbr.epoch = diag->have_slept; 5487 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5488 log.u_bbr.pkts_out = diag->co_ret; 5489 log.u_bbr.applimited = diag->hpts_sleep_time; 5490 log.u_bbr.delivered = diag->p_prev_slot; 5491 log.u_bbr.inflight = diag->p_runningslot; 5492 log.u_bbr.bw_inuse = diag->wheel_slot; 5493 log.u_bbr.rttProp = diag->wheel_cts; 5494 log.u_bbr.timeStamp = cts; 5495 log.u_bbr.delRate = diag->maxslots; 5496 log.u_bbr.cur_del_rate = diag->p_curtick; 5497 log.u_bbr.cur_del_rate <<= 32; 5498 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5499 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5500 &rack->rc_inp->inp_socket->so_rcv, 5501 &rack->rc_inp->inp_socket->so_snd, 5502 BBR_LOG_HPTSDIAG, 0, 5503 0, &log, false, tv); 5504 } 5505 5506 } 5507 5508 static void 5509 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5510 { 5511 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5512 union tcp_log_stackspecific log; 5513 struct timeval tv; 5514 5515 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5516 log.u_bbr.flex1 = sb->sb_flags; 5517 log.u_bbr.flex2 = len; 5518 log.u_bbr.flex3 = sb->sb_state; 5519 log.u_bbr.flex8 = type; 5520 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5521 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5522 &rack->rc_inp->inp_socket->so_rcv, 5523 &rack->rc_inp->inp_socket->so_snd, 5524 TCP_LOG_SB_WAKE, 0, 5525 len, &log, false, &tv); 5526 } 5527 } 5528 5529 static void 5530 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5531 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5532 { 5533 struct hpts_diag diag; 5534 struct inpcb *inp; 5535 struct timeval tv; 5536 uint32_t delayed_ack = 0; 5537 uint32_t hpts_timeout; 5538 uint32_t entry_slot = slot; 5539 uint8_t stopped; 5540 uint32_t left = 0; 5541 uint32_t us_cts; 5542 5543 inp = tp->t_inpcb; 5544 if ((tp->t_state == TCPS_CLOSED) || 5545 (tp->t_state == TCPS_LISTEN)) { 5546 return; 5547 } 5548 if (tcp_in_hpts(inp)) { 5549 /* Already on the pacer */ 5550 return; 5551 } 5552 stopped 
= rack->rc_tmr_stopped; 5553 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5554 left = rack->r_ctl.rc_timer_exp - cts; 5555 } 5556 rack->r_ctl.rc_timer_exp = 0; 5557 rack->r_ctl.rc_hpts_flags = 0; 5558 us_cts = tcp_get_usecs(&tv); 5559 /* Now early/late accounting */ 5560 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5561 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5562 /* 5563 * We have an early carry over set, 5564 * we can always add more time so we 5565 * can always make this compensation. 5566 * 5567 * Note if acks are allowed to wake us, do not 5568 * penalize the next timer for being awoken 5569 * by an ack, aka the rc_agg_early (non-paced mode). 5570 */ 5571 slot += rack->r_ctl.rc_agg_early; 5572 rack->r_early = 0; 5573 rack->r_ctl.rc_agg_early = 0; 5574 } 5575 if (rack->r_late) { 5576 /* 5577 * This is harder, we can 5578 * compensate some but it 5579 * really depends on what 5580 * the current pacing time is. 5581 */ 5582 if (rack->r_ctl.rc_agg_delayed >= slot) { 5583 /* 5584 * We can't compensate for it all. 5585 * And we have to have some time 5586 * on the clock. We always have a min 5587 * 10 slots (10 x 10 i.e. 100 usecs). 5588 */ 5589 if (slot <= HPTS_TICKS_PER_SLOT) { 5590 /* We gain delay */ 5591 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5592 slot = HPTS_TICKS_PER_SLOT; 5593 } else { 5594 /* We take off some */ 5595 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5596 slot = HPTS_TICKS_PER_SLOT; 5597 } 5598 } else { 5599 slot -= rack->r_ctl.rc_agg_delayed; 5600 rack->r_ctl.rc_agg_delayed = 0; 5601 /* Make sure we have 100 useconds at minimum */ 5602 if (slot < HPTS_TICKS_PER_SLOT) { 5603 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5604 slot = HPTS_TICKS_PER_SLOT; 5605 } 5606 if (rack->r_ctl.rc_agg_delayed == 0) 5607 rack->r_late = 0; 5608 } 5609 } 5610 if (slot) { 5611 /* We are pacing too */ 5612 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5613 } 5614 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5615 #ifdef NETFLIX_EXP_DETECTION 5616 if (rack->sack_attack_disable && 5617 (slot < tcp_sad_pacing_interval)) { 5618 /* 5619 * We have a potential attacker on 5620 * the line. We have possibly some 5621 * (or no) pacing time set. We want to 5622 * slow down the processing of sacks by some 5623 * amount (if it is an attacker). Set the default 5624 * slot for attackers in place (unless the original 5625 * interval is longer). It's stored in 5626 * micro-seconds, so let's convert to msecs. 5627 */ 5628 slot = tcp_sad_pacing_interval; 5629 } 5630 #endif 5631 if (tp->t_flags & TF_DELACK) { 5632 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5633 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5634 } 5635 if (delayed_ack && ((hpts_timeout == 0) || 5636 (delayed_ack < hpts_timeout))) 5637 hpts_timeout = delayed_ack; 5638 else 5639 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5640 /* 5641 * If no timers are going to run and we will fall off the hptsi 5642 * wheel, we resort to a keep-alive timer if it's configured. 5643 */ 5644 if ((hpts_timeout == 0) && 5645 (slot == 0)) { 5646 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5647 (tp->t_state <= TCPS_CLOSING)) { 5648 /* 5649 * Ok we have no timer (persists, rack, tlp, rxt or 5650 * del-ack), we don't have segments being paced. So 5651 * all that is left is the keepalive timer.
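 * (Without it we would fall off the hpts wheel with nothing armed,
 * and only an inbound packet could ever wake this connection again,
 * so the keep-alive doubles as the back-stop wakeup.)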
5652 */ 5653 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5654 /* Get the established keep-alive time */ 5655 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5656 } else { 5657 /* 5658 * Get the initial setup keep-alive time, 5659 * note that this is probably not going to 5660 * happen, since rack will be running a rxt timer 5661 * if a SYN of some sort is outstanding. It is 5662 * actually handled in rack_timeout_rxt(). 5663 */ 5664 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5665 } 5666 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5667 if (rack->in_probe_rtt) { 5668 /* 5669 * We want to instead not wake up a long time from 5670 * now but to wake up about the time we would 5671 * exit probe-rtt and initiate a keep-alive ack. 5672 * This will get us out of probe-rtt and update 5673 * our min-rtt. 5674 */ 5675 hpts_timeout = rack_min_probertt_hold; 5676 } 5677 } 5678 } 5679 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5680 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5681 /* 5682 * RACK, TLP, persists and RXT timers all are restartable 5683 * based on actions input .. i.e we received a packet (ack 5684 * or sack) and that changes things (rw, or snd_una etc). 5685 * Thus we can restart them with a new value. For 5686 * keep-alive, delayed_ack we keep track of what was left 5687 * and restart the timer with a smaller value. 5688 */ 5689 if (left < hpts_timeout) 5690 hpts_timeout = left; 5691 } 5692 if (hpts_timeout) { 5693 /* 5694 * Hack alert for now we can't time-out over 2,147,483 5695 * seconds (a bit more than 596 hours), which is probably ok 5696 * :). 5697 */ 5698 if (hpts_timeout > 0x7ffffffe) 5699 hpts_timeout = 0x7ffffffe; 5700 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5701 } 5702 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5703 if ((rack->gp_ready == 0) && 5704 (rack->use_fixed_rate == 0) && 5705 (hpts_timeout < slot) && 5706 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5707 /* 5708 * We have no good estimate yet for the 5709 * old clunky burst mitigation or the 5710 * real pacing. And the tlp or rxt is smaller 5711 * than the pacing calculation. Lets not 5712 * pace that long since we know the calculation 5713 * so far is not accurate. 5714 */ 5715 slot = hpts_timeout; 5716 } 5717 /** 5718 * Turn off all the flags for queuing by default. The 5719 * flags have important meanings to what happens when 5720 * LRO interacts with the transport. Most likely (by default now) 5721 * mbuf_queueing and ack compression are on. So the transport 5722 * has a couple of flags that control what happens (if those 5723 * are not on then these flags won't have any effect since it 5724 * won't go through the queuing LRO path). 5725 * 5726 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5727 * pacing output, so don't disturb. But 5728 * it also means LRO can wake me if there 5729 * is a SACK arrival. 5730 * 5731 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5732 * with the above flag (QUEUE_READY) and 5733 * when present it says don't even wake me 5734 * if a SACK arrives. 5735 * 5736 * The idea behind these flags is that if we are pacing we 5737 * set the MBUF_QUEUE_READY and only get woken up if 5738 * a SACK arrives (which could change things) or if 5739 * our pacing timer expires. If, however, we have a rack 5740 * timer running, then we don't even want a sack to wake 5741 * us since the rack timer has to expire before we can send. 
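 *
 * To summarize the settings made below: a pacing timer alone sets
 * INP_MBUF_QUEUE_READY; a pacing timer plus a Rack timer (with
 * r_rr_config != 3) additionally sets INP_DONT_SACK_QUEUE; and when
 * rc_ack_can_sendout_data is on, both flags are cleared again.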
5742 * 5743 * Other cases should usually have none of the flags set 5744 * so LRO can call into us. 5745 */ 5746 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5747 if (slot) { 5748 rack->r_ctl.rc_last_output_to = us_cts + slot; 5749 /* 5750 * A pacing timer (slot) is being set, in 5751 * such a case we cannot send (we are blocked by 5752 * the timer). So lets tell LRO that it should not 5753 * wake us unless there is a SACK. Note this only 5754 * will be effective if mbuf queueing is on or 5755 * compressed acks are being processed. 5756 */ 5757 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5758 /* 5759 * But wait if we have a Rack timer running 5760 * even a SACK should not disturb us (with 5761 * the exception of r_rr_config 3). 5762 */ 5763 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5764 (rack->r_rr_config != 3)) 5765 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5766 if (rack->rc_ack_can_sendout_data) { 5767 /* 5768 * Ahh but wait, this is that special case 5769 * where the pacing timer can be disturbed 5770 * backout the changes (used for non-paced 5771 * burst limiting). 5772 */ 5773 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5774 } 5775 if ((rack->use_rack_rr) && 5776 (rack->r_rr_config < 2) && 5777 ((hpts_timeout) && (hpts_timeout < slot))) { 5778 /* 5779 * Arrange for the hpts to kick back in after the 5780 * t-o if the t-o does not cause a send. 5781 */ 5782 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5783 __LINE__, &diag); 5784 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5785 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5786 } else { 5787 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 5788 __LINE__, &diag); 5789 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5790 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5791 } 5792 } else if (hpts_timeout) { 5793 /* 5794 * With respect to inp_flags2 here, lets let any new acks wake 5795 * us up here. Since we are not pacing (no pacing timer), output 5796 * can happen so we should let it. If its a Rack timer, then any inbound 5797 * packet probably won't change the sending (we will be blocked) 5798 * but it may change the prr stats so letting it in (the set defaults 5799 * at the start of this block) are good enough. 5800 */ 5801 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5802 __LINE__, &diag); 5803 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5804 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5805 } else { 5806 /* No timer starting */ 5807 #ifdef INVARIANTS 5808 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5809 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5810 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5811 } 5812 #endif 5813 } 5814 rack->rc_tmr_stopped = 0; 5815 if (slot) 5816 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5817 } 5818 5819 /* 5820 * RACK Timer, here we simply do logging and house keeping. 5821 * the normal rack_output() function will call the 5822 * appropriate thing to check if we need to do a RACK retransmit. 5823 * We return 1, saying don't proceed with rack_output only 5824 * when all timers have been stopped (destroyed PCB?). 5825 */ 5826 static int 5827 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5828 { 5829 /* 5830 * This timer simply provides an internal trigger to send out data. 
5831 * The check_recovery_mode call will see if there are needed 5832 * retransmissions, if so we will enter fast-recovery. The output 5833 * call may or may not do the same thing depending on sysctl 5834 * settings. 5835 */ 5836 struct rack_sendmap *rsm; 5837 5838 if (tp->t_timers->tt_flags & TT_STOPPED) { 5839 return (1); 5840 } 5841 counter_u64_add(rack_to_tot, 1); 5842 if (rack->r_state && (rack->r_state != tp->t_state)) 5843 rack_set_state(tp, rack); 5844 rack->rc_on_min_to = 0; 5845 rsm = rack_check_recovery_mode(tp, cts); 5846 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5847 if (rsm) { 5848 rack->r_ctl.rc_resend = rsm; 5849 rack->r_timer_override = 1; 5850 if (rack->use_rack_rr) { 5851 /* 5852 * Don't accumulate extra pacing delay 5853 * we are allowing the rack timer to 5854 * over-ride pacing i.e. rrr takes precedence 5855 * if the pacing interval is longer than the rrr 5856 * time (in other words we get the min pacing 5857 * time versus rrr pacing time). 5858 */ 5859 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5860 } 5861 } 5862 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5863 if (rsm == NULL) { 5864 /* restart a timer and return 1 */ 5865 rack_start_hpts_timer(rack, tp, cts, 5866 0, 0, 0); 5867 return (1); 5868 } 5869 return (0); 5870 } 5871 5872 static void 5873 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5874 { 5875 if (rsm->m->m_len > rsm->orig_m_len) { 5876 /* 5877 * Mbuf grew, caused by sbcompress, our offset does 5878 * not change. 5879 */ 5880 rsm->orig_m_len = rsm->m->m_len; 5881 } else if (rsm->m->m_len < rsm->orig_m_len) { 5882 /* 5883 * Mbuf shrank, trimmed off the top by an ack, our 5884 * offset changes. 5885 */ 5886 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5887 rsm->orig_m_len = rsm->m->m_len; 5888 } 5889 } 5890 5891 static void 5892 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5893 { 5894 struct mbuf *m; 5895 uint32_t soff; 5896 5897 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5898 /* Fix up the orig_m_len and possibly the mbuf offset */ 5899 rack_adjust_orig_mlen(src_rsm); 5900 } 5901 m = src_rsm->m; 5902 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5903 while (soff >= m->m_len) { 5904 /* Move out past this mbuf */ 5905 soff -= m->m_len; 5906 m = m->m_next; 5907 KASSERT((m != NULL), 5908 ("rsm:%p nrsm:%p hit at soff:%u null m", 5909 src_rsm, rsm, soff)); 5910 } 5911 rsm->m = m; 5912 rsm->soff = soff; 5913 rsm->orig_m_len = m->m_len; 5914 } 5915 5916 static __inline void 5917 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5918 struct rack_sendmap *rsm, uint32_t start) 5919 { 5920 int idx; 5921 5922 nrsm->r_start = start; 5923 nrsm->r_end = rsm->r_end; 5924 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 5925 nrsm->r_flags = rsm->r_flags; 5926 nrsm->r_dupack = rsm->r_dupack; 5927 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 5928 nrsm->r_rtr_bytes = 0; 5929 nrsm->r_fas = rsm->r_fas; 5930 rsm->r_end = nrsm->r_start; 5931 nrsm->r_just_ret = rsm->r_just_ret; 5932 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 5933 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 5934 } 5935 /* Now if we have SYN flag we keep it on the left edge */ 5936 if (nrsm->r_flags & RACK_HAS_SYN) 5937 nrsm->r_flags &= ~RACK_HAS_SYN; 5938 /* Now if we have a FIN flag we keep it on the right edge */ 5939 if (rsm->r_flags & RACK_HAS_FIN) 5940 rsm->r_flags &= ~RACK_HAS_FIN; 5941 /* Push bit must go to the right edge as well */ 5942 if (rsm->r_flags & RACK_HAD_PUSH) 5943 rsm->r_flags &= 
~RACK_HAD_PUSH; 5944 /* Clone over the state of the hw_tls flag */ 5945 nrsm->r_hw_tls = rsm->r_hw_tls; 5946 /* 5947 * Now we need to find nrsm's new location in the mbuf chain. 5948 * We basically calculate a new offset, which is soff + 5949 * how much is left in the original rsm. Then we walk out the mbuf 5950 * chain to find the right position; it may be the same mbuf 5951 * or maybe not. 5952 */ 5953 KASSERT(((rsm->m != NULL) || 5954 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 5955 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 5956 if (rsm->m) 5957 rack_setup_offset_for_rsm(rsm, nrsm); 5958 } 5959 5960 static struct rack_sendmap * 5961 rack_merge_rsm(struct tcp_rack *rack, 5962 struct rack_sendmap *l_rsm, 5963 struct rack_sendmap *r_rsm) 5964 { 5965 /* 5966 * We are merging two ack'd RSM's, 5967 * the l_rsm is on the left (lower seq 5968 * values) and the r_rsm is on the right 5969 * (higher seq value). The simplest way 5970 * to merge these is to move the right 5971 * one into the left. I don't think there 5972 * is any reason we need to try to find 5973 * the oldest (or last oldest retransmitted). 5974 */ 5975 #ifdef INVARIANTS 5976 struct rack_sendmap *rm; 5977 #endif 5978 rack_log_map_chg(rack->rc_tp, rack, NULL, 5979 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 5980 l_rsm->r_end = r_rsm->r_end; 5981 if (l_rsm->r_dupack < r_rsm->r_dupack) 5982 l_rsm->r_dupack = r_rsm->r_dupack; 5983 if (r_rsm->r_rtr_bytes) 5984 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 5985 if (r_rsm->r_in_tmap) { 5986 /* This really should not happen */ 5987 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 5988 r_rsm->r_in_tmap = 0; 5989 } 5990 5991 /* Now the flags */ 5992 if (r_rsm->r_flags & RACK_HAS_FIN) 5993 l_rsm->r_flags |= RACK_HAS_FIN; 5994 if (r_rsm->r_flags & RACK_TLP) 5995 l_rsm->r_flags |= RACK_TLP; 5996 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 5997 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 5998 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 5999 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6000 /* 6001 * If both are app-limited then let the 6002 * free lower the count. If right is app 6003 * limited and left is not, transfer. 6004 */ 6005 l_rsm->r_flags |= RACK_APP_LIMITED; 6006 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6007 if (r_rsm == rack->r_ctl.rc_first_appl) 6008 rack->r_ctl.rc_first_appl = l_rsm; 6009 } 6010 #ifndef INVARIANTS 6011 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6012 #else 6013 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6014 if (rm != r_rsm) { 6015 panic("removing head in rack:%p rsm:%p rm:%p", 6016 rack, r_rsm, rm); 6017 } 6018 #endif 6019 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6020 /* Transfer the split limit to the map we free */ 6021 r_rsm->r_limit_type = l_rsm->r_limit_type; 6022 l_rsm->r_limit_type = 0; 6023 } 6024 rack_free(rack, r_rsm); 6025 return (l_rsm); 6026 } 6027 6028 /* 6029 * TLP Timer, here we simply set up what segment we want to 6030 * have the TLP expire on, the normal rack_output() will then 6031 * send it out. 6032 * 6033 * We return 1, saying don't proceed with rack_output only 6034 * when all timers have been stopped (destroyed PCB?). 6035 */ 6036 static int 6037 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6038 { 6039 /* 6040 * Tail Loss Probe.
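 *
 * Roughly what happens below: if new data is available and fits in
 * the peer's window we arrange to send one more MSS of it; otherwise
 * we pick the last (or, with rack_always_send_oldest, the oldest)
 * un-acked segment, splitting it down to one MSS if needed, and let
 * rack_output() transmit it.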
6041 */ 6042 struct rack_sendmap *rsm = NULL; 6043 #ifdef INVARIANTS 6044 struct rack_sendmap *insret; 6045 #endif 6046 struct socket *so; 6047 uint32_t amm; 6048 uint32_t out, avail; 6049 int collapsed_win = 0; 6050 6051 if (tp->t_timers->tt_flags & TT_STOPPED) { 6052 return (1); 6053 } 6054 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6055 /* It's not time yet */ 6056 return (0); 6057 } 6058 if (ctf_progress_timeout_check(tp, true)) { 6059 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6060 return (-ETIMEDOUT); /* tcp_drop() */ 6061 } 6062 /* 6063 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6064 * need to figure out how to force a full MSS segment out. 6065 */ 6066 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6067 rack->r_ctl.retran_during_recovery = 0; 6068 rack->r_ctl.dsack_byte_cnt = 0; 6069 counter_u64_add(rack_tlp_tot, 1); 6070 if (rack->r_state && (rack->r_state != tp->t_state)) 6071 rack_set_state(tp, rack); 6072 so = tp->t_inpcb->inp_socket; 6073 avail = sbavail(&so->so_snd); 6074 out = tp->snd_max - tp->snd_una; 6075 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6076 /* special case, we need a retransmission */ 6077 collapsed_win = 1; 6078 goto need_retran; 6079 } 6080 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6081 rack->r_ctl.dsack_persist--; 6082 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6083 rack->r_ctl.num_dsack = 0; 6084 } 6085 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6086 } 6087 if ((tp->t_flags & TF_GPUTINPROG) && 6088 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6089 /* 6090 * If this is the second TLP in a row 6091 * and we are doing a measurement, 6092 * it's time to abandon the measurement. 6093 * Something is likely broken on 6094 * the client's network and measuring a 6095 * broken network does us no good. 6096 */ 6097 tp->t_flags &= ~TF_GPUTINPROG; 6098 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6099 rack->r_ctl.rc_gp_srtt /*flex1*/, 6100 tp->gput_seq, 6101 0, 0, 18, __LINE__, NULL, 0); 6102 } 6103 /* 6104 * Check our send-oldest-always setting, and if 6105 * there is an oldest to send, jump to need_retran. 6106 */ 6107 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6108 goto need_retran; 6109 6110 if (avail > out) { 6111 /* New data is available */ 6112 amm = avail - out; 6113 if (amm > ctf_fixed_maxseg(tp)) { 6114 amm = ctf_fixed_maxseg(tp); 6115 if ((amm + out) > tp->snd_wnd) { 6116 /* We are rwnd limited */ 6117 goto need_retran; 6118 } 6119 } else if (amm < ctf_fixed_maxseg(tp)) { 6120 /* not enough to fill an MTU */ 6121 goto need_retran; 6122 } 6123 if (IN_FASTRECOVERY(tp->t_flags)) { 6124 /* Unlikely */ 6125 if (rack->rack_no_prr == 0) { 6126 if (out + amm <= tp->snd_wnd) { 6127 rack->r_ctl.rc_prr_sndcnt = amm; 6128 rack->r_ctl.rc_tlp_new_data = amm; 6129 rack_log_to_prr(rack, 4, 0, __LINE__); 6130 } 6131 } else 6132 goto need_retran; 6133 } else { 6134 /* Set the send-new override */ 6135 if (out + amm <= tp->snd_wnd) 6136 rack->r_ctl.rc_tlp_new_data = amm; 6137 else 6138 goto need_retran; 6139 } 6140 rack->r_ctl.rc_tlpsend = NULL; 6141 counter_u64_add(rack_tlp_newdata, 1); 6142 goto send; 6143 } 6144 need_retran: 6145 /* 6146 * Ok we need to arrange the last un-acked segment to be re-sent, or 6147 * optionally the first un-acked segment.
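 * In the collapsed-window case we instead walk the map from the
 * highest sequence backwards for the last segment the peer still
 * accepted (i.e. not marked RACK_RWND_COLLAPSED), and fall back to
 * the lowest segment in the map if none qualifies.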
6148 */ 6149 if (collapsed_win == 0) { 6150 if (rack_always_send_oldest) 6151 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6152 else { 6153 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6154 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6155 rsm = rack_find_high_nonack(rack, rsm); 6156 } 6157 } 6158 if (rsm == NULL) { 6159 #ifdef TCP_BLACKBOX 6160 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6161 #endif 6162 goto out; 6163 } 6164 } else { 6165 /* 6166 * We must find the last segment 6167 * that was acceptable to the client. 6168 */ 6169 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6170 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6171 /* Found one */ 6172 break; 6173 } 6174 } 6175 if (rsm == NULL) { 6176 /* None? If so, send the first */ 6177 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6178 if (rsm == NULL) { 6179 #ifdef TCP_BLACKBOX 6180 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6181 #endif 6182 goto out; 6183 } 6184 } 6185 } 6186 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6187 /* 6188 * We need to split this last segment in two. 6189 */ 6190 struct rack_sendmap *nrsm; 6191 6192 nrsm = rack_alloc_full_limit(rack); 6193 if (nrsm == NULL) { 6194 /* 6195 * No memory to split, we will just exit and punt 6196 * off to the RXT timer. 6197 */ 6198 goto out; 6199 } 6200 rack_clone_rsm(rack, nrsm, rsm, 6201 (rsm->r_end - ctf_fixed_maxseg(tp))); 6202 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6203 #ifndef INVARIANTS 6204 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6205 #else 6206 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6207 if (insret != NULL) { 6208 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6209 nrsm, insret, rack, rsm); 6210 } 6211 #endif 6212 if (rsm->r_in_tmap) { 6213 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6214 nrsm->r_in_tmap = 1; 6215 } 6216 rsm = nrsm; 6217 } 6218 rack->r_ctl.rc_tlpsend = rsm; 6219 send: 6220 /* Make sure output path knows we are doing a TLP */ 6221 *doing_tlp = 1; 6222 rack->r_timer_override = 1; 6223 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6224 return (0); 6225 out: 6226 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6227 return (0); 6228 } 6229 6230 /* 6231 * Delayed ack Timer, here we simply need to set up the 6232 * ACK_NOW flag and remove the DELACK flag. From there 6233 * the output routine will send the ack out. 6234 * 6235 * We only return 1, saying don't proceed, if all timers 6236 * are stopped (destroyed PCB?). 6237 */ 6238 static int 6239 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6240 { 6241 if (tp->t_timers->tt_flags & TT_STOPPED) { 6242 return (1); 6243 } 6244 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6245 tp->t_flags &= ~TF_DELACK; 6246 tp->t_flags |= TF_ACKNOW; 6247 KMOD_TCPSTAT_INC(tcps_delack); 6248 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6249 return (0); 6250 } 6251 6252 /* 6253 * Persists timer, here we simply send the 6254 * same thing as a keepalive would: 6255 * the one byte send. 6256 * 6257 * We only return 1, saying don't proceed, if all timers 6258 * are stopped (destroyed PCB?).
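 *
 * (The probe itself is built with tcpip_maketemplate()/tcp_respond()
 * using sequence number snd_una - 1, just like the keep-alive probe,
 * which forces the peer to answer with an ACK carrying its current
 * window.)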
6259 */ 6260 static int 6261 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6262 { 6263 struct tcptemp *t_template; 6264 #ifdef INVARIANTS 6265 struct inpcb *inp = tp->t_inpcb; 6266 #endif 6267 int32_t retval = 1; 6268 6269 if (tp->t_timers->tt_flags & TT_STOPPED) { 6270 return (1); 6271 } 6272 if (rack->rc_in_persist == 0) 6273 return (0); 6274 if (ctf_progress_timeout_check(tp, false)) { 6275 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6276 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6277 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6278 return (-ETIMEDOUT); /* tcp_drop() */ 6279 } 6280 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6281 /* 6282 * Persistence timer into zero window. Force a byte to be output, if 6283 * possible. 6284 */ 6285 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6286 /* 6287 * Hack: if the peer is dead/unreachable, we do not time out if the 6288 * window is closed. After a full backoff, drop the connection if 6289 * the idle time (no responses to probes) reaches the maximum 6290 * backoff that we would use if retransmitting. 6291 */ 6292 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6293 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6294 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6295 KMOD_TCPSTAT_INC(tcps_persistdrop); 6296 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6297 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6298 retval = -ETIMEDOUT; /* tcp_drop() */ 6299 goto out; 6300 } 6301 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6302 tp->snd_una == tp->snd_max) 6303 rack_exit_persist(tp, rack, cts); 6304 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6305 /* 6306 * If the user has closed the socket then drop a persisting 6307 * connection after a much reduced timeout. 6308 */ 6309 if (tp->t_state > TCPS_CLOSE_WAIT && 6310 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6311 KMOD_TCPSTAT_INC(tcps_persistdrop); 6312 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6313 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6314 retval = -ETIMEDOUT; /* tcp_drop() */ 6315 goto out; 6316 } 6317 t_template = tcpip_maketemplate(rack->rc_inp); 6318 if (t_template) { 6319 /* only set it if we were answered */ 6320 if (rack->forced_ack == 0) { 6321 rack->forced_ack = 1; 6322 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6323 } else { 6324 rack->probe_not_answered = 1; 6325 counter_u64_add(rack_persists_loss, 1); 6326 rack->r_ctl.persist_lost_ends++; 6327 } 6328 counter_u64_add(rack_persists_sends, 1); 6329 tcp_respond(tp, t_template->tt_ipgen, 6330 &t_template->tt_t, (struct mbuf *)NULL, 6331 tp->rcv_nxt, tp->snd_una - 1, 0); 6332 /* This sends an ack */ 6333 if (tp->t_flags & TF_DELACK) 6334 tp->t_flags &= ~TF_DELACK; 6335 free(t_template, M_TEMP); 6336 } 6337 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6338 tp->t_rxtshift++; 6339 out: 6340 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6341 rack_start_hpts_timer(rack, tp, cts, 6342 0, 0, 0); 6343 return (retval); 6344 } 6345 6346 /* 6347 * If a keepalive goes off, we had no other timers 6348 * happening. We always return 1 here since this 6349 * routine either drops the connection or sends 6350 * out a segment with respond. 
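 *
 * (As a rough illustration with the stock defaults of a 2 hour
 * keep-idle and 8 probes at 75 second intervals, an unreachable idle
 * peer is dropped roughly 2 hours and 10 minutes after the last
 * segment was received.)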
6351 */ 6352 static int 6353 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6354 { 6355 struct tcptemp *t_template; 6356 struct inpcb *inp; 6357 6358 if (tp->t_timers->tt_flags & TT_STOPPED) { 6359 return (1); 6360 } 6361 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6362 inp = tp->t_inpcb; 6363 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6364 /* 6365 * Keep-alive timer went off; send something or drop connection if 6366 * idle for too long. 6367 */ 6368 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6369 if (tp->t_state < TCPS_ESTABLISHED) 6370 goto dropit; 6371 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6372 tp->t_state <= TCPS_CLOSING) { 6373 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6374 goto dropit; 6375 /* 6376 * Send a packet designed to force a response if the peer is 6377 * up and reachable: either an ACK if the connection is 6378 * still alive, or an RST if the peer has closed the 6379 * connection due to timeout or reboot. Using sequence 6380 * number tp->snd_una-1 causes the transmitted zero-length 6381 * segment to lie outside the receive window; by the 6382 * protocol spec, this requires the correspondent TCP to 6383 * respond. 6384 */ 6385 KMOD_TCPSTAT_INC(tcps_keepprobe); 6386 t_template = tcpip_maketemplate(inp); 6387 if (t_template) { 6388 if (rack->forced_ack == 0) { 6389 rack->forced_ack = 1; 6390 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6391 } else { 6392 rack->probe_not_answered = 1; 6393 } 6394 tcp_respond(tp, t_template->tt_ipgen, 6395 &t_template->tt_t, (struct mbuf *)NULL, 6396 tp->rcv_nxt, tp->snd_una - 1, 0); 6397 free(t_template, M_TEMP); 6398 } 6399 } 6400 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6401 return (1); 6402 dropit: 6403 KMOD_TCPSTAT_INC(tcps_keepdrops); 6404 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6405 return (-ETIMEDOUT); /* tcp_drop() */ 6406 } 6407 6408 /* 6409 * Retransmit helper function, clear up all the ack 6410 * flags and take care of important book keeping. 6411 */ 6412 static void 6413 rack_remxt_tmr(struct tcpcb *tp) 6414 { 6415 /* 6416 * The retransmit timer went off, all sack'd blocks must be 6417 * un-acked. 6418 */ 6419 struct rack_sendmap *rsm, *trsm = NULL; 6420 struct tcp_rack *rack; 6421 6422 rack = (struct tcp_rack *)tp->t_fb_ptr; 6423 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6424 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6425 if (rack->r_state && (rack->r_state != tp->t_state)) 6426 rack_set_state(tp, rack); 6427 /* 6428 * Ideally we would like to be able to 6429 * mark SACK-PASS on anything not acked here. 6430 * 6431 * However, if we do that we would burst out 6432 * all that data 1ms apart. This would be unwise, 6433 * so for now we will just let the normal rxt timer 6434 * and tlp timer take care of it. 6435 * 6436 * Also we really need to stick them back in sequence 6437 * order. This way we send in the proper order and any 6438 * sacks that come floating in will "re-ack" the data. 6439 * To do this we zap the tmap with an INIT and then 6440 * walk through and place every rsm in the RB tree 6441 * back in its seq ordered place. 
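 *
 * As a small example: if the tmap currently reads [C, A, B] in the order
 * the pieces were (re)transmitted, after the walk below it reads
 * [A, B, C], matching the sequence order of the RB tree.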
6442 */ 6443 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6444 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6445 rsm->r_dupack = 0; 6446 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6447 /* We must re-add it back to the tlist */ 6448 if (trsm == NULL) { 6449 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6450 } else { 6451 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6452 } 6453 rsm->r_in_tmap = 1; 6454 trsm = rsm; 6455 if (rsm->r_flags & RACK_ACKED) 6456 rsm->r_flags |= RACK_WAS_ACKED; 6457 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6458 rsm->r_flags |= RACK_MUST_RXT; 6459 } 6460 /* Clear the count (we just un-acked them) */ 6461 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6462 rack->r_ctl.rc_sacked = 0; 6463 rack->r_ctl.rc_sacklast = NULL; 6464 rack->r_ctl.rc_agg_delayed = 0; 6465 rack->r_early = 0; 6466 rack->r_ctl.rc_agg_early = 0; 6467 rack->r_late = 0; 6468 /* Clear the tlp rtx mark */ 6469 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6470 if (rack->r_ctl.rc_resend != NULL) 6471 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6472 rack->r_ctl.rc_prr_sndcnt = 0; 6473 rack_log_to_prr(rack, 6, 0, __LINE__); 6474 rack->r_timer_override = 1; 6475 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6476 #ifdef NETFLIX_EXP_DETECTION 6477 || (rack->sack_attack_disable != 0) 6478 #endif 6479 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6480 /* 6481 * For non-sack customers new data 6482 * needs to go out as retransmits until 6483 * we retransmit up to snd_max. 6484 */ 6485 rack->r_must_retran = 1; 6486 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6487 rack->r_ctl.rc_sacked); 6488 } 6489 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6490 } 6491 6492 static void 6493 rack_convert_rtts(struct tcpcb *tp) 6494 { 6495 if (tp->t_srtt > 1) { 6496 uint32_t val, frac; 6497 6498 val = tp->t_srtt >> TCP_RTT_SHIFT; 6499 frac = tp->t_srtt & 0x1f; 6500 tp->t_srtt = TICKS_2_USEC(val); 6501 /* 6502 * frac is the fractional part of the srtt (if any) 6503 * but its in ticks and every bit represents 6504 * 1/32nd of a hz. 6505 */ 6506 if (frac) { 6507 if (hz == 1000) { 6508 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6509 } else { 6510 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6511 } 6512 tp->t_srtt += frac; 6513 } 6514 } 6515 if (tp->t_rttvar) { 6516 uint32_t val, frac; 6517 6518 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6519 frac = tp->t_rttvar & 0x1f; 6520 tp->t_rttvar = TICKS_2_USEC(val); 6521 /* 6522 * frac is the fractional part of the srtt (if any) 6523 * but its in ticks and every bit represents 6524 * 1/32nd of a hz. 
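 *
 * Worked example with hz == 1000 and TCP_RTT_SCALE == 32: a fractional
 * part of 16 is 16/32 of a tick, and the expression below turns that
 * into 16 * 1000 / 32 = 500 microseconds.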
6525 */ 6526 if (frac) { 6527 if (hz == 1000) { 6528 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6529 } else { 6530 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6531 } 6532 tp->t_rttvar += frac; 6533 } 6534 } 6535 tp->t_rxtcur = RACK_REXMTVAL(tp); 6536 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6537 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6538 } 6539 if (tp->t_rxtcur > rack_rto_max) { 6540 tp->t_rxtcur = rack_rto_max; 6541 } 6542 } 6543 6544 static void 6545 rack_cc_conn_init(struct tcpcb *tp) 6546 { 6547 struct tcp_rack *rack; 6548 uint32_t srtt; 6549 6550 rack = (struct tcp_rack *)tp->t_fb_ptr; 6551 srtt = tp->t_srtt; 6552 cc_conn_init(tp); 6553 /* 6554 * Now convert to rack's internal format, 6555 * if required. 6556 */ 6557 if ((srtt == 0) && (tp->t_srtt != 0)) 6558 rack_convert_rtts(tp); 6559 /* 6560 * We want a chance to stay in slowstart as 6561 * we create a connection. TCP spec says that 6562 * initially ssthresh is infinite. For our 6563 * purposes that is the snd_wnd. 6564 */ 6565 if (tp->snd_ssthresh < tp->snd_wnd) { 6566 tp->snd_ssthresh = tp->snd_wnd; 6567 } 6568 /* 6569 * We also want to assure a IW worth of 6570 * data can get inflight. 6571 */ 6572 if (rc_init_window(rack) < tp->snd_cwnd) 6573 tp->snd_cwnd = rc_init_window(rack); 6574 } 6575 6576 /* 6577 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6578 * we will setup to retransmit the lowest seq number outstanding. 6579 */ 6580 static int 6581 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6582 { 6583 int32_t rexmt; 6584 int32_t retval = 0; 6585 bool isipv6; 6586 6587 if (tp->t_timers->tt_flags & TT_STOPPED) { 6588 return (1); 6589 } 6590 if ((tp->t_flags & TF_GPUTINPROG) && 6591 (tp->t_rxtshift)) { 6592 /* 6593 * We have had a second timeout 6594 * measurements on successive rxt's are not profitable. 6595 * It is unlikely to be of any use (the network is 6596 * broken or the client went away). 6597 */ 6598 tp->t_flags &= ~TF_GPUTINPROG; 6599 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6600 rack->r_ctl.rc_gp_srtt /*flex1*/, 6601 tp->gput_seq, 6602 0, 0, 18, __LINE__, NULL, 0); 6603 } 6604 if (ctf_progress_timeout_check(tp, false)) { 6605 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6606 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6607 return (-ETIMEDOUT); /* tcp_drop() */ 6608 } 6609 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6610 rack->r_ctl.retran_during_recovery = 0; 6611 rack->r_ctl.dsack_byte_cnt = 0; 6612 if (IN_FASTRECOVERY(tp->t_flags)) 6613 tp->t_flags |= TF_WASFRECOVERY; 6614 else 6615 tp->t_flags &= ~TF_WASFRECOVERY; 6616 if (IN_CONGRECOVERY(tp->t_flags)) 6617 tp->t_flags |= TF_WASCRECOVERY; 6618 else 6619 tp->t_flags &= ~TF_WASCRECOVERY; 6620 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6621 (tp->snd_una == tp->snd_max)) { 6622 /* Nothing outstanding .. nothing to do */ 6623 return (0); 6624 } 6625 if (rack->r_ctl.dsack_persist) { 6626 rack->r_ctl.dsack_persist--; 6627 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6628 rack->r_ctl.num_dsack = 0; 6629 } 6630 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6631 } 6632 /* 6633 * Rack can only run one timer at a time, so we cannot 6634 * run a KEEPINIT (gating SYN sending) and a retransmit 6635 * timer for the SYN. 
So if we are in a front state and 6636 * have a KEEPINIT timer we need to check the first transmit 6637 * against now to see if we have exceeded the KEEPINIT time 6638 * (if one is set). 6639 */ 6640 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6641 (TP_KEEPINIT(tp) != 0)) { 6642 struct rack_sendmap *rsm; 6643 6644 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6645 if (rsm) { 6646 /* Ok we have something outstanding to test keepinit with */ 6647 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6648 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6649 /* We have exceeded the KEEPINIT time */ 6650 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6651 goto drop_it; 6652 } 6653 } 6654 } 6655 /* 6656 * Retransmission timer went off. Message has not been acked within 6657 * retransmit interval. Back off to a longer retransmit interval 6658 * and retransmit one segment. 6659 */ 6660 rack_remxt_tmr(tp); 6661 if ((rack->r_ctl.rc_resend == NULL) || 6662 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6663 /* 6664 * If the rwnd collapsed on 6665 * the one we are retransmitting 6666 * it does not count against the 6667 * rxt count. 6668 */ 6669 tp->t_rxtshift++; 6670 } 6671 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6672 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6673 drop_it: 6674 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6675 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6676 /* XXXGL: previously t_softerror was casted to uint16_t */ 6677 MPASS(tp->t_softerror >= 0); 6678 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6679 goto out; /* tcp_drop() */ 6680 } 6681 if (tp->t_state == TCPS_SYN_SENT) { 6682 /* 6683 * If the SYN was retransmitted, indicate CWND to be limited 6684 * to 1 segment in cc_conn_init(). 6685 */ 6686 tp->snd_cwnd = 1; 6687 } else if (tp->t_rxtshift == 1) { 6688 /* 6689 * first retransmit; record ssthresh and cwnd so they can be 6690 * recovered if this turns out to be a "bad" retransmit. A 6691 * retransmit is considered "bad" if an ACK for this segment 6692 * is received within RTT/2 interval; the assumption here is 6693 * that the ACK was already in flight. See "On Estimating 6694 * End-to-End Network Path Properties" by Allman and Paxson 6695 * for more details. 6696 */ 6697 tp->snd_cwnd_prev = tp->snd_cwnd; 6698 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6699 tp->snd_recover_prev = tp->snd_recover; 6700 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6701 tp->t_flags |= TF_PREVVALID; 6702 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6703 tp->t_flags &= ~TF_PREVVALID; 6704 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6705 if ((tp->t_state == TCPS_SYN_SENT) || 6706 (tp->t_state == TCPS_SYN_RECEIVED)) 6707 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6708 else 6709 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6710 6711 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6712 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6713 /* 6714 * We enter the path for PLMTUD if connection is established or, if 6715 * connection is FIN_WAIT_1 status, reason for the last is that if 6716 * amount of data we send is very small, we could send it in couple 6717 * of packets and process straight to FIN. In that case we won't 6718 * catch ESTABLISHED state. 6719 */ 6720 #ifdef INET6 6721 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 
true : false; 6722 #else 6723 isipv6 = false; 6724 #endif 6725 if (((V_tcp_pmtud_blackhole_detect == 1) || 6726 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6727 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6728 ((tp->t_state == TCPS_ESTABLISHED) || 6729 (tp->t_state == TCPS_FIN_WAIT_1))) { 6730 /* 6731 * Idea here is that at each stage of mtu probe (usually, 6732 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6733 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6734 * should take care of that. 6735 */ 6736 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6737 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6738 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6739 tp->t_rxtshift % 2 == 0)) { 6740 /* 6741 * Enter Path MTU Black-hole Detection mechanism: - 6742 * Disable Path MTU Discovery (IP "DF" bit). - 6743 * Reduce MTU to lower value than what we negotiated 6744 * with peer. 6745 */ 6746 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6747 /* Record that we may have found a black hole. */ 6748 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6749 /* Keep track of previous MSS. */ 6750 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6751 } 6752 6753 /* 6754 * Reduce the MSS to blackhole value or to the 6755 * default in an attempt to retransmit. 6756 */ 6757 #ifdef INET6 6758 if (isipv6 && 6759 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6760 /* Use the sysctl tuneable blackhole MSS. */ 6761 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6762 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6763 } else if (isipv6) { 6764 /* Use the default MSS. */ 6765 tp->t_maxseg = V_tcp_v6mssdflt; 6766 /* 6767 * Disable Path MTU Discovery when we switch 6768 * to minmss. 6769 */ 6770 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6771 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6772 } 6773 #endif 6774 #if defined(INET6) && defined(INET) 6775 else 6776 #endif 6777 #ifdef INET 6778 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6779 /* Use the sysctl tuneable blackhole MSS. */ 6780 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6781 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6782 } else { 6783 /* Use the default MSS. */ 6784 tp->t_maxseg = V_tcp_mssdflt; 6785 /* 6786 * Disable Path MTU Discovery when we switch 6787 * to minmss. 6788 */ 6789 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6790 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6791 } 6792 #endif 6793 } else { 6794 /* 6795 * If further retransmissions are still unsuccessful 6796 * with a lowered MTU, maybe this isn't a blackhole 6797 * and we restore the previous MSS and blackhole 6798 * detection flags. The limit '6' is determined by 6799 * giving each probe stage (1448, 1188, 524) 2 6800 * chances to recover. 6801 */ 6802 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6803 (tp->t_rxtshift >= 6)) { 6804 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6805 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6806 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6807 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6808 } 6809 } 6810 } 6811 /* 6812 * Disable RFC1323 and SACK if we haven't got any response to 6813 * our third SYN to work-around some broken terminal servers 6814 * (most of which have hopefully been retired) that have bad VJ 6815 * header compression code which trashes TCP segments containing 6816 * unknown-to-them TCP options. 
6817 */ 6818 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6819 (tp->t_rxtshift == 3)) 6820 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6821 /* 6822 * If we backed off this far, our srtt estimate is probably bogus. 6823 * Clobber it so we'll take the next rtt measurement as our srtt; 6824 * move the current srtt into rttvar to keep the current retransmit 6825 * times until then. 6826 */ 6827 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6828 #ifdef INET6 6829 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 6830 in6_losing(tp->t_inpcb); 6831 else 6832 #endif 6833 in_losing(tp->t_inpcb); 6834 tp->t_rttvar += tp->t_srtt; 6835 tp->t_srtt = 0; 6836 } 6837 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6838 tp->snd_recover = tp->snd_max; 6839 tp->t_flags |= TF_ACKNOW; 6840 tp->t_rtttime = 0; 6841 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6842 out: 6843 return (retval); 6844 } 6845 6846 static int 6847 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6848 { 6849 int32_t ret = 0; 6850 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6851 6852 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6853 (tp->t_flags & TF_GPUTINPROG)) { 6854 /* 6855 * We have a goodput in progress 6856 * and we have entered a late state. 6857 * Do we have enough data in the sb 6858 * to handle the GPUT request? 6859 */ 6860 uint32_t bytes; 6861 6862 bytes = tp->gput_ack - tp->gput_seq; 6863 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6864 bytes += tp->gput_seq - tp->snd_una; 6865 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 6866 /* 6867 * There are not enough bytes in the socket 6868 * buffer that have been sent to cover this 6869 * measurement. Cancel it. 6870 */ 6871 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6872 rack->r_ctl.rc_gp_srtt /*flex1*/, 6873 tp->gput_seq, 6874 0, 0, 18, __LINE__, NULL, 0); 6875 tp->t_flags &= ~TF_GPUTINPROG; 6876 } 6877 } 6878 if (timers == 0) { 6879 return (0); 6880 } 6881 if (tp->t_state == TCPS_LISTEN) { 6882 /* no timers on listen sockets */ 6883 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6884 return (0); 6885 return (1); 6886 } 6887 if ((timers & PACE_TMR_RACK) && 6888 rack->rc_on_min_to) { 6889 /* 6890 * For the rack timer when we 6891 * are on a min-timeout (which means rrr_conf = 3) 6892 * we don't want to check the timer. It may 6893 * be going off for a pace and thats ok we 6894 * want to send the retransmit (if its ready). 6895 * 6896 * If its on a normal rack timer (non-min) then 6897 * we will check if its expired. 6898 */ 6899 goto skip_time_check; 6900 } 6901 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6902 uint32_t left; 6903 6904 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6905 ret = -1; 6906 rack_log_to_processing(rack, cts, ret, 0); 6907 return (0); 6908 } 6909 if (hpts_calling == 0) { 6910 /* 6911 * A user send or queued mbuf (sack) has called us? We 6912 * return 0 and let the pacing guards 6913 * deal with it if they should or 6914 * should not cause a send. 6915 */ 6916 ret = -2; 6917 rack_log_to_processing(rack, cts, ret, 0); 6918 return (0); 6919 } 6920 /* 6921 * Ok our timer went off early and we are not paced false 6922 * alarm, go back to sleep. 
6923 */ 6924 ret = -3; 6925 left = rack->r_ctl.rc_timer_exp - cts; 6926 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 6927 rack_log_to_processing(rack, cts, ret, left); 6928 return (1); 6929 } 6930 skip_time_check: 6931 rack->rc_tmr_stopped = 0; 6932 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6933 if (timers & PACE_TMR_DELACK) { 6934 ret = rack_timeout_delack(tp, rack, cts); 6935 } else if (timers & PACE_TMR_RACK) { 6936 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6937 rack->r_fast_output = 0; 6938 ret = rack_timeout_rack(tp, rack, cts); 6939 } else if (timers & PACE_TMR_TLP) { 6940 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6941 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 6942 } else if (timers & PACE_TMR_RXT) { 6943 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6944 rack->r_fast_output = 0; 6945 ret = rack_timeout_rxt(tp, rack, cts); 6946 } else if (timers & PACE_TMR_PERSIT) { 6947 ret = rack_timeout_persist(tp, rack, cts); 6948 } else if (timers & PACE_TMR_KEEP) { 6949 ret = rack_timeout_keepalive(tp, rack, cts); 6950 } 6951 rack_log_to_processing(rack, cts, ret, timers); 6952 return (ret); 6953 } 6954 6955 static void 6956 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6957 { 6958 struct timeval tv; 6959 uint32_t us_cts, flags_on_entry; 6960 uint8_t hpts_removed = 0; 6961 6962 flags_on_entry = rack->r_ctl.rc_hpts_flags; 6963 us_cts = tcp_get_usecs(&tv); 6964 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 6965 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 6966 ((tp->snd_max - tp->snd_una) == 0))) { 6967 tcp_hpts_remove(rack->rc_inp); 6968 hpts_removed = 1; 6969 /* If we were not delayed cancel out the flag. */ 6970 if ((tp->snd_max - tp->snd_una) == 0) 6971 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6972 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6973 } 6974 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 6975 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6976 if (tcp_in_hpts(rack->rc_inp) && 6977 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 6978 /* 6979 * Canceling timer's when we have no output being 6980 * paced. We also must remove ourselves from the 6981 * hpts. 6982 */ 6983 tcp_hpts_remove(rack->rc_inp); 6984 hpts_removed = 1; 6985 } 6986 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 6987 } 6988 if (hpts_removed == 0) 6989 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6990 } 6991 6992 static void 6993 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 6994 { 6995 return; 6996 } 6997 6998 static int 6999 rack_stopall(struct tcpcb *tp) 7000 { 7001 struct tcp_rack *rack; 7002 rack = (struct tcp_rack *)tp->t_fb_ptr; 7003 rack->t_timers_stopped = 1; 7004 return (0); 7005 } 7006 7007 static void 7008 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7009 { 7010 return; 7011 } 7012 7013 static int 7014 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7015 { 7016 return (0); 7017 } 7018 7019 static void 7020 rack_stop_all_timers(struct tcpcb *tp) 7021 { 7022 struct tcp_rack *rack; 7023 7024 /* 7025 * Assure no timers are running. 
7026 */ 7027 if (tcp_timer_active(tp, TT_PERSIST)) { 7028 /* We enter in persists, set the flag appropriately */ 7029 rack = (struct tcp_rack *)tp->t_fb_ptr; 7030 rack->rc_in_persist = 1; 7031 } 7032 tcp_timer_suspend(tp, TT_PERSIST); 7033 tcp_timer_suspend(tp, TT_REXMT); 7034 tcp_timer_suspend(tp, TT_KEEP); 7035 tcp_timer_suspend(tp, TT_DELACK); 7036 } 7037 7038 static void 7039 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7040 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7041 { 7042 int32_t idx; 7043 7044 rsm->r_rtr_cnt++; 7045 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7046 rsm->r_dupack = 0; 7047 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7048 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7049 rsm->r_flags |= RACK_OVERMAX; 7050 } 7051 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7052 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7053 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7054 } 7055 idx = rsm->r_rtr_cnt - 1; 7056 rsm->r_tim_lastsent[idx] = ts; 7057 /* 7058 * Here we don't add in the len of send, since its already 7059 * in snduna <->snd_max. 7060 */ 7061 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7062 rack->r_ctl.rc_sacked); 7063 if (rsm->r_flags & RACK_ACKED) { 7064 /* Problably MTU discovery messing with us */ 7065 rsm->r_flags &= ~RACK_ACKED; 7066 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7067 } 7068 if (rsm->r_in_tmap) { 7069 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7070 rsm->r_in_tmap = 0; 7071 } 7072 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7073 rsm->r_in_tmap = 1; 7074 /* Take off the must retransmit flag, if its on */ 7075 if (rsm->r_flags & RACK_MUST_RXT) { 7076 if (rack->r_must_retran) 7077 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7078 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7079 /* 7080 * We have retransmitted all we need. Clear 7081 * any must retransmit flags. 7082 */ 7083 rack->r_must_retran = 0; 7084 rack->r_ctl.rc_out_at_rto = 0; 7085 } 7086 rsm->r_flags &= ~RACK_MUST_RXT; 7087 } 7088 if (rsm->r_flags & RACK_SACK_PASSED) { 7089 /* We have retransmitted due to the SACK pass */ 7090 rsm->r_flags &= ~RACK_SACK_PASSED; 7091 rsm->r_flags |= RACK_WAS_SACKPASS; 7092 } 7093 } 7094 7095 static uint32_t 7096 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7097 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7098 { 7099 /* 7100 * We (re-)transmitted starting at rsm->r_start for some length 7101 * (possibly less than r_end. 7102 */ 7103 struct rack_sendmap *nrsm; 7104 #ifdef INVARIANTS 7105 struct rack_sendmap *insret; 7106 #endif 7107 uint32_t c_end; 7108 int32_t len; 7109 7110 len = *lenp; 7111 c_end = rsm->r_start + len; 7112 if (SEQ_GEQ(c_end, rsm->r_end)) { 7113 /* 7114 * We retransmitted the whole piece or more than the whole 7115 * slopping into the next rsm. 7116 */ 7117 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7118 if (c_end == rsm->r_end) { 7119 *lenp = 0; 7120 return (0); 7121 } else { 7122 int32_t act_len; 7123 7124 /* Hangs over the end return whats left */ 7125 act_len = rsm->r_end - rsm->r_start; 7126 *lenp = (len - act_len); 7127 return (rsm->r_end); 7128 } 7129 /* We don't get out of this block. */ 7130 } 7131 /* 7132 * Here we retransmitted less than the whole thing which means we 7133 * have to split this into what was transmitted and what was not. 7134 */ 7135 nrsm = rack_alloc_full_limit(rack); 7136 if (nrsm == NULL) { 7137 /* 7138 * We can't get memory, so lets not proceed. 
7139 */ 7140 *lenp = 0; 7141 return (0); 7142 } 7143 /* 7144 * So here we are going to take the original rsm and make it what we 7145 * retransmitted. nrsm will be the tail portion we did not 7146 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7147 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7148 * 1, 6 and the new piece will be 6, 11. 7149 */ 7150 rack_clone_rsm(rack, nrsm, rsm, c_end); 7151 nrsm->r_dupack = 0; 7152 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7153 #ifndef INVARIANTS 7154 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7155 #else 7156 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7157 if (insret != NULL) { 7158 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7159 nrsm, insret, rack, rsm); 7160 } 7161 #endif 7162 if (rsm->r_in_tmap) { 7163 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7164 nrsm->r_in_tmap = 1; 7165 } 7166 rsm->r_flags &= (~RACK_HAS_FIN); 7167 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7168 /* Log a split of rsm into rsm and nrsm */ 7169 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7170 *lenp = 0; 7171 return (0); 7172 } 7173 7174 static void 7175 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7176 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7177 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7178 { 7179 struct tcp_rack *rack; 7180 struct rack_sendmap *rsm, *nrsm, fe; 7181 #ifdef INVARIANTS 7182 struct rack_sendmap *insret; 7183 #endif 7184 register uint32_t snd_max, snd_una; 7185 7186 /* 7187 * Add to the RACK log of packets in flight or retransmitted. If 7188 * there is a TS option we will use the TS echoed, if not we will 7189 * grab a TS. 7190 * 7191 * Retransmissions will increment the count and move the ts to its 7192 * proper place. Note that if options do not include TS's then we 7193 * won't be able to effectively use the ACK for an RTT on a retran. 7194 * 7195 * Notes about r_start and r_end. Lets consider a send starting at 7196 * sequence 1 for 10 bytes. In such an example the r_start would be 7197 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7198 * This means that r_end is actually the first sequence for the next 7199 * slot (11). 7200 * 7201 */ 7202 /* 7203 * If err is set what do we do XXXrrs? should we not add the thing? 7204 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7205 * i.e. proceed with add ** do this for now. 7206 */ 7207 INP_WLOCK_ASSERT(tp->t_inpcb); 7208 if (err) 7209 /* 7210 * We don't log errors -- we could but snd_max does not 7211 * advance in this case either. 7212 */ 7213 return; 7214 7215 if (th_flags & TH_RST) { 7216 /* 7217 * We don't log resets and we return immediately from 7218 * sending 7219 */ 7220 return; 7221 } 7222 rack = (struct tcp_rack *)tp->t_fb_ptr; 7223 snd_una = tp->snd_una; 7224 snd_max = tp->snd_max; 7225 if (th_flags & (TH_SYN | TH_FIN)) { 7226 /* 7227 * The call to rack_log_output is made before bumping 7228 * snd_max. This means we can record one extra byte on a SYN 7229 * or FIN if seq_out is adding more on and a FIN is present 7230 * (and we are not resending). 7231 */ 7232 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7233 len++; 7234 if (th_flags & TH_FIN) 7235 len++; 7236 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7237 /* 7238 * The add/update as not been done for the FIN/SYN 7239 * yet. 
7240 */ 7241 snd_max = tp->snd_nxt; 7242 } 7243 } 7244 if (SEQ_LEQ((seq_out + len), snd_una)) { 7245 /* Are sending an old segment to induce an ack (keep-alive)? */ 7246 return; 7247 } 7248 if (SEQ_LT(seq_out, snd_una)) { 7249 /* huh? should we panic? */ 7250 uint32_t end; 7251 7252 end = seq_out + len; 7253 seq_out = snd_una; 7254 if (SEQ_GEQ(end, seq_out)) 7255 len = end - seq_out; 7256 else 7257 len = 0; 7258 } 7259 if (len == 0) { 7260 /* We don't log zero window probes */ 7261 return; 7262 } 7263 if (IN_FASTRECOVERY(tp->t_flags)) { 7264 rack->r_ctl.rc_prr_out += len; 7265 } 7266 /* First question is it a retransmission or new? */ 7267 if (seq_out == snd_max) { 7268 /* Its new */ 7269 again: 7270 rsm = rack_alloc(rack); 7271 if (rsm == NULL) { 7272 /* 7273 * Hmm out of memory and the tcb got destroyed while 7274 * we tried to wait. 7275 */ 7276 return; 7277 } 7278 if (th_flags & TH_FIN) { 7279 rsm->r_flags = RACK_HAS_FIN|add_flag; 7280 } else { 7281 rsm->r_flags = add_flag; 7282 } 7283 if (hw_tls) 7284 rsm->r_hw_tls = 1; 7285 rsm->r_tim_lastsent[0] = cts; 7286 rsm->r_rtr_cnt = 1; 7287 rsm->r_rtr_bytes = 0; 7288 if (th_flags & TH_SYN) { 7289 /* The data space is one beyond snd_una */ 7290 rsm->r_flags |= RACK_HAS_SYN; 7291 } 7292 rsm->r_start = seq_out; 7293 rsm->r_end = rsm->r_start + len; 7294 rsm->r_dupack = 0; 7295 /* 7296 * save off the mbuf location that 7297 * sndmbuf_noadv returned (which is 7298 * where we started copying from).. 7299 */ 7300 rsm->m = s_mb; 7301 rsm->soff = s_moff; 7302 /* 7303 * Here we do add in the len of send, since its not yet 7304 * reflected in in snduna <->snd_max 7305 */ 7306 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7307 rack->r_ctl.rc_sacked) + 7308 (rsm->r_end - rsm->r_start)); 7309 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7310 if (rsm->m) { 7311 if (rsm->m->m_len <= rsm->soff) { 7312 /* 7313 * XXXrrs Question, will this happen? 7314 * 7315 * If sbsndptr is set at the correct place 7316 * then s_moff should always be somewhere 7317 * within rsm->m. But if the sbsndptr was 7318 * off then that won't be true. If it occurs 7319 * we need to walkout to the correct location. 7320 */ 7321 struct mbuf *lm; 7322 7323 lm = rsm->m; 7324 while (lm->m_len <= rsm->soff) { 7325 rsm->soff -= lm->m_len; 7326 lm = lm->m_next; 7327 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7328 __func__, rack, s_moff, s_mb, rsm->soff)); 7329 } 7330 rsm->m = lm; 7331 } 7332 rsm->orig_m_len = rsm->m->m_len; 7333 } else 7334 rsm->orig_m_len = 0; 7335 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7336 /* Log a new rsm */ 7337 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7338 #ifndef INVARIANTS 7339 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7340 #else 7341 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7342 if (insret != NULL) { 7343 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7344 nrsm, insret, rack, rsm); 7345 } 7346 #endif 7347 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7348 rsm->r_in_tmap = 1; 7349 /* 7350 * Special case detection, is there just a single 7351 * packet outstanding when we are not in recovery? 7352 * 7353 * If this is true mark it so. 
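 *
 * (r_one_out_nr is consulted later in tcp_rack_xmit_timer(): an RTT
 * sample taken off a lone outstanding packet gets its confidence lowered,
 * since such a send tells us little about path buffering.)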
7354 */ 7355 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7356 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7357 struct rack_sendmap *prsm; 7358 7359 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7360 if (prsm) 7361 prsm->r_one_out_nr = 1; 7362 } 7363 return; 7364 } 7365 /* 7366 * If we reach here its a retransmission and we need to find it. 7367 */ 7368 memset(&fe, 0, sizeof(fe)); 7369 more: 7370 if (hintrsm && (hintrsm->r_start == seq_out)) { 7371 rsm = hintrsm; 7372 hintrsm = NULL; 7373 } else { 7374 /* No hints sorry */ 7375 rsm = NULL; 7376 } 7377 if ((rsm) && (rsm->r_start == seq_out)) { 7378 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7379 if (len == 0) { 7380 return; 7381 } else { 7382 goto more; 7383 } 7384 } 7385 /* Ok it was not the last pointer go through it the hard way. */ 7386 refind: 7387 fe.r_start = seq_out; 7388 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7389 if (rsm) { 7390 if (rsm->r_start == seq_out) { 7391 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7392 if (len == 0) { 7393 return; 7394 } else { 7395 goto refind; 7396 } 7397 } 7398 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7399 /* Transmitted within this piece */ 7400 /* 7401 * Ok we must split off the front and then let the 7402 * update do the rest 7403 */ 7404 nrsm = rack_alloc_full_limit(rack); 7405 if (nrsm == NULL) { 7406 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7407 return; 7408 } 7409 /* 7410 * copy rsm to nrsm and then trim the front of rsm 7411 * to not include this part. 7412 */ 7413 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7414 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7415 #ifndef INVARIANTS 7416 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7417 #else 7418 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7419 if (insret != NULL) { 7420 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7421 nrsm, insret, rack, rsm); 7422 } 7423 #endif 7424 if (rsm->r_in_tmap) { 7425 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7426 nrsm->r_in_tmap = 1; 7427 } 7428 rsm->r_flags &= (~RACK_HAS_FIN); 7429 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7430 if (len == 0) { 7431 return; 7432 } else if (len > 0) 7433 goto refind; 7434 } 7435 } 7436 /* 7437 * Hmm not found in map did they retransmit both old and on into the 7438 * new? 7439 */ 7440 if (seq_out == tp->snd_max) { 7441 goto again; 7442 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7443 #ifdef INVARIANTS 7444 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7445 seq_out, len, tp->snd_una, tp->snd_max); 7446 printf("Starting Dump of all rack entries\n"); 7447 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7448 printf("rsm:%p start:%u end:%u\n", 7449 rsm, rsm->r_start, rsm->r_end); 7450 } 7451 printf("Dump complete\n"); 7452 panic("seq_out not found rack:%p tp:%p", 7453 rack, tp); 7454 #endif 7455 } else { 7456 #ifdef INVARIANTS 7457 /* 7458 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7459 * flag) 7460 */ 7461 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7462 seq_out, len, tp->snd_max, tp); 7463 #endif 7464 } 7465 } 7466 7467 /* 7468 * Record one of the RTT updates from an ack into 7469 * our sample structure. 
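 *
 * Per ack we track: the lowest and highest rtt seen
 * (rs_rtt_lowest/rs_rtt_highest), a running total and count so an
 * average can be formed (rs_rtt_tot/rs_rtt_cnt), and the best
 * microsecond rtt along with how much we trust it
 * (rs_us_rtt/confidence/rs_us_rtrcnt).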
7470 */ 7471 7472 static void 7473 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7474 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7475 { 7476 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7477 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7478 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7479 } 7480 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7481 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7482 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7483 } 7484 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7485 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7486 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7487 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7488 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7489 } 7490 if ((confidence == 1) && 7491 ((rsm == NULL) || 7492 (rsm->r_just_ret) || 7493 (rsm->r_one_out_nr && 7494 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7495 /* 7496 * If the rsm had a just return 7497 * hit it then we can't trust the 7498 * rtt measurement for buffer deterimination 7499 * Note that a confidence of 2, indicates 7500 * SACK'd which overrides the r_just_ret or 7501 * the r_one_out_nr. If it was a CUM-ACK and 7502 * we had only two outstanding, but get an 7503 * ack for only 1. Then that also lowers our 7504 * confidence. 7505 */ 7506 confidence = 0; 7507 } 7508 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7509 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7510 if (rack->r_ctl.rack_rs.confidence == 0) { 7511 /* 7512 * We take anything with no current confidence 7513 * saved. 7514 */ 7515 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7516 rack->r_ctl.rack_rs.confidence = confidence; 7517 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7518 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7519 /* 7520 * Once we have a confident number, 7521 * we can update it with a smaller 7522 * value since this confident number 7523 * may include the DSACK time until 7524 * the next segment (the second one) arrived. 7525 */ 7526 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7527 rack->r_ctl.rack_rs.confidence = confidence; 7528 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7529 } 7530 } 7531 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7532 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7533 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7534 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7535 } 7536 7537 /* 7538 * Collect new round-trip time estimate 7539 * and update averages and current timeout. 
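 *
 * Part of that is keeping rc_gp_srtt as a simple EWMA with a 1/8 gain,
 * i.e. gp_srtt = (7/8) * gp_srtt + (1/8) * us_rtt, once we have at least
 * one confident sample.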
7540 */ 7541 static void 7542 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7543 { 7544 int32_t delta; 7545 int32_t rtt; 7546 7547 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7548 /* No valid sample */ 7549 return; 7550 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7551 /* We are to use the lowest RTT seen in a single ack */ 7552 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7553 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7554 /* We are to use the highest RTT seen in a single ack */ 7555 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7556 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7557 /* We are to use the average RTT seen in a single ack */ 7558 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7559 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7560 } else { 7561 #ifdef INVARIANTS 7562 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7563 #endif 7564 return; 7565 } 7566 if (rtt == 0) 7567 rtt = 1; 7568 if (rack->rc_gp_rtt_set == 0) { 7569 /* 7570 * With no RTT we have to accept 7571 * even one we are not confident of. 7572 */ 7573 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7574 rack->rc_gp_rtt_set = 1; 7575 } else if (rack->r_ctl.rack_rs.confidence) { 7576 /* update the running gp srtt */ 7577 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7578 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7579 } 7580 if (rack->r_ctl.rack_rs.confidence) { 7581 /* 7582 * record the low and high for highly buffered path computation, 7583 * we only do this if we are confident (not a retransmission). 7584 */ 7585 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7586 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7587 } 7588 if (rack->rc_highly_buffered == 0) { 7589 /* 7590 * Currently once we declare a path has 7591 * highly buffered there is no going 7592 * back, which may be a problem... 7593 */ 7594 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7595 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7596 rack->r_ctl.rc_highest_us_rtt, 7597 rack->r_ctl.rc_lowest_us_rtt, 7598 RACK_RTTS_SEEHBP); 7599 rack->rc_highly_buffered = 1; 7600 } 7601 } 7602 } 7603 if ((rack->r_ctl.rack_rs.confidence) || 7604 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7605 /* 7606 * If we are highly confident of it <or> it was 7607 * never retransmitted we accept it as the last us_rtt. 7608 */ 7609 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7610 /* The lowest rtt can be set if its was not retransmited */ 7611 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7612 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7613 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7614 rack->r_ctl.rc_lowest_us_rtt = 1; 7615 } 7616 } 7617 rack = (struct tcp_rack *)tp->t_fb_ptr; 7618 if (tp->t_srtt != 0) { 7619 /* 7620 * We keep a simple srtt in microseconds, like our rtt 7621 * measurement. We don't need to do any tricks with shifting 7622 * etc. Instead we just add in 1/8th of the new measurement 7623 * and subtract out 1/8 of the old srtt. We do the same with 7624 * the variance after finding the absolute value of the 7625 * difference between this sample and the current srtt. 
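 *
 * In equation form, with rtt the new sample and srtt/rttvar the values
 * before the update:
 *	srtt'   = srtt   + (rtt - srtt) / 8
 *	rttvar' = rttvar + (|rtt - srtt| - rttvar) / 8
 * Further below the retransmit timer is re-armed from RACK_REXMTVAL()
 * (essentially srtt plus four times rttvar), bounded below by
 * max(rack_rto_min, rtt + 2) and above by rack_rto_max.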
7626 */ 7627 delta = tp->t_srtt - rtt; 7628 /* Take off 1/8th of the current sRTT */ 7629 tp->t_srtt -= (tp->t_srtt >> 3); 7630 /* Add in 1/8th of the new RTT just measured */ 7631 tp->t_srtt += (rtt >> 3); 7632 if (tp->t_srtt <= 0) 7633 tp->t_srtt = 1; 7634 /* Now lets make the absolute value of the variance */ 7635 if (delta < 0) 7636 delta = -delta; 7637 /* Subtract out 1/8th */ 7638 tp->t_rttvar -= (tp->t_rttvar >> 3); 7639 /* Add in 1/8th of the new variance we just saw */ 7640 tp->t_rttvar += (delta >> 3); 7641 if (tp->t_rttvar <= 0) 7642 tp->t_rttvar = 1; 7643 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7644 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7645 } else { 7646 /* 7647 * No rtt measurement yet - use the unsmoothed rtt. Set the 7648 * variance to half the rtt (so our first retransmit happens 7649 * at 3*rtt). 7650 */ 7651 tp->t_srtt = rtt; 7652 tp->t_rttvar = rtt >> 1; 7653 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7654 } 7655 rack->rc_srtt_measure_made = 1; 7656 KMOD_TCPSTAT_INC(tcps_rttupdated); 7657 tp->t_rttupdated++; 7658 #ifdef STATS 7659 if (rack_stats_gets_ms_rtt == 0) { 7660 /* Send in the microsecond rtt used for rxt timeout purposes */ 7661 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7662 } else if (rack_stats_gets_ms_rtt == 1) { 7663 /* Send in the millisecond rtt used for rxt timeout purposes */ 7664 int32_t ms_rtt; 7665 7666 /* Round up */ 7667 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7668 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7669 } else if (rack_stats_gets_ms_rtt == 2) { 7670 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7671 int32_t ms_rtt; 7672 7673 /* Round up */ 7674 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7675 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7676 } else { 7677 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7678 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7679 } 7680 7681 #endif 7682 /* 7683 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7684 * way we do the smoothing, srtt and rttvar will each average +1/2 7685 * tick of bias. When we compute the retransmit timer, we want 1/2 7686 * tick of rounding and 1 extra tick because of +-1/2 tick 7687 * uncertainty in the firing of the timer. The bias will give us 7688 * exactly the 1.5 tick we need. But, because the bias is 7689 * statistical, we have to test that we don't drop below the minimum 7690 * feasible timer (which is 2 ticks). 7691 */ 7692 tp->t_rxtshift = 0; 7693 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7694 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7695 rack_log_rtt_sample(rack, rtt); 7696 tp->t_softerror = 0; 7697 } 7698 7699 7700 static void 7701 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7702 { 7703 /* 7704 * Apply to filter the inbound us-rtt at us_cts. 7705 */ 7706 uint32_t old_rtt; 7707 7708 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7709 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7710 us_rtt, us_cts); 7711 if (old_rtt > us_rtt) { 7712 /* We just hit a new lower rtt time */ 7713 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7714 __LINE__, RACK_RTTS_NEWRTT); 7715 /* 7716 * Only count it if its lower than what we saw within our 7717 * calculated range. 
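 *
 * For example, with rack_probertt_lower_within set to 10 the window
 * below is the final 10% of rack_time_between_probertt: val becomes
 * 10 * rack_time_between_probertt / 100, and we enter probe-rtt early
 * once us_cts - rc_lower_rtt_us_cts >= rack_time_between_probertt - val.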
7718 */ 7719 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7720 if (rack_probertt_lower_within && 7721 rack->rc_gp_dyn_mul && 7722 (rack->use_fixed_rate == 0) && 7723 (rack->rc_always_pace)) { 7724 /* 7725 * We are seeing a new lower rtt very close 7726 * to the time that we would have entered probe-rtt. 7727 * This is probably due to the fact that a peer flow 7728 * has entered probe-rtt. Lets go in now too. 7729 */ 7730 uint32_t val; 7731 7732 val = rack_probertt_lower_within * rack_time_between_probertt; 7733 val /= 100; 7734 if ((rack->in_probe_rtt == 0) && 7735 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7736 rack_enter_probertt(rack, us_cts); 7737 } 7738 } 7739 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7740 } 7741 } 7742 } 7743 7744 static int 7745 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7746 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7747 { 7748 uint32_t us_rtt; 7749 int32_t i, all; 7750 uint32_t t, len_acked; 7751 7752 if ((rsm->r_flags & RACK_ACKED) || 7753 (rsm->r_flags & RACK_WAS_ACKED)) 7754 /* Already done */ 7755 return (0); 7756 if (rsm->r_no_rtt_allowed) { 7757 /* Not allowed */ 7758 return (0); 7759 } 7760 if (ack_type == CUM_ACKED) { 7761 if (SEQ_GT(th_ack, rsm->r_end)) { 7762 len_acked = rsm->r_end - rsm->r_start; 7763 all = 1; 7764 } else { 7765 len_acked = th_ack - rsm->r_start; 7766 all = 0; 7767 } 7768 } else { 7769 len_acked = rsm->r_end - rsm->r_start; 7770 all = 0; 7771 } 7772 if (rsm->r_rtr_cnt == 1) { 7773 7774 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7775 if ((int)t <= 0) 7776 t = 1; 7777 if (!tp->t_rttlow || tp->t_rttlow > t) 7778 tp->t_rttlow = t; 7779 if (!rack->r_ctl.rc_rack_min_rtt || 7780 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7781 rack->r_ctl.rc_rack_min_rtt = t; 7782 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7783 rack->r_ctl.rc_rack_min_rtt = 1; 7784 } 7785 } 7786 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7787 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7788 else 7789 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7790 if (us_rtt == 0) 7791 us_rtt = 1; 7792 if (CC_ALGO(tp)->rttsample != NULL) { 7793 /* Kick the RTT to the CC */ 7794 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7795 } 7796 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7797 if (ack_type == SACKED) { 7798 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7799 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7800 } else { 7801 /* 7802 * We need to setup what our confidence 7803 * is in this ack. 7804 * 7805 * If the rsm was app limited and it is 7806 * less than a mss in length (the end 7807 * of the send) then we have a gap. If we 7808 * were app limited but say we were sending 7809 * multiple MSS's then we are more confident 7810 * int it. 7811 * 7812 * When we are not app-limited then we see if 7813 * the rsm is being included in the current 7814 * measurement, we tell this by the app_limited_needs_set 7815 * flag. 7816 * 7817 * Note that being cwnd blocked is not applimited 7818 * as well as the pacing delay between packets which 7819 * are sending only 1 or 2 MSS's also will show up 7820 * in the RTT. 
We probably need to examine this algorithm 7821 * a bit more and enhance it to account for the delay 7822 * between rsm's. We could do that by saving off the 7823 * pacing delay of each rsm (in an rsm) and then 7824 * factoring that in somehow though for now I am 7825 * not sure how :) 7826 */ 7827 int calc_conf = 0; 7828 7829 if (rsm->r_flags & RACK_APP_LIMITED) { 7830 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7831 calc_conf = 0; 7832 else 7833 calc_conf = 1; 7834 } else if (rack->app_limited_needs_set == 0) { 7835 calc_conf = 1; 7836 } else { 7837 calc_conf = 0; 7838 } 7839 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7840 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7841 calc_conf, rsm, rsm->r_rtr_cnt); 7842 } 7843 if ((rsm->r_flags & RACK_TLP) && 7844 (!IN_FASTRECOVERY(tp->t_flags))) { 7845 /* Segment was a TLP and our retrans matched */ 7846 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7847 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7848 } 7849 } 7850 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7851 /* New more recent rack_tmit_time */ 7852 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7853 rack->rc_rack_rtt = t; 7854 } 7855 return (1); 7856 } 7857 /* 7858 * We clear the soft/rxtshift since we got an ack. 7859 * There is no assurance we will call the commit() function 7860 * so we need to clear these to avoid incorrect handling. 7861 */ 7862 tp->t_rxtshift = 0; 7863 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7864 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7865 tp->t_softerror = 0; 7866 if (to && (to->to_flags & TOF_TS) && 7867 (ack_type == CUM_ACKED) && 7868 (to->to_tsecr) && 7869 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7870 /* 7871 * Now which timestamp does it match? In this block the ACK 7872 * must be coming from a previous transmission. 7873 */ 7874 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7875 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7876 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7877 if ((int)t <= 0) 7878 t = 1; 7879 if (CC_ALGO(tp)->rttsample != NULL) { 7880 /* 7881 * Kick the RTT to the CC, here 7882 * we lie a bit in that we know the 7883 * retransmission is correct even though 7884 * we retransmitted. This is because 7885 * we match the timestamps. 7886 */ 7887 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7888 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7889 else 7890 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7891 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7892 } 7893 if ((i + 1) < rsm->r_rtr_cnt) { 7894 /* 7895 * The peer ack'd from our previous 7896 * transmission. We have a spurious 7897 * retransmission and thus we dont 7898 * want to update our rack_rtt. 7899 * 7900 * Hmm should there be a CC revert here? 
7901 * 7902 */ 7903 return (0); 7904 } 7905 if (!tp->t_rttlow || tp->t_rttlow > t) 7906 tp->t_rttlow = t; 7907 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7908 rack->r_ctl.rc_rack_min_rtt = t; 7909 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7910 rack->r_ctl.rc_rack_min_rtt = 1; 7911 } 7912 } 7913 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7914 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7915 /* New more recent rack_tmit_time */ 7916 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7917 rack->rc_rack_rtt = t; 7918 } 7919 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7920 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7921 rsm->r_rtr_cnt); 7922 return (1); 7923 } 7924 } 7925 goto ts_not_found; 7926 } else { 7927 /* 7928 * Ok its a SACK block that we retransmitted. or a windows 7929 * machine without timestamps. We can tell nothing from the 7930 * time-stamp since its not there or the time the peer last 7931 * recieved a segment that moved forward its cum-ack point. 7932 */ 7933 ts_not_found: 7934 i = rsm->r_rtr_cnt - 1; 7935 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7936 if ((int)t <= 0) 7937 t = 1; 7938 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7939 /* 7940 * We retransmitted and the ack came back in less 7941 * than the smallest rtt we have observed. We most 7942 * likely did an improper retransmit as outlined in 7943 * 6.2 Step 2 point 2 in the rack-draft so we 7944 * don't want to update our rack_rtt. We in 7945 * theory (in future) might want to think about reverting our 7946 * cwnd state but we won't for now. 7947 */ 7948 return (0); 7949 } else if (rack->r_ctl.rc_rack_min_rtt) { 7950 /* 7951 * We retransmitted it and the retransmit did the 7952 * job. 7953 */ 7954 if (!rack->r_ctl.rc_rack_min_rtt || 7955 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7956 rack->r_ctl.rc_rack_min_rtt = t; 7957 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7958 rack->r_ctl.rc_rack_min_rtt = 1; 7959 } 7960 } 7961 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 7962 /* New more recent rack_tmit_time */ 7963 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 7964 rack->rc_rack_rtt = t; 7965 } 7966 return (1); 7967 } 7968 } 7969 return (0); 7970 } 7971 7972 /* 7973 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 7974 */ 7975 static void 7976 rack_log_sack_passed(struct tcpcb *tp, 7977 struct tcp_rack *rack, struct rack_sendmap *rsm) 7978 { 7979 struct rack_sendmap *nrsm; 7980 7981 nrsm = rsm; 7982 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 7983 rack_head, r_tnext) { 7984 if (nrsm == rsm) { 7985 /* Skip orginal segment he is acked */ 7986 continue; 7987 } 7988 if (nrsm->r_flags & RACK_ACKED) { 7989 /* 7990 * Skip ack'd segments, though we 7991 * should not see these, since tmap 7992 * should not have ack'd segments. 7993 */ 7994 continue; 7995 } 7996 if (nrsm->r_flags & RACK_SACK_PASSED) { 7997 /* 7998 * We found one that is already marked 7999 * passed, we have been here before and 8000 * so all others below this are marked. 
8001 */ 8002 break; 8003 } 8004 nrsm->r_flags |= RACK_SACK_PASSED; 8005 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8006 } 8007 } 8008 8009 static void 8010 rack_need_set_test(struct tcpcb *tp, 8011 struct tcp_rack *rack, 8012 struct rack_sendmap *rsm, 8013 tcp_seq th_ack, 8014 int line, 8015 int use_which) 8016 { 8017 8018 if ((tp->t_flags & TF_GPUTINPROG) && 8019 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8020 /* 8021 * We were app limited, and this ack 8022 * butts up or goes beyond the point where we want 8023 * to start our next measurement. We need 8024 * to record the new gput_ts as here and 8025 * possibly update the start sequence. 8026 */ 8027 uint32_t seq, ts; 8028 8029 if (rsm->r_rtr_cnt > 1) { 8030 /* 8031 * This is a retransmit, can we 8032 * really make any assessment at this 8033 * point? We are not really sure of 8034 * the timestamp, is it this or the 8035 * previous transmission? 8036 * 8037 * Lets wait for something better that 8038 * is not retransmitted. 8039 */ 8040 return; 8041 } 8042 seq = tp->gput_seq; 8043 ts = tp->gput_ts; 8044 rack->app_limited_needs_set = 0; 8045 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8046 /* Do we start at a new end? */ 8047 if ((use_which == RACK_USE_BEG) && 8048 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8049 /* 8050 * When we get an ACK that just eats 8051 * up some of the rsm, we set RACK_USE_BEG 8052 * since whats at r_start (i.e. th_ack) 8053 * is left unacked and thats where the 8054 * measurement not starts. 8055 */ 8056 tp->gput_seq = rsm->r_start; 8057 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8058 } 8059 if ((use_which == RACK_USE_END) && 8060 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8061 /* 8062 * We use the end when the cumack 8063 * is moving forward and completely 8064 * deleting the rsm passed so basically 8065 * r_end holds th_ack. 8066 * 8067 * For SACK's we also want to use the end 8068 * since this piece just got sacked and 8069 * we want to target anything after that 8070 * in our measurement. 8071 */ 8072 tp->gput_seq = rsm->r_end; 8073 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8074 } 8075 if (use_which == RACK_USE_END_OR_THACK) { 8076 /* 8077 * special case for ack moving forward, 8078 * not a sack, we need to move all the 8079 * way up to where this ack cum-ack moves 8080 * to. 8081 */ 8082 if (SEQ_GT(th_ack, rsm->r_end)) 8083 tp->gput_seq = th_ack; 8084 else 8085 tp->gput_seq = rsm->r_end; 8086 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8087 } 8088 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8089 /* 8090 * We moved beyond this guy's range, re-calculate 8091 * the new end point. 8092 */ 8093 if (rack->rc_gp_filled == 0) { 8094 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8095 } else { 8096 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8097 } 8098 } 8099 /* 8100 * We are moving the goal post, we may be able to clear the 8101 * measure_saw_probe_rtt flag. 
8102 */ 8103 if ((rack->in_probe_rtt == 0) && 8104 (rack->measure_saw_probe_rtt) && 8105 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8106 rack->measure_saw_probe_rtt = 0; 8107 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8108 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8109 if (rack->rc_gp_filled && 8110 ((tp->gput_ack - tp->gput_seq) < 8111 max(rc_init_window(rack), (MIN_GP_WIN * 8112 ctf_fixed_maxseg(tp))))) { 8113 uint32_t ideal_amount; 8114 8115 ideal_amount = rack_get_measure_window(tp, rack); 8116 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8117 /* 8118 * There is no sense of continuing this measurement 8119 * because its too small to gain us anything we 8120 * trust. Skip it and that way we can start a new 8121 * measurement quicker. 8122 */ 8123 tp->t_flags &= ~TF_GPUTINPROG; 8124 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8125 0, 0, 0, 6, __LINE__, NULL, 0); 8126 } else { 8127 /* 8128 * Reset the window further out. 8129 */ 8130 tp->gput_ack = tp->gput_seq + ideal_amount; 8131 } 8132 } 8133 } 8134 } 8135 8136 static inline int 8137 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8138 { 8139 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8140 /* Behind our TLP definition or right at */ 8141 return (0); 8142 } 8143 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8144 /* The start is beyond or right at our end of TLP definition */ 8145 return (0); 8146 } 8147 /* It has to be a sub-part of the original TLP recorded */ 8148 return (1); 8149 } 8150 8151 8152 static uint32_t 8153 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8154 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8155 { 8156 uint32_t start, end, changed = 0; 8157 struct rack_sendmap stack_map; 8158 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8159 #ifdef INVARIANTS 8160 struct rack_sendmap *insret; 8161 #endif 8162 int32_t used_ref = 1; 8163 int moved = 0; 8164 8165 start = sack->start; 8166 end = sack->end; 8167 rsm = *prsm; 8168 memset(&fe, 0, sizeof(fe)); 8169 do_rest_ofb: 8170 if ((rsm == NULL) || 8171 (SEQ_LT(end, rsm->r_start)) || 8172 (SEQ_GEQ(start, rsm->r_end)) || 8173 (SEQ_LT(start, rsm->r_start))) { 8174 /* 8175 * We are not in the right spot, 8176 * find the correct spot in the tree. 8177 */ 8178 used_ref = 0; 8179 fe.r_start = start; 8180 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8181 moved++; 8182 } 8183 if (rsm == NULL) { 8184 /* TSNH */ 8185 goto out; 8186 } 8187 /* Ok we have an ACK for some piece of this rsm */ 8188 if (rsm->r_start != start) { 8189 if ((rsm->r_flags & RACK_ACKED) == 0) { 8190 /* 8191 * Before any splitting or hookery is 8192 * done is it a TLP of interest i.e. rxt? 8193 */ 8194 if ((rsm->r_flags & RACK_TLP) && 8195 (rsm->r_rtr_cnt > 1)) { 8196 /* 8197 * We are splitting a rxt TLP, check 8198 * if we need to save off the start/end 8199 */ 8200 if (rack->rc_last_tlp_acked_set && 8201 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8202 /* 8203 * We already turned this on since we are inside 8204 * the previous one was a partially sack now we 8205 * are getting another one (maybe all of it). 8206 * 8207 */ 8208 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8209 /* 8210 * Lets make sure we have all of it though. 
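	 * Illustrative example: if the recorded TLP block is
	 * [9000, 10448) and this rsm spans [8500, 10448), the
	 * start below is pulled back to 8500, so the declared
	 * block always covers the union of what has been seen.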
8211 */ 8212 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8213 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8214 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8215 rack->r_ctl.last_tlp_acked_end); 8216 } 8217 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8218 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8219 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8220 rack->r_ctl.last_tlp_acked_end); 8221 } 8222 } else { 8223 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8224 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8225 rack->rc_last_tlp_past_cumack = 0; 8226 rack->rc_last_tlp_acked_set = 1; 8227 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8228 } 8229 } 8230 /** 8231 * Need to split this in two pieces the before and after, 8232 * the before remains in the map, the after must be 8233 * added. In other words we have: 8234 * rsm |--------------| 8235 * sackblk |-------> 8236 * rsm will become 8237 * rsm |---| 8238 * and nrsm will be the sacked piece 8239 * nrsm |----------| 8240 * 8241 * But before we start down that path lets 8242 * see if the sack spans over on top of 8243 * the next guy and it is already sacked. 8244 * 8245 */ 8246 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8247 if (next && (next->r_flags & RACK_ACKED) && 8248 SEQ_GEQ(end, next->r_start)) { 8249 /** 8250 * So the next one is already acked, and 8251 * we can thus by hookery use our stack_map 8252 * to reflect the piece being sacked and 8253 * then adjust the two tree entries moving 8254 * the start and ends around. So we start like: 8255 * rsm |------------| (not-acked) 8256 * next |-----------| (acked) 8257 * sackblk |--------> 8258 * We want to end like so: 8259 * rsm |------| (not-acked) 8260 * next |-----------------| (acked) 8261 * nrsm |-----| 8262 * Where nrsm is a temporary stack piece we 8263 * use to update all the gizmos. 8264 */ 8265 /* Copy up our fudge block */ 8266 nrsm = &stack_map; 8267 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8268 /* Now adjust our tree blocks */ 8269 rsm->r_end = start; 8270 next->r_start = start; 8271 /* Now we must adjust back where next->m is */ 8272 rack_setup_offset_for_rsm(rsm, next); 8273 8274 /* We don't need to adjust rsm, it did not change */ 8275 /* Clear out the dup ack count of the remainder */ 8276 rsm->r_dupack = 0; 8277 rsm->r_just_ret = 0; 8278 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8279 /* Now lets make sure our fudge block is right */ 8280 nrsm->r_start = start; 8281 /* Now lets update all the stats and such */ 8282 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8283 if (rack->app_limited_needs_set) 8284 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8285 changed += (nrsm->r_end - nrsm->r_start); 8286 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8287 if (nrsm->r_flags & RACK_SACK_PASSED) { 8288 rack->r_ctl.rc_reorder_ts = cts; 8289 } 8290 /* 8291 * Now we want to go up from rsm (the 8292 * one left un-acked) to the next one 8293 * in the tmap. We do this so when 8294 * we walk backwards we include marking 8295 * sack-passed on rsm (The one passed in 8296 * is skipped since it is generally called 8297 * on something sacked before removing it 8298 * from the tmap). 8299 */ 8300 if (rsm->r_in_tmap) { 8301 nrsm = TAILQ_NEXT(rsm, r_tnext); 8302 /* 8303 * Now that we have the next 8304 * one walk backwards from there. 
8305 */ 8306 if (nrsm && nrsm->r_in_tmap) 8307 rack_log_sack_passed(tp, rack, nrsm); 8308 } 8309 /* Now are we done? */ 8310 if (SEQ_LT(end, next->r_end) || 8311 (end == next->r_end)) { 8312 /* Done with block */ 8313 goto out; 8314 } 8315 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8316 counter_u64_add(rack_sack_used_next_merge, 1); 8317 /* Postion for the next block */ 8318 start = next->r_end; 8319 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8320 if (rsm == NULL) 8321 goto out; 8322 } else { 8323 /** 8324 * We can't use any hookery here, so we 8325 * need to split the map. We enter like 8326 * so: 8327 * rsm |--------| 8328 * sackblk |-----> 8329 * We will add the new block nrsm and 8330 * that will be the new portion, and then 8331 * fall through after reseting rsm. So we 8332 * split and look like this: 8333 * rsm |----| 8334 * sackblk |-----> 8335 * nrsm |---| 8336 * We then fall through reseting 8337 * rsm to nrsm, so the next block 8338 * picks it up. 8339 */ 8340 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8341 if (nrsm == NULL) { 8342 /* 8343 * failed XXXrrs what can we do but loose the sack 8344 * info? 8345 */ 8346 goto out; 8347 } 8348 counter_u64_add(rack_sack_splits, 1); 8349 rack_clone_rsm(rack, nrsm, rsm, start); 8350 rsm->r_just_ret = 0; 8351 #ifndef INVARIANTS 8352 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8353 #else 8354 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8355 if (insret != NULL) { 8356 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8357 nrsm, insret, rack, rsm); 8358 } 8359 #endif 8360 if (rsm->r_in_tmap) { 8361 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8362 nrsm->r_in_tmap = 1; 8363 } 8364 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8365 rsm->r_flags &= (~RACK_HAS_FIN); 8366 /* Position us to point to the new nrsm that starts the sack blk */ 8367 rsm = nrsm; 8368 } 8369 } else { 8370 /* Already sacked this piece */ 8371 counter_u64_add(rack_sack_skipped_acked, 1); 8372 moved++; 8373 if (end == rsm->r_end) { 8374 /* Done with block */ 8375 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8376 goto out; 8377 } else if (SEQ_LT(end, rsm->r_end)) { 8378 /* A partial sack to a already sacked block */ 8379 moved++; 8380 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8381 goto out; 8382 } else { 8383 /* 8384 * The end goes beyond this guy 8385 * repostion the start to the 8386 * next block. 8387 */ 8388 start = rsm->r_end; 8389 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8390 if (rsm == NULL) 8391 goto out; 8392 } 8393 } 8394 } 8395 if (SEQ_GEQ(end, rsm->r_end)) { 8396 /** 8397 * The end of this block is either beyond this guy or right 8398 * at this guy. I.e.: 8399 * rsm --- |-----| 8400 * end |-----| 8401 * <or> 8402 * end |---------| 8403 */ 8404 if ((rsm->r_flags & RACK_ACKED) == 0) { 8405 /* 8406 * Is it a TLP of interest? 8407 */ 8408 if ((rsm->r_flags & RACK_TLP) && 8409 (rsm->r_rtr_cnt > 1)) { 8410 /* 8411 * We are splitting a rxt TLP, check 8412 * if we need to save off the start/end 8413 */ 8414 if (rack->rc_last_tlp_acked_set && 8415 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8416 /* 8417 * We already turned this on since we are inside 8418 * the previous one was a partially sack now we 8419 * are getting another one (maybe all of it). 
8420 */ 8421 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8422 /* 8423 * Lets make sure we have all of it though. 8424 */ 8425 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8426 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8427 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8428 rack->r_ctl.last_tlp_acked_end); 8429 } 8430 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8431 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8432 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8433 rack->r_ctl.last_tlp_acked_end); 8434 } 8435 } else { 8436 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8437 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8438 rack->rc_last_tlp_past_cumack = 0; 8439 rack->rc_last_tlp_acked_set = 1; 8440 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8441 } 8442 } 8443 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8444 changed += (rsm->r_end - rsm->r_start); 8445 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8446 if (rsm->r_in_tmap) /* should be true */ 8447 rack_log_sack_passed(tp, rack, rsm); 8448 /* Is Reordering occuring? */ 8449 if (rsm->r_flags & RACK_SACK_PASSED) { 8450 rsm->r_flags &= ~RACK_SACK_PASSED; 8451 rack->r_ctl.rc_reorder_ts = cts; 8452 } 8453 if (rack->app_limited_needs_set) 8454 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8455 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8456 rsm->r_flags |= RACK_ACKED; 8457 if (rsm->r_in_tmap) { 8458 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8459 rsm->r_in_tmap = 0; 8460 } 8461 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8462 } else { 8463 counter_u64_add(rack_sack_skipped_acked, 1); 8464 moved++; 8465 } 8466 if (end == rsm->r_end) { 8467 /* This block only - done, setup for next */ 8468 goto out; 8469 } 8470 /* 8471 * There is more not coverend by this rsm move on 8472 * to the next block in the RB tree. 8473 */ 8474 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8475 start = rsm->r_end; 8476 rsm = nrsm; 8477 if (rsm == NULL) 8478 goto out; 8479 goto do_rest_ofb; 8480 } 8481 /** 8482 * The end of this sack block is smaller than 8483 * our rsm i.e.: 8484 * rsm --- |-----| 8485 * end |--| 8486 */ 8487 if ((rsm->r_flags & RACK_ACKED) == 0) { 8488 /* 8489 * Is it a TLP of interest? 8490 */ 8491 if ((rsm->r_flags & RACK_TLP) && 8492 (rsm->r_rtr_cnt > 1)) { 8493 /* 8494 * We are splitting a rxt TLP, check 8495 * if we need to save off the start/end 8496 */ 8497 if (rack->rc_last_tlp_acked_set && 8498 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8499 /* 8500 * We already turned this on since we are inside 8501 * the previous one was a partially sack now we 8502 * are getting another one (maybe all of it). 8503 */ 8504 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8505 /* 8506 * Lets make sure we have all of it though. 
8507 */ 8508 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8509 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8510 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8511 rack->r_ctl.last_tlp_acked_end); 8512 } 8513 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8514 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8515 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8516 rack->r_ctl.last_tlp_acked_end); 8517 } 8518 } else { 8519 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8520 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8521 rack->rc_last_tlp_past_cumack = 0; 8522 rack->rc_last_tlp_acked_set = 1; 8523 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8524 } 8525 } 8526 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8527 if (prev && 8528 (prev->r_flags & RACK_ACKED)) { 8529 /** 8530 * Goal, we want the right remainder of rsm to shrink 8531 * in place and span from (rsm->r_start = end) to rsm->r_end. 8532 * We want to expand prev to go all the way 8533 * to prev->r_end <- end. 8534 * so in the tree we have before: 8535 * prev |--------| (acked) 8536 * rsm |-------| (non-acked) 8537 * sackblk |-| 8538 * We churn it so we end up with 8539 * prev |----------| (acked) 8540 * rsm |-----| (non-acked) 8541 * nrsm |-| (temporary) 8542 * 8543 * Note if either prev/rsm is a TLP we don't 8544 * do this. 8545 */ 8546 nrsm = &stack_map; 8547 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8548 prev->r_end = end; 8549 rsm->r_start = end; 8550 /* Now adjust nrsm (stack copy) to be 8551 * the one that is the small 8552 * piece that was "sacked". 8553 */ 8554 nrsm->r_end = end; 8555 rsm->r_dupack = 0; 8556 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8557 /* 8558 * Now that the rsm has had its start moved forward 8559 * lets go ahead and get its new place in the world. 8560 */ 8561 rack_setup_offset_for_rsm(prev, rsm); 8562 /* 8563 * Now nrsm is our new little piece 8564 * that is acked (which was merged 8565 * to prev). Update the rtt and changed 8566 * based on that. Also check for reordering. 8567 */ 8568 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8569 if (rack->app_limited_needs_set) 8570 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8571 changed += (nrsm->r_end - nrsm->r_start); 8572 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8573 if (nrsm->r_flags & RACK_SACK_PASSED) { 8574 rack->r_ctl.rc_reorder_ts = cts; 8575 } 8576 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8577 rsm = prev; 8578 counter_u64_add(rack_sack_used_prev_merge, 1); 8579 } else { 8580 /** 8581 * This is the case where our previous 8582 * block is not acked either, so we must 8583 * split the block in two. 8584 */ 8585 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8586 if (nrsm == NULL) { 8587 /* failed rrs what can we do but loose the sack info? */ 8588 goto out; 8589 } 8590 if ((rsm->r_flags & RACK_TLP) && 8591 (rsm->r_rtr_cnt > 1)) { 8592 /* 8593 * We are splitting a rxt TLP, check 8594 * if we need to save off the start/end 8595 */ 8596 if (rack->rc_last_tlp_acked_set && 8597 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8598 /* 8599 * We already turned this on since this block is inside 8600 * the previous one was a partially sack now we 8601 * are getting another one (maybe all of it). 8602 */ 8603 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8604 /* 8605 * Lets make sure we have all of it though. 
8606 */ 8607 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8608 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8609 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8610 rack->r_ctl.last_tlp_acked_end); 8611 } 8612 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8613 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8614 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8615 rack->r_ctl.last_tlp_acked_end); 8616 } 8617 } else { 8618 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8619 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8620 rack->rc_last_tlp_acked_set = 1; 8621 rack->rc_last_tlp_past_cumack = 0; 8622 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8623 } 8624 } 8625 /** 8626 * In this case nrsm becomes 8627 * nrsm->r_start = end; 8628 * nrsm->r_end = rsm->r_end; 8629 * which is un-acked. 8630 * <and> 8631 * rsm->r_end = nrsm->r_start; 8632 * i.e. the remaining un-acked 8633 * piece is left on the left 8634 * hand side. 8635 * 8636 * So we start like this 8637 * rsm |----------| (not acked) 8638 * sackblk |---| 8639 * build it so we have 8640 * rsm |---| (acked) 8641 * nrsm |------| (not acked) 8642 */ 8643 counter_u64_add(rack_sack_splits, 1); 8644 rack_clone_rsm(rack, nrsm, rsm, end); 8645 rsm->r_flags &= (~RACK_HAS_FIN); 8646 rsm->r_just_ret = 0; 8647 #ifndef INVARIANTS 8648 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8649 #else 8650 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8651 if (insret != NULL) { 8652 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8653 nrsm, insret, rack, rsm); 8654 } 8655 #endif 8656 if (rsm->r_in_tmap) { 8657 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8658 nrsm->r_in_tmap = 1; 8659 } 8660 nrsm->r_dupack = 0; 8661 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8662 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8663 changed += (rsm->r_end - rsm->r_start); 8664 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8665 if (rsm->r_in_tmap) /* should be true */ 8666 rack_log_sack_passed(tp, rack, rsm); 8667 /* Is Reordering occuring? */ 8668 if (rsm->r_flags & RACK_SACK_PASSED) { 8669 rsm->r_flags &= ~RACK_SACK_PASSED; 8670 rack->r_ctl.rc_reorder_ts = cts; 8671 } 8672 if (rack->app_limited_needs_set) 8673 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8674 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8675 rsm->r_flags |= RACK_ACKED; 8676 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8677 if (rsm->r_in_tmap) { 8678 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8679 rsm->r_in_tmap = 0; 8680 } 8681 } 8682 } else if (start != end){ 8683 /* 8684 * The block was already acked. 8685 */ 8686 counter_u64_add(rack_sack_skipped_acked, 1); 8687 moved++; 8688 } 8689 out: 8690 if (rsm && 8691 ((rsm->r_flags & RACK_TLP) == 0) && 8692 (rsm->r_flags & RACK_ACKED)) { 8693 /* 8694 * Now can we merge where we worked 8695 * with either the previous or 8696 * next block? 8697 */ 8698 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8699 while (next) { 8700 if (next->r_flags & RACK_TLP) 8701 break; 8702 if (next->r_flags & RACK_ACKED) { 8703 /* yep this and next can be merged */ 8704 rsm = rack_merge_rsm(rack, rsm, next); 8705 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8706 } else 8707 break; 8708 } 8709 /* Now what about the previous? 
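	 * Same idea in the other direction, e.g. if prev covers
	 * [1000, 2000) and rsm covers [2000, 3000), both are
	 * ACKED and neither is a TLP, they collapse into one
	 * entry spanning [1000, 3000), keeping the scoreboard
	 * small.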
*/ 8710 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8711 while (prev) { 8712 if (prev->r_flags & RACK_TLP) 8713 break; 8714 if (prev->r_flags & RACK_ACKED) { 8715 /* yep the previous and this can be merged */ 8716 rsm = rack_merge_rsm(rack, prev, rsm); 8717 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8718 } else 8719 break; 8720 } 8721 } 8722 if (used_ref == 0) { 8723 counter_u64_add(rack_sack_proc_all, 1); 8724 } else { 8725 counter_u64_add(rack_sack_proc_short, 1); 8726 } 8727 /* Save off the next one for quick reference. */ 8728 if (rsm) 8729 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8730 else 8731 nrsm = NULL; 8732 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8733 /* Pass back the moved. */ 8734 *moved_two = moved; 8735 return (changed); 8736 } 8737 8738 static void inline 8739 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8740 { 8741 struct rack_sendmap *tmap; 8742 8743 tmap = NULL; 8744 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8745 /* Its no longer sacked, mark it so */ 8746 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8747 #ifdef INVARIANTS 8748 if (rsm->r_in_tmap) { 8749 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8750 rack, rsm, rsm->r_flags); 8751 } 8752 #endif 8753 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8754 /* Rebuild it into our tmap */ 8755 if (tmap == NULL) { 8756 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8757 tmap = rsm; 8758 } else { 8759 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8760 tmap = rsm; 8761 } 8762 tmap->r_in_tmap = 1; 8763 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8764 } 8765 /* 8766 * Now lets possibly clear the sack filter so we start 8767 * recognizing sacks that cover this area. 8768 */ 8769 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8770 8771 } 8772 8773 static void 8774 rack_do_decay(struct tcp_rack *rack) 8775 { 8776 struct timeval res; 8777 8778 #define timersub(tvp, uvp, vvp) \ 8779 do { \ 8780 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8781 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8782 if ((vvp)->tv_usec < 0) { \ 8783 (vvp)->tv_sec--; \ 8784 (vvp)->tv_usec += 1000000; \ 8785 } \ 8786 } while (0) 8787 8788 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8789 #undef timersub 8790 8791 rack->r_ctl.input_pkt++; 8792 if ((rack->rc_in_persist) || 8793 (res.tv_sec >= 1) || 8794 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8795 /* 8796 * Check for decay of non-SAD, 8797 * we want all SAD detection metrics to 8798 * decay 1/4 per second (or more) passed. 8799 */ 8800 #ifdef NETFLIX_EXP_DETECTION 8801 uint32_t pkt_delta; 8802 8803 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8804 #endif 8805 /* Update our saved tracking values */ 8806 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8807 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8808 /* Now do we escape without decay? */ 8809 #ifdef NETFLIX_EXP_DETECTION 8810 if (rack->rc_in_persist || 8811 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8812 (pkt_delta < tcp_sad_low_pps)){ 8813 /* 8814 * We don't decay idle connections 8815 * or ones that have a low input pps. 
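	 * As a rough illustration of the decay below (assuming
	 * the sysctl works out to keeping 3/4 of the value, the
	 * "1/4 per second" mentioned above): a sack_count of
	 * 1000 would become 750 after one pass, roughly 562
	 * after two, and so on.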
8816 */ 8817 return; 8818 } 8819 /* Decay the counters */ 8820 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8821 tcp_sad_decay_val); 8822 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8823 tcp_sad_decay_val); 8824 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8825 tcp_sad_decay_val); 8826 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8827 tcp_sad_decay_val); 8828 #endif 8829 } 8830 } 8831 8832 static void 8833 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8834 { 8835 struct rack_sendmap *rsm; 8836 #ifdef INVARIANTS 8837 struct rack_sendmap *rm; 8838 #endif 8839 8840 /* 8841 * The ACK point is advancing to th_ack, we must drop off 8842 * the packets in the rack log and calculate any eligble 8843 * RTT's. 8844 */ 8845 rack->r_wanted_output = 1; 8846 8847 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 8848 if ((rack->rc_last_tlp_acked_set == 1)&& 8849 (rack->rc_last_tlp_past_cumack == 1) && 8850 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8851 /* 8852 * We have reached the point where our last rack 8853 * tlp retransmit sequence is ahead of the cum-ack. 8854 * This can only happen when the cum-ack moves all 8855 * the way around (its been a full 2^^31+1 bytes 8856 * or more since we sent a retransmitted TLP). Lets 8857 * turn off the valid flag since its not really valid. 8858 * 8859 * Note since sack's also turn on this event we have 8860 * a complication, we have to wait to age it out until 8861 * the cum-ack is by the TLP before checking which is 8862 * what the next else clause does. 8863 */ 8864 rack_log_dsack_event(rack, 9, __LINE__, 8865 rack->r_ctl.last_tlp_acked_start, 8866 rack->r_ctl.last_tlp_acked_end); 8867 rack->rc_last_tlp_acked_set = 0; 8868 rack->rc_last_tlp_past_cumack = 0; 8869 } else if ((rack->rc_last_tlp_acked_set == 1) && 8870 (rack->rc_last_tlp_past_cumack == 0) && 8871 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8872 /* 8873 * It is safe to start aging TLP's out. 8874 */ 8875 rack->rc_last_tlp_past_cumack = 1; 8876 } 8877 /* We do the same for the tlp send seq as well */ 8878 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8879 (rack->rc_last_sent_tlp_past_cumack == 1) && 8880 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8881 rack_log_dsack_event(rack, 9, __LINE__, 8882 rack->r_ctl.last_sent_tlp_seq, 8883 (rack->r_ctl.last_sent_tlp_seq + 8884 rack->r_ctl.last_sent_tlp_len)); 8885 rack->rc_last_sent_tlp_seq_valid = 0; 8886 rack->rc_last_sent_tlp_past_cumack = 0; 8887 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8888 (rack->rc_last_sent_tlp_past_cumack == 0) && 8889 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8890 /* 8891 * It is safe to start aging TLP's send. 8892 */ 8893 rack->rc_last_sent_tlp_past_cumack = 1; 8894 } 8895 more: 8896 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8897 if (rsm == NULL) { 8898 if ((th_ack - 1) == tp->iss) { 8899 /* 8900 * For the SYN incoming case we will not 8901 * have called tcp_output for the sending of 8902 * the SYN, so there will be no map. All 8903 * other cases should probably be a panic. 
			 */
			return;
		}
		if (tp->t_flags & TF_SENTFIN) {
			/* if we sent a FIN we often will not have a map */
			return;
		}
#ifdef INVARIANTS
		panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n",
		    tp,
		    tp->t_state, th_ack, rack,
		    tp->snd_una, tp->snd_max, tp->snd_nxt);
#endif
		return;
	}
	if (SEQ_LT(th_ack, rsm->r_start)) {
		/* Huh, the map is missing this */
#ifdef INVARIANTS
		printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n",
		    rsm->r_start,
		    th_ack, tp->t_state, rack->r_state);
#endif
		return;
	}
	rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack);

	/* Now was it a retransmitted TLP? */
	if ((rsm->r_flags & RACK_TLP) &&
	    (rsm->r_rtr_cnt > 1)) {
		/*
		 * Yes, this rsm was a TLP and retransmitted, remember that
		 * since if a DSACK comes back on this we don't want
		 * to think of it as a reordered segment. This may
		 * get updated again with possibly even other TLPs
		 * in flight, but that's ok. Only when we don't send
		 * a retransmitted TLP for 1/2 the sequence space
		 * will it get turned off (above).
		 */
		if (rack->rc_last_tlp_acked_set &&
		    (is_rsm_inside_declared_tlp_block(rack, rsm))) {
			/*
			 * We already turned this on since the end matches;
			 * the previous one was a partial ack, now we
			 * are getting another one (maybe all of it).
			 */
			rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end);
			/*
			 * Let's make sure we have all of it though.
			 */
			if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) {
				rack->r_ctl.last_tlp_acked_start = rsm->r_start;
				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
				    rack->r_ctl.last_tlp_acked_end);
			}
			if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) {
				rack->r_ctl.last_tlp_acked_end = rsm->r_end;
				rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start,
				    rack->r_ctl.last_tlp_acked_end);
			}
		} else {
			rack->rc_last_tlp_past_cumack = 1;
			rack->r_ctl.last_tlp_acked_start = rsm->r_start;
			rack->r_ctl.last_tlp_acked_end = rsm->r_end;
			rack->rc_last_tlp_acked_set = 1;
			rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end);
		}
	}
	/* Now do we consume the whole thing? */
	if (SEQ_GEQ(th_ack, rsm->r_end)) {
		/* It's all consumed.
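		 * For illustration: if this rsm covers [1000, 2448) and
		 * th_ack is 3000, the whole entry is freed below, left
		 * becomes 552 and we jump back to "more:" to consume
		 * the next entry as well.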
*/ 8974 uint32_t left; 8975 uint8_t newly_acked; 8976 8977 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8978 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8979 rsm->r_rtr_bytes = 0; 8980 /* Record the time of highest cumack sent */ 8981 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8982 #ifndef INVARIANTS 8983 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8984 #else 8985 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8986 if (rm != rsm) { 8987 panic("removing head in rack:%p rsm:%p rm:%p", 8988 rack, rsm, rm); 8989 } 8990 #endif 8991 if (rsm->r_in_tmap) { 8992 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8993 rsm->r_in_tmap = 0; 8994 } 8995 newly_acked = 1; 8996 if (rsm->r_flags & RACK_ACKED) { 8997 /* 8998 * It was acked on the scoreboard -- remove 8999 * it from total 9000 */ 9001 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9002 newly_acked = 0; 9003 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9004 /* 9005 * There are segments ACKED on the 9006 * scoreboard further up. We are seeing 9007 * reordering. 9008 */ 9009 rsm->r_flags &= ~RACK_SACK_PASSED; 9010 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9011 rsm->r_flags |= RACK_ACKED; 9012 rack->r_ctl.rc_reorder_ts = cts; 9013 if (rack->r_ent_rec_ns) { 9014 /* 9015 * We have sent no more, and we saw an sack 9016 * then ack arrive. 9017 */ 9018 rack->r_might_revert = 1; 9019 } 9020 } 9021 if ((rsm->r_flags & RACK_TO_REXT) && 9022 (tp->t_flags & TF_RCVD_TSTMP) && 9023 (to->to_flags & TOF_TS) && 9024 (to->to_tsecr != 0) && 9025 (tp->t_flags & TF_PREVVALID)) { 9026 /* 9027 * We can use the timestamp to see 9028 * if this retransmission was from the 9029 * first transmit. If so we made a mistake. 9030 */ 9031 tp->t_flags &= ~TF_PREVVALID; 9032 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9033 /* The first transmit is what this ack is for */ 9034 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 9035 } 9036 } 9037 left = th_ack - rsm->r_end; 9038 if (rack->app_limited_needs_set && newly_acked) 9039 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9040 /* Free back to zone */ 9041 rack_free(rack, rsm); 9042 if (left) { 9043 goto more; 9044 } 9045 /* Check for reneging */ 9046 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9047 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9048 /* 9049 * The peer has moved snd_una up to 9050 * the edge of this send, i.e. one 9051 * that it had previously acked. The only 9052 * way that can be true if the peer threw 9053 * away data (space issues) that it had 9054 * previously sacked (else it would have 9055 * given us snd_una up to (rsm->r_end). 9056 * We need to undo the acked markings here. 9057 * 9058 * Note we have to look to make sure th_ack is 9059 * our rsm->r_start in case we get an old ack 9060 * where th_ack is behind snd_una. 9061 */ 9062 rack_peer_reneges(rack, rsm, th_ack); 9063 } 9064 return; 9065 } 9066 if (rsm->r_flags & RACK_ACKED) { 9067 /* 9068 * It was acked on the scoreboard -- remove it from 9069 * total for the part being cum-acked. 9070 */ 9071 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9072 } 9073 /* 9074 * Clear the dup ack count for 9075 * the piece that remains. 9076 */ 9077 rsm->r_dupack = 0; 9078 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9079 if (rsm->r_rtr_bytes) { 9080 /* 9081 * It was retransmitted adjust the 9082 * sack holes for what was acked. 
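	 * e.g. (numbers for illustration only): if this rsm spans
	 * [1000, 3000) with r_rtr_bytes of 1000 and th_ack is
	 * 2000, then ack_am is 1000 and both rc_holes_rxt and
	 * r_rtr_bytes are reduced by that amount.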
9083 */ 9084 int ack_am; 9085 9086 ack_am = (th_ack - rsm->r_start); 9087 if (ack_am >= rsm->r_rtr_bytes) { 9088 rack->r_ctl.rc_holes_rxt -= ack_am; 9089 rsm->r_rtr_bytes -= ack_am; 9090 } 9091 } 9092 /* 9093 * Update where the piece starts and record 9094 * the time of send of highest cumack sent. 9095 */ 9096 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9097 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9098 /* Now we need to move our offset forward too */ 9099 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9100 /* Fix up the orig_m_len and possibly the mbuf offset */ 9101 rack_adjust_orig_mlen(rsm); 9102 } 9103 rsm->soff += (th_ack - rsm->r_start); 9104 rsm->r_start = th_ack; 9105 /* Now do we need to move the mbuf fwd too? */ 9106 if (rsm->m) { 9107 while (rsm->soff >= rsm->m->m_len) { 9108 rsm->soff -= rsm->m->m_len; 9109 rsm->m = rsm->m->m_next; 9110 KASSERT((rsm->m != NULL), 9111 (" nrsm:%p hit at soff:%u null m", 9112 rsm, rsm->soff)); 9113 } 9114 rsm->orig_m_len = rsm->m->m_len; 9115 } 9116 if (rack->app_limited_needs_set) 9117 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9118 } 9119 9120 static void 9121 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9122 { 9123 struct rack_sendmap *rsm; 9124 int sack_pass_fnd = 0; 9125 9126 if (rack->r_might_revert) { 9127 /* 9128 * Ok we have reordering, have not sent anything, we 9129 * might want to revert the congestion state if nothing 9130 * further has SACK_PASSED on it. Lets check. 9131 * 9132 * We also get here when we have DSACKs come in for 9133 * all the data that we FR'd. Note that a rxt or tlp 9134 * timer clears this from happening. 9135 */ 9136 9137 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9138 if (rsm->r_flags & RACK_SACK_PASSED) { 9139 sack_pass_fnd = 1; 9140 break; 9141 } 9142 } 9143 if (sack_pass_fnd == 0) { 9144 /* 9145 * We went into recovery 9146 * incorrectly due to reordering! 9147 */ 9148 int orig_cwnd; 9149 9150 rack->r_ent_rec_ns = 0; 9151 orig_cwnd = tp->snd_cwnd; 9152 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9153 tp->snd_recover = tp->snd_una; 9154 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9155 EXIT_RECOVERY(tp->t_flags); 9156 } 9157 rack->r_might_revert = 0; 9158 } 9159 } 9160 9161 #ifdef NETFLIX_EXP_DETECTION 9162 static void 9163 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9164 { 9165 if ((rack->do_detection || tcp_force_detection) && 9166 tcp_sack_to_ack_thresh && 9167 tcp_sack_to_move_thresh && 9168 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9169 /* 9170 * We have thresholds set to find 9171 * possible attackers and disable sack. 9172 * Check them. 
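	 * Both ratios below are scaled by 1000, e.g. (made-up
	 * numbers): sack_count = 7000 against ack_count = 1000
	 * gives an ackratio of 7000, and sack_moved_extra = 600
	 * against a movetotal of 1000 gives a moveratio of 600.
	 * Only when both exceed their respective sysctl
	 * thresholds is SACK processing disabled.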
9173 */ 9174 uint64_t ackratio, moveratio, movetotal; 9175 9176 /* Log detecting */ 9177 rack_log_sad(rack, 1); 9178 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9179 ackratio *= (uint64_t)(1000); 9180 if (rack->r_ctl.ack_count) 9181 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9182 else { 9183 /* We really should not hit here */ 9184 ackratio = 1000; 9185 } 9186 if ((rack->sack_attack_disable == 0) && 9187 (ackratio > rack_highest_sack_thresh_seen)) 9188 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9189 movetotal = rack->r_ctl.sack_moved_extra; 9190 movetotal += rack->r_ctl.sack_noextra_move; 9191 moveratio = rack->r_ctl.sack_moved_extra; 9192 moveratio *= (uint64_t)1000; 9193 if (movetotal) 9194 moveratio /= movetotal; 9195 else { 9196 /* No moves, thats pretty good */ 9197 moveratio = 0; 9198 } 9199 if ((rack->sack_attack_disable == 0) && 9200 (moveratio > rack_highest_move_thresh_seen)) 9201 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9202 if (rack->sack_attack_disable == 0) { 9203 if ((ackratio > tcp_sack_to_ack_thresh) && 9204 (moveratio > tcp_sack_to_move_thresh)) { 9205 /* Disable sack processing */ 9206 rack->sack_attack_disable = 1; 9207 if (rack->r_rep_attack == 0) { 9208 rack->r_rep_attack = 1; 9209 counter_u64_add(rack_sack_attacks_detected, 1); 9210 } 9211 if (tcp_attack_on_turns_on_logging) { 9212 /* 9213 * Turn on logging, used for debugging 9214 * false positives. 9215 */ 9216 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9217 } 9218 /* Clamp the cwnd at flight size */ 9219 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9220 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9221 rack_log_sad(rack, 2); 9222 } 9223 } else { 9224 /* We are sack-disabled check for false positives */ 9225 if ((ackratio <= tcp_restoral_thresh) || 9226 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9227 rack->sack_attack_disable = 0; 9228 rack_log_sad(rack, 3); 9229 /* Restart counting */ 9230 rack->r_ctl.sack_count = 0; 9231 rack->r_ctl.sack_moved_extra = 0; 9232 rack->r_ctl.sack_noextra_move = 1; 9233 rack->r_ctl.ack_count = max(1, 9234 (bytes_this_ack / segsiz)); 9235 9236 if (rack->r_rep_reverse == 0) { 9237 rack->r_rep_reverse = 1; 9238 counter_u64_add(rack_sack_attacks_reversed, 1); 9239 } 9240 /* Restore the cwnd */ 9241 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9242 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9243 } 9244 } 9245 } 9246 } 9247 #endif 9248 9249 static int 9250 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9251 { 9252 9253 uint32_t am, l_end; 9254 int was_tlp = 0; 9255 9256 if (SEQ_GT(end, start)) 9257 am = end - start; 9258 else 9259 am = 0; 9260 if ((rack->rc_last_tlp_acked_set ) && 9261 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9262 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9263 /* 9264 * The DSACK is because of a TLP which we don't 9265 * do anything with the reordering window over since 9266 * it was not reordering that caused the DSACK but 9267 * our previous retransmit TLP. 9268 */ 9269 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9270 was_tlp = 1; 9271 goto skip_dsack_round; 9272 } 9273 if (rack->rc_last_sent_tlp_seq_valid) { 9274 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9275 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9276 (SEQ_LEQ(end, l_end))) { 9277 /* 9278 * This dsack is from the last sent TLP, ignore it 9279 * for reordering purposes. 
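	 * e.g. with last_sent_tlp_seq = 5000 and
	 * last_sent_tlp_len = 1448, any DSACK wholly inside
	 * [5000, 6448) is attributed to that TLP rather than to
	 * reordering.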
9280 */ 9281 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9282 was_tlp = 1; 9283 goto skip_dsack_round; 9284 } 9285 } 9286 if (rack->rc_dsack_round_seen == 0) { 9287 rack->rc_dsack_round_seen = 1; 9288 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9289 rack->r_ctl.num_dsack++; 9290 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9291 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9292 } 9293 skip_dsack_round: 9294 /* 9295 * We keep track of how many DSACK blocks we get 9296 * after a recovery incident. 9297 */ 9298 rack->r_ctl.dsack_byte_cnt += am; 9299 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9300 rack->r_ctl.retran_during_recovery && 9301 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9302 /* 9303 * False recovery most likely culprit is reordering. If 9304 * nothing else is missing we need to revert. 9305 */ 9306 rack->r_might_revert = 1; 9307 rack_handle_might_revert(rack->rc_tp, rack); 9308 rack->r_might_revert = 0; 9309 rack->r_ctl.retran_during_recovery = 0; 9310 rack->r_ctl.dsack_byte_cnt = 0; 9311 } 9312 return (was_tlp); 9313 } 9314 9315 static void 9316 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9317 { 9318 /* Deal with changed and PRR here (in recovery only) */ 9319 uint32_t pipe, snd_una; 9320 9321 rack->r_ctl.rc_prr_delivered += changed; 9322 9323 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9324 /* 9325 * It is all outstanding, we are application limited 9326 * and thus we don't need more room to send anything. 9327 * Note we use tp->snd_una here and not th_ack because 9328 * the data as yet not been cut from the sb. 9329 */ 9330 rack->r_ctl.rc_prr_sndcnt = 0; 9331 return; 9332 } 9333 /* Compute prr_sndcnt */ 9334 if (SEQ_GT(tp->snd_una, th_ack)) { 9335 snd_una = tp->snd_una; 9336 } else { 9337 snd_una = th_ack; 9338 } 9339 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9340 if (pipe > tp->snd_ssthresh) { 9341 long sndcnt; 9342 9343 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9344 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9345 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9346 else { 9347 rack->r_ctl.rc_prr_sndcnt = 0; 9348 rack_log_to_prr(rack, 9, 0, __LINE__); 9349 sndcnt = 0; 9350 } 9351 sndcnt++; 9352 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9353 sndcnt -= rack->r_ctl.rc_prr_out; 9354 else 9355 sndcnt = 0; 9356 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9357 rack_log_to_prr(rack, 10, 0, __LINE__); 9358 } else { 9359 uint32_t limit; 9360 9361 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9362 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9363 else 9364 limit = 0; 9365 if (changed > limit) 9366 limit = changed; 9367 limit += ctf_fixed_maxseg(tp); 9368 if (tp->snd_ssthresh > pipe) { 9369 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9370 rack_log_to_prr(rack, 11, 0, __LINE__); 9371 } else { 9372 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9373 rack_log_to_prr(rack, 12, 0, __LINE__); 9374 } 9375 } 9376 } 9377 9378 static void 9379 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9380 { 9381 uint32_t changed; 9382 struct tcp_rack *rack; 9383 struct rack_sendmap *rsm; 9384 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9385 register uint32_t th_ack; 9386 int32_t i, j, k, num_sack_blks = 0; 9387 uint32_t cts, acked, ack_point; 9388 int loop_start = 0, moved_two = 0; 9389 
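	/*
	 * Outline of what follows: ignore RSTs, decay the
	 * SACK attack-detection counters, credit ack_count for
	 * cum-ack moves of an MSS or more, advance the scoreboard
	 * for the cum-ack (rack_process_to_cumack), then collect,
	 * filter, sort and de-duplicate any SACK blocks (noting
	 * D-SACKs) before handing each one to rack_proc_sack_blk().
	 * Finally run the sack-attack detection, cancel the rack
	 * timer if anything changed, and decide whether to enter
	 * recovery and/or update PRR.
	 */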
uint32_t tsused; 9390 9391 9392 INP_WLOCK_ASSERT(tp->t_inpcb); 9393 if (tcp_get_flags(th) & TH_RST) { 9394 /* We don't log resets */ 9395 return; 9396 } 9397 rack = (struct tcp_rack *)tp->t_fb_ptr; 9398 cts = tcp_get_usecs(NULL); 9399 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9400 changed = 0; 9401 th_ack = th->th_ack; 9402 if (rack->sack_attack_disable == 0) 9403 rack_do_decay(rack); 9404 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9405 /* 9406 * You only get credit for 9407 * MSS and greater (and you get extra 9408 * credit for larger cum-ack moves). 9409 */ 9410 int ac; 9411 9412 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9413 rack->r_ctl.ack_count += ac; 9414 counter_u64_add(rack_ack_total, ac); 9415 } 9416 if (rack->r_ctl.ack_count > 0xfff00000) { 9417 /* 9418 * reduce the number to keep us under 9419 * a uint32_t. 9420 */ 9421 rack->r_ctl.ack_count /= 2; 9422 rack->r_ctl.sack_count /= 2; 9423 } 9424 if (SEQ_GT(th_ack, tp->snd_una)) { 9425 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9426 tp->t_acktime = ticks; 9427 } 9428 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9429 changed = th_ack - rsm->r_start; 9430 if (changed) { 9431 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9432 } 9433 if ((to->to_flags & TOF_SACK) == 0) { 9434 /* We are done nothing left and no sack. */ 9435 rack_handle_might_revert(tp, rack); 9436 /* 9437 * For cases where we struck a dup-ack 9438 * with no SACK, add to the changes so 9439 * PRR will work right. 9440 */ 9441 if (dup_ack_struck && (changed == 0)) { 9442 changed += ctf_fixed_maxseg(rack->rc_tp); 9443 } 9444 goto out; 9445 } 9446 /* Sack block processing */ 9447 if (SEQ_GT(th_ack, tp->snd_una)) 9448 ack_point = th_ack; 9449 else 9450 ack_point = tp->snd_una; 9451 for (i = 0; i < to->to_nsacks; i++) { 9452 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9453 &sack, sizeof(sack)); 9454 sack.start = ntohl(sack.start); 9455 sack.end = ntohl(sack.end); 9456 if (SEQ_GT(sack.end, sack.start) && 9457 SEQ_GT(sack.start, ack_point) && 9458 SEQ_LT(sack.start, tp->snd_max) && 9459 SEQ_GT(sack.end, ack_point) && 9460 SEQ_LEQ(sack.end, tp->snd_max)) { 9461 sack_blocks[num_sack_blks] = sack; 9462 num_sack_blks++; 9463 } else if (SEQ_LEQ(sack.start, th_ack) && 9464 SEQ_LEQ(sack.end, th_ack)) { 9465 int was_tlp; 9466 9467 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9468 /* 9469 * Its a D-SACK block. 9470 */ 9471 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9472 } 9473 } 9474 if (rack->rc_dsack_round_seen) { 9475 /* Is the dsack roound over? */ 9476 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9477 /* Yes it is */ 9478 rack->rc_dsack_round_seen = 0; 9479 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9480 } 9481 } 9482 /* 9483 * Sort the SACK blocks so we can update the rack scoreboard with 9484 * just one pass. 9485 */ 9486 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9487 num_sack_blks, th->th_ack); 9488 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9489 if (num_sack_blks == 0) { 9490 /* Nothing to sack (DSACKs?) 
*/ 9491 goto out_with_totals; 9492 } 9493 if (num_sack_blks < 2) { 9494 /* Only one, we don't need to sort */ 9495 goto do_sack_work; 9496 } 9497 /* Sort the sacks */ 9498 for (i = 0; i < num_sack_blks; i++) { 9499 for (j = i + 1; j < num_sack_blks; j++) { 9500 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9501 sack = sack_blocks[i]; 9502 sack_blocks[i] = sack_blocks[j]; 9503 sack_blocks[j] = sack; 9504 } 9505 } 9506 } 9507 /* 9508 * Now are any of the sack block ends the same (yes some 9509 * implementations send these)? 9510 */ 9511 again: 9512 if (num_sack_blks == 0) 9513 goto out_with_totals; 9514 if (num_sack_blks > 1) { 9515 for (i = 0; i < num_sack_blks; i++) { 9516 for (j = i + 1; j < num_sack_blks; j++) { 9517 if (sack_blocks[i].end == sack_blocks[j].end) { 9518 /* 9519 * Ok these two have the same end we 9520 * want the smallest end and then 9521 * throw away the larger and start 9522 * again. 9523 */ 9524 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9525 /* 9526 * The second block covers 9527 * more area use that 9528 */ 9529 sack_blocks[i].start = sack_blocks[j].start; 9530 } 9531 /* 9532 * Now collapse out the dup-sack and 9533 * lower the count 9534 */ 9535 for (k = (j + 1); k < num_sack_blks; k++) { 9536 sack_blocks[j].start = sack_blocks[k].start; 9537 sack_blocks[j].end = sack_blocks[k].end; 9538 j++; 9539 } 9540 num_sack_blks--; 9541 goto again; 9542 } 9543 } 9544 } 9545 } 9546 do_sack_work: 9547 /* 9548 * First lets look to see if 9549 * we have retransmitted and 9550 * can use the transmit next? 9551 */ 9552 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9553 if (rsm && 9554 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9555 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9556 /* 9557 * We probably did the FR and the next 9558 * SACK in continues as we would expect. 9559 */ 9560 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9561 if (acked) { 9562 rack->r_wanted_output = 1; 9563 changed += acked; 9564 } 9565 if (num_sack_blks == 1) { 9566 /* 9567 * This is what we would expect from 9568 * a normal implementation to happen 9569 * after we have retransmitted the FR, 9570 * i.e the sack-filter pushes down 9571 * to 1 block and the next to be retransmitted 9572 * is the sequence in the sack block (has more 9573 * are acked). Count this as ACK'd data to boost 9574 * up the chances of recovering any false positives. 9575 */ 9576 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9577 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9578 counter_u64_add(rack_express_sack, 1); 9579 if (rack->r_ctl.ack_count > 0xfff00000) { 9580 /* 9581 * reduce the number to keep us under 9582 * a uint32_t. 9583 */ 9584 rack->r_ctl.ack_count /= 2; 9585 rack->r_ctl.sack_count /= 2; 9586 } 9587 goto out_with_totals; 9588 } else { 9589 /* 9590 * Start the loop through the 9591 * rest of blocks, past the first block. 9592 */ 9593 moved_two = 0; 9594 loop_start = 1; 9595 } 9596 } 9597 /* Its a sack of some sort */ 9598 rack->r_ctl.sack_count++; 9599 if (rack->r_ctl.sack_count > 0xfff00000) { 9600 /* 9601 * reduce the number to keep us under 9602 * a uint32_t. 
9603 */ 9604 rack->r_ctl.ack_count /= 2; 9605 rack->r_ctl.sack_count /= 2; 9606 } 9607 counter_u64_add(rack_sack_total, 1); 9608 if (rack->sack_attack_disable) { 9609 /* An attacker disablement is in place */ 9610 if (num_sack_blks > 1) { 9611 rack->r_ctl.sack_count += (num_sack_blks - 1); 9612 rack->r_ctl.sack_moved_extra++; 9613 counter_u64_add(rack_move_some, 1); 9614 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9615 rack->r_ctl.sack_moved_extra /= 2; 9616 rack->r_ctl.sack_noextra_move /= 2; 9617 } 9618 } 9619 goto out; 9620 } 9621 rsm = rack->r_ctl.rc_sacklast; 9622 for (i = loop_start; i < num_sack_blks; i++) { 9623 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9624 if (acked) { 9625 rack->r_wanted_output = 1; 9626 changed += acked; 9627 } 9628 if (moved_two) { 9629 /* 9630 * If we did not get a SACK for at least a MSS and 9631 * had to move at all, or if we moved more than our 9632 * threshold, it counts against the "extra" move. 9633 */ 9634 rack->r_ctl.sack_moved_extra += moved_two; 9635 counter_u64_add(rack_move_some, 1); 9636 } else { 9637 /* 9638 * else we did not have to move 9639 * any more than we would expect. 9640 */ 9641 rack->r_ctl.sack_noextra_move++; 9642 counter_u64_add(rack_move_none, 1); 9643 } 9644 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9645 /* 9646 * If the SACK was not a full MSS then 9647 * we add to sack_count the number of 9648 * MSS's (or possibly more than 9649 * a MSS if its a TSO send) we had to skip by. 9650 */ 9651 rack->r_ctl.sack_count += moved_two; 9652 counter_u64_add(rack_sack_total, moved_two); 9653 } 9654 /* 9655 * Now we need to setup for the next 9656 * round. First we make sure we won't 9657 * exceed the size of our uint32_t on 9658 * the various counts, and then clear out 9659 * moved_two. 9660 */ 9661 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9662 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9663 rack->r_ctl.sack_moved_extra /= 2; 9664 rack->r_ctl.sack_noextra_move /= 2; 9665 } 9666 if (rack->r_ctl.sack_count > 0xfff00000) { 9667 rack->r_ctl.ack_count /= 2; 9668 rack->r_ctl.sack_count /= 2; 9669 } 9670 moved_two = 0; 9671 } 9672 out_with_totals: 9673 if (num_sack_blks > 1) { 9674 /* 9675 * You get an extra stroke if 9676 * you have more than one sack-blk, this 9677 * could be where we are skipping forward 9678 * and the sack-filter is still working, or 9679 * it could be an attacker constantly 9680 * moving us. 9681 */ 9682 rack->r_ctl.sack_moved_extra++; 9683 counter_u64_add(rack_move_some, 1); 9684 } 9685 out: 9686 #ifdef NETFLIX_EXP_DETECTION 9687 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9688 #endif 9689 if (changed) { 9690 /* Something changed cancel the rack timer */ 9691 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9692 } 9693 tsused = tcp_get_usecs(NULL); 9694 rsm = tcp_rack_output(tp, rack, tsused); 9695 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9696 rsm && 9697 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9698 /* Enter recovery */ 9699 entered_recovery = 1; 9700 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9701 /* 9702 * When we enter recovery we need to assure we send 9703 * one packet. 
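	 * (PRR note, for illustration: the initial rc_prr_sndcnt
	 * of one segment set below is only the seed; afterwards
	 * rack_update_prr() scales it roughly as
	 * prr_delivered * ssthresh / recovery_fs minus what has
	 * already been sent, in the spirit of RFC 6937.)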
9704 */ 9705 if (rack->rack_no_prr == 0) { 9706 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9707 rack_log_to_prr(rack, 8, 0, __LINE__); 9708 } 9709 rack->r_timer_override = 1; 9710 rack->r_early = 0; 9711 rack->r_ctl.rc_agg_early = 0; 9712 } else if (IN_FASTRECOVERY(tp->t_flags) && 9713 rsm && 9714 (rack->r_rr_config == 3)) { 9715 /* 9716 * Assure we can output and we get no 9717 * remembered pace time except the retransmit. 9718 */ 9719 rack->r_timer_override = 1; 9720 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9721 rack->r_ctl.rc_resend = rsm; 9722 } 9723 if (IN_FASTRECOVERY(tp->t_flags) && 9724 (rack->rack_no_prr == 0) && 9725 (entered_recovery == 0)) { 9726 rack_update_prr(tp, rack, changed, th_ack); 9727 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9728 ((tcp_in_hpts(rack->rc_inp) == 0) && 9729 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9730 /* 9731 * If you are pacing output you don't want 9732 * to override. 9733 */ 9734 rack->r_early = 0; 9735 rack->r_ctl.rc_agg_early = 0; 9736 rack->r_timer_override = 1; 9737 } 9738 } 9739 } 9740 9741 static void 9742 rack_strike_dupack(struct tcp_rack *rack) 9743 { 9744 struct rack_sendmap *rsm; 9745 9746 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9747 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9748 rsm = TAILQ_NEXT(rsm, r_tnext); 9749 if (rsm->r_flags & RACK_MUST_RXT) { 9750 /* Sendmap entries that are marked to 9751 * be retransmitted do not need dupack's 9752 * struck. We get these marks for a number 9753 * of reasons (rxt timeout with no sack, 9754 * mtu change, or rwnd collapses). When 9755 * these events occur, we know we must retransmit 9756 * them and mark the sendmap entries. Dupack counting 9757 * is not needed since we are already set to retransmit 9758 * it as soon as we can. 9759 */ 9760 continue; 9761 } 9762 } 9763 if (rsm && (rsm->r_dupack < 0xff)) { 9764 rsm->r_dupack++; 9765 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9766 struct timeval tv; 9767 uint32_t cts; 9768 /* 9769 * Here we see if we need to retransmit. For 9770 * a SACK type connection if enough time has passed 9771 * we will get a return of the rsm. For a non-sack 9772 * connection we will get the rsm returned if the 9773 * dupack value is 3 or more. 9774 */ 9775 cts = tcp_get_usecs(&tv); 9776 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9777 if (rack->r_ctl.rc_resend != NULL) { 9778 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9779 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9780 rack->rc_tp->snd_una, __LINE__); 9781 } 9782 rack->r_wanted_output = 1; 9783 rack->r_timer_override = 1; 9784 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9785 } 9786 } else { 9787 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9788 } 9789 } 9790 } 9791 9792 static void 9793 rack_check_bottom_drag(struct tcpcb *tp, 9794 struct tcp_rack *rack, 9795 struct socket *so, int32_t acked) 9796 { 9797 uint32_t segsiz, minseg; 9798 9799 segsiz = ctf_fixed_maxseg(tp); 9800 minseg = segsiz; 9801 9802 if (tp->snd_max == tp->snd_una) { 9803 /* 9804 * We are doing dynamic pacing and we are way 9805 * under. Basically everything got acked while 9806 * we were still waiting on the pacer to expire. 9807 * 9808 * This means we need to boost the b/w in 9809 * addition to any earlier boosting of 9810 * the multipler. 9811 */ 9812 rack->rc_dragged_bottom = 1; 9813 rack_validate_multipliers_at_or_above100(rack); 9814 /* 9815 * Lets use the segment bytes acked plus 9816 * the lowest RTT seen as the basis to 9817 * form a b/w estimate. 
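	 * (For illustration only: 14480 bytes acked over a
	 * 10000 usec RTT gives calc_bw = 14480 * 1000000 / 10000,
	 * i.e. about 1.45 MB/s.)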
This will be off 9818 * due to the fact that the true estimate 9819 * should be around 1/2 the time of the RTT 9820 * but we can settle for that. 9821 */ 9822 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9823 acked) { 9824 uint64_t bw, calc_bw, rtt; 9825 9826 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9827 if (rtt == 0) { 9828 /* no us sample is there a ms one? */ 9829 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9830 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9831 } else { 9832 goto no_measurement; 9833 } 9834 } 9835 bw = acked; 9836 calc_bw = bw * 1000000; 9837 calc_bw /= rtt; 9838 if (rack->r_ctl.last_max_bw && 9839 (rack->r_ctl.last_max_bw < calc_bw)) { 9840 /* 9841 * If we have a last calculated max bw 9842 * enforce it. 9843 */ 9844 calc_bw = rack->r_ctl.last_max_bw; 9845 } 9846 /* now plop it in */ 9847 if (rack->rc_gp_filled == 0) { 9848 if (calc_bw > ONE_POINT_TWO_MEG) { 9849 /* 9850 * If we have no measurement 9851 * don't let us set in more than 9852 * 1.2Mbps. If we are still too 9853 * low after pacing with this we 9854 * will hopefully have a max b/w 9855 * available to sanity check things. 9856 */ 9857 calc_bw = ONE_POINT_TWO_MEG; 9858 } 9859 rack->r_ctl.rc_rtt_diff = 0; 9860 rack->r_ctl.gp_bw = calc_bw; 9861 rack->rc_gp_filled = 1; 9862 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9863 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9864 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9865 } else if (calc_bw > rack->r_ctl.gp_bw) { 9866 rack->r_ctl.rc_rtt_diff = 0; 9867 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9868 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9869 rack->r_ctl.gp_bw = calc_bw; 9870 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9871 } else 9872 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9873 if ((rack->gp_ready == 0) && 9874 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9875 /* We have enough measurements now */ 9876 rack->gp_ready = 1; 9877 rack_set_cc_pacing(rack); 9878 if (rack->defer_options) 9879 rack_apply_deferred_options(rack); 9880 } 9881 /* 9882 * For acks over 1mss we do a extra boost to simulate 9883 * where we would get 2 acks (we want 110 for the mul). 9884 */ 9885 if (acked > segsiz) 9886 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9887 } else { 9888 /* 9889 * zero rtt possibly?, settle for just an old increase. 9890 */ 9891 no_measurement: 9892 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9893 } 9894 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9895 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9896 minseg)) && 9897 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9898 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9899 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9900 (segsiz * rack_req_segs))) { 9901 /* 9902 * We are doing dynamic GP pacing and 9903 * we have everything except 1MSS or less 9904 * bytes left out. We are still pacing away. 9905 * And there is data that could be sent, This 9906 * means we are inserting delayed ack time in 9907 * our measurements because we are pacing too slow. 9908 */ 9909 rack_validate_multipliers_at_or_above100(rack); 9910 rack->rc_dragged_bottom = 1; 9911 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9912 } 9913 } 9914 9915 9916 9917 static void 9918 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9919 { 9920 /* 9921 * The fast output path is enabled and we 9922 * have moved the cumack forward. 
Lets see if 9923 * we can expand forward the fast path length by 9924 * that amount. What we would ideally like to 9925 * do is increase the number of bytes in the 9926 * fast path block (left_to_send) by the 9927 * acked amount. However we have to gate that 9928 * by two factors: 9929 * 1) The amount outstanding and the rwnd of the peer 9930 * (i.e. we don't want to exceed the rwnd of the peer). 9931 * <and> 9932 * 2) The amount of data left in the socket buffer (i.e. 9933 * we can't send beyond what is in the buffer). 9934 * 9935 * Note that this does not take into account any increase 9936 * in the cwnd. We will only extend the fast path by 9937 * what was acked. 9938 */ 9939 uint32_t new_total, gating_val; 9940 9941 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 9942 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 9943 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 9944 if (new_total <= gating_val) { 9945 /* We can increase left_to_send by the acked amount */ 9946 counter_u64_add(rack_extended_rfo, 1); 9947 rack->r_ctl.fsb.left_to_send = new_total; 9948 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 9949 ("rack:%p left_to_send:%u sbavail:%u out:%u", 9950 rack, rack->r_ctl.fsb.left_to_send, 9951 sbavail(&rack->rc_inp->inp_socket->so_snd), 9952 (tp->snd_max - tp->snd_una))); 9953 9954 } 9955 } 9956 9957 static void 9958 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 9959 { 9960 /* 9961 * Here any sendmap entry that points to the 9962 * beginning mbuf must be adjusted to the correct 9963 * offset. This must be called with: 9964 * 1) The socket buffer locked 9965 * 2) snd_una adjusted to its new postion. 9966 * 9967 * Note that (2) implies rack_ack_received has also 9968 * been called. 9969 * 9970 * We grab the first mbuf in the socket buffer and 9971 * then go through the front of the sendmap, recalculating 9972 * the stored offset for any sendmap entry that has 9973 * that mbuf. We must use the sb functions to do this 9974 * since its possible an add was done has well as 9975 * the subtraction we may have just completed. This should 9976 * not be a penalty though, since we just referenced the sb 9977 * to go in and trim off the mbufs that we freed (of course 9978 * there will be a penalty for the sendmap references though). 9979 */ 9980 struct mbuf *m; 9981 struct rack_sendmap *rsm; 9982 9983 SOCKBUF_LOCK_ASSERT(sb); 9984 m = sb->sb_mb; 9985 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9986 if ((rsm == NULL) || (m == NULL)) { 9987 /* Nothing outstanding */ 9988 return; 9989 } 9990 while (rsm->m && (rsm->m == m)) { 9991 /* one to adjust */ 9992 #ifdef INVARIANTS 9993 struct mbuf *tm; 9994 uint32_t soff; 9995 9996 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 9997 if (rsm->orig_m_len != m->m_len) { 9998 rack_adjust_orig_mlen(rsm); 9999 } 10000 if (rsm->soff != soff) { 10001 /* 10002 * This is not a fatal error, we anticipate it 10003 * might happen (the else code), so we count it here 10004 * so that under invariant we can see that it really 10005 * does happen. 
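 * (The count goes into the rack_adjust_map_bw counter just below; a
 * non-zero value merely confirms the anticipated case was hit and is
 * not by itself a sign of corruption.)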
10006 */ 10007 counter_u64_add(rack_adjust_map_bw, 1); 10008 } 10009 rsm->m = tm; 10010 rsm->soff = soff; 10011 if (tm) 10012 rsm->orig_m_len = rsm->m->m_len; 10013 else 10014 rsm->orig_m_len = 0; 10015 #else 10016 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10017 if (rsm->m) 10018 rsm->orig_m_len = rsm->m->m_len; 10019 else 10020 rsm->orig_m_len = 0; 10021 #endif 10022 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10023 rsm); 10024 if (rsm == NULL) 10025 break; 10026 } 10027 } 10028 10029 /* 10030 * Return value of 1, we do not need to call rack_process_data(). 10031 * return value of 0, rack_process_data can be called. 10032 * For ret_val if its 0 the TCP is locked, if its non-zero 10033 * its unlocked and probably unsafe to touch the TCB. 10034 */ 10035 static int 10036 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10037 struct tcpcb *tp, struct tcpopt *to, 10038 uint32_t tiwin, int32_t tlen, 10039 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10040 { 10041 int32_t ourfinisacked = 0; 10042 int32_t nsegs, acked_amount; 10043 int32_t acked; 10044 struct mbuf *mfree; 10045 struct tcp_rack *rack; 10046 int32_t under_pacing = 0; 10047 int32_t recovery = 0; 10048 10049 rack = (struct tcp_rack *)tp->t_fb_ptr; 10050 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10051 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10052 &rack->r_ctl.challenge_ack_ts, 10053 &rack->r_ctl.challenge_ack_cnt); 10054 rack->r_wanted_output = 1; 10055 return (1); 10056 } 10057 if (rack->gp_ready && 10058 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10059 under_pacing = 1; 10060 } 10061 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10062 int in_rec, dup_ack_struck = 0; 10063 10064 in_rec = IN_FASTRECOVERY(tp->t_flags); 10065 if (rack->rc_in_persist) { 10066 tp->t_rxtshift = 0; 10067 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10068 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10069 } 10070 if ((th->th_ack == tp->snd_una) && 10071 (tiwin == tp->snd_wnd) && 10072 ((to->to_flags & TOF_SACK) == 0)) { 10073 rack_strike_dupack(rack); 10074 dup_ack_struck = 1; 10075 } 10076 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10077 } 10078 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10079 /* 10080 * Old ack, behind (or duplicate to) the last one rcv'd 10081 * Note: We mark reordering is occuring if its 10082 * less than and we have not closed our window. 10083 */ 10084 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10085 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10086 } 10087 return (0); 10088 } 10089 /* 10090 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10091 * something we sent. 10092 */ 10093 if (tp->t_flags & TF_NEEDSYN) { 10094 /* 10095 * T/TCP: Connection was half-synchronized, and our SYN has 10096 * been ACK'd (so connection is now fully synchronized). Go 10097 * to non-starred state, increment snd_una for ACK of SYN, 10098 * and check if we can do window scaling. 10099 */ 10100 tp->t_flags &= ~TF_NEEDSYN; 10101 tp->snd_una++; 10102 /* Do window scaling? */ 10103 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10104 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10105 tp->rcv_scale = tp->request_r_scale; 10106 /* Send window already scaled. 
*/ 10107 } 10108 } 10109 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10110 INP_WLOCK_ASSERT(tp->t_inpcb); 10111 10112 acked = BYTES_THIS_ACK(tp, th); 10113 if (acked) { 10114 /* 10115 * Any time we move the cum-ack forward clear 10116 * keep-alive tied probe-not-answered. The 10117 * persists clears its own on entry. 10118 */ 10119 rack->probe_not_answered = 0; 10120 } 10121 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10122 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10123 /* 10124 * If we just performed our first retransmit, and the ACK arrives 10125 * within our recovery window, then it was a mistake to do the 10126 * retransmit in the first place. Recover our original cwnd and 10127 * ssthresh, and proceed to transmit where we left off. 10128 */ 10129 if ((tp->t_flags & TF_PREVVALID) && 10130 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10131 tp->t_flags &= ~TF_PREVVALID; 10132 if (tp->t_rxtshift == 1 && 10133 (int)(ticks - tp->t_badrxtwin) < 0) 10134 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10135 } 10136 if (acked) { 10137 /* assure we are not backed off */ 10138 tp->t_rxtshift = 0; 10139 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10140 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10141 rack->rc_tlp_in_progress = 0; 10142 rack->r_ctl.rc_tlp_cnt_out = 0; 10143 /* 10144 * If it is the RXT timer we want to 10145 * stop it, so we can restart a TLP. 10146 */ 10147 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10148 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10149 #ifdef NETFLIX_HTTP_LOGGING 10150 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10151 #endif 10152 } 10153 /* 10154 * If we have a timestamp reply, update smoothed round trip time. If 10155 * no timestamp is present but transmit timer is running and timed 10156 * sequence number was acked, update smoothed round trip time. Since 10157 * we now have an rtt measurement, cancel the timer backoff (cf., 10158 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10159 * timer. 10160 * 10161 * Some boxes send broken timestamp replies during the SYN+ACK 10162 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10163 * and blow up the retransmit timer. 10164 */ 10165 /* 10166 * If all outstanding data is acked, stop retransmit timer and 10167 * remember to restart (more output or persist). If there is more 10168 * data to be acked, restart retransmit timer, using current 10169 * (possibly backed-off) value. 10170 */ 10171 if (acked == 0) { 10172 if (ofia) 10173 *ofia = ourfinisacked; 10174 return (0); 10175 } 10176 if (IN_RECOVERY(tp->t_flags)) { 10177 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10178 (SEQ_LT(th->th_ack, tp->snd_max))) { 10179 tcp_rack_partialack(tp); 10180 } else { 10181 rack_post_recovery(tp, th->th_ack); 10182 recovery = 1; 10183 } 10184 } 10185 /* 10186 * Let the congestion control algorithm update congestion control 10187 * related information. This typically means increasing the 10188 * congestion window. 10189 */ 10190 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10191 SOCKBUF_LOCK(&so->so_snd); 10192 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10193 tp->snd_wnd -= acked_amount; 10194 mfree = sbcut_locked(&so->so_snd, acked_amount); 10195 if ((sbused(&so->so_snd) == 0) && 10196 (acked > acked_amount) && 10197 (tp->t_state >= TCPS_FIN_WAIT_1) && 10198 (tp->t_flags & TF_SENTFIN)) { 10199 /* 10200 * We must be sure our fin 10201 * was sent and acked (we can be 10202 * in FIN_WAIT_1 without having 10203 * sent the fin). 
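 * (The checks above imply this: the send buffer has fully drained,
 * yet the ack covered more than the buffered data, so the extra
 * sequence space being acked can only be our FIN.)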
10204 */ 10205 ourfinisacked = 1; 10206 } 10207 tp->snd_una = th->th_ack; 10208 if (acked_amount && sbavail(&so->so_snd)) 10209 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10210 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10211 /* NB: sowwakeup_locked() does an implicit unlock. */ 10212 sowwakeup_locked(so); 10213 m_freem(mfree); 10214 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10215 tp->snd_recover = tp->snd_una; 10216 10217 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10218 tp->snd_nxt = tp->snd_una; 10219 } 10220 if (under_pacing && 10221 (rack->use_fixed_rate == 0) && 10222 (rack->in_probe_rtt == 0) && 10223 rack->rc_gp_dyn_mul && 10224 rack->rc_always_pace) { 10225 /* Check if we are dragging bottom */ 10226 rack_check_bottom_drag(tp, rack, so, acked); 10227 } 10228 if (tp->snd_una == tp->snd_max) { 10229 /* Nothing left outstanding */ 10230 tp->t_flags &= ~TF_PREVVALID; 10231 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10232 rack->r_ctl.retran_during_recovery = 0; 10233 rack->r_ctl.dsack_byte_cnt = 0; 10234 if (rack->r_ctl.rc_went_idle_time == 0) 10235 rack->r_ctl.rc_went_idle_time = 1; 10236 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10237 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10238 tp->t_acktime = 0; 10239 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10240 /* Set need output so persist might get set */ 10241 rack->r_wanted_output = 1; 10242 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10243 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10244 (sbavail(&so->so_snd) == 0) && 10245 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10246 /* 10247 * The socket was gone and the 10248 * peer sent data (now or in the past), time to 10249 * reset him. 10250 */ 10251 *ret_val = 1; 10252 /* tcp_close will kill the inp pre-log the Reset */ 10253 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10254 tp = tcp_close(tp); 10255 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10256 return (1); 10257 } 10258 } 10259 if (ofia) 10260 *ofia = ourfinisacked; 10261 return (0); 10262 } 10263 10264 static void 10265 rack_collapsed_window(struct tcp_rack *rack) 10266 { 10267 /* 10268 * Now we must walk the 10269 * send map and divide the 10270 * ones left stranded. These 10271 * guys can't cause us to abort 10272 * the connection and are really 10273 * "unsent". However if a buggy 10274 * client actually did keep some 10275 * of the data i.e. collapsed the win 10276 * and refused to ack and then opened 10277 * the win and acked that data. We would 10278 * get into an ack war, the simplier 10279 * method then of just pretending we 10280 * did not send those segments something 10281 * won't work. 10282 */ 10283 struct rack_sendmap *rsm, *nrsm, fe; 10284 #ifdef INVARIANTS 10285 struct rack_sendmap *insret; 10286 #endif 10287 tcp_seq max_seq; 10288 10289 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10290 memset(&fe, 0, sizeof(fe)); 10291 fe.r_start = max_seq; 10292 /* Find the first seq past or at maxseq */ 10293 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10294 if (rsm == NULL) { 10295 /* Nothing to do strange */ 10296 rack->rc_has_collapsed = 0; 10297 return; 10298 } 10299 /* 10300 * Now do we need to split at 10301 * the collapse point? 10302 */ 10303 if (SEQ_GT(max_seq, rsm->r_start)) { 10304 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10305 if (nrsm == NULL) { 10306 /* We can't get a rsm, mark all? 
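 * (Allocation failure is non-fatal: by falling through with the
 * unsplit entry, the whole rsm straddling the collapse point is
 * simply tagged RACK_RWND_COLLAPSED below rather than being divided
 * at max_seq.)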
*/ 10307 nrsm = rsm; 10308 goto no_split; 10309 } 10310 /* Clone it */ 10311 rack_clone_rsm(rack, nrsm, rsm, max_seq); 10312 #ifndef INVARIANTS 10313 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10314 #else 10315 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10316 if (insret != NULL) { 10317 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10318 nrsm, insret, rack, rsm); 10319 } 10320 #endif 10321 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 10322 if (rsm->r_in_tmap) { 10323 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10324 nrsm->r_in_tmap = 1; 10325 } 10326 /* 10327 * Set in the new RSM as the 10328 * collapsed starting point 10329 */ 10330 rsm = nrsm; 10331 } 10332 no_split: 10333 counter_u64_add(rack_collapsed_win, 1); 10334 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10335 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10336 } 10337 rack->rc_has_collapsed = 1; 10338 } 10339 10340 static void 10341 rack_un_collapse_window(struct tcp_rack *rack) 10342 { 10343 struct rack_sendmap *rsm; 10344 int cnt = 0;; 10345 10346 rack->r_ctl.rc_out_at_rto = 0; 10347 rack->r_ctl.rc_snd_max_at_rto = rack->rc_tp->snd_una; 10348 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10349 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 10350 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10351 rsm->r_flags |= RACK_MUST_RXT; 10352 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 10353 rack->r_ctl.rc_snd_max_at_rto = rsm->r_end; 10354 rack->r_ctl.rc_out_at_rto += (rsm->r_end - rsm->r_start); 10355 } 10356 cnt++; 10357 } 10358 else 10359 break; 10360 } 10361 rack->rc_has_collapsed = 0; 10362 if (cnt) { 10363 rack->r_must_retran = 1; 10364 } 10365 } 10366 10367 static void 10368 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10369 int32_t tlen, int32_t tfo_syn) 10370 { 10371 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10372 if (rack->rc_dack_mode && 10373 (tlen > 500) && 10374 (rack->rc_dack_toggle == 1)) { 10375 goto no_delayed_ack; 10376 } 10377 rack_timer_cancel(tp, rack, 10378 rack->r_ctl.rc_rcvtime, __LINE__); 10379 tp->t_flags |= TF_DELACK; 10380 } else { 10381 no_delayed_ack: 10382 rack->r_wanted_output = 1; 10383 tp->t_flags |= TF_ACKNOW; 10384 if (rack->rc_dack_mode) { 10385 if (tp->t_flags & TF_DELACK) 10386 rack->rc_dack_toggle = 1; 10387 else 10388 rack->rc_dack_toggle = 0; 10389 } 10390 } 10391 } 10392 10393 static void 10394 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10395 { 10396 /* 10397 * If fast output is in progress, lets validate that 10398 * the new window did not shrink on us and make it 10399 * so fast output should end. 10400 */ 10401 if (rack->r_fast_output) { 10402 uint32_t out; 10403 10404 /* 10405 * Calculate what we will send if left as is 10406 * and compare that to our send window. 
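 * A sketch with hypothetical numbers: if 20000 bytes are already
 * outstanding, left_to_send is 10000 and snd_wnd is 24000, the sum
 * exceeds the window, so left_to_send is trimmed to 4000; had the
 * remainder fallen below one full segment, fast output would be
 * switched off instead.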
10407 */ 10408 out = ctf_outstanding(tp); 10409 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10410 /* ok we have an issue */ 10411 if (out >= tp->snd_wnd) { 10412 /* Turn off fast output the window is met or collapsed */ 10413 rack->r_fast_output = 0; 10414 } else { 10415 /* we have some room left */ 10416 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10417 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10418 /* If not at least 1 full segment never mind */ 10419 rack->r_fast_output = 0; 10420 } 10421 } 10422 } 10423 } 10424 } 10425 10426 10427 /* 10428 * Return value of 1, the TCB is unlocked and most 10429 * likely gone, return value of 0, the TCP is still 10430 * locked. 10431 */ 10432 static int 10433 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10434 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10435 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10436 { 10437 /* 10438 * Update window information. Don't look at window if no ACK: TAC's 10439 * send garbage on first SYN. 10440 */ 10441 int32_t nsegs; 10442 int32_t tfo_syn; 10443 struct tcp_rack *rack; 10444 10445 rack = (struct tcp_rack *)tp->t_fb_ptr; 10446 INP_WLOCK_ASSERT(tp->t_inpcb); 10447 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10448 if ((thflags & TH_ACK) && 10449 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10450 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10451 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10452 /* keep track of pure window updates */ 10453 if (tlen == 0 && 10454 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10455 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10456 tp->snd_wnd = tiwin; 10457 rack_validate_fo_sendwin_up(tp, rack); 10458 tp->snd_wl1 = th->th_seq; 10459 tp->snd_wl2 = th->th_ack; 10460 if (tp->snd_wnd > tp->max_sndwnd) 10461 tp->max_sndwnd = tp->snd_wnd; 10462 rack->r_wanted_output = 1; 10463 } else if (thflags & TH_ACK) { 10464 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10465 tp->snd_wnd = tiwin; 10466 rack_validate_fo_sendwin_up(tp, rack); 10467 tp->snd_wl1 = th->th_seq; 10468 tp->snd_wl2 = th->th_ack; 10469 } 10470 } 10471 if (tp->snd_wnd < ctf_outstanding(tp)) 10472 /* The peer collapsed the window */ 10473 rack_collapsed_window(rack); 10474 else if (rack->rc_has_collapsed) 10475 rack_un_collapse_window(rack); 10476 /* Was persist timer active and now we have window space? */ 10477 if ((rack->rc_in_persist != 0) && 10478 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10479 rack->r_ctl.rc_pace_min_segs))) { 10480 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10481 tp->snd_nxt = tp->snd_max; 10482 /* Make sure we output to start the timer */ 10483 rack->r_wanted_output = 1; 10484 } 10485 /* Do we enter persists? */ 10486 if ((rack->rc_in_persist == 0) && 10487 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10488 TCPS_HAVEESTABLISHED(tp->t_state) && 10489 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10490 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10491 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10492 /* 10493 * Here the rwnd is less than 10494 * the pacing size, we are established, 10495 * nothing is outstanding, and there is 10496 * data to send. Enter persists. 10497 */ 10498 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10499 } 10500 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10501 m_freem(m); 10502 return (0); 10503 } 10504 /* 10505 * don't process the URG bit, ignore them drag 10506 * along the up. 
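 * (That is, rcv_up is simply pinned to rcv_nxt below so the urgent
 * pointer never lags the receive sequence.)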
10507 */ 10508 tp->rcv_up = tp->rcv_nxt; 10509 INP_WLOCK_ASSERT(tp->t_inpcb); 10510 10511 /* 10512 * Process the segment text, merging it into the TCP sequencing 10513 * queue, and arranging for acknowledgment of receipt if necessary. 10514 * This process logically involves adjusting tp->rcv_wnd as data is 10515 * presented to the user (this happens in tcp_usrreq.c, case 10516 * PRU_RCVD). If a FIN has already been received on this connection 10517 * then we just ignore the text. 10518 */ 10519 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10520 IS_FASTOPEN(tp->t_flags)); 10521 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10522 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10523 tcp_seq save_start = th->th_seq; 10524 tcp_seq save_rnxt = tp->rcv_nxt; 10525 int save_tlen = tlen; 10526 10527 m_adj(m, drop_hdrlen); /* delayed header drop */ 10528 /* 10529 * Insert segment which includes th into TCP reassembly 10530 * queue with control block tp. Set thflags to whether 10531 * reassembly now includes a segment with FIN. This handles 10532 * the common case inline (segment is the next to be 10533 * received on an established connection, and the queue is 10534 * empty), avoiding linkage into and removal from the queue 10535 * and repetition of various conversions. Set DELACK for 10536 * segments received in order, but ack immediately when 10537 * segments are out of order (so fast retransmit can work). 10538 */ 10539 if (th->th_seq == tp->rcv_nxt && 10540 SEGQ_EMPTY(tp) && 10541 (TCPS_HAVEESTABLISHED(tp->t_state) || 10542 tfo_syn)) { 10543 #ifdef NETFLIX_SB_LIMITS 10544 u_int mcnt, appended; 10545 10546 if (so->so_rcv.sb_shlim) { 10547 mcnt = m_memcnt(m); 10548 appended = 0; 10549 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10550 CFO_NOSLEEP, NULL) == false) { 10551 counter_u64_add(tcp_sb_shlim_fails, 1); 10552 m_freem(m); 10553 return (0); 10554 } 10555 } 10556 #endif 10557 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10558 tp->rcv_nxt += tlen; 10559 if (tlen && 10560 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10561 (tp->t_fbyte_in == 0)) { 10562 tp->t_fbyte_in = ticks; 10563 if (tp->t_fbyte_in == 0) 10564 tp->t_fbyte_in = 1; 10565 if (tp->t_fbyte_out && tp->t_fbyte_in) 10566 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10567 } 10568 thflags = tcp_get_flags(th) & TH_FIN; 10569 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10570 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10571 SOCKBUF_LOCK(&so->so_rcv); 10572 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10573 m_freem(m); 10574 } else 10575 #ifdef NETFLIX_SB_LIMITS 10576 appended = 10577 #endif 10578 sbappendstream_locked(&so->so_rcv, m, 0); 10579 10580 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10581 /* NB: sorwakeup_locked() does an implicit unlock. */ 10582 sorwakeup_locked(so); 10583 #ifdef NETFLIX_SB_LIMITS 10584 if (so->so_rcv.sb_shlim && appended != mcnt) 10585 counter_fo_release(so->so_rcv.sb_shlim, 10586 mcnt - appended); 10587 #endif 10588 } else { 10589 /* 10590 * XXX: Due to the header drop above "th" is 10591 * theoretically invalid by now. Fortunately 10592 * m_adj() doesn't actually frees any mbufs when 10593 * trimming from the head. 10594 */ 10595 tcp_seq temp = save_start; 10596 10597 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10598 tp->t_flags |= TF_ACKNOW; 10599 if (tp->t_flags & TF_WAKESOR) { 10600 tp->t_flags &= ~TF_WAKESOR; 10601 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10602 sorwakeup_locked(so); 10603 } 10604 } 10605 if ((tp->t_flags & TF_SACK_PERMIT) && 10606 (save_tlen > 0) && 10607 TCPS_HAVEESTABLISHED(tp->t_state)) { 10608 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10609 /* 10610 * DSACK actually handled in the fastpath 10611 * above. 10612 */ 10613 RACK_OPTS_INC(tcp_sack_path_1); 10614 tcp_update_sack_list(tp, save_start, 10615 save_start + save_tlen); 10616 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10617 if ((tp->rcv_numsacks >= 1) && 10618 (tp->sackblks[0].end == save_start)) { 10619 /* 10620 * Partial overlap, recorded at todrop 10621 * above. 10622 */ 10623 RACK_OPTS_INC(tcp_sack_path_2a); 10624 tcp_update_sack_list(tp, 10625 tp->sackblks[0].start, 10626 tp->sackblks[0].end); 10627 } else { 10628 RACK_OPTS_INC(tcp_sack_path_2b); 10629 tcp_update_dsack_list(tp, save_start, 10630 save_start + save_tlen); 10631 } 10632 } else if (tlen >= save_tlen) { 10633 /* Update of sackblks. */ 10634 RACK_OPTS_INC(tcp_sack_path_3); 10635 tcp_update_dsack_list(tp, save_start, 10636 save_start + save_tlen); 10637 } else if (tlen > 0) { 10638 RACK_OPTS_INC(tcp_sack_path_4); 10639 tcp_update_dsack_list(tp, save_start, 10640 save_start + tlen); 10641 } 10642 } 10643 } else { 10644 m_freem(m); 10645 thflags &= ~TH_FIN; 10646 } 10647 10648 /* 10649 * If FIN is received ACK the FIN and let the user know that the 10650 * connection is closing. 10651 */ 10652 if (thflags & TH_FIN) { 10653 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10654 /* The socket upcall is handled by socantrcvmore. */ 10655 socantrcvmore(so); 10656 /* 10657 * If connection is half-synchronized (ie NEEDSYN 10658 * flag on) then delay ACK, so it may be piggybacked 10659 * when SYN is sent. Otherwise, since we received a 10660 * FIN then no more input can be expected, send ACK 10661 * now. 10662 */ 10663 if (tp->t_flags & TF_NEEDSYN) { 10664 rack_timer_cancel(tp, rack, 10665 rack->r_ctl.rc_rcvtime, __LINE__); 10666 tp->t_flags |= TF_DELACK; 10667 } else { 10668 tp->t_flags |= TF_ACKNOW; 10669 } 10670 tp->rcv_nxt++; 10671 } 10672 switch (tp->t_state) { 10673 /* 10674 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10675 * CLOSE_WAIT state. 10676 */ 10677 case TCPS_SYN_RECEIVED: 10678 tp->t_starttime = ticks; 10679 /* FALLTHROUGH */ 10680 case TCPS_ESTABLISHED: 10681 rack_timer_cancel(tp, rack, 10682 rack->r_ctl.rc_rcvtime, __LINE__); 10683 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10684 break; 10685 10686 /* 10687 * If still in FIN_WAIT_1 STATE FIN has not been 10688 * acked so enter the CLOSING state. 10689 */ 10690 case TCPS_FIN_WAIT_1: 10691 rack_timer_cancel(tp, rack, 10692 rack->r_ctl.rc_rcvtime, __LINE__); 10693 tcp_state_change(tp, TCPS_CLOSING); 10694 break; 10695 10696 /* 10697 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10698 * starting the time-wait timer, turning off the 10699 * other standard timers. 10700 */ 10701 case TCPS_FIN_WAIT_2: 10702 rack_timer_cancel(tp, rack, 10703 rack->r_ctl.rc_rcvtime, __LINE__); 10704 tcp_twstart(tp); 10705 return (1); 10706 } 10707 } 10708 /* 10709 * Return any desired output. 10710 */ 10711 if ((tp->t_flags & TF_ACKNOW) || 10712 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10713 rack->r_wanted_output = 1; 10714 } 10715 INP_WLOCK_ASSERT(tp->t_inpcb); 10716 return (0); 10717 } 10718 10719 /* 10720 * Here nothing is really faster, its just that we 10721 * have broken out the fast-data path also just like 10722 * the fast-ack. 
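 * The early checks below mirror the classic header-prediction test:
 * out-of-sequence data, a retransmission in progress, a changed
 * non-zero window, pending SYN/FIN flags, an old timestamp, an ack
 * that is not exactly snd_una, or data that does not fit in the
 * receive buffer each force a return of 0, which sends the caller
 * back to the slow path.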
10723 */ 10724 static int 10725 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10726 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10727 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10728 { 10729 int32_t nsegs; 10730 int32_t newsize = 0; /* automatic sockbuf scaling */ 10731 struct tcp_rack *rack; 10732 #ifdef NETFLIX_SB_LIMITS 10733 u_int mcnt, appended; 10734 #endif 10735 #ifdef TCPDEBUG 10736 /* 10737 * The size of tcp_saveipgen must be the size of the max ip header, 10738 * now IPv6. 10739 */ 10740 u_char tcp_saveipgen[IP6_HDR_LEN]; 10741 struct tcphdr tcp_savetcp; 10742 short ostate = 0; 10743 10744 #endif 10745 /* 10746 * If last ACK falls within this segment's sequence numbers, record 10747 * the timestamp. NOTE that the test is modified according to the 10748 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10749 */ 10750 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10751 return (0); 10752 } 10753 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10754 return (0); 10755 } 10756 if (tiwin && tiwin != tp->snd_wnd) { 10757 return (0); 10758 } 10759 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10760 return (0); 10761 } 10762 if (__predict_false((to->to_flags & TOF_TS) && 10763 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10764 return (0); 10765 } 10766 if (__predict_false((th->th_ack != tp->snd_una))) { 10767 return (0); 10768 } 10769 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10770 return (0); 10771 } 10772 if ((to->to_flags & TOF_TS) != 0 && 10773 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10774 tp->ts_recent_age = tcp_ts_getticks(); 10775 tp->ts_recent = to->to_tsval; 10776 } 10777 rack = (struct tcp_rack *)tp->t_fb_ptr; 10778 /* 10779 * This is a pure, in-sequence data packet with nothing on the 10780 * reassembly queue and we have enough buffer space to take it. 10781 */ 10782 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10783 10784 #ifdef NETFLIX_SB_LIMITS 10785 if (so->so_rcv.sb_shlim) { 10786 mcnt = m_memcnt(m); 10787 appended = 0; 10788 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10789 CFO_NOSLEEP, NULL) == false) { 10790 counter_u64_add(tcp_sb_shlim_fails, 1); 10791 m_freem(m); 10792 return (1); 10793 } 10794 } 10795 #endif 10796 /* Clean receiver SACK report if present */ 10797 if (tp->rcv_numsacks) 10798 tcp_clean_sackreport(tp); 10799 KMOD_TCPSTAT_INC(tcps_preddat); 10800 tp->rcv_nxt += tlen; 10801 if (tlen && 10802 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10803 (tp->t_fbyte_in == 0)) { 10804 tp->t_fbyte_in = ticks; 10805 if (tp->t_fbyte_in == 0) 10806 tp->t_fbyte_in = 1; 10807 if (tp->t_fbyte_out && tp->t_fbyte_in) 10808 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10809 } 10810 /* 10811 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10812 */ 10813 tp->snd_wl1 = th->th_seq; 10814 /* 10815 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10816 */ 10817 tp->rcv_up = tp->rcv_nxt; 10818 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10819 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10820 #ifdef TCPDEBUG 10821 if (so->so_options & SO_DEBUG) 10822 tcp_trace(TA_INPUT, ostate, tp, 10823 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10824 #endif 10825 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10826 10827 /* Add data to socket buffer. */ 10828 SOCKBUF_LOCK(&so->so_rcv); 10829 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10830 m_freem(m); 10831 } else { 10832 /* 10833 * Set new socket buffer size. Give up when limit is 10834 * reached. 
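 * (If sbreserve_locked() cannot grow the buffer to newsize, the
 * SB_AUTOSIZE flag is cleared so no further automatic growth is
 * attempted on this socket.)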
10835 */ 10836 if (newsize) 10837 if (!sbreserve_locked(&so->so_rcv, 10838 newsize, so, NULL)) 10839 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10840 m_adj(m, drop_hdrlen); /* delayed header drop */ 10841 #ifdef NETFLIX_SB_LIMITS 10842 appended = 10843 #endif 10844 sbappendstream_locked(&so->so_rcv, m, 0); 10845 ctf_calc_rwin(so, tp); 10846 } 10847 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10848 /* NB: sorwakeup_locked() does an implicit unlock. */ 10849 sorwakeup_locked(so); 10850 #ifdef NETFLIX_SB_LIMITS 10851 if (so->so_rcv.sb_shlim && mcnt != appended) 10852 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10853 #endif 10854 rack_handle_delayed_ack(tp, rack, tlen, 0); 10855 if (tp->snd_una == tp->snd_max) 10856 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10857 return (1); 10858 } 10859 10860 /* 10861 * This subfunction is used to try to highly optimize the 10862 * fast path. We again allow window updates that are 10863 * in sequence to remain in the fast-path. We also add 10864 * in the __predict's to attempt to help the compiler. 10865 * Note that if we return a 0, then we can *not* process 10866 * it and the caller should push the packet into the 10867 * slow-path. 10868 */ 10869 static int 10870 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10871 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10872 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10873 { 10874 int32_t acked; 10875 int32_t nsegs; 10876 #ifdef TCPDEBUG 10877 /* 10878 * The size of tcp_saveipgen must be the size of the max ip header, 10879 * now IPv6. 10880 */ 10881 u_char tcp_saveipgen[IP6_HDR_LEN]; 10882 struct tcphdr tcp_savetcp; 10883 short ostate = 0; 10884 #endif 10885 int32_t under_pacing = 0; 10886 struct tcp_rack *rack; 10887 10888 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10889 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10890 return (0); 10891 } 10892 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10893 /* Above what we have sent? */ 10894 return (0); 10895 } 10896 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10897 /* We are retransmitting */ 10898 return (0); 10899 } 10900 if (__predict_false(tiwin == 0)) { 10901 /* zero window */ 10902 return (0); 10903 } 10904 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10905 /* We need a SYN or a FIN, unlikely.. */ 10906 return (0); 10907 } 10908 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10909 /* Timestamp is behind .. old ack with seq wrap? */ 10910 return (0); 10911 } 10912 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10913 /* Still recovering */ 10914 return (0); 10915 } 10916 rack = (struct tcp_rack *)tp->t_fb_ptr; 10917 if (rack->r_ctl.rc_sacked) { 10918 /* We have sack holes on our scoreboard */ 10919 return (0); 10920 } 10921 /* Ok if we reach here, we can process a fast-ack */ 10922 if (rack->gp_ready && 10923 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10924 under_pacing = 1; 10925 } 10926 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10927 rack_log_ack(tp, to, th, 0, 0); 10928 /* Did the window get updated? */ 10929 if (tiwin != tp->snd_wnd) { 10930 tp->snd_wnd = tiwin; 10931 rack_validate_fo_sendwin_up(tp, rack); 10932 tp->snd_wl1 = th->th_seq; 10933 if (tp->snd_wnd > tp->max_sndwnd) 10934 tp->max_sndwnd = tp->snd_wnd; 10935 } 10936 /* Do we exit persists? 
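 * (Exit requires the offered window to reach at least
 * min(rc_high_rwnd / 2, rc_pace_min_segs); the persist-entry test
 * below uses the same threshold in the opposite direction.)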
*/ 10937 if ((rack->rc_in_persist != 0) && 10938 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10939 rack->r_ctl.rc_pace_min_segs))) { 10940 rack_exit_persist(tp, rack, cts); 10941 } 10942 /* Do we enter persists? */ 10943 if ((rack->rc_in_persist == 0) && 10944 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10945 TCPS_HAVEESTABLISHED(tp->t_state) && 10946 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10947 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10948 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10949 /* 10950 * Here the rwnd is less than 10951 * the pacing size, we are established, 10952 * nothing is outstanding, and there is 10953 * data to send. Enter persists. 10954 */ 10955 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10956 } 10957 /* 10958 * If last ACK falls within this segment's sequence numbers, record 10959 * the timestamp. NOTE that the test is modified according to the 10960 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10961 */ 10962 if ((to->to_flags & TOF_TS) != 0 && 10963 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10964 tp->ts_recent_age = tcp_ts_getticks(); 10965 tp->ts_recent = to->to_tsval; 10966 } 10967 /* 10968 * This is a pure ack for outstanding data. 10969 */ 10970 KMOD_TCPSTAT_INC(tcps_predack); 10971 10972 /* 10973 * "bad retransmit" recovery. 10974 */ 10975 if ((tp->t_flags & TF_PREVVALID) && 10976 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10977 tp->t_flags &= ~TF_PREVVALID; 10978 if (tp->t_rxtshift == 1 && 10979 (int)(ticks - tp->t_badrxtwin) < 0) 10980 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10981 } 10982 /* 10983 * Recalculate the transmit timer / rtt. 10984 * 10985 * Some boxes send broken timestamp replies during the SYN+ACK 10986 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10987 * and blow up the retransmit timer. 10988 */ 10989 acked = BYTES_THIS_ACK(tp, th); 10990 10991 #ifdef TCP_HHOOK 10992 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 10993 hhook_run_tcp_est_in(tp, th, to); 10994 #endif 10995 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10996 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10997 if (acked) { 10998 struct mbuf *mfree; 10999 11000 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11001 SOCKBUF_LOCK(&so->so_snd); 11002 mfree = sbcut_locked(&so->so_snd, acked); 11003 tp->snd_una = th->th_ack; 11004 /* Note we want to hold the sb lock through the sendmap adjust */ 11005 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11006 /* Wake up the socket if we have room to write more */ 11007 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11008 sowwakeup_locked(so); 11009 m_freem(mfree); 11010 tp->t_rxtshift = 0; 11011 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11012 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11013 rack->rc_tlp_in_progress = 0; 11014 rack->r_ctl.rc_tlp_cnt_out = 0; 11015 /* 11016 * If it is the RXT timer we want to 11017 * stop it, so we can restart a TLP. 11018 */ 11019 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11020 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11021 #ifdef NETFLIX_HTTP_LOGGING 11022 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11023 #endif 11024 } 11025 /* 11026 * Let the congestion control algorithm update congestion control 11027 * related information. This typically means increasing the 11028 * congestion window. 
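 * (In this fast-ack path that update was already done by the
 * rack_ack_received() call above; what actually follows here is the
 * collapsed-window check.)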
11029 */ 11030 if (tp->snd_wnd < ctf_outstanding(tp)) { 11031 /* The peer collapsed the window */ 11032 rack_collapsed_window(rack); 11033 } else if (rack->rc_has_collapsed) 11034 rack_un_collapse_window(rack); 11035 11036 /* 11037 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11038 */ 11039 tp->snd_wl2 = th->th_ack; 11040 tp->t_dupacks = 0; 11041 m_freem(m); 11042 /* ND6_HINT(tp); *//* Some progress has been made. */ 11043 11044 /* 11045 * If all outstanding data are acked, stop retransmit timer, 11046 * otherwise restart timer using current (possibly backed-off) 11047 * value. If process is waiting for space, wakeup/selwakeup/signal. 11048 * If data are ready to send, let tcp_output decide between more 11049 * output or persist. 11050 */ 11051 #ifdef TCPDEBUG 11052 if (so->so_options & SO_DEBUG) 11053 tcp_trace(TA_INPUT, ostate, tp, 11054 (void *)tcp_saveipgen, 11055 &tcp_savetcp, 0); 11056 #endif 11057 if (under_pacing && 11058 (rack->use_fixed_rate == 0) && 11059 (rack->in_probe_rtt == 0) && 11060 rack->rc_gp_dyn_mul && 11061 rack->rc_always_pace) { 11062 /* Check if we are dragging bottom */ 11063 rack_check_bottom_drag(tp, rack, so, acked); 11064 } 11065 if (tp->snd_una == tp->snd_max) { 11066 tp->t_flags &= ~TF_PREVVALID; 11067 rack->r_ctl.retran_during_recovery = 0; 11068 rack->r_ctl.dsack_byte_cnt = 0; 11069 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11070 if (rack->r_ctl.rc_went_idle_time == 0) 11071 rack->r_ctl.rc_went_idle_time = 1; 11072 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11073 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11074 tp->t_acktime = 0; 11075 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11076 } 11077 if (acked && rack->r_fast_output) 11078 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11079 if (sbavail(&so->so_snd)) { 11080 rack->r_wanted_output = 1; 11081 } 11082 return (1); 11083 } 11084 11085 /* 11086 * Return value of 1, the TCB is unlocked and most 11087 * likely gone, return value of 0, the TCP is still 11088 * locked. 11089 */ 11090 static int 11091 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11092 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11093 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11094 { 11095 int32_t ret_val = 0; 11096 int32_t todrop; 11097 int32_t ourfinisacked = 0; 11098 struct tcp_rack *rack; 11099 11100 ctf_calc_rwin(so, tp); 11101 /* 11102 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11103 * SYN, drop the input. if seg contains a RST, then drop the 11104 * connection. if seg does not contain SYN, then drop it. Otherwise 11105 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11106 * tp->irs if seg contains ack then advance tp->snd_una if seg 11107 * contains an ECE and ECN support is enabled, the stream is ECN 11108 * capable. if SYN has been acked change to ESTABLISHED else 11109 * SYN_RCVD state arrange for segment to be acked (eventually) 11110 * continue processing rest of data/controls. 
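 * The code below applies those rules in order: an ACK outside
 * (iss, snd_max] draws a reset, ACK plus RST means the connection
 * attempt was refused, a bare RST or a segment without SYN is
 * dropped, and only then is the SYN accepted and rcv_nxt/irs
 * initialized.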
11111 */ 11112 if ((thflags & TH_ACK) && 11113 (SEQ_LEQ(th->th_ack, tp->iss) || 11114 SEQ_GT(th->th_ack, tp->snd_max))) { 11115 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11116 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11117 return (1); 11118 } 11119 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11120 TCP_PROBE5(connect__refused, NULL, tp, 11121 mtod(m, const char *), tp, th); 11122 tp = tcp_drop(tp, ECONNREFUSED); 11123 ctf_do_drop(m, tp); 11124 return (1); 11125 } 11126 if (thflags & TH_RST) { 11127 ctf_do_drop(m, tp); 11128 return (1); 11129 } 11130 if (!(thflags & TH_SYN)) { 11131 ctf_do_drop(m, tp); 11132 return (1); 11133 } 11134 tp->irs = th->th_seq; 11135 tcp_rcvseqinit(tp); 11136 rack = (struct tcp_rack *)tp->t_fb_ptr; 11137 if (thflags & TH_ACK) { 11138 int tfo_partial = 0; 11139 11140 KMOD_TCPSTAT_INC(tcps_connects); 11141 soisconnected(so); 11142 #ifdef MAC 11143 mac_socketpeer_set_from_mbuf(m, so); 11144 #endif 11145 /* Do window scaling on this connection? */ 11146 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11147 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11148 tp->rcv_scale = tp->request_r_scale; 11149 } 11150 tp->rcv_adv += min(tp->rcv_wnd, 11151 TCP_MAXWIN << tp->rcv_scale); 11152 /* 11153 * If not all the data that was sent in the TFO SYN 11154 * has been acked, resend the remainder right away. 11155 */ 11156 if (IS_FASTOPEN(tp->t_flags) && 11157 (tp->snd_una != tp->snd_max)) { 11158 tp->snd_nxt = th->th_ack; 11159 tfo_partial = 1; 11160 } 11161 /* 11162 * If there's data, delay ACK; if there's also a FIN ACKNOW 11163 * will be turned on later. 11164 */ 11165 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11166 rack_timer_cancel(tp, rack, 11167 rack->r_ctl.rc_rcvtime, __LINE__); 11168 tp->t_flags |= TF_DELACK; 11169 } else { 11170 rack->r_wanted_output = 1; 11171 tp->t_flags |= TF_ACKNOW; 11172 rack->rc_dack_toggle = 0; 11173 } 11174 11175 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11176 11177 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11178 /* 11179 * We advance snd_una for the 11180 * fast open case. If th_ack is 11181 * acknowledging data beyond 11182 * snd_una we can't just call 11183 * ack-processing since the 11184 * data stream in our send-map 11185 * will start at snd_una + 1 (one 11186 * beyond the SYN). If its just 11187 * equal we don't need to do that 11188 * and there is no send_map. 11189 */ 11190 tp->snd_una++; 11191 } 11192 /* 11193 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11194 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11195 */ 11196 tp->t_starttime = ticks; 11197 if (tp->t_flags & TF_NEEDFIN) { 11198 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11199 tp->t_flags &= ~TF_NEEDFIN; 11200 thflags &= ~TH_SYN; 11201 } else { 11202 tcp_state_change(tp, TCPS_ESTABLISHED); 11203 TCP_PROBE5(connect__established, NULL, tp, 11204 mtod(m, const char *), tp, th); 11205 rack_cc_conn_init(tp); 11206 } 11207 } else { 11208 /* 11209 * Received initial SYN in SYN-SENT[*] state => simultaneous 11210 * open. If segment contains CC option and there is a 11211 * cached CC, apply TAO test. If it succeeds, connection is * 11212 * half-synchronized. Otherwise, do 3-way handshake: 11213 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11214 * there was no CC option, clear cached CC value. 11215 */ 11216 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 11217 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11218 } 11219 INP_WLOCK_ASSERT(tp->t_inpcb); 11220 /* 11221 * Advance th->th_seq to correspond to first data byte. 
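 * (The SYN consumes one sequence number, hence the increment of
 * th_seq just below before any data trimming.)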
If data, 11222 * trim to stay within window, dropping FIN if necessary. 11223 */ 11224 th->th_seq++; 11225 if (tlen > tp->rcv_wnd) { 11226 todrop = tlen - tp->rcv_wnd; 11227 m_adj(m, -todrop); 11228 tlen = tp->rcv_wnd; 11229 thflags &= ~TH_FIN; 11230 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11231 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11232 } 11233 tp->snd_wl1 = th->th_seq - 1; 11234 tp->rcv_up = th->th_seq; 11235 /* 11236 * Client side of transaction: already sent SYN and data. If the 11237 * remote host used T/TCP to validate the SYN, our data will be 11238 * ACK'd; if so, enter normal data segment processing in the middle 11239 * of step 5, ack processing. Otherwise, goto step 6. 11240 */ 11241 if (thflags & TH_ACK) { 11242 /* For syn-sent we need to possibly update the rtt */ 11243 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11244 uint32_t t, mcts; 11245 11246 mcts = tcp_ts_getticks(); 11247 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11248 if (!tp->t_rttlow || tp->t_rttlow > t) 11249 tp->t_rttlow = t; 11250 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11251 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11252 tcp_rack_xmit_timer_commit(rack, tp); 11253 } 11254 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11255 return (ret_val); 11256 /* We may have changed to FIN_WAIT_1 above */ 11257 if (tp->t_state == TCPS_FIN_WAIT_1) { 11258 /* 11259 * In FIN_WAIT_1 STATE in addition to the processing 11260 * for the ESTABLISHED state if our FIN is now 11261 * acknowledged then enter FIN_WAIT_2. 11262 */ 11263 if (ourfinisacked) { 11264 /* 11265 * If we can't receive any more data, then 11266 * closing user can proceed. Starting the 11267 * timer is contrary to the specification, 11268 * but if we don't get a FIN we'll hang 11269 * forever. 11270 * 11271 * XXXjl: we should release the tp also, and 11272 * use a compressed state. 11273 */ 11274 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11275 soisdisconnected(so); 11276 tcp_timer_activate(tp, TT_2MSL, 11277 (tcp_fast_finwait2_recycle ? 11278 tcp_finwait2_timeout : 11279 TP_MAXIDLE(tp))); 11280 } 11281 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11282 } 11283 } 11284 } 11285 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11286 tiwin, thflags, nxt_pkt)); 11287 } 11288 11289 /* 11290 * Return value of 1, the TCB is unlocked and most 11291 * likely gone, return value of 0, the TCP is still 11292 * locked. 11293 */ 11294 static int 11295 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11296 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11297 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11298 { 11299 struct tcp_rack *rack; 11300 int32_t ret_val = 0; 11301 int32_t ourfinisacked = 0; 11302 11303 ctf_calc_rwin(so, tp); 11304 if ((thflags & TH_ACK) && 11305 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11306 SEQ_GT(th->th_ack, tp->snd_max))) { 11307 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11308 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11309 return (1); 11310 } 11311 rack = (struct tcp_rack *)tp->t_fb_ptr; 11312 if (IS_FASTOPEN(tp->t_flags)) { 11313 /* 11314 * When a TFO connection is in SYN_RECEIVED, the 11315 * only valid packets are the initial SYN, a 11316 * retransmit/copy of the initial SYN (possibly with 11317 * a subset of the original data), a valid ACK, a 11318 * FIN, or a RST. 
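 * (The first case handled below, a segment carrying both SYN and
 * ACK, is answered with a reset; a peer should not be sending its
 * own SYN while acknowledging ours in this state.)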
11319 */ 11320 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11321 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11322 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11323 return (1); 11324 } else if (thflags & TH_SYN) { 11325 /* non-initial SYN is ignored */ 11326 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11327 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11328 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11329 ctf_do_drop(m, NULL); 11330 return (0); 11331 } 11332 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11333 ctf_do_drop(m, NULL); 11334 return (0); 11335 } 11336 } 11337 11338 if ((thflags & TH_RST) || 11339 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11340 return (__ctf_process_rst(m, th, so, tp, 11341 &rack->r_ctl.challenge_ack_ts, 11342 &rack->r_ctl.challenge_ack_cnt)); 11343 /* 11344 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11345 * it's less than ts_recent, drop it. 11346 */ 11347 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11348 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11349 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11350 return (ret_val); 11351 } 11352 /* 11353 * In the SYN-RECEIVED state, validate that the packet belongs to 11354 * this connection before trimming the data to fit the receive 11355 * window. Check the sequence number versus IRS since we know the 11356 * sequence numbers haven't wrapped. This is a partial fix for the 11357 * "LAND" DoS attack. 11358 */ 11359 if (SEQ_LT(th->th_seq, tp->irs)) { 11360 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11361 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11362 return (1); 11363 } 11364 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11365 &rack->r_ctl.challenge_ack_ts, 11366 &rack->r_ctl.challenge_ack_cnt)) { 11367 return (ret_val); 11368 } 11369 /* 11370 * If last ACK falls within this segment's sequence numbers, record 11371 * its timestamp. NOTE: 1) That the test incorporates suggestions 11372 * from the latest proposal of the tcplw@cray.com list (Braden 11373 * 1993/04/26). 2) That updating only on newer timestamps interferes 11374 * with our earlier PAWS tests, so this check should be solely 11375 * predicated on the sequence space of this segment. 3) That we 11376 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11377 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11378 * SEG.Len, This modified check allows us to overcome RFC1323's 11379 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11380 * p.869. In such cases, we can still calculate the RTT correctly 11381 * when RCV.NXT == Last.ACK.Sent. 11382 */ 11383 if ((to->to_flags & TOF_TS) != 0 && 11384 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11385 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11386 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11387 tp->ts_recent_age = tcp_ts_getticks(); 11388 tp->ts_recent = to->to_tsval; 11389 } 11390 tp->snd_wnd = tiwin; 11391 rack_validate_fo_sendwin_up(tp, rack); 11392 /* 11393 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11394 * is on (half-synchronized state), then queue data for later 11395 * processing; else drop segment and return. 
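 * (Note that for a TFO endpoint the no-ACK branch below still calls
 * rack_cc_conn_init() before queueing the segment.)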
11396 */ 11397 if ((thflags & TH_ACK) == 0) { 11398 if (IS_FASTOPEN(tp->t_flags)) { 11399 rack_cc_conn_init(tp); 11400 } 11401 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11402 tiwin, thflags, nxt_pkt)); 11403 } 11404 KMOD_TCPSTAT_INC(tcps_connects); 11405 soisconnected(so); 11406 /* Do window scaling? */ 11407 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11408 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11409 tp->rcv_scale = tp->request_r_scale; 11410 } 11411 /* 11412 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11413 * FIN-WAIT-1 11414 */ 11415 tp->t_starttime = ticks; 11416 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11417 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11418 tp->t_tfo_pending = NULL; 11419 } 11420 if (tp->t_flags & TF_NEEDFIN) { 11421 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11422 tp->t_flags &= ~TF_NEEDFIN; 11423 } else { 11424 tcp_state_change(tp, TCPS_ESTABLISHED); 11425 TCP_PROBE5(accept__established, NULL, tp, 11426 mtod(m, const char *), tp, th); 11427 /* 11428 * TFO connections call cc_conn_init() during SYN 11429 * processing. Calling it again here for such connections 11430 * is not harmless as it would undo the snd_cwnd reduction 11431 * that occurs when a TFO SYN|ACK is retransmitted. 11432 */ 11433 if (!IS_FASTOPEN(tp->t_flags)) 11434 rack_cc_conn_init(tp); 11435 } 11436 /* 11437 * Account for the ACK of our SYN prior to 11438 * regular ACK processing below, except for 11439 * simultaneous SYN, which is handled later. 11440 */ 11441 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11442 tp->snd_una++; 11443 /* 11444 * If segment contains data or ACK, will call tcp_reass() later; if 11445 * not, do so now to pass queued data to user. 11446 */ 11447 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11448 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11449 (struct mbuf *)0); 11450 if (tp->t_flags & TF_WAKESOR) { 11451 tp->t_flags &= ~TF_WAKESOR; 11452 /* NB: sorwakeup_locked() does an implicit unlock. */ 11453 sorwakeup_locked(so); 11454 } 11455 } 11456 tp->snd_wl1 = th->th_seq - 1; 11457 /* For syn-recv we need to possibly update the rtt */ 11458 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11459 uint32_t t, mcts; 11460 11461 mcts = tcp_ts_getticks(); 11462 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11463 if (!tp->t_rttlow || tp->t_rttlow > t) 11464 tp->t_rttlow = t; 11465 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11466 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11467 tcp_rack_xmit_timer_commit(rack, tp); 11468 } 11469 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11470 return (ret_val); 11471 } 11472 if (tp->t_state == TCPS_FIN_WAIT_1) { 11473 /* We could have went to FIN_WAIT_1 (or EST) above */ 11474 /* 11475 * In FIN_WAIT_1 STATE in addition to the processing for the 11476 * ESTABLISHED state if our FIN is now acknowledged then 11477 * enter FIN_WAIT_2. 11478 */ 11479 if (ourfinisacked) { 11480 /* 11481 * If we can't receive any more data, then closing 11482 * user can proceed. Starting the timer is contrary 11483 * to the specification, but if we don't get a FIN 11484 * we'll hang forever. 11485 * 11486 * XXXjl: we should release the tp also, and use a 11487 * compressed state. 11488 */ 11489 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11490 soisdisconnected(so); 11491 tcp_timer_activate(tp, TT_2MSL, 11492 (tcp_fast_finwait2_recycle ? 
11493 tcp_finwait2_timeout : 11494 TP_MAXIDLE(tp))); 11495 } 11496 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11497 } 11498 } 11499 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11500 tiwin, thflags, nxt_pkt)); 11501 } 11502 11503 /* 11504 * Return value of 1, the TCB is unlocked and most 11505 * likely gone, return value of 0, the TCP is still 11506 * locked. 11507 */ 11508 static int 11509 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11510 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11511 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11512 { 11513 int32_t ret_val = 0; 11514 struct tcp_rack *rack; 11515 11516 /* 11517 * Header prediction: check for the two common cases of a 11518 * uni-directional data xfer. If the packet has no control flags, 11519 * is in-sequence, the window didn't change and we're not 11520 * retransmitting, it's a candidate. If the length is zero and the 11521 * ack moved forward, we're the sender side of the xfer. Just free 11522 * the data acked & wake any higher level process that was blocked 11523 * waiting for space. If the length is non-zero and the ack didn't 11524 * move, we're the receiver side. If we're getting packets in-order 11525 * (the reassembly queue is empty), add the data toc The socket 11526 * buffer and note that we need a delayed ack. Make sure that the 11527 * hidden state-flags are also off. Since we check for 11528 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11529 */ 11530 rack = (struct tcp_rack *)tp->t_fb_ptr; 11531 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11532 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11533 __predict_true(SEGQ_EMPTY(tp)) && 11534 __predict_true(th->th_seq == tp->rcv_nxt)) { 11535 if (tlen == 0) { 11536 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11537 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11538 return (0); 11539 } 11540 } else { 11541 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11542 tiwin, nxt_pkt, iptos)) { 11543 return (0); 11544 } 11545 } 11546 } 11547 ctf_calc_rwin(so, tp); 11548 11549 if ((thflags & TH_RST) || 11550 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11551 return (__ctf_process_rst(m, th, so, tp, 11552 &rack->r_ctl.challenge_ack_ts, 11553 &rack->r_ctl.challenge_ack_cnt)); 11554 11555 /* 11556 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11557 * synchronized state. 11558 */ 11559 if (thflags & TH_SYN) { 11560 ctf_challenge_ack(m, th, tp, &ret_val); 11561 return (ret_val); 11562 } 11563 /* 11564 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11565 * it's less than ts_recent, drop it. 11566 */ 11567 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11568 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11569 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11570 return (ret_val); 11571 } 11572 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11573 &rack->r_ctl.challenge_ack_ts, 11574 &rack->r_ctl.challenge_ack_cnt)) { 11575 return (ret_val); 11576 } 11577 /* 11578 * If last ACK falls within this segment's sequence numbers, record 11579 * its timestamp. NOTE: 1) That the test incorporates suggestions 11580 * from the latest proposal of the tcplw@cray.com list (Braden 11581 * 1993/04/26). 2) That updating only on newer timestamps interferes 11582 * with our earlier PAWS tests, so this check should be solely 11583 * predicated on the sequence space of this segment. 
3) That we 11584 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11585 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11586 * SEG.Len, This modified check allows us to overcome RFC1323's 11587 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11588 * p.869. In such cases, we can still calculate the RTT correctly 11589 * when RCV.NXT == Last.ACK.Sent. 11590 */ 11591 if ((to->to_flags & TOF_TS) != 0 && 11592 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11593 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11594 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11595 tp->ts_recent_age = tcp_ts_getticks(); 11596 tp->ts_recent = to->to_tsval; 11597 } 11598 /* 11599 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11600 * is on (half-synchronized state), then queue data for later 11601 * processing; else drop segment and return. 11602 */ 11603 if ((thflags & TH_ACK) == 0) { 11604 if (tp->t_flags & TF_NEEDSYN) { 11605 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11606 tiwin, thflags, nxt_pkt)); 11607 11608 } else if (tp->t_flags & TF_ACKNOW) { 11609 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11610 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11611 return (ret_val); 11612 } else { 11613 ctf_do_drop(m, NULL); 11614 return (0); 11615 } 11616 } 11617 /* 11618 * Ack processing. 11619 */ 11620 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11621 return (ret_val); 11622 } 11623 if (sbavail(&so->so_snd)) { 11624 if (ctf_progress_timeout_check(tp, true)) { 11625 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11626 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11627 return (1); 11628 } 11629 } 11630 /* State changes only happen in rack_process_data() */ 11631 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11632 tiwin, thflags, nxt_pkt)); 11633 } 11634 11635 /* 11636 * Return value of 1, the TCB is unlocked and most 11637 * likely gone, return value of 0, the TCP is still 11638 * locked. 11639 */ 11640 static int 11641 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11642 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11643 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11644 { 11645 int32_t ret_val = 0; 11646 struct tcp_rack *rack; 11647 11648 rack = (struct tcp_rack *)tp->t_fb_ptr; 11649 ctf_calc_rwin(so, tp); 11650 if ((thflags & TH_RST) || 11651 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11652 return (__ctf_process_rst(m, th, so, tp, 11653 &rack->r_ctl.challenge_ack_ts, 11654 &rack->r_ctl.challenge_ack_cnt)); 11655 /* 11656 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11657 * synchronized state. 11658 */ 11659 if (thflags & TH_SYN) { 11660 ctf_challenge_ack(m, th, tp, &ret_val); 11661 return (ret_val); 11662 } 11663 /* 11664 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11665 * it's less than ts_recent, drop it. 11666 */ 11667 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11668 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11669 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11670 return (ret_val); 11671 } 11672 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11673 &rack->r_ctl.challenge_ack_ts, 11674 &rack->r_ctl.challenge_ack_cnt)) { 11675 return (ret_val); 11676 } 11677 /* 11678 * If last ACK falls within this segment's sequence numbers, record 11679 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 11680 * from the latest proposal of the tcplw@cray.com list (Braden 11681 * 1993/04/26). 2) That updating only on newer timestamps interferes 11682 * with our earlier PAWS tests, so this check should be solely 11683 * predicated on the sequence space of this segment. 3) That we 11684 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11685 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11686 * SEG.Len, This modified check allows us to overcome RFC1323's 11687 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11688 * p.869. In such cases, we can still calculate the RTT correctly 11689 * when RCV.NXT == Last.ACK.Sent. 11690 */ 11691 if ((to->to_flags & TOF_TS) != 0 && 11692 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11693 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11694 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11695 tp->ts_recent_age = tcp_ts_getticks(); 11696 tp->ts_recent = to->to_tsval; 11697 } 11698 /* 11699 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11700 * is on (half-synchronized state), then queue data for later 11701 * processing; else drop segment and return. 11702 */ 11703 if ((thflags & TH_ACK) == 0) { 11704 if (tp->t_flags & TF_NEEDSYN) { 11705 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11706 tiwin, thflags, nxt_pkt)); 11707 11708 } else if (tp->t_flags & TF_ACKNOW) { 11709 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11710 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11711 return (ret_val); 11712 } else { 11713 ctf_do_drop(m, NULL); 11714 return (0); 11715 } 11716 } 11717 /* 11718 * Ack processing. 11719 */ 11720 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11721 return (ret_val); 11722 } 11723 if (sbavail(&so->so_snd)) { 11724 if (ctf_progress_timeout_check(tp, true)) { 11725 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11726 tp, tick, PROGRESS_DROP, __LINE__); 11727 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11728 return (1); 11729 } 11730 } 11731 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11732 tiwin, thflags, nxt_pkt)); 11733 } 11734 11735 static int 11736 rack_check_data_after_close(struct mbuf *m, 11737 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11738 { 11739 struct tcp_rack *rack; 11740 11741 rack = (struct tcp_rack *)tp->t_fb_ptr; 11742 if (rack->rc_allow_data_af_clo == 0) { 11743 close_now: 11744 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11745 /* tcp_close will kill the inp pre-log the Reset */ 11746 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11747 tp = tcp_close(tp); 11748 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11749 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11750 return (1); 11751 } 11752 if (sbavail(&so->so_snd) == 0) 11753 goto close_now; 11754 /* Ok we allow data that is ignored and a followup reset */ 11755 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11756 tp->rcv_nxt = th->th_seq + *tlen; 11757 tp->t_flags2 |= TF2_DROP_AF_DATA; 11758 rack->r_wanted_output = 1; 11759 *tlen = 0; 11760 return (0); 11761 } 11762 11763 /* 11764 * Return value of 1, the TCB is unlocked and most 11765 * likely gone, return value of 0, the TCP is still 11766 * locked. 
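 * (That is, on a return of 1 the connection may already have been
 * torn down and the caller must not dereference the tcpcb again;
 * only a return of 0 leaves the lock held.)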
11767 */ 11768 static int 11769 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11770 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11771 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11772 { 11773 int32_t ret_val = 0; 11774 int32_t ourfinisacked = 0; 11775 struct tcp_rack *rack; 11776 11777 rack = (struct tcp_rack *)tp->t_fb_ptr; 11778 ctf_calc_rwin(so, tp); 11779 11780 if ((thflags & TH_RST) || 11781 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11782 return (__ctf_process_rst(m, th, so, tp, 11783 &rack->r_ctl.challenge_ack_ts, 11784 &rack->r_ctl.challenge_ack_cnt)); 11785 /* 11786 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11787 * synchronized state. 11788 */ 11789 if (thflags & TH_SYN) { 11790 ctf_challenge_ack(m, th, tp, &ret_val); 11791 return (ret_val); 11792 } 11793 /* 11794 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11795 * it's less than ts_recent, drop it. 11796 */ 11797 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11798 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11799 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11800 return (ret_val); 11801 } 11802 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11803 &rack->r_ctl.challenge_ack_ts, 11804 &rack->r_ctl.challenge_ack_cnt)) { 11805 return (ret_val); 11806 } 11807 /* 11808 * If new data are received on a connection after the user processes 11809 * are gone, then RST the other end. 11810 */ 11811 if ((so->so_state & SS_NOFDREF) && tlen) { 11812 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11813 return (1); 11814 } 11815 /* 11816 * If last ACK falls within this segment's sequence numbers, record 11817 * its timestamp. NOTE: 1) That the test incorporates suggestions 11818 * from the latest proposal of the tcplw@cray.com list (Braden 11819 * 1993/04/26). 2) That updating only on newer timestamps interferes 11820 * with our earlier PAWS tests, so this check should be solely 11821 * predicated on the sequence space of this segment. 3) That we 11822 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11823 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11824 * SEG.Len, This modified check allows us to overcome RFC1323's 11825 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11826 * p.869. In such cases, we can still calculate the RTT correctly 11827 * when RCV.NXT == Last.ACK.Sent. 11828 */ 11829 if ((to->to_flags & TOF_TS) != 0 && 11830 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11831 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11832 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11833 tp->ts_recent_age = tcp_ts_getticks(); 11834 tp->ts_recent = to->to_tsval; 11835 } 11836 /* 11837 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11838 * is on (half-synchronized state), then queue data for later 11839 * processing; else drop segment and return. 11840 */ 11841 if ((thflags & TH_ACK) == 0) { 11842 if (tp->t_flags & TF_NEEDSYN) { 11843 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11844 tiwin, thflags, nxt_pkt)); 11845 } else if (tp->t_flags & TF_ACKNOW) { 11846 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11847 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11848 return (ret_val); 11849 } else { 11850 ctf_do_drop(m, NULL); 11851 return (0); 11852 } 11853 } 11854 /* 11855 * Ack processing. 
11856 */ 11857 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11858 return (ret_val); 11859 } 11860 if (ourfinisacked) { 11861 /* 11862 * If we can't receive any more data, then closing user can 11863 * proceed. Starting the timer is contrary to the 11864 * specification, but if we don't get a FIN we'll hang 11865 * forever. 11866 * 11867 * XXXjl: we should release the tp also, and use a 11868 * compressed state. 11869 */ 11870 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11871 soisdisconnected(so); 11872 tcp_timer_activate(tp, TT_2MSL, 11873 (tcp_fast_finwait2_recycle ? 11874 tcp_finwait2_timeout : 11875 TP_MAXIDLE(tp))); 11876 } 11877 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11878 } 11879 if (sbavail(&so->so_snd)) { 11880 if (ctf_progress_timeout_check(tp, true)) { 11881 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11882 tp, tick, PROGRESS_DROP, __LINE__); 11883 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11884 return (1); 11885 } 11886 } 11887 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11888 tiwin, thflags, nxt_pkt)); 11889 } 11890 11891 /* 11892 * Return value of 1, the TCB is unlocked and most 11893 * likely gone, return value of 0, the TCP is still 11894 * locked. 11895 */ 11896 static int 11897 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11898 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11899 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11900 { 11901 int32_t ret_val = 0; 11902 int32_t ourfinisacked = 0; 11903 struct tcp_rack *rack; 11904 11905 rack = (struct tcp_rack *)tp->t_fb_ptr; 11906 ctf_calc_rwin(so, tp); 11907 11908 if ((thflags & TH_RST) || 11909 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11910 return (__ctf_process_rst(m, th, so, tp, 11911 &rack->r_ctl.challenge_ack_ts, 11912 &rack->r_ctl.challenge_ack_cnt)); 11913 /* 11914 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11915 * synchronized state. 11916 */ 11917 if (thflags & TH_SYN) { 11918 ctf_challenge_ack(m, th, tp, &ret_val); 11919 return (ret_val); 11920 } 11921 /* 11922 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11923 * it's less than ts_recent, drop it. 11924 */ 11925 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11926 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11927 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11928 return (ret_val); 11929 } 11930 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11931 &rack->r_ctl.challenge_ack_ts, 11932 &rack->r_ctl.challenge_ack_cnt)) { 11933 return (ret_val); 11934 } 11935 /* 11936 * If new data are received on a connection after the user processes 11937 * are gone, then RST the other end. 11938 */ 11939 if ((so->so_state & SS_NOFDREF) && tlen) { 11940 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11941 return (1); 11942 } 11943 /* 11944 * If last ACK falls within this segment's sequence numbers, record 11945 * its timestamp. NOTE: 1) That the test incorporates suggestions 11946 * from the latest proposal of the tcplw@cray.com list (Braden 11947 * 1993/04/26). 2) That updating only on newer timestamps interferes 11948 * with our earlier PAWS tests, so this check should be solely 11949 * predicated on the sequence space of this segment. 
3) That we 11950 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11951 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11952 * SEG.Len, This modified check allows us to overcome RFC1323's 11953 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11954 * p.869. In such cases, we can still calculate the RTT correctly 11955 * when RCV.NXT == Last.ACK.Sent. 11956 */ 11957 if ((to->to_flags & TOF_TS) != 0 && 11958 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11959 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11960 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11961 tp->ts_recent_age = tcp_ts_getticks(); 11962 tp->ts_recent = to->to_tsval; 11963 } 11964 /* 11965 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11966 * is on (half-synchronized state), then queue data for later 11967 * processing; else drop segment and return. 11968 */ 11969 if ((thflags & TH_ACK) == 0) { 11970 if (tp->t_flags & TF_NEEDSYN) { 11971 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11972 tiwin, thflags, nxt_pkt)); 11973 } else if (tp->t_flags & TF_ACKNOW) { 11974 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11975 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11976 return (ret_val); 11977 } else { 11978 ctf_do_drop(m, NULL); 11979 return (0); 11980 } 11981 } 11982 /* 11983 * Ack processing. 11984 */ 11985 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11986 return (ret_val); 11987 } 11988 if (ourfinisacked) { 11989 tcp_twstart(tp); 11990 m_freem(m); 11991 return (1); 11992 } 11993 if (sbavail(&so->so_snd)) { 11994 if (ctf_progress_timeout_check(tp, true)) { 11995 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11996 tp, tick, PROGRESS_DROP, __LINE__); 11997 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11998 return (1); 11999 } 12000 } 12001 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12002 tiwin, thflags, nxt_pkt)); 12003 } 12004 12005 /* 12006 * Return value of 1, the TCB is unlocked and most 12007 * likely gone, return value of 0, the TCP is still 12008 * locked. 12009 */ 12010 static int 12011 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12012 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12013 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12014 { 12015 int32_t ret_val = 0; 12016 int32_t ourfinisacked = 0; 12017 struct tcp_rack *rack; 12018 12019 rack = (struct tcp_rack *)tp->t_fb_ptr; 12020 ctf_calc_rwin(so, tp); 12021 12022 if ((thflags & TH_RST) || 12023 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12024 return (__ctf_process_rst(m, th, so, tp, 12025 &rack->r_ctl.challenge_ack_ts, 12026 &rack->r_ctl.challenge_ack_cnt)); 12027 /* 12028 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12029 * synchronized state. 12030 */ 12031 if (thflags & TH_SYN) { 12032 ctf_challenge_ack(m, th, tp, &ret_val); 12033 return (ret_val); 12034 } 12035 /* 12036 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12037 * it's less than ts_recent, drop it. 
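 * (Example: if ts_recent is 1000 and this segment carries a tsval
 * of 900, it is treated as an old duplicate and dropped, unless
 * ctf_ts_check() finds that ts_recent itself has gone stale.)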
12038 */ 12039 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12040 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12041 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12042 return (ret_val); 12043 } 12044 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12045 &rack->r_ctl.challenge_ack_ts, 12046 &rack->r_ctl.challenge_ack_cnt)) { 12047 return (ret_val); 12048 } 12049 /* 12050 * If new data are received on a connection after the user processes 12051 * are gone, then RST the other end. 12052 */ 12053 if ((so->so_state & SS_NOFDREF) && tlen) { 12054 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12055 return (1); 12056 } 12057 /* 12058 * If last ACK falls within this segment's sequence numbers, record 12059 * its timestamp. NOTE: 1) That the test incorporates suggestions 12060 * from the latest proposal of the tcplw@cray.com list (Braden 12061 * 1993/04/26). 2) That updating only on newer timestamps interferes 12062 * with our earlier PAWS tests, so this check should be solely 12063 * predicated on the sequence space of this segment. 3) That we 12064 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12065 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12066 * SEG.Len, This modified check allows us to overcome RFC1323's 12067 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12068 * p.869. In such cases, we can still calculate the RTT correctly 12069 * when RCV.NXT == Last.ACK.Sent. 12070 */ 12071 if ((to->to_flags & TOF_TS) != 0 && 12072 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12073 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12074 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12075 tp->ts_recent_age = tcp_ts_getticks(); 12076 tp->ts_recent = to->to_tsval; 12077 } 12078 /* 12079 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12080 * is on (half-synchronized state), then queue data for later 12081 * processing; else drop segment and return. 12082 */ 12083 if ((thflags & TH_ACK) == 0) { 12084 if (tp->t_flags & TF_NEEDSYN) { 12085 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12086 tiwin, thflags, nxt_pkt)); 12087 } else if (tp->t_flags & TF_ACKNOW) { 12088 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12089 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12090 return (ret_val); 12091 } else { 12092 ctf_do_drop(m, NULL); 12093 return (0); 12094 } 12095 } 12096 /* 12097 * case TCPS_LAST_ACK: Ack processing. 12098 */ 12099 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12100 return (ret_val); 12101 } 12102 if (ourfinisacked) { 12103 tp = tcp_close(tp); 12104 ctf_do_drop(m, tp); 12105 return (1); 12106 } 12107 if (sbavail(&so->so_snd)) { 12108 if (ctf_progress_timeout_check(tp, true)) { 12109 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12110 tp, tick, PROGRESS_DROP, __LINE__); 12111 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12112 return (1); 12113 } 12114 } 12115 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12116 tiwin, thflags, nxt_pkt)); 12117 } 12118 12119 /* 12120 * Return value of 1, the TCB is unlocked and most 12121 * likely gone, return value of 0, the TCP is still 12122 * locked. 
12123 */ 12124 static int 12125 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12126 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12127 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12128 { 12129 int32_t ret_val = 0; 12130 int32_t ourfinisacked = 0; 12131 struct tcp_rack *rack; 12132 12133 rack = (struct tcp_rack *)tp->t_fb_ptr; 12134 ctf_calc_rwin(so, tp); 12135 12136 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12137 if ((thflags & TH_RST) || 12138 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12139 return (__ctf_process_rst(m, th, so, tp, 12140 &rack->r_ctl.challenge_ack_ts, 12141 &rack->r_ctl.challenge_ack_cnt)); 12142 /* 12143 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12144 * synchronized state. 12145 */ 12146 if (thflags & TH_SYN) { 12147 ctf_challenge_ack(m, th, tp, &ret_val); 12148 return (ret_val); 12149 } 12150 /* 12151 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12152 * it's less than ts_recent, drop it. 12153 */ 12154 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12155 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12156 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12157 return (ret_val); 12158 } 12159 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12160 &rack->r_ctl.challenge_ack_ts, 12161 &rack->r_ctl.challenge_ack_cnt)) { 12162 return (ret_val); 12163 } 12164 /* 12165 * If new data are received on a connection after the user processes 12166 * are gone, then RST the other end. 12167 */ 12168 if ((so->so_state & SS_NOFDREF) && 12169 tlen) { 12170 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 12171 return (1); 12172 } 12173 /* 12174 * If last ACK falls within this segment's sequence numbers, record 12175 * its timestamp. NOTE: 1) That the test incorporates suggestions 12176 * from the latest proposal of the tcplw@cray.com list (Braden 12177 * 1993/04/26). 2) That updating only on newer timestamps interferes 12178 * with our earlier PAWS tests, so this check should be solely 12179 * predicated on the sequence space of this segment. 3) That we 12180 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12181 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12182 * SEG.Len, This modified check allows us to overcome RFC1323's 12183 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12184 * p.869. In such cases, we can still calculate the RTT correctly 12185 * when RCV.NXT == Last.ACK.Sent. 12186 */ 12187 if ((to->to_flags & TOF_TS) != 0 && 12188 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12189 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12190 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12191 tp->ts_recent_age = tcp_ts_getticks(); 12192 tp->ts_recent = to->to_tsval; 12193 } 12194 /* 12195 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12196 * is on (half-synchronized state), then queue data for later 12197 * processing; else drop segment and return. 12198 */ 12199 if ((thflags & TH_ACK) == 0) { 12200 if (tp->t_flags & TF_NEEDSYN) { 12201 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12202 tiwin, thflags, nxt_pkt)); 12203 } else if (tp->t_flags & TF_ACKNOW) { 12204 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12205 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12206 return (ret_val); 12207 } else { 12208 ctf_do_drop(m, NULL); 12209 return (0); 12210 } 12211 } 12212 /* 12213 * Ack processing. 
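 * (In FIN_WAIT_2 our FIN has already been acknowledged, so no
 * further state change is driven from here; we fall through to
 * the progress check and rack_process_data() below.)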
12214 */ 12215 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12216 return (ret_val); 12217 } 12218 if (sbavail(&so->so_snd)) { 12219 if (ctf_progress_timeout_check(tp, true)) { 12220 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12221 tp, tick, PROGRESS_DROP, __LINE__); 12222 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12223 return (1); 12224 } 12225 } 12226 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12227 tiwin, thflags, nxt_pkt)); 12228 } 12229 12230 static void inline 12231 rack_clear_rate_sample(struct tcp_rack *rack) 12232 { 12233 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12234 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12235 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12236 } 12237 12238 static void 12239 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12240 { 12241 uint64_t bw_est, rate_wanted; 12242 int chged = 0; 12243 uint32_t user_max, orig_min, orig_max; 12244 12245 orig_min = rack->r_ctl.rc_pace_min_segs; 12246 orig_max = rack->r_ctl.rc_pace_max_segs; 12247 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12248 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12249 chged = 1; 12250 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12251 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12252 if (user_max != rack->r_ctl.rc_pace_max_segs) 12253 chged = 1; 12254 } 12255 if (rack->rc_force_max_seg) { 12256 rack->r_ctl.rc_pace_max_segs = user_max; 12257 } else if (rack->use_fixed_rate) { 12258 bw_est = rack_get_bw(rack); 12259 if ((rack->r_ctl.crte == NULL) || 12260 (bw_est != rack->r_ctl.crte->rate)) { 12261 rack->r_ctl.rc_pace_max_segs = user_max; 12262 } else { 12263 /* We are pacing right at the hardware rate */ 12264 uint32_t segsiz; 12265 12266 segsiz = min(ctf_fixed_maxseg(tp), 12267 rack->r_ctl.rc_pace_min_segs); 12268 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12269 tp, bw_est, segsiz, 0, 12270 rack->r_ctl.crte, NULL); 12271 } 12272 } else if (rack->rc_always_pace) { 12273 if (rack->r_ctl.gp_bw || 12274 #ifdef NETFLIX_PEAKRATE 12275 rack->rc_tp->t_maxpeakrate || 12276 #endif 12277 rack->r_ctl.init_rate) { 12278 /* We have a rate of some sort set */ 12279 uint32_t orig; 12280 12281 bw_est = rack_get_bw(rack); 12282 orig = rack->r_ctl.rc_pace_max_segs; 12283 if (fill_override) 12284 rate_wanted = *fill_override; 12285 else 12286 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12287 if (rate_wanted) { 12288 /* We have something */ 12289 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12290 rate_wanted, 12291 ctf_fixed_maxseg(rack->rc_tp)); 12292 } else 12293 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12294 if (orig != rack->r_ctl.rc_pace_max_segs) 12295 chged = 1; 12296 } else if ((rack->r_ctl.gp_bw == 0) && 12297 (rack->r_ctl.rc_pace_max_segs == 0)) { 12298 /* 12299 * If we have nothing limit us to bursting 12300 * out IW sized pieces. 
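 * (That is, rc_pace_max_segs is held at rc_init_window() bytes
 * until a goodput estimate is available.)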
12301 */ 12302 chged = 1; 12303 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12304 } 12305 } 12306 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12307 chged = 1; 12308 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12309 } 12310 if (chged) 12311 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12312 } 12313 12314 12315 static void 12316 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12317 { 12318 #ifdef INET6 12319 struct ip6_hdr *ip6 = NULL; 12320 #endif 12321 #ifdef INET 12322 struct ip *ip = NULL; 12323 #endif 12324 struct udphdr *udp = NULL; 12325 12326 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12327 #ifdef INET6 12328 if (rack->r_is_v6) { 12329 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12330 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12331 if (tp->t_port) { 12332 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12333 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12334 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12335 udp->uh_dport = tp->t_port; 12336 rack->r_ctl.fsb.udp = udp; 12337 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12338 } else 12339 { 12340 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12341 rack->r_ctl.fsb.udp = NULL; 12342 } 12343 tcpip_fillheaders(rack->rc_inp, 12344 tp->t_port, 12345 ip6, rack->r_ctl.fsb.th); 12346 } else 12347 #endif /* INET6 */ 12348 { 12349 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12350 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12351 if (tp->t_port) { 12352 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12353 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12354 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12355 udp->uh_dport = tp->t_port; 12356 rack->r_ctl.fsb.udp = udp; 12357 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12358 } else 12359 { 12360 rack->r_ctl.fsb.udp = NULL; 12361 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12362 } 12363 tcpip_fillheaders(rack->rc_inp, 12364 tp->t_port, 12365 ip, rack->r_ctl.fsb.th); 12366 } 12367 rack->r_fsb_inited = 1; 12368 } 12369 12370 static int 12371 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12372 { 12373 /* 12374 * Allocate the larger of spaces V6 if available else just 12375 * V4 and include udphdr (overbook) 12376 */ 12377 #ifdef INET6 12378 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12379 #else 12380 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12381 #endif 12382 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12383 M_TCPFSB, M_NOWAIT|M_ZERO); 12384 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12385 return (ENOMEM); 12386 } 12387 rack->r_fsb_inited = 0; 12388 return (0); 12389 } 12390 12391 static int 12392 rack_init(struct tcpcb *tp) 12393 { 12394 struct tcp_rack *rack = NULL; 12395 #ifdef INVARIANTS 12396 struct rack_sendmap *insret; 12397 #endif 12398 uint32_t iwin, snt, us_cts; 12399 int err; 12400 12401 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12402 if (tp->t_fb_ptr == NULL) { 12403 /* 12404 * We need to allocate memory but cant. The INP and INP_INFO 12405 * locks and they are recursive (happens during setup. 
So a 12406 * scheme to drop the locks fails :( 12407 * 12408 */ 12409 return (ENOMEM); 12410 } 12411 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12412 12413 rack = (struct tcp_rack *)tp->t_fb_ptr; 12414 RB_INIT(&rack->r_ctl.rc_mtree); 12415 TAILQ_INIT(&rack->r_ctl.rc_free); 12416 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12417 rack->rc_tp = tp; 12418 rack->rc_inp = tp->t_inpcb; 12419 /* Set the flag */ 12420 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12421 /* Probably not needed but lets be sure */ 12422 rack_clear_rate_sample(rack); 12423 /* 12424 * Save off the default values, socket options will poke 12425 * at these if pacing is not on or we have not yet 12426 * reached where pacing is on (gp_ready/fixed enabled). 12427 * When they get set into the CC module (when gp_ready 12428 * is enabled or we enable fixed) then we will set these 12429 * values into the CC and place in here the old values 12430 * so we have a restoral. Then we will set the flag 12431 * rc_pacing_cc_set. That way whenever we turn off pacing 12432 * or switch off this stack, we will know to go restore 12433 * the saved values. 12434 */ 12435 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12436 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12437 /* We want abe like behavior as well */ 12438 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12439 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12440 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12441 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12442 rack->r_ctl.roundends = tp->snd_max; 12443 if (use_rack_rr) 12444 rack->use_rack_rr = 1; 12445 if (V_tcp_delack_enabled) 12446 tp->t_delayed_ack = 1; 12447 else 12448 tp->t_delayed_ack = 0; 12449 #ifdef TCP_ACCOUNTING 12450 if (rack_tcp_accounting) { 12451 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12452 } 12453 #endif 12454 if (rack_enable_shared_cwnd) 12455 rack->rack_enable_scwnd = 1; 12456 rack->rc_user_set_max_segs = rack_hptsi_segments; 12457 rack->rc_force_max_seg = 0; 12458 if (rack_use_imac_dack) 12459 rack->rc_dack_mode = 1; 12460 TAILQ_INIT(&rack->r_ctl.opt_list); 12461 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12462 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12463 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12464 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12465 rack->r_ctl.rc_highest_us_rtt = 0; 12466 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12467 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12468 if (rack_use_cmp_acks) 12469 rack->r_use_cmp_ack = 1; 12470 if (rack_disable_prr) 12471 rack->rack_no_prr = 1; 12472 if (rack_gp_no_rec_chg) 12473 rack->rc_gp_no_rec_chg = 1; 12474 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12475 rack->rc_always_pace = 1; 12476 if (rack->use_fixed_rate || rack->gp_ready) 12477 rack_set_cc_pacing(rack); 12478 } else 12479 rack->rc_always_pace = 0; 12480 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12481 rack->r_mbuf_queue = 1; 12482 else 12483 rack->r_mbuf_queue = 0; 12484 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12485 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12486 else 12487 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12488 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12489 if (rack_limits_scwnd) 12490 rack->r_limit_scw = 1; 12491 else 12492 rack->r_limit_scw = 0; 12493 rack->rc_labc = V_tcp_abc_l_var; 12494 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12495 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12496 
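/*
 * The per-connection defaults below are seeded from the global
 * rack_* sysctl values at init time; socket options may later
 * override them on this connection.
 */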
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12497 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12498 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12499 rack->r_ctl.rc_min_to = rack_min_to; 12500 microuptime(&rack->r_ctl.act_rcv_time); 12501 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12502 rack->rc_init_win = rack_default_init_window; 12503 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12504 if (rack_hw_up_only) 12505 rack->r_up_only = 1; 12506 if (rack_do_dyn_mul) { 12507 /* When dynamic adjustment is on CA needs to start at 100% */ 12508 rack->rc_gp_dyn_mul = 1; 12509 if (rack_do_dyn_mul >= 100) 12510 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12511 } else 12512 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12513 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12514 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12515 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12516 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12517 rack_probertt_filter_life); 12518 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12519 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12520 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12521 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12522 rack->r_ctl.rc_time_probertt_starts = 0; 12523 if (rack_dsack_std_based & 0x1) { 12524 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12525 rack->rc_rack_tmr_std_based = 1; 12526 } 12527 if (rack_dsack_std_based & 0x2) { 12528 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12529 rack->rc_rack_use_dsack = 1; 12530 } 12531 /* We require at least one measurement, even if the sysctl is 0 */ 12532 if (rack_req_measurements) 12533 rack->r_ctl.req_measurements = rack_req_measurements; 12534 else 12535 rack->r_ctl.req_measurements = 1; 12536 if (rack_enable_hw_pacing) 12537 rack->rack_hdw_pace_ena = 1; 12538 if (rack_hw_rate_caps) 12539 rack->r_rack_hw_rate_caps = 1; 12540 /* Do we force on detection? 
*/ 12541 #ifdef NETFLIX_EXP_DETECTION 12542 if (tcp_force_detection) 12543 rack->do_detection = 1; 12544 else 12545 #endif 12546 rack->do_detection = 0; 12547 if (rack_non_rxt_use_cr) 12548 rack->rack_rec_nonrxt_use_cr = 1; 12549 err = rack_init_fsb(tp, rack); 12550 if (err) { 12551 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12552 tp->t_fb_ptr = NULL; 12553 return (err); 12554 } 12555 if (tp->snd_una != tp->snd_max) { 12556 /* Create a send map for the current outstanding data */ 12557 struct rack_sendmap *rsm; 12558 12559 rsm = rack_alloc(rack); 12560 if (rsm == NULL) { 12561 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12562 tp->t_fb_ptr = NULL; 12563 return (ENOMEM); 12564 } 12565 rsm->r_no_rtt_allowed = 1; 12566 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12567 rsm->r_rtr_cnt = 1; 12568 rsm->r_rtr_bytes = 0; 12569 if (tp->t_flags & TF_SENTFIN) { 12570 rsm->r_end = tp->snd_max - 1; 12571 rsm->r_flags |= RACK_HAS_FIN; 12572 } else { 12573 rsm->r_end = tp->snd_max; 12574 } 12575 if (tp->snd_una == tp->iss) { 12576 /* The data space is one beyond snd_una */ 12577 rsm->r_flags |= RACK_HAS_SYN; 12578 rsm->r_start = tp->iss; 12579 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12580 } else 12581 rsm->r_start = tp->snd_una; 12582 rsm->r_dupack = 0; 12583 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12584 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12585 if (rsm->m) 12586 rsm->orig_m_len = rsm->m->m_len; 12587 else 12588 rsm->orig_m_len = 0; 12589 } else { 12590 /* 12591 * This can happen if we have a stand-alone FIN or 12592 * SYN. 12593 */ 12594 rsm->m = NULL; 12595 rsm->orig_m_len = 0; 12596 rsm->soff = 0; 12597 } 12598 #ifndef INVARIANTS 12599 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12600 #else 12601 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12602 if (insret != NULL) { 12603 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12604 insret, rack, rsm); 12605 } 12606 #endif 12607 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12608 rsm->r_in_tmap = 1; 12609 } 12610 /* 12611 * Timers in Rack are kept in microseconds so lets 12612 * convert any initial incoming variables 12613 * from ticks into usecs. Note that we 12614 * also change the values of t_srtt and t_rttvar, if 12615 * they are non-zero. They are kept with a 5 12616 * bit decimal so we have to carefully convert 12617 * these to get the full precision. 12618 */ 12619 rack_convert_rtts(tp); 12620 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12621 if (rack_do_hystart) { 12622 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 12623 if (rack_do_hystart > 1) 12624 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 12625 if (rack_do_hystart > 2) 12626 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 12627 } 12628 if (rack_def_profile) 12629 rack_set_profile(rack, rack_def_profile); 12630 /* Cancel the GP measurement in progress */ 12631 tp->t_flags &= ~TF_GPUTINPROG; 12632 if (SEQ_GT(tp->snd_max, tp->iss)) 12633 snt = tp->snd_max - tp->iss; 12634 else 12635 snt = 0; 12636 iwin = rc_init_window(rack); 12637 if (snt < iwin) { 12638 /* We are not past the initial window 12639 * so we need to make sure cwnd is 12640 * correct. 12641 */ 12642 if (tp->snd_cwnd < iwin) 12643 tp->snd_cwnd = iwin; 12644 /* 12645 * If we are within the initial window 12646 * we want ssthresh to be unlimited. 
Setting 12647 * it to the rwnd (which the default stack does, 12648 * as do older racks) is not really a good idea 12649 * since we want to be in SS and grow both the 12650 * cwnd and the rwnd (via dynamic rwnd growth). If 12651 * we set it to the rwnd then as the peer grows its 12652 * rwnd we will be stuck in CA and never hit SS. 12653 * 12654 * It's far better to raise it up high (this takes the 12655 * risk that there has been a loss already, probably 12656 * we should have an indicator in all stacks of loss 12657 * but we don't), but considering the normal use this 12658 * is a risk worth taking. The consequences of not 12659 * hitting SS are far worse than going one more time 12660 * into it early on (before we have sent even an IW). 12661 * It is highly unlikely that we will have had a loss 12662 * before getting the IW out. 12663 */ 12664 tp->snd_ssthresh = 0xffffffff; 12665 } 12666 rack_stop_all_timers(tp); 12667 /* Let's set up the fsb block */ 12668 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12669 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12670 __LINE__, RACK_RTTS_INIT); 12671 return (0); 12672 } 12673 12674 static int 12675 rack_handoff_ok(struct tcpcb *tp) 12676 { 12677 if ((tp->t_state == TCPS_CLOSED) || 12678 (tp->t_state == TCPS_LISTEN)) { 12679 /* Sure no problem though it may not stick */ 12680 return (0); 12681 } 12682 if ((tp->t_state == TCPS_SYN_SENT) || 12683 (tp->t_state == TCPS_SYN_RECEIVED)) { 12684 /* 12685 * We really don't know if you support sack, 12686 * you have to get to ESTAB or beyond to tell. 12687 */ 12688 return (EAGAIN); 12689 } 12690 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12691 /* 12692 * Rack will only send a FIN after all data is acknowledged. 12693 * So in this case we have more data outstanding. We can't 12694 * switch stacks until either all data and only the FIN 12695 * is left (in which case rack_init() now knows how 12696 * to deal with that) <or> all is acknowledged and we 12697 * are only left with incoming data, though why you 12698 * would want to switch to rack after all data is acknowledged 12699 * I have no idea (rrs)! 12700 */ 12701 return (EAGAIN); 12702 } 12703 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12704 return (0); 12705 } 12706 /* 12707 * If we reach here we don't do SACK on this connection so we can 12708 * never do rack. 12709 */ 12710 return (EINVAL); 12711 } 12712 12713 12714 static void 12715 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12716 { 12717 if (tp->t_fb_ptr) { 12718 struct tcp_rack *rack; 12719 struct rack_sendmap *rsm, *nrsm; 12720 #ifdef INVARIANTS 12721 struct rack_sendmap *rm; 12722 #endif 12723 12724 rack = (struct tcp_rack *)tp->t_fb_ptr; 12725 if (tp->t_in_pkt) { 12726 /* 12727 * It is unsafe to process the packets since a 12728 * reset may be lurking in them (it's rare but it 12729 * can occur). If we were to find a RST, then we 12730 * would end up dropping the connection and the 12731 * INP lock, so when we return the caller (tcp_usrreq) 12732 * will blow up when it tries to unlock the inp.
12733 */ 12734 struct mbuf *save, *m; 12735 12736 m = tp->t_in_pkt; 12737 tp->t_in_pkt = NULL; 12738 tp->t_tail_pkt = NULL; 12739 while (m) { 12740 save = m->m_nextpkt; 12741 m->m_nextpkt = NULL; 12742 m_freem(m); 12743 m = save; 12744 } 12745 } 12746 tp->t_flags &= ~TF_FORCEDATA; 12747 #ifdef NETFLIX_SHARED_CWND 12748 if (rack->r_ctl.rc_scw) { 12749 uint32_t limit; 12750 12751 if (rack->r_limit_scw) 12752 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12753 else 12754 limit = 0; 12755 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12756 rack->r_ctl.rc_scw_index, 12757 limit); 12758 rack->r_ctl.rc_scw = NULL; 12759 } 12760 #endif 12761 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12762 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12763 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12764 rack->r_ctl.fsb.th = NULL; 12765 } 12766 /* Convert back to ticks, with */ 12767 if (tp->t_srtt > 1) { 12768 uint32_t val, frac; 12769 12770 val = USEC_2_TICKS(tp->t_srtt); 12771 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12772 tp->t_srtt = val << TCP_RTT_SHIFT; 12773 /* 12774 * frac is the fractional part here is left 12775 * over from converting to hz and shifting. 12776 * We need to convert this to the 5 bit 12777 * remainder. 12778 */ 12779 if (frac) { 12780 if (hz == 1000) { 12781 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12782 } else { 12783 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12784 } 12785 tp->t_srtt += frac; 12786 } 12787 } 12788 if (tp->t_rttvar) { 12789 uint32_t val, frac; 12790 12791 val = USEC_2_TICKS(tp->t_rttvar); 12792 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12793 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12794 /* 12795 * frac is the fractional part here is left 12796 * over from converting to hz and shifting. 12797 * We need to convert this to the 5 bit 12798 * remainder. 
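 * (Same math as the t_srtt conversion above: with hz=1000 an
 * input of 30500 usec becomes 30 ticks with 500 usec left over,
 * and 500 * TCP_RTT_SCALE / 1000 == 16 is added back as the
 * fixed-point fraction, i.e. 30.5 ticks.)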
12799 */ 12800 if (frac) { 12801 if (hz == 1000) { 12802 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12803 } else { 12804 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12805 } 12806 tp->t_rttvar += frac; 12807 } 12808 } 12809 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12810 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12811 if (rack->rc_always_pace) { 12812 tcp_decrement_paced_conn(); 12813 rack_undo_cc_pacing(rack); 12814 rack->rc_always_pace = 0; 12815 } 12816 /* Clean up any options if they were not applied */ 12817 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12818 struct deferred_opt_list *dol; 12819 12820 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12821 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12822 free(dol, M_TCPDO); 12823 } 12824 /* rack does not use force data but other stacks may clear it */ 12825 if (rack->r_ctl.crte != NULL) { 12826 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12827 rack->rack_hdrw_pacing = 0; 12828 rack->r_ctl.crte = NULL; 12829 } 12830 #ifdef TCP_BLACKBOX 12831 tcp_log_flowend(tp); 12832 #endif 12833 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12834 #ifndef INVARIANTS 12835 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12836 #else 12837 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12838 if (rm != rsm) { 12839 panic("At fini, rack:%p rsm:%p rm:%p", 12840 rack, rsm, rm); 12841 } 12842 #endif 12843 uma_zfree(rack_zone, rsm); 12844 } 12845 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12846 while (rsm) { 12847 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12848 uma_zfree(rack_zone, rsm); 12849 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12850 } 12851 rack->rc_free_cnt = 0; 12852 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12853 tp->t_fb_ptr = NULL; 12854 } 12855 if (tp->t_inpcb) { 12856 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12857 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12858 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12859 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 12860 /* Cancel the GP measurement in progress */ 12861 tp->t_flags &= ~TF_GPUTINPROG; 12862 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 12863 } 12864 /* Make sure snd_nxt is correctly set */ 12865 tp->snd_nxt = tp->snd_max; 12866 } 12867 12868 static void 12869 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12870 { 12871 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12872 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12873 } 12874 switch (tp->t_state) { 12875 case TCPS_SYN_SENT: 12876 rack->r_state = TCPS_SYN_SENT; 12877 rack->r_substate = rack_do_syn_sent; 12878 break; 12879 case TCPS_SYN_RECEIVED: 12880 rack->r_state = TCPS_SYN_RECEIVED; 12881 rack->r_substate = rack_do_syn_recv; 12882 break; 12883 case TCPS_ESTABLISHED: 12884 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12885 rack->r_state = TCPS_ESTABLISHED; 12886 rack->r_substate = rack_do_established; 12887 break; 12888 case TCPS_CLOSE_WAIT: 12889 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12890 rack->r_state = TCPS_CLOSE_WAIT; 12891 rack->r_substate = rack_do_close_wait; 12892 break; 12893 case TCPS_FIN_WAIT_1: 12894 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12895 rack->r_state = TCPS_FIN_WAIT_1; 12896 rack->r_substate = rack_do_fin_wait_1; 12897 break; 12898 case TCPS_CLOSING: 12899 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12900 rack->r_state = TCPS_CLOSING; 12901 rack->r_substate = 
rack_do_closing; 12902 break; 12903 case TCPS_LAST_ACK: 12904 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12905 rack->r_state = TCPS_LAST_ACK; 12906 rack->r_substate = rack_do_lastack; 12907 break; 12908 case TCPS_FIN_WAIT_2: 12909 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12910 rack->r_state = TCPS_FIN_WAIT_2; 12911 rack->r_substate = rack_do_fin_wait_2; 12912 break; 12913 case TCPS_LISTEN: 12914 case TCPS_CLOSED: 12915 case TCPS_TIME_WAIT: 12916 default: 12917 break; 12918 }; 12919 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12920 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12921 12922 } 12923 12924 static void 12925 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12926 { 12927 /* 12928 * We received an ack, and then did not 12929 * call send or were bounced out because the 12930 * hpts was running. Now a timer is up as well, is 12931 * it the right timer? 12932 */ 12933 struct rack_sendmap *rsm; 12934 int tmr_up; 12935 12936 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12937 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12938 return; 12939 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12940 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12941 (tmr_up == PACE_TMR_RXT)) { 12942 /* Should be an RXT */ 12943 return; 12944 } 12945 if (rsm == NULL) { 12946 /* Nothing outstanding? */ 12947 if (tp->t_flags & TF_DELACK) { 12948 if (tmr_up == PACE_TMR_DELACK) 12949 /* We are supposed to have delayed ack up and we do */ 12950 return; 12951 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12952 /* 12953 * If we hit ENOBUFS then we would expect the possibility 12954 * of nothing outstanding and the RXT up (and the hptsi timer). 12955 */ 12956 return; 12957 } else if (((V_tcp_always_keepalive || 12958 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12959 (tp->t_state <= TCPS_CLOSING)) && 12960 (tmr_up == PACE_TMR_KEEP) && 12961 (tp->snd_max == tp->snd_una)) { 12962 /* We should have keep alive up and we do */ 12963 return; 12964 } 12965 } 12966 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12967 ((tmr_up == PACE_TMR_TLP) || 12968 (tmr_up == PACE_TMR_RACK) || 12969 (tmr_up == PACE_TMR_RXT))) { 12970 /* 12971 * Either a Rack, TLP or RXT is fine if we 12972 * have outstanding data. 12973 */ 12974 return; 12975 } else if (tmr_up == PACE_TMR_DELACK) { 12976 /* 12977 * If the delayed ack was going to go off 12978 * before the rtx/tlp/rack timer were going to 12979 * expire, then that would be the timer in control. 12980 * Note we don't check the time here, trusting the 12981 * code is correct. 12982 */ 12983 return; 12984 } 12985 /* 12986 * Ok, the timer originally started is not what we want now. 12987 * We will force the hpts to be stopped if any, and restart 12988 * with the slot set to what was in the saved slot.
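 * (If a pacing output was pending, any not-yet-elapsed portion of
 * rc_last_output_to is credited to rc_agg_early before the hpts
 * entry is removed.)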
12989 */ 12990 if (tcp_in_hpts(rack->rc_inp)) { 12991 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12992 uint32_t us_cts; 12993 12994 us_cts = tcp_get_usecs(NULL); 12995 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12996 rack->r_early = 1; 12997 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12998 } 12999 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13000 } 13001 tcp_hpts_remove(tp->t_inpcb); 13002 } 13003 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13004 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13005 } 13006 13007 13008 static void 13009 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13010 { 13011 if ((SEQ_LT(tp->snd_wl1, seq) || 13012 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13013 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13014 /* keep track of pure window updates */ 13015 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13016 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13017 tp->snd_wnd = tiwin; 13018 rack_validate_fo_sendwin_up(tp, rack); 13019 tp->snd_wl1 = seq; 13020 tp->snd_wl2 = ack; 13021 if (tp->snd_wnd > tp->max_sndwnd) 13022 tp->max_sndwnd = tp->snd_wnd; 13023 rack->r_wanted_output = 1; 13024 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13025 tp->snd_wnd = tiwin; 13026 rack_validate_fo_sendwin_up(tp, rack); 13027 tp->snd_wl1 = seq; 13028 tp->snd_wl2 = ack; 13029 } else { 13030 /* Not a valid win update */ 13031 return; 13032 } 13033 if (tp->snd_wnd > tp->max_sndwnd) 13034 tp->max_sndwnd = tp->snd_wnd; 13035 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 13036 /* The peer collapsed the window */ 13037 rack_collapsed_window(rack); 13038 } else if (rack->rc_has_collapsed) 13039 rack_un_collapse_window(rack); 13040 /* Do we exit persists? */ 13041 if ((rack->rc_in_persist != 0) && 13042 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13043 rack->r_ctl.rc_pace_min_segs))) { 13044 rack_exit_persist(tp, rack, cts); 13045 } 13046 /* Do we enter persists? */ 13047 if ((rack->rc_in_persist == 0) && 13048 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13049 TCPS_HAVEESTABLISHED(tp->t_state) && 13050 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13051 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13052 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13053 /* 13054 * Here the rwnd is less than 13055 * the pacing size, we are established, 13056 * nothing is outstanding, and there is 13057 * data to send. Enter persists. 
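 * (Note the snd_wnd threshold here matches the persist-exit test
 * above: min(rc_high_rwnd/2, rc_pace_min_segs).)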
13058 */ 13059 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13060 } 13061 } 13062 13063 static void 13064 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13065 { 13066 13067 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13068 union tcp_log_stackspecific log; 13069 struct timeval ltv; 13070 char tcp_hdr_buf[60]; 13071 struct tcphdr *th; 13072 struct timespec ts; 13073 uint32_t orig_snd_una; 13074 uint8_t xx = 0; 13075 13076 #ifdef NETFLIX_HTTP_LOGGING 13077 struct http_sendfile_track *http_req; 13078 13079 if (SEQ_GT(ae->ack, tp->snd_una)) { 13080 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13081 } else { 13082 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13083 } 13084 #endif 13085 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13086 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13087 if (rack->rack_no_prr == 0) 13088 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13089 else 13090 log.u_bbr.flex1 = 0; 13091 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13092 log.u_bbr.use_lt_bw <<= 1; 13093 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13094 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13095 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13096 log.u_bbr.pkts_out = tp->t_maxseg; 13097 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13098 log.u_bbr.flex7 = 1; 13099 log.u_bbr.lost = ae->flags; 13100 log.u_bbr.cwnd_gain = ackval; 13101 log.u_bbr.pacing_gain = 0x2; 13102 if (ae->flags & TSTMP_HDWR) { 13103 /* Record the hardware timestamp if present */ 13104 log.u_bbr.flex3 = M_TSTMP; 13105 ts.tv_sec = ae->timestamp / 1000000000; 13106 ts.tv_nsec = ae->timestamp % 1000000000; 13107 ltv.tv_sec = ts.tv_sec; 13108 ltv.tv_usec = ts.tv_nsec / 1000; 13109 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13110 } else if (ae->flags & TSTMP_LRO) { 13111 /* Record the LRO the arrival timestamp */ 13112 log.u_bbr.flex3 = M_TSTMP_LRO; 13113 ts.tv_sec = ae->timestamp / 1000000000; 13114 ts.tv_nsec = ae->timestamp % 1000000000; 13115 ltv.tv_sec = ts.tv_sec; 13116 ltv.tv_usec = ts.tv_nsec / 1000; 13117 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13118 } 13119 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13120 /* Log the rcv time */ 13121 log.u_bbr.delRate = ae->timestamp; 13122 #ifdef NETFLIX_HTTP_LOGGING 13123 log.u_bbr.applimited = tp->t_http_closed; 13124 log.u_bbr.applimited <<= 8; 13125 log.u_bbr.applimited |= tp->t_http_open; 13126 log.u_bbr.applimited <<= 8; 13127 log.u_bbr.applimited |= tp->t_http_req; 13128 if (http_req) { 13129 /* Copy out any client req info */ 13130 /* seconds */ 13131 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13132 /* useconds */ 13133 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13134 log.u_bbr.rttProp = http_req->timestamp; 13135 log.u_bbr.cur_del_rate = http_req->start; 13136 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13137 log.u_bbr.flex8 |= 1; 13138 } else { 13139 log.u_bbr.flex8 |= 2; 13140 log.u_bbr.bw_inuse = http_req->end; 13141 } 13142 log.u_bbr.flex6 = http_req->start_seq; 13143 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13144 log.u_bbr.flex8 |= 4; 13145 log.u_bbr.epoch = http_req->end_seq; 13146 } 13147 } 13148 #endif 13149 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13150 th = (struct tcphdr *)tcp_hdr_buf; 13151 th->th_seq = ae->seq; 13152 th->th_ack = ae->ack; 13153 th->th_win = ae->win; 13154 /* Now fill in the ports */ 13155 th->th_sport = tp->t_inpcb->inp_fport; 13156 th->th_dport = tp->t_inpcb->inp_lport; 13157 
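/*
 * Note the port swap: for an inbound ack the source port is the
 * peer's (foreign) port and the destination is our local port.
 */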
tcp_set_flags(th, ae->flags); 13158 /* Now do we have a timestamp option? */ 13159 if (ae->flags & HAS_TSTMP) { 13160 u_char *cp; 13161 uint32_t val; 13162 13163 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13164 cp = (u_char *)(th + 1); 13165 *cp = TCPOPT_NOP; 13166 cp++; 13167 *cp = TCPOPT_NOP; 13168 cp++; 13169 *cp = TCPOPT_TIMESTAMP; 13170 cp++; 13171 *cp = TCPOLEN_TIMESTAMP; 13172 cp++; 13173 val = htonl(ae->ts_value); 13174 bcopy((char *)&val, 13175 (char *)cp, sizeof(uint32_t)); 13176 val = htonl(ae->ts_echo); 13177 bcopy((char *)&val, 13178 (char *)(cp + 4), sizeof(uint32_t)); 13179 } else 13180 th->th_off = (sizeof(struct tcphdr) >> 2); 13181 13182 /* 13183 * For sane logging we need to play a little trick. 13184 * If the ack were fully processed we would have moved 13185 * snd_una to high_seq, but since compressed acks are 13186 * processed in two phases, at this point (logging) snd_una 13187 * won't be advanced. So we would see multiple acks showing 13188 * the advancement. We can prevent that by "pretending" that 13189 * snd_una was advanced and then un-advancing it so that the 13190 * logging code has the right value for tlb_snd_una. 13191 */ 13192 if (tp->snd_una != high_seq) { 13193 orig_snd_una = tp->snd_una; 13194 tp->snd_una = high_seq; 13195 xx = 1; 13196 } else 13197 xx = 0; 13198 TCP_LOG_EVENTP(tp, th, 13199 &tp->t_inpcb->inp_socket->so_rcv, 13200 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13201 0, &log, true, <v); 13202 if (xx) { 13203 tp->snd_una = orig_snd_una; 13204 } 13205 } 13206 13207 } 13208 13209 static void 13210 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13211 { 13212 uint32_t us_rtt; 13213 /* 13214 * A persist or keep-alive was forced out, update our 13215 * min rtt time. Note now worry about lost responses. 13216 * When a subsequent keep-alive or persist times out 13217 * and forced_ack is still on, then the last probe 13218 * was not responded to. In such cases we have a 13219 * sysctl that controls the behavior. Either we apply 13220 * the rtt but with reduced confidence (0). Or we just 13221 * plain don't apply the rtt estimate. Having data flow 13222 * will clear the probe_not_answered flag i.e. cum-ack 13223 * move forward <or> exiting and reentering persists. 13224 */ 13225 13226 rack->forced_ack = 0; 13227 rack->rc_tp->t_rxtshift = 0; 13228 if ((rack->rc_in_persist && 13229 (tiwin == rack->rc_tp->snd_wnd)) || 13230 (rack->rc_in_persist == 0)) { 13231 /* 13232 * In persists only apply the RTT update if this is 13233 * a response to our window probe. And that 13234 * means the rwnd sent must match the current 13235 * snd_wnd. If it does not, then we got a 13236 * window update ack instead. For keepalive 13237 * we allow the answer no matter what the window. 13238 * 13239 * Note that if the probe_not_answered is set then 13240 * the forced_ack_ts is the oldest one i.e. the first 13241 * probe sent that might have been lost. This assures 13242 * us that if we do calculate an RTT it is longer not 13243 * some short thing. 
13244 */ 13245 if (rack->rc_in_persist) 13246 counter_u64_add(rack_persists_acks, 1); 13247 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13248 if (us_rtt == 0) 13249 us_rtt = 1; 13250 if (rack->probe_not_answered == 0) { 13251 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13252 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13253 } else { 13254 /* We have a retransmitted probe here too */ 13255 if (rack_apply_rtt_with_reduced_conf) { 13256 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13257 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13258 } 13259 } 13260 } 13261 } 13262 13263 static int 13264 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13265 { 13266 /* 13267 * Handle a "special" compressed ack mbuf. Each incoming 13268 * ack has only four possible dispositions: 13269 * 13270 * A) It moves the cum-ack forward 13271 * B) It is behind the cum-ack. 13272 * C) It is a window-update ack. 13273 * D) It is a dup-ack. 13274 * 13275 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13276 * in the incoming mbuf. We also need to still pay attention 13277 * to nxt_pkt since there may be another packet after this 13278 * one. 13279 */ 13280 #ifdef TCP_ACCOUNTING 13281 uint64_t ts_val; 13282 uint64_t rdstc; 13283 #endif 13284 int segsiz; 13285 struct timespec ts; 13286 struct tcp_rack *rack; 13287 struct tcp_ackent *ae; 13288 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13289 int cnt, i, did_out, ourfinisacked = 0; 13290 struct tcpopt to_holder, *to = NULL; 13291 #ifdef TCP_ACCOUNTING 13292 int win_up_req = 0; 13293 #endif 13294 int nsegs = 0; 13295 int under_pacing = 1; 13296 int recovery = 0; 13297 #ifdef TCP_ACCOUNTING 13298 sched_pin(); 13299 #endif 13300 rack = (struct tcp_rack *)tp->t_fb_ptr; 13301 if (rack->gp_ready && 13302 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13303 under_pacing = 0; 13304 else 13305 under_pacing = 1; 13306 13307 if (rack->r_state != tp->t_state) 13308 rack_set_state(tp, rack); 13309 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13310 (tp->t_flags & TF_GPUTINPROG)) { 13311 /* 13312 * We have a goodput in progress 13313 * and we have entered a late state. 13314 * Do we have enough data in the sb 13315 * to handle the GPUT request? 13316 */ 13317 uint32_t bytes; 13318 13319 bytes = tp->gput_ack - tp->gput_seq; 13320 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13321 bytes += tp->gput_seq - tp->snd_una; 13322 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13323 /* 13324 * There are not enough bytes in the socket 13325 * buffer that have been sent to cover this 13326 * measurement. Cancel it. 
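 * (Put differently, the send buffer would still need to hold roughly
 * gput_ack - snd_una bytes for the measurement to ever complete, and
 * it no longer does.)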
13327 */ 13328 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13329 rack->r_ctl.rc_gp_srtt /*flex1*/, 13330 tp->gput_seq, 13331 0, 0, 18, __LINE__, NULL, 0); 13332 tp->t_flags &= ~TF_GPUTINPROG; 13333 } 13334 } 13335 to = &to_holder; 13336 to->to_flags = 0; 13337 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13338 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13339 cnt = m->m_len / sizeof(struct tcp_ackent); 13340 counter_u64_add(rack_multi_single_eq, cnt); 13341 high_seq = tp->snd_una; 13342 the_win = tp->snd_wnd; 13343 win_seq = tp->snd_wl1; 13344 win_upd_ack = tp->snd_wl2; 13345 cts = tcp_tv_to_usectick(tv); 13346 ms_cts = tcp_tv_to_mssectick(tv); 13347 rack->r_ctl.rc_rcvtime = cts; 13348 segsiz = ctf_fixed_maxseg(tp); 13349 if ((rack->rc_gp_dyn_mul) && 13350 (rack->use_fixed_rate == 0) && 13351 (rack->rc_always_pace)) { 13352 /* Check in on probertt */ 13353 rack_check_probe_rtt(rack, cts); 13354 } 13355 for (i = 0; i < cnt; i++) { 13356 #ifdef TCP_ACCOUNTING 13357 ts_val = get_cyclecount(); 13358 #endif 13359 rack_clear_rate_sample(rack); 13360 ae = ((mtod(m, struct tcp_ackent *)) + i); 13361 /* Setup the window */ 13362 tiwin = ae->win << tp->snd_scale; 13363 if (tiwin > rack->r_ctl.rc_high_rwnd) 13364 rack->r_ctl.rc_high_rwnd = tiwin; 13365 /* figure out the type of ack */ 13366 if (SEQ_LT(ae->ack, high_seq)) { 13367 /* Case B*/ 13368 ae->ack_val_set = ACK_BEHIND; 13369 } else if (SEQ_GT(ae->ack, high_seq)) { 13370 /* Case A */ 13371 ae->ack_val_set = ACK_CUMACK; 13372 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13373 /* Case D */ 13374 ae->ack_val_set = ACK_DUPACK; 13375 } else { 13376 /* Case C */ 13377 ae->ack_val_set = ACK_RWND; 13378 } 13379 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13380 /* Validate timestamp */ 13381 if (ae->flags & HAS_TSTMP) { 13382 /* Setup for a timestamp */ 13383 to->to_flags = TOF_TS; 13384 ae->ts_echo -= tp->ts_offset; 13385 to->to_tsecr = ae->ts_echo; 13386 to->to_tsval = ae->ts_value; 13387 /* 13388 * If echoed timestamp is later than the current time, fall back to 13389 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13390 * were used when this connection was established. 13391 */ 13392 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13393 to->to_tsecr = 0; 13394 if (tp->ts_recent && 13395 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13396 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13397 #ifdef TCP_ACCOUNTING 13398 rdstc = get_cyclecount(); 13399 if (rdstc > ts_val) { 13400 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13401 (rdstc - ts_val)); 13402 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13403 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13404 } 13405 } 13406 #endif 13407 continue; 13408 } 13409 } 13410 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13411 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13412 tp->ts_recent_age = tcp_ts_getticks(); 13413 tp->ts_recent = ae->ts_value; 13414 } 13415 } else { 13416 /* Setup for a no options */ 13417 to->to_flags = 0; 13418 } 13419 /* Update the rcv time and perform idle reduction possibly */ 13420 if (tp->t_idle_reduce && 13421 (tp->snd_max == tp->snd_una) && 13422 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13423 counter_u64_add(rack_input_idle_reduces, 1); 13424 rack_cc_after_idle(rack, tp); 13425 } 13426 tp->t_rcvtime = ticks; 13427 /* Now what about ECN? 
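 * If the peer is echoing a congestion experienced mark, the helper
 * below says so and this ack is treated as a congestion signal.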
*/ 13428 if (tcp_ecn_input_segment(tp, ae->flags, ae->codepoint)) 13429 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13430 #ifdef TCP_ACCOUNTING 13431 /* Count for the specific type of ack in */ 13432 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13433 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13434 tp->tcp_cnt_counters[ae->ack_val_set]++; 13435 } 13436 #endif 13437 /* 13438 * Note how we could move up these in the determination 13439 * above, but we don't so that way the timestamp checks (and ECN) 13440 * is done first before we do any processing on the ACK. 13441 * The non-compressed path through the code has this 13442 * weakness (noted by @jtl) that it actually does some 13443 * processing before verifying the timestamp information. 13444 * We don't take that path here which is why we set 13445 * the ack_val_set first, do the timestamp and ecn 13446 * processing, and then look at what we have setup. 13447 */ 13448 if (ae->ack_val_set == ACK_BEHIND) { 13449 /* 13450 * Case B flag reordering, if window is not closed 13451 * or it could be a keep-alive or persists 13452 */ 13453 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13454 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13455 } 13456 } else if (ae->ack_val_set == ACK_DUPACK) { 13457 /* Case D */ 13458 rack_strike_dupack(rack); 13459 } else if (ae->ack_val_set == ACK_RWND) { 13460 /* Case C */ 13461 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13462 ts.tv_sec = ae->timestamp / 1000000000; 13463 ts.tv_nsec = ae->timestamp % 1000000000; 13464 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13465 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13466 } else { 13467 rack->r_ctl.act_rcv_time = *tv; 13468 } 13469 if (rack->forced_ack) { 13470 rack_handle_probe_response(rack, tiwin, 13471 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13472 } 13473 #ifdef TCP_ACCOUNTING 13474 win_up_req = 1; 13475 #endif 13476 win_upd_ack = ae->ack; 13477 win_seq = ae->seq; 13478 the_win = tiwin; 13479 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13480 } else { 13481 /* Case A */ 13482 if (SEQ_GT(ae->ack, tp->snd_max)) { 13483 /* 13484 * We just send an ack since the incoming 13485 * ack is beyond the largest seq we sent. 
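 * The ack-war checks below rate limit how often we will answer such
 * out-of-range acks.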
13486 */ 13487 if ((tp->t_flags & TF_ACKNOW) == 0) { 13488 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13489 if (tp->t_flags & TF_ACKNOW) 13490 rack->r_wanted_output = 1; 13491 } 13492 } else { 13493 nsegs++; 13494 /* If the window changed setup to update */ 13495 if (tiwin != tp->snd_wnd) { 13496 win_upd_ack = ae->ack; 13497 win_seq = ae->seq; 13498 the_win = tiwin; 13499 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13500 } 13501 #ifdef TCP_ACCOUNTING 13502 /* Account for the acks */ 13503 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13504 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13505 } 13506 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13507 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13508 #endif 13509 high_seq = ae->ack; 13510 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13511 union tcp_log_stackspecific log; 13512 struct timeval tv; 13513 13514 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13515 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13516 log.u_bbr.flex1 = high_seq; 13517 log.u_bbr.flex2 = rack->r_ctl.roundends; 13518 log.u_bbr.flex3 = rack->r_ctl.current_round; 13519 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13520 log.u_bbr.flex8 = 8; 13521 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13522 0, &log, false, NULL, NULL, 0, &tv); 13523 } 13524 /* 13525 * The draft (v3) calls for us to use SEQ_GEQ, but that 13526 * causes issues when we are just going app limited. Lets 13527 * instead use SEQ_GT <or> where its equal but more data 13528 * is outstanding. 13529 */ 13530 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13531 ((high_seq == rack->r_ctl.roundends) && 13532 SEQ_GT(tp->snd_max, tp->snd_una))) { 13533 rack->r_ctl.current_round++; 13534 rack->r_ctl.roundends = tp->snd_max; 13535 if (CC_ALGO(tp)->newround != NULL) { 13536 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13537 } 13538 } 13539 /* Setup our act_rcv_time */ 13540 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13541 ts.tv_sec = ae->timestamp / 1000000000; 13542 ts.tv_nsec = ae->timestamp % 1000000000; 13543 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13544 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13545 } else { 13546 rack->r_ctl.act_rcv_time = *tv; 13547 } 13548 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13549 if (rack->rc_dsack_round_seen) { 13550 /* Is the dsack round over? */ 13551 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13552 /* Yes it is */ 13553 rack->rc_dsack_round_seen = 0; 13554 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13555 } 13556 } 13557 } 13558 } 13559 /* And lets be sure to commit the rtt measurements for this ack */ 13560 tcp_rack_xmit_timer_commit(rack, tp); 13561 #ifdef TCP_ACCOUNTING 13562 rdstc = get_cyclecount(); 13563 if (rdstc > ts_val) { 13564 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13565 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13566 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13567 if (ae->ack_val_set == ACK_CUMACK) 13568 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13569 } 13570 } 13571 #endif 13572 } 13573 #ifdef TCP_ACCOUNTING 13574 ts_val = get_cyclecount(); 13575 #endif 13576 acked_amount = acked = (high_seq - tp->snd_una); 13577 if (acked) { 13578 /* 13579 * Clear the probe not answered flag 13580 * since cum-ack moved forward.
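 * (Forward progress is the response we were waiting on, so a later
 * forced probe can again be timed with full confidence.)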
13581 */ 13582 rack->probe_not_answered = 0; 13583 if (rack->sack_attack_disable == 0) 13584 rack_do_decay(rack); 13585 if (acked >= segsiz) { 13586 /* 13587 * You only get credit for 13588 * MSS and greater (and you get extra 13589 * credit for larger cum-ack moves). 13590 */ 13591 int ac; 13592 13593 ac = acked / segsiz; 13594 rack->r_ctl.ack_count += ac; 13595 counter_u64_add(rack_ack_total, ac); 13596 } 13597 if (rack->r_ctl.ack_count > 0xfff00000) { 13598 /* 13599 * reduce the number to keep us under 13600 * a uint32_t. 13601 */ 13602 rack->r_ctl.ack_count /= 2; 13603 rack->r_ctl.sack_count /= 2; 13604 } 13605 if (tp->t_flags & TF_NEEDSYN) { 13606 /* 13607 * T/TCP: Connection was half-synchronized, and our SYN has 13608 * been ACK'd (so connection is now fully synchronized). Go 13609 * to non-starred state, increment snd_una for ACK of SYN, 13610 * and check if we can do window scaling. 13611 */ 13612 tp->t_flags &= ~TF_NEEDSYN; 13613 tp->snd_una++; 13614 acked_amount = acked = (high_seq - tp->snd_una); 13615 } 13616 if (acked > sbavail(&so->so_snd)) 13617 acked_amount = sbavail(&so->so_snd); 13618 #ifdef NETFLIX_EXP_DETECTION 13619 /* 13620 * We only care on a cum-ack move if we are in a sack-disabled 13621 * state. We have already added in to the ack_count, and we never 13622 * would disable on a cum-ack move, so we only care to do the 13623 * detection if it may "undo" it, i.e. we were in disabled already. 13624 */ 13625 if (rack->sack_attack_disable) 13626 rack_do_detection(tp, rack, acked_amount, segsiz); 13627 #endif 13628 if (IN_FASTRECOVERY(tp->t_flags) && 13629 (rack->rack_no_prr == 0)) 13630 rack_update_prr(tp, rack, acked_amount, high_seq); 13631 if (IN_RECOVERY(tp->t_flags)) { 13632 if (SEQ_LT(high_seq, tp->snd_recover) && 13633 (SEQ_LT(high_seq, tp->snd_max))) { 13634 tcp_rack_partialack(tp); 13635 } else { 13636 rack_post_recovery(tp, high_seq); 13637 recovery = 1; 13638 } 13639 } 13640 /* Handle the rack-log-ack part (sendmap) */ 13641 if ((sbused(&so->so_snd) == 0) && 13642 (acked > acked_amount) && 13643 (tp->t_state >= TCPS_FIN_WAIT_1) && 13644 (tp->t_flags & TF_SENTFIN)) { 13645 /* 13646 * We must be sure our fin 13647 * was sent and acked (we can be 13648 * in FIN_WAIT_1 without having 13649 * sent the fin). 13650 */ 13651 ourfinisacked = 1; 13652 /* 13653 * Lets make sure snd_una is updated 13654 * since most likely acked_amount = 0 (it 13655 * should be). 13656 */ 13657 tp->snd_una = high_seq; 13658 } 13659 /* Did we make a RTO error? 
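 * That is, did a timeout retransmit something that did not need it?
 * If this ack lands while we are still inside the bad-retransmit
 * window (and no timestamps are in play to tell us more), the
 * congestion response is undone via CC_RTO_ERR below.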
*/ 13660 if ((tp->t_flags & TF_PREVVALID) && 13661 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13662 tp->t_flags &= ~TF_PREVVALID; 13663 if (tp->t_rxtshift == 1 && 13664 (int)(ticks - tp->t_badrxtwin) < 0) 13665 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13666 } 13667 /* Handle the data in the socket buffer */ 13668 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13669 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13670 if (acked_amount > 0) { 13671 struct mbuf *mfree; 13672 13673 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13674 SOCKBUF_LOCK(&so->so_snd); 13675 mfree = sbcut_locked(&so->so_snd, acked_amount); 13676 tp->snd_una = high_seq; 13677 /* Note we want to hold the sb lock through the sendmap adjust */ 13678 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13679 /* Wake up the socket if we have room to write more */ 13680 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13681 sowwakeup_locked(so); 13682 m_freem(mfree); 13683 } 13684 /* update progress */ 13685 tp->t_acktime = ticks; 13686 rack_log_progress_event(rack, tp, tp->t_acktime, 13687 PROGRESS_UPDATE, __LINE__); 13688 /* Clear out shifts and such */ 13689 tp->t_rxtshift = 0; 13690 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13691 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13692 rack->rc_tlp_in_progress = 0; 13693 rack->r_ctl.rc_tlp_cnt_out = 0; 13694 /* Send recover and snd_nxt must be dragged along */ 13695 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13696 tp->snd_recover = tp->snd_una; 13697 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13698 tp->snd_nxt = tp->snd_una; 13699 /* 13700 * If the RXT timer is running we want to 13701 * stop it, so we can restart a TLP (or new RXT). 13702 */ 13703 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13704 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13705 #ifdef NETFLIX_HTTP_LOGGING 13706 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13707 #endif 13708 tp->snd_wl2 = high_seq; 13709 tp->t_dupacks = 0; 13710 if (under_pacing && 13711 (rack->use_fixed_rate == 0) && 13712 (rack->in_probe_rtt == 0) && 13713 rack->rc_gp_dyn_mul && 13714 rack->rc_always_pace) { 13715 /* Check if we are dragging bottom */ 13716 rack_check_bottom_drag(tp, rack, so, acked); 13717 } 13718 if (tp->snd_una == tp->snd_max) { 13719 tp->t_flags &= ~TF_PREVVALID; 13720 rack->r_ctl.retran_during_recovery = 0; 13721 rack->r_ctl.dsack_byte_cnt = 0; 13722 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13723 if (rack->r_ctl.rc_went_idle_time == 0) 13724 rack->r_ctl.rc_went_idle_time = 1; 13725 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13726 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13727 tp->t_acktime = 0; 13728 /* Set so we might enter persists... */ 13729 rack->r_wanted_output = 1; 13730 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13731 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13732 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13733 (sbavail(&so->so_snd) == 0) && 13734 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13735 /* 13736 * The socket was gone and the 13737 * peer sent data (not now in the past), time to 13738 * reset him. 
13739 */ 13740 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13741 /* tcp_close will kill the inp pre-log the Reset */ 13742 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13743 #ifdef TCP_ACCOUNTING 13744 rdstc = get_cyclecount(); 13745 if (rdstc > ts_val) { 13746 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13747 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13748 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13749 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13750 } 13751 } 13752 #endif 13753 m_freem(m); 13754 tp = tcp_close(tp); 13755 if (tp == NULL) { 13756 #ifdef TCP_ACCOUNTING 13757 sched_unpin(); 13758 #endif 13759 return (1); 13760 } 13761 /* 13762 * We would normally do drop-with-reset which would 13763 * send back a reset. We can't since we don't have 13764 * all the needed bits. Instead lets arrange for 13765 * a call to tcp_output(). That way since we 13766 * are in the closed state we will generate a reset. 13767 * 13768 * Note if tcp_accounting is on we don't unpin since 13769 * we do that after the goto label. 13770 */ 13771 goto send_out_a_rst; 13772 } 13773 if ((sbused(&so->so_snd) == 0) && 13774 (tp->t_state >= TCPS_FIN_WAIT_1) && 13775 (tp->t_flags & TF_SENTFIN)) { 13776 /* 13777 * If we can't receive any more data, then closing user can 13778 * proceed. Starting the timer is contrary to the 13779 * specification, but if we don't get a FIN we'll hang 13780 * forever. 13781 * 13782 */ 13783 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13784 soisdisconnected(so); 13785 tcp_timer_activate(tp, TT_2MSL, 13786 (tcp_fast_finwait2_recycle ? 13787 tcp_finwait2_timeout : 13788 TP_MAXIDLE(tp))); 13789 } 13790 if (ourfinisacked == 0) { 13791 /* 13792 * We don't change to fin-wait-2 if we have our fin acked 13793 * which means we are probably in TCPS_CLOSING. 13794 */ 13795 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13796 } 13797 } 13798 } 13799 /* Wake up the socket if we have room to write more */ 13800 if (sbavail(&so->so_snd)) { 13801 rack->r_wanted_output = 1; 13802 if (ctf_progress_timeout_check(tp, true)) { 13803 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13804 tp, tick, PROGRESS_DROP, __LINE__); 13805 /* 13806 * We cheat here and don't send a RST, we should send one 13807 * when the pacer drops the connection. 
13808 */ 13809 #ifdef TCP_ACCOUNTING 13810 rdstc = get_cyclecount(); 13811 if (rdstc > ts_val) { 13812 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13813 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13814 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13815 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13816 } 13817 } 13818 sched_unpin(); 13819 #endif 13820 (void)tcp_drop(tp, ETIMEDOUT); 13821 m_freem(m); 13822 return (1); 13823 } 13824 } 13825 if (ourfinisacked) { 13826 switch(tp->t_state) { 13827 case TCPS_CLOSING: 13828 #ifdef TCP_ACCOUNTING 13829 rdstc = get_cyclecount(); 13830 if (rdstc > ts_val) { 13831 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13832 (rdstc - ts_val)); 13833 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13834 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13835 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13836 } 13837 } 13838 sched_unpin(); 13839 #endif 13840 tcp_twstart(tp); 13841 m_freem(m); 13842 return (1); 13843 break; 13844 case TCPS_LAST_ACK: 13845 #ifdef TCP_ACCOUNTING 13846 rdstc = get_cyclecount(); 13847 if (rdstc > ts_val) { 13848 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13849 (rdstc - ts_val)); 13850 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13851 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13852 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13853 } 13854 } 13855 sched_unpin(); 13856 #endif 13857 tp = tcp_close(tp); 13858 ctf_do_drop(m, tp); 13859 return (1); 13860 break; 13861 case TCPS_FIN_WAIT_1: 13862 #ifdef TCP_ACCOUNTING 13863 rdstc = get_cyclecount(); 13864 if (rdstc > ts_val) { 13865 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13866 (rdstc - ts_val)); 13867 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13868 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13869 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13870 } 13871 } 13872 #endif 13873 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13874 soisdisconnected(so); 13875 tcp_timer_activate(tp, TT_2MSL, 13876 (tcp_fast_finwait2_recycle ? 13877 tcp_finwait2_timeout : 13878 TP_MAXIDLE(tp))); 13879 } 13880 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13881 break; 13882 default: 13883 break; 13884 } 13885 } 13886 if (rack->r_fast_output) { 13887 /* 13888 * We re doing fast output.. can we expand that? 
13889 */ 13890 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13891 } 13892 #ifdef TCP_ACCOUNTING 13893 rdstc = get_cyclecount(); 13894 if (rdstc > ts_val) { 13895 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13896 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13897 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13898 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13899 } 13900 } 13901 13902 } else if (win_up_req) { 13903 rdstc = get_cyclecount(); 13904 if (rdstc > ts_val) { 13905 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13906 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13907 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13908 } 13909 } 13910 #endif 13911 } 13912 /* Now is there a next packet, if so we are done */ 13913 m_freem(m); 13914 did_out = 0; 13915 if (nxt_pkt) { 13916 #ifdef TCP_ACCOUNTING 13917 sched_unpin(); 13918 #endif 13919 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13920 return (0); 13921 } 13922 rack_handle_might_revert(tp, rack); 13923 ctf_calc_rwin(so, tp); 13924 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13925 send_out_a_rst: 13926 if (tcp_output(tp) < 0) { 13927 #ifdef TCP_ACCOUNTING 13928 sched_unpin(); 13929 #endif 13930 return (1); 13931 } 13932 did_out = 1; 13933 } 13934 rack_free_trim(rack); 13935 #ifdef TCP_ACCOUNTING 13936 sched_unpin(); 13937 #endif 13938 rack_timer_audit(tp, rack, &so->so_snd); 13939 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13940 return (0); 13941 } 13942 13943 13944 static int 13945 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13946 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13947 int32_t nxt_pkt, struct timeval *tv) 13948 { 13949 #ifdef TCP_ACCOUNTING 13950 uint64_t ts_val; 13951 #endif 13952 int32_t thflags, retval, did_out = 0; 13953 int32_t way_out = 0; 13954 /* 13955 * cts - is the current time from tv (caller gets ts) in microseconds. 13956 * ms_cts - is the current time from tv in milliseconds. 13957 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13958 */ 13959 uint32_t cts, us_cts, ms_cts; 13960 uint32_t tiwin, high_seq; 13961 struct timespec ts; 13962 struct tcpopt to; 13963 struct tcp_rack *rack; 13964 struct rack_sendmap *rsm; 13965 int32_t prev_state = 0; 13966 #ifdef TCP_ACCOUNTING 13967 int ack_val_set = 0xf; 13968 #endif 13969 int nsegs; 13970 /* 13971 * tv passed from common code is from either M_TSTMP_LRO or 13972 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13973 */ 13974 rack = (struct tcp_rack *)tp->t_fb_ptr; 13975 if (m->m_flags & M_ACKCMP) { 13976 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13977 } 13978 if (m->m_flags & M_ACKCMP) { 13979 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 13980 } 13981 cts = tcp_tv_to_usectick(tv); 13982 ms_cts = tcp_tv_to_mssectick(tv); 13983 nsegs = m->m_pkthdr.lro_nsegs; 13984 counter_u64_add(rack_proc_non_comp_ack, 1); 13985 thflags = tcp_get_flags(th); 13986 #ifdef TCP_ACCOUNTING 13987 sched_pin(); 13988 if (thflags & TH_ACK) 13989 ts_val = get_cyclecount(); 13990 #endif 13991 if ((m->m_flags & M_TSTMP) || 13992 (m->m_flags & M_TSTMP_LRO)) { 13993 mbuf_tstmp2timespec(m, &ts); 13994 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13995 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13996 } else 13997 rack->r_ctl.act_rcv_time = *tv; 13998 kern_prefetch(rack, &prev_state); 13999 prev_state = 0; 14000 /* 14001 * Unscale the window into a 32-bit value. For the SYN_SENT state 14002 * the scale is zero. 14003 */ 14004 tiwin = th->th_win << tp->snd_scale; 14005 #ifdef TCP_ACCOUNTING 14006 if (thflags & TH_ACK) { 14007 /* 14008 * We have a tradeoff here. We can either do what we are 14009 * doing i.e. pinning to this CPU and then doing the accounting 14010 * <or> we could do a critical enter, setup the rdtsc and cpu 14011 * as in below, and then validate we are on the same CPU on 14012 * exit. I have choosen to not do the critical enter since 14013 * that often will gain you a context switch, and instead lock 14014 * us (line above this if) to the same CPU with sched_pin(). This 14015 * means we may be context switched out for a higher priority 14016 * interupt but we won't be moved to another CPU. 14017 * 14018 * If this occurs (which it won't very often since we most likely 14019 * are running this code in interupt context and only a higher 14020 * priority will bump us ... clock?) we will falsely add in 14021 * to the time the interupt processing time plus the ack processing 14022 * time. This is ok since its a rare event. 14023 */ 14024 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14025 ctf_fixed_maxseg(tp)); 14026 } 14027 #endif 14028 /* 14029 * Parse options on any incoming segment. 14030 */ 14031 memset(&to, 0, sizeof(to)); 14032 tcp_dooptions(&to, (u_char *)(th + 1), 14033 (th->th_off << 2) - sizeof(struct tcphdr), 14034 (thflags & TH_SYN) ? TO_SYN : 0); 14035 NET_EPOCH_ASSERT(); 14036 INP_WLOCK_ASSERT(tp->t_inpcb); 14037 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14038 __func__)); 14039 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14040 __func__)); 14041 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14042 (tp->t_flags & TF_GPUTINPROG)) { 14043 /* 14044 * We have a goodput in progress 14045 * and we have entered a late state. 14046 * Do we have enough data in the sb 14047 * to handle the GPUT request? 14048 */ 14049 uint32_t bytes; 14050 14051 bytes = tp->gput_ack - tp->gput_seq; 14052 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14053 bytes += tp->gput_seq - tp->snd_una; 14054 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14055 /* 14056 * There are not enough bytes in the socket 14057 * buffer that have been sent to cover this 14058 * measurement. Cancel it. 
14059 */ 14060 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14061 rack->r_ctl.rc_gp_srtt /*flex1*/, 14062 tp->gput_seq, 14063 0, 0, 18, __LINE__, NULL, 0); 14064 tp->t_flags &= ~TF_GPUTINPROG; 14065 } 14066 } 14067 high_seq = th->th_ack; 14068 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14069 union tcp_log_stackspecific log; 14070 struct timeval ltv; 14071 #ifdef NETFLIX_HTTP_LOGGING 14072 struct http_sendfile_track *http_req; 14073 14074 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14075 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14076 } else { 14077 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14078 } 14079 #endif 14080 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14081 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14082 if (rack->rack_no_prr == 0) 14083 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14084 else 14085 log.u_bbr.flex1 = 0; 14086 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14087 log.u_bbr.use_lt_bw <<= 1; 14088 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14089 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14090 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14091 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14092 log.u_bbr.flex3 = m->m_flags; 14093 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14094 log.u_bbr.lost = thflags; 14095 log.u_bbr.pacing_gain = 0x1; 14096 #ifdef TCP_ACCOUNTING 14097 log.u_bbr.cwnd_gain = ack_val_set; 14098 #endif 14099 log.u_bbr.flex7 = 2; 14100 if (m->m_flags & M_TSTMP) { 14101 /* Record the hardware timestamp if present */ 14102 mbuf_tstmp2timespec(m, &ts); 14103 ltv.tv_sec = ts.tv_sec; 14104 ltv.tv_usec = ts.tv_nsec / 1000; 14105 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 14106 } else if (m->m_flags & M_TSTMP_LRO) { 14107 /* Record the LRO arrival timestamp */ 14108 mbuf_tstmp2timespec(m, &ts); 14109 ltv.tv_sec = ts.tv_sec; 14110 ltv.tv_usec = ts.tv_nsec / 1000; 14111 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 14112 } 14113 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 14114 /* Log the rcv time */ 14115 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14116 #ifdef NETFLIX_HTTP_LOGGING 14117 log.u_bbr.applimited = tp->t_http_closed; 14118 log.u_bbr.applimited <<= 8; 14119 log.u_bbr.applimited |= tp->t_http_open; 14120 log.u_bbr.applimited <<= 8; 14121 log.u_bbr.applimited |= tp->t_http_req; 14122 if (http_req) { 14123 /* Copy out any client req info */ 14124 /* seconds */ 14125 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14126 /* useconds */ 14127 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14128 log.u_bbr.rttProp = http_req->timestamp; 14129 log.u_bbr.cur_del_rate = http_req->start; 14130 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14131 log.u_bbr.flex8 |= 1; 14132 } else { 14133 log.u_bbr.flex8 |= 2; 14134 log.u_bbr.bw_inuse = http_req->end; 14135 } 14136 log.u_bbr.flex6 = http_req->start_seq; 14137 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14138 log.u_bbr.flex8 |= 4; 14139 log.u_bbr.epoch = http_req->end_seq; 14140 } 14141 } 14142 #endif 14143 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14144 tlen, &log, true, &ltv); 14145 } 14146 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14147 way_out = 4; 14148 retval = 0; 14149 m_freem(m); 14150 goto done_with_input; 14151 } 14152 /* 14153 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14154 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9.
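 * The acceptable range is ISS < SEG.ACK <= SND.MAX; anything outside
 * of it is answered with a reset below.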
14155 */ 14156 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14157 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14158 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14159 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14160 #ifdef TCP_ACCOUNTING 14161 sched_unpin(); 14162 #endif 14163 return (1); 14164 } 14165 /* 14166 * If timestamps were negotiated during SYN/ACK and a 14167 * segment without a timestamp is received, silently drop 14168 * the segment, unless it is a RST segment or missing timestamps are 14169 * tolerated. 14170 * See section 3.2 of RFC 7323. 14171 */ 14172 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14173 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14174 way_out = 5; 14175 retval = 0; 14176 m_freem(m); 14177 goto done_with_input; 14178 } 14179 14180 /* 14181 * Segment received on connection. Reset idle time and keep-alive 14182 * timer. XXX: This should be done after segment validation to 14183 * ignore broken/spoofed segs. 14184 */ 14185 if (tp->t_idle_reduce && 14186 (tp->snd_max == tp->snd_una) && 14187 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14188 counter_u64_add(rack_input_idle_reduces, 1); 14189 rack_cc_after_idle(rack, tp); 14190 } 14191 tp->t_rcvtime = ticks; 14192 #ifdef STATS 14193 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14194 #endif 14195 if (tiwin > rack->r_ctl.rc_high_rwnd) 14196 rack->r_ctl.rc_high_rwnd = tiwin; 14197 /* 14198 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14199 * this to occur after we've validated the segment. 14200 */ 14201 if (tcp_ecn_input_segment(tp, thflags, iptos)) 14202 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14203 14204 /* 14205 * If echoed timestamp is later than the current time, fall back to 14206 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14207 * were used when this connection was established. 14208 */ 14209 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14210 to.to_tsecr -= tp->ts_offset; 14211 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14212 to.to_tsecr = 0; 14213 } 14214 14215 /* 14216 * If its the first time in we need to take care of options and 14217 * verify we can do SACK for rack! 14218 */ 14219 if (rack->r_state == 0) { 14220 /* Should be init'd by rack_init() */ 14221 KASSERT(rack->rc_inp != NULL, 14222 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14223 if (rack->rc_inp == NULL) { 14224 rack->rc_inp = tp->t_inpcb; 14225 } 14226 14227 /* 14228 * Process options only when we get SYN/ACK back. The SYN 14229 * case for incoming connections is handled in tcp_syncache. 14230 * According to RFC1323 the window field in a SYN (i.e., a 14231 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14232 * this is traditional behavior, may need to be cleaned up. 14233 */ 14234 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14235 /* Handle parallel SYN for ECN */ 14236 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14237 if ((to.to_flags & TOF_SCALE) && 14238 (tp->t_flags & TF_REQ_SCALE)) { 14239 tp->t_flags |= TF_RCVD_SCALE; 14240 tp->snd_scale = to.to_wscale; 14241 } else 14242 tp->t_flags &= ~TF_REQ_SCALE; 14243 /* 14244 * Initial send window. It will be updated with the 14245 * next incoming segment to the scaled value. 
14246 */ 14247 tp->snd_wnd = th->th_win; 14248 rack_validate_fo_sendwin_up(tp, rack); 14249 if ((to.to_flags & TOF_TS) && 14250 (tp->t_flags & TF_REQ_TSTMP)) { 14251 tp->t_flags |= TF_RCVD_TSTMP; 14252 tp->ts_recent = to.to_tsval; 14253 tp->ts_recent_age = cts; 14254 } else 14255 tp->t_flags &= ~TF_REQ_TSTMP; 14256 if (to.to_flags & TOF_MSS) { 14257 tcp_mss(tp, to.to_mss); 14258 } 14259 if ((tp->t_flags & TF_SACK_PERMIT) && 14260 (to.to_flags & TOF_SACKPERM) == 0) 14261 tp->t_flags &= ~TF_SACK_PERMIT; 14262 if (IS_FASTOPEN(tp->t_flags)) { 14263 if (to.to_flags & TOF_FASTOPEN) { 14264 uint16_t mss; 14265 14266 if (to.to_flags & TOF_MSS) 14267 mss = to.to_mss; 14268 else 14269 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14270 mss = TCP6_MSS; 14271 else 14272 mss = TCP_MSS; 14273 tcp_fastopen_update_cache(tp, mss, 14274 to.to_tfo_len, to.to_tfo_cookie); 14275 } else 14276 tcp_fastopen_disable_path(tp); 14277 } 14278 } 14279 /* 14280 * At this point we are at the initial call. Here we decide 14281 * if we are doing RACK or not. We do this by seeing if 14282 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14283 * The code now does do dup-ack counting so if you don't 14284 * switch back you won't get rack & TLP, but you will still 14285 * get this stack. 14286 */ 14287 14288 if ((rack_sack_not_required == 0) && 14289 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14290 tcp_switch_back_to_default(tp); 14291 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14292 tlen, iptos); 14293 #ifdef TCP_ACCOUNTING 14294 sched_unpin(); 14295 #endif 14296 return (1); 14297 } 14298 tcp_set_hpts(tp->t_inpcb); 14299 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14300 } 14301 if (thflags & TH_FIN) 14302 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14303 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14304 if ((rack->rc_gp_dyn_mul) && 14305 (rack->use_fixed_rate == 0) && 14306 (rack->rc_always_pace)) { 14307 /* Check in on probertt */ 14308 rack_check_probe_rtt(rack, us_cts); 14309 } 14310 rack_clear_rate_sample(rack); 14311 if ((rack->forced_ack) && 14312 ((tcp_get_flags(th) & TH_RST) == 0)) { 14313 rack_handle_probe_response(rack, tiwin, us_cts); 14314 } 14315 /* 14316 * This is the one exception case where we set the rack state 14317 * always. All other times (timers etc) we must have a rack-state 14318 * set (so we assure we have done the checks above for SACK). 14319 */ 14320 rack->r_ctl.rc_rcvtime = cts; 14321 if (rack->r_state != tp->t_state) 14322 rack_set_state(tp, rack); 14323 if (SEQ_GT(th->th_ack, tp->snd_una) && 14324 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14325 kern_prefetch(rsm, &prev_state); 14326 prev_state = rack->r_state; 14327 retval = (*rack->r_substate) (m, th, so, 14328 tp, &to, drop_hdrlen, 14329 tlen, tiwin, thflags, nxt_pkt, iptos); 14330 #ifdef INVARIANTS 14331 if ((retval == 0) && 14332 (tp->t_inpcb == NULL)) { 14333 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14334 retval, tp, prev_state); 14335 } 14336 #endif 14337 if (retval == 0) { 14338 /* 14339 * If retval is 1 the tcb is unlocked and most likely the tp 14340 * is gone. 14341 */ 14342 INP_WLOCK_ASSERT(tp->t_inpcb); 14343 if ((rack->rc_gp_dyn_mul) && 14344 (rack->rc_always_pace) && 14345 (rack->use_fixed_rate == 0) && 14346 rack->in_probe_rtt && 14347 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14348 /* 14349 * If we are going for target, lets recheck before 14350 * we output. 
14351 */ 14352 rack_check_probe_rtt(rack, us_cts); 14353 } 14354 if (rack->set_pacing_done_a_iw == 0) { 14355 /* How much has been acked? */ 14356 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14357 /* We have enough to set in the pacing segment size */ 14358 rack->set_pacing_done_a_iw = 1; 14359 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14360 } 14361 } 14362 tcp_rack_xmit_timer_commit(rack, tp); 14363 #ifdef TCP_ACCOUNTING 14364 /* 14365 * If we set the ack_val_se to what ack processing we are doing 14366 * we also want to track how many cycles we burned. Note 14367 * the bits after tcp_output we let be "free". This is because 14368 * we are also tracking the tcp_output times as well. Note the 14369 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14370 * 0xf cannot be returned and is what we initialize it too to 14371 * indicate we are not doing the tabulations. 14372 */ 14373 if (ack_val_set != 0xf) { 14374 uint64_t crtsc; 14375 14376 crtsc = get_cyclecount(); 14377 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14378 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14379 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14380 } 14381 } 14382 #endif 14383 if (nxt_pkt == 0) { 14384 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14385 do_output_now: 14386 if (tcp_output(tp) < 0) 14387 return (1); 14388 did_out = 1; 14389 } 14390 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14391 rack_free_trim(rack); 14392 } 14393 /* Update any rounds needed */ 14394 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14395 union tcp_log_stackspecific log; 14396 struct timeval tv; 14397 14398 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14399 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14400 log.u_bbr.flex1 = high_seq; 14401 log.u_bbr.flex2 = rack->r_ctl.roundends; 14402 log.u_bbr.flex3 = rack->r_ctl.current_round; 14403 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14404 log.u_bbr.flex8 = 9; 14405 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14406 0, &log, false, NULL, NULL, 0, &tv); 14407 } 14408 /* 14409 * The draft (v3) calls for us to use SEQ_GEQ, but that 14410 * causes issues when we are just going app limited. Lets 14411 * instead use SEQ_GT <or> where its equal but more data 14412 * is outstanding. 14413 */ 14414 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14415 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14416 rack->r_ctl.current_round++; 14417 rack->r_ctl.roundends = tp->snd_max; 14418 if (CC_ALGO(tp)->newround != NULL) { 14419 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14420 } 14421 } 14422 if ((nxt_pkt == 0) && 14423 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14424 (SEQ_GT(tp->snd_max, tp->snd_una) || 14425 (tp->t_flags & TF_DELACK) || 14426 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14427 (tp->t_state <= TCPS_CLOSING)))) { 14428 /* We could not send (probably in the hpts but stopped the timer earlier)? 
*/ 14429 if ((tp->snd_max == tp->snd_una) && 14430 ((tp->t_flags & TF_DELACK) == 0) && 14431 (tcp_in_hpts(rack->rc_inp)) && 14432 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14433 /* keep alive not needed if we are hptsi output yet */ 14434 ; 14435 } else { 14436 int late = 0; 14437 if (tcp_in_hpts(rack->rc_inp)) { 14438 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14439 us_cts = tcp_get_usecs(NULL); 14440 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14441 rack->r_early = 1; 14442 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14443 } else 14444 late = 1; 14445 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14446 } 14447 tcp_hpts_remove(tp->t_inpcb); 14448 } 14449 if (late && (did_out == 0)) { 14450 /* 14451 * We are late in the sending 14452 * and we did not call the output 14453 * (this probably should not happen). 14454 */ 14455 goto do_output_now; 14456 } 14457 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14458 } 14459 way_out = 1; 14460 } else if (nxt_pkt == 0) { 14461 /* Do we have the correct timer running? */ 14462 rack_timer_audit(tp, rack, &so->so_snd); 14463 way_out = 2; 14464 } 14465 done_with_input: 14466 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14467 if (did_out) 14468 rack->r_wanted_output = 0; 14469 #ifdef INVARIANTS 14470 if (tp->t_inpcb == NULL) { 14471 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14472 did_out, 14473 retval, tp, prev_state); 14474 } 14475 #endif 14476 #ifdef TCP_ACCOUNTING 14477 } else { 14478 /* 14479 * Track the time (see above). 14480 */ 14481 if (ack_val_set != 0xf) { 14482 uint64_t crtsc; 14483 14484 crtsc = get_cyclecount(); 14485 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14486 /* 14487 * Note we *DO NOT* increment the per-tcb counters since 14488 * in the else the TP may be gone!! 14489 */ 14490 } 14491 #endif 14492 } 14493 #ifdef TCP_ACCOUNTING 14494 sched_unpin(); 14495 #endif 14496 return (retval); 14497 } 14498 14499 void 14500 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14501 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14502 { 14503 struct timeval tv; 14504 14505 /* First lets see if we have old packets */ 14506 if (tp->t_in_pkt) { 14507 if (ctf_do_queued_segments(so, tp, 1)) { 14508 m_freem(m); 14509 return; 14510 } 14511 } 14512 if (m->m_flags & M_TSTMP_LRO) { 14513 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14514 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14515 } else { 14516 /* Should not be should we kassert instead? */ 14517 tcp_get_usecs(&tv); 14518 } 14519 if (rack_do_segment_nounlock(m, th, so, tp, 14520 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14521 INP_WUNLOCK(tp->t_inpcb); 14522 } 14523 } 14524 14525 struct rack_sendmap * 14526 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14527 { 14528 struct rack_sendmap *rsm = NULL; 14529 int32_t idx; 14530 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14531 14532 /* Return the next guy to be re-transmitted */ 14533 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14534 return (NULL); 14535 } 14536 if (tp->t_flags & TF_SENTFIN) { 14537 /* retran the end FIN? 
*/ 14538 return (NULL); 14539 } 14540 /* ok lets look at this one */ 14541 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14542 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14543 return (rsm); 14544 } 14545 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14546 goto check_it; 14547 } 14548 rsm = rack_find_lowest_rsm(rack); 14549 if (rsm == NULL) { 14550 return (NULL); 14551 } 14552 check_it: 14553 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14554 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14555 /* 14556 * No sack so we automatically do the 3 strikes and 14557 * retransmit (no rack timer would be started). 14558 */ 14559 14560 return (rsm); 14561 } 14562 if (rsm->r_flags & RACK_ACKED) { 14563 return (NULL); 14564 } 14565 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14566 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14567 /* Its not yet ready */ 14568 return (NULL); 14569 } 14570 srtt = rack_grab_rtt(tp, rack); 14571 idx = rsm->r_rtr_cnt - 1; 14572 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14573 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14574 if ((tsused == ts_low) || 14575 (TSTMP_LT(tsused, ts_low))) { 14576 /* No time since sending */ 14577 return (NULL); 14578 } 14579 if ((tsused - ts_low) < thresh) { 14580 /* It has not been long enough yet */ 14581 return (NULL); 14582 } 14583 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14584 ((rsm->r_flags & RACK_SACK_PASSED) && 14585 (rack->sack_attack_disable == 0))) { 14586 /* 14587 * We have passed the dup-ack threshold <or> 14588 * a SACK has indicated this is missing. 14589 * Note that if you are a declared attacker 14590 * it is only the dup-ack threshold that 14591 * will cause retransmits. 14592 */ 14593 /* log retransmit reason */ 14594 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14595 rack->r_fast_output = 0; 14596 return (rsm); 14597 } 14598 return (NULL); 14599 } 14600 14601 static void 14602 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14603 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14604 int line, struct rack_sendmap *rsm, uint8_t quality) 14605 { 14606 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14607 union tcp_log_stackspecific log; 14608 struct timeval tv; 14609 14610 memset(&log, 0, sizeof(log)); 14611 log.u_bbr.flex1 = slot; 14612 log.u_bbr.flex2 = len; 14613 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14614 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14615 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14616 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14617 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14618 log.u_bbr.use_lt_bw <<= 1; 14619 log.u_bbr.use_lt_bw |= rack->r_late; 14620 log.u_bbr.use_lt_bw <<= 1; 14621 log.u_bbr.use_lt_bw |= rack->r_early; 14622 log.u_bbr.use_lt_bw <<= 1; 14623 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14624 log.u_bbr.use_lt_bw <<= 1; 14625 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14626 log.u_bbr.use_lt_bw <<= 1; 14627 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14628 log.u_bbr.use_lt_bw <<= 1; 14629 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14630 log.u_bbr.use_lt_bw <<= 1; 14631 log.u_bbr.use_lt_bw |= rack->gp_ready; 14632 log.u_bbr.pkt_epoch = line; 14633 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14634 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14635 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14636 log.u_bbr.bw_inuse = bw_est; 14637 log.u_bbr.delRate = bw; 14638 if (rack->r_ctl.gp_bw == 0) 14639 log.u_bbr.cur_del_rate = 0; 
14640 else 14641 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14642 log.u_bbr.rttProp = len_time; 14643 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14644 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14645 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14646 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14647 /* We are in slow start */ 14648 log.u_bbr.flex7 = 1; 14649 } else { 14650 /* we are on congestion avoidance */ 14651 log.u_bbr.flex7 = 0; 14652 } 14653 log.u_bbr.flex8 = method; 14654 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14655 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14656 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14657 log.u_bbr.cwnd_gain <<= 1; 14658 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14659 log.u_bbr.cwnd_gain <<= 1; 14660 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14661 log.u_bbr.bbr_substate = quality; 14662 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14663 &rack->rc_inp->inp_socket->so_rcv, 14664 &rack->rc_inp->inp_socket->so_snd, 14665 BBR_LOG_HPTSI_CALC, 0, 14666 0, &log, false, &tv); 14667 } 14668 } 14669 14670 static uint32_t 14671 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14672 { 14673 uint32_t new_tso, user_max; 14674 14675 user_max = rack->rc_user_set_max_segs * mss; 14676 if (rack->rc_force_max_seg) { 14677 return (user_max); 14678 } 14679 if (rack->use_fixed_rate && 14680 ((rack->r_ctl.crte == NULL) || 14681 (bw != rack->r_ctl.crte->rate))) { 14682 /* Use the user mss since we are not exactly matched */ 14683 return (user_max); 14684 } 14685 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14686 if (new_tso > user_max) 14687 new_tso = user_max; 14688 return (new_tso); 14689 } 14690 14691 static int32_t 14692 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14693 { 14694 uint64_t lentim, fill_bw; 14695 14696 /* Lets first see if we are full, if so continue with normal rate */ 14697 rack->r_via_fill_cw = 0; 14698 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14699 return (slot); 14700 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14701 return (slot); 14702 if (rack->r_ctl.rc_last_us_rtt == 0) 14703 return (slot); 14704 if (rack->rc_pace_fill_if_rttin_range && 14705 (rack->r_ctl.rc_last_us_rtt >= 14706 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14707 /* The rtt is huge, N * smallest, lets not fill */ 14708 return (slot); 14709 } 14710 /* 14711 * first lets calculate the b/w based on the last us-rtt 14712 * and the sndwnd. 14713 */ 14714 fill_bw = rack->r_ctl.cwnd_to_use; 14715 /* Take the rwnd if its smaller */ 14716 if (fill_bw > rack->rc_tp->snd_wnd) 14717 fill_bw = rack->rc_tp->snd_wnd; 14718 if (rack->r_fill_less_agg) { 14719 /* 14720 * Now take away the inflight (this will reduce our 14721 * aggressiveness and yeah, if we get that much out in 1RTT 14722 * we will have had acks come back and still be behind). 
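 * Either way the resulting byte count is converted into a rate just
 * below; for example a 120000 byte window over a 10000 usec RTT
 * becomes 120000 * 1000000 / 10000 = 12,000,000 bytes per second of
 * fill bandwidth.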
14723 */ 14724 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14725 } 14726 /* Now lets make it into a b/w */ 14727 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14728 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14729 /* We are below the min b/w */ 14730 if (non_paced) 14731 *rate_wanted = fill_bw; 14732 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14733 return (slot); 14734 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14735 fill_bw = rack->r_ctl.bw_rate_cap; 14736 rack->r_via_fill_cw = 1; 14737 if (rack->r_rack_hw_rate_caps && 14738 (rack->r_ctl.crte != NULL)) { 14739 uint64_t high_rate; 14740 14741 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14742 if (fill_bw > high_rate) { 14743 /* We are capping bw at the highest rate table entry */ 14744 if (*rate_wanted > high_rate) { 14745 /* The original rate was also capped */ 14746 rack->r_via_fill_cw = 0; 14747 } 14748 rack_log_hdwr_pacing(rack, 14749 fill_bw, high_rate, __LINE__, 14750 0, 3); 14751 fill_bw = high_rate; 14752 if (capped) 14753 *capped = 1; 14754 } 14755 } else if ((rack->r_ctl.crte == NULL) && 14756 (rack->rack_hdrw_pacing == 0) && 14757 (rack->rack_hdw_pace_ena) && 14758 rack->r_rack_hw_rate_caps && 14759 (rack->rack_attempt_hdwr_pace == 0) && 14760 (rack->rc_inp->inp_route.ro_nh != NULL) && 14761 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14762 /* 14763 * Ok we may have a first attempt that is greater than our top rate 14764 * lets check. 14765 */ 14766 uint64_t high_rate; 14767 14768 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14769 if (high_rate) { 14770 if (fill_bw > high_rate) { 14771 fill_bw = high_rate; 14772 if (capped) 14773 *capped = 1; 14774 } 14775 } 14776 } 14777 /* 14778 * Ok fill_bw holds our mythical b/w to fill the cwnd 14779 * in a rtt, what does that time wise equate too? 14780 */ 14781 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14782 lentim /= fill_bw; 14783 *rate_wanted = fill_bw; 14784 if (non_paced || (lentim < slot)) { 14785 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14786 0, lentim, 12, __LINE__, NULL, 0); 14787 return ((int32_t)lentim); 14788 } else 14789 return (slot); 14790 } 14791 14792 static int32_t 14793 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14794 { 14795 uint64_t srtt; 14796 int32_t slot = 0; 14797 int can_start_hw_pacing = 1; 14798 int err; 14799 14800 if (rack->rc_always_pace == 0) { 14801 /* 14802 * We use the most optimistic possible cwnd/srtt for 14803 * sending calculations. This will make our 14804 * calculation anticipate getting more through 14805 * quicker then possible. But thats ok we don't want 14806 * the peer to have a gap in data sending. 14807 */ 14808 uint64_t cwnd, tr_perms = 0; 14809 int32_t reduce = 0; 14810 14811 old_method: 14812 /* 14813 * We keep no precise pacing with the old method 14814 * instead we use the pacer to mitigate bursts. 
14815 */ 14816 if (rack->r_ctl.rc_rack_min_rtt) 14817 srtt = rack->r_ctl.rc_rack_min_rtt; 14818 else 14819 srtt = max(tp->t_srtt, 1); 14820 if (rack->r_ctl.rc_rack_largest_cwnd) 14821 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14822 else 14823 cwnd = rack->r_ctl.cwnd_to_use; 14824 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14825 tr_perms = (cwnd * 1000) / srtt; 14826 if (tr_perms == 0) { 14827 tr_perms = ctf_fixed_maxseg(tp); 14828 } 14829 /* 14830 * Calculate how long this will take to drain, if 14831 * the calculation comes out to zero, thats ok we 14832 * will use send_a_lot to possibly spin around for 14833 * more increasing tot_len_this_send to the point 14834 * that its going to require a pace, or we hit the 14835 * cwnd. Which in that case we are just waiting for 14836 * a ACK. 14837 */ 14838 slot = len / tr_perms; 14839 /* Now do we reduce the time so we don't run dry? */ 14840 if (slot && rack_slot_reduction) { 14841 reduce = (slot / rack_slot_reduction); 14842 if (reduce < slot) { 14843 slot -= reduce; 14844 } else 14845 slot = 0; 14846 } 14847 slot *= HPTS_USEC_IN_MSEC; 14848 if (rack->rc_pace_to_cwnd) { 14849 uint64_t rate_wanted = 0; 14850 14851 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14852 rack->rc_ack_can_sendout_data = 1; 14853 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14854 } else 14855 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14856 } else { 14857 uint64_t bw_est, res, lentim, rate_wanted; 14858 uint32_t orig_val, segs, oh; 14859 int capped = 0; 14860 int prev_fill; 14861 14862 if ((rack->r_rr_config == 1) && rsm) { 14863 return (rack->r_ctl.rc_min_to); 14864 } 14865 if (rack->use_fixed_rate) { 14866 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14867 } else if ((rack->r_ctl.init_rate == 0) && 14868 #ifdef NETFLIX_PEAKRATE 14869 (rack->rc_tp->t_maxpeakrate == 0) && 14870 #endif 14871 (rack->r_ctl.gp_bw == 0)) { 14872 /* no way to yet do an estimate */ 14873 bw_est = rate_wanted = 0; 14874 } else { 14875 bw_est = rack_get_bw(rack); 14876 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14877 } 14878 if ((bw_est == 0) || (rate_wanted == 0) || 14879 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14880 /* 14881 * No way yet to make a b/w estimate or 14882 * our raise is set incorrectly. 14883 */ 14884 goto old_method; 14885 } 14886 /* We need to account for all the overheads */ 14887 segs = (len + segsiz - 1) / segsiz; 14888 /* 14889 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14890 * and how much data we put in each packet. Yes this 14891 * means we may be off if we are larger than 1500 bytes 14892 * or smaller. But this just makes us more conservative. 14893 */ 14894 if (rack_hw_rate_min && 14895 (bw_est < rack_hw_rate_min)) 14896 can_start_hw_pacing = 0; 14897 if (ETHERNET_SEGMENT_SIZE > segsiz) 14898 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14899 else 14900 oh = 0; 14901 segs *= oh; 14902 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14903 res = lentim / rate_wanted; 14904 slot = (uint32_t)res; 14905 orig_val = rack->r_ctl.rc_pace_max_segs; 14906 if (rack->r_ctl.crte == NULL) { 14907 /* 14908 * Only do this if we are not hardware pacing 14909 * since if we are doing hw-pacing below we will 14910 * set make a call after setting up or changing 14911 * the rate. 
14912 */ 14913 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14914 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14915 /* 14916 * We lost our rate somehow, this can happen 14917 * if the interface changed underneath us. 14918 */ 14919 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14920 rack->r_ctl.crte = NULL; 14921 /* Lets re-allow attempting to setup pacing */ 14922 rack->rack_hdrw_pacing = 0; 14923 rack->rack_attempt_hdwr_pace = 0; 14924 rack_log_hdwr_pacing(rack, 14925 rate_wanted, bw_est, __LINE__, 14926 0, 6); 14927 } 14928 /* Did we change the TSO size, if so log it */ 14929 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14930 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14931 prev_fill = rack->r_via_fill_cw; 14932 if ((rack->rc_pace_to_cwnd) && 14933 (capped == 0) && 14934 (rack->use_fixed_rate == 0) && 14935 (rack->in_probe_rtt == 0) && 14936 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14937 /* 14938 * We want to pace at our rate *or* faster to 14939 * fill the cwnd to the max if its not full. 14940 */ 14941 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14942 } 14943 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14944 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14945 if ((rack->rack_hdw_pace_ena) && 14946 (can_start_hw_pacing > 0) && 14947 (rack->rack_hdrw_pacing == 0) && 14948 (rack->rack_attempt_hdwr_pace == 0)) { 14949 /* 14950 * Lets attempt to turn on hardware pacing 14951 * if we can. 14952 */ 14953 rack->rack_attempt_hdwr_pace = 1; 14954 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14955 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14956 rate_wanted, 14957 RS_PACING_GEQ, 14958 &err, &rack->r_ctl.crte_prev_rate); 14959 if (rack->r_ctl.crte) { 14960 rack->rack_hdrw_pacing = 1; 14961 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14962 0, rack->r_ctl.crte, 14963 NULL); 14964 rack_log_hdwr_pacing(rack, 14965 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14966 err, 0); 14967 rack->r_ctl.last_hw_bw_req = rate_wanted; 14968 } else { 14969 counter_u64_add(rack_hw_pace_init_fail, 1); 14970 } 14971 } else if (rack->rack_hdrw_pacing && 14972 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14973 /* Do we need to adjust our rate? */ 14974 const struct tcp_hwrate_limit_table *nrte; 14975 14976 if (rack->r_up_only && 14977 (rate_wanted < rack->r_ctl.crte->rate)) { 14978 /** 14979 * We have four possible states here 14980 * having to do with the previous time 14981 * and this time. 14982 * previous | this-time 14983 * A) 0 | 0 -- fill_cw not in the picture 14984 * B) 1 | 0 -- we were doing a fill-cw but now are not 14985 * C) 1 | 1 -- all rates from fill_cw 14986 * D) 0 | 1 -- we were doing non-fill and now we are filling 14987 * 14988 * For case A, C and D we don't allow a drop. But for 14989 * case B where we now our on our steady rate we do 14990 * allow a drop. 14991 * 14992 */ 14993 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14994 goto done_w_hdwr; 14995 } 14996 if ((rate_wanted > rack->r_ctl.crte->rate) || 14997 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 14998 if (rack_hw_rate_to_low && 14999 (bw_est < rack_hw_rate_to_low)) { 15000 /* 15001 * The pacing rate is too low for hardware, but 15002 * do allow hardware pacing to be restarted. 
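 * The path below logs the event, releases the hardware rate with
 * tcp_rel_pacing_rate(), clears crte and both the hdrw/attempt
 * flags so a later pass may try hardware pacing again, and falls
 * back to software pacing segment sizing before jumping to
 * done_w_hdwr.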
15003 */ 15004 rack_log_hdwr_pacing(rack, 15005 bw_est, rack->r_ctl.crte->rate, __LINE__, 15006 0, 5); 15007 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15008 rack->r_ctl.crte = NULL; 15009 rack->rack_attempt_hdwr_pace = 0; 15010 rack->rack_hdrw_pacing = 0; 15011 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15012 goto done_w_hdwr; 15013 } 15014 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15015 rack->rc_tp, 15016 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15017 rate_wanted, 15018 RS_PACING_GEQ, 15019 &err, &rack->r_ctl.crte_prev_rate); 15020 if (nrte == NULL) { 15021 /* Lost the rate */ 15022 rack->rack_hdrw_pacing = 0; 15023 rack->r_ctl.crte = NULL; 15024 rack_log_hdwr_pacing(rack, 15025 rate_wanted, 0, __LINE__, 15026 err, 1); 15027 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15028 counter_u64_add(rack_hw_pace_lost, 1); 15029 } else if (nrte != rack->r_ctl.crte) { 15030 rack->r_ctl.crte = nrte; 15031 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15032 segsiz, 0, 15033 rack->r_ctl.crte, 15034 NULL); 15035 rack_log_hdwr_pacing(rack, 15036 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15037 err, 2); 15038 rack->r_ctl.last_hw_bw_req = rate_wanted; 15039 } 15040 } else { 15041 /* We just need to adjust the segment size */ 15042 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15043 rack_log_hdwr_pacing(rack, 15044 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15045 0, 4); 15046 rack->r_ctl.last_hw_bw_req = rate_wanted; 15047 } 15048 } 15049 } 15050 if ((rack->r_ctl.crte != NULL) && 15051 (rack->r_ctl.crte->rate == rate_wanted)) { 15052 /* 15053 * We need to add a extra if the rates 15054 * are exactly matched. The idea is 15055 * we want the software to make sure the 15056 * queue is empty before adding more, this 15057 * gives us N MSS extra pace times where 15058 * N is our sysctl 15059 */ 15060 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15061 } 15062 done_w_hdwr: 15063 if (rack_limit_time_with_srtt && 15064 (rack->use_fixed_rate == 0) && 15065 #ifdef NETFLIX_PEAKRATE 15066 (rack->rc_tp->t_maxpeakrate == 0) && 15067 #endif 15068 (rack->rack_hdrw_pacing == 0)) { 15069 /* 15070 * Sanity check, we do not allow the pacing delay 15071 * to be longer than the SRTT of the path. If it is 15072 * a slow path, then adding a packet should increase 15073 * the RTT and compensate for this i.e. the srtt will 15074 * be greater so the allowed pacing time will be greater. 15075 * 15076 * Note this restriction is not for where a peak rate 15077 * is set, we are doing fixed pacing or hardware pacing. 15078 */ 15079 if (rack->rc_tp->t_srtt) 15080 srtt = rack->rc_tp->t_srtt; 15081 else 15082 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15083 if (srtt < (uint64_t)slot) { 15084 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15085 slot = srtt; 15086 } 15087 } 15088 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15089 } 15090 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15091 /* 15092 * If this rate is seeing enobufs when it 15093 * goes to send then either the nic is out 15094 * of gas or we are mis-estimating the time 15095 * somehow and not letting the queue empty 15096 * completely. Lets add to the pacing time. 
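 * The boost applied below is simply
 *   crte->time_between * rack_enobuf_hw_boost_mult
 * clamped to the [rack_enobuf_hw_min, rack_enobuf_hw_max] range and
 * then added to the pacing slot.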
15097 */ 15098 int hw_boost_delay; 15099 15100 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15101 if (hw_boost_delay > rack_enobuf_hw_max) 15102 hw_boost_delay = rack_enobuf_hw_max; 15103 else if (hw_boost_delay < rack_enobuf_hw_min) 15104 hw_boost_delay = rack_enobuf_hw_min; 15105 slot += hw_boost_delay; 15106 } 15107 return (slot); 15108 } 15109 15110 static void 15111 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15112 tcp_seq startseq, uint32_t sb_offset) 15113 { 15114 struct rack_sendmap *my_rsm = NULL; 15115 struct rack_sendmap fe; 15116 15117 if (tp->t_state < TCPS_ESTABLISHED) { 15118 /* 15119 * We don't start any measurements if we are 15120 * not at least established. 15121 */ 15122 return; 15123 } 15124 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15125 /* 15126 * We will get no more data into the SB 15127 * this means we need to have the data available 15128 * before we start a measurement. 15129 */ 15130 15131 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15132 max(rc_init_window(rack), 15133 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15134 /* Nope not enough data */ 15135 return; 15136 } 15137 } 15138 tp->t_flags |= TF_GPUTINPROG; 15139 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15140 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15141 tp->gput_seq = startseq; 15142 rack->app_limited_needs_set = 0; 15143 if (rack->in_probe_rtt) 15144 rack->measure_saw_probe_rtt = 1; 15145 else if ((rack->measure_saw_probe_rtt) && 15146 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15147 rack->measure_saw_probe_rtt = 0; 15148 if (rack->rc_gp_filled) 15149 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15150 else { 15151 /* Special case initial measurement */ 15152 struct timeval tv; 15153 15154 tp->gput_ts = tcp_get_usecs(&tv); 15155 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15156 } 15157 /* 15158 * We take a guess out into the future, 15159 * if we have no measurement and no 15160 * initial rate, we measure the first 15161 * initial-windows worth of data to 15162 * speed up getting some GP measurement and 15163 * thus start pacing. 15164 */ 15165 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15166 rack->app_limited_needs_set = 1; 15167 tp->gput_ack = startseq + max(rc_init_window(rack), 15168 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15169 rack_log_pacing_delay_calc(rack, 15170 tp->gput_seq, 15171 tp->gput_ack, 15172 0, 15173 tp->gput_ts, 15174 rack->r_ctl.rc_app_limited_cnt, 15175 9, 15176 __LINE__, NULL, 0); 15177 return; 15178 } 15179 if (sb_offset) { 15180 /* 15181 * We are out somewhere in the sb 15182 * can we use the already outstanding data? 15183 */ 15184 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15185 /* 15186 * Yes first one is good and in this case 15187 * the tp->gput_ts is correctly set based on 15188 * the last ack that arrived (no need to 15189 * set things up when an ack comes in). 15190 */ 15191 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15192 if ((my_rsm == NULL) || 15193 (my_rsm->r_rtr_cnt != 1)) { 15194 /* retransmission? */ 15195 goto use_latest; 15196 } 15197 } else { 15198 if (rack->r_ctl.rc_first_appl == NULL) { 15199 /* 15200 * If rc_first_appl is NULL 15201 * then the cnt should be 0. 15202 * This is probably an error, maybe 15203 * a KASSERT would be approprate. 
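 * A minimal sketch of such an assertion (purely illustrative, not
 * compiled in) could be:
 *   KASSERT(rack->r_ctl.rc_first_appl != NULL,
 *       ("app limited cnt %u but rc_first_appl is NULL",
 *        rack->r_ctl.rc_app_limited_cnt));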
15204 */ 15205 goto use_latest; 15206 } 15207 /* 15208 * If we have a marker pointer to the last one that is 15209 * app limited we can use that, but we need to set 15210 * things up so that when it gets ack'ed we record 15211 * the ack time (if its not already acked). 15212 */ 15213 rack->app_limited_needs_set = 1; 15214 /* 15215 * We want to get to the rsm that is either 15216 * next with space i.e. over 1 MSS or the one 15217 * after that (after the app-limited). 15218 */ 15219 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15220 rack->r_ctl.rc_first_appl); 15221 if (my_rsm) { 15222 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15223 /* Have to use the next one */ 15224 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15225 my_rsm); 15226 else { 15227 /* Use after the first MSS of it is acked */ 15228 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15229 goto start_set; 15230 } 15231 } 15232 if ((my_rsm == NULL) || 15233 (my_rsm->r_rtr_cnt != 1)) { 15234 /* 15235 * Either its a retransmit or 15236 * the last is the app-limited one. 15237 */ 15238 goto use_latest; 15239 } 15240 } 15241 tp->gput_seq = my_rsm->r_start; 15242 start_set: 15243 if (my_rsm->r_flags & RACK_ACKED) { 15244 /* 15245 * This one has been acked use the arrival ack time 15246 */ 15247 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15248 rack->app_limited_needs_set = 0; 15249 } 15250 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15251 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15252 rack_log_pacing_delay_calc(rack, 15253 tp->gput_seq, 15254 tp->gput_ack, 15255 (uint64_t)my_rsm, 15256 tp->gput_ts, 15257 rack->r_ctl.rc_app_limited_cnt, 15258 9, 15259 __LINE__, NULL, 0); 15260 return; 15261 } 15262 15263 use_latest: 15264 /* 15265 * We don't know how long we may have been 15266 * idle or if this is the first-send. Lets 15267 * setup the flag so we will trim off 15268 * the first ack'd data so we get a true 15269 * measurement. 15270 */ 15271 rack->app_limited_needs_set = 1; 15272 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15273 /* Find this guy so we can pull the send time */ 15274 fe.r_start = startseq; 15275 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15276 if (my_rsm) { 15277 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15278 if (my_rsm->r_flags & RACK_ACKED) { 15279 /* 15280 * Unlikely since its probably what was 15281 * just transmitted (but I am paranoid). 15282 */ 15283 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15284 rack->app_limited_needs_set = 0; 15285 } 15286 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15287 /* This also is unlikely */ 15288 tp->gput_seq = my_rsm->r_start; 15289 } 15290 } else { 15291 /* 15292 * TSNH unless we have some send-map limit, 15293 * and even at that it should not be hitting 15294 * that limit (we should have stopped sending). 
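 * In that unlikely case we simply stamp the measurement's output
 * time from microuptime() below and carry on.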
15295 */ 15296 struct timeval tv; 15297 15298 microuptime(&tv); 15299 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15300 } 15301 rack_log_pacing_delay_calc(rack, 15302 tp->gput_seq, 15303 tp->gput_ack, 15304 (uint64_t)my_rsm, 15305 tp->gput_ts, 15306 rack->r_ctl.rc_app_limited_cnt, 15307 9, __LINE__, NULL, 0); 15308 } 15309 15310 static inline uint32_t 15311 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15312 uint32_t avail, int32_t sb_offset) 15313 { 15314 uint32_t len; 15315 uint32_t sendwin; 15316 15317 if (tp->snd_wnd > cwnd_to_use) 15318 sendwin = cwnd_to_use; 15319 else 15320 sendwin = tp->snd_wnd; 15321 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15322 /* We never want to go over our peers rcv-window */ 15323 len = 0; 15324 } else { 15325 uint32_t flight; 15326 15327 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15328 if (flight >= sendwin) { 15329 /* 15330 * We have in flight what we are allowed by cwnd (if 15331 * it was rwnd blocking it would have hit above out 15332 * >= tp->snd_wnd). 15333 */ 15334 return (0); 15335 } 15336 len = sendwin - flight; 15337 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15338 /* We would send too much (beyond the rwnd) */ 15339 len = tp->snd_wnd - ctf_outstanding(tp); 15340 } 15341 if ((len + sb_offset) > avail) { 15342 /* 15343 * We don't have that much in the SB, how much is 15344 * there? 15345 */ 15346 len = avail - sb_offset; 15347 } 15348 } 15349 return (len); 15350 } 15351 15352 static void 15353 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15354 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15355 int rsm_is_null, int optlen, int line, uint16_t mode) 15356 { 15357 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15358 union tcp_log_stackspecific log; 15359 struct timeval tv; 15360 15361 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15362 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15363 log.u_bbr.flex1 = error; 15364 log.u_bbr.flex2 = flags; 15365 log.u_bbr.flex3 = rsm_is_null; 15366 log.u_bbr.flex4 = ipoptlen; 15367 log.u_bbr.flex5 = tp->rcv_numsacks; 15368 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15369 log.u_bbr.flex7 = optlen; 15370 log.u_bbr.flex8 = rack->r_fsb_inited; 15371 log.u_bbr.applimited = rack->r_fast_output; 15372 log.u_bbr.bw_inuse = rack_get_bw(rack); 15373 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15374 log.u_bbr.cwnd_gain = mode; 15375 log.u_bbr.pkts_out = orig_len; 15376 log.u_bbr.lt_epoch = len; 15377 log.u_bbr.delivered = line; 15378 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15380 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15381 len, &log, false, NULL, NULL, 0, &tv); 15382 } 15383 } 15384 15385 15386 static struct mbuf * 15387 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15388 struct rack_fast_send_blk *fsb, 15389 int32_t seglimit, int32_t segsize, int hw_tls) 15390 { 15391 #ifdef KERN_TLS 15392 struct ktls_session *tls, *ntls; 15393 #ifdef INVARIANTS 15394 struct mbuf *start; 15395 #endif 15396 #endif 15397 struct mbuf *m, *n, **np, *smb; 15398 struct mbuf *top; 15399 int32_t off, soff; 15400 int32_t len = *plen; 15401 int32_t fragsize; 15402 int32_t len_cp = 0; 15403 uint32_t mlen, frags; 15404 15405 soff = off = the_off; 15406 smb = m = the_m; 15407 np = ⊤ 15408 top = NULL; 15409 #ifdef KERN_TLS 15410 if (hw_tls && (m->m_flags & M_EXTPG)) 15411 tls = m->m_epg_tls; 
15412 else 15413 tls = NULL; 15414 #ifdef INVARIANTS 15415 start = m; 15416 #endif 15417 #endif 15418 while (len > 0) { 15419 if (m == NULL) { 15420 *plen = len_cp; 15421 break; 15422 } 15423 #ifdef KERN_TLS 15424 if (hw_tls) { 15425 if (m->m_flags & M_EXTPG) 15426 ntls = m->m_epg_tls; 15427 else 15428 ntls = NULL; 15429 15430 /* 15431 * Avoid mixing TLS records with handshake 15432 * data or TLS records from different 15433 * sessions. 15434 */ 15435 if (tls != ntls) { 15436 MPASS(m != start); 15437 *plen = len_cp; 15438 break; 15439 } 15440 } 15441 #endif 15442 mlen = min(len, m->m_len - off); 15443 if (seglimit) { 15444 /* 15445 * For M_EXTPG mbufs, add 3 segments 15446 * + 1 in case we are crossing page boundaries 15447 * + 2 in case the TLS hdr/trailer are used 15448 * It is cheaper to just add the segments 15449 * than it is to take the cache miss to look 15450 * at the mbuf ext_pgs state in detail. 15451 */ 15452 if (m->m_flags & M_EXTPG) { 15453 fragsize = min(segsize, PAGE_SIZE); 15454 frags = 3; 15455 } else { 15456 fragsize = segsize; 15457 frags = 0; 15458 } 15459 15460 /* Break if we really can't fit anymore. */ 15461 if ((frags + 1) >= seglimit) { 15462 *plen = len_cp; 15463 break; 15464 } 15465 15466 /* 15467 * Reduce size if you can't copy the whole 15468 * mbuf. If we can't copy the whole mbuf, also 15469 * adjust len so the loop will end after this 15470 * mbuf. 15471 */ 15472 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15473 mlen = (seglimit - frags - 1) * fragsize; 15474 len = mlen; 15475 *plen = len_cp + len; 15476 } 15477 frags += howmany(mlen, fragsize); 15478 if (frags == 0) 15479 frags++; 15480 seglimit -= frags; 15481 KASSERT(seglimit > 0, 15482 ("%s: seglimit went too low", __func__)); 15483 } 15484 n = m_get(M_NOWAIT, m->m_type); 15485 *np = n; 15486 if (n == NULL) 15487 goto nospace; 15488 n->m_len = mlen; 15489 soff += mlen; 15490 len_cp += n->m_len; 15491 if (m->m_flags & (M_EXT|M_EXTPG)) { 15492 n->m_data = m->m_data + off; 15493 mb_dupcl(n, m); 15494 } else { 15495 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15496 (u_int)n->m_len); 15497 } 15498 len -= n->m_len; 15499 off = 0; 15500 m = m->m_next; 15501 np = &n->m_next; 15502 if (len || (soff == smb->m_len)) { 15503 /* 15504 * We have more so we move forward or 15505 * we have consumed the entire mbuf and 15506 * len has fell to 0. 15507 */ 15508 soff = 0; 15509 smb = m; 15510 } 15511 15512 } 15513 if (fsb != NULL) { 15514 fsb->m = smb; 15515 fsb->off = soff; 15516 if (smb) { 15517 /* 15518 * Save off the size of the mbuf. We do 15519 * this so that we can recognize when it 15520 * has been trimmed by sbcut() as acks 15521 * come in. 15522 */ 15523 fsb->o_m_len = smb->m_len; 15524 } else { 15525 /* 15526 * This is the case where the next mbuf went to NULL. This 15527 * means with this copy we have sent everything in the sb. 15528 * In theory we could clear the fast_output flag, but lets 15529 * not since its possible that we could get more added 15530 * and acks that call the extend function which would let 15531 * us send more. 15532 */ 15533 fsb->o_m_len = 0; 15534 } 15535 } 15536 return (top); 15537 nospace: 15538 if (top) 15539 m_freem(top); 15540 return (NULL); 15541 15542 } 15543 15544 /* 15545 * This is a copy of m_copym(), taking the TSO segment size/limit 15546 * constraints into account, and advancing the sndptr as it goes. 
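 *
 * The fsb remembers the mbuf we stopped in (fsb.m), the offset into
 * it (fsb.off) and its length at that time (fsb.o_m_len); if acks
 * have since trimmed the front of that mbuf via sbcut(), the
 * difference between o_m_len and the current m_len is subtracted
 * from the saved offset before the copy starts.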
15547  */
15548 static struct mbuf *
15549 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15550     int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15551 {
15552 	struct mbuf *m, *n;
15553 	int32_t soff;
15554 
15555 	soff = rack->r_ctl.fsb.off;
15556 	m = rack->r_ctl.fsb.m;
15557 	if (rack->r_ctl.fsb.o_m_len > m->m_len) {
15558 		/*
15559 		 * The mbuf had the front of it chopped off by an ack;
15560 		 * we need to adjust the soff/off by that difference.
15561 		 */
15562 		uint32_t delta;
15563 
15564 		delta = rack->r_ctl.fsb.o_m_len - m->m_len;
15565 		soff -= delta;
15566 	} else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
15567 		/*
15568 		 * The mbuf was expanded, probably by
15569 		 * an m_compress. Just update o_m_len.
15570 		 */
15571 		rack->r_ctl.fsb.o_m_len = m->m_len;
15572 	}
15573 	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15574 	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15575 	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15576 		 __FUNCTION__,
15577 		 rack, *plen, m, m->m_len));
15578 	/* Save off the right location before we copy and advance */
15579 	*s_soff = soff;
15580 	*s_mb = rack->r_ctl.fsb.m;
15581 	n = rack_fo_base_copym(m, soff, plen,
15582 	    &rack->r_ctl.fsb,
15583 	    seglimit, segsize, rack->r_ctl.fsb.hw_tls);
15584 	return (n);
15585 }
15586 
15587 static int
15588 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15589 		     uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
15590 {
15591 	/*
15592 	 * Enter the fast retransmit path. We are given that a sched_pin is
15593 	 * in place (if accounting is compiled in) and the cycle count taken
15594 	 * at entry is in ts_val. The concept here is that the rsm
15595 	 * now holds the mbuf offsets and such, so we can directly transmit
15596 	 * without a lot of overhead; the len field is already set to
15597 	 * prohibit us from sending too much (usually it is one MSS).
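 *
 * Any problem along the way (a tunneling-port mismatch, a SYN or
 * RST in the state's outflags, an rsm that carries a FIN, a failed
 * header mbuf allocation, a short copy, or an output error) jumps
 * to the failed label, which frees whatever mbuf was built and
 * returns -1.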
15598 */ 15599 struct ip *ip = NULL; 15600 struct udphdr *udp = NULL; 15601 struct tcphdr *th = NULL; 15602 struct mbuf *m = NULL; 15603 struct inpcb *inp; 15604 uint8_t *cpto; 15605 struct tcp_log_buffer *lgb; 15606 #ifdef TCP_ACCOUNTING 15607 uint64_t crtsc; 15608 int cnt_thru = 1; 15609 #endif 15610 struct tcpopt to; 15611 u_char opt[TCP_MAXOLEN]; 15612 uint32_t hdrlen, optlen; 15613 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15614 uint16_t flags; 15615 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15616 uint32_t if_hw_tsomaxsegsize; 15617 15618 #ifdef INET6 15619 struct ip6_hdr *ip6 = NULL; 15620 15621 if (rack->r_is_v6) { 15622 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15623 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15624 } else 15625 #endif /* INET6 */ 15626 { 15627 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15628 hdrlen = sizeof(struct tcpiphdr); 15629 } 15630 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15631 goto failed; 15632 } 15633 if (doing_tlp) { 15634 /* Its a TLP add the flag, it may already be there but be sure */ 15635 rsm->r_flags |= RACK_TLP; 15636 } else { 15637 /* If it was a TLP it is not not on this retransmit */ 15638 rsm->r_flags &= ~RACK_TLP; 15639 } 15640 startseq = rsm->r_start; 15641 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15642 inp = rack->rc_inp; 15643 to.to_flags = 0; 15644 flags = tcp_outflags[tp->t_state]; 15645 if (flags & (TH_SYN|TH_RST)) { 15646 goto failed; 15647 } 15648 if (rsm->r_flags & RACK_HAS_FIN) { 15649 /* We can't send a FIN here */ 15650 goto failed; 15651 } 15652 if (flags & TH_FIN) { 15653 /* We never send a FIN */ 15654 flags &= ~TH_FIN; 15655 } 15656 if (tp->t_flags & TF_RCVD_TSTMP) { 15657 to.to_tsval = ms_cts + tp->ts_offset; 15658 to.to_tsecr = tp->ts_recent; 15659 to.to_flags = TOF_TS; 15660 } 15661 optlen = tcp_addoptions(&to, opt); 15662 hdrlen += optlen; 15663 udp = rack->r_ctl.fsb.udp; 15664 if (udp) 15665 hdrlen += sizeof(struct udphdr); 15666 if (rack->r_ctl.rc_pace_max_segs) 15667 max_val = rack->r_ctl.rc_pace_max_segs; 15668 else if (rack->rc_user_set_max_segs) 15669 max_val = rack->rc_user_set_max_segs * segsiz; 15670 else 15671 max_val = len; 15672 if ((tp->t_flags & TF_TSO) && 15673 V_tcp_do_tso && 15674 (len > segsiz) && 15675 (tp->t_port == 0)) 15676 tso = 1; 15677 #ifdef INET6 15678 if (MHLEN < hdrlen + max_linkhdr) 15679 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15680 else 15681 #endif 15682 m = m_gethdr(M_NOWAIT, MT_DATA); 15683 if (m == NULL) 15684 goto failed; 15685 m->m_data += max_linkhdr; 15686 m->m_len = hdrlen; 15687 th = rack->r_ctl.fsb.th; 15688 /* Establish the len to send */ 15689 if (len > max_val) 15690 len = max_val; 15691 if ((tso) && (len + optlen > tp->t_maxseg)) { 15692 uint32_t if_hw_tsomax; 15693 int32_t max_len; 15694 15695 /* extract TSO information */ 15696 if_hw_tsomax = tp->t_tsomax; 15697 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15698 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15699 /* 15700 * Check if we should limit by maximum payload 15701 * length: 15702 */ 15703 if (if_hw_tsomax != 0) { 15704 /* compute maximum TSO length */ 15705 max_len = (if_hw_tsomax - hdrlen - 15706 max_linkhdr); 15707 if (max_len <= 0) { 15708 goto failed; 15709 } else if (len > max_len) { 15710 len = max_len; 15711 } 15712 } 15713 if (len <= segsiz) { 15714 /* 15715 * In case there are too many small fragments don't 15716 * use TSO: 15717 */ 15718 tso = 0; 15719 } 15720 } else { 15721 tso = 0; 15722 } 15723 if ((tso == 0) && (len > 
segsiz)) 15724 len = segsiz; 15725 if ((len == 0) || 15726 (len <= MHLEN - hdrlen - max_linkhdr)) { 15727 goto failed; 15728 } 15729 th->th_seq = htonl(rsm->r_start); 15730 th->th_ack = htonl(tp->rcv_nxt); 15731 /* 15732 * The PUSH bit should only be applied 15733 * if the full retransmission is made. If 15734 * we are sending less than this is the 15735 * left hand edge and should not have 15736 * the PUSH bit. 15737 */ 15738 if ((rsm->r_flags & RACK_HAD_PUSH) && 15739 (len == (rsm->r_end - rsm->r_start))) 15740 flags |= TH_PUSH; 15741 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15742 if (th->th_win == 0) { 15743 tp->t_sndzerowin++; 15744 tp->t_flags |= TF_RXWIN0SENT; 15745 } else 15746 tp->t_flags &= ~TF_RXWIN0SENT; 15747 if (rsm->r_flags & RACK_TLP) { 15748 /* 15749 * TLP should not count in retran count, but 15750 * in its own bin 15751 */ 15752 counter_u64_add(rack_tlp_retran, 1); 15753 counter_u64_add(rack_tlp_retran_bytes, len); 15754 } else { 15755 tp->t_sndrexmitpack++; 15756 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15757 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15758 } 15759 #ifdef STATS 15760 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15761 len); 15762 #endif 15763 if (rsm->m == NULL) 15764 goto failed; 15765 if (rsm->orig_m_len != rsm->m->m_len) { 15766 /* Fix up the orig_m_len and possibly the mbuf offset */ 15767 rack_adjust_orig_mlen(rsm); 15768 } 15769 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15770 if (len <= segsiz) { 15771 /* 15772 * Must have ran out of mbufs for the copy 15773 * shorten it to no longer need tso. Lets 15774 * not put on sendalot since we are low on 15775 * mbufs. 15776 */ 15777 tso = 0; 15778 } 15779 if ((m->m_next == NULL) || (len <= 0)){ 15780 goto failed; 15781 } 15782 if (udp) { 15783 if (rack->r_is_v6) 15784 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15785 else 15786 ulen = hdrlen + len - sizeof(struct ip); 15787 udp->uh_ulen = htons(ulen); 15788 } 15789 m->m_pkthdr.rcvif = (struct ifnet *)0; 15790 if (TCPS_HAVERCVDSYN(tp->t_state) && 15791 (tp->t_flags2 & TF2_ECN_PERMIT)) { 15792 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15793 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15794 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15795 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15796 #ifdef INET6 15797 if (rack->r_is_v6) { 15798 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15799 ip6->ip6_flow |= htonl(ect << 20); 15800 } 15801 else 15802 #endif 15803 { 15804 ip->ip_tos &= ~IPTOS_ECN_MASK; 15805 ip->ip_tos |= ect; 15806 } 15807 } 15808 tcp_set_flags(th, flags); 15809 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15810 #ifdef INET6 15811 if (rack->r_is_v6) { 15812 if (tp->t_port) { 15813 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15814 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15815 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15816 th->th_sum = htons(0); 15817 UDPSTAT_INC(udps_opackets); 15818 } else { 15819 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15820 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15821 th->th_sum = in6_cksum_pseudo(ip6, 15822 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15823 0); 15824 } 15825 } 15826 #endif 15827 #if defined(INET6) && defined(INET) 15828 else 15829 #endif 15830 #ifdef INET 15831 { 15832 if (tp->t_port) { 15833 m->m_pkthdr.csum_flags = CSUM_UDP; 15834 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15835 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15836 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15837 th->th_sum = htons(0); 15838 UDPSTAT_INC(udps_opackets); 15839 } else { 15840 m->m_pkthdr.csum_flags = CSUM_TCP; 15841 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15842 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15843 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15844 IPPROTO_TCP + len + optlen)); 15845 } 15846 /* IP version must be set here for ipv4/ipv6 checking later */ 15847 KASSERT(ip->ip_v == IPVERSION, 15848 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15849 } 15850 #endif 15851 if (tso) { 15852 KASSERT(len > tp->t_maxseg - optlen, 15853 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15854 m->m_pkthdr.csum_flags |= CSUM_TSO; 15855 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15856 } 15857 #ifdef INET6 15858 if (rack->r_is_v6) { 15859 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15860 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15861 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15862 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15863 else 15864 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15865 } 15866 #endif 15867 #if defined(INET) && defined(INET6) 15868 else 15869 #endif 15870 #ifdef INET 15871 { 15872 ip->ip_len = htons(m->m_pkthdr.len); 15873 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15874 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15875 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15876 if (tp->t_port == 0 || len < V_tcp_minmss) { 15877 ip->ip_off |= htons(IP_DF); 15878 } 15879 } else { 15880 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15881 } 15882 } 15883 #endif 15884 /* Time to copy in our header */ 15885 cpto = mtod(m, uint8_t *); 15886 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15887 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15888 if (optlen) { 15889 bcopy(opt, th + 1, optlen); 15890 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15891 } else { 15892 th->th_off = sizeof(struct tcphdr) >> 2; 15893 } 15894 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15895 union tcp_log_stackspecific log; 15896 15897 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15898 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15899 if (rack->rack_no_prr) 15900 log.u_bbr.flex1 = 0; 15901 else 15902 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15903 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15904 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15905 log.u_bbr.flex4 = max_val; 15906 log.u_bbr.flex5 = 0; 15907 /* Save off the early/late values */ 15908 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15909 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15910 log.u_bbr.bw_inuse = rack_get_bw(rack); 15911 if (doing_tlp == 0) 15912 log.u_bbr.flex8 = 1; 15913 else 15914 log.u_bbr.flex8 = 2; 15915 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15916 log.u_bbr.flex7 = 55; 15917 log.u_bbr.pkts_out = tp->t_maxseg; 15918 log.u_bbr.timeStamp = cts; 15919 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15920 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15921 log.u_bbr.delivered = 0; 15922 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15923 len, &log, false, NULL, NULL, 0, tv); 15924 } else 15925 lgb = NULL; 15926 #ifdef INET6 15927 if (rack->r_is_v6) { 15928 error = ip6_output(m, NULL, 15929 &inp->inp_route6, 15930 0, NULL, NULL, inp); 15931 } 15932 #endif 15933 #if defined(INET) && defined(INET6) 15934 else 15935 #endif 15936 #ifdef INET 15937 { 15938 error = ip_output(m, 
NULL, 15939 &inp->inp_route, 15940 0, 0, inp); 15941 } 15942 #endif 15943 m = NULL; 15944 if (lgb) { 15945 lgb->tlb_errno = error; 15946 lgb = NULL; 15947 } 15948 if (error) { 15949 goto failed; 15950 } 15951 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15952 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15953 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15954 rack->rc_tlp_in_progress = 1; 15955 rack->r_ctl.rc_tlp_cnt_out++; 15956 } 15957 if (error == 0) { 15958 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15959 if (doing_tlp) { 15960 rack->rc_last_sent_tlp_past_cumack = 0; 15961 rack->rc_last_sent_tlp_seq_valid = 1; 15962 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15963 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15964 } 15965 } 15966 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15967 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15968 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15969 rack->r_ctl.retran_during_recovery += len; 15970 { 15971 int idx; 15972 15973 idx = (len / segsiz) + 3; 15974 if (idx >= TCP_MSS_ACCT_ATIMER) 15975 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15976 else 15977 counter_u64_add(rack_out_size[idx], 1); 15978 } 15979 if (tp->t_rtttime == 0) { 15980 tp->t_rtttime = ticks; 15981 tp->t_rtseq = startseq; 15982 KMOD_TCPSTAT_INC(tcps_segstimed); 15983 } 15984 counter_u64_add(rack_fto_rsm_send, 1); 15985 if (error && (error == ENOBUFS)) { 15986 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15987 if (rack->rc_enobuf < 0x7f) 15988 rack->rc_enobuf++; 15989 if (slot < (10 * HPTS_USEC_IN_MSEC)) 15990 slot = 10 * HPTS_USEC_IN_MSEC; 15991 } else 15992 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 15993 if ((slot == 0) || 15994 (rack->rc_always_pace == 0) || 15995 (rack->r_rr_config == 1)) { 15996 /* 15997 * We have no pacing set or we 15998 * are using old-style rack or 15999 * we are overriden to use the old 1ms pacing. 16000 */ 16001 slot = rack->r_ctl.rc_min_to; 16002 } 16003 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16004 #ifdef TCP_ACCOUNTING 16005 crtsc = get_cyclecount(); 16006 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16007 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16008 } 16009 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16010 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16011 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16012 } 16013 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16014 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16015 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16016 } 16017 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16018 sched_unpin(); 16019 #endif 16020 return (0); 16021 failed: 16022 if (m) 16023 m_free(m); 16024 return (-1); 16025 } 16026 16027 static void 16028 rack_sndbuf_autoscale(struct tcp_rack *rack) 16029 { 16030 /* 16031 * Automatic sizing of send socket buffer. Often the send buffer 16032 * size is not optimally adjusted to the actual network conditions 16033 * at hand (delay bandwidth product). Setting the buffer size too 16034 * small limits throughput on links with high bandwidth and high 16035 * delay (eg. trans-continental/oceanic links). Setting the 16036 * buffer size too big consumes too much real kernel memory, 16037 * especially with many connections on busy servers. 16038 * 16039 * The criteria to step up the send buffer one notch are: 16040 * 1. 
receive window of remote host is larger than send buffer
16041  *    (with a fudge factor of 5/4th);
16042  * 2. send buffer is filled to 7/8th with data (so we actually
16043  *    have data to make use of it);
16044  * 3. send buffer fill has not hit maximal automatic size;
16045  * 4. our send window (slow start and congestion controlled) is
16046  *    larger than sent but unacknowledged data in send buffer.
16047  *
16048  * Note that the rack version moves things much faster since
16049  * we want to avoid hitting cache lines in the rack_fast_output()
16050  * path, so this is called much less often and thus moves
16051  * the SB forward by a percentage.
16052  */
16053 	struct socket *so;
16054 	struct tcpcb *tp;
16055 	uint32_t sendwin, scaleup;
16056 
16057 	tp = rack->rc_tp;
16058 	so = rack->rc_inp->inp_socket;
16059 	sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
16060 	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
16061 		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
16062 		    sbused(&so->so_snd) >=
16063 		    (so->so_snd.sb_hiwat / 8 * 7) &&
16064 		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
16065 		    sendwin >= (sbused(&so->so_snd) -
16066 		    (tp->snd_nxt - tp->snd_una))) {
16067 			if (rack_autosndbuf_inc)
16068 				scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
16069 			else
16070 				scaleup = V_tcp_autosndbuf_inc;
16071 			if (scaleup < V_tcp_autosndbuf_inc)
16072 				scaleup = V_tcp_autosndbuf_inc;
16073 			scaleup += so->so_snd.sb_hiwat;
16074 			if (scaleup > V_tcp_autosndbuf_max)
16075 				scaleup = V_tcp_autosndbuf_max;
16076 			if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread))
16077 				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
16078 		}
16079 	}
16080 }
16081 
16082 static int
16083 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
16084     uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
16085 {
16086 	/*
16087 	 * Enter to do fast output. We are given that the sched_pin is
16088 	 * in place (if accounting is compiled in) and the cycle count
16089 	 * taken at entry is in ts_val. The idea here is that
16090 	 * we know how many more bytes need to be sent (presumably either
16091 	 * during pacing or to fill the cwnd, and that was greater than
16092 	 * the max-burst). We have how much to send and all the info we
16093 	 * need to just send.
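 *
 * Each pass through the again loop below builds a header from the
 * fsb template, copies up to max_val bytes out of the socket buffer
 * with rack_fo_m_copym(), transmits the result and advances
 * snd_max; left_to_send is then reduced and, while a full segment
 * remains (the pacing burst allows it and TSO is not in use), we
 * loop back to again for another send.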
16094 */ 16095 struct ip *ip = NULL; 16096 struct udphdr *udp = NULL; 16097 struct tcphdr *th = NULL; 16098 struct mbuf *m, *s_mb; 16099 struct inpcb *inp; 16100 uint8_t *cpto; 16101 struct tcp_log_buffer *lgb; 16102 #ifdef TCP_ACCOUNTING 16103 uint64_t crtsc; 16104 #endif 16105 struct tcpopt to; 16106 u_char opt[TCP_MAXOLEN]; 16107 uint32_t hdrlen, optlen; 16108 int cnt_thru = 1; 16109 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16110 uint16_t flags; 16111 uint32_t s_soff; 16112 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16113 uint32_t if_hw_tsomaxsegsize; 16114 uint16_t add_flag = RACK_SENT_FP; 16115 #ifdef INET6 16116 struct ip6_hdr *ip6 = NULL; 16117 16118 if (rack->r_is_v6) { 16119 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16120 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16121 } else 16122 #endif /* INET6 */ 16123 { 16124 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16125 hdrlen = sizeof(struct tcpiphdr); 16126 } 16127 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16128 m = NULL; 16129 goto failed; 16130 } 16131 startseq = tp->snd_max; 16132 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16133 inp = rack->rc_inp; 16134 len = rack->r_ctl.fsb.left_to_send; 16135 to.to_flags = 0; 16136 flags = rack->r_ctl.fsb.tcp_flags; 16137 if (tp->t_flags & TF_RCVD_TSTMP) { 16138 to.to_tsval = ms_cts + tp->ts_offset; 16139 to.to_tsecr = tp->ts_recent; 16140 to.to_flags = TOF_TS; 16141 } 16142 optlen = tcp_addoptions(&to, opt); 16143 hdrlen += optlen; 16144 udp = rack->r_ctl.fsb.udp; 16145 if (udp) 16146 hdrlen += sizeof(struct udphdr); 16147 if (rack->r_ctl.rc_pace_max_segs) 16148 max_val = rack->r_ctl.rc_pace_max_segs; 16149 else if (rack->rc_user_set_max_segs) 16150 max_val = rack->rc_user_set_max_segs * segsiz; 16151 else 16152 max_val = len; 16153 if ((tp->t_flags & TF_TSO) && 16154 V_tcp_do_tso && 16155 (len > segsiz) && 16156 (tp->t_port == 0)) 16157 tso = 1; 16158 again: 16159 #ifdef INET6 16160 if (MHLEN < hdrlen + max_linkhdr) 16161 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16162 else 16163 #endif 16164 m = m_gethdr(M_NOWAIT, MT_DATA); 16165 if (m == NULL) 16166 goto failed; 16167 m->m_data += max_linkhdr; 16168 m->m_len = hdrlen; 16169 th = rack->r_ctl.fsb.th; 16170 /* Establish the len to send */ 16171 if (len > max_val) 16172 len = max_val; 16173 if ((tso) && (len + optlen > tp->t_maxseg)) { 16174 uint32_t if_hw_tsomax; 16175 int32_t max_len; 16176 16177 /* extract TSO information */ 16178 if_hw_tsomax = tp->t_tsomax; 16179 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16180 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16181 /* 16182 * Check if we should limit by maximum payload 16183 * length: 16184 */ 16185 if (if_hw_tsomax != 0) { 16186 /* compute maximum TSO length */ 16187 max_len = (if_hw_tsomax - hdrlen - 16188 max_linkhdr); 16189 if (max_len <= 0) { 16190 goto failed; 16191 } else if (len > max_len) { 16192 len = max_len; 16193 } 16194 } 16195 if (len <= segsiz) { 16196 /* 16197 * In case there are too many small fragments don't 16198 * use TSO: 16199 */ 16200 tso = 0; 16201 } 16202 } else { 16203 tso = 0; 16204 } 16205 if ((tso == 0) && (len > segsiz)) 16206 len = segsiz; 16207 if ((len == 0) || 16208 (len <= MHLEN - hdrlen - max_linkhdr)) { 16209 goto failed; 16210 } 16211 sb_offset = tp->snd_max - tp->snd_una; 16212 th->th_seq = htonl(tp->snd_max); 16213 th->th_ack = htonl(tp->rcv_nxt); 16214 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16215 if (th->th_win == 0) { 16216 
tp->t_sndzerowin++; 16217 tp->t_flags |= TF_RXWIN0SENT; 16218 } else 16219 tp->t_flags &= ~TF_RXWIN0SENT; 16220 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16221 KMOD_TCPSTAT_INC(tcps_sndpack); 16222 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16223 #ifdef STATS 16224 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16225 len); 16226 #endif 16227 if (rack->r_ctl.fsb.m == NULL) 16228 goto failed; 16229 16230 /* s_mb and s_soff are saved for rack_log_output */ 16231 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16232 &s_mb, &s_soff); 16233 if (len <= segsiz) { 16234 /* 16235 * Must have ran out of mbufs for the copy 16236 * shorten it to no longer need tso. Lets 16237 * not put on sendalot since we are low on 16238 * mbufs. 16239 */ 16240 tso = 0; 16241 } 16242 if (rack->r_ctl.fsb.rfo_apply_push && 16243 (len == rack->r_ctl.fsb.left_to_send)) { 16244 flags |= TH_PUSH; 16245 add_flag |= RACK_HAD_PUSH; 16246 } 16247 if ((m->m_next == NULL) || (len <= 0)){ 16248 goto failed; 16249 } 16250 if (udp) { 16251 if (rack->r_is_v6) 16252 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16253 else 16254 ulen = hdrlen + len - sizeof(struct ip); 16255 udp->uh_ulen = htons(ulen); 16256 } 16257 m->m_pkthdr.rcvif = (struct ifnet *)0; 16258 if (TCPS_HAVERCVDSYN(tp->t_state) && 16259 (tp->t_flags2 & TF2_ECN_PERMIT)) { 16260 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16261 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16262 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16263 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16264 #ifdef INET6 16265 if (rack->r_is_v6) { 16266 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16267 ip6->ip6_flow |= htonl(ect << 20); 16268 } 16269 else 16270 #endif 16271 { 16272 ip->ip_tos &= ~IPTOS_ECN_MASK; 16273 ip->ip_tos |= ect; 16274 } 16275 } 16276 tcp_set_flags(th, flags); 16277 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16278 #ifdef INET6 16279 if (rack->r_is_v6) { 16280 if (tp->t_port) { 16281 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16282 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16283 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16284 th->th_sum = htons(0); 16285 UDPSTAT_INC(udps_opackets); 16286 } else { 16287 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16288 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16289 th->th_sum = in6_cksum_pseudo(ip6, 16290 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16291 0); 16292 } 16293 } 16294 #endif 16295 #if defined(INET6) && defined(INET) 16296 else 16297 #endif 16298 #ifdef INET 16299 { 16300 if (tp->t_port) { 16301 m->m_pkthdr.csum_flags = CSUM_UDP; 16302 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16303 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16304 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16305 th->th_sum = htons(0); 16306 UDPSTAT_INC(udps_opackets); 16307 } else { 16308 m->m_pkthdr.csum_flags = CSUM_TCP; 16309 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16310 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16311 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16312 IPPROTO_TCP + len + optlen)); 16313 } 16314 /* IP version must be set here for ipv4/ipv6 checking later */ 16315 KASSERT(ip->ip_v == IPVERSION, 16316 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16317 } 16318 #endif 16319 if (tso) { 16320 KASSERT(len > tp->t_maxseg - optlen, 16321 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16322 m->m_pkthdr.csum_flags |= CSUM_TSO; 16323 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16324 } 16325 #ifdef 
INET6 16326 if (rack->r_is_v6) { 16327 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16328 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16329 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16330 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16331 else 16332 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16333 } 16334 #endif 16335 #if defined(INET) && defined(INET6) 16336 else 16337 #endif 16338 #ifdef INET 16339 { 16340 ip->ip_len = htons(m->m_pkthdr.len); 16341 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16342 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16343 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16344 if (tp->t_port == 0 || len < V_tcp_minmss) { 16345 ip->ip_off |= htons(IP_DF); 16346 } 16347 } else { 16348 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16349 } 16350 } 16351 #endif 16352 /* Time to copy in our header */ 16353 cpto = mtod(m, uint8_t *); 16354 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16355 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16356 if (optlen) { 16357 bcopy(opt, th + 1, optlen); 16358 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16359 } else { 16360 th->th_off = sizeof(struct tcphdr) >> 2; 16361 } 16362 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16363 union tcp_log_stackspecific log; 16364 16365 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16366 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16367 if (rack->rack_no_prr) 16368 log.u_bbr.flex1 = 0; 16369 else 16370 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16371 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16372 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16373 log.u_bbr.flex4 = max_val; 16374 log.u_bbr.flex5 = 0; 16375 /* Save off the early/late values */ 16376 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16377 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16378 log.u_bbr.bw_inuse = rack_get_bw(rack); 16379 log.u_bbr.flex8 = 0; 16380 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16381 log.u_bbr.flex7 = 44; 16382 log.u_bbr.pkts_out = tp->t_maxseg; 16383 log.u_bbr.timeStamp = cts; 16384 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16385 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16386 log.u_bbr.delivered = 0; 16387 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16388 len, &log, false, NULL, NULL, 0, tv); 16389 } else 16390 lgb = NULL; 16391 #ifdef INET6 16392 if (rack->r_is_v6) { 16393 error = ip6_output(m, NULL, 16394 &inp->inp_route6, 16395 0, NULL, NULL, inp); 16396 } 16397 #endif 16398 #if defined(INET) && defined(INET6) 16399 else 16400 #endif 16401 #ifdef INET 16402 { 16403 error = ip_output(m, NULL, 16404 &inp->inp_route, 16405 0, 0, inp); 16406 } 16407 #endif 16408 if (lgb) { 16409 lgb->tlb_errno = error; 16410 lgb = NULL; 16411 } 16412 if (error) { 16413 *send_err = error; 16414 m = NULL; 16415 goto failed; 16416 } 16417 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16418 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16419 m = NULL; 16420 if (tp->snd_una == tp->snd_max) { 16421 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16422 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16423 tp->t_acktime = ticks; 16424 } 16425 if (error == 0) 16426 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16427 16428 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16429 tot_len += len; 16430 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16431 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 
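	/*
	 * Everything sent on this path is new data, so advance snd_max
	 * (and keep snd_nxt in step with it) by the bytes just sent.
	 */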
16432 tp->snd_max += len; 16433 tp->snd_nxt = tp->snd_max; 16434 { 16435 int idx; 16436 16437 idx = (len / segsiz) + 3; 16438 if (idx >= TCP_MSS_ACCT_ATIMER) 16439 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16440 else 16441 counter_u64_add(rack_out_size[idx], 1); 16442 } 16443 if (len <= rack->r_ctl.fsb.left_to_send) 16444 rack->r_ctl.fsb.left_to_send -= len; 16445 else 16446 rack->r_ctl.fsb.left_to_send = 0; 16447 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16448 rack->r_fast_output = 0; 16449 rack->r_ctl.fsb.left_to_send = 0; 16450 /* At the end of fast_output scale up the sb */ 16451 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16452 rack_sndbuf_autoscale(rack); 16453 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16454 } 16455 if (tp->t_rtttime == 0) { 16456 tp->t_rtttime = ticks; 16457 tp->t_rtseq = startseq; 16458 KMOD_TCPSTAT_INC(tcps_segstimed); 16459 } 16460 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16461 (max_val > len) && 16462 (tso == 0)) { 16463 max_val -= len; 16464 len = segsiz; 16465 th = rack->r_ctl.fsb.th; 16466 cnt_thru++; 16467 goto again; 16468 } 16469 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16470 counter_u64_add(rack_fto_send, 1); 16471 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16472 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16473 #ifdef TCP_ACCOUNTING 16474 crtsc = get_cyclecount(); 16475 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16476 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16477 } 16478 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16479 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16480 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16481 } 16482 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16483 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16484 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16485 } 16486 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16487 sched_unpin(); 16488 #endif 16489 return (0); 16490 failed: 16491 if (m) 16492 m_free(m); 16493 rack->r_fast_output = 0; 16494 return (-1); 16495 } 16496 16497 static int 16498 rack_output(struct tcpcb *tp) 16499 { 16500 struct socket *so; 16501 uint32_t recwin; 16502 uint32_t sb_offset, s_moff = 0; 16503 int32_t len, error = 0; 16504 uint16_t flags; 16505 struct mbuf *m, *s_mb = NULL; 16506 struct mbuf *mb; 16507 uint32_t if_hw_tsomaxsegcount = 0; 16508 uint32_t if_hw_tsomaxsegsize; 16509 int32_t segsiz, minseg; 16510 long tot_len_this_send = 0; 16511 #ifdef INET 16512 struct ip *ip = NULL; 16513 #endif 16514 #ifdef TCPDEBUG 16515 struct ipovly *ipov = NULL; 16516 #endif 16517 struct udphdr *udp = NULL; 16518 struct tcp_rack *rack; 16519 struct tcphdr *th; 16520 uint8_t pass = 0; 16521 uint8_t mark = 0; 16522 uint8_t wanted_cookie = 0; 16523 u_char opt[TCP_MAXOLEN]; 16524 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16525 uint32_t rack_seq; 16526 16527 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16528 unsigned ipsec_optlen = 0; 16529 16530 #endif 16531 int32_t idle, sendalot; 16532 int32_t sub_from_prr = 0; 16533 volatile int32_t sack_rxmit; 16534 struct rack_sendmap *rsm = NULL; 16535 int32_t tso, mtu; 16536 struct tcpopt to; 16537 int32_t slot = 0; 16538 int32_t sup_rack = 0; 16539 uint32_t cts, ms_cts, delayed, early; 16540 uint16_t add_flag = RACK_SENT_SP; 16541 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16542 uint8_t hpts_calling, doing_tlp = 0; 16543 uint32_t cwnd_to_use, pace_max_seg; 16544 int32_t 
do_a_prefetch = 0; 16545 int32_t prefetch_rsm = 0; 16546 int32_t orig_len = 0; 16547 struct timeval tv; 16548 int32_t prefetch_so_done = 0; 16549 struct tcp_log_buffer *lgb; 16550 struct inpcb *inp; 16551 struct sockbuf *sb; 16552 uint64_t ts_val = 0; 16553 #ifdef TCP_ACCOUNTING 16554 uint64_t crtsc; 16555 #endif 16556 #ifdef INET6 16557 struct ip6_hdr *ip6 = NULL; 16558 int32_t isipv6; 16559 #endif 16560 uint8_t filled_all = 0; 16561 bool hw_tls = false; 16562 16563 /* setup and take the cache hits here */ 16564 rack = (struct tcp_rack *)tp->t_fb_ptr; 16565 #ifdef TCP_ACCOUNTING 16566 sched_pin(); 16567 ts_val = get_cyclecount(); 16568 #endif 16569 hpts_calling = rack->rc_inp->inp_hpts_calls; 16570 NET_EPOCH_ASSERT(); 16571 INP_WLOCK_ASSERT(rack->rc_inp); 16572 #ifdef TCP_OFFLOAD 16573 if (tp->t_flags & TF_TOE) { 16574 #ifdef TCP_ACCOUNTING 16575 sched_unpin(); 16576 #endif 16577 return (tcp_offload_output(tp)); 16578 } 16579 #endif 16580 /* 16581 * For TFO connections in SYN_RECEIVED, only allow the initial 16582 * SYN|ACK and those sent by the retransmit timer. 16583 */ 16584 if (IS_FASTOPEN(tp->t_flags) && 16585 (tp->t_state == TCPS_SYN_RECEIVED) && 16586 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16587 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16588 #ifdef TCP_ACCOUNTING 16589 sched_unpin(); 16590 #endif 16591 return (0); 16592 } 16593 #ifdef INET6 16594 if (rack->r_state) { 16595 /* Use the cache line loaded if possible */ 16596 isipv6 = rack->r_is_v6; 16597 } else { 16598 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16599 } 16600 #endif 16601 early = 0; 16602 cts = tcp_get_usecs(&tv); 16603 ms_cts = tcp_tv_to_mssectick(&tv); 16604 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16605 tcp_in_hpts(rack->rc_inp)) { 16606 /* 16607 * We are on the hpts for some timer but not hptsi output. 16608 * Remove from the hpts unconditionally. 16609 */ 16610 rack_timer_cancel(tp, rack, cts, __LINE__); 16611 } 16612 /* Are we pacing and late? */ 16613 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16614 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16615 /* We are delayed */ 16616 delayed = cts - rack->r_ctl.rc_last_output_to; 16617 } else { 16618 delayed = 0; 16619 } 16620 /* Do the timers, which may override the pacer */ 16621 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16622 int retval; 16623 16624 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16625 &doing_tlp); 16626 if (retval != 0) { 16627 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16628 #ifdef TCP_ACCOUNTING 16629 sched_unpin(); 16630 #endif 16631 /* 16632 * If timers want tcp_drop(), then pass error out, 16633 * otherwise suppress it. 16634 */ 16635 return (retval < 0 ? retval : 0); 16636 } 16637 } 16638 if (rack->rc_in_persist) { 16639 if (tcp_in_hpts(rack->rc_inp) == 0) { 16640 /* Timer is not running */ 16641 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16642 } 16643 #ifdef TCP_ACCOUNTING 16644 sched_unpin(); 16645 #endif 16646 return (0); 16647 } 16648 if ((rack->r_timer_override) || 16649 (rack->rc_ack_can_sendout_data) || 16650 (delayed) || 16651 (tp->t_state < TCPS_ESTABLISHED)) { 16652 rack->rc_ack_can_sendout_data = 0; 16653 if (tcp_in_hpts(rack->rc_inp)) 16654 tcp_hpts_remove(rack->rc_inp); 16655 } else if (tcp_in_hpts(rack->rc_inp)) { 16656 /* 16657 * On the hpts you can't pass even if ACKNOW is on, we will 16658 * when the hpts fires. 
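 * While blocked here the cycles are charged to the SND_BLOCKED
 * accounting bucket (when TCP_ACCOUNTING is compiled in) and the
 * rack_out_size[TCP_MSS_ACCT_INPACE] counter is bumped before we
 * return without sending.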
16659 */ 16660 #ifdef TCP_ACCOUNTING 16661 crtsc = get_cyclecount(); 16662 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16663 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16664 } 16665 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16666 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16667 tp->tcp_cnt_counters[SND_BLOCKED]++; 16668 } 16669 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16670 sched_unpin(); 16671 #endif 16672 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16673 return (0); 16674 } 16675 rack->rc_inp->inp_hpts_calls = 0; 16676 /* Finish out both pacing early and late accounting */ 16677 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16678 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16679 early = rack->r_ctl.rc_last_output_to - cts; 16680 } else 16681 early = 0; 16682 if (delayed) { 16683 rack->r_ctl.rc_agg_delayed += delayed; 16684 rack->r_late = 1; 16685 } else if (early) { 16686 rack->r_ctl.rc_agg_early += early; 16687 rack->r_early = 1; 16688 } 16689 /* Now that early/late accounting is done turn off the flag */ 16690 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16691 rack->r_wanted_output = 0; 16692 rack->r_timer_override = 0; 16693 if ((tp->t_state != rack->r_state) && 16694 TCPS_HAVEESTABLISHED(tp->t_state)) { 16695 rack_set_state(tp, rack); 16696 } 16697 if ((rack->r_fast_output) && 16698 (doing_tlp == 0) && 16699 (tp->rcv_numsacks == 0)) { 16700 int ret; 16701 16702 error = 0; 16703 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16704 if (ret >= 0) 16705 return(ret); 16706 else if (error) { 16707 inp = rack->rc_inp; 16708 so = inp->inp_socket; 16709 sb = &so->so_snd; 16710 goto nomore; 16711 } 16712 } 16713 inp = rack->rc_inp; 16714 /* 16715 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16716 * only allow the initial SYN or SYN|ACK and those sent 16717 * by the retransmit timer. 16718 */ 16719 if (IS_FASTOPEN(tp->t_flags) && 16720 ((tp->t_state == TCPS_SYN_RECEIVED) || 16721 (tp->t_state == TCPS_SYN_SENT)) && 16722 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16723 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16724 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16725 so = inp->inp_socket; 16726 sb = &so->so_snd; 16727 goto just_return_nolock; 16728 } 16729 /* 16730 * Determine length of data that should be transmitted, and flags 16731 * that will be used. If there is some data or critical controls 16732 * (SYN, RST) to send, then transmit; otherwise, investigate 16733 * further. 
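 *
 * Before that, the idle handling below may treat a sufficiently
 * long quiet period (snd_una == snd_max for longer than
 * rack_min_probertt_hold) as having satisfied probe-RTT, either
 * stamping the probe-RTT entry times or exiting an in-progress
 * probe-RTT.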
16734 */ 16735 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16736 if (tp->t_idle_reduce) { 16737 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16738 rack_cc_after_idle(rack, tp); 16739 } 16740 tp->t_flags &= ~TF_LASTIDLE; 16741 if (idle) { 16742 if (tp->t_flags & TF_MORETOCOME) { 16743 tp->t_flags |= TF_LASTIDLE; 16744 idle = 0; 16745 } 16746 } 16747 if ((tp->snd_una == tp->snd_max) && 16748 rack->r_ctl.rc_went_idle_time && 16749 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16750 idle = cts - rack->r_ctl.rc_went_idle_time; 16751 if (idle > rack_min_probertt_hold) { 16752 /* Count as a probe rtt */ 16753 if (rack->in_probe_rtt == 0) { 16754 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16755 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16756 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16757 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16758 } else { 16759 rack_exit_probertt(rack, cts); 16760 } 16761 } 16762 idle = 0; 16763 } 16764 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16765 rack_init_fsb_block(tp, rack); 16766 again: 16767 /* 16768 * If we've recently taken a timeout, snd_max will be greater than 16769 * snd_nxt. There may be SACK information that allows us to avoid 16770 * resending already delivered data. Adjust snd_nxt accordingly. 16771 */ 16772 sendalot = 0; 16773 cts = tcp_get_usecs(&tv); 16774 ms_cts = tcp_tv_to_mssectick(&tv); 16775 tso = 0; 16776 mtu = 0; 16777 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16778 minseg = segsiz; 16779 if (rack->r_ctl.rc_pace_max_segs == 0) 16780 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16781 else 16782 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16783 sb_offset = tp->snd_max - tp->snd_una; 16784 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16785 flags = tcp_outflags[tp->t_state]; 16786 while (rack->rc_free_cnt < rack_free_cache) { 16787 rsm = rack_alloc(rack); 16788 if (rsm == NULL) { 16789 if (inp->inp_hpts_calls) 16790 /* Retry in a ms */ 16791 slot = (1 * HPTS_USEC_IN_MSEC); 16792 so = inp->inp_socket; 16793 sb = &so->so_snd; 16794 goto just_return_nolock; 16795 } 16796 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16797 rack->rc_free_cnt++; 16798 rsm = NULL; 16799 } 16800 if (inp->inp_hpts_calls) 16801 inp->inp_hpts_calls = 0; 16802 sack_rxmit = 0; 16803 len = 0; 16804 rsm = NULL; 16805 if (flags & TH_RST) { 16806 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16807 so = inp->inp_socket; 16808 sb = &so->so_snd; 16809 goto send; 16810 } 16811 if (rack->r_ctl.rc_resend) { 16812 /* Retransmit timer */ 16813 rsm = rack->r_ctl.rc_resend; 16814 rack->r_ctl.rc_resend = NULL; 16815 len = rsm->r_end - rsm->r_start; 16816 sack_rxmit = 1; 16817 sendalot = 0; 16818 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16819 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16820 __func__, __LINE__, 16821 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16822 sb_offset = rsm->r_start - tp->snd_una; 16823 if (len >= segsiz) 16824 len = segsiz; 16825 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16826 /* We have a retransmit that takes precedence */ 16827 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16828 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 16829 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16830 /* Enter recovery if not induced by a time-out */ 16831 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 16832 } 16833 #ifdef INVARIANTS 16834 if 
(SEQ_LT(rsm->r_start, tp->snd_una)) { 16835 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16836 tp, rack, rsm, rsm->r_start, tp->snd_una); 16837 } 16838 #endif 16839 len = rsm->r_end - rsm->r_start; 16840 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16841 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16842 __func__, __LINE__, 16843 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16844 sb_offset = rsm->r_start - tp->snd_una; 16845 sendalot = 0; 16846 if (len >= segsiz) 16847 len = segsiz; 16848 if (len > 0) { 16849 sack_rxmit = 1; 16850 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16851 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16852 min(len, segsiz)); 16853 } 16854 } else if (rack->r_ctl.rc_tlpsend) { 16855 /* Tail loss probe */ 16856 long cwin; 16857 long tlen; 16858 16859 /* 16860 * Check if we can do a TLP with a RACK'd packet 16861 * this can happen if we are not doing the rack 16862 * cheat and we skipped to a TLP and it 16863 * went off. 16864 */ 16865 rsm = rack->r_ctl.rc_tlpsend; 16866 /* We are doing a TLP make sure the flag is preent */ 16867 rsm->r_flags |= RACK_TLP; 16868 rack->r_ctl.rc_tlpsend = NULL; 16869 sack_rxmit = 1; 16870 tlen = rsm->r_end - rsm->r_start; 16871 if (tlen > segsiz) 16872 tlen = segsiz; 16873 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16874 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16875 __func__, __LINE__, 16876 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16877 sb_offset = rsm->r_start - tp->snd_una; 16878 cwin = min(tp->snd_wnd, tlen); 16879 len = cwin; 16880 } 16881 if (rack->r_must_retran && 16882 (doing_tlp == 0) && 16883 (rsm == NULL)) { 16884 /* 16885 * Non-Sack and we had a RTO or Sack/non-Sack and a 16886 * MTU change, we need to retransmit until we reach 16887 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto). 16888 */ 16889 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 16890 int sendwin, flight; 16891 16892 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 16893 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 16894 if (flight >= sendwin) { 16895 so = inp->inp_socket; 16896 sb = &so->so_snd; 16897 goto just_return_nolock; 16898 } 16899 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16900 if (rsm == NULL) { 16901 /* TSNH */ 16902 rack->r_must_retran = 0; 16903 rack->r_ctl.rc_out_at_rto = 0; 16904 so = inp->inp_socket; 16905 sb = &so->so_snd; 16906 goto just_return_nolock; 16907 } 16908 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 16909 /* It does not have the flag, we are done */ 16910 rack->r_must_retran = 0; 16911 rack->r_ctl.rc_out_at_rto = 0; 16912 } else { 16913 sack_rxmit = 1; 16914 len = rsm->r_end - rsm->r_start; 16915 sendalot = 0; 16916 sb_offset = rsm->r_start - tp->snd_una; 16917 if (len >= segsiz) 16918 len = segsiz; 16919 /* 16920 * Delay removing the flag RACK_MUST_RXT so 16921 * that the fastpath for retransmit will 16922 * work with this rsm. 16923 */ 16924 16925 } 16926 } else { 16927 /* We must be done if there is nothing outstanding */ 16928 rack->r_must_retran = 0; 16929 rack->r_ctl.rc_out_at_rto = 0; 16930 } 16931 } 16932 /* 16933 * Enforce a connection sendmap count limit if set 16934 * as long as we are not retransmiting. 
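	 * (Editor's note: when the limit is reached nothing is sent; the
	 * rack_to_alloc_limited counter is bumped, the condition is
	 * reported once per connection, and the send is simply retried on
	 * a later call.)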
16935 */ 16936 if ((rsm == NULL) && 16937 (rack->do_detection == 0) && 16938 (V_tcp_map_entries_limit > 0) && 16939 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 16940 counter_u64_add(rack_to_alloc_limited, 1); 16941 if (!rack->alloc_limit_reported) { 16942 rack->alloc_limit_reported = 1; 16943 counter_u64_add(rack_alloc_limited_conns, 1); 16944 } 16945 so = inp->inp_socket; 16946 sb = &so->so_snd; 16947 goto just_return_nolock; 16948 } 16949 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 16950 /* we are retransmitting the fin */ 16951 len--; 16952 if (len) { 16953 /* 16954 * When retransmitting data do *not* include the 16955 * FIN. This could happen from a TLP probe. 16956 */ 16957 flags &= ~TH_FIN; 16958 } 16959 } 16960 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 16961 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 16962 int ret; 16963 16964 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 16965 if (ret == 0) 16966 return (0); 16967 } 16968 so = inp->inp_socket; 16969 sb = &so->so_snd; 16970 if (do_a_prefetch == 0) { 16971 kern_prefetch(sb, &do_a_prefetch); 16972 do_a_prefetch = 1; 16973 } 16974 #ifdef NETFLIX_SHARED_CWND 16975 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 16976 rack->rack_enable_scwnd) { 16977 /* We are doing cwnd sharing */ 16978 if (rack->gp_ready && 16979 (rack->rack_attempted_scwnd == 0) && 16980 (rack->r_ctl.rc_scw == NULL) && 16981 tp->t_lib) { 16982 /* The pcbid is in, lets make an attempt */ 16983 counter_u64_add(rack_try_scwnd, 1); 16984 rack->rack_attempted_scwnd = 1; 16985 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 16986 &rack->r_ctl.rc_scw_index, 16987 segsiz); 16988 } 16989 if (rack->r_ctl.rc_scw && 16990 (rack->rack_scwnd_is_idle == 1) && 16991 sbavail(&so->so_snd)) { 16992 /* we are no longer out of data */ 16993 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 16994 rack->rack_scwnd_is_idle = 0; 16995 } 16996 if (rack->r_ctl.rc_scw) { 16997 /* First lets update and get the cwnd */ 16998 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 16999 rack->r_ctl.rc_scw_index, 17000 tp->snd_cwnd, tp->snd_wnd, segsiz); 17001 } 17002 } 17003 #endif 17004 /* 17005 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17006 * state flags. 17007 */ 17008 if (tp->t_flags & TF_NEEDFIN) 17009 flags |= TH_FIN; 17010 if (tp->t_flags & TF_NEEDSYN) 17011 flags |= TH_SYN; 17012 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17013 void *end_rsm; 17014 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17015 if (end_rsm) 17016 kern_prefetch(end_rsm, &prefetch_rsm); 17017 prefetch_rsm = 1; 17018 } 17019 SOCKBUF_LOCK(sb); 17020 /* 17021 * If snd_nxt == snd_max and we have transmitted a FIN, the 17022 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17023 * negative length. This can also occur when TCP opens up its 17024 * congestion window while receiving additional duplicate acks after 17025 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17026 * the fast-retransmit. 17027 * 17028 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17029 * set to snd_una, the sb_offset will be 0, and the length may wind 17030 * up 0. 17031 * 17032 * If sack_rxmit is true we are retransmitting from the scoreboard 17033 * in which case len is already set. 
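	 * (Editor's summary: for new data the offset into the socket buffer
	 * is snd_nxt - snd_una, while for a scoreboard retransmit chosen
	 * above the offset is rsm->r_start - snd_una and len has already
	 * been clamped to at most one segment.)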
17034 */ 17035 if ((sack_rxmit == 0) && 17036 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17037 uint32_t avail; 17038 17039 avail = sbavail(sb); 17040 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17041 sb_offset = tp->snd_nxt - tp->snd_una; 17042 else 17043 sb_offset = 0; 17044 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17045 if (rack->r_ctl.rc_tlp_new_data) { 17046 /* TLP is forcing out new data */ 17047 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17048 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17049 } 17050 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17051 if (tp->snd_wnd > sb_offset) 17052 len = tp->snd_wnd - sb_offset; 17053 else 17054 len = 0; 17055 } else { 17056 len = rack->r_ctl.rc_tlp_new_data; 17057 } 17058 rack->r_ctl.rc_tlp_new_data = 0; 17059 } else { 17060 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17061 } 17062 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17063 /* 17064 * For prr=off, we need to send only 1 MSS 17065 * at a time. We do this because another sack could 17066 * be arriving that causes us to send retransmits and 17067 * we don't want to be on a long pace due to a larger send 17068 * that keeps us from sending out the retransmit. 17069 */ 17070 len = segsiz; 17071 } 17072 } else { 17073 uint32_t outstanding; 17074 /* 17075 * We are inside of a Fast recovery episode, this 17076 * is caused by a SACK or 3 dup acks. At this point 17077 * we have sent all the retransmissions and we rely 17078 * on PRR to dictate what we will send in the form of 17079 * new data. 17080 */ 17081 17082 outstanding = tp->snd_max - tp->snd_una; 17083 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17084 if (tp->snd_wnd > outstanding) { 17085 len = tp->snd_wnd - outstanding; 17086 /* Check to see if we have the data */ 17087 if ((sb_offset + len) > avail) { 17088 /* It does not all fit */ 17089 if (avail > sb_offset) 17090 len = avail - sb_offset; 17091 else 17092 len = 0; 17093 } 17094 } else { 17095 len = 0; 17096 } 17097 } else if (avail > sb_offset) { 17098 len = avail - sb_offset; 17099 } else { 17100 len = 0; 17101 } 17102 if (len > 0) { 17103 if (len > rack->r_ctl.rc_prr_sndcnt) { 17104 len = rack->r_ctl.rc_prr_sndcnt; 17105 } 17106 if (len > 0) { 17107 sub_from_prr = 1; 17108 } 17109 } 17110 if (len > segsiz) { 17111 /* 17112 * We should never send more than a MSS when 17113 * retransmitting or sending new data in prr 17114 * mode unless the override flag is on. Most 17115 * likely the PRR algorithm is not going to 17116 * let us send a lot as well :-) 17117 */ 17118 if (rack->r_ctl.rc_prr_sendalot == 0) { 17119 len = segsiz; 17120 } 17121 } else if (len < segsiz) { 17122 /* 17123 * Do we send any? The idea here is if the 17124 * send empty's the socket buffer we want to 17125 * do it. However if not then lets just wait 17126 * for our prr_sndcnt to get bigger. 17127 */ 17128 long leftinsb; 17129 17130 leftinsb = sbavail(sb) - sb_offset; 17131 if (leftinsb > len) { 17132 /* This send does not empty the sb */ 17133 len = 0; 17134 } 17135 } 17136 } 17137 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17138 /* 17139 * If you have not established 17140 * and are not doing FAST OPEN 17141 * no data please. 
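	 *
	 * (Editor's aside on the fast-recovery branch above: when PRR is in
	 * use, the amount of new data allowed out is metered by
	 * rc_prr_sndcnt.  The sketch below restates that arithmetic; it is
	 * illustrative only and example_prr_new_len() is a hypothetical
	 * helper, not something defined in this file.)
	 */
#if 0	/* illustrative sketch, never compiled */
static uint32_t
example_prr_new_len(uint32_t snd_wnd, uint32_t outstanding,
    uint32_t prr_sndcnt, uint32_t avail, uint32_t sb_offset,
    uint32_t segsiz, int prr_sendalot)
{
	uint32_t len;

	if ((prr_sndcnt + outstanding) > snd_wnd) {
		/* Window nearly full: only what still fits may go out. */
		if (snd_wnd > outstanding) {
			len = snd_wnd - outstanding;
			if ((sb_offset + len) > avail)
				len = (avail > sb_offset) ? (avail - sb_offset) : 0;
		} else
			len = 0;
	} else
		len = (avail > sb_offset) ? (avail - sb_offset) : 0;
	/* PRR meters out at most prr_sndcnt bytes of new data. */
	if (len > prr_sndcnt)
		len = prr_sndcnt;
	if (len > segsiz) {
		/* Normally hold new data to one MSS per pass. */
		if (prr_sendalot == 0)
			len = segsiz;
	} else if (len < segsiz) {
		/* A sub-MSS send is only worth it if it empties the sockbuf. */
		if ((avail - sb_offset) > len)
			len = 0;
	}
	return (len);
}
#endif
/*
 * (end of editor's aside; the original comment resumes below)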
17142 */ 17143 if ((sack_rxmit == 0) && 17144 (!IS_FASTOPEN(tp->t_flags))){ 17145 len = 0; 17146 sb_offset = 0; 17147 } 17148 } 17149 if (prefetch_so_done == 0) { 17150 kern_prefetch(so, &prefetch_so_done); 17151 prefetch_so_done = 1; 17152 } 17153 /* 17154 * Lop off SYN bit if it has already been sent. However, if this is 17155 * SYN-SENT state and if segment contains data and if we don't know 17156 * that foreign host supports TAO, suppress sending segment. 17157 */ 17158 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17159 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17160 /* 17161 * When sending additional segments following a TFO SYN|ACK, 17162 * do not include the SYN bit. 17163 */ 17164 if (IS_FASTOPEN(tp->t_flags) && 17165 (tp->t_state == TCPS_SYN_RECEIVED)) 17166 flags &= ~TH_SYN; 17167 } 17168 /* 17169 * Be careful not to send data and/or FIN on SYN segments. This 17170 * measure is needed to prevent interoperability problems with not 17171 * fully conformant TCP implementations. 17172 */ 17173 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17174 len = 0; 17175 flags &= ~TH_FIN; 17176 } 17177 /* 17178 * On TFO sockets, ensure no data is sent in the following cases: 17179 * 17180 * - When retransmitting SYN|ACK on a passively-created socket 17181 * 17182 * - When retransmitting SYN on an actively created socket 17183 * 17184 * - When sending a zero-length cookie (cookie request) on an 17185 * actively created socket 17186 * 17187 * - When the socket is in the CLOSED state (RST is being sent) 17188 */ 17189 if (IS_FASTOPEN(tp->t_flags) && 17190 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17191 ((tp->t_state == TCPS_SYN_SENT) && 17192 (tp->t_tfo_client_cookie_len == 0)) || 17193 (flags & TH_RST))) { 17194 sack_rxmit = 0; 17195 len = 0; 17196 } 17197 /* Without fast-open there should never be data sent on a SYN */ 17198 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17199 tp->snd_nxt = tp->iss; 17200 len = 0; 17201 } 17202 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17203 /* We only send 1 MSS if we have a DSACK block */ 17204 add_flag |= RACK_SENT_W_DSACK; 17205 len = segsiz; 17206 } 17207 orig_len = len; 17208 if (len <= 0) { 17209 /* 17210 * If FIN has been sent but not acked, but we haven't been 17211 * called to retransmit, len will be < 0. Otherwise, window 17212 * shrank after we sent into it. If window shrank to 0, 17213 * cancel pending retransmit, pull snd_nxt back to (closed) 17214 * window, and set the persist timer if it isn't already 17215 * going. If the window didn't close completely, just wait 17216 * for an ACK. 17217 * 17218 * We also do a general check here to ensure that we will 17219 * set the persist timer when we have data to send, but a 17220 * 0-byte window. This makes sure the persist timer is set 17221 * even if the packet hits one of the "goto send" lines 17222 * below. 17223 */ 17224 len = 0; 17225 if ((tp->snd_wnd == 0) && 17226 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17227 (tp->snd_una == tp->snd_max) && 17228 (sb_offset < (int)sbavail(sb))) { 17229 rack_enter_persist(tp, rack, cts); 17230 } 17231 } else if ((rsm == NULL) && 17232 (doing_tlp == 0) && 17233 (len < pace_max_seg)) { 17234 /* 17235 * We are not sending a maximum sized segment for 17236 * some reason. Should we not send anything (think 17237 * sws or persists)? 
17238 */ 17239 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17240 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17241 (len < minseg) && 17242 (len < (int)(sbavail(sb) - sb_offset))) { 17243 /* 17244 * Here the rwnd is less than 17245 * the minimum pacing size, this is not a retransmit, 17246 * we are established and 17247 * the send is not the last in the socket buffer 17248 * we send nothing, and we may enter persists 17249 * if nothing is outstanding. 17250 */ 17251 len = 0; 17252 if (tp->snd_max == tp->snd_una) { 17253 /* 17254 * Nothing out we can 17255 * go into persists. 17256 */ 17257 rack_enter_persist(tp, rack, cts); 17258 } 17259 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17260 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17261 (len < (int)(sbavail(sb) - sb_offset)) && 17262 (len < minseg)) { 17263 /* 17264 * Here we are not retransmitting, and 17265 * the cwnd is not so small that we could 17266 * not send at least a min size (rxt timer 17267 * not having gone off), We have 2 segments or 17268 * more already in flight, its not the tail end 17269 * of the socket buffer and the cwnd is blocking 17270 * us from sending out a minimum pacing segment size. 17271 * Lets not send anything. 17272 */ 17273 len = 0; 17274 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17275 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17276 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17277 (len < (int)(sbavail(sb) - sb_offset)) && 17278 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17279 /* 17280 * Here we have a send window but we have 17281 * filled it up and we can't send another pacing segment. 17282 * We also have in flight more than 2 segments 17283 * and we are not completing the sb i.e. we allow 17284 * the last bytes of the sb to go out even if 17285 * its not a full pacing segment. 17286 */ 17287 len = 0; 17288 } else if ((rack->r_ctl.crte != NULL) && 17289 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17290 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17291 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17292 (len < (int)(sbavail(sb) - sb_offset))) { 17293 /* 17294 * Here we are doing hardware pacing, this is not a TLP, 17295 * we are not sending a pace max segment size, there is rwnd 17296 * room to send at least N pace_max_seg, the cwnd is greater 17297 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17298 * more segments in flight and its not the tail of the socket buffer. 17299 * 17300 * We don't want to send instead we need to get more ack's in to 17301 * allow us to send a full pacing segment. Normally, if we are pacing 17302 * about the right speed, we should have finished our pacing 17303 * send as most of the acks have come back if we are at the 17304 * right rate. This is a bit fuzzy since return path delay 17305 * can delay the acks, which is why we want to make sure we 17306 * have cwnd space to have a bit more than a max pace segments in flight. 17307 * 17308 * If we have not gotten our acks back we are pacing at too high a 17309 * rate delaying will not hurt and will bring our GP estimate down by 17310 * injecting the delay. If we don't do this we will send 17311 * 2 MSS out in response to the acks being clocked in which 17312 * defeats the point of hw-pacing (i.e. to help us get 17313 * larger TSO's out). 17314 */ 17315 len = 0; 17316 17317 } 17318 17319 } 17320 /* len will be >= 0 after this point. 
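	 * (Editor's summary of the hold-back cases above: a small,
	 * non-retransmit send is suppressed -- len forced to 0 -- when the
	 * peer's window is below the minimum pacing segment, when the cwnd
	 * blocks a minimum-sized send while two or more segments are
	 * already in flight, when the send window is already filled, or
	 * when hardware pacing prefers to wait for a full pace_max_seg; in
	 * the first case the connection may also enter persist if nothing
	 * is outstanding.)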
*/ 17321 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17322 rack_sndbuf_autoscale(rack); 17323 /* 17324 * Decide if we can use TCP Segmentation Offloading (if supported by 17325 * hardware). 17326 * 17327 * TSO may only be used if we are in a pure bulk sending state. The 17328 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17329 * options prevent using TSO. With TSO the TCP header is the same 17330 * (except for the sequence number) for all generated packets. This 17331 * makes it impossible to transmit any options which vary per 17332 * generated segment or packet. 17333 * 17334 * IPv4 handling has a clear separation of ip options and ip header 17335 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17336 * the right thing below to provide length of just ip options and thus 17337 * checking for ipoptlen is enough to decide if ip options are present. 17338 */ 17339 ipoptlen = 0; 17340 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17341 /* 17342 * Pre-calculate here as we save another lookup into the darknesses 17343 * of IPsec that way and can actually decide if TSO is ok. 17344 */ 17345 #ifdef INET6 17346 if (isipv6 && IPSEC_ENABLED(ipv6)) 17347 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17348 #ifdef INET 17349 else 17350 #endif 17351 #endif /* INET6 */ 17352 #ifdef INET 17353 if (IPSEC_ENABLED(ipv4)) 17354 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17355 #endif /* INET */ 17356 #endif 17357 17358 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17359 ipoptlen += ipsec_optlen; 17360 #endif 17361 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17362 (tp->t_port == 0) && 17363 ((tp->t_flags & TF_SIGNATURE) == 0) && 17364 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17365 ipoptlen == 0) 17366 tso = 1; 17367 { 17368 uint32_t outstanding; 17369 17370 outstanding = tp->snd_max - tp->snd_una; 17371 if (tp->t_flags & TF_SENTFIN) { 17372 /* 17373 * If we sent a fin, snd_max is 1 higher than 17374 * snd_una 17375 */ 17376 outstanding--; 17377 } 17378 if (sack_rxmit) { 17379 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17380 flags &= ~TH_FIN; 17381 } else { 17382 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17383 sbused(sb))) 17384 flags &= ~TH_FIN; 17385 } 17386 } 17387 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17388 (long)TCP_MAXWIN << tp->rcv_scale); 17389 17390 /* 17391 * Sender silly window avoidance. We transmit under the following 17392 * conditions when len is non-zero: 17393 * 17394 * - We have a full segment (or more with TSO) - This is the last 17395 * buffer in a write()/send() and we are either idle or running 17396 * NODELAY - we've timed out (e.g. persist timer) - we have more 17397 * then 1/2 the maximum send window's worth of data (receiver may be 17398 * limited the window size) - we need to retransmit 17399 */ 17400 if (len) { 17401 if (len >= segsiz) { 17402 goto send; 17403 } 17404 /* 17405 * NOTE! on localhost connections an 'ack' from the remote 17406 * end may occur synchronously with the output and cause us 17407 * to flush a buffer queued with moretocome. 
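	 *
	 * (Editor's aside on the TSO decision made a little earlier above;
	 * the predicate below restates it.  Illustrative only --
	 * example_tso_ok() is a hypothetical helper and its parameters
	 * simply mirror the fields used by the real test: the TF_TSO and
	 * TF_SIGNATURE flags, the net.inet.tcp.tso sysctl, the UDP
	 * tunneling port, SACK state and IP option length.)
	 */
#if 0	/* illustrative sketch, never compiled */
static int
example_tso_ok(int hw_tso, int sysctl_tso, uint32_t len, uint32_t segsiz,
    uint16_t udp_tunnel_port, int md5_sig, int rcv_numsacks,
    int sack_rxmit, int ipoptlen)
{
	/*
	 * TSO only for plain bulk data: no tunneling, no MD5, no SACK
	 * retransmits or pending SACK blocks, and no IP options.
	 */
	return (hw_tso && sysctl_tso && (len > segsiz) &&
	    (udp_tunnel_port == 0) && (md5_sig == 0) &&
	    (rcv_numsacks == 0) && (sack_rxmit == 0) && (ipoptlen == 0));
}
#endif
/*
 * (end of editor's aside; the original comment resumes below)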
XXX 17408 * 17409 */ 17410 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17411 (idle || (tp->t_flags & TF_NODELAY)) && 17412 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17413 (tp->t_flags & TF_NOPUSH) == 0) { 17414 pass = 2; 17415 goto send; 17416 } 17417 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17418 pass = 22; 17419 goto send; 17420 } 17421 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17422 pass = 4; 17423 goto send; 17424 } 17425 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17426 pass = 5; 17427 goto send; 17428 } 17429 if (sack_rxmit) { 17430 pass = 6; 17431 goto send; 17432 } 17433 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17434 (ctf_outstanding(tp) < (segsiz * 2))) { 17435 /* 17436 * We have less than two MSS outstanding (delayed ack) 17437 * and our rwnd will not let us send a full sized 17438 * MSS. Lets go ahead and let this small segment 17439 * out because we want to try to have at least two 17440 * packets inflight to not be caught by delayed ack. 17441 */ 17442 pass = 12; 17443 goto send; 17444 } 17445 } 17446 /* 17447 * Sending of standalone window updates. 17448 * 17449 * Window updates are important when we close our window due to a 17450 * full socket buffer and are opening it again after the application 17451 * reads data from it. Once the window has opened again and the 17452 * remote end starts to send again the ACK clock takes over and 17453 * provides the most current window information. 17454 * 17455 * We must avoid the silly window syndrome whereas every read from 17456 * the receive buffer, no matter how small, causes a window update 17457 * to be sent. We also should avoid sending a flurry of window 17458 * updates when the socket buffer had queued a lot of data and the 17459 * application is doing small reads. 17460 * 17461 * Prevent a flurry of pointless window updates by only sending an 17462 * update when we can increase the advertized window by more than 17463 * 1/4th of the socket buffer capacity. When the buffer is getting 17464 * full or is very small be more aggressive and send an update 17465 * whenever we can increase by two mss sized segments. In all other 17466 * situations the ACK's to new incoming data will carry further 17467 * window increases. 17468 * 17469 * Don't send an independent window update if a delayed ACK is 17470 * pending (it will get piggy-backed on it) or the remote side 17471 * already has done a half-close and won't send more data. Skip 17472 * this if the connection is in T/TCP half-open state. 17473 */ 17474 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17475 !(tp->t_flags & TF_DELACK) && 17476 !TCPS_HAVERCVDFIN(tp->t_state)) { 17477 /* 17478 * "adv" is the amount we could increase the window, taking 17479 * into account that we are limited by TCP_MAXWIN << 17480 * tp->rcv_scale. 17481 */ 17482 int32_t adv; 17483 int oldwin; 17484 17485 adv = recwin; 17486 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17487 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17488 if (adv > oldwin) 17489 adv -= oldwin; 17490 else { 17491 /* We can't increase the window */ 17492 adv = 0; 17493 } 17494 } else 17495 oldwin = 0; 17496 17497 /* 17498 * If the new window size ends up being the same as or less 17499 * than the old size when it is scaled, then don't force 17500 * a window update. 
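	 * (Editor's worked example of the thresholds applied below: with a
	 * 64 kB receive buffer and a 1460-byte MSS, a standalone update is
	 * sent once the advertised window can grow by at least 16 kB, one
	 * quarter of the buffer; only two segments of growth are required
	 * when the buffer is nearly full or is smaller than 8 MSS, and any
	 * growth of at least half the buffer is always advertised.)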
17501 */ 17502 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17503 goto dontupdate; 17504 17505 if (adv >= (int32_t)(2 * segsiz) && 17506 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17507 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17508 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17509 pass = 7; 17510 goto send; 17511 } 17512 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17513 pass = 23; 17514 goto send; 17515 } 17516 } 17517 dontupdate: 17518 17519 /* 17520 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17521 * is also a catch-all for the retransmit timer timeout case. 17522 */ 17523 if (tp->t_flags & TF_ACKNOW) { 17524 pass = 8; 17525 goto send; 17526 } 17527 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17528 pass = 9; 17529 goto send; 17530 } 17531 /* 17532 * If our state indicates that FIN should be sent and we have not 17533 * yet done so, then we need to send. 17534 */ 17535 if ((flags & TH_FIN) && 17536 (tp->snd_nxt == tp->snd_una)) { 17537 pass = 11; 17538 goto send; 17539 } 17540 /* 17541 * No reason to send a segment, just return. 17542 */ 17543 just_return: 17544 SOCKBUF_UNLOCK(sb); 17545 just_return_nolock: 17546 { 17547 int app_limited = CTF_JR_SENT_DATA; 17548 17549 if (tot_len_this_send > 0) { 17550 /* Make sure snd_nxt is up to max */ 17551 rack->r_ctl.fsb.recwin = recwin; 17552 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17553 if ((error == 0) && 17554 rack_use_rfo && 17555 ((flags & (TH_SYN|TH_FIN)) == 0) && 17556 (ipoptlen == 0) && 17557 (tp->snd_nxt == tp->snd_max) && 17558 (tp->rcv_numsacks == 0) && 17559 rack->r_fsb_inited && 17560 TCPS_HAVEESTABLISHED(tp->t_state) && 17561 (rack->r_must_retran == 0) && 17562 ((tp->t_flags & TF_NEEDFIN) == 0) && 17563 (len > 0) && (orig_len > 0) && 17564 (orig_len > len) && 17565 ((orig_len - len) >= segsiz) && 17566 ((optlen == 0) || 17567 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17568 /* We can send at least one more MSS using our fsb */ 17569 17570 rack->r_fast_output = 1; 17571 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17572 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17573 rack->r_ctl.fsb.tcp_flags = flags; 17574 rack->r_ctl.fsb.left_to_send = orig_len - len; 17575 if (hw_tls) 17576 rack->r_ctl.fsb.hw_tls = 1; 17577 else 17578 rack->r_ctl.fsb.hw_tls = 0; 17579 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17580 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17581 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17582 (tp->snd_max - tp->snd_una))); 17583 if (rack->r_ctl.fsb.left_to_send < segsiz) 17584 rack->r_fast_output = 0; 17585 else { 17586 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17587 rack->r_ctl.fsb.rfo_apply_push = 1; 17588 else 17589 rack->r_ctl.fsb.rfo_apply_push = 0; 17590 } 17591 } else 17592 rack->r_fast_output = 0; 17593 17594 17595 rack_log_fsb(rack, tp, so, flags, 17596 ipoptlen, orig_len, len, 0, 17597 1, optlen, __LINE__, 1); 17598 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17599 tp->snd_nxt = tp->snd_max; 17600 } else { 17601 int end_window = 0; 17602 uint32_t seq = tp->gput_ack; 17603 17604 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17605 if (rsm) { 17606 /* 17607 * Mark the last sent that we just-returned (hinting 17608 * that delayed ack may play a role in any rtt measurement). 
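	 * (The code below then classifies why nothing was sent --
	 * rwnd-, app-, cwnd- or PRR-limited -- and, where the stack is
	 * configured to treat that reason as ending the sample, closes the
	 * goodput measurement window so the pacing estimate is not skewed
	 * by the idle gap.)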
17609 */ 17610 rsm->r_just_ret = 1; 17611 } 17612 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17613 rack->r_ctl.rc_agg_delayed = 0; 17614 rack->r_early = 0; 17615 rack->r_late = 0; 17616 rack->r_ctl.rc_agg_early = 0; 17617 if ((ctf_outstanding(tp) + 17618 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17619 minseg)) >= tp->snd_wnd) { 17620 /* We are limited by the rwnd */ 17621 app_limited = CTF_JR_RWND_LIMITED; 17622 if (IN_FASTRECOVERY(tp->t_flags)) 17623 rack->r_ctl.rc_prr_sndcnt = 0; 17624 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17625 /* We are limited by whats available -- app limited */ 17626 app_limited = CTF_JR_APP_LIMITED; 17627 if (IN_FASTRECOVERY(tp->t_flags)) 17628 rack->r_ctl.rc_prr_sndcnt = 0; 17629 } else if ((idle == 0) && 17630 ((tp->t_flags & TF_NODELAY) == 0) && 17631 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17632 (len < segsiz)) { 17633 /* 17634 * No delay is not on and the 17635 * user is sending less than 1MSS. This 17636 * brings out SWS avoidance so we 17637 * don't send. Another app-limited case. 17638 */ 17639 app_limited = CTF_JR_APP_LIMITED; 17640 } else if (tp->t_flags & TF_NOPUSH) { 17641 /* 17642 * The user has requested no push of 17643 * the last segment and we are 17644 * at the last segment. Another app 17645 * limited case. 17646 */ 17647 app_limited = CTF_JR_APP_LIMITED; 17648 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17649 /* Its the cwnd */ 17650 app_limited = CTF_JR_CWND_LIMITED; 17651 } else if (IN_FASTRECOVERY(tp->t_flags) && 17652 (rack->rack_no_prr == 0) && 17653 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17654 app_limited = CTF_JR_PRR; 17655 } else { 17656 /* Now why here are we not sending? */ 17657 #ifdef NOW 17658 #ifdef INVARIANTS 17659 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17660 #endif 17661 #endif 17662 app_limited = CTF_JR_ASSESSING; 17663 } 17664 /* 17665 * App limited in some fashion, for our pacing GP 17666 * measurements we don't want any gap (even cwnd). 17667 * Close down the measurement window. 17668 */ 17669 if (rack_cwnd_block_ends_measure && 17670 ((app_limited == CTF_JR_CWND_LIMITED) || 17671 (app_limited == CTF_JR_PRR))) { 17672 /* 17673 * The reason we are not sending is 17674 * the cwnd (or prr). We have been configured 17675 * to end the measurement window in 17676 * this case. 17677 */ 17678 end_window = 1; 17679 } else if (rack_rwnd_block_ends_measure && 17680 (app_limited == CTF_JR_RWND_LIMITED)) { 17681 /* 17682 * We are rwnd limited and have been 17683 * configured to end the measurement 17684 * window in this case. 17685 */ 17686 end_window = 1; 17687 } else if (app_limited == CTF_JR_APP_LIMITED) { 17688 /* 17689 * A true application limited period, we have 17690 * ran out of data. 17691 */ 17692 end_window = 1; 17693 } else if (app_limited == CTF_JR_ASSESSING) { 17694 /* 17695 * In the assessing case we hit the end of 17696 * the if/else and had no known reason 17697 * This will panic us under invariants.. 17698 * 17699 * If we get this out in logs we need to 17700 * investagate which reason we missed. 17701 */ 17702 end_window = 1; 17703 } 17704 if (end_window) { 17705 uint8_t log = 0; 17706 17707 /* Adjust the Gput measurement */ 17708 if ((tp->t_flags & TF_GPUTINPROG) && 17709 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17710 tp->gput_ack = tp->snd_max; 17711 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17712 /* 17713 * There is not enough to measure. 
17714 */ 17715 tp->t_flags &= ~TF_GPUTINPROG; 17716 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17717 rack->r_ctl.rc_gp_srtt /*flex1*/, 17718 tp->gput_seq, 17719 0, 0, 18, __LINE__, NULL, 0); 17720 } else 17721 log = 1; 17722 } 17723 /* Mark the last packet has app limited */ 17724 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17725 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17726 if (rack->r_ctl.rc_app_limited_cnt == 0) 17727 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17728 else { 17729 /* 17730 * Go out to the end app limited and mark 17731 * this new one as next and move the end_appl up 17732 * to this guy. 17733 */ 17734 if (rack->r_ctl.rc_end_appl) 17735 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17736 rack->r_ctl.rc_end_appl = rsm; 17737 } 17738 rsm->r_flags |= RACK_APP_LIMITED; 17739 rack->r_ctl.rc_app_limited_cnt++; 17740 } 17741 if (log) 17742 rack_log_pacing_delay_calc(rack, 17743 rack->r_ctl.rc_app_limited_cnt, seq, 17744 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17745 } 17746 } 17747 /* Check if we need to go into persists or not */ 17748 if ((tp->snd_max == tp->snd_una) && 17749 TCPS_HAVEESTABLISHED(tp->t_state) && 17750 sbavail(sb) && 17751 (sbavail(sb) > tp->snd_wnd) && 17752 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17753 /* Yes lets make sure to move to persist before timer-start */ 17754 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17755 } 17756 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17757 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17758 } 17759 #ifdef NETFLIX_SHARED_CWND 17760 if ((sbavail(sb) == 0) && 17761 rack->r_ctl.rc_scw) { 17762 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17763 rack->rack_scwnd_is_idle = 1; 17764 } 17765 #endif 17766 #ifdef TCP_ACCOUNTING 17767 if (tot_len_this_send > 0) { 17768 crtsc = get_cyclecount(); 17769 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17770 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17771 } 17772 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17773 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17774 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17775 } 17776 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17777 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17778 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17779 } 17780 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17781 } else { 17782 crtsc = get_cyclecount(); 17783 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17784 tp->tcp_cnt_counters[SND_LIMITED]++; 17785 } 17786 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17787 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17788 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17789 } 17790 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17791 } 17792 sched_unpin(); 17793 #endif 17794 return (0); 17795 17796 send: 17797 if (rsm || sack_rxmit) 17798 counter_u64_add(rack_nfto_resend, 1); 17799 else 17800 counter_u64_add(rack_non_fto_send, 1); 17801 if ((flags & TH_FIN) && 17802 sbavail(sb)) { 17803 /* 17804 * We do not transmit a FIN 17805 * with data outstanding. We 17806 * need to make it so all data 17807 * is acked first. 
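	 * (In other words the FIN bit is stripped here whenever the send
	 * buffer is still non-empty; it will be set again on a later pass
	 * once everything ahead of it has been acknowledged.  Editor's
	 * note on the accounting above: CNT_OF_MSS_OUT is credited with
	 * the send rounded up to whole segments, e.g. 4000 bytes at a
	 * 1448-byte segsiz counts as (4000 + 1447) / 1448 = 3 segments.)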
17808 */ 17809 flags &= ~TH_FIN; 17810 } 17811 /* Enforce stack imposed max seg size if we have one */ 17812 if (rack->r_ctl.rc_pace_max_segs && 17813 (len > rack->r_ctl.rc_pace_max_segs)) { 17814 mark = 1; 17815 len = rack->r_ctl.rc_pace_max_segs; 17816 } 17817 SOCKBUF_LOCK_ASSERT(sb); 17818 if (len > 0) { 17819 if (len >= segsiz) 17820 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17821 else 17822 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17823 } 17824 /* 17825 * Before ESTABLISHED, force sending of initial options unless TCP 17826 * set not to do any options. NOTE: we assume that the IP/TCP header 17827 * plus TCP options always fit in a single mbuf, leaving room for a 17828 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17829 * + optlen <= MCLBYTES 17830 */ 17831 optlen = 0; 17832 #ifdef INET6 17833 if (isipv6) 17834 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17835 else 17836 #endif 17837 hdrlen = sizeof(struct tcpiphdr); 17838 17839 /* 17840 * Compute options for segment. We only have to care about SYN and 17841 * established connection segments. Options for SYN-ACK segments 17842 * are handled in TCP syncache. 17843 */ 17844 to.to_flags = 0; 17845 if ((tp->t_flags & TF_NOOPT) == 0) { 17846 /* Maximum segment size. */ 17847 if (flags & TH_SYN) { 17848 tp->snd_nxt = tp->iss; 17849 to.to_mss = tcp_mssopt(&inp->inp_inc); 17850 if (tp->t_port) 17851 to.to_mss -= V_tcp_udp_tunneling_overhead; 17852 to.to_flags |= TOF_MSS; 17853 17854 /* 17855 * On SYN or SYN|ACK transmits on TFO connections, 17856 * only include the TFO option if it is not a 17857 * retransmit, as the presence of the TFO option may 17858 * have caused the original SYN or SYN|ACK to have 17859 * been dropped by a middlebox. 17860 */ 17861 if (IS_FASTOPEN(tp->t_flags) && 17862 (tp->t_rxtshift == 0)) { 17863 if (tp->t_state == TCPS_SYN_RECEIVED) { 17864 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17865 to.to_tfo_cookie = 17866 (u_int8_t *)&tp->t_tfo_cookie.server; 17867 to.to_flags |= TOF_FASTOPEN; 17868 wanted_cookie = 1; 17869 } else if (tp->t_state == TCPS_SYN_SENT) { 17870 to.to_tfo_len = 17871 tp->t_tfo_client_cookie_len; 17872 to.to_tfo_cookie = 17873 tp->t_tfo_cookie.client; 17874 to.to_flags |= TOF_FASTOPEN; 17875 wanted_cookie = 1; 17876 /* 17877 * If we wind up having more data to 17878 * send with the SYN than can fit in 17879 * one segment, don't send any more 17880 * until the SYN|ACK comes back from 17881 * the other end. 17882 */ 17883 sendalot = 0; 17884 } 17885 } 17886 } 17887 /* Window scaling. */ 17888 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 17889 to.to_wscale = tp->request_r_scale; 17890 to.to_flags |= TOF_SCALE; 17891 } 17892 /* Timestamps. */ 17893 if ((tp->t_flags & TF_RCVD_TSTMP) || 17894 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 17895 to.to_tsval = ms_cts + tp->ts_offset; 17896 to.to_tsecr = tp->ts_recent; 17897 to.to_flags |= TOF_TS; 17898 } 17899 /* Set receive buffer autosizing timestamp. */ 17900 if (tp->rfbuf_ts == 0 && 17901 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 17902 tp->rfbuf_ts = tcp_ts_getticks(); 17903 /* Selective ACK's. */ 17904 if (tp->t_flags & TF_SACK_PERMIT) { 17905 if (flags & TH_SYN) 17906 to.to_flags |= TOF_SACKPERM; 17907 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 17908 tp->rcv_numsacks > 0) { 17909 to.to_flags |= TOF_SACK; 17910 to.to_nsacks = tp->rcv_numsacks; 17911 to.to_sacks = (u_char *)tp->sackblks; 17912 } 17913 } 17914 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17915 /* TCP-MD5 (RFC2385). 
*/ 17916 if (tp->t_flags & TF_SIGNATURE) 17917 to.to_flags |= TOF_SIGNATURE; 17918 #endif /* TCP_SIGNATURE */ 17919 17920 /* Processing the options. */ 17921 hdrlen += optlen = tcp_addoptions(&to, opt); 17922 /* 17923 * If we wanted a TFO option to be added, but it was unable 17924 * to fit, ensure no data is sent. 17925 */ 17926 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 17927 !(to.to_flags & TOF_FASTOPEN)) 17928 len = 0; 17929 } 17930 if (tp->t_port) { 17931 if (V_tcp_udp_tunneling_port == 0) { 17932 /* The port was removed?? */ 17933 SOCKBUF_UNLOCK(&so->so_snd); 17934 #ifdef TCP_ACCOUNTING 17935 crtsc = get_cyclecount(); 17936 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17937 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 17938 } 17939 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 17940 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17941 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 17942 } 17943 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 17944 sched_unpin(); 17945 #endif 17946 return (EHOSTUNREACH); 17947 } 17948 hdrlen += sizeof(struct udphdr); 17949 } 17950 #ifdef INET6 17951 if (isipv6) 17952 ipoptlen = ip6_optlen(tp->t_inpcb); 17953 else 17954 #endif 17955 if (tp->t_inpcb->inp_options) 17956 ipoptlen = tp->t_inpcb->inp_options->m_len - 17957 offsetof(struct ipoption, ipopt_list); 17958 else 17959 ipoptlen = 0; 17960 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17961 ipoptlen += ipsec_optlen; 17962 #endif 17963 17964 /* 17965 * Adjust data length if insertion of options will bump the packet 17966 * length beyond the t_maxseg length. Clear the FIN bit because we 17967 * cut off the tail of the segment. 17968 */ 17969 if (len + optlen + ipoptlen > tp->t_maxseg) { 17970 if (tso) { 17971 uint32_t if_hw_tsomax; 17972 uint32_t moff; 17973 int32_t max_len; 17974 17975 /* extract TSO information */ 17976 if_hw_tsomax = tp->t_tsomax; 17977 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 17978 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 17979 KASSERT(ipoptlen == 0, 17980 ("%s: TSO can't do IP options", __func__)); 17981 17982 /* 17983 * Check if we should limit by maximum payload 17984 * length: 17985 */ 17986 if (if_hw_tsomax != 0) { 17987 /* compute maximum TSO length */ 17988 max_len = (if_hw_tsomax - hdrlen - 17989 max_linkhdr); 17990 if (max_len <= 0) { 17991 len = 0; 17992 } else if (len > max_len) { 17993 sendalot = 1; 17994 len = max_len; 17995 mark = 2; 17996 } 17997 } 17998 /* 17999 * Prevent the last segment from being fractional 18000 * unless the send sockbuf can be emptied: 18001 */ 18002 max_len = (tp->t_maxseg - optlen); 18003 if ((sb_offset + len) < sbavail(sb)) { 18004 moff = len % (u_int)max_len; 18005 if (moff != 0) { 18006 mark = 3; 18007 len -= moff; 18008 } 18009 } 18010 /* 18011 * In case there are too many small fragments don't 18012 * use TSO: 18013 */ 18014 if (len <= segsiz) { 18015 mark = 4; 18016 tso = 0; 18017 } 18018 /* 18019 * Send the FIN in a separate segment after the bulk 18020 * sending is done. We don't trust the TSO 18021 * implementations to clear the FIN flag on all but 18022 * the last segment. 18023 */ 18024 if (tp->t_flags & TF_NEEDFIN) { 18025 sendalot = 4; 18026 } 18027 } else { 18028 mark = 5; 18029 if (optlen + ipoptlen >= tp->t_maxseg) { 18030 /* 18031 * Since we don't have enough space to put 18032 * the IP header chain and the TCP header in 18033 * one packet as required by RFC 7112, don't 18034 * send it. Also ensure that at least one 18035 * byte of the payload can be put into the 18036 * TCP segment. 
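	 *
	 * (Editor's aside on the TSO branch above: the sketch below
	 * restates how the TSO payload is trimmed to the NIC limit and to
	 * a whole number of segments.  Illustrative only --
	 * example_tso_trim() is a hypothetical helper; it assumes
	 * optlen < t_maxseg so the modulo is well defined, and it leaves
	 * out the sendalot bookkeeping done by the real code.)
	 */
#if 0	/* illustrative sketch, never compiled */
static uint32_t
example_tso_trim(uint32_t len, uint32_t hdrlen, uint32_t linkhdr,
    uint32_t if_hw_tsomax, uint32_t t_maxseg, uint32_t optlen,
    uint32_t segsiz, uint32_t sb_offset, uint32_t sb_avail, int *tso)
{
	uint32_t max_len, moff;

	if (if_hw_tsomax != 0) {
		/* Respect the interface's maximum TSO payload. */
		if (if_hw_tsomax <= hdrlen + linkhdr)
			return (0);
		max_len = if_hw_tsomax - hdrlen - linkhdr;
		if (len > max_len)
			len = max_len;
	}
	/* Keep the tail segment full sized unless the sockbuf empties. */
	max_len = t_maxseg - optlen;
	if ((sb_offset + len) < sb_avail) {
		moff = len % max_len;
		len -= moff;
	}
	/* Too little left for TSO to be worthwhile. */
	if (len <= segsiz)
		*tso = 0;
	return (len);
}
#endif
/*
 * (end of editor's aside; the original comment resumes below)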
18037 */ 18038 SOCKBUF_UNLOCK(&so->so_snd); 18039 error = EMSGSIZE; 18040 sack_rxmit = 0; 18041 goto out; 18042 } 18043 len = tp->t_maxseg - optlen - ipoptlen; 18044 sendalot = 5; 18045 } 18046 } else { 18047 tso = 0; 18048 mark = 6; 18049 } 18050 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18051 ("%s: len > IP_MAXPACKET", __func__)); 18052 #ifdef DIAGNOSTIC 18053 #ifdef INET6 18054 if (max_linkhdr + hdrlen > MCLBYTES) 18055 #else 18056 if (max_linkhdr + hdrlen > MHLEN) 18057 #endif 18058 panic("tcphdr too big"); 18059 #endif 18060 18061 /* 18062 * This KASSERT is here to catch edge cases at a well defined place. 18063 * Before, those had triggered (random) panic conditions further 18064 * down. 18065 */ 18066 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18067 if ((len == 0) && 18068 (flags & TH_FIN) && 18069 (sbused(sb))) { 18070 /* 18071 * We have outstanding data, don't send a fin by itself!. 18072 */ 18073 goto just_return; 18074 } 18075 /* 18076 * Grab a header mbuf, attaching a copy of data to be transmitted, 18077 * and initialize the header from the template for sends on this 18078 * connection. 18079 */ 18080 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18081 if (len) { 18082 uint32_t max_val; 18083 uint32_t moff; 18084 18085 if (rack->r_ctl.rc_pace_max_segs) 18086 max_val = rack->r_ctl.rc_pace_max_segs; 18087 else if (rack->rc_user_set_max_segs) 18088 max_val = rack->rc_user_set_max_segs * segsiz; 18089 else 18090 max_val = len; 18091 /* 18092 * We allow a limit on sending with hptsi. 18093 */ 18094 if (len > max_val) { 18095 mark = 7; 18096 len = max_val; 18097 } 18098 #ifdef INET6 18099 if (MHLEN < hdrlen + max_linkhdr) 18100 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18101 else 18102 #endif 18103 m = m_gethdr(M_NOWAIT, MT_DATA); 18104 18105 if (m == NULL) { 18106 SOCKBUF_UNLOCK(sb); 18107 error = ENOBUFS; 18108 sack_rxmit = 0; 18109 goto out; 18110 } 18111 m->m_data += max_linkhdr; 18112 m->m_len = hdrlen; 18113 18114 /* 18115 * Start the m_copy functions from the closest mbuf to the 18116 * sb_offset in the socket buffer chain. 18117 */ 18118 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18119 s_mb = mb; 18120 s_moff = moff; 18121 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18122 m_copydata(mb, moff, (int)len, 18123 mtod(m, caddr_t)+hdrlen); 18124 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18125 sbsndptr_adv(sb, mb, len); 18126 m->m_len += len; 18127 } else { 18128 struct sockbuf *msb; 18129 18130 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18131 msb = NULL; 18132 else 18133 msb = sb; 18134 m->m_next = tcp_m_copym( 18135 mb, moff, &len, 18136 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18137 ((rsm == NULL) ? hw_tls : 0) 18138 #ifdef NETFLIX_COPY_ARGS 18139 , &filled_all 18140 #endif 18141 ); 18142 if (len <= (tp->t_maxseg - optlen)) { 18143 /* 18144 * Must have ran out of mbufs for the copy 18145 * shorten it to no longer need tso. Lets 18146 * not put on sendalot since we are low on 18147 * mbufs. 
18148 */ 18149 tso = 0; 18150 } 18151 if (m->m_next == NULL) { 18152 SOCKBUF_UNLOCK(sb); 18153 (void)m_free(m); 18154 error = ENOBUFS; 18155 sack_rxmit = 0; 18156 goto out; 18157 } 18158 } 18159 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18160 if (rsm && (rsm->r_flags & RACK_TLP)) { 18161 /* 18162 * TLP should not count in retran count, but 18163 * in its own bin 18164 */ 18165 counter_u64_add(rack_tlp_retran, 1); 18166 counter_u64_add(rack_tlp_retran_bytes, len); 18167 } else { 18168 tp->t_sndrexmitpack++; 18169 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18170 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18171 } 18172 #ifdef STATS 18173 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18174 len); 18175 #endif 18176 } else { 18177 KMOD_TCPSTAT_INC(tcps_sndpack); 18178 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18179 #ifdef STATS 18180 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18181 len); 18182 #endif 18183 } 18184 /* 18185 * If we're sending everything we've got, set PUSH. (This 18186 * will keep happy those implementations which only give 18187 * data to the user when a buffer fills or a PUSH comes in.) 18188 */ 18189 if (sb_offset + len == sbused(sb) && 18190 sbused(sb) && 18191 !(flags & TH_SYN)) { 18192 flags |= TH_PUSH; 18193 add_flag |= RACK_HAD_PUSH; 18194 } 18195 18196 SOCKBUF_UNLOCK(sb); 18197 } else { 18198 SOCKBUF_UNLOCK(sb); 18199 if (tp->t_flags & TF_ACKNOW) 18200 KMOD_TCPSTAT_INC(tcps_sndacks); 18201 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18202 KMOD_TCPSTAT_INC(tcps_sndctrl); 18203 else 18204 KMOD_TCPSTAT_INC(tcps_sndwinup); 18205 18206 m = m_gethdr(M_NOWAIT, MT_DATA); 18207 if (m == NULL) { 18208 error = ENOBUFS; 18209 sack_rxmit = 0; 18210 goto out; 18211 } 18212 #ifdef INET6 18213 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18214 MHLEN >= hdrlen) { 18215 M_ALIGN(m, hdrlen); 18216 } else 18217 #endif 18218 m->m_data += max_linkhdr; 18219 m->m_len = hdrlen; 18220 } 18221 SOCKBUF_UNLOCK_ASSERT(sb); 18222 m->m_pkthdr.rcvif = (struct ifnet *)0; 18223 #ifdef MAC 18224 mac_inpcb_create_mbuf(inp, m); 18225 #endif 18226 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18227 #ifdef INET6 18228 if (isipv6) 18229 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18230 else 18231 #endif /* INET6 */ 18232 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18233 th = rack->r_ctl.fsb.th; 18234 udp = rack->r_ctl.fsb.udp; 18235 if (udp) { 18236 #ifdef INET6 18237 if (isipv6) 18238 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18239 else 18240 #endif /* INET6 */ 18241 ulen = hdrlen + len - sizeof(struct ip); 18242 udp->uh_ulen = htons(ulen); 18243 } 18244 } else { 18245 #ifdef INET6 18246 if (isipv6) { 18247 ip6 = mtod(m, struct ip6_hdr *); 18248 if (tp->t_port) { 18249 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18250 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18251 udp->uh_dport = tp->t_port; 18252 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18253 udp->uh_ulen = htons(ulen); 18254 th = (struct tcphdr *)(udp + 1); 18255 } else 18256 th = (struct tcphdr *)(ip6 + 1); 18257 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18258 } else 18259 #endif /* INET6 */ 18260 { 18261 ip = mtod(m, struct ip *); 18262 #ifdef TCPDEBUG 18263 ipov = (struct ipovly *)ip; 18264 #endif 18265 if (tp->t_port) { 18266 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18267 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18268 udp->uh_dport = tp->t_port; 18269 ulen = hdrlen + len - sizeof(struct ip); 18270 udp->uh_ulen = htons(ulen); 
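			/*
			 * (Editor's note: for the tunneled case ulen is
			 * hdrlen + len minus the IP header, i.e. the UDP
			 * header plus the TCP header, options and payload,
			 * which is exactly what the UDP length field is
			 * defined to carry in RFC 768.)
			 */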
18271 th = (struct tcphdr *)(udp + 1); 18272 } else 18273 th = (struct tcphdr *)(ip + 1); 18274 tcpip_fillheaders(inp, tp->t_port, ip, th); 18275 } 18276 } 18277 /* 18278 * Fill in fields, remembering maximum advertised window for use in 18279 * delaying messages about window sizes. If resending a FIN, be sure 18280 * not to use a new sequence number. 18281 */ 18282 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18283 tp->snd_nxt == tp->snd_max) 18284 tp->snd_nxt--; 18285 /* 18286 * If we are starting a connection, send ECN setup SYN packet. If we 18287 * are on a retransmit, we may resend those bits a number of times 18288 * as per RFC 3168. 18289 */ 18290 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18291 flags |= tcp_ecn_output_syn_sent(tp); 18292 } 18293 /* Also handle parallel SYN for ECN */ 18294 if (TCPS_HAVERCVDSYN(tp->t_state) && 18295 (tp->t_flags2 & TF2_ECN_PERMIT)) { 18296 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18297 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18298 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18299 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18300 #ifdef INET6 18301 if (isipv6) { 18302 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18303 ip6->ip6_flow |= htonl(ect << 20); 18304 } 18305 else 18306 #endif 18307 { 18308 ip->ip_tos &= ~IPTOS_ECN_MASK; 18309 ip->ip_tos |= ect; 18310 } 18311 } 18312 /* 18313 * If we are doing retransmissions, then snd_nxt will not reflect 18314 * the first unsent octet. For ACK only packets, we do not want the 18315 * sequence number of the retransmitted packet, we want the sequence 18316 * number of the next unsent octet. So, if there is no data (and no 18317 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18318 * ti_seq. But if we are in persist state, snd_max might reflect 18319 * one byte beyond the right edge of the window, so use snd_nxt in 18320 * that case, since we know we aren't doing a retransmission. 18321 * (retransmit and persist are mutually exclusive...) 18322 */ 18323 if (sack_rxmit == 0) { 18324 if (len || (flags & (TH_SYN | TH_FIN))) { 18325 th->th_seq = htonl(tp->snd_nxt); 18326 rack_seq = tp->snd_nxt; 18327 } else { 18328 th->th_seq = htonl(tp->snd_max); 18329 rack_seq = tp->snd_max; 18330 } 18331 } else { 18332 th->th_seq = htonl(rsm->r_start); 18333 rack_seq = rsm->r_start; 18334 } 18335 th->th_ack = htonl(tp->rcv_nxt); 18336 tcp_set_flags(th, flags); 18337 /* 18338 * Calculate receive window. Don't shrink window, but avoid silly 18339 * window syndrome. 18340 * If a RST segment is sent, advertise a window of zero. 18341 */ 18342 if (flags & TH_RST) { 18343 recwin = 0; 18344 } else { 18345 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18346 recwin < (long)segsiz) { 18347 recwin = 0; 18348 } 18349 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18350 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18351 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18352 } 18353 18354 /* 18355 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18356 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18357 * handled in syncache. 18358 */ 18359 if (flags & TH_SYN) 18360 th->th_win = htons((u_short) 18361 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18362 else { 18363 /* Avoid shrinking window with window scaling. */ 18364 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18365 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18366 } 18367 /* 18368 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18369 * window. This may cause the remote transmitter to stall. 
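	 *
	 * (Editor's aside on the receive-window calculation just above: the
	 * sketch below restates the clamping applied to recwin before it is
	 * scaled into th_win.  Illustrative only -- example_recwin_field()
	 * is a hypothetical helper; the RST-forces-zero and unscaled-SYN
	 * cases are handled separately by the real code, and the helper
	 * relies on the SEQ_GT() and roundup2() macros this file already
	 * includes.)
	 */
#if 0	/* illustrative sketch, never compiled */
static uint16_t
example_recwin_field(long recwin, uint32_t sb_hiwat, uint32_t segsiz,
    tcp_seq rcv_adv, tcp_seq rcv_nxt, int rcv_scale)
{
	/* Do not advertise a silly, sub-segment window. */
	if (recwin < (long)(sb_hiwat / 4) && recwin < (long)segsiz)
		recwin = 0;
	/* Never shrink the window below what was already advertised. */
	if (SEQ_GT(rcv_adv, rcv_nxt) && recwin < (long)(rcv_adv - rcv_nxt))
		recwin = (long)(rcv_adv - rcv_nxt);
	/* Round up so scaling can never shrink the advertised edge. */
	recwin = roundup2(recwin, 1 << rcv_scale);
	return ((uint16_t)(recwin >> rcv_scale));
}
#endif
/*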
 * The RXWIN0SENT flag tells soreceive() to disable delayed
	 * acknowledgements when draining the buffer. This can occur if the
	 * receiver is attempting to read more data than can be buffered
	 * prior to transmitting on the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
	/* Now, are we using the fsb? If so, copy the template data to the mbuf. */
	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
		uint8_t *cpto;

		cpto = mtod(m, uint8_t *);
		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
		/*
		 * We have just copied in:
		 * IP/IP6
		 * <optional udphdr>
		 * tcphdr (no options)
		 *
		 * We need to grab the correct pointers into the mbuf
		 * for both the tcp header, and possibly the udp header (if tunneling).
		 * We do this by using the offset in the copy buffer and adding it
		 * to the mbuf base pointer (cpto).
		 */
#ifdef INET6
		if (isipv6)
			ip6 = mtod(m, struct ip6_hdr *);
		else
#endif				/* INET6 */
			ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
		/* If we have a udp header, let's set it into the mbuf as well. */
		if (udp)
			udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			goto out;
		}
	}
#endif
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	}
	/*
	 * Put TCP length in extended header, and then checksum extended
	 * header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled in now; it will be
		 * filled in by ip6_output.
18441 */ 18442 if (tp->t_port) { 18443 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18444 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18445 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18446 th->th_sum = htons(0); 18447 UDPSTAT_INC(udps_opackets); 18448 } else { 18449 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18450 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18451 th->th_sum = in6_cksum_pseudo(ip6, 18452 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18453 0); 18454 } 18455 } 18456 #endif 18457 #if defined(INET6) && defined(INET) 18458 else 18459 #endif 18460 #ifdef INET 18461 { 18462 if (tp->t_port) { 18463 m->m_pkthdr.csum_flags = CSUM_UDP; 18464 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18465 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18466 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18467 th->th_sum = htons(0); 18468 UDPSTAT_INC(udps_opackets); 18469 } else { 18470 m->m_pkthdr.csum_flags = CSUM_TCP; 18471 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18472 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18473 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18474 IPPROTO_TCP + len + optlen)); 18475 } 18476 /* IP version must be set here for ipv4/ipv6 checking later */ 18477 KASSERT(ip->ip_v == IPVERSION, 18478 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18479 } 18480 #endif 18481 /* 18482 * Enable TSO and specify the size of the segments. The TCP pseudo 18483 * header checksum is always provided. XXX: Fixme: This is currently 18484 * not the case for IPv6. 18485 */ 18486 if (tso) { 18487 KASSERT(len > tp->t_maxseg - optlen, 18488 ("%s: len <= tso_segsz", __func__)); 18489 m->m_pkthdr.csum_flags |= CSUM_TSO; 18490 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18491 } 18492 KASSERT(len + hdrlen == m_length(m, NULL), 18493 ("%s: mbuf chain different than expected: %d + %u != %u", 18494 __func__, len, hdrlen, m_length(m, NULL))); 18495 18496 #ifdef TCP_HHOOK 18497 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18498 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18499 #endif 18500 /* We're getting ready to send; log now. 
*/ 18501 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18502 union tcp_log_stackspecific log; 18503 18504 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18505 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18506 if (rack->rack_no_prr) 18507 log.u_bbr.flex1 = 0; 18508 else 18509 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18510 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18511 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18512 log.u_bbr.flex4 = orig_len; 18513 if (filled_all) 18514 log.u_bbr.flex5 = 0x80000000; 18515 else 18516 log.u_bbr.flex5 = 0; 18517 /* Save off the early/late values */ 18518 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18519 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18520 log.u_bbr.bw_inuse = rack_get_bw(rack); 18521 if (rsm || sack_rxmit) { 18522 if (doing_tlp) 18523 log.u_bbr.flex8 = 2; 18524 else 18525 log.u_bbr.flex8 = 1; 18526 } else { 18527 if (doing_tlp) 18528 log.u_bbr.flex8 = 3; 18529 else 18530 log.u_bbr.flex8 = 0; 18531 } 18532 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18533 log.u_bbr.flex7 = mark; 18534 log.u_bbr.flex7 <<= 8; 18535 log.u_bbr.flex7 |= pass; 18536 log.u_bbr.pkts_out = tp->t_maxseg; 18537 log.u_bbr.timeStamp = cts; 18538 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18539 log.u_bbr.lt_epoch = cwnd_to_use; 18540 log.u_bbr.delivered = sendalot; 18541 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18542 len, &log, false, NULL, NULL, 0, &tv); 18543 } else 18544 lgb = NULL; 18545 18546 /* 18547 * Fill in IP length and desired time to live and send to IP level. 18548 * There should be a better way to handle ttl and tos; we could keep 18549 * them in the template, but need a way to checksum without them. 18550 */ 18551 /* 18552 * m->m_pkthdr.len should have been set before cksum calcuration, 18553 * because in6_cksum() need it. 18554 */ 18555 #ifdef INET6 18556 if (isipv6) { 18557 /* 18558 * we separately set hoplimit for every segment, since the 18559 * user might want to change the value via setsockopt. Also, 18560 * desired default hop limit might be changed via Neighbor 18561 * Discovery. 18562 */ 18563 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18564 18565 /* 18566 * Set the packet size here for the benefit of DTrace 18567 * probes. ip6_output() will set it properly; it's supposed 18568 * to include the option header lengths as well. 18569 */ 18570 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18571 18572 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18573 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18574 else 18575 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18576 18577 if (tp->t_state == TCPS_SYN_SENT) 18578 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18579 18580 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18581 /* TODO: IPv6 IP6TOS_ECT bit on */ 18582 error = ip6_output(m, 18583 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18584 inp->in6p_outputopts, 18585 #else 18586 NULL, 18587 #endif 18588 &inp->inp_route6, 18589 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18590 NULL, NULL, inp); 18591 18592 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18593 mtu = inp->inp_route6.ro_nh->nh_mtu; 18594 } 18595 #endif /* INET6 */ 18596 #if defined(INET) && defined(INET6) 18597 else 18598 #endif 18599 #ifdef INET 18600 { 18601 ip->ip_len = htons(m->m_pkthdr.len); 18602 #ifdef INET6 18603 if (inp->inp_vflag & INP_IPV6PROTO) 18604 ip->ip_ttl = in6_selecthlim(inp, NULL); 18605 #endif /* INET6 */ 18606 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18607 /* 18608 * If we do path MTU discovery, then we set DF on every 18609 * packet. This might not be the best thing to do according 18610 * to RFC3390 Section 2. However the tcp hostcache mitigates 18611 * the problem so it affects only the first tcp connection 18612 * with a host. 18613 * 18614 * NB: Don't set DF on small MTU/MSS to have a safe 18615 * fallback. 18616 */ 18617 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18618 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18619 if (tp->t_port == 0 || len < V_tcp_minmss) { 18620 ip->ip_off |= htons(IP_DF); 18621 } 18622 } else { 18623 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18624 } 18625 18626 if (tp->t_state == TCPS_SYN_SENT) 18627 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18628 18629 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18630 18631 error = ip_output(m, 18632 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18633 inp->inp_options, 18634 #else 18635 NULL, 18636 #endif 18637 &inp->inp_route, 18638 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18639 inp); 18640 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18641 mtu = inp->inp_route.ro_nh->nh_mtu; 18642 } 18643 #endif /* INET */ 18644 18645 out: 18646 if (lgb) { 18647 lgb->tlb_errno = error; 18648 lgb = NULL; 18649 } 18650 /* 18651 * In transmit state, time the transmission and arrange for the 18652 * retransmit. In persist state, just set snd_max.
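 * On success we also do the PRR, TLP and size-bucket bookkeeping below; on failure the error switch further down paces a short retry for ENOBUFS, shrinks the MSS and tries again for EMSGSIZE, or records a soft error and leaves recovery to the retransmit timer.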
18653 */ 18654 if (error == 0) { 18655 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18656 if (rsm && doing_tlp) { 18657 rack->rc_last_sent_tlp_past_cumack = 0; 18658 rack->rc_last_sent_tlp_seq_valid = 1; 18659 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18660 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18661 } 18662 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18663 if (rsm && (doing_tlp == 0)) { 18664 /* Set we retransmitted */ 18665 rack->rc_gp_saw_rec = 1; 18666 } else { 18667 if (cwnd_to_use > tp->snd_ssthresh) { 18668 /* Set we sent in CA */ 18669 rack->rc_gp_saw_ca = 1; 18670 } else { 18671 /* Set we sent in SS */ 18672 rack->rc_gp_saw_ss = 1; 18673 } 18674 } 18675 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18676 (tp->t_flags & TF_SACK_PERMIT) && 18677 tp->rcv_numsacks > 0) 18678 tcp_clean_dsack_blocks(tp); 18679 tot_len_this_send += len; 18680 if (len == 0) 18681 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18682 else if (len == 1) { 18683 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18684 } else if (len > 1) { 18685 int idx; 18686 18687 idx = (len / segsiz) + 3; 18688 if (idx >= TCP_MSS_ACCT_ATIMER) 18689 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18690 else 18691 counter_u64_add(rack_out_size[idx], 1); 18692 } 18693 } 18694 if ((rack->rack_no_prr == 0) && 18695 sub_from_prr && 18696 (error == 0)) { 18697 if (rack->r_ctl.rc_prr_sndcnt >= len) 18698 rack->r_ctl.rc_prr_sndcnt -= len; 18699 else 18700 rack->r_ctl.rc_prr_sndcnt = 0; 18701 } 18702 sub_from_prr = 0; 18703 if (doing_tlp) { 18704 /* Make sure the TLP is added */ 18705 add_flag |= RACK_TLP; 18706 } else if (rsm) { 18707 /* If its a resend without TLP then it must not have the flag */ 18708 rsm->r_flags &= ~RACK_TLP; 18709 } 18710 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18711 rack_to_usec_ts(&tv), 18712 rsm, add_flag, s_mb, s_moff, hw_tls); 18713 18714 18715 if ((error == 0) && 18716 (len > 0) && 18717 (tp->snd_una == tp->snd_max)) 18718 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18719 { 18720 tcp_seq startseq = tp->snd_nxt; 18721 18722 /* Track our lost count */ 18723 if (rsm && (doing_tlp == 0)) 18724 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18725 /* 18726 * Advance snd_nxt over sequence space of this segment. 18727 */ 18728 if (error) 18729 /* We don't log or do anything with errors */ 18730 goto nomore; 18731 if (doing_tlp == 0) { 18732 if (rsm == NULL) { 18733 /* 18734 * Not a retransmission of some 18735 * sort, new data is going out so 18736 * clear our TLP count and flag. 18737 */ 18738 rack->rc_tlp_in_progress = 0; 18739 rack->r_ctl.rc_tlp_cnt_out = 0; 18740 } 18741 } else { 18742 /* 18743 * We have just sent a TLP, mark that it is true 18744 * and make sure our in progress is set so we 18745 * continue to check the count. 18746 */ 18747 rack->rc_tlp_in_progress = 1; 18748 rack->r_ctl.rc_tlp_cnt_out++; 18749 } 18750 if (flags & (TH_SYN | TH_FIN)) { 18751 if (flags & TH_SYN) 18752 tp->snd_nxt++; 18753 if (flags & TH_FIN) { 18754 tp->snd_nxt++; 18755 tp->t_flags |= TF_SENTFIN; 18756 } 18757 } 18758 /* In the ENOBUFS case we do *not* update snd_max */ 18759 if (sack_rxmit) 18760 goto nomore; 18761 18762 tp->snd_nxt += len; 18763 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18764 if (tp->snd_una == tp->snd_max) { 18765 /* 18766 * Update the time we just added data since 18767 * none was outstanding. 
18768 */ 18769 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18770 tp->t_acktime = ticks; 18771 } 18772 tp->snd_max = tp->snd_nxt; 18773 /* 18774 * Time this transmission if not a retransmission and 18775 * not currently timing anything. 18776 * This is only relevant in case of switching back to 18777 * the base stack. 18778 */ 18779 if (tp->t_rtttime == 0) { 18780 tp->t_rtttime = ticks; 18781 tp->t_rtseq = startseq; 18782 KMOD_TCPSTAT_INC(tcps_segstimed); 18783 } 18784 if (len && 18785 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18786 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18787 } 18788 /* 18789 * If we are doing FO we need to update the mbuf position and subtract 18790 * this happens when the peer sends us duplicate information and 18791 * we thus want to send a DSACK. 18792 * 18793 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18794 * turned off? If not then we are going to echo multiple DSACK blocks 18795 * out (with the TSO), which we should not be doing. 18796 */ 18797 if (rack->r_fast_output && len) { 18798 if (rack->r_ctl.fsb.left_to_send > len) 18799 rack->r_ctl.fsb.left_to_send -= len; 18800 else 18801 rack->r_ctl.fsb.left_to_send = 0; 18802 if (rack->r_ctl.fsb.left_to_send < segsiz) 18803 rack->r_fast_output = 0; 18804 if (rack->r_fast_output) { 18805 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18806 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18807 } 18808 } 18809 } 18810 nomore: 18811 if (error) { 18812 rack->r_ctl.rc_agg_delayed = 0; 18813 rack->r_early = 0; 18814 rack->r_late = 0; 18815 rack->r_ctl.rc_agg_early = 0; 18816 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18817 /* 18818 * Failures do not advance the seq counter above. For the 18819 * case of ENOBUFS we will fall out and retry in 1ms with 18820 * the hpts. Everything else will just have to retransmit 18821 * with the timer. 18822 * 18823 * In any case, we do not want to loop around for another 18824 * send without a good reason. 18825 */ 18826 sendalot = 0; 18827 switch (error) { 18828 case EPERM: 18829 tp->t_softerror = error; 18830 #ifdef TCP_ACCOUNTING 18831 crtsc = get_cyclecount(); 18832 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18833 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18834 } 18835 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18836 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18837 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18838 } 18839 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18840 sched_unpin(); 18841 #endif 18842 return (error); 18843 case ENOBUFS: 18844 /* 18845 * Pace us right away to retry in a some 18846 * time 18847 */ 18848 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18849 if (rack->rc_enobuf < 0x7f) 18850 rack->rc_enobuf++; 18851 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18852 slot = 10 * HPTS_USEC_IN_MSEC; 18853 if (rack->r_ctl.crte != NULL) { 18854 counter_u64_add(rack_saw_enobuf_hw, 1); 18855 tcp_rl_log_enobuf(rack->r_ctl.crte); 18856 } 18857 counter_u64_add(rack_saw_enobuf, 1); 18858 goto enobufs; 18859 case EMSGSIZE: 18860 /* 18861 * For some reason the interface we used initially 18862 * to send segments changed to another or lowered 18863 * its MTU. If TSO was active we either got an 18864 * interface without TSO capabilits or TSO was 18865 * turned off. If we obtained mtu from ip_output() 18866 * then update it and try again. 
18867 */ 18868 if (tso) 18869 tp->t_flags &= ~TF_TSO; 18870 if (mtu != 0) { 18871 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18872 goto again; 18873 } 18874 slot = 10 * HPTS_USEC_IN_MSEC; 18875 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18876 #ifdef TCP_ACCOUNTING 18877 crtsc = get_cyclecount(); 18878 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18879 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18880 } 18881 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18882 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18883 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18884 } 18885 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18886 sched_unpin(); 18887 #endif 18888 return (error); 18889 case ENETUNREACH: 18890 counter_u64_add(rack_saw_enetunreach, 1); 18891 case EHOSTDOWN: 18892 case EHOSTUNREACH: 18893 case ENETDOWN: 18894 if (TCPS_HAVERCVDSYN(tp->t_state)) { 18895 tp->t_softerror = error; 18896 } 18897 /* FALLTHROUGH */ 18898 default: 18899 slot = 10 * HPTS_USEC_IN_MSEC; 18900 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18901 #ifdef TCP_ACCOUNTING 18902 crtsc = get_cyclecount(); 18903 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18904 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18905 } 18906 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18907 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18908 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18909 } 18910 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18911 sched_unpin(); 18912 #endif 18913 return (error); 18914 } 18915 } else { 18916 rack->rc_enobuf = 0; 18917 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18918 rack->r_ctl.retran_during_recovery += len; 18919 } 18920 KMOD_TCPSTAT_INC(tcps_sndtotal); 18921 18922 /* 18923 * Data sent (as far as we can tell). If this advertises a larger 18924 * window than any other segment, then remember the size of the 18925 * advertised window. Any pending ACK has now been sent. 18926 */ 18927 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 18928 tp->rcv_adv = tp->rcv_nxt + recwin; 18929 18930 tp->last_ack_sent = tp->rcv_nxt; 18931 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18932 enobufs: 18933 if (sendalot) { 18934 /* Do we need to turn off sendalot? */ 18935 if (rack->r_ctl.rc_pace_max_segs && 18936 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 18937 /* We hit our max. */ 18938 sendalot = 0; 18939 } else if ((rack->rc_user_set_max_segs) && 18940 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 18941 /* We hit the user defined max */ 18942 sendalot = 0; 18943 } 18944 } 18945 if ((error == 0) && (flags & TH_FIN)) 18946 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 18947 if (flags & TH_RST) { 18948 /* 18949 * We don't send again after sending a RST. 18950 */ 18951 slot = 0; 18952 sendalot = 0; 18953 if (error == 0) 18954 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 18955 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 18956 /* 18957 * Get our pacing rate, if an error 18958 * occurred in sending (ENOBUF) we would 18959 * hit the else if with slot preset. Other 18960 * errors return. 18961 */ 18962 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 18963 } 18964 if (rsm && 18965 (rsm->r_flags & RACK_HAS_SYN) == 0 && 18966 rack->use_rack_rr) { 18967 /* Its a retransmit and we use the rack cheat? 
*/ 18968 if ((slot == 0) || 18969 (rack->rc_always_pace == 0) || 18970 (rack->r_rr_config == 1)) { 18971 /* 18972 * We have no pacing set or we 18973 * are using old-style rack or 18974 * we are overriden to use the old 1ms pacing. 18975 */ 18976 slot = rack->r_ctl.rc_min_to; 18977 } 18978 } 18979 /* We have sent clear the flag */ 18980 rack->r_ent_rec_ns = 0; 18981 if (rack->r_must_retran) { 18982 if (rsm) { 18983 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 18984 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 18985 /* 18986 * We have retransmitted all. 18987 */ 18988 rack->r_must_retran = 0; 18989 rack->r_ctl.rc_out_at_rto = 0; 18990 } 18991 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 18992 /* 18993 * Sending new data will also kill 18994 * the loop. 18995 */ 18996 rack->r_must_retran = 0; 18997 rack->r_ctl.rc_out_at_rto = 0; 18998 } 18999 } 19000 rack->r_ctl.fsb.recwin = recwin; 19001 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19002 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19003 /* 19004 * We hit an RTO and now have past snd_max at the RTO 19005 * clear all the WAS flags. 19006 */ 19007 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19008 } 19009 if (slot) { 19010 /* set the rack tcb into the slot N */ 19011 if ((error == 0) && 19012 rack_use_rfo && 19013 ((flags & (TH_SYN|TH_FIN)) == 0) && 19014 (rsm == NULL) && 19015 (tp->snd_nxt == tp->snd_max) && 19016 (ipoptlen == 0) && 19017 (tp->rcv_numsacks == 0) && 19018 rack->r_fsb_inited && 19019 TCPS_HAVEESTABLISHED(tp->t_state) && 19020 (rack->r_must_retran == 0) && 19021 ((tp->t_flags & TF_NEEDFIN) == 0) && 19022 (len > 0) && (orig_len > 0) && 19023 (orig_len > len) && 19024 ((orig_len - len) >= segsiz) && 19025 ((optlen == 0) || 19026 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19027 /* We can send at least one more MSS using our fsb */ 19028 19029 rack->r_fast_output = 1; 19030 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19031 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19032 rack->r_ctl.fsb.tcp_flags = flags; 19033 rack->r_ctl.fsb.left_to_send = orig_len - len; 19034 if (hw_tls) 19035 rack->r_ctl.fsb.hw_tls = 1; 19036 else 19037 rack->r_ctl.fsb.hw_tls = 0; 19038 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19039 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19040 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19041 (tp->snd_max - tp->snd_una))); 19042 if (rack->r_ctl.fsb.left_to_send < segsiz) 19043 rack->r_fast_output = 0; 19044 else { 19045 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19046 rack->r_ctl.fsb.rfo_apply_push = 1; 19047 else 19048 rack->r_ctl.fsb.rfo_apply_push = 0; 19049 } 19050 } else 19051 rack->r_fast_output = 0; 19052 rack_log_fsb(rack, tp, so, flags, 19053 ipoptlen, orig_len, len, error, 19054 (rsm == NULL), optlen, __LINE__, 2); 19055 } else if (sendalot) { 19056 int ret; 19057 19058 sack_rxmit = 0; 19059 if ((error == 0) && 19060 rack_use_rfo && 19061 ((flags & (TH_SYN|TH_FIN)) == 0) && 19062 (rsm == NULL) && 19063 (ipoptlen == 0) && 19064 (tp->rcv_numsacks == 0) && 19065 (tp->snd_nxt == tp->snd_max) && 19066 (rack->r_must_retran == 0) && 19067 rack->r_fsb_inited && 19068 TCPS_HAVEESTABLISHED(tp->t_state) && 19069 ((tp->t_flags & TF_NEEDFIN) == 0) && 19070 (len > 0) && (orig_len > 0) && 19071 (orig_len > len) && 19072 ((orig_len - len) >= segsiz) && 19073 ((optlen == 0) || 19074 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19075 /* we can use fast_output for more */ 19076 19077 rack->r_fast_output = 1; 19078 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19079 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19080 rack->r_ctl.fsb.tcp_flags = flags; 19081 rack->r_ctl.fsb.left_to_send = orig_len - len; 19082 if (hw_tls) 19083 rack->r_ctl.fsb.hw_tls = 1; 19084 else 19085 rack->r_ctl.fsb.hw_tls = 0; 19086 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19087 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19088 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19089 (tp->snd_max - tp->snd_una))); 19090 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19091 rack->r_fast_output = 0; 19092 } 19093 if (rack->r_fast_output) { 19094 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19095 rack->r_ctl.fsb.rfo_apply_push = 1; 19096 else 19097 rack->r_ctl.fsb.rfo_apply_push = 0; 19098 rack_log_fsb(rack, tp, so, flags, 19099 ipoptlen, orig_len, len, error, 19100 (rsm == NULL), optlen, __LINE__, 3); 19101 error = 0; 19102 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19103 if (ret >= 0) 19104 return (ret); 19105 else if (error) 19106 goto nomore; 19107 19108 } 19109 } 19110 goto again; 19111 } 19112 /* Assure when we leave that snd_nxt will point to top */ 19113 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19114 tp->snd_nxt = tp->snd_max; 19115 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19116 #ifdef TCP_ACCOUNTING 19117 crtsc = get_cyclecount() - ts_val; 19118 if (tot_len_this_send) { 19119 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19120 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19121 } 19122 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19123 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19124 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19125 } 19126 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19127 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19128 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19129 } 19130 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19131 } else { 19132 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19133 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19134 } 19135 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19136 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19137 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19138 } 19139 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19140 } 19141 sched_unpin(); 19142 #endif 19143 if (error == ENOBUFS) 19144 error = 0; 19145 return (error); 19146 } 19147 19148 static void 19149 rack_update_seg(struct tcp_rack *rack) 19150 { 19151 uint32_t orig_val; 19152 19153 orig_val = rack->r_ctl.rc_pace_max_segs; 19154 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19155 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19156 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19157 } 19158 19159 static void 19160 rack_mtu_change(struct tcpcb *tp) 19161 { 19162 /* 19163 * The MSS may have changed 19164 */ 19165 struct tcp_rack *rack; 19166 struct rack_sendmap *rsm; 19167 19168 rack = (struct tcp_rack *)tp->t_fb_ptr; 19169 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19170 /* 19171 * The MTU has changed we need to resend everything 19172 * since all we have sent is lost. We first fix 19173 * up the mtu though. 
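 * This is done by treating the change like a retransmit timeout: the pacing segment sizes are recomputed, every sendmap entry still in flight is flagged RACK_MUST_RXT, and r_must_retran forces those segments out before any new data is sent.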
19174 */ 19175 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19176 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19177 rack_remxt_tmr(tp); 19178 rack->r_fast_output = 0; 19179 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19180 rack->r_ctl.rc_sacked); 19181 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19182 rack->r_must_retran = 1; 19183 /* Mark all inflight to needing to be rxt'd */ 19184 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19185 rsm->r_flags |= RACK_MUST_RXT; 19186 } 19187 } 19188 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19189 /* We don't use snd_nxt to retransmit */ 19190 tp->snd_nxt = tp->snd_max; 19191 } 19192 19193 static int 19194 rack_set_profile(struct tcp_rack *rack, int prof) 19195 { 19196 int err = EINVAL; 19197 if (prof == 1) { 19198 /* pace_always=1 */ 19199 if (rack->rc_always_pace == 0) { 19200 if (tcp_can_enable_pacing() == 0) 19201 return (EBUSY); 19202 } 19203 rack->rc_always_pace = 1; 19204 if (rack->use_fixed_rate || rack->gp_ready) 19205 rack_set_cc_pacing(rack); 19206 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19207 rack->rack_attempt_hdwr_pace = 0; 19208 /* cmpack=1 */ 19209 if (rack_use_cmp_acks) 19210 rack->r_use_cmp_ack = 1; 19211 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19212 rack->r_use_cmp_ack) 19213 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19214 /* scwnd=1 */ 19215 rack->rack_enable_scwnd = 1; 19216 /* dynamic=100 */ 19217 rack->rc_gp_dyn_mul = 1; 19218 /* gp_inc_ca */ 19219 rack->r_ctl.rack_per_of_gp_ca = 100; 19220 /* rrr_conf=3 */ 19221 rack->r_rr_config = 3; 19222 /* npush=2 */ 19223 rack->r_ctl.rc_no_push_at_mrtt = 2; 19224 /* fillcw=1 */ 19225 rack->rc_pace_to_cwnd = 1; 19226 rack->rc_pace_fill_if_rttin_range = 0; 19227 rack->rtt_limit_mul = 0; 19228 /* noprr=1 */ 19229 rack->rack_no_prr = 1; 19230 /* lscwnd=1 */ 19231 rack->r_limit_scw = 1; 19232 /* gp_inc_rec */ 19233 rack->r_ctl.rack_per_of_gp_rec = 90; 19234 err = 0; 19235 19236 } else if (prof == 3) { 19237 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19238 /* pace_always=1 */ 19239 if (rack->rc_always_pace == 0) { 19240 if (tcp_can_enable_pacing() == 0) 19241 return (EBUSY); 19242 } 19243 rack->rc_always_pace = 1; 19244 if (rack->use_fixed_rate || rack->gp_ready) 19245 rack_set_cc_pacing(rack); 19246 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19247 rack->rack_attempt_hdwr_pace = 0; 19248 /* cmpack=1 */ 19249 if (rack_use_cmp_acks) 19250 rack->r_use_cmp_ack = 1; 19251 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19252 rack->r_use_cmp_ack) 19253 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19254 /* scwnd=1 */ 19255 rack->rack_enable_scwnd = 1; 19256 /* dynamic=100 */ 19257 rack->rc_gp_dyn_mul = 1; 19258 /* gp_inc_ca */ 19259 rack->r_ctl.rack_per_of_gp_ca = 100; 19260 /* rrr_conf=3 */ 19261 rack->r_rr_config = 3; 19262 /* npush=2 */ 19263 rack->r_ctl.rc_no_push_at_mrtt = 2; 19264 /* fillcw=2 */ 19265 rack->rc_pace_to_cwnd = 1; 19266 rack->r_fill_less_agg = 1; 19267 rack->rc_pace_fill_if_rttin_range = 0; 19268 rack->rtt_limit_mul = 0; 19269 /* noprr=1 */ 19270 rack->rack_no_prr = 1; 19271 /* lscwnd=1 */ 19272 rack->r_limit_scw = 1; 19273 /* gp_inc_rec */ 19274 rack->r_ctl.rack_per_of_gp_rec = 90; 19275 err = 0; 19276 19277 19278 } else if (prof == 2) { 19279 /* cmpack=1 */ 19280 if (rack->rc_always_pace == 0) { 19281 if (tcp_can_enable_pacing() == 0) 19282 return (EBUSY); 19283 } 19284 rack->rc_always_pace = 1; 19285 if (rack->use_fixed_rate || rack->gp_ready) 19286 
rack_set_cc_pacing(rack); 19287 rack->r_use_cmp_ack = 1; 19288 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19289 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19290 /* pace_always=1 */ 19291 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19292 /* scwnd=1 */ 19293 rack->rack_enable_scwnd = 1; 19294 /* dynamic=100 */ 19295 rack->rc_gp_dyn_mul = 1; 19296 rack->r_ctl.rack_per_of_gp_ca = 100; 19297 /* rrr_conf=3 */ 19298 rack->r_rr_config = 3; 19299 /* npush=2 */ 19300 rack->r_ctl.rc_no_push_at_mrtt = 2; 19301 /* fillcw=1 */ 19302 rack->rc_pace_to_cwnd = 1; 19303 rack->rc_pace_fill_if_rttin_range = 0; 19304 rack->rtt_limit_mul = 0; 19305 /* noprr=1 */ 19306 rack->rack_no_prr = 1; 19307 /* lscwnd=0 */ 19308 rack->r_limit_scw = 0; 19309 err = 0; 19310 } else if (prof == 0) { 19311 /* This changes things back to the default settings */ 19312 err = 0; 19313 if (rack->rc_always_pace) { 19314 tcp_decrement_paced_conn(); 19315 rack_undo_cc_pacing(rack); 19316 rack->rc_always_pace = 0; 19317 } 19318 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19319 rack->rc_always_pace = 1; 19320 if (rack->use_fixed_rate || rack->gp_ready) 19321 rack_set_cc_pacing(rack); 19322 } else 19323 rack->rc_always_pace = 0; 19324 if (rack_dsack_std_based & 0x1) { 19325 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19326 rack->rc_rack_tmr_std_based = 1; 19327 } 19328 if (rack_dsack_std_based & 0x2) { 19329 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19330 rack->rc_rack_use_dsack = 1; 19331 } 19332 if (rack_use_cmp_acks) 19333 rack->r_use_cmp_ack = 1; 19334 else 19335 rack->r_use_cmp_ack = 0; 19336 if (rack_disable_prr) 19337 rack->rack_no_prr = 1; 19338 else 19339 rack->rack_no_prr = 0; 19340 if (rack_gp_no_rec_chg) 19341 rack->rc_gp_no_rec_chg = 1; 19342 else 19343 rack->rc_gp_no_rec_chg = 0; 19344 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19345 rack->r_mbuf_queue = 1; 19346 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19347 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19348 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19349 } else { 19350 rack->r_mbuf_queue = 0; 19351 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19352 } 19353 if (rack_enable_shared_cwnd) 19354 rack->rack_enable_scwnd = 1; 19355 else 19356 rack->rack_enable_scwnd = 0; 19357 if (rack_do_dyn_mul) { 19358 /* When dynamic adjustment is on CA needs to start at 100% */ 19359 rack->rc_gp_dyn_mul = 1; 19360 if (rack_do_dyn_mul >= 100) 19361 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19362 } else { 19363 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19364 rack->rc_gp_dyn_mul = 0; 19365 } 19366 rack->r_rr_config = 0; 19367 rack->r_ctl.rc_no_push_at_mrtt = 0; 19368 rack->rc_pace_to_cwnd = 0; 19369 rack->rc_pace_fill_if_rttin_range = 0; 19370 rack->rtt_limit_mul = 0; 19371 19372 if (rack_enable_hw_pacing) 19373 rack->rack_hdw_pace_ena = 1; 19374 else 19375 rack->rack_hdw_pace_ena = 0; 19376 if (rack_disable_prr) 19377 rack->rack_no_prr = 1; 19378 else 19379 rack->rack_no_prr = 0; 19380 if (rack_limits_scwnd) 19381 rack->r_limit_scw = 1; 19382 else 19383 rack->r_limit_scw = 0; 19384 err = 0; 19385 } 19386 return (err); 19387 } 19388 19389 static int 19390 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19391 { 19392 struct deferred_opt_list *dol; 19393 19394 dol = malloc(sizeof(struct deferred_opt_list), 19395 M_TCPFSB, M_NOWAIT|M_ZERO); 19396 if (dol == NULL) { 19397 /* 19398 * No space yikes -- fail out.. 
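 * Returning 0 here tells the caller the option could not be queued (rack_set_sockopt turns that into ENOMEM); returning 1 means the option now sits on r_ctl.opt_list until rack_apply_deferred_options() replays it, at which point the replayed option's error return is lost, as noted in that function.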
19399 */ 19400 return (0); 19401 } 19402 dol->optname = sopt_name; 19403 dol->optval = loptval; 19404 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19405 return (1); 19406 } 19407 19408 static int 19409 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19410 uint32_t optval, uint64_t loptval) 19411 { 19412 struct epoch_tracker et; 19413 struct sockopt sopt; 19414 struct cc_newreno_opts opt; 19415 uint64_t val; 19416 int error = 0; 19417 uint16_t ca, ss; 19418 19419 switch (sopt_name) { 19420 19421 case TCP_RACK_DSACK_OPT: 19422 RACK_OPTS_INC(tcp_rack_dsack_opt); 19423 if (optval & 0x1) { 19424 rack->rc_rack_tmr_std_based = 1; 19425 } else { 19426 rack->rc_rack_tmr_std_based = 0; 19427 } 19428 if (optval & 0x2) { 19429 rack->rc_rack_use_dsack = 1; 19430 } else { 19431 rack->rc_rack_use_dsack = 0; 19432 } 19433 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19434 break; 19435 case TCP_RACK_PACING_BETA: 19436 RACK_OPTS_INC(tcp_rack_beta); 19437 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19438 /* This only works for newreno. */ 19439 error = EINVAL; 19440 break; 19441 } 19442 if (rack->rc_pacing_cc_set) { 19443 /* 19444 * Set them into the real CC module 19445 * whats in the rack pcb is the old values 19446 * to be used on restoral/ 19447 */ 19448 sopt.sopt_dir = SOPT_SET; 19449 opt.name = CC_NEWRENO_BETA; 19450 opt.val = optval; 19451 if (CC_ALGO(tp)->ctl_output != NULL) 19452 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19453 else { 19454 error = ENOENT; 19455 break; 19456 } 19457 } else { 19458 /* 19459 * Not pacing yet so set it into our local 19460 * rack pcb storage. 19461 */ 19462 rack->r_ctl.rc_saved_beta.beta = optval; 19463 } 19464 break; 19465 case TCP_RACK_TIMER_SLOP: 19466 RACK_OPTS_INC(tcp_rack_timer_slop); 19467 rack->r_ctl.timer_slop = optval; 19468 if (rack->rc_tp->t_srtt) { 19469 /* 19470 * If we have an SRTT lets update t_rxtcur 19471 * to have the new slop. 19472 */ 19473 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19474 rack_rto_min, rack_rto_max, 19475 rack->r_ctl.timer_slop); 19476 } 19477 break; 19478 case TCP_RACK_PACING_BETA_ECN: 19479 RACK_OPTS_INC(tcp_rack_beta_ecn); 19480 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19481 /* This only works for newreno. */ 19482 error = EINVAL; 19483 break; 19484 } 19485 if (rack->rc_pacing_cc_set) { 19486 /* 19487 * Set them into the real CC module 19488 * whats in the rack pcb is the old values 19489 * to be used on restoral/ 19490 */ 19491 sopt.sopt_dir = SOPT_SET; 19492 opt.name = CC_NEWRENO_BETA_ECN; 19493 opt.val = optval; 19494 if (CC_ALGO(tp)->ctl_output != NULL) 19495 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19496 else 19497 error = ENOENT; 19498 } else { 19499 /* 19500 * Not pacing yet so set it into our local 19501 * rack pcb storage. 
19502 */ 19503 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19504 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19505 } 19506 break; 19507 case TCP_DEFER_OPTIONS: 19508 RACK_OPTS_INC(tcp_defer_opt); 19509 if (optval) { 19510 if (rack->gp_ready) { 19511 /* Too late */ 19512 error = EINVAL; 19513 break; 19514 } 19515 rack->defer_options = 1; 19516 } else 19517 rack->defer_options = 0; 19518 break; 19519 case TCP_RACK_MEASURE_CNT: 19520 RACK_OPTS_INC(tcp_rack_measure_cnt); 19521 if (optval && (optval <= 0xff)) { 19522 rack->r_ctl.req_measurements = optval; 19523 } else 19524 error = EINVAL; 19525 break; 19526 case TCP_REC_ABC_VAL: 19527 RACK_OPTS_INC(tcp_rec_abc_val); 19528 if (optval > 0) 19529 rack->r_use_labc_for_rec = 1; 19530 else 19531 rack->r_use_labc_for_rec = 0; 19532 break; 19533 case TCP_RACK_ABC_VAL: 19534 RACK_OPTS_INC(tcp_rack_abc_val); 19535 if ((optval > 0) && (optval < 255)) 19536 rack->rc_labc = optval; 19537 else 19538 error = EINVAL; 19539 break; 19540 case TCP_HDWR_UP_ONLY: 19541 RACK_OPTS_INC(tcp_pacing_up_only); 19542 if (optval) 19543 rack->r_up_only = 1; 19544 else 19545 rack->r_up_only = 0; 19546 break; 19547 case TCP_PACING_RATE_CAP: 19548 RACK_OPTS_INC(tcp_pacing_rate_cap); 19549 rack->r_ctl.bw_rate_cap = loptval; 19550 break; 19551 case TCP_RACK_PROFILE: 19552 RACK_OPTS_INC(tcp_profile); 19553 error = rack_set_profile(rack, optval); 19554 break; 19555 case TCP_USE_CMP_ACKS: 19556 RACK_OPTS_INC(tcp_use_cmp_acks); 19557 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19558 /* You can't turn it off once its on! */ 19559 error = EINVAL; 19560 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19561 rack->r_use_cmp_ack = 1; 19562 rack->r_mbuf_queue = 1; 19563 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19564 } 19565 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19566 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19567 break; 19568 case TCP_SHARED_CWND_TIME_LIMIT: 19569 RACK_OPTS_INC(tcp_lscwnd); 19570 if (optval) 19571 rack->r_limit_scw = 1; 19572 else 19573 rack->r_limit_scw = 0; 19574 break; 19575 case TCP_RACK_PACE_TO_FILL: 19576 RACK_OPTS_INC(tcp_fillcw); 19577 if (optval == 0) 19578 rack->rc_pace_to_cwnd = 0; 19579 else { 19580 rack->rc_pace_to_cwnd = 1; 19581 if (optval > 1) 19582 rack->r_fill_less_agg = 1; 19583 } 19584 if ((optval >= rack_gp_rtt_maxmul) && 19585 rack_gp_rtt_maxmul && 19586 (optval < 0xf)) { 19587 rack->rc_pace_fill_if_rttin_range = 1; 19588 rack->rtt_limit_mul = optval; 19589 } else { 19590 rack->rc_pace_fill_if_rttin_range = 0; 19591 rack->rtt_limit_mul = 0; 19592 } 19593 break; 19594 case TCP_RACK_NO_PUSH_AT_MAX: 19595 RACK_OPTS_INC(tcp_npush); 19596 if (optval == 0) 19597 rack->r_ctl.rc_no_push_at_mrtt = 0; 19598 else if (optval < 0xff) 19599 rack->r_ctl.rc_no_push_at_mrtt = optval; 19600 else 19601 error = EINVAL; 19602 break; 19603 case TCP_SHARED_CWND_ENABLE: 19604 RACK_OPTS_INC(tcp_rack_scwnd); 19605 if (optval == 0) 19606 rack->rack_enable_scwnd = 0; 19607 else 19608 rack->rack_enable_scwnd = 1; 19609 break; 19610 case TCP_RACK_MBUF_QUEUE: 19611 /* Now do we use the LRO mbuf-queue feature */ 19612 RACK_OPTS_INC(tcp_rack_mbufq); 19613 if (optval || rack->r_use_cmp_ack) 19614 rack->r_mbuf_queue = 1; 19615 else 19616 rack->r_mbuf_queue = 0; 19617 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19618 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19619 else 19620 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19621 break; 19622 case 
TCP_RACK_NONRXT_CFG_RATE: 19623 RACK_OPTS_INC(tcp_rack_cfg_rate); 19624 if (optval == 0) 19625 rack->rack_rec_nonrxt_use_cr = 0; 19626 else 19627 rack->rack_rec_nonrxt_use_cr = 1; 19628 break; 19629 case TCP_NO_PRR: 19630 RACK_OPTS_INC(tcp_rack_noprr); 19631 if (optval == 0) 19632 rack->rack_no_prr = 0; 19633 else if (optval == 1) 19634 rack->rack_no_prr = 1; 19635 else if (optval == 2) 19636 rack->no_prr_addback = 1; 19637 else 19638 error = EINVAL; 19639 break; 19640 case TCP_TIMELY_DYN_ADJ: 19641 RACK_OPTS_INC(tcp_timely_dyn); 19642 if (optval == 0) 19643 rack->rc_gp_dyn_mul = 0; 19644 else { 19645 rack->rc_gp_dyn_mul = 1; 19646 if (optval >= 100) { 19647 /* 19648 * If the user sets something 100 or more 19649 * its the gp_ca value. 19650 */ 19651 rack->r_ctl.rack_per_of_gp_ca = optval; 19652 } 19653 } 19654 break; 19655 case TCP_RACK_DO_DETECTION: 19656 RACK_OPTS_INC(tcp_rack_do_detection); 19657 if (optval == 0) 19658 rack->do_detection = 0; 19659 else 19660 rack->do_detection = 1; 19661 break; 19662 case TCP_RACK_TLP_USE: 19663 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19664 error = EINVAL; 19665 break; 19666 } 19667 RACK_OPTS_INC(tcp_tlp_use); 19668 rack->rack_tlp_threshold_use = optval; 19669 break; 19670 case TCP_RACK_TLP_REDUCE: 19671 /* RACK TLP cwnd reduction (bool) */ 19672 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19673 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19674 break; 19675 /* Pacing related ones */ 19676 case TCP_RACK_PACE_ALWAYS: 19677 /* 19678 * zero is old rack method, 1 is new 19679 * method using a pacing rate. 19680 */ 19681 RACK_OPTS_INC(tcp_rack_pace_always); 19682 if (optval > 0) { 19683 if (rack->rc_always_pace) { 19684 error = EALREADY; 19685 break; 19686 } else if (tcp_can_enable_pacing()) { 19687 rack->rc_always_pace = 1; 19688 if (rack->use_fixed_rate || rack->gp_ready) 19689 rack_set_cc_pacing(rack); 19690 } 19691 else { 19692 error = ENOSPC; 19693 break; 19694 } 19695 } else { 19696 if (rack->rc_always_pace) { 19697 tcp_decrement_paced_conn(); 19698 rack->rc_always_pace = 0; 19699 rack_undo_cc_pacing(rack); 19700 } 19701 } 19702 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19703 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19704 else 19705 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19706 /* A rate may be set irate or other, if so set seg size */ 19707 rack_update_seg(rack); 19708 break; 19709 case TCP_BBR_RACK_INIT_RATE: 19710 RACK_OPTS_INC(tcp_initial_rate); 19711 val = optval; 19712 /* Change from kbits per second to bytes per second */ 19713 val *= 1000; 19714 val /= 8; 19715 rack->r_ctl.init_rate = val; 19716 if (rack->rc_init_win != rack_default_init_window) { 19717 uint32_t win, snt; 19718 19719 /* 19720 * Options don't always get applied 19721 * in the order you think. So in order 19722 * to assure we update a cwnd we need 19723 * to check and see if we are still 19724 * where we should raise the cwnd. 
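 * Note the unit conversion above: the option is supplied in kbits per second and stored in bytes per second, so e.g. an optval of 10000 (10 Mbps) is kept as 10000 * 1000 / 8 = 1,250,000 bytes/sec in r_ctl.init_rate.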
19725 */ 19726 win = rc_init_window(rack); 19727 if (SEQ_GT(tp->snd_max, tp->iss)) 19728 snt = tp->snd_max - tp->iss; 19729 else 19730 snt = 0; 19731 if ((snt < win) && 19732 (tp->snd_cwnd < win)) 19733 tp->snd_cwnd = win; 19734 } 19735 if (rack->rc_always_pace) 19736 rack_update_seg(rack); 19737 break; 19738 case TCP_BBR_IWINTSO: 19739 RACK_OPTS_INC(tcp_initial_win); 19740 if (optval && (optval <= 0xff)) { 19741 uint32_t win, snt; 19742 19743 rack->rc_init_win = optval; 19744 win = rc_init_window(rack); 19745 if (SEQ_GT(tp->snd_max, tp->iss)) 19746 snt = tp->snd_max - tp->iss; 19747 else 19748 snt = 0; 19749 if ((snt < win) && 19750 (tp->t_srtt | 19751 #ifdef NETFLIX_PEAKRATE 19752 tp->t_maxpeakrate | 19753 #endif 19754 rack->r_ctl.init_rate)) { 19755 /* 19756 * We are not past the initial window 19757 * and we have some bases for pacing, 19758 * so we need to possibly adjust up 19759 * the cwnd. Note even if we don't set 19760 * the cwnd, its still ok to raise the rc_init_win 19761 * which can be used coming out of idle when we 19762 * would have a rate. 19763 */ 19764 if (tp->snd_cwnd < win) 19765 tp->snd_cwnd = win; 19766 } 19767 if (rack->rc_always_pace) 19768 rack_update_seg(rack); 19769 } else 19770 error = EINVAL; 19771 break; 19772 case TCP_RACK_FORCE_MSEG: 19773 RACK_OPTS_INC(tcp_rack_force_max_seg); 19774 if (optval) 19775 rack->rc_force_max_seg = 1; 19776 else 19777 rack->rc_force_max_seg = 0; 19778 break; 19779 case TCP_RACK_PACE_MAX_SEG: 19780 /* Max segments size in a pace in bytes */ 19781 RACK_OPTS_INC(tcp_rack_max_seg); 19782 rack->rc_user_set_max_segs = optval; 19783 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19784 break; 19785 case TCP_RACK_PACE_RATE_REC: 19786 /* Set the fixed pacing rate in Bytes per second ca */ 19787 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19788 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19789 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19790 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19791 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19792 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19793 rack->use_fixed_rate = 1; 19794 if (rack->rc_always_pace) 19795 rack_set_cc_pacing(rack); 19796 rack_log_pacing_delay_calc(rack, 19797 rack->r_ctl.rc_fixed_pacing_rate_ss, 19798 rack->r_ctl.rc_fixed_pacing_rate_ca, 19799 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19800 __LINE__, NULL,0); 19801 break; 19802 19803 case TCP_RACK_PACE_RATE_SS: 19804 /* Set the fixed pacing rate in Bytes per second ca */ 19805 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19806 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19807 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19808 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19809 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19810 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19811 rack->use_fixed_rate = 1; 19812 if (rack->rc_always_pace) 19813 rack_set_cc_pacing(rack); 19814 rack_log_pacing_delay_calc(rack, 19815 rack->r_ctl.rc_fixed_pacing_rate_ss, 19816 rack->r_ctl.rc_fixed_pacing_rate_ca, 19817 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19818 __LINE__, NULL, 0); 19819 break; 19820 19821 case TCP_RACK_PACE_RATE_CA: 19822 /* Set the fixed pacing rate in Bytes per second ca */ 19823 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19824 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19825 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19826 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19827 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19828 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19829 rack->use_fixed_rate = 1; 19830 
if (rack->rc_always_pace) 19831 rack_set_cc_pacing(rack); 19832 rack_log_pacing_delay_calc(rack, 19833 rack->r_ctl.rc_fixed_pacing_rate_ss, 19834 rack->r_ctl.rc_fixed_pacing_rate_ca, 19835 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19836 __LINE__, NULL, 0); 19837 break; 19838 case TCP_RACK_GP_INCREASE_REC: 19839 RACK_OPTS_INC(tcp_gp_inc_rec); 19840 rack->r_ctl.rack_per_of_gp_rec = optval; 19841 rack_log_pacing_delay_calc(rack, 19842 rack->r_ctl.rack_per_of_gp_ss, 19843 rack->r_ctl.rack_per_of_gp_ca, 19844 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19845 __LINE__, NULL, 0); 19846 break; 19847 case TCP_RACK_GP_INCREASE_CA: 19848 RACK_OPTS_INC(tcp_gp_inc_ca); 19849 ca = optval; 19850 if (ca < 100) { 19851 /* 19852 * We don't allow any reduction 19853 * over the GP b/w. 19854 */ 19855 error = EINVAL; 19856 break; 19857 } 19858 rack->r_ctl.rack_per_of_gp_ca = ca; 19859 rack_log_pacing_delay_calc(rack, 19860 rack->r_ctl.rack_per_of_gp_ss, 19861 rack->r_ctl.rack_per_of_gp_ca, 19862 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19863 __LINE__, NULL, 0); 19864 break; 19865 case TCP_RACK_GP_INCREASE_SS: 19866 RACK_OPTS_INC(tcp_gp_inc_ss); 19867 ss = optval; 19868 if (ss < 100) { 19869 /* 19870 * We don't allow any reduction 19871 * over the GP b/w. 19872 */ 19873 error = EINVAL; 19874 break; 19875 } 19876 rack->r_ctl.rack_per_of_gp_ss = ss; 19877 rack_log_pacing_delay_calc(rack, 19878 rack->r_ctl.rack_per_of_gp_ss, 19879 rack->r_ctl.rack_per_of_gp_ca, 19880 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19881 __LINE__, NULL, 0); 19882 break; 19883 case TCP_RACK_RR_CONF: 19884 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 19885 if (optval && optval <= 3) 19886 rack->r_rr_config = optval; 19887 else 19888 rack->r_rr_config = 0; 19889 break; 19890 case TCP_HDWR_RATE_CAP: 19891 RACK_OPTS_INC(tcp_hdwr_rate_cap); 19892 if (optval) { 19893 if (rack->r_rack_hw_rate_caps == 0) 19894 rack->r_rack_hw_rate_caps = 1; 19895 else 19896 error = EALREADY; 19897 } else { 19898 rack->r_rack_hw_rate_caps = 0; 19899 } 19900 break; 19901 case TCP_BBR_HDWR_PACE: 19902 RACK_OPTS_INC(tcp_hdwr_pacing); 19903 if (optval){ 19904 if (rack->rack_hdrw_pacing == 0) { 19905 rack->rack_hdw_pace_ena = 1; 19906 rack->rack_attempt_hdwr_pace = 0; 19907 } else 19908 error = EALREADY; 19909 } else { 19910 rack->rack_hdw_pace_ena = 0; 19911 #ifdef RATELIMIT 19912 if (rack->r_ctl.crte != NULL) { 19913 rack->rack_hdrw_pacing = 0; 19914 rack->rack_attempt_hdwr_pace = 0; 19915 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 19916 rack->r_ctl.crte = NULL; 19917 } 19918 #endif 19919 } 19920 break; 19921 /* End Pacing related ones */ 19922 case TCP_RACK_PRR_SENDALOT: 19923 /* Allow PRR to send more than one seg */ 19924 RACK_OPTS_INC(tcp_rack_prr_sendalot); 19925 rack->r_ctl.rc_prr_sendalot = optval; 19926 break; 19927 case TCP_RACK_MIN_TO: 19928 /* Minimum time between rack t-o's in ms */ 19929 RACK_OPTS_INC(tcp_rack_min_to); 19930 rack->r_ctl.rc_min_to = optval; 19931 break; 19932 case TCP_RACK_EARLY_SEG: 19933 /* If early recovery max segments */ 19934 RACK_OPTS_INC(tcp_rack_early_seg); 19935 rack->r_ctl.rc_early_recovery_segs = optval; 19936 break; 19937 case TCP_RACK_ENABLE_HYSTART: 19938 { 19939 if (optval) { 19940 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 19941 if (rack_do_hystart > RACK_HYSTART_ON) 19942 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 19943 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 19944 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 19945 } else { 19946 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 19947 } 
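/* Clearing all three CCF_HYSTART_* flags disables HyStart for this connection; the enable path above additionally sets CCF_HYSTART_CAN_SH_CWND and/or CCF_HYSTART_CONS_SSTH depending on the rack_do_hystart setting. */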
19948 } 19949 break; 19950 case TCP_RACK_REORD_THRESH: 19951 /* RACK reorder threshold (shift amount) */ 19952 RACK_OPTS_INC(tcp_rack_reord_thresh); 19953 if ((optval > 0) && (optval < 31)) 19954 rack->r_ctl.rc_reorder_shift = optval; 19955 else 19956 error = EINVAL; 19957 break; 19958 case TCP_RACK_REORD_FADE: 19959 /* Does reordering fade after ms time */ 19960 RACK_OPTS_INC(tcp_rack_reord_fade); 19961 rack->r_ctl.rc_reorder_fade = optval; 19962 break; 19963 case TCP_RACK_TLP_THRESH: 19964 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 19965 RACK_OPTS_INC(tcp_rack_tlp_thresh); 19966 if (optval) 19967 rack->r_ctl.rc_tlp_threshold = optval; 19968 else 19969 error = EINVAL; 19970 break; 19971 case TCP_BBR_USE_RACK_RR: 19972 RACK_OPTS_INC(tcp_rack_rr); 19973 if (optval) 19974 rack->use_rack_rr = 1; 19975 else 19976 rack->use_rack_rr = 0; 19977 break; 19978 case TCP_FAST_RSM_HACK: 19979 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 19980 if (optval) 19981 rack->fast_rsm_hack = 1; 19982 else 19983 rack->fast_rsm_hack = 0; 19984 break; 19985 case TCP_RACK_PKT_DELAY: 19986 /* RACK added ms i.e. rack-rtt + reord + N */ 19987 RACK_OPTS_INC(tcp_rack_pkt_delay); 19988 rack->r_ctl.rc_pkt_delay = optval; 19989 break; 19990 case TCP_DELACK: 19991 RACK_OPTS_INC(tcp_rack_delayed_ack); 19992 if (optval == 0) 19993 tp->t_delayed_ack = 0; 19994 else 19995 tp->t_delayed_ack = 1; 19996 if (tp->t_flags & TF_DELACK) { 19997 tp->t_flags &= ~TF_DELACK; 19998 tp->t_flags |= TF_ACKNOW; 19999 NET_EPOCH_ENTER(et); 20000 rack_output(tp); 20001 NET_EPOCH_EXIT(et); 20002 } 20003 break; 20004 20005 case TCP_BBR_RACK_RTT_USE: 20006 RACK_OPTS_INC(tcp_rack_rtt_use); 20007 if ((optval != USE_RTT_HIGH) && 20008 (optval != USE_RTT_LOW) && 20009 (optval != USE_RTT_AVG)) 20010 error = EINVAL; 20011 else 20012 rack->r_ctl.rc_rate_sample_method = optval; 20013 break; 20014 case TCP_DATA_AFTER_CLOSE: 20015 RACK_OPTS_INC(tcp_data_after_close); 20016 if (optval) 20017 rack->rc_allow_data_af_clo = 1; 20018 else 20019 rack->rc_allow_data_af_clo = 0; 20020 break; 20021 default: 20022 break; 20023 } 20024 #ifdef NETFLIX_STATS 20025 tcp_log_socket_option(tp, sopt_name, optval, error); 20026 #endif 20027 return (error); 20028 } 20029 20030 20031 static void 20032 rack_apply_deferred_options(struct tcp_rack *rack) 20033 { 20034 struct deferred_opt_list *dol, *sdol; 20035 uint32_t s_optval; 20036 20037 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20038 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20039 /* Disadvantage of deferral is you lose the error return */ 20040 s_optval = (uint32_t)dol->optval; 20041 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20042 free(dol, M_TCPDO); 20043 } 20044 } 20045 20046 static void 20047 rack_hw_tls_change(struct tcpcb *tp, int chg) 20048 { 20049 /* 20050 * HW tls state has changed -- fix all 20051 * rsm's in flight.
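 * Both the per-segment r_hw_tls markers and the fast-send-block template are updated here so that retransmissions and future fast-path sends agree with the socket's current hardware TLS state.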
20052 */ 20053 struct tcp_rack *rack; 20054 struct rack_sendmap *rsm; 20055 20056 rack = (struct tcp_rack *)tp->t_fb_ptr; 20057 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20058 if (chg) 20059 rsm->r_hw_tls = 1; 20060 else 20061 rsm->r_hw_tls = 0; 20062 } 20063 if (chg) 20064 rack->r_ctl.fsb.hw_tls = 1; 20065 else 20066 rack->r_ctl.fsb.hw_tls = 0; 20067 } 20068 20069 static int 20070 rack_pru_options(struct tcpcb *tp, int flags) 20071 { 20072 if (flags & PRUS_OOB) 20073 return (EOPNOTSUPP); 20074 return (0); 20075 } 20076 20077 static struct tcp_function_block __tcp_rack = { 20078 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20079 .tfb_tcp_output = rack_output, 20080 .tfb_do_queued_segments = ctf_do_queued_segments, 20081 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20082 .tfb_tcp_do_segment = rack_do_segment, 20083 .tfb_tcp_ctloutput = rack_ctloutput, 20084 .tfb_tcp_fb_init = rack_init, 20085 .tfb_tcp_fb_fini = rack_fini, 20086 .tfb_tcp_timer_stop_all = rack_stopall, 20087 .tfb_tcp_timer_activate = rack_timer_activate, 20088 .tfb_tcp_timer_active = rack_timer_active, 20089 .tfb_tcp_timer_stop = rack_timer_stop, 20090 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20091 .tfb_tcp_handoff_ok = rack_handoff_ok, 20092 .tfb_tcp_mtu_chg = rack_mtu_change, 20093 .tfb_pru_options = rack_pru_options, 20094 .tfb_hwtls_change = rack_hw_tls_change, 20095 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 20096 }; 20097 20098 /* 20099 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20100 * socket option arguments. When it re-acquires the lock after the copy, it 20101 * has to revalidate that the connection is still valid for the socket 20102 * option. 20103 */ 20104 static int 20105 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt) 20106 { 20107 #ifdef INET6 20108 struct ip6_hdr *ip6; 20109 #endif 20110 #ifdef INET 20111 struct ip *ip; 20112 #endif 20113 struct tcpcb *tp; 20114 struct tcp_rack *rack; 20115 uint64_t loptval; 20116 int32_t error = 0, optval; 20117 20118 tp = intotcpcb(inp); 20119 rack = (struct tcp_rack *)tp->t_fb_ptr; 20120 if (rack == NULL) { 20121 INP_WUNLOCK(inp); 20122 return (EINVAL); 20123 } 20124 #ifdef INET6 20125 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20126 #endif 20127 #ifdef INET 20128 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20129 #endif 20130 20131 switch (sopt->sopt_level) { 20132 #ifdef INET6 20133 case IPPROTO_IPV6: 20134 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20135 switch (sopt->sopt_name) { 20136 case IPV6_USE_MIN_MTU: 20137 tcp6_use_min_mtu(tp); 20138 break; 20139 case IPV6_TCLASS: 20140 /* 20141 * The DSCP codepoint has changed, update the fsb. 20142 */ 20143 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20144 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20145 break; 20146 } 20147 INP_WUNLOCK(inp); 20148 return (0); 20149 #endif 20150 #ifdef INET 20151 case IPPROTO_IP: 20152 switch (sopt->sopt_name) { 20153 case IP_TOS: 20154 /* 20155 * The DSCP codepoint has changed, update the fsb. 20156 */ 20157 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20158 break; 20159 case IP_TTL: 20160 /* 20161 * The TTL has changed, update the fsb. 
20162 */ 20163 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20164 break; 20165 } 20166 INP_WUNLOCK(inp); 20167 return (0); 20168 #endif 20169 } 20170 20171 switch (sopt->sopt_name) { 20172 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20173 /* Pacing related ones */ 20174 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20175 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20176 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20177 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20178 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20179 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20180 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20181 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20182 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20183 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20184 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20185 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20186 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20187 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20188 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20189 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20190 /* End pacing related */ 20191 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20192 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20193 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20194 case TCP_RACK_MIN_TO: /* URL:min_to */ 20195 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20196 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20197 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20198 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20199 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20200 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20201 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20202 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20203 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20204 case TCP_NO_PRR: /* URL:noprr */ 20205 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20206 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20207 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20208 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20209 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20210 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20211 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20212 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20213 case TCP_RACK_PROFILE: /* URL:profile */ 20214 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20215 case TCP_RACK_ABC_VAL: /* URL:labc */ 20216 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20217 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20218 case TCP_DEFER_OPTIONS: /* URL:defer */ 20219 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20220 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20221 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20222 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20223 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20224 break; 20225 default: 20226 /* Filter off all unknown options to the base stack */ 20227 return (tcp_default_ctloutput(inp, sopt)); 20228 break; 20229 } 20230 INP_WUNLOCK(inp); 20231 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20232 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20233 /* 20234 * We truncate it down to 32 bits for the socket-option trace this 20235 * means rates > 34Gbps won't show right, but thats probably ok. 
20236 */ 20237 optval = (uint32_t)loptval; 20238 } else { 20239 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20240 /* Save it in 64 bit form too */ 20241 loptval = optval; 20242 } 20243 if (error) 20244 return (error); 20245 INP_WLOCK(inp); 20246 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20247 INP_WUNLOCK(inp); 20248 return (ECONNRESET); 20249 } 20250 if (tp->t_fb != &__tcp_rack) { 20251 INP_WUNLOCK(inp); 20252 return (ENOPROTOOPT); 20253 } 20254 if (rack->defer_options && (rack->gp_ready == 0) && 20255 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20256 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20257 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20258 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20259 /* Options are beind deferred */ 20260 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20261 INP_WUNLOCK(inp); 20262 return (0); 20263 } else { 20264 /* No memory to defer, fail */ 20265 INP_WUNLOCK(inp); 20266 return (ENOMEM); 20267 } 20268 } 20269 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20270 INP_WUNLOCK(inp); 20271 return (error); 20272 } 20273 20274 static void 20275 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20276 { 20277 20278 INP_WLOCK_ASSERT(tp->t_inpcb); 20279 bzero(ti, sizeof(*ti)); 20280 20281 ti->tcpi_state = tp->t_state; 20282 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20283 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20284 if (tp->t_flags & TF_SACK_PERMIT) 20285 ti->tcpi_options |= TCPI_OPT_SACK; 20286 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20287 ti->tcpi_options |= TCPI_OPT_WSCALE; 20288 ti->tcpi_snd_wscale = tp->snd_scale; 20289 ti->tcpi_rcv_wscale = tp->rcv_scale; 20290 } 20291 if (tp->t_flags2 & TF2_ECN_PERMIT) 20292 ti->tcpi_options |= TCPI_OPT_ECN; 20293 if (tp->t_flags & TF_FASTOPEN) 20294 ti->tcpi_options |= TCPI_OPT_TFO; 20295 /* still kept in ticks is t_rcvtime */ 20296 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20297 /* Since we hold everything in precise useconds this is easy */ 20298 ti->tcpi_rtt = tp->t_srtt; 20299 ti->tcpi_rttvar = tp->t_rttvar; 20300 ti->tcpi_rto = tp->t_rxtcur; 20301 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20302 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20303 /* 20304 * FreeBSD-specific extension fields for tcp_info. 20305 */ 20306 ti->tcpi_rcv_space = tp->rcv_wnd; 20307 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20308 ti->tcpi_snd_wnd = tp->snd_wnd; 20309 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
static void
rack_fill_info(struct tcpcb *tp, struct tcp_info *ti)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);
	bzero(ti, sizeof(*ti));

	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & TF2_ECN_PERMIT)
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* t_rcvtime is still kept in ticks */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* Since we hold everything in precise useconds this is easy */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

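/*
 * Illustrative sketch (not part of the stack): the structure filled in
 * above is what a userland caller sees when it queries TCP_INFO with
 * getsockopt(2).  The socket "fd" is an assumption for the example.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt %u us, cwnd %u bytes\n",
 *		    ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 *
 * This stack keeps its RTT state in precise useconds, so tcpi_rtt and
 * tcpi_rttvar are reported in microseconds.
 */
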
static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences
	 * how much of a backoff happens when an ECN mark is detected. It is
	 * normally set to 80 for 80%, i.e. the cwnd is reduced by 20% of its
	 * previous value when you exit recovery. Note that classic ECN uses a
	 * beta of 50; it is only ABE ECN that uses this milder value, but we
	 * do too when pacing.
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		/* 0x1 == std-based timer, 0x2 == make use of DSACK */
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->ccv->flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
		break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		/* Only TCP_PACING_RATE_CAP is copied out as a 64-bit value */
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

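/*
 * Illustrative sketch (not part of the stack): reading RACK state back
 * through the get path above.  As with the set path, only
 * TCP_PACING_RATE_CAP is a 64-bit value; everything else is an int.
 * The socket "fd" is an assumption for the example.
 *
 *	int dsack;
 *	uint64_t cap;
 *	socklen_t len;
 *
 *	len = sizeof(dsack);
 *	getsockopt(fd, IPPROTO_TCP, TCP_RACK_DSACK_OPT, &dsack, &len);
 *	// dsack & 0x1 -> standards-based RACK timer, dsack & 0x2 -> DSACK use
 *
 *	len = sizeof(cap);
 *	getsockopt(fd, IPPROTO_TCP, TCP_PACING_RATE_CAP, &cap, &len);
 *	// cap is the pacing-rate cap in bytes per second
 *
 * TCP_RACK_PROFILE is intentionally write-only and returns EINVAL here.
 */
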
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir $%d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
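
/*
 * Illustrative usage notes (assumptions, not derived from this file):
 * once the kernel module built from this source is loaded, the stack is
 * selected through the standard FreeBSD TCP function-block machinery,
 * e.g. from the command line:
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or per socket via TCP_FUNCTION_BLK:
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *
 * The name "rack" assumes the default STACKNAME; the RACK-specific socket
 * options handled above only apply to connections running on this stack.
 */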