/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving, which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named), which
 *   stops us using the number of dup acks and instead
 *   uses time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
185 * 186 */ 187 static int32_t rack_tlp_thresh = 1; 188 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 189 static int32_t rack_tlp_use_greater = 1; 190 static int32_t rack_reorder_thresh = 2; 191 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 192 * - 60 seconds */ 193 static uint8_t rack_req_measurements = 1; 194 /* Attack threshold detections */ 195 static uint32_t rack_highest_sack_thresh_seen = 0; 196 static uint32_t rack_highest_move_thresh_seen = 0; 197 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 198 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */ 199 static int32_t rack_hw_rate_caps = 1; /* 1; */ 200 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 201 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 202 static int32_t rack_hw_up_only = 1; 203 static int32_t rack_stats_gets_ms_rtt = 1; 204 static int32_t rack_prr_addbackmax = 2; 205 static int32_t rack_do_hystart = 0; 206 static int32_t rack_apply_rtt_with_reduced_conf = 0; 207 208 static int32_t rack_pkt_delay = 1000; 209 static int32_t rack_send_a_lot_in_prr = 1; 210 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 211 static int32_t rack_verbose_logging = 0; 212 static int32_t rack_ignore_data_after_close = 1; 213 static int32_t rack_enable_shared_cwnd = 1; 214 static int32_t rack_use_cmp_acks = 1; 215 static int32_t rack_use_fsb = 1; 216 static int32_t rack_use_rfo = 1; 217 static int32_t rack_use_rsm_rfo = 1; 218 static int32_t rack_max_abc_post_recovery = 2; 219 static int32_t rack_client_low_buf = 0; 220 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 221 #ifdef TCP_ACCOUNTING 222 static int32_t rack_tcp_accounting = 0; 223 #endif 224 static int32_t rack_limits_scwnd = 1; 225 static int32_t rack_enable_mqueue_for_nonpaced = 0; 226 static int32_t rack_disable_prr = 0; 227 static int32_t use_rack_rr = 1; 228 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 229 static int32_t rack_persist_min = 250000; /* 250usec */ 230 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 231 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 232 static int32_t rack_default_init_window = 0; /* Use system default */ 233 static int32_t rack_limit_time_with_srtt = 0; 234 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 235 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */ 236 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 237 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 238 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 239 240 /* 241 * Currently regular tcp has a rto_min of 30ms 242 * the backoff goes 12 times so that ends up 243 * being a total of 122.850 seconds before a 244 * connection is killed. 
245 */ 246 static uint32_t rack_def_data_window = 20; 247 static uint32_t rack_goal_bdp = 2; 248 static uint32_t rack_min_srtts = 1; 249 static uint32_t rack_min_measure_usec = 0; 250 static int32_t rack_tlp_min = 10000; /* 10ms */ 251 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 252 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 253 static const int32_t rack_free_cache = 2; 254 static int32_t rack_hptsi_segments = 40; 255 static int32_t rack_rate_sample_method = USE_RTT_LOW; 256 static int32_t rack_pace_every_seg = 0; 257 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 258 static int32_t rack_slot_reduction = 4; 259 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 260 static int32_t rack_cwnd_block_ends_measure = 0; 261 static int32_t rack_rwnd_block_ends_measure = 0; 262 static int32_t rack_def_profile = 0; 263 264 static int32_t rack_lower_cwnd_at_tlp = 0; 265 static int32_t rack_limited_retran = 0; 266 static int32_t rack_always_send_oldest = 0; 267 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 268 269 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 270 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 271 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 272 273 /* Probertt */ 274 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 275 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 276 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 277 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 278 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 279 280 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 281 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 282 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 283 static uint32_t rack_probertt_use_min_rtt_exit = 0; 284 static uint32_t rack_probe_rtt_sets_cwnd = 0; 285 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 286 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 287 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 288 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 289 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 290 static uint32_t rack_probertt_filter_life = 10000000; 291 static uint32_t rack_probertt_lower_within = 10; 292 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 293 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 294 static int32_t rack_probertt_clear_is = 1; 295 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 296 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 297 298 /* Part of pacing */ 299 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 300 301 /* Timely information */ 302 /* Combine these two gives the range of 'no change' to bw */ 303 /* ie the up/down provide the upper and lower bound */ 304 static int32_t 
rack_gp_per_bw_mul_up = 2; /* 2% */ 305 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 306 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 307 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 308 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 309 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */ 310 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 311 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 312 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 313 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 314 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 315 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 316 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 317 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 318 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 319 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 320 static int32_t rack_use_max_for_nobackoff = 0; 321 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 322 static int32_t rack_timely_no_stopping = 0; 323 static int32_t rack_down_raise_thresh = 100; 324 static int32_t rack_req_segs = 1; 325 static uint64_t rack_bw_rate_cap = 0; 326 static uint32_t rack_trace_point_config = 0; 327 static uint32_t rack_trace_point_bb_mode = 4; 328 static int32_t rack_trace_point_count = 0; 329 330 331 /* Weird delayed ack mode */ 332 static int32_t rack_use_imac_dack = 0; 333 /* Rack specific counters */ 334 counter_u64_t rack_saw_enobuf; 335 counter_u64_t rack_saw_enobuf_hw; 336 counter_u64_t rack_saw_enetunreach; 337 counter_u64_t rack_persists_sends; 338 counter_u64_t rack_persists_acks; 339 counter_u64_t rack_persists_loss; 340 counter_u64_t rack_persists_lost_ends; 341 #ifdef INVARIANTS 342 counter_u64_t rack_adjust_map_bw; 343 #endif 344 /* Tail loss probe counters */ 345 counter_u64_t rack_tlp_tot; 346 counter_u64_t rack_tlp_newdata; 347 counter_u64_t rack_tlp_retran; 348 counter_u64_t rack_tlp_retran_bytes; 349 counter_u64_t rack_to_tot; 350 counter_u64_t rack_hot_alloc; 351 counter_u64_t rack_to_alloc; 352 counter_u64_t rack_to_alloc_hard; 353 counter_u64_t rack_to_alloc_emerg; 354 counter_u64_t rack_to_alloc_limited; 355 counter_u64_t rack_alloc_limited_conns; 356 counter_u64_t rack_split_limited; 357 358 counter_u64_t rack_multi_single_eq; 359 counter_u64_t rack_proc_non_comp_ack; 360 361 counter_u64_t rack_fto_send; 362 counter_u64_t rack_fto_rsm_send; 363 counter_u64_t rack_nfto_resend; 364 counter_u64_t rack_non_fto_send; 365 counter_u64_t rack_extended_rfo; 366 367 counter_u64_t rack_sack_proc_all; 368 counter_u64_t rack_sack_proc_short; 369 counter_u64_t rack_sack_proc_restart; 370 counter_u64_t rack_sack_attacks_detected; 371 counter_u64_t rack_sack_attacks_reversed; 372 counter_u64_t rack_sack_used_next_merge; 373 counter_u64_t rack_sack_splits; 374 counter_u64_t rack_sack_used_prev_merge; 375 counter_u64_t rack_sack_skipped_acked; 376 counter_u64_t rack_ack_total; 377 counter_u64_t rack_express_sack; 378 counter_u64_t rack_sack_total; 379 counter_u64_t rack_move_none; 380 counter_u64_t rack_move_some; 381 382 counter_u64_t 
rack_input_idle_reduces; 383 counter_u64_t rack_collapsed_win; 384 counter_u64_t rack_collapsed_win_seen; 385 counter_u64_t rack_collapsed_win_rxt; 386 counter_u64_t rack_collapsed_win_rxt_bytes; 387 counter_u64_t rack_try_scwnd; 388 counter_u64_t rack_hw_pace_init_fail; 389 counter_u64_t rack_hw_pace_lost; 390 391 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 392 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 393 394 395 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 396 397 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 398 (tv) = (value) + slop; \ 399 if ((u_long)(tv) < (u_long)(tvmin)) \ 400 (tv) = (tvmin); \ 401 if ((u_long)(tv) > (u_long)(tvmax)) \ 402 (tv) = (tvmax); \ 403 } while (0) 404 405 static void 406 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 407 408 static int 409 rack_process_ack(struct mbuf *m, struct tcphdr *th, 410 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 411 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 412 static int 413 rack_process_data(struct mbuf *m, struct tcphdr *th, 414 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 415 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 416 static void 417 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 418 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 419 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 420 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 421 uint8_t limit_type); 422 static struct rack_sendmap * 423 rack_check_recovery_mode(struct tcpcb *tp, 424 uint32_t tsused); 425 static void 426 rack_cong_signal(struct tcpcb *tp, 427 uint32_t type, uint32_t ack, int ); 428 static void rack_counter_destroy(void); 429 static int 430 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt); 431 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 432 static void 433 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 434 static void 435 rack_do_segment(struct mbuf *m, struct tcphdr *th, 436 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 437 uint8_t iptos); 438 static void rack_dtor(void *mem, int32_t size, void *arg); 439 static void 440 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 441 uint32_t flex1, uint32_t flex2, 442 uint32_t flex3, uint32_t flex4, 443 uint32_t flex5, uint32_t flex6, 444 uint16_t flex7, uint8_t mod); 445 446 static void 447 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 448 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 449 struct rack_sendmap *rsm, uint8_t quality); 450 static struct rack_sendmap * 451 rack_find_high_nonack(struct tcp_rack *rack, 452 struct rack_sendmap *rsm); 453 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 454 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 455 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 456 static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt); 457 static void 458 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 459 tcp_seq th_ack, int line, uint8_t quality); 460 static uint32_t 461 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 462 static int32_t rack_handoff_ok(struct tcpcb *tp); 463 static int32_t rack_init(struct tcpcb *tp); 
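
/*
 * A worked example of the RACK_REXMTVAL() and RACK_TCPT_RANGESET() macros
 * above (illustrative only; the call below is hypothetical and not a use
 * made at this point in the file):  RACK_REXMTVAL() takes the larger of
 * rack_rto_min and (t_srtt + 4 * t_rttvar), and RACK_TCPT_RANGESET()
 * sets tv to (value + slop) and then clamps it into [tvmin, tvmax].
 * With the defaults declared above, rack_rto_min = 30000 usec and
 * rack_rto_max = 4000000 usec, a combined use such as
 *
 *	uint32_t tov;
 *	RACK_TCPT_RANGESET(tov, RACK_REXMTVAL(tp), rack_rto_min, rack_rto_max, 0);
 *
 * would raise a 20000 usec result up to 30000 and cap a 9000000 usec
 * result at 4000000.
 */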
464 static void rack_init_sysctls(void); 465 static void 466 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 467 struct tcphdr *th, int entered_rec, int dup_ack_struck); 468 static void 469 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 470 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 471 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 472 473 static void 474 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 475 struct rack_sendmap *rsm); 476 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 477 static int32_t rack_output(struct tcpcb *tp); 478 479 static uint32_t 480 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 481 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 482 uint32_t cts, int *moved_two); 483 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 484 static void rack_remxt_tmr(struct tcpcb *tp); 485 static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt); 486 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 487 static int32_t rack_stopall(struct tcpcb *tp); 488 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 489 static uint32_t 490 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 491 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 492 static void 493 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 494 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 495 static int 496 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 497 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 498 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 499 static int 500 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 501 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 502 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 503 static int 504 rack_do_closing(struct mbuf *m, struct tcphdr *th, 505 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 506 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 507 static int 508 rack_do_established(struct mbuf *m, struct tcphdr *th, 509 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 510 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 511 static int 512 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 513 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 514 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 515 static int 516 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 517 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 518 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 519 static int 520 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 521 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 522 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 523 static int 524 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 525 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 526 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 527 static int 528 rack_do_syn_recv(struct mbuf *m, 
struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static inline void
rack_trace_point(struct tcp_rack *rack, int num)
{
	if (((rack_trace_point_config == num) ||
	     (rack_trace_point_config == 0xffffffff)) &&
	    (rack_trace_point_bb_mode != 0) &&
	    (rack_trace_point_count > 0) &&
	    (rack->rc_tp->t_logstate == 0)) {
		int res;
		res = atomic_fetchadd_int(&rack_trace_point_count, -1);
		if (res > 0) {
			rack->rc_tp->t_logstate = rack_trace_point_bb_mode;
		} else {
			/* Lost a race, assure it is zero now */
			rack_trace_point_count = 0;
		}
	}
}

static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old;
	struct tcpcb *tp;
	int error, failed = 0;

	tp = rack->rc_tp;
	if (tp->t_cc == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, we can't play games with beta! */
		failed = 1;
		goto out;

	}
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, no ctl_output support on this CC module, so no beta swaps?
*/ 591 failed = 2; 592 goto out; 593 } 594 /* Get the current values out */ 595 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 596 sopt.sopt_dir = SOPT_GET; 597 opt.name = CC_NEWRENO_BETA; 598 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 599 if (error) { 600 failed = 3; 601 goto out; 602 } 603 old.beta = opt.val; 604 opt.name = CC_NEWRENO_BETA_ECN; 605 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 606 if (error) { 607 failed = 4; 608 goto out; 609 } 610 old.beta_ecn = opt.val; 611 612 /* Now lets set in the values we have stored */ 613 sopt.sopt_dir = SOPT_SET; 614 opt.name = CC_NEWRENO_BETA; 615 opt.val = rack->r_ctl.rc_saved_beta.beta; 616 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 617 if (error) { 618 failed = 5; 619 goto out; 620 } 621 opt.name = CC_NEWRENO_BETA_ECN; 622 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 623 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 624 if (error) { 625 failed = 6; 626 goto out; 627 } 628 /* Save off the values for restoral */ 629 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 630 out: 631 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 632 union tcp_log_stackspecific log; 633 struct timeval tv; 634 struct newreno *ptr; 635 636 ptr = ((struct newreno *)tp->t_ccv.cc_data); 637 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 638 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 639 log.u_bbr.flex1 = ptr->beta; 640 log.u_bbr.flex2 = ptr->beta_ecn; 641 log.u_bbr.flex3 = ptr->newreno_flags; 642 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 643 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 644 log.u_bbr.flex6 = failed; 645 log.u_bbr.flex7 = rack->gp_ready; 646 log.u_bbr.flex7 <<= 1; 647 log.u_bbr.flex7 |= rack->use_fixed_rate; 648 log.u_bbr.flex7 <<= 1; 649 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 650 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 651 log.u_bbr.flex8 = flex8; 652 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 653 0, &log, false, NULL, NULL, 0, &tv); 654 } 655 } 656 657 static void 658 rack_set_cc_pacing(struct tcp_rack *rack) 659 { 660 if (rack->rc_pacing_cc_set) 661 return; 662 /* 663 * Use the swap utility placing in 3 for flex8 to id a 664 * set of a new set of values. 665 */ 666 rack->rc_pacing_cc_set = 1; 667 rack_swap_beta_values(rack, 3); 668 } 669 670 static void 671 rack_undo_cc_pacing(struct tcp_rack *rack) 672 { 673 if (rack->rc_pacing_cc_set == 0) 674 return; 675 /* 676 * Use the swap utility placing in 4 for flex8 to id a 677 * restoral of the old values. 678 */ 679 rack->rc_pacing_cc_set = 0; 680 rack_swap_beta_values(rack, 4); 681 } 682 683 #ifdef NETFLIX_PEAKRATE 684 static inline void 685 rack_update_peakrate_thr(struct tcpcb *tp) 686 { 687 /* Keep in mind that t_maxpeakrate is in B/s. 
*/ 688 uint64_t peak; 689 peak = uqmax((tp->t_maxseg * 2), 690 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 691 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 692 } 693 #endif 694 695 static int 696 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 697 { 698 uint32_t stat; 699 int32_t error; 700 701 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 702 if (error || req->newptr == NULL) 703 return error; 704 705 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 706 if (error) 707 return (error); 708 if (stat == 1) { 709 #ifdef INVARIANTS 710 printf("Clearing RACK counters\n"); 711 #endif 712 counter_u64_zero(rack_tlp_tot); 713 counter_u64_zero(rack_tlp_newdata); 714 counter_u64_zero(rack_tlp_retran); 715 counter_u64_zero(rack_tlp_retran_bytes); 716 counter_u64_zero(rack_to_tot); 717 counter_u64_zero(rack_saw_enobuf); 718 counter_u64_zero(rack_saw_enobuf_hw); 719 counter_u64_zero(rack_saw_enetunreach); 720 counter_u64_zero(rack_persists_sends); 721 counter_u64_zero(rack_persists_acks); 722 counter_u64_zero(rack_persists_loss); 723 counter_u64_zero(rack_persists_lost_ends); 724 #ifdef INVARIANTS 725 counter_u64_zero(rack_adjust_map_bw); 726 #endif 727 counter_u64_zero(rack_to_alloc_hard); 728 counter_u64_zero(rack_to_alloc_emerg); 729 counter_u64_zero(rack_sack_proc_all); 730 counter_u64_zero(rack_fto_send); 731 counter_u64_zero(rack_fto_rsm_send); 732 counter_u64_zero(rack_extended_rfo); 733 counter_u64_zero(rack_hw_pace_init_fail); 734 counter_u64_zero(rack_hw_pace_lost); 735 counter_u64_zero(rack_non_fto_send); 736 counter_u64_zero(rack_nfto_resend); 737 counter_u64_zero(rack_sack_proc_short); 738 counter_u64_zero(rack_sack_proc_restart); 739 counter_u64_zero(rack_to_alloc); 740 counter_u64_zero(rack_to_alloc_limited); 741 counter_u64_zero(rack_alloc_limited_conns); 742 counter_u64_zero(rack_split_limited); 743 counter_u64_zero(rack_multi_single_eq); 744 counter_u64_zero(rack_proc_non_comp_ack); 745 counter_u64_zero(rack_sack_attacks_detected); 746 counter_u64_zero(rack_sack_attacks_reversed); 747 counter_u64_zero(rack_sack_used_next_merge); 748 counter_u64_zero(rack_sack_used_prev_merge); 749 counter_u64_zero(rack_sack_splits); 750 counter_u64_zero(rack_sack_skipped_acked); 751 counter_u64_zero(rack_ack_total); 752 counter_u64_zero(rack_express_sack); 753 counter_u64_zero(rack_sack_total); 754 counter_u64_zero(rack_move_none); 755 counter_u64_zero(rack_move_some); 756 counter_u64_zero(rack_try_scwnd); 757 counter_u64_zero(rack_collapsed_win); 758 counter_u64_zero(rack_collapsed_win_rxt); 759 counter_u64_zero(rack_collapsed_win_seen); 760 counter_u64_zero(rack_collapsed_win_rxt_bytes); 761 } 762 rack_clear_counter = 0; 763 return (0); 764 } 765 766 static void 767 rack_init_sysctls(void) 768 { 769 struct sysctl_oid *rack_counters; 770 struct sysctl_oid *rack_attack; 771 struct sysctl_oid *rack_pacing; 772 struct sysctl_oid *rack_timely; 773 struct sysctl_oid *rack_timers; 774 struct sysctl_oid *rack_tlp; 775 struct sysctl_oid *rack_misc; 776 struct sysctl_oid *rack_features; 777 struct sysctl_oid *rack_measure; 778 struct sysctl_oid *rack_probertt; 779 struct sysctl_oid *rack_hw_pacing; 780 struct sysctl_oid *rack_tracepoint; 781 782 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 783 SYSCTL_CHILDREN(rack_sysctl_root), 784 OID_AUTO, 785 "sack_attack", 786 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 787 "Rack Sack Attack Counters and Controls"); 788 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 789 SYSCTL_CHILDREN(rack_sysctl_root), 790 
OID_AUTO, 791 "stats", 792 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 793 "Rack Counters"); 794 SYSCTL_ADD_S32(&rack_sysctl_ctx, 795 SYSCTL_CHILDREN(rack_sysctl_root), 796 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 797 &rack_rate_sample_method , USE_RTT_LOW, 798 "What method should we use for rate sampling 0=high, 1=low "); 799 /* Probe rtt related controls */ 800 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 801 SYSCTL_CHILDREN(rack_sysctl_root), 802 OID_AUTO, 803 "probertt", 804 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 805 "ProbeRTT related Controls"); 806 SYSCTL_ADD_U16(&rack_sysctl_ctx, 807 SYSCTL_CHILDREN(rack_probertt), 808 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 809 &rack_atexit_prtt_hbp, 130, 810 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 811 SYSCTL_ADD_U16(&rack_sysctl_ctx, 812 SYSCTL_CHILDREN(rack_probertt), 813 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 814 &rack_atexit_prtt, 130, 815 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 816 SYSCTL_ADD_U16(&rack_sysctl_ctx, 817 SYSCTL_CHILDREN(rack_probertt), 818 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 819 &rack_per_of_gp_probertt, 60, 820 "What percentage of goodput do we pace at in probertt"); 821 SYSCTL_ADD_U16(&rack_sysctl_ctx, 822 SYSCTL_CHILDREN(rack_probertt), 823 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 824 &rack_per_of_gp_probertt_reduce, 10, 825 "What percentage of goodput do we reduce every gp_srtt"); 826 SYSCTL_ADD_U16(&rack_sysctl_ctx, 827 SYSCTL_CHILDREN(rack_probertt), 828 OID_AUTO, "gp_per_low", CTLFLAG_RW, 829 &rack_per_of_gp_lowthresh, 40, 830 "What percentage of goodput do we allow the multiplier to fall to"); 831 SYSCTL_ADD_U32(&rack_sysctl_ctx, 832 SYSCTL_CHILDREN(rack_probertt), 833 OID_AUTO, "time_between", CTLFLAG_RW, 834 & rack_time_between_probertt, 96000000, 835 "How many useconds between the lowest rtt falling must past before we enter probertt"); 836 SYSCTL_ADD_U32(&rack_sysctl_ctx, 837 SYSCTL_CHILDREN(rack_probertt), 838 OID_AUTO, "safety", CTLFLAG_RW, 839 &rack_probe_rtt_safety_val, 2000000, 840 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 841 SYSCTL_ADD_U32(&rack_sysctl_ctx, 842 SYSCTL_CHILDREN(rack_probertt), 843 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 844 &rack_probe_rtt_sets_cwnd, 0, 845 "Do we set the cwnd too (if always_lower is on)"); 846 SYSCTL_ADD_U32(&rack_sysctl_ctx, 847 SYSCTL_CHILDREN(rack_probertt), 848 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 849 &rack_max_drain_wait, 2, 850 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 851 SYSCTL_ADD_U32(&rack_sysctl_ctx, 852 SYSCTL_CHILDREN(rack_probertt), 853 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 854 &rack_must_drain, 1, 855 "We must drain this many gp_srtt's waiting for flight to reach goal"); 856 SYSCTL_ADD_U32(&rack_sysctl_ctx, 857 SYSCTL_CHILDREN(rack_probertt), 858 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 859 &rack_probertt_use_min_rtt_entry, 1, 860 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 861 SYSCTL_ADD_U32(&rack_sysctl_ctx, 862 SYSCTL_CHILDREN(rack_probertt), 863 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 864 &rack_probertt_use_min_rtt_exit, 0, 865 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 866 SYSCTL_ADD_U32(&rack_sysctl_ctx, 867 SYSCTL_CHILDREN(rack_probertt), 868 OID_AUTO, "length_div", CTLFLAG_RW, 869 &rack_probertt_gpsrtt_cnt_div, 0, 870 "How many recent goodput srtt periods plus hold tim does probertt last (bottom 
of fraction)"); 871 SYSCTL_ADD_U32(&rack_sysctl_ctx, 872 SYSCTL_CHILDREN(rack_probertt), 873 OID_AUTO, "length_mul", CTLFLAG_RW, 874 &rack_probertt_gpsrtt_cnt_mul, 0, 875 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 876 SYSCTL_ADD_U32(&rack_sysctl_ctx, 877 SYSCTL_CHILDREN(rack_probertt), 878 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 879 &rack_min_probertt_hold, 200000, 880 "What is the minimum time we hold probertt at target"); 881 SYSCTL_ADD_U32(&rack_sysctl_ctx, 882 SYSCTL_CHILDREN(rack_probertt), 883 OID_AUTO, "filter_life", CTLFLAG_RW, 884 &rack_probertt_filter_life, 10000000, 885 "What is the time for the filters life in useconds"); 886 SYSCTL_ADD_U32(&rack_sysctl_ctx, 887 SYSCTL_CHILDREN(rack_probertt), 888 OID_AUTO, "lower_within", CTLFLAG_RW, 889 &rack_probertt_lower_within, 10, 890 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 891 SYSCTL_ADD_U32(&rack_sysctl_ctx, 892 SYSCTL_CHILDREN(rack_probertt), 893 OID_AUTO, "must_move", CTLFLAG_RW, 894 &rack_min_rtt_movement, 250, 895 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 896 SYSCTL_ADD_U32(&rack_sysctl_ctx, 897 SYSCTL_CHILDREN(rack_probertt), 898 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 899 &rack_probertt_clear_is, 1, 900 "Do we clear I/S counts on exiting probe-rtt"); 901 SYSCTL_ADD_S32(&rack_sysctl_ctx, 902 SYSCTL_CHILDREN(rack_probertt), 903 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 904 &rack_max_drain_hbp, 1, 905 "How many extra drain gpsrtt's do we get in highly buffered paths"); 906 SYSCTL_ADD_S32(&rack_sysctl_ctx, 907 SYSCTL_CHILDREN(rack_probertt), 908 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 909 &rack_hbp_thresh, 3, 910 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 911 912 rack_tracepoint = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 913 SYSCTL_CHILDREN(rack_sysctl_root), 914 OID_AUTO, 915 "tp", 916 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 917 "Rack tracepoint facility"); 918 SYSCTL_ADD_U32(&rack_sysctl_ctx, 919 SYSCTL_CHILDREN(rack_tracepoint), 920 OID_AUTO, "number", CTLFLAG_RW, 921 &rack_trace_point_config, 0, 922 "What is the trace point number to activate (0=none, 0xffffffff = all)?"); 923 SYSCTL_ADD_U32(&rack_sysctl_ctx, 924 SYSCTL_CHILDREN(rack_tracepoint), 925 OID_AUTO, "bbmode", CTLFLAG_RW, 926 &rack_trace_point_bb_mode, 4, 927 "What is BB logging mode that is activated?"); 928 SYSCTL_ADD_S32(&rack_sysctl_ctx, 929 SYSCTL_CHILDREN(rack_tracepoint), 930 OID_AUTO, "count", CTLFLAG_RW, 931 &rack_trace_point_count, 0, 932 "How many connections will have BB logging turned on that hit the tracepoint?"); 933 /* Pacing related sysctls */ 934 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 935 SYSCTL_CHILDREN(rack_sysctl_root), 936 OID_AUTO, 937 "pacing", 938 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 939 "Pacing related Controls"); 940 SYSCTL_ADD_S32(&rack_sysctl_ctx, 941 SYSCTL_CHILDREN(rack_pacing), 942 OID_AUTO, "max_pace_over", CTLFLAG_RW, 943 &rack_max_per_above, 30, 944 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 945 SYSCTL_ADD_S32(&rack_sysctl_ctx, 946 SYSCTL_CHILDREN(rack_pacing), 947 OID_AUTO, "pace_to_one", CTLFLAG_RW, 948 &rack_pace_one_seg, 0, 949 "Do we allow low b/w pacing of 1MSS instead of two"); 950 SYSCTL_ADD_S32(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_pacing), 952 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 953 &rack_limit_time_with_srtt, 0, 954 "Do we limit pacing time based on srtt"); 955 SYSCTL_ADD_S32(&rack_sysctl_ctx, 956 
SYSCTL_CHILDREN(rack_pacing), 957 OID_AUTO, "init_win", CTLFLAG_RW, 958 &rack_default_init_window, 0, 959 "Do we have a rack initial window 0 = system default"); 960 SYSCTL_ADD_U16(&rack_sysctl_ctx, 961 SYSCTL_CHILDREN(rack_pacing), 962 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 963 &rack_per_of_gp_ss, 250, 964 "If non zero, what percentage of goodput to pace at in slow start"); 965 SYSCTL_ADD_U16(&rack_sysctl_ctx, 966 SYSCTL_CHILDREN(rack_pacing), 967 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 968 &rack_per_of_gp_ca, 150, 969 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 970 SYSCTL_ADD_U16(&rack_sysctl_ctx, 971 SYSCTL_CHILDREN(rack_pacing), 972 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 973 &rack_per_of_gp_rec, 200, 974 "If non zero, what percentage of goodput to pace at in recovery"); 975 SYSCTL_ADD_S32(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_pacing), 977 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 978 &rack_hptsi_segments, 40, 979 "What size is the max for TSO segments in pacing and burst mitigation"); 980 SYSCTL_ADD_S32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_pacing), 982 OID_AUTO, "burst_reduces", CTLFLAG_RW, 983 &rack_slot_reduction, 4, 984 "When doing only burst mitigation what is the reduce divisor"); 985 SYSCTL_ADD_S32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_sysctl_root), 987 OID_AUTO, "use_pacing", CTLFLAG_RW, 988 &rack_pace_every_seg, 0, 989 "If set we use pacing, if clear we use only the original burst mitigation"); 990 SYSCTL_ADD_U64(&rack_sysctl_ctx, 991 SYSCTL_CHILDREN(rack_pacing), 992 OID_AUTO, "rate_cap", CTLFLAG_RW, 993 &rack_bw_rate_cap, 0, 994 "If set we apply this value to the absolute rate cap used by pacing"); 995 SYSCTL_ADD_U8(&rack_sysctl_ctx, 996 SYSCTL_CHILDREN(rack_sysctl_root), 997 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 998 &rack_req_measurements, 1, 999 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1000 /* Hardware pacing */ 1001 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1002 SYSCTL_CHILDREN(rack_sysctl_root), 1003 OID_AUTO, 1004 "hdwr_pacing", 1005 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1006 "Pacing related Controls"); 1007 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1008 SYSCTL_CHILDREN(rack_hw_pacing), 1009 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1010 &rack_hw_rwnd_factor, 2, 1011 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1012 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1013 SYSCTL_CHILDREN(rack_hw_pacing), 1014 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1015 &rack_enobuf_hw_boost_mult, 2, 1016 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1017 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1018 SYSCTL_CHILDREN(rack_hw_pacing), 1019 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1020 &rack_enobuf_hw_max, 2, 1021 "What is the max boost the pacing time if we see a ENOBUFS?"); 1022 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1023 SYSCTL_CHILDREN(rack_hw_pacing), 1024 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1025 &rack_enobuf_hw_min, 2, 1026 "What is the min boost the pacing time if we see a ENOBUFS?"); 1027 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1028 SYSCTL_CHILDREN(rack_hw_pacing), 1029 OID_AUTO, "enable", CTLFLAG_RW, 1030 &rack_enable_hw_pacing, 0, 1031 "Should RACK attempt to use hw pacing?"); 1032 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1033 SYSCTL_CHILDREN(rack_hw_pacing), 1034 OID_AUTO, "rate_cap", CTLFLAG_RW, 1035 &rack_hw_rate_caps, 1, 1036 "Does the highest hardware pacing rate cap the rate we will send at??"); 1037 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1038 
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 1,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 2,
	    "If the rates between software and hardware match precisely, how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 20,
	    "Rack timely decrease percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
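	/*
	 * Illustrative note (assumes the stack's root sysctl node is
	 * registered as net.inet.tcp.rack elsewhere in the module): the
	 * timely knobs registered above back the module-wide variables
	 * declared near the top of this file and can be inspected or
	 * tuned at runtime with sysctl(8), for example:
	 *
	 *   sysctl net.inet.tcp.rack.timely.dynamicgp=1
	 *   sysctl net.inet.tcp.rack.timely.upper net.inet.tcp.rack.timely.lower
	 */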
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1120 SYSCTL_CHILDREN(rack_timely), 1121 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1122 &rack_gp_no_rec_chg, 1, 1123 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_timely), 1126 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1127 &rack_timely_dec_clear, 6, 1128 "Rack timely what threshold do we count to before another boost during b/w decent"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_timely), 1131 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1132 &rack_timely_max_push_rise, 3, 1133 "Rack timely how many times do we push up with b/w increase"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_timely), 1136 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1137 &rack_timely_max_push_drop, 3, 1138 "Rack timely how many times do we push back on b/w decent"); 1139 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_timely), 1141 OID_AUTO, "min_segs", CTLFLAG_RW, 1142 &rack_timely_min_segs, 4, 1143 "Rack timely when setting the cwnd what is the min num segments"); 1144 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_timely), 1146 OID_AUTO, "noback_max", CTLFLAG_RW, 1147 &rack_use_max_for_nobackoff, 0, 1148 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_timely), 1151 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1152 &rack_timely_int_timely_only, 0, 1153 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1154 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1155 SYSCTL_CHILDREN(rack_timely), 1156 OID_AUTO, "nonstop", CTLFLAG_RW, 1157 &rack_timely_no_stopping, 0, 1158 "Rack timely don't stop increase"); 1159 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1160 SYSCTL_CHILDREN(rack_timely), 1161 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1162 &rack_down_raise_thresh, 100, 1163 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1164 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1165 SYSCTL_CHILDREN(rack_timely), 1166 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1167 &rack_req_segs, 1, 1168 "Bottom dragging if not these many segments outstanding and room"); 1169 1170 /* TLP and Rack related parameters */ 1171 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1172 SYSCTL_CHILDREN(rack_sysctl_root), 1173 OID_AUTO, 1174 "tlp", 1175 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1176 "TLP and Rack related Controls"); 1177 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1178 SYSCTL_CHILDREN(rack_tlp), 1179 OID_AUTO, "use_rrr", CTLFLAG_RW, 1180 &use_rack_rr, 1, 1181 "Do we use Rack Rapid Recovery"); 1182 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1183 SYSCTL_CHILDREN(rack_tlp), 1184 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1185 &rack_max_abc_post_recovery, 2, 1186 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1187 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1188 SYSCTL_CHILDREN(rack_tlp), 1189 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1190 &rack_non_rxt_use_cr, 0, 1191 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1192 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1193 SYSCTL_CHILDREN(rack_tlp), 1194 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1195 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1196 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1197 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1198 SYSCTL_CHILDREN(rack_tlp), 1199 OID_AUTO, "limit", CTLFLAG_RW, 1200 &rack_tlp_limit, 2, 1201 "How many TLP's can be sent without sending 
new data"); 1202 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1203 SYSCTL_CHILDREN(rack_tlp), 1204 OID_AUTO, "use_greater", CTLFLAG_RW, 1205 &rack_tlp_use_greater, 1, 1206 "Should we use the rack_rtt time if its greater than srtt"); 1207 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1208 SYSCTL_CHILDREN(rack_tlp), 1209 OID_AUTO, "tlpminto", CTLFLAG_RW, 1210 &rack_tlp_min, 10000, 1211 "TLP minimum timeout per the specification (in microseconds)"); 1212 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1213 SYSCTL_CHILDREN(rack_tlp), 1214 OID_AUTO, "send_oldest", CTLFLAG_RW, 1215 &rack_always_send_oldest, 0, 1216 "Should we always send the oldest TLP and RACK-TLP"); 1217 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1218 SYSCTL_CHILDREN(rack_tlp), 1219 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1220 &rack_limited_retran, 0, 1221 "How many times can a rack timeout drive out sends"); 1222 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1223 SYSCTL_CHILDREN(rack_tlp), 1224 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1225 &rack_lower_cwnd_at_tlp, 0, 1226 "When a TLP completes a retran should we enter recovery"); 1227 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1228 SYSCTL_CHILDREN(rack_tlp), 1229 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1230 &rack_reorder_thresh, 2, 1231 "What factor for rack will be added when seeing reordering (shift right)"); 1232 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1233 SYSCTL_CHILDREN(rack_tlp), 1234 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1235 &rack_tlp_thresh, 1, 1236 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1237 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1238 SYSCTL_CHILDREN(rack_tlp), 1239 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1240 &rack_reorder_fade, 60000000, 1241 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1242 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1243 SYSCTL_CHILDREN(rack_tlp), 1244 OID_AUTO, "pktdelay", CTLFLAG_RW, 1245 &rack_pkt_delay, 1000, 1246 "Extra RACK time (in microseconds) besides reordering thresh"); 1247 1248 /* Timer related controls */ 1249 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1250 SYSCTL_CHILDREN(rack_sysctl_root), 1251 OID_AUTO, 1252 "timers", 1253 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1254 "Timer related controls"); 1255 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_timers), 1257 OID_AUTO, "persmin", CTLFLAG_RW, 1258 &rack_persist_min, 250000, 1259 "What is the minimum time in microseconds between persists"); 1260 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1261 SYSCTL_CHILDREN(rack_timers), 1262 OID_AUTO, "persmax", CTLFLAG_RW, 1263 &rack_persist_max, 2000000, 1264 "What is the largest delay in microseconds between persists"); 1265 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1266 SYSCTL_CHILDREN(rack_timers), 1267 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1268 &rack_delayed_ack_time, 40000, 1269 "Delayed ack time (40ms in microseconds)"); 1270 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1271 SYSCTL_CHILDREN(rack_timers), 1272 OID_AUTO, "minrto", CTLFLAG_RW, 1273 &rack_rto_min, 30000, 1274 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1275 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1276 SYSCTL_CHILDREN(rack_timers), 1277 OID_AUTO, "maxrto", CTLFLAG_RW, 1278 &rack_rto_max, 4000000, 1279 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1280 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1281 SYSCTL_CHILDREN(rack_timers), 1282 OID_AUTO, "minto", CTLFLAG_RW, 1283 &rack_min_to, 1000, 1284 "Minimum rack timeout in microseconds"); 1285 /* Measure controls */ 1286 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1287 SYSCTL_CHILDREN(rack_sysctl_root), 1288 OID_AUTO, 1289 "measure", 1290 
CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1291 "Measure related controls"); 1292 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1293 SYSCTL_CHILDREN(rack_measure), 1294 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1295 &rack_wma_divisor, 8, 1296 "When doing b/w calculation what is the divisor for the WMA"); 1297 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1298 SYSCTL_CHILDREN(rack_measure), 1299 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1300 &rack_cwnd_block_ends_measure, 0, 1301 "Does a cwnd just-return end the measurement window (app limited)"); 1302 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1303 SYSCTL_CHILDREN(rack_measure), 1304 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1305 &rack_rwnd_block_ends_measure, 0, 1306 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1307 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1308 SYSCTL_CHILDREN(rack_measure), 1309 OID_AUTO, "min_target", CTLFLAG_RW, 1310 &rack_def_data_window, 20, 1311 "What is the minimum target window (in mss) for a GP measurements"); 1312 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1313 SYSCTL_CHILDREN(rack_measure), 1314 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1315 &rack_goal_bdp, 2, 1316 "What is the goal BDP to measure"); 1317 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1318 SYSCTL_CHILDREN(rack_measure), 1319 OID_AUTO, "min_srtts", CTLFLAG_RW, 1320 &rack_min_srtts, 1, 1321 "What is the goal BDP to measure"); 1322 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1323 SYSCTL_CHILDREN(rack_measure), 1324 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1325 &rack_min_measure_usec, 0, 1326 "What is the Minimum time time for a measurement if 0, this is off"); 1327 /* Features */ 1328 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1329 SYSCTL_CHILDREN(rack_sysctl_root), 1330 OID_AUTO, 1331 "features", 1332 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1333 "Feature controls"); 1334 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1335 SYSCTL_CHILDREN(rack_features), 1336 OID_AUTO, "cmpack", CTLFLAG_RW, 1337 &rack_use_cmp_acks, 1, 1338 "Should RACK have LRO send compressed acks"); 1339 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1340 SYSCTL_CHILDREN(rack_features), 1341 OID_AUTO, "fsb", CTLFLAG_RW, 1342 &rack_use_fsb, 1, 1343 "Should RACK use the fast send block?"); 1344 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1345 SYSCTL_CHILDREN(rack_features), 1346 OID_AUTO, "rfo", CTLFLAG_RW, 1347 &rack_use_rfo, 1, 1348 "Should RACK use rack_fast_output()?"); 1349 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1350 SYSCTL_CHILDREN(rack_features), 1351 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1352 &rack_use_rsm_rfo, 1, 1353 "Should RACK use rack_fast_rsm_output()?"); 1354 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1355 SYSCTL_CHILDREN(rack_features), 1356 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1357 &rack_enable_mqueue_for_nonpaced, 0, 1358 "Should RACK use mbuf queuing for non-paced connections"); 1359 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1360 SYSCTL_CHILDREN(rack_features), 1361 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1362 &rack_do_hystart, 0, 1363 "Should RACK enable HyStart++ on connections?"); 1364 /* Misc rack controls */ 1365 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1366 SYSCTL_CHILDREN(rack_sysctl_root), 1367 OID_AUTO, 1368 "misc", 1369 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1370 "Misc related controls"); 1371 #ifdef TCP_ACCOUNTING 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_misc), 1374 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1375 &rack_tcp_accounting, 0, 1376 "Should we turn on TCP accounting for all rack sessions?"); 1377 #endif 1378 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1379 SYSCTL_CHILDREN(rack_misc), 1380 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1381 &rack_apply_rtt_with_reduced_conf, 
0, 1382 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1383 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1384 SYSCTL_CHILDREN(rack_misc), 1385 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1386 &rack_dsack_std_based, 3, 1387 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1388 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1389 SYSCTL_CHILDREN(rack_misc), 1390 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1391 &rack_prr_addbackmax, 2, 1392 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1393 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1394 SYSCTL_CHILDREN(rack_misc), 1395 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1396 &rack_stats_gets_ms_rtt, 1, 1397 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1398 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1399 SYSCTL_CHILDREN(rack_misc), 1400 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1401 &rack_client_low_buf, 0, 1402 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1403 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1404 SYSCTL_CHILDREN(rack_misc), 1405 OID_AUTO, "defprofile", CTLFLAG_RW, 1406 &rack_def_profile, 0, 1407 "Should RACK use a default profile (0=no, num == profile num)?"); 1408 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1409 SYSCTL_CHILDREN(rack_misc), 1410 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1411 &rack_enable_shared_cwnd, 1, 1412 "Should RACK try to use the shared cwnd on connections where allowed"); 1413 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1414 SYSCTL_CHILDREN(rack_misc), 1415 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1416 &rack_limits_scwnd, 1, 1417 "Should RACK place low end time limits on the shared cwnd feature"); 1418 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1419 SYSCTL_CHILDREN(rack_misc), 1420 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1421 &rack_use_imac_dack, 0, 1422 "Should RACK try to emulate iMac delayed ack"); 1423 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1424 SYSCTL_CHILDREN(rack_misc), 1425 OID_AUTO, "no_prr", CTLFLAG_RW, 1426 &rack_disable_prr, 0, 1427 "Should RACK not use prr and only pace (must have pacing on)"); 1428 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1429 SYSCTL_CHILDREN(rack_misc), 1430 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1431 &rack_verbose_logging, 0, 1432 "Should RACK black box logging be verbose"); 1433 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1434 SYSCTL_CHILDREN(rack_misc), 1435 OID_AUTO, "data_after_close", CTLFLAG_RW, 1436 &rack_ignore_data_after_close, 1, 1437 "Do we hold off sending a RST until all pending data is ack'd"); 1438 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1439 SYSCTL_CHILDREN(rack_misc), 1440 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1441 &rack_sack_not_required, 1, 1442 "Do we allow rack to run on connections not supporting SACK"); 1443 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1444 SYSCTL_CHILDREN(rack_misc), 1445 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1446 &rack_send_a_lot_in_prr, 1, 1447 "Send a lot in prr"); 1448 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1449 SYSCTL_CHILDREN(rack_misc), 1450 OID_AUTO, "autoscale", CTLFLAG_RW, 1451 &rack_autosndbuf_inc, 20, 1452 "What percentage should rack scale up its snd buffer by?"); 1453 /* Sack Attacker detection stuff */ 1454 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1455 SYSCTL_CHILDREN(rack_attack), 1456 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1457 &rack_highest_sack_thresh_seen, 0, 1458 "Highest sack to ack ratio seen"); 1459 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1460 SYSCTL_CHILDREN(rack_attack), 1461 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1462 
&rack_highest_move_thresh_seen, 0, 1463 "Highest move to non-move ratio seen"); 1464 rack_ack_total = counter_u64_alloc(M_WAITOK); 1465 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1466 SYSCTL_CHILDREN(rack_attack), 1467 OID_AUTO, "acktotal", CTLFLAG_RD, 1468 &rack_ack_total, 1469 "Total number of Ack's"); 1470 rack_express_sack = counter_u64_alloc(M_WAITOK); 1471 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_attack), 1473 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1474 &rack_express_sack, 1475 "Total expresss number of Sack's"); 1476 rack_sack_total = counter_u64_alloc(M_WAITOK); 1477 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1478 SYSCTL_CHILDREN(rack_attack), 1479 OID_AUTO, "sacktotal", CTLFLAG_RD, 1480 &rack_sack_total, 1481 "Total number of SACKs"); 1482 rack_move_none = counter_u64_alloc(M_WAITOK); 1483 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1484 SYSCTL_CHILDREN(rack_attack), 1485 OID_AUTO, "move_none", CTLFLAG_RD, 1486 &rack_move_none, 1487 "Total number of SACK index reuse of positions under threshold"); 1488 rack_move_some = counter_u64_alloc(M_WAITOK); 1489 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1490 SYSCTL_CHILDREN(rack_attack), 1491 OID_AUTO, "move_some", CTLFLAG_RD, 1492 &rack_move_some, 1493 "Total number of SACK index reuse of positions over threshold"); 1494 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1495 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1496 SYSCTL_CHILDREN(rack_attack), 1497 OID_AUTO, "attacks", CTLFLAG_RD, 1498 &rack_sack_attacks_detected, 1499 "Total number of SACK attackers that had sack disabled"); 1500 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1501 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1502 SYSCTL_CHILDREN(rack_attack), 1503 OID_AUTO, "reversed", CTLFLAG_RD, 1504 &rack_sack_attacks_reversed, 1505 "Total number of SACK attackers that were later determined false positive"); 1506 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1507 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1508 SYSCTL_CHILDREN(rack_attack), 1509 OID_AUTO, "nextmerge", CTLFLAG_RD, 1510 &rack_sack_used_next_merge, 1511 "Total number of times we used the next merge"); 1512 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1513 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1514 SYSCTL_CHILDREN(rack_attack), 1515 OID_AUTO, "prevmerge", CTLFLAG_RD, 1516 &rack_sack_used_prev_merge, 1517 "Total number of times we used the prev merge"); 1518 /* Counters */ 1519 rack_fto_send = counter_u64_alloc(M_WAITOK); 1520 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1521 SYSCTL_CHILDREN(rack_counters), 1522 OID_AUTO, "fto_send", CTLFLAG_RD, 1523 &rack_fto_send, "Total number of rack_fast_output sends"); 1524 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1525 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1526 SYSCTL_CHILDREN(rack_counters), 1527 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1528 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1529 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1530 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1531 SYSCTL_CHILDREN(rack_counters), 1532 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1533 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1534 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1535 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1536 SYSCTL_CHILDREN(rack_counters), 1537 OID_AUTO, "nfto_send", CTLFLAG_RD, 1538 &rack_non_fto_send, "Total number of rack_output first sends"); 1539 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1540 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1541 
SYSCTL_CHILDREN(rack_counters), 1542 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1543 &rack_extended_rfo, "Total number of times we extended rfo"); 1544 1545 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1546 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1547 SYSCTL_CHILDREN(rack_counters), 1548 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1549 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1550 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1551 1552 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1553 SYSCTL_CHILDREN(rack_counters), 1554 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1555 &rack_hw_pace_lost, "Total number of times we lost hw pacing after it had been set up"); 1556 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1557 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1558 SYSCTL_CHILDREN(rack_counters), 1559 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1560 &rack_tlp_tot, 1561 "Total number of tail loss probe expirations"); 1562 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1563 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_counters), 1565 OID_AUTO, "tlp_new", CTLFLAG_RD, 1566 &rack_tlp_newdata, 1567 "Total number of tail loss probe sending new data"); 1568 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1569 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1570 SYSCTL_CHILDREN(rack_counters), 1571 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1572 &rack_tlp_retran, 1573 "Total number of tail loss probe sending retransmitted data"); 1574 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1575 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1576 SYSCTL_CHILDREN(rack_counters), 1577 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1578 &rack_tlp_retran_bytes, 1579 "Total bytes of tail loss probe sending retransmitted data"); 1580 rack_to_tot = counter_u64_alloc(M_WAITOK); 1581 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1582 SYSCTL_CHILDREN(rack_counters), 1583 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1584 &rack_to_tot, 1585 "Total number of times the rack timeout expired"); 1586 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1587 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1588 SYSCTL_CHILDREN(rack_counters), 1589 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1590 &rack_saw_enobuf, 1591 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1592 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1593 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1594 SYSCTL_CHILDREN(rack_counters), 1595 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1596 &rack_saw_enobuf_hw, 1597 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1598 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1599 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1600 SYSCTL_CHILDREN(rack_counters), 1601 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1602 &rack_saw_enetunreach, 1603 "Total number of times a send received an ENETUNREACH"); 1604 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1605 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1606 SYSCTL_CHILDREN(rack_counters), 1607 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1608 &rack_hot_alloc, 1609 "Total allocations from the top of our list"); 1610 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1611 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1612 SYSCTL_CHILDREN(rack_counters), 1613 OID_AUTO, "allocs", CTLFLAG_RD, 1614 &rack_to_alloc, 1615 "Total allocations of tracking structures"); 1616 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1617 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1618 SYSCTL_CHILDREN(rack_counters), 1619 OID_AUTO, "allochard", CTLFLAG_RD, 1620 &rack_to_alloc_hard, 1621 "Total
allocations done with sleeping the hard way"); 1622 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1623 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1624 SYSCTL_CHILDREN(rack_counters), 1625 OID_AUTO, "allocemerg", CTLFLAG_RD, 1626 &rack_to_alloc_emerg, 1627 "Total allocations done from emergency cache"); 1628 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1629 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1630 SYSCTL_CHILDREN(rack_counters), 1631 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1632 &rack_to_alloc_limited, 1633 "Total allocations dropped due to limit"); 1634 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1635 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1636 SYSCTL_CHILDREN(rack_counters), 1637 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1638 &rack_alloc_limited_conns, 1639 "Connections with allocations dropped due to limit"); 1640 rack_split_limited = counter_u64_alloc(M_WAITOK); 1641 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1642 SYSCTL_CHILDREN(rack_counters), 1643 OID_AUTO, "split_limited", CTLFLAG_RD, 1644 &rack_split_limited, 1645 "Split allocations dropped due to limit"); 1646 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1647 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1648 SYSCTL_CHILDREN(rack_counters), 1649 OID_AUTO, "persist_sends", CTLFLAG_RD, 1650 &rack_persists_sends, 1651 "Number of times we sent a persist probe"); 1652 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1653 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1654 SYSCTL_CHILDREN(rack_counters), 1655 OID_AUTO, "persist_acks", CTLFLAG_RD, 1656 &rack_persists_acks, 1657 "Number of times a persist probe was acked"); 1658 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1659 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1660 SYSCTL_CHILDREN(rack_counters), 1661 OID_AUTO, "persist_loss", CTLFLAG_RD, 1662 &rack_persists_loss, 1663 "Number of times we detected a lost persist probe (no ack)"); 1664 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1665 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1666 SYSCTL_CHILDREN(rack_counters), 1667 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1668 &rack_persists_lost_ends, 1669 "Number of lost persist probe (no ack) that the run ended with a PERSIST abort"); 1670 #ifdef INVARIANTS 1671 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1672 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1673 SYSCTL_CHILDREN(rack_counters), 1674 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1675 &rack_adjust_map_bw, 1676 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1677 #endif 1678 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1682 &rack_multi_single_eq, 1683 "Number of compressed acks total represented"); 1684 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1688 &rack_proc_non_comp_ack, 1689 "Number of non compresseds acks that we processed"); 1690 1691 1692 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1693 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1694 SYSCTL_CHILDREN(rack_counters), 1695 OID_AUTO, "sack_long", CTLFLAG_RD, 1696 &rack_sack_proc_all, 1697 "Total times we had to walk whole list for sack processing"); 1698 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1699 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1700 SYSCTL_CHILDREN(rack_counters), 1701 OID_AUTO, "sack_restart", CTLFLAG_RD, 1702 
&rack_sack_proc_restart, 1703 "Total times we had to walk whole list due to a restart"); 1704 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1705 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1706 SYSCTL_CHILDREN(rack_counters), 1707 OID_AUTO, "sack_short", CTLFLAG_RD, 1708 &rack_sack_proc_short, 1709 "Total times we took shortcut for sack processing"); 1710 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1711 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1712 SYSCTL_CHILDREN(rack_attack), 1713 OID_AUTO, "skipacked", CTLFLAG_RD, 1714 &rack_sack_skipped_acked, 1715 "Total number of times we skipped previously sacked"); 1716 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1717 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1718 SYSCTL_CHILDREN(rack_attack), 1719 OID_AUTO, "ofsplit", CTLFLAG_RD, 1720 &rack_sack_splits, 1721 "Total number of times we did the old fashion tree split"); 1722 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1723 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1724 SYSCTL_CHILDREN(rack_counters), 1725 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1726 &rack_input_idle_reduces, 1727 "Total number of idle reductions on input"); 1728 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1729 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1730 SYSCTL_CHILDREN(rack_counters), 1731 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1732 &rack_collapsed_win_seen, 1733 "Total number of collapsed window events seen (where our window shrinks)"); 1734 1735 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1736 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1737 SYSCTL_CHILDREN(rack_counters), 1738 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1739 &rack_collapsed_win, 1740 "Total number of collapsed window events where we mark packets"); 1741 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1742 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1743 SYSCTL_CHILDREN(rack_counters), 1744 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1745 &rack_collapsed_win_rxt, 1746 "Total number of packets that were retransmitted"); 1747 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1748 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1749 SYSCTL_CHILDREN(rack_counters), 1750 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1751 &rack_collapsed_win_rxt_bytes, 1752 "Total number of bytes that were retransmitted"); 1753 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1754 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1755 SYSCTL_CHILDREN(rack_counters), 1756 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1757 &rack_try_scwnd, 1758 "Total number of scwnd attempts"); 1759 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1760 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1761 OID_AUTO, "outsize", CTLFLAG_RD, 1762 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1763 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1764 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1765 OID_AUTO, "opts", CTLFLAG_RD, 1766 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1767 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_sysctl_root), 1769 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1770 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1771 } 1772 1773 static __inline int 1774 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1775 { 1776 if (SEQ_GEQ(b->r_start, a->r_start) && 1777 SEQ_LT(b->r_start, a->r_end)) { 1778 /* 1779 * The entry b is within the 1780 * block a. 
i.e.: 1781 * a -- |-------------| 1782 * b -- |----| 1783 * <or> 1784 * b -- |------| 1785 * <or> 1786 * b -- |-----------| 1787 */ 1788 return (0); 1789 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1790 /* 1791 * b falls as either the next 1792 * sequence block after a so a 1793 * is said to be smaller than b. 1794 * i.e: 1795 * a -- |------| 1796 * b -- |--------| 1797 * or 1798 * b -- |-----| 1799 */ 1800 return (1); 1801 } 1802 /* 1803 * Whats left is where a is 1804 * larger than b. i.e: 1805 * a -- |-------| 1806 * b -- |---| 1807 * or even possibly 1808 * b -- |--------------| 1809 */ 1810 return (-1); 1811 } 1812 1813 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1814 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1815 1816 static uint32_t 1817 rc_init_window(struct tcp_rack *rack) 1818 { 1819 uint32_t win; 1820 1821 if (rack->rc_init_win == 0) { 1822 /* 1823 * Nothing set by the user, use the system stack 1824 * default. 1825 */ 1826 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1827 } 1828 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1829 return (win); 1830 } 1831 1832 static uint64_t 1833 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1834 { 1835 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1836 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1837 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1838 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1839 else 1840 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1841 } 1842 1843 static uint64_t 1844 rack_get_bw(struct tcp_rack *rack) 1845 { 1846 if (rack->use_fixed_rate) { 1847 /* Return the fixed pacing rate */ 1848 return (rack_get_fixed_pacing_bw(rack)); 1849 } 1850 if (rack->r_ctl.gp_bw == 0) { 1851 /* 1852 * We have yet no b/w measurement, 1853 * if we have a user set initial bw 1854 * return it. If we don't have that and 1855 * we have an srtt, use the tcp IW (10) to 1856 * calculate a fictional b/w over the SRTT 1857 * which is more or less a guess. Note 1858 * we don't use our IW from rack on purpose 1859 * so if we have like IW=30, we are not 1860 * calculating a "huge" b/w. 1861 */ 1862 uint64_t bw, srtt; 1863 if (rack->r_ctl.init_rate) 1864 return (rack->r_ctl.init_rate); 1865 1866 /* Has the user set a max peak rate? */ 1867 #ifdef NETFLIX_PEAKRATE 1868 if (rack->rc_tp->t_maxpeakrate) 1869 return (rack->rc_tp->t_maxpeakrate); 1870 #endif 1871 /* Ok lets come up with the IW guess, if we have a srtt */ 1872 if (rack->rc_tp->t_srtt == 0) { 1873 /* 1874 * Go with old pacing method 1875 * i.e. burst mitigation only. 1876 */ 1877 return (0); 1878 } 1879 /* Ok lets get the initial TCP win (not racks) */ 1880 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1881 srtt = (uint64_t)rack->rc_tp->t_srtt; 1882 bw *= (uint64_t)USECS_IN_SECOND; 1883 bw /= srtt; 1884 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1885 bw = rack->r_ctl.bw_rate_cap; 1886 return (bw); 1887 } else { 1888 uint64_t bw; 1889 1890 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1891 /* Averaging is done, we can return the value */ 1892 bw = rack->r_ctl.gp_bw; 1893 } else { 1894 /* Still doing initial average must calculate */ 1895 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1896 } 1897 #ifdef NETFLIX_PEAKRATE 1898 if ((rack->rc_tp->t_maxpeakrate) && 1899 (bw > rack->rc_tp->t_maxpeakrate)) { 1900 /* The user has set a peak rate to pace at 1901 * don't allow us to pace faster than that. 
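 * (A note on flow, not part of the original comment: when this early
 * return is not taken, the bw_rate_cap check just below still bounds
 * the value we hand back.)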
1902 */ 1903 return (rack->rc_tp->t_maxpeakrate); 1904 } 1905 #endif 1906 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1907 bw = rack->r_ctl.bw_rate_cap; 1908 return (bw); 1909 } 1910 } 1911 1912 static uint16_t 1913 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1914 { 1915 if (rack->use_fixed_rate) { 1916 return (100); 1917 } else if (rack->in_probe_rtt && (rsm == NULL)) 1918 return (rack->r_ctl.rack_per_of_gp_probertt); 1919 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1920 rack->r_ctl.rack_per_of_gp_rec)) { 1921 if (rsm) { 1922 /* a retransmission always use the recovery rate */ 1923 return (rack->r_ctl.rack_per_of_gp_rec); 1924 } else if (rack->rack_rec_nonrxt_use_cr) { 1925 /* Directed to use the configured rate */ 1926 goto configured_rate; 1927 } else if (rack->rack_no_prr && 1928 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1929 /* No PRR, lets just use the b/w estimate only */ 1930 return (100); 1931 } else { 1932 /* 1933 * Here we may have a non-retransmit but we 1934 * have no overrides, so just use the recovery 1935 * rate (prr is in effect). 1936 */ 1937 return (rack->r_ctl.rack_per_of_gp_rec); 1938 } 1939 } 1940 configured_rate: 1941 /* For the configured rate we look at our cwnd vs the ssthresh */ 1942 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1943 return (rack->r_ctl.rack_per_of_gp_ss); 1944 else 1945 return (rack->r_ctl.rack_per_of_gp_ca); 1946 } 1947 1948 static void 1949 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1950 { 1951 /* 1952 * Types of logs (mod value) 1953 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1954 * 2 = a dsack round begins, persist is reset to 16. 1955 * 3 = a dsack round ends 1956 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1957 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1958 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
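 * For every mod value, flex1 carries the control flags packed (each
 * assumed here to be a single-bit field) as:
 *   (rc_rack_tmr_std_based << 2) | (rc_rack_use_dsack << 1) |
 *   rc_dsack_round_seen
 * which mirrors the shift/or sequence done below.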
1959 */ 1960 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1961 union tcp_log_stackspecific log; 1962 struct timeval tv; 1963 1964 memset(&log, 0, sizeof(log)); 1965 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 1966 log.u_bbr.flex1 <<= 1; 1967 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 1968 log.u_bbr.flex1 <<= 1; 1969 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 1970 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 1971 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 1972 log.u_bbr.flex4 = flex4; 1973 log.u_bbr.flex5 = flex5; 1974 log.u_bbr.flex6 = flex6; 1975 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 1976 log.u_bbr.flex8 = mod; 1977 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1978 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1979 &rack->rc_inp->inp_socket->so_rcv, 1980 &rack->rc_inp->inp_socket->so_snd, 1981 RACK_DSACK_HANDLING, 0, 1982 0, &log, false, &tv); 1983 } 1984 } 1985 1986 static void 1987 rack_log_hdwr_pacing(struct tcp_rack *rack, 1988 uint64_t rate, uint64_t hw_rate, int line, 1989 int error, uint16_t mod) 1990 { 1991 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1992 union tcp_log_stackspecific log; 1993 struct timeval tv; 1994 const struct ifnet *ifp; 1995 1996 memset(&log, 0, sizeof(log)); 1997 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 1998 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 1999 if (rack->r_ctl.crte) { 2000 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2001 } else if (rack->rc_inp->inp_route.ro_nh && 2002 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2003 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2004 } else 2005 ifp = NULL; 2006 if (ifp) { 2007 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2008 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2009 } 2010 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2011 log.u_bbr.bw_inuse = rate; 2012 log.u_bbr.flex5 = line; 2013 log.u_bbr.flex6 = error; 2014 log.u_bbr.flex7 = mod; 2015 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2016 log.u_bbr.flex8 = rack->use_fixed_rate; 2017 log.u_bbr.flex8 <<= 1; 2018 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2019 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2020 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2021 if (rack->r_ctl.crte) 2022 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2023 else 2024 log.u_bbr.cur_del_rate = 0; 2025 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2026 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2027 &rack->rc_inp->inp_socket->so_rcv, 2028 &rack->rc_inp->inp_socket->so_snd, 2029 BBR_LOG_HDWR_PACE, 0, 2030 0, &log, false, &tv); 2031 } 2032 } 2033 2034 static uint64_t 2035 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2036 { 2037 /* 2038 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
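 * An illustrative example (numbers are made up): with bw = 1,000,000
 * bytes/sec and a slow-start gain of 250 (i.e. 250%), bw_est below
 * works out to 2,500,000 bytes/sec.  That value is then floored at
 * RACK_MIN_BW and, when hardware rate caps are in force, clamped to
 * the highest rate the interface's rate table can supply.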
2039 */ 2040 uint64_t bw_est, high_rate; 2041 uint64_t gain; 2042 2043 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2044 bw_est = bw * gain; 2045 bw_est /= (uint64_t)100; 2046 /* Never fall below the minimum (def 64kbps) */ 2047 if (bw_est < RACK_MIN_BW) 2048 bw_est = RACK_MIN_BW; 2049 if (rack->r_rack_hw_rate_caps) { 2050 /* Rate caps are in place */ 2051 if (rack->r_ctl.crte != NULL) { 2052 /* We have a hdwr rate already */ 2053 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2054 if (bw_est >= high_rate) { 2055 /* We are capping bw at the highest rate table entry */ 2056 rack_log_hdwr_pacing(rack, 2057 bw_est, high_rate, __LINE__, 2058 0, 3); 2059 bw_est = high_rate; 2060 if (capped) 2061 *capped = 1; 2062 } 2063 } else if ((rack->rack_hdrw_pacing == 0) && 2064 (rack->rack_hdw_pace_ena) && 2065 (rack->rack_attempt_hdwr_pace == 0) && 2066 (rack->rc_inp->inp_route.ro_nh != NULL) && 2067 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2068 /* 2069 * Special case, we have not yet attempted hardware 2070 * pacing, and yet we may, when we do, find out if we are 2071 * above the highest rate. We need to know the maxbw for the interface 2072 * in question (if it supports ratelimiting). We get back 2073 * a 0, if the interface is not found in the RL lists. 2074 */ 2075 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2076 if (high_rate) { 2077 /* Yep, we have a rate is it above this rate? */ 2078 if (bw_est > high_rate) { 2079 bw_est = high_rate; 2080 if (capped) 2081 *capped = 1; 2082 } 2083 } 2084 } 2085 } 2086 return (bw_est); 2087 } 2088 2089 static void 2090 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2091 { 2092 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2093 union tcp_log_stackspecific log; 2094 struct timeval tv; 2095 2096 if ((mod != 1) && (rack_verbose_logging == 0)) { 2097 /* 2098 * We get 3 values currently for mod 2099 * 1 - We are retransmitting and this tells the reason. 2100 * 2 - We are clearing a dup-ack count. 2101 * 3 - We are incrementing a dup-ack count. 2102 * 2103 * The clear/increment are only logged 2104 * if you have BBverbose on. 
2105 */ 2106 return; 2107 } 2108 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2109 log.u_bbr.flex1 = tsused; 2110 log.u_bbr.flex2 = thresh; 2111 log.u_bbr.flex3 = rsm->r_flags; 2112 log.u_bbr.flex4 = rsm->r_dupack; 2113 log.u_bbr.flex5 = rsm->r_start; 2114 log.u_bbr.flex6 = rsm->r_end; 2115 log.u_bbr.flex8 = mod; 2116 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2117 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2118 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2119 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2120 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2121 log.u_bbr.pacing_gain = rack->r_must_retran; 2122 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2123 &rack->rc_inp->inp_socket->so_rcv, 2124 &rack->rc_inp->inp_socket->so_snd, 2125 BBR_LOG_SETTINGS_CHG, 0, 2126 0, &log, false, &tv); 2127 } 2128 } 2129 2130 static void 2131 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2132 { 2133 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2134 union tcp_log_stackspecific log; 2135 struct timeval tv; 2136 2137 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2138 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2139 log.u_bbr.flex2 = to; 2140 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2141 log.u_bbr.flex4 = slot; 2142 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2143 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2144 log.u_bbr.flex7 = rack->rc_in_persist; 2145 log.u_bbr.flex8 = which; 2146 if (rack->rack_no_prr) 2147 log.u_bbr.pkts_out = 0; 2148 else 2149 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2150 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2151 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2152 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2153 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2154 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2155 log.u_bbr.pacing_gain = rack->r_must_retran; 2156 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2157 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2158 log.u_bbr.lost = rack_rto_min; 2159 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2160 &rack->rc_inp->inp_socket->so_rcv, 2161 &rack->rc_inp->inp_socket->so_snd, 2162 BBR_LOG_TIMERSTAR, 0, 2163 0, &log, false, &tv); 2164 } 2165 } 2166 2167 static void 2168 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2169 { 2170 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2171 union tcp_log_stackspecific log; 2172 struct timeval tv; 2173 2174 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2175 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2176 log.u_bbr.flex8 = to_num; 2177 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2178 log.u_bbr.flex2 = rack->rc_rack_rtt; 2179 if (rsm == NULL) 2180 log.u_bbr.flex3 = 0; 2181 else 2182 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2183 if (rack->rack_no_prr) 2184 log.u_bbr.flex5 = 0; 2185 else 2186 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2187 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2188 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2189 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2190 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2191 log.u_bbr.pacing_gain = rack->r_must_retran; 2192 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2193 &rack->rc_inp->inp_socket->so_rcv, 2194 &rack->rc_inp->inp_socket->so_snd, 2195 BBR_LOG_RTO, 0, 2196 0, &log, false, &tv); 2197 } 2198 } 2199 2200 static void 2201 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2202 struct rack_sendmap *prev, 2203 struct rack_sendmap *rsm, 2204 
struct rack_sendmap *next, 2205 int flag, uint32_t th_ack, int line) 2206 { 2207 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2208 union tcp_log_stackspecific log; 2209 struct timeval tv; 2210 2211 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2212 log.u_bbr.flex8 = flag; 2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2214 log.u_bbr.cur_del_rate = (uint64_t)prev; 2215 log.u_bbr.delRate = (uint64_t)rsm; 2216 log.u_bbr.rttProp = (uint64_t)next; 2217 log.u_bbr.flex7 = 0; 2218 if (prev) { 2219 log.u_bbr.flex1 = prev->r_start; 2220 log.u_bbr.flex2 = prev->r_end; 2221 log.u_bbr.flex7 |= 0x4; 2222 } 2223 if (rsm) { 2224 log.u_bbr.flex3 = rsm->r_start; 2225 log.u_bbr.flex4 = rsm->r_end; 2226 log.u_bbr.flex7 |= 0x2; 2227 } 2228 if (next) { 2229 log.u_bbr.flex5 = next->r_start; 2230 log.u_bbr.flex6 = next->r_end; 2231 log.u_bbr.flex7 |= 0x1; 2232 } 2233 log.u_bbr.applimited = line; 2234 log.u_bbr.pkts_out = th_ack; 2235 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2236 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2237 if (rack->rack_no_prr) 2238 log.u_bbr.lost = 0; 2239 else 2240 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2241 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2242 &rack->rc_inp->inp_socket->so_rcv, 2243 &rack->rc_inp->inp_socket->so_snd, 2244 TCP_LOG_MAPCHG, 0, 2245 0, &log, false, &tv); 2246 } 2247 } 2248 2249 static void 2250 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2251 struct rack_sendmap *rsm, int conf) 2252 { 2253 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2254 union tcp_log_stackspecific log; 2255 struct timeval tv; 2256 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2257 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2258 log.u_bbr.flex1 = t; 2259 log.u_bbr.flex2 = len; 2260 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2261 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2262 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2263 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2264 log.u_bbr.flex7 = conf; 2265 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2266 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2267 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2268 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2269 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2270 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2271 if (rsm) { 2272 log.u_bbr.pkt_epoch = rsm->r_start; 2273 log.u_bbr.lost = rsm->r_end; 2274 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2275 /* We loose any upper of the 24 bits */ 2276 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2277 } else { 2278 /* Its a SYN */ 2279 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2280 log.u_bbr.lost = 0; 2281 log.u_bbr.cwnd_gain = 0; 2282 log.u_bbr.pacing_gain = 0; 2283 } 2284 /* Write out general bits of interest rrs here */ 2285 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2286 log.u_bbr.use_lt_bw <<= 1; 2287 log.u_bbr.use_lt_bw |= rack->forced_ack; 2288 log.u_bbr.use_lt_bw <<= 1; 2289 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2290 log.u_bbr.use_lt_bw <<= 1; 2291 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2292 log.u_bbr.use_lt_bw <<= 1; 2293 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2294 log.u_bbr.use_lt_bw <<= 1; 2295 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2296 log.u_bbr.use_lt_bw <<= 1; 2297 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2298 log.u_bbr.use_lt_bw <<= 1; 2299 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2300 log.u_bbr.applimited = 
rack->r_ctl.rc_target_probertt_flight; 2301 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2302 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2303 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2304 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2305 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2306 log.u_bbr.bw_inuse <<= 32; 2307 if (rsm) 2308 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2309 TCP_LOG_EVENTP(tp, NULL, 2310 &rack->rc_inp->inp_socket->so_rcv, 2311 &rack->rc_inp->inp_socket->so_snd, 2312 BBR_LOG_BBRRTT, 0, 2313 0, &log, false, &tv); 2314 2315 2316 } 2317 } 2318 2319 static void 2320 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2321 { 2322 /* 2323 * Log the rtt sample we are 2324 * applying to the srtt algorithm in 2325 * useconds. 2326 */ 2327 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2328 union tcp_log_stackspecific log; 2329 struct timeval tv; 2330 2331 /* Convert our ms to a microsecond */ 2332 memset(&log, 0, sizeof(log)); 2333 log.u_bbr.flex1 = rtt; 2334 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2335 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2336 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2337 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2338 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2339 log.u_bbr.flex7 = 1; 2340 log.u_bbr.flex8 = rack->sack_attack_disable; 2341 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2342 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2343 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2344 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2345 log.u_bbr.pacing_gain = rack->r_must_retran; 2346 /* 2347 * We capture in delRate the upper 32 bits as 2348 * the confidence level we had declared, and the 2349 * lower 32 bits as the actual RTT using the arrival 2350 * timestamp. 
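 * That is, delRate = ((uint64_t)rack_rs.confidence << 32) |
 * rack_rs.rs_us_rtt.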
2351 */ 2352 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2353 log.u_bbr.delRate <<= 32; 2354 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2355 /* Lets capture all the things that make up t_rtxcur */ 2356 log.u_bbr.applimited = rack_rto_min; 2357 log.u_bbr.epoch = rack_rto_max; 2358 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2359 log.u_bbr.lost = rack_rto_min; 2360 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2361 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2362 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2363 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2364 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2365 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2366 &rack->rc_inp->inp_socket->so_rcv, 2367 &rack->rc_inp->inp_socket->so_snd, 2368 TCP_LOG_RTT, 0, 2369 0, &log, false, &tv); 2370 } 2371 } 2372 2373 static void 2374 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2375 { 2376 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2377 union tcp_log_stackspecific log; 2378 struct timeval tv; 2379 2380 /* Convert our ms to a microsecond */ 2381 memset(&log, 0, sizeof(log)); 2382 log.u_bbr.flex1 = rtt; 2383 log.u_bbr.flex2 = send_time; 2384 log.u_bbr.flex3 = ack_time; 2385 log.u_bbr.flex4 = where; 2386 log.u_bbr.flex7 = 2; 2387 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2388 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2389 &rack->rc_inp->inp_socket->so_rcv, 2390 &rack->rc_inp->inp_socket->so_snd, 2391 TCP_LOG_RTT, 0, 2392 0, &log, false, &tv); 2393 } 2394 } 2395 2396 2397 2398 static inline void 2399 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2400 { 2401 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2402 union tcp_log_stackspecific log; 2403 struct timeval tv; 2404 2405 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2406 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2407 log.u_bbr.flex1 = line; 2408 log.u_bbr.flex2 = tick; 2409 log.u_bbr.flex3 = tp->t_maxunacktime; 2410 log.u_bbr.flex4 = tp->t_acktime; 2411 log.u_bbr.flex8 = event; 2412 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2413 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2414 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2415 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2416 log.u_bbr.pacing_gain = rack->r_must_retran; 2417 TCP_LOG_EVENTP(tp, NULL, 2418 &rack->rc_inp->inp_socket->so_rcv, 2419 &rack->rc_inp->inp_socket->so_snd, 2420 BBR_LOG_PROGRESS, 0, 2421 0, &log, false, &tv); 2422 } 2423 } 2424 2425 static void 2426 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2427 { 2428 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2429 union tcp_log_stackspecific log; 2430 2431 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2432 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2433 log.u_bbr.flex1 = slot; 2434 if (rack->rack_no_prr) 2435 log.u_bbr.flex2 = 0; 2436 else 2437 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2438 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2439 log.u_bbr.flex8 = rack->rc_in_persist; 2440 log.u_bbr.timeStamp = cts; 2441 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2442 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2443 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2444 log.u_bbr.pacing_gain = rack->r_must_retran; 2445 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2446 &rack->rc_inp->inp_socket->so_rcv, 
2447 &rack->rc_inp->inp_socket->so_snd, 2448 BBR_LOG_BBRSND, 0, 2449 0, &log, false, tv); 2450 } 2451 } 2452 2453 static void 2454 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2455 { 2456 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2457 union tcp_log_stackspecific log; 2458 struct timeval tv; 2459 2460 memset(&log, 0, sizeof(log)); 2461 log.u_bbr.flex1 = did_out; 2462 log.u_bbr.flex2 = nxt_pkt; 2463 log.u_bbr.flex3 = way_out; 2464 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2465 if (rack->rack_no_prr) 2466 log.u_bbr.flex5 = 0; 2467 else 2468 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2469 log.u_bbr.flex6 = nsegs; 2470 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2471 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2472 log.u_bbr.flex7 <<= 1; 2473 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2474 log.u_bbr.flex7 <<= 1; 2475 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2476 log.u_bbr.flex8 = rack->rc_in_persist; 2477 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2478 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2479 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2480 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2481 log.u_bbr.use_lt_bw <<= 1; 2482 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2483 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2484 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2485 log.u_bbr.pacing_gain = rack->r_must_retran; 2486 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2487 &rack->rc_inp->inp_socket->so_rcv, 2488 &rack->rc_inp->inp_socket->so_snd, 2489 BBR_LOG_DOSEG_DONE, 0, 2490 0, &log, false, &tv); 2491 } 2492 } 2493 2494 static void 2495 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2496 { 2497 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2498 union tcp_log_stackspecific log; 2499 struct timeval tv; 2500 2501 memset(&log, 0, sizeof(log)); 2502 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2503 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2504 log.u_bbr.flex4 = arg1; 2505 log.u_bbr.flex5 = arg2; 2506 log.u_bbr.flex6 = arg3; 2507 log.u_bbr.flex8 = frm; 2508 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2509 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2510 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2511 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2512 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2513 log.u_bbr.pacing_gain = rack->r_must_retran; 2514 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 2515 &tptosocket(tp)->so_snd, 2516 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 2517 } 2518 } 2519 2520 static void 2521 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2522 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2523 { 2524 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2525 union tcp_log_stackspecific log; 2526 struct timeval tv; 2527 2528 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2529 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2530 log.u_bbr.flex1 = slot; 2531 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2532 log.u_bbr.flex4 = reason; 2533 if (rack->rack_no_prr) 2534 log.u_bbr.flex5 = 0; 2535 else 2536 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2537 log.u_bbr.flex7 = hpts_calling; 2538 log.u_bbr.flex8 = rack->rc_in_persist; 2539 log.u_bbr.lt_epoch = cwnd_to_use; 2540 log.u_bbr.timeStamp = 
tcp_get_usecs(&tv); 2541 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2542 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2543 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2544 log.u_bbr.pacing_gain = rack->r_must_retran; 2545 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2546 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2547 &rack->rc_inp->inp_socket->so_rcv, 2548 &rack->rc_inp->inp_socket->so_snd, 2549 BBR_LOG_JUSTRET, 0, 2550 tlen, &log, false, &tv); 2551 } 2552 } 2553 2554 static void 2555 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2556 struct timeval *tv, uint32_t flags_on_entry) 2557 { 2558 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2559 union tcp_log_stackspecific log; 2560 2561 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2562 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2563 log.u_bbr.flex1 = line; 2564 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2565 log.u_bbr.flex3 = flags_on_entry; 2566 log.u_bbr.flex4 = us_cts; 2567 if (rack->rack_no_prr) 2568 log.u_bbr.flex5 = 0; 2569 else 2570 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2571 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2572 log.u_bbr.flex7 = hpts_removed; 2573 log.u_bbr.flex8 = 1; 2574 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2575 log.u_bbr.timeStamp = us_cts; 2576 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2577 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2578 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2579 log.u_bbr.pacing_gain = rack->r_must_retran; 2580 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2581 &rack->rc_inp->inp_socket->so_rcv, 2582 &rack->rc_inp->inp_socket->so_snd, 2583 BBR_LOG_TIMERCANC, 0, 2584 0, &log, false, tv); 2585 } 2586 } 2587 2588 static void 2589 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2590 uint32_t flex1, uint32_t flex2, 2591 uint32_t flex3, uint32_t flex4, 2592 uint32_t flex5, uint32_t flex6, 2593 uint16_t flex7, uint8_t mod) 2594 { 2595 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2596 union tcp_log_stackspecific log; 2597 struct timeval tv; 2598 2599 if (mod == 1) { 2600 /* No you can't use 1, its for the real to cancel */ 2601 return; 2602 } 2603 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2604 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2605 log.u_bbr.flex1 = flex1; 2606 log.u_bbr.flex2 = flex2; 2607 log.u_bbr.flex3 = flex3; 2608 log.u_bbr.flex4 = flex4; 2609 log.u_bbr.flex5 = flex5; 2610 log.u_bbr.flex6 = flex6; 2611 log.u_bbr.flex7 = flex7; 2612 log.u_bbr.flex8 = mod; 2613 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2614 &rack->rc_inp->inp_socket->so_rcv, 2615 &rack->rc_inp->inp_socket->so_snd, 2616 BBR_LOG_TIMERCANC, 0, 2617 0, &log, false, &tv); 2618 } 2619 } 2620 2621 static void 2622 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2623 { 2624 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2625 union tcp_log_stackspecific log; 2626 struct timeval tv; 2627 2628 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2629 log.u_bbr.flex1 = timers; 2630 log.u_bbr.flex2 = ret; 2631 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2632 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2633 log.u_bbr.flex5 = cts; 2634 if (rack->rack_no_prr) 2635 log.u_bbr.flex6 = 0; 2636 else 2637 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2638 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2639 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2640 log.u_bbr.pacing_gain = rack->r_must_retran; 2641 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2642 
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2643 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2644 &rack->rc_inp->inp_socket->so_rcv, 2645 &rack->rc_inp->inp_socket->so_snd, 2646 BBR_LOG_TO_PROCESS, 0, 2647 0, &log, false, &tv); 2648 } 2649 } 2650 2651 static void 2652 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2653 { 2654 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2655 union tcp_log_stackspecific log; 2656 struct timeval tv; 2657 2658 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2659 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2660 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2661 if (rack->rack_no_prr) 2662 log.u_bbr.flex3 = 0; 2663 else 2664 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2665 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2666 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2667 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2668 log.u_bbr.flex7 = line; 2669 log.u_bbr.flex8 = frm; 2670 log.u_bbr.pkts_out = orig_cwnd; 2671 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2672 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2673 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2674 log.u_bbr.use_lt_bw <<= 1; 2675 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2676 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2677 &rack->rc_inp->inp_socket->so_rcv, 2678 &rack->rc_inp->inp_socket->so_snd, 2679 BBR_LOG_BBRUPD, 0, 2680 0, &log, false, &tv); 2681 } 2682 } 2683 2684 #ifdef NETFLIX_EXP_DETECTION 2685 static void 2686 rack_log_sad(struct tcp_rack *rack, int event) 2687 { 2688 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2689 union tcp_log_stackspecific log; 2690 struct timeval tv; 2691 2692 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2693 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2694 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2695 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2696 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2697 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2698 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2699 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2700 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2701 log.u_bbr.lt_epoch |= rack->do_detection; 2702 log.u_bbr.applimited = tcp_map_minimum; 2703 log.u_bbr.flex7 = rack->sack_attack_disable; 2704 log.u_bbr.flex8 = event; 2705 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2706 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2707 log.u_bbr.delivered = tcp_sad_decay_val; 2708 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2709 &rack->rc_inp->inp_socket->so_rcv, 2710 &rack->rc_inp->inp_socket->so_snd, 2711 TCP_SAD_DETECTION, 0, 2712 0, &log, false, &tv); 2713 } 2714 } 2715 #endif 2716 2717 static void 2718 rack_counter_destroy(void) 2719 { 2720 counter_u64_free(rack_fto_send); 2721 counter_u64_free(rack_fto_rsm_send); 2722 counter_u64_free(rack_nfto_resend); 2723 counter_u64_free(rack_hw_pace_init_fail); 2724 counter_u64_free(rack_hw_pace_lost); 2725 counter_u64_free(rack_non_fto_send); 2726 counter_u64_free(rack_extended_rfo); 2727 counter_u64_free(rack_ack_total); 2728 counter_u64_free(rack_express_sack); 2729 counter_u64_free(rack_sack_total); 2730 counter_u64_free(rack_move_none); 2731 counter_u64_free(rack_move_some); 2732 counter_u64_free(rack_sack_attacks_detected); 2733 counter_u64_free(rack_sack_attacks_reversed); 2734 counter_u64_free(rack_sack_used_next_merge); 2735 counter_u64_free(rack_sack_used_prev_merge); 2736 counter_u64_free(rack_tlp_tot); 2737 counter_u64_free(rack_tlp_newdata); 2738 counter_u64_free(rack_tlp_retran); 2739 
counter_u64_free(rack_tlp_retran_bytes); 2740 counter_u64_free(rack_to_tot); 2741 counter_u64_free(rack_saw_enobuf); 2742 counter_u64_free(rack_saw_enobuf_hw); 2743 counter_u64_free(rack_saw_enetunreach); 2744 counter_u64_free(rack_hot_alloc); 2745 counter_u64_free(rack_to_alloc); 2746 counter_u64_free(rack_to_alloc_hard); 2747 counter_u64_free(rack_to_alloc_emerg); 2748 counter_u64_free(rack_to_alloc_limited); 2749 counter_u64_free(rack_alloc_limited_conns); 2750 counter_u64_free(rack_split_limited); 2751 counter_u64_free(rack_multi_single_eq); 2752 counter_u64_free(rack_proc_non_comp_ack); 2753 counter_u64_free(rack_sack_proc_all); 2754 counter_u64_free(rack_sack_proc_restart); 2755 counter_u64_free(rack_sack_proc_short); 2756 counter_u64_free(rack_sack_skipped_acked); 2757 counter_u64_free(rack_sack_splits); 2758 counter_u64_free(rack_input_idle_reduces); 2759 counter_u64_free(rack_collapsed_win); 2760 counter_u64_free(rack_collapsed_win_rxt); 2761 counter_u64_free(rack_collapsed_win_rxt_bytes); 2762 counter_u64_free(rack_collapsed_win_seen); 2763 counter_u64_free(rack_try_scwnd); 2764 counter_u64_free(rack_persists_sends); 2765 counter_u64_free(rack_persists_acks); 2766 counter_u64_free(rack_persists_loss); 2767 counter_u64_free(rack_persists_lost_ends); 2768 #ifdef INVARIANTS 2769 counter_u64_free(rack_adjust_map_bw); 2770 #endif 2771 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2772 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2773 } 2774 2775 static struct rack_sendmap * 2776 rack_alloc(struct tcp_rack *rack) 2777 { 2778 struct rack_sendmap *rsm; 2779 2780 /* 2781 * First get the top of the list it in 2782 * theory is the "hottest" rsm we have, 2783 * possibly just freed by ack processing. 2784 */ 2785 if (rack->rc_free_cnt > rack_free_cache) { 2786 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2787 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2788 counter_u64_add(rack_hot_alloc, 1); 2789 rack->rc_free_cnt--; 2790 return (rsm); 2791 } 2792 /* 2793 * Once we get under our free cache we probably 2794 * no longer have a "hot" one available. Lets 2795 * get one from UMA. 2796 */ 2797 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2798 if (rsm) { 2799 rack->r_ctl.rc_num_maps_alloced++; 2800 counter_u64_add(rack_to_alloc, 1); 2801 return (rsm); 2802 } 2803 /* 2804 * Dig in to our aux rsm's (the last two) since 2805 * UMA failed to get us one. 
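 * (These are the entries normally held in reserve by the
 * rack_free_cache threshold above; taking one here is a last resort
 * and is accounted for via rack_to_alloc_emerg.)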
2806 */ 2807 if (rack->rc_free_cnt) { 2808 counter_u64_add(rack_to_alloc_emerg, 1); 2809 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2810 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2811 rack->rc_free_cnt--; 2812 return (rsm); 2813 } 2814 return (NULL); 2815 } 2816 2817 static struct rack_sendmap * 2818 rack_alloc_full_limit(struct tcp_rack *rack) 2819 { 2820 if ((V_tcp_map_entries_limit > 0) && 2821 (rack->do_detection == 0) && 2822 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2823 counter_u64_add(rack_to_alloc_limited, 1); 2824 if (!rack->alloc_limit_reported) { 2825 rack->alloc_limit_reported = 1; 2826 counter_u64_add(rack_alloc_limited_conns, 1); 2827 } 2828 return (NULL); 2829 } 2830 return (rack_alloc(rack)); 2831 } 2832 2833 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2834 static struct rack_sendmap * 2835 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2836 { 2837 struct rack_sendmap *rsm; 2838 2839 if (limit_type) { 2840 /* currently there is only one limit type */ 2841 if (V_tcp_map_split_limit > 0 && 2842 (rack->do_detection == 0) && 2843 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2844 counter_u64_add(rack_split_limited, 1); 2845 if (!rack->alloc_limit_reported) { 2846 rack->alloc_limit_reported = 1; 2847 counter_u64_add(rack_alloc_limited_conns, 1); 2848 } 2849 return (NULL); 2850 } 2851 } 2852 2853 /* allocate and mark in the limit type, if set */ 2854 rsm = rack_alloc(rack); 2855 if (rsm != NULL && limit_type) { 2856 rsm->r_limit_type = limit_type; 2857 rack->r_ctl.rc_num_split_allocs++; 2858 } 2859 return (rsm); 2860 } 2861 2862 static void 2863 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2864 { 2865 if (rsm->r_flags & RACK_APP_LIMITED) { 2866 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2867 rack->r_ctl.rc_app_limited_cnt--; 2868 } 2869 } 2870 if (rsm->r_limit_type) { 2871 /* currently there is only one limit type */ 2872 rack->r_ctl.rc_num_split_allocs--; 2873 } 2874 if (rsm == rack->r_ctl.rc_first_appl) { 2875 if (rack->r_ctl.rc_app_limited_cnt == 0) 2876 rack->r_ctl.rc_first_appl = NULL; 2877 else { 2878 /* Follow the next one out */ 2879 struct rack_sendmap fe; 2880 2881 fe.r_start = rsm->r_nseq_appl; 2882 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2883 } 2884 } 2885 if (rsm == rack->r_ctl.rc_resend) 2886 rack->r_ctl.rc_resend = NULL; 2887 if (rsm == rack->r_ctl.rc_end_appl) 2888 rack->r_ctl.rc_end_appl = NULL; 2889 if (rack->r_ctl.rc_tlpsend == rsm) 2890 rack->r_ctl.rc_tlpsend = NULL; 2891 if (rack->r_ctl.rc_sacklast == rsm) 2892 rack->r_ctl.rc_sacklast = NULL; 2893 memset(rsm, 0, sizeof(struct rack_sendmap)); 2894 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2895 rack->rc_free_cnt++; 2896 } 2897 2898 static void 2899 rack_free_trim(struct tcp_rack *rack) 2900 { 2901 struct rack_sendmap *rsm; 2902 2903 /* 2904 * Free up all the tail entries until 2905 * we get our list down to the limit. 
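 * (The rack_free_cache entries that remain serve as the hot and
 * emergency pool that rack_alloc() draws from.)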
2906 */ 2907 while (rack->rc_free_cnt > rack_free_cache) { 2908 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2909 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2910 rack->rc_free_cnt--; 2911 uma_zfree(rack_zone, rsm); 2912 } 2913 } 2914 2915 2916 static uint32_t 2917 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2918 { 2919 uint64_t srtt, bw, len, tim; 2920 uint32_t segsiz, def_len, minl; 2921 2922 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2923 def_len = rack_def_data_window * segsiz; 2924 if (rack->rc_gp_filled == 0) { 2925 /* 2926 * We have no measurement (IW is in flight?) so 2927 * we can only guess using our data_window sysctl 2928 * value (usually 20MSS). 2929 */ 2930 return (def_len); 2931 } 2932 /* 2933 * Now we have a number of factors to consider. 2934 * 2935 * 1) We have a desired BDP which is usually 2936 * at least 2. 2937 * 2) We have a minimum number of rtt's usually 1 SRTT 2938 * but we allow it to be more. 2939 * 3) We want to make sure a measurement lasts at least N useconds (if 2940 * we have set rack_min_measure_usec). 2941 * 2942 * We handle the first concern here by trying to create a data 2943 * window of max(rack_def_data_window, DesiredBDP). The 2944 * second concern we handle by not letting the measurement 2945 * window end normally until at least the required SRTT's 2946 * have gone by which is done further below in 2947 * rack_enough_for_measurement(). Finally the third concern 2948 * we also handle here by calculating how much data that time 2949 * would cover at the current BW and then returning the 2950 * max of our first calculation and that length. Note 2951 * that if rack_min_measure_usec is 0, we don't deal 2952 * with concern 3. Also for both Concern 1 and 3 an 2953 * application limited period could end the measurement 2954 * earlier. 2955 * 2956 * So let's calculate the BDP with the "known" b/w using 2957 * the SRTT as our rtt and then multiply it by the 2958 * goal. 2959 */ 2960 bw = rack_get_bw(rack); 2961 srtt = (uint64_t)tp->t_srtt; 2962 len = bw * srtt; 2963 len /= (uint64_t)HPTS_USEC_IN_SEC; 2964 len *= max(1, rack_goal_bdp); 2965 /* Now we need to round up to the nearest MSS */ 2966 len = roundup(len, segsiz); 2967 if (rack_min_measure_usec) { 2968 /* Now calculate our min length for this b/w */ 2969 tim = rack_min_measure_usec; 2970 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 2971 if (minl == 0) 2972 minl = 1; 2973 minl = roundup(minl, segsiz); 2974 if (len < minl) 2975 len = minl; 2976 } 2977 /* 2978 * Now if we end up with a very small window we want 2979 * to allow the measurement window to be 2980 * as small as possible. This happens on 2981 * low b/w connections and we don't want to 2982 * span huge numbers of rtt's between measurements. 2983 * 2984 * We basically include 2 over our "MIN window" so 2985 * that the measurement can be shortened (possibly) by 2986 * an ack'ed packet. 2987 */ 2988 if (len < def_len) 2989 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 2990 else 2991 return (max((uint32_t)len, def_len)); 2992 2993 } 2994 2995 static int 2996 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 2997 { 2998 uint32_t tim, srtts, segsiz; 2999 3000 /* 3001 * Has enough time passed for the GP measurement to be valid?
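 * Roughly, the checks below accept the measurement once everything
 * is acked, once we have reached the first app-limited point, or
 * once at least rack_min_srtts worth of rc_gp_srtt has elapsed and
 * enough bytes past gput_seq have been acked; otherwise we keep
 * waiting.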
3002 */ 3003 if ((tp->snd_max == tp->snd_una) || 3004 (th_ack == tp->snd_max)){ 3005 /* All is acked */ 3006 *quality = RACK_QUALITY_ALLACKED; 3007 return (1); 3008 } 3009 if (SEQ_LT(th_ack, tp->gput_seq)) { 3010 /* Not enough bytes yet */ 3011 return (0); 3012 } 3013 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3014 if (SEQ_LT(th_ack, tp->gput_ack) && 3015 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3016 /* Not enough bytes yet */ 3017 return (0); 3018 } 3019 if (rack->r_ctl.rc_first_appl && 3020 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3021 /* 3022 * We are up to the app limited send point 3023 * we have to measure irrespective of the time.. 3024 */ 3025 *quality = RACK_QUALITY_APPLIMITED; 3026 return (1); 3027 } 3028 /* Now what about time? */ 3029 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3030 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3031 if (tim >= srtts) { 3032 *quality = RACK_QUALITY_HIGH; 3033 return (1); 3034 } 3035 /* Nope not even a full SRTT has passed */ 3036 return (0); 3037 } 3038 3039 static void 3040 rack_log_timely(struct tcp_rack *rack, 3041 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3042 uint64_t up_bnd, int line, uint8_t method) 3043 { 3044 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3045 union tcp_log_stackspecific log; 3046 struct timeval tv; 3047 3048 memset(&log, 0, sizeof(log)); 3049 log.u_bbr.flex1 = logged; 3050 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3051 log.u_bbr.flex2 <<= 4; 3052 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3053 log.u_bbr.flex2 <<= 4; 3054 log.u_bbr.flex2 |= rack->rc_gp_incr; 3055 log.u_bbr.flex2 <<= 4; 3056 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3057 log.u_bbr.flex3 = rack->rc_gp_incr; 3058 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3059 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3060 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3061 log.u_bbr.flex7 = rack->rc_gp_bwred; 3062 log.u_bbr.flex8 = method; 3063 log.u_bbr.cur_del_rate = cur_bw; 3064 log.u_bbr.delRate = low_bnd; 3065 log.u_bbr.bw_inuse = up_bnd; 3066 log.u_bbr.rttProp = rack_get_bw(rack); 3067 log.u_bbr.pkt_epoch = line; 3068 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3069 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3070 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3071 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3072 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3073 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3074 log.u_bbr.cwnd_gain <<= 1; 3075 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3076 log.u_bbr.cwnd_gain <<= 1; 3077 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3078 log.u_bbr.cwnd_gain <<= 1; 3079 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3080 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3081 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3082 &rack->rc_inp->inp_socket->so_rcv, 3083 &rack->rc_inp->inp_socket->so_snd, 3084 TCP_TIMELY_WORK, 0, 3085 0, &log, false, &tv); 3086 } 3087 } 3088 3089 static int 3090 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3091 { 3092 /* 3093 * Before we increase we need to know if 3094 * the estimate just made was less than 3095 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3096 * 3097 * If we already are pacing at a fast enough 3098 * rate to push us faster there is no sense of 3099 * increasing. 3100 * 3101 * We first caculate our actual pacing rate (ss or ca multiplier 3102 * times our cur_bw). 
3103 * 3104 * Then we take the last measured rate and multipy by our 3105 * maximum pacing overage to give us a max allowable rate. 3106 * 3107 * If our act_rate is smaller than our max_allowable rate 3108 * then we should increase. Else we should hold steady. 3109 * 3110 */ 3111 uint64_t act_rate, max_allow_rate; 3112 3113 if (rack_timely_no_stopping) 3114 return (1); 3115 3116 if ((cur_bw == 0) || (last_bw_est == 0)) { 3117 /* 3118 * Initial startup case or 3119 * everything is acked case. 3120 */ 3121 rack_log_timely(rack, mult, cur_bw, 0, 0, 3122 __LINE__, 9); 3123 return (1); 3124 } 3125 if (mult <= 100) { 3126 /* 3127 * We can always pace at or slightly above our rate. 3128 */ 3129 rack_log_timely(rack, mult, cur_bw, 0, 0, 3130 __LINE__, 9); 3131 return (1); 3132 } 3133 act_rate = cur_bw * (uint64_t)mult; 3134 act_rate /= 100; 3135 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3136 max_allow_rate /= 100; 3137 if (act_rate < max_allow_rate) { 3138 /* 3139 * Here the rate we are actually pacing at 3140 * is smaller than 10% above our last measurement. 3141 * This means we are pacing below what we would 3142 * like to try to achieve (plus some wiggle room). 3143 */ 3144 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3145 __LINE__, 9); 3146 return (1); 3147 } else { 3148 /* 3149 * Here we are already pacing at least rack_max_per_above(10%) 3150 * what we are getting back. This indicates most likely 3151 * that we are being limited (cwnd/rwnd/app) and can't 3152 * get any more b/w. There is no sense of trying to 3153 * raise up the pacing rate its not speeding us up 3154 * and we already are pacing faster than we are getting. 3155 */ 3156 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3157 __LINE__, 8); 3158 return (0); 3159 } 3160 } 3161 3162 static void 3163 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3164 { 3165 /* 3166 * When we drag bottom, we want to assure 3167 * that no multiplier is below 1.0, if so 3168 * we want to restore it to at least that. 3169 */ 3170 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3171 /* This is unlikely we usually do not touch recovery */ 3172 rack->r_ctl.rack_per_of_gp_rec = 100; 3173 } 3174 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3175 rack->r_ctl.rack_per_of_gp_ca = 100; 3176 } 3177 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3178 rack->r_ctl.rack_per_of_gp_ss = 100; 3179 } 3180 } 3181 3182 static void 3183 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3184 { 3185 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3186 rack->r_ctl.rack_per_of_gp_ca = 100; 3187 } 3188 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3189 rack->r_ctl.rack_per_of_gp_ss = 100; 3190 } 3191 } 3192 3193 static void 3194 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3195 { 3196 int32_t calc, logged, plus; 3197 3198 logged = 0; 3199 3200 if (override) { 3201 /* 3202 * override is passed when we are 3203 * loosing b/w and making one last 3204 * gasp at trying to not loose out 3205 * to a new-reno flow. 3206 */ 3207 goto extra_boost; 3208 } 3209 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3210 if (rack->rc_gp_incr && 3211 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3212 /* 3213 * Reset and get 5 strokes more before the boost. Note 3214 * that the count is 0 based so we have to add one. 
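 * Illustration (assuming, say, a rack_gp_increase_per of 2): the first
 * four raises in a row each add 2 percentage points, the fifth takes
 * the extra_boost path and adds 2 * RACK_TIMELY_CNT_BOOST = 10 points
 * in one step, and the counter starts over.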
3215 */ 3216 extra_boost: 3217 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3218 rack->rc_gp_timely_inc_cnt = 0; 3219 } else 3220 plus = (uint32_t)rack_gp_increase_per; 3221 /* Must be at least 1% increase for true timely increases */ 3222 if ((plus < 1) && 3223 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3224 plus = 1; 3225 if (rack->rc_gp_saw_rec && 3226 (rack->rc_gp_no_rec_chg == 0) && 3227 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3228 rack->r_ctl.rack_per_of_gp_rec)) { 3229 /* We have been in recovery ding it too */ 3230 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3231 if (calc > 0xffff) 3232 calc = 0xffff; 3233 logged |= 1; 3234 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3235 if (rack_per_upper_bound_ss && 3236 (rack->rc_dragged_bottom == 0) && 3237 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3238 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3239 } 3240 if (rack->rc_gp_saw_ca && 3241 (rack->rc_gp_saw_ss == 0) && 3242 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3243 rack->r_ctl.rack_per_of_gp_ca)) { 3244 /* In CA */ 3245 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3246 if (calc > 0xffff) 3247 calc = 0xffff; 3248 logged |= 2; 3249 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3250 if (rack_per_upper_bound_ca && 3251 (rack->rc_dragged_bottom == 0) && 3252 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3253 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3254 } 3255 if (rack->rc_gp_saw_ss && 3256 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3257 rack->r_ctl.rack_per_of_gp_ss)) { 3258 /* In SS */ 3259 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3260 if (calc > 0xffff) 3261 calc = 0xffff; 3262 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3263 if (rack_per_upper_bound_ss && 3264 (rack->rc_dragged_bottom == 0) && 3265 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3266 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3267 logged |= 4; 3268 } 3269 if (logged && 3270 (rack->rc_gp_incr == 0)){ 3271 /* Go into increment mode */ 3272 rack->rc_gp_incr = 1; 3273 rack->rc_gp_timely_inc_cnt = 0; 3274 } 3275 if (rack->rc_gp_incr && 3276 logged && 3277 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3278 rack->rc_gp_timely_inc_cnt++; 3279 } 3280 rack_log_timely(rack, logged, plus, 0, 0, 3281 __LINE__, 1); 3282 } 3283 3284 static uint32_t 3285 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3286 { 3287 /* 3288 * norm_grad = rtt_diff / minrtt; 3289 * new_per = curper * (1 - B * norm_grad) 3290 * 3291 * B = rack_gp_decrease_per (default 10%) 3292 * rtt_dif = input var current rtt-diff 3293 * curper = input var current percentage 3294 * minrtt = from rack filter 3295 * 3296 */ 3297 uint64_t perf; 3298 3299 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3300 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3301 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3302 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3303 (uint64_t)1000000)) / 3304 (uint64_t)1000000); 3305 if (perf > curper) { 3306 /* TSNH */ 3307 perf = curper - 1; 3308 } 3309 return ((uint32_t)perf); 3310 } 3311 3312 static uint32_t 3313 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3314 { 3315 /* 3316 * highrttthresh 3317 * result = curper * (1 - (B * ( 1 - ------ )) 3318 * gp_srtt 3319 * 3320 * B = rack_gp_decrease_per (default 10%) 3321 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3322 */ 3323 uint64_t perf; 3324 uint32_t 
highrttthresh; 3325 3326 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3327 3328 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3329 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3330 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3331 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3332 return (perf); 3333 } 3334 3335 static void 3336 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3337 { 3338 uint64_t logvar, logvar2, logvar3; 3339 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3340 3341 if (rack->rc_gp_incr) { 3342 /* Turn off increment counting */ 3343 rack->rc_gp_incr = 0; 3344 rack->rc_gp_timely_inc_cnt = 0; 3345 } 3346 ss_red = ca_red = rec_red = 0; 3347 logged = 0; 3348 /* Calculate the reduction value */ 3349 if (rtt_diff < 0) { 3350 rtt_diff *= -1; 3351 } 3352 /* Must be at least 1% reduction */ 3353 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3354 /* We have been in recovery ding it too */ 3355 if (timely_says == 2) { 3356 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3357 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3358 if (alt < new_per) 3359 val = alt; 3360 else 3361 val = new_per; 3362 } else 3363 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3364 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3365 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3366 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3367 } else { 3368 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3369 rec_red = 0; 3370 } 3371 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3372 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3373 logged |= 1; 3374 } 3375 if (rack->rc_gp_saw_ss) { 3376 /* Sent in SS */ 3377 if (timely_says == 2) { 3378 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3379 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3380 if (alt < new_per) 3381 val = alt; 3382 else 3383 val = new_per; 3384 } else 3385 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3386 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3387 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3388 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3389 } else { 3390 ss_red = new_per; 3391 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3392 logvar = new_per; 3393 logvar <<= 32; 3394 logvar |= alt; 3395 logvar2 = (uint32_t)rtt; 3396 logvar2 <<= 32; 3397 logvar2 |= (uint32_t)rtt_diff; 3398 logvar3 = rack_gp_rtt_maxmul; 3399 logvar3 <<= 32; 3400 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3401 rack_log_timely(rack, timely_says, 3402 logvar2, logvar3, 3403 logvar, __LINE__, 10); 3404 } 3405 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3406 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3407 logged |= 4; 3408 } else if (rack->rc_gp_saw_ca) { 3409 /* Sent in CA */ 3410 if (timely_says == 2) { 3411 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3412 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3413 if (alt < new_per) 3414 val = alt; 3415 else 3416 val = new_per; 3417 } else 3418 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3419 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3420 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3421 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3422 } else { 3423 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3424 ca_red = 0; 3425 logvar = new_per; 3426 logvar <<= 32; 3427 logvar |= alt; 3428 logvar2 = (uint32_t)rtt; 3429 logvar2 <<= 32; 3430 logvar2 |= (uint32_t)rtt_diff; 3431 logvar3 = rack_gp_rtt_maxmul; 3432 logvar3 <<= 32; 3433 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3434 rack_log_timely(rack, timely_says, 3435 logvar2, logvar3, 3436 logvar, __LINE__, 10); 3437 } 3438 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3439 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3440 logged |= 2; 3441 } 3442 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3443 rack->rc_gp_timely_dec_cnt++; 3444 if (rack_timely_dec_clear && 3445 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3446 rack->rc_gp_timely_dec_cnt = 0; 3447 } 3448 logvar = ss_red; 3449 logvar <<= 32; 3450 logvar |= ca_red; 3451 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3452 __LINE__, 2); 3453 } 3454 3455 static void 3456 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3457 uint32_t rtt, uint32_t line, uint8_t reas) 3458 { 3459 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3460 union tcp_log_stackspecific log; 3461 struct timeval tv; 3462 3463 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3464 log.u_bbr.flex1 = line; 3465 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3466 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3467 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3468 log.u_bbr.flex5 = rtt; 3469 log.u_bbr.flex6 = rack->rc_highly_buffered; 3470 log.u_bbr.flex6 <<= 1; 3471 log.u_bbr.flex6 |= rack->forced_ack; 3472 log.u_bbr.flex6 <<= 1; 3473 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3474 log.u_bbr.flex6 <<= 1; 3475 log.u_bbr.flex6 |= rack->in_probe_rtt; 3476 log.u_bbr.flex6 <<= 1; 3477 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3478 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3479 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3480 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3481 log.u_bbr.flex8 = reas; 3482 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3483 log.u_bbr.delRate = rack_get_bw(rack); 3484 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3485 log.u_bbr.cur_del_rate <<= 32; 3486 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3487 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3488 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3489 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3490 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3491 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3492 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3493 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3494 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3495 log.u_bbr.rttProp = us_cts; 3496 log.u_bbr.rttProp <<= 32; 3497 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3498 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3499 &rack->rc_inp->inp_socket->so_rcv, 3500 &rack->rc_inp->inp_socket->so_snd, 3501 BBR_LOG_RTT_SHRINKS, 0, 3502 0, &log, false, &rack->r_ctl.act_rcv_time); 3503 } 3504 } 3505 3506 static void 3507 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3508 { 3509 uint64_t bwdp; 3510 3511 bwdp = rack_get_bw(rack); 3512 bwdp *= (uint64_t)rtt; 3513 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3514 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3515 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3516 /* 3517 * A window protocol must be able to have 4 packets 3518 * outstanding as the floor in order to function 3519 * (especially considering delayed ack :D). 3520 */ 3521 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3522 } 3523 } 3524 3525 static void 3526 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3527 { 3528 /** 3529 * ProbeRTT is a bit different in rack_pacing than in 3530 * BBR. It is like BBR in that it uses the lowering of 3531 * the RTT as a signal that we saw something new and 3532 * counts from there for how long between. But it is 3533 * different in that its quite simple. It does not 3534 * play with the cwnd and wait until we get down 3535 * to N segments outstanding and hold that for 3536 * 200ms. Instead it just sets the pacing reduction 3537 * rate to a set percentage (70 by default) and hold 3538 * that for a number of recent GP Srtt's. 3539 */ 3540 uint32_t segsiz; 3541 3542 if (rack->rc_gp_dyn_mul == 0) 3543 return; 3544 3545 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3546 /* We are idle */ 3547 return; 3548 } 3549 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3550 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3551 /* 3552 * Stop the goodput now, the idea here is 3553 * that future measurements with in_probe_rtt 3554 * won't register if they are not greater so 3555 * we want to get what info (if any) is available 3556 * now. 3557 */ 3558 rack_do_goodput_measurement(rack->rc_tp, rack, 3559 rack->rc_tp->snd_una, __LINE__, 3560 RACK_QUALITY_PROBERTT); 3561 } 3562 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3563 rack->r_ctl.rc_time_probertt_entered = us_cts; 3564 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3565 rack->r_ctl.rc_pace_min_segs); 3566 rack->in_probe_rtt = 1; 3567 rack->measure_saw_probe_rtt = 1; 3568 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3569 rack->r_ctl.rc_time_probertt_starts = 0; 3570 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3571 if (rack_probertt_use_min_rtt_entry) 3572 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3573 else 3574 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3575 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3576 __LINE__, RACK_RTTS_ENTERPROBE); 3577 } 3578 3579 static void 3580 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3581 { 3582 struct rack_sendmap *rsm; 3583 uint32_t segsiz; 3584 3585 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3586 rack->r_ctl.rc_pace_min_segs); 3587 rack->in_probe_rtt = 0; 3588 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3589 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3590 /* 3591 * Stop the goodput now, the idea here is 3592 * that future measurements with in_probe_rtt 3593 * won't register if they are not greater so 3594 * we want to get what info (if any) is available 3595 * now. 3596 */ 3597 rack_do_goodput_measurement(rack->rc_tp, rack, 3598 rack->rc_tp->snd_una, __LINE__, 3599 RACK_QUALITY_PROBERTT); 3600 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3601 /* 3602 * We don't have enough data to make a measurement. 3603 * So lets just stop and start here after exiting 3604 * probe-rtt. We probably are not interested in 3605 * the results anyway. 3606 */ 3607 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3608 } 3609 /* 3610 * Measurements through the current snd_max are going 3611 * to be limited by the slower pacing rate. 
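 * (We were pacing at the reduced probe-rtt percentage, so goodput
 * measured over those sends reflects our own throttling rather than
 * what the path can carry.)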
3612 * 3613 * We need to mark these as app-limited so we 3614 * don't collapse the b/w. 3615 */ 3616 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3617 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3618 if (rack->r_ctl.rc_app_limited_cnt == 0) 3619 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3620 else { 3621 /* 3622 * Go out to the end app limited and mark 3623 * this new one as next and move the end_appl up 3624 * to this guy. 3625 */ 3626 if (rack->r_ctl.rc_end_appl) 3627 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3628 rack->r_ctl.rc_end_appl = rsm; 3629 } 3630 rsm->r_flags |= RACK_APP_LIMITED; 3631 rack->r_ctl.rc_app_limited_cnt++; 3632 } 3633 /* 3634 * Now, we need to examine our pacing rate multipliers. 3635 * If its under 100%, we need to kick it back up to 3636 * 100%. We also don't let it be over our "max" above 3637 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3638 * Note setting clamp_atexit_prtt to 0 has the effect 3639 * of setting CA/SS to 100% always at exit (which is 3640 * the default behavior). 3641 */ 3642 if (rack_probertt_clear_is) { 3643 rack->rc_gp_incr = 0; 3644 rack->rc_gp_bwred = 0; 3645 rack->rc_gp_timely_inc_cnt = 0; 3646 rack->rc_gp_timely_dec_cnt = 0; 3647 } 3648 /* Do we do any clamping at exit? */ 3649 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3650 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3651 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3652 } 3653 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3654 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3655 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3656 } 3657 /* 3658 * Lets set rtt_diff to 0, so that we will get a "boost" 3659 * after exiting. 3660 */ 3661 rack->r_ctl.rc_rtt_diff = 0; 3662 3663 /* Clear all flags so we start fresh */ 3664 rack->rc_tp->t_bytes_acked = 0; 3665 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 3666 /* 3667 * If configured to, set the cwnd and ssthresh to 3668 * our targets. 3669 */ 3670 if (rack_probe_rtt_sets_cwnd) { 3671 uint64_t ebdp; 3672 uint32_t setto; 3673 3674 /* Set ssthresh so we get into CA once we hit our target */ 3675 if (rack_probertt_use_min_rtt_exit == 1) { 3676 /* Set to min rtt */ 3677 rack_set_prtt_target(rack, segsiz, 3678 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3679 } else if (rack_probertt_use_min_rtt_exit == 2) { 3680 /* Set to current gp rtt */ 3681 rack_set_prtt_target(rack, segsiz, 3682 rack->r_ctl.rc_gp_srtt); 3683 } else if (rack_probertt_use_min_rtt_exit == 3) { 3684 /* Set to entry gp rtt */ 3685 rack_set_prtt_target(rack, segsiz, 3686 rack->r_ctl.rc_entry_gp_rtt); 3687 } else { 3688 uint64_t sum; 3689 uint32_t setval; 3690 3691 sum = rack->r_ctl.rc_entry_gp_rtt; 3692 sum *= 10; 3693 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3694 if (sum >= 20) { 3695 /* 3696 * A highly buffered path needs 3697 * cwnd space for timely to work. 3698 * Lets set things up as if 3699 * we are heading back here again. 3700 */ 3701 setval = rack->r_ctl.rc_entry_gp_rtt; 3702 } else if (sum >= 15) { 3703 /* 3704 * Lets take the smaller of the 3705 * two since we are just somewhat 3706 * buffered. 3707 */ 3708 setval = rack->r_ctl.rc_gp_srtt; 3709 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3710 setval = rack->r_ctl.rc_entry_gp_rtt; 3711 } else { 3712 /* 3713 * Here we are not highly buffered 3714 * and should pick the min we can to 3715 * keep from causing loss. 
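 * Worked example of the ratio test above (sum = entry_gp_rtt * 10 /
 * gp_srtt): entering probe-rtt at 30ms and draining to a 10ms gp_srtt
 * gives sum = 30, so we size for the 30ms entry rtt; 16ms vs 10ms
 * gives sum = 16 and we take the smaller of the two; 12ms vs 10ms
 * gives sum = 12 and we fall through to the filtered min rtt here.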
3716 */ 3717 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3718 } 3719 rack_set_prtt_target(rack, segsiz, 3720 setval); 3721 } 3722 if (rack_probe_rtt_sets_cwnd > 1) { 3723 /* There is a percentage here to boost */ 3724 ebdp = rack->r_ctl.rc_target_probertt_flight; 3725 ebdp *= rack_probe_rtt_sets_cwnd; 3726 ebdp /= 100; 3727 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3728 } else 3729 setto = rack->r_ctl.rc_target_probertt_flight; 3730 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3731 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3732 /* Enforce a min */ 3733 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3734 } 3735 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3736 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3737 } 3738 rack_log_rtt_shrinks(rack, us_cts, 3739 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3740 __LINE__, RACK_RTTS_EXITPROBE); 3741 /* Clear times last so log has all the info */ 3742 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3743 rack->r_ctl.rc_time_probertt_entered = us_cts; 3744 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3745 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3746 } 3747 3748 static void 3749 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3750 { 3751 /* Check in on probe-rtt */ 3752 if (rack->rc_gp_filled == 0) { 3753 /* We do not do p-rtt unless we have gp measurements */ 3754 return; 3755 } 3756 if (rack->in_probe_rtt) { 3757 uint64_t no_overflow; 3758 uint32_t endtime, must_stay; 3759 3760 if (rack->r_ctl.rc_went_idle_time && 3761 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3762 /* 3763 * We went idle during prtt, just exit now. 3764 */ 3765 rack_exit_probertt(rack, us_cts); 3766 } else if (rack_probe_rtt_safety_val && 3767 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3768 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3769 /* 3770 * Probe RTT safety value triggered! 3771 */ 3772 rack_log_rtt_shrinks(rack, us_cts, 3773 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3774 __LINE__, RACK_RTTS_SAFETY); 3775 rack_exit_probertt(rack, us_cts); 3776 } 3777 /* Calculate the max we will wait */ 3778 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3779 if (rack->rc_highly_buffered) 3780 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3781 /* Calculate the min we must wait */ 3782 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3783 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3784 TSTMP_LT(us_cts, endtime)) { 3785 uint32_t calc; 3786 /* Do we lower more? 
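 * The reduction below is proportional to how long we have been in
 * probe-rtt: with, say, a 70% base rate and a step of 10 per gp_srtt,
 * being two gp_srtt's in pushes the pacing percentage down to 50,
 * bounded below by rack_per_of_gp_lowthresh.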
*/ 3787 no_exit: 3788 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3789 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3790 else 3791 calc = 0; 3792 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3793 if (calc) { 3794 /* Maybe */ 3795 calc *= rack_per_of_gp_probertt_reduce; 3796 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3797 /* Limit it too */ 3798 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3799 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3800 } 3801 /* We must reach target or the time set */ 3802 return; 3803 } 3804 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3805 if ((TSTMP_LT(us_cts, must_stay) && 3806 rack->rc_highly_buffered) || 3807 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3808 rack->r_ctl.rc_target_probertt_flight)) { 3809 /* We are not past the must_stay time */ 3810 goto no_exit; 3811 } 3812 rack_log_rtt_shrinks(rack, us_cts, 3813 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3814 __LINE__, RACK_RTTS_REACHTARGET); 3815 rack->r_ctl.rc_time_probertt_starts = us_cts; 3816 if (rack->r_ctl.rc_time_probertt_starts == 0) 3817 rack->r_ctl.rc_time_probertt_starts = 1; 3818 /* Restore back to our rate we want to pace at in prtt */ 3819 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3820 } 3821 /* 3822 * Setup our end time, some number of gp_srtts plus 200ms. 3823 */ 3824 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3825 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3826 if (rack_probertt_gpsrtt_cnt_div) 3827 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3828 else 3829 endtime = 0; 3830 endtime += rack_min_probertt_hold; 3831 endtime += rack->r_ctl.rc_time_probertt_starts; 3832 if (TSTMP_GEQ(us_cts, endtime)) { 3833 /* yes, exit probertt */ 3834 rack_exit_probertt(rack, us_cts); 3835 } 3836 3837 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3838 /* Go into probertt, its been too long since we went lower */ 3839 rack_enter_probertt(rack, us_cts); 3840 } 3841 } 3842 3843 static void 3844 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3845 uint32_t rtt, int32_t rtt_diff) 3846 { 3847 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3848 uint32_t losses; 3849 3850 if ((rack->rc_gp_dyn_mul == 0) || 3851 (rack->use_fixed_rate) || 3852 (rack->in_probe_rtt) || 3853 (rack->rc_always_pace == 0)) { 3854 /* No dynamic GP multiplier in play */ 3855 return; 3856 } 3857 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3858 cur_bw = rack_get_bw(rack); 3859 /* Calculate our up and down range */ 3860 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3861 up_bnd /= 100; 3862 up_bnd += rack->r_ctl.last_gp_comp_bw; 3863 3864 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3865 subfr /= 100; 3866 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3867 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3868 /* 3869 * This is the case where our RTT is above 3870 * the max target and we have been configured 3871 * to just do timely no bonus up stuff in that case. 3872 * 3873 * There are two configurations, set to 1, and we 3874 * just do timely if we are over our max. If its 3875 * set above 1 then we slam the multipliers down 3876 * to 100 and then decrement per timely. 
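 * In other words: at 1 we simply take the normal timely decrease
 * below; at 2 or more we first clamp any ss/ca multiplier that is
 * above 100% back to 100% and then let the timely decrease walk it
 * down from there.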
3877 */ 3878 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3879 __LINE__, 3); 3880 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3881 rack_validate_multipliers_at_or_below_100(rack); 3882 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3883 } else if ((last_bw_est < low_bnd) && !losses) { 3884 /* 3885 * We are decreasing this is a bit complicated this 3886 * means we are loosing ground. This could be 3887 * because another flow entered and we are competing 3888 * for b/w with it. This will push the RTT up which 3889 * makes timely unusable unless we want to get shoved 3890 * into a corner and just be backed off (the age 3891 * old problem with delay based CC). 3892 * 3893 * On the other hand if it was a route change we 3894 * would like to stay somewhat contained and not 3895 * blow out the buffers. 3896 */ 3897 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3898 __LINE__, 3); 3899 rack->r_ctl.last_gp_comp_bw = cur_bw; 3900 if (rack->rc_gp_bwred == 0) { 3901 /* Go into reduction counting */ 3902 rack->rc_gp_bwred = 1; 3903 rack->rc_gp_timely_dec_cnt = 0; 3904 } 3905 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3906 (timely_says == 0)) { 3907 /* 3908 * Push another time with a faster pacing 3909 * to try to gain back (we include override to 3910 * get a full raise factor). 3911 */ 3912 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3913 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3914 (timely_says == 0) || 3915 (rack_down_raise_thresh == 0)) { 3916 /* 3917 * Do an override up in b/w if we were 3918 * below the threshold or if the threshold 3919 * is zero we always do the raise. 3920 */ 3921 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3922 } else { 3923 /* Log it stays the same */ 3924 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3925 __LINE__, 11); 3926 } 3927 rack->rc_gp_timely_dec_cnt++; 3928 /* We are not incrementing really no-count */ 3929 rack->rc_gp_incr = 0; 3930 rack->rc_gp_timely_inc_cnt = 0; 3931 } else { 3932 /* 3933 * Lets just use the RTT 3934 * information and give up 3935 * pushing. 3936 */ 3937 goto use_timely; 3938 } 3939 } else if ((timely_says != 2) && 3940 !losses && 3941 (last_bw_est > up_bnd)) { 3942 /* 3943 * We are increasing b/w lets keep going, updating 3944 * our b/w and ignoring any timely input, unless 3945 * of course we are at our max raise (if there is one). 3946 */ 3947 3948 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3949 __LINE__, 3); 3950 rack->r_ctl.last_gp_comp_bw = cur_bw; 3951 if (rack->rc_gp_saw_ss && 3952 rack_per_upper_bound_ss && 3953 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3954 /* 3955 * In cases where we can't go higher 3956 * we should just use timely. 3957 */ 3958 goto use_timely; 3959 } 3960 if (rack->rc_gp_saw_ca && 3961 rack_per_upper_bound_ca && 3962 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 3963 /* 3964 * In cases where we can't go higher 3965 * we should just use timely. 
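 * (The CA multiplier is already pinned at its configured upper bound,
 * so a b/w-driven raise could not take effect anyway; let the RTT
 * gradient decide instead.)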
3966 */ 3967 goto use_timely; 3968 } 3969 rack->rc_gp_bwred = 0; 3970 rack->rc_gp_timely_dec_cnt = 0; 3971 /* You get a set number of pushes if timely is trying to reduce */ 3972 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 3973 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3974 } else { 3975 /* Log it stays the same */ 3976 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 3977 __LINE__, 12); 3978 } 3979 return; 3980 } else { 3981 /* 3982 * We are staying between the lower and upper range bounds 3983 * so use timely to decide. 3984 */ 3985 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3986 __LINE__, 3); 3987 use_timely: 3988 if (timely_says) { 3989 rack->rc_gp_incr = 0; 3990 rack->rc_gp_timely_inc_cnt = 0; 3991 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 3992 !losses && 3993 (last_bw_est < low_bnd)) { 3994 /* We are loosing ground */ 3995 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 3996 rack->rc_gp_timely_dec_cnt++; 3997 /* We are not incrementing really no-count */ 3998 rack->rc_gp_incr = 0; 3999 rack->rc_gp_timely_inc_cnt = 0; 4000 } else 4001 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4002 } else { 4003 rack->rc_gp_bwred = 0; 4004 rack->rc_gp_timely_dec_cnt = 0; 4005 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4006 } 4007 } 4008 } 4009 4010 static int32_t 4011 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4012 { 4013 int32_t timely_says; 4014 uint64_t log_mult, log_rtt_a_diff; 4015 4016 log_rtt_a_diff = rtt; 4017 log_rtt_a_diff <<= 32; 4018 log_rtt_a_diff |= (uint32_t)rtt_diff; 4019 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4020 rack_gp_rtt_maxmul)) { 4021 /* Reduce the b/w multiplier */ 4022 timely_says = 2; 4023 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4024 log_mult <<= 32; 4025 log_mult |= prev_rtt; 4026 rack_log_timely(rack, timely_says, log_mult, 4027 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4028 log_rtt_a_diff, __LINE__, 4); 4029 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4030 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4031 max(rack_gp_rtt_mindiv , 1)))) { 4032 /* Increase the b/w multiplier */ 4033 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4034 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4035 max(rack_gp_rtt_mindiv , 1)); 4036 log_mult <<= 32; 4037 log_mult |= prev_rtt; 4038 timely_says = 0; 4039 rack_log_timely(rack, timely_says, log_mult , 4040 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4041 log_rtt_a_diff, __LINE__, 5); 4042 } else { 4043 /* 4044 * Use a gradient to find it the timely gradient 4045 * is: 4046 * grad = rc_rtt_diff / min_rtt; 4047 * 4048 * anything below or equal to 0 will be 4049 * a increase indication. Anything above 4050 * zero is a decrease. Note we take care 4051 * of the actual gradient calculation 4052 * in the reduction (its not needed for 4053 * increase). 
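 * As a worked example of the three cases: with a 10ms filtered min
 * rtt and, say, rack_gp_rtt_maxmul = 3, minmul = 1 and mindiv = 4, a
 * gp_srtt of 30ms or more forces a reduction (says 2), 12.5ms or less
 * asks for an increase (says 0), and anything in between is decided
 * by the sign of rc_rtt_diff as described above.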
4054 */ 4055 log_mult = prev_rtt; 4056 if (rtt_diff <= 0) { 4057 /* 4058 * Rttdiff is less than zero, increase the 4059 * b/w multiplier (its 0 or negative) 4060 */ 4061 timely_says = 0; 4062 rack_log_timely(rack, timely_says, log_mult, 4063 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4064 } else { 4065 /* Reduce the b/w multiplier */ 4066 timely_says = 1; 4067 rack_log_timely(rack, timely_says, log_mult, 4068 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4069 } 4070 } 4071 return (timely_says); 4072 } 4073 4074 static void 4075 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4076 tcp_seq th_ack, int line, uint8_t quality) 4077 { 4078 uint64_t tim, bytes_ps, ltim, stim, utim; 4079 uint32_t segsiz, bytes, reqbytes, us_cts; 4080 int32_t gput, new_rtt_diff, timely_says; 4081 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4082 int did_add = 0; 4083 4084 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4085 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4086 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4087 tim = us_cts - tp->gput_ts; 4088 else 4089 tim = 0; 4090 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4091 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4092 else 4093 stim = 0; 4094 /* 4095 * Use the larger of the send time or ack time. This prevents us 4096 * from being influenced by ack artifacts to come up with too 4097 * high of measurement. Note that since we are spanning over many more 4098 * bytes in most of our measurements hopefully that is less likely to 4099 * occur. 4100 */ 4101 if (tim > stim) 4102 utim = max(tim, 1); 4103 else 4104 utim = max(stim, 1); 4105 /* Lets get a msec time ltim too for the old stuff */ 4106 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4107 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4108 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4109 if ((tim == 0) && (stim == 0)) { 4110 /* 4111 * Invalid measurement time, maybe 4112 * all on one ack/one send? 4113 */ 4114 bytes = 0; 4115 bytes_ps = 0; 4116 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4117 0, 0, 0, 10, __LINE__, NULL, quality); 4118 goto skip_measurement; 4119 } 4120 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4121 /* We never made a us_rtt measurement? */ 4122 bytes = 0; 4123 bytes_ps = 0; 4124 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4125 0, 0, 0, 10, __LINE__, NULL, quality); 4126 goto skip_measurement; 4127 } 4128 /* 4129 * Calculate the maximum possible b/w this connection 4130 * could have. We base our calculation on the lowest 4131 * rtt we have seen during the measurement and the 4132 * largest rwnd the client has given us in that time. This 4133 * forms a BDP that is the maximum that we could ever 4134 * get to the client. Anything larger is not valid. 4135 * 4136 * I originally had code here that rejected measurements 4137 * where the time was less than 1/2 the latest us_rtt. 4138 * But after thinking on that I realized its wrong since 4139 * say you had a 150Mbps or even 1Gbps link, and you 4140 * were a long way away.. example I am in Europe (100ms rtt) 4141 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4142 * bytes my time would be 1.2ms, and yet my rtt would say 4143 * the measurement was invalid the time was < 50ms. The 4144 * same thing is true for 150Mb (8ms of time). 
4145 * 4146 * A better way I realized is to look at what the maximum 4147 * the connection could possibly do. This is gated on 4148 * the lowest RTT we have seen and the highest rwnd. 4149 * We should in theory never exceed that, if we are 4150 * then something on the path is storing up packets 4151 * and then feeding them all at once to our endpoint 4152 * messing up our measurement. 4153 */ 4154 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4155 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4156 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4157 if (SEQ_LT(th_ack, tp->gput_seq)) { 4158 /* No measurement can be made */ 4159 bytes = 0; 4160 bytes_ps = 0; 4161 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4162 0, 0, 0, 10, __LINE__, NULL, quality); 4163 goto skip_measurement; 4164 } else 4165 bytes = (th_ack - tp->gput_seq); 4166 bytes_ps = (uint64_t)bytes; 4167 /* 4168 * Don't measure a b/w for pacing unless we have gotten at least 4169 * an initial windows worth of data in this measurement interval. 4170 * 4171 * Small numbers of bytes get badly influenced by delayed ack and 4172 * other artifacts. Note we take the initial window or our 4173 * defined minimum GP (defaulting to 10 which hopefully is the 4174 * IW). 4175 */ 4176 if (rack->rc_gp_filled == 0) { 4177 /* 4178 * The initial estimate is special. We 4179 * have blasted out an IW worth of packets 4180 * without a real valid ack ts results. We 4181 * then setup the app_limited_needs_set flag, 4182 * this should get the first ack in (probably 2 4183 * MSS worth) to be recorded as the timestamp. 4184 * We thus allow a smaller number of bytes i.e. 4185 * IW - 2MSS. 4186 */ 4187 reqbytes -= (2 * segsiz); 4188 /* Also lets fill previous for our first measurement to be neutral */ 4189 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4190 } 4191 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4192 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4193 rack->r_ctl.rc_app_limited_cnt, 4194 0, 0, 10, __LINE__, NULL, quality); 4195 goto skip_measurement; 4196 } 4197 /* 4198 * We now need to calculate the Timely like status so 4199 * we can update (possibly) the b/w multipliers. 4200 */ 4201 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4202 if (rack->rc_gp_filled == 0) { 4203 /* No previous reading */ 4204 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4205 } else { 4206 if (rack->measure_saw_probe_rtt == 0) { 4207 /* 4208 * We don't want a probertt to be counted 4209 * since it will be negative incorrectly. We 4210 * expect to be reducing the RTT when we 4211 * pace at a slower rate. 4212 */ 4213 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4214 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4215 } 4216 } 4217 timely_says = rack_make_timely_judgement(rack, 4218 rack->r_ctl.rc_gp_srtt, 4219 rack->r_ctl.rc_rtt_diff, 4220 rack->r_ctl.rc_prev_gp_srtt 4221 ); 4222 bytes_ps *= HPTS_USEC_IN_SEC; 4223 bytes_ps /= utim; 4224 if (bytes_ps > rack->r_ctl.last_max_bw) { 4225 /* 4226 * Something is on path playing 4227 * since this b/w is not possible based 4228 * on our BDP (highest rwnd and lowest rtt 4229 * we saw in the measurement window). 4230 * 4231 * Another option here would be to 4232 * instead skip the measurement. 
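 * For example, if the largest rwnd seen in the window was 1MB and the
 * lowest us_rtt was 10ms, last_max_bw caps the estimate at ~100MB/s;
 * a burst of compressed acks that computes to, say, 400MB/s gets
 * clamped to that value below.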
4233 */ 4234 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4235 bytes_ps, rack->r_ctl.last_max_bw, 0, 4236 11, __LINE__, NULL, quality); 4237 bytes_ps = rack->r_ctl.last_max_bw; 4238 } 4239 /* We store gp for b/w in bytes per second */ 4240 if (rack->rc_gp_filled == 0) { 4241 /* Initial measurement */ 4242 if (bytes_ps) { 4243 rack->r_ctl.gp_bw = bytes_ps; 4244 rack->rc_gp_filled = 1; 4245 rack->r_ctl.num_measurements = 1; 4246 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4247 } else { 4248 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4249 rack->r_ctl.rc_app_limited_cnt, 4250 0, 0, 10, __LINE__, NULL, quality); 4251 } 4252 if (tcp_in_hpts(rack->rc_inp) && 4253 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4254 /* 4255 * Ok we can't trust the pacer in this case 4256 * where we transition from un-paced to paced. 4257 * Or for that matter when the burst mitigation 4258 * was making a wild guess and got it wrong. 4259 * Stop the pacer and clear up all the aggregate 4260 * delays etc. 4261 */ 4262 tcp_hpts_remove(rack->rc_inp); 4263 rack->r_ctl.rc_hpts_flags = 0; 4264 rack->r_ctl.rc_last_output_to = 0; 4265 } 4266 did_add = 2; 4267 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4268 /* Still a small number run an average */ 4269 rack->r_ctl.gp_bw += bytes_ps; 4270 addpart = rack->r_ctl.num_measurements; 4271 rack->r_ctl.num_measurements++; 4272 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4273 /* We have collected enough to move forward */ 4274 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4275 } 4276 did_add = 3; 4277 } else { 4278 /* 4279 * We want to take 1/wma of the goodput and add in to 7/8th 4280 * of the old value weighted by the srtt. So if your measurement 4281 * period is say 2 SRTT's long you would get 1/4 as the 4282 * value, if it was like 1/2 SRTT then you would get 1/16th. 4283 * 4284 * But we must be careful not to take too much i.e. if the 4285 * srtt is say 20ms and the measurement is taken over 4286 * 400ms our weight would be 400/20 i.e. 20. On the 4287 * other hand if we get a measurement over 1ms with a 4288 * 10ms rtt we only want to take a much smaller portion. 4289 */ 4290 if (rack->r_ctl.num_measurements < 0xff) { 4291 rack->r_ctl.num_measurements++; 4292 } 4293 srtt = (uint64_t)tp->t_srtt; 4294 if (srtt == 0) { 4295 /* 4296 * Strange why did t_srtt go back to zero? 4297 */ 4298 if (rack->r_ctl.rc_rack_min_rtt) 4299 srtt = rack->r_ctl.rc_rack_min_rtt; 4300 else 4301 srtt = HPTS_USEC_IN_MSEC; 4302 } 4303 /* 4304 * XXXrrs: Note for reviewers, in playing with 4305 * dynamic pacing I discovered this GP calculation 4306 * as done originally leads to some undesired results. 4307 * Basically you can get longer measurements contributing 4308 * too much to the WMA. Thus I changed it if you are doing 4309 * dynamic adjustments to only do the aportioned adjustment 4310 * if we have a very small (time wise) measurement. Longer 4311 * measurements just get there weight (defaulting to 1/8) 4312 * add to the WMA. We may want to think about changing 4313 * this to always do that for both sides i.e. dynamic 4314 * and non-dynamic... but considering lots of folks 4315 * were playing with this I did not want to change the 4316 * calculation per.se. without your thoughts.. Lawerence? 4317 * Peter?? 
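 * For reference, with the 1/8 default: a dynamic-pacing measurement
 * spanning half an SRTT replaces only ~1/16 of the running average,
 * while one spanning two or more SRTTs replaces a flat 1/8.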
4318 */ 4319 if (rack->rc_gp_dyn_mul == 0) { 4320 subpart = rack->r_ctl.gp_bw * utim; 4321 subpart /= (srtt * 8); 4322 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4323 /* 4324 * The b/w update takes no more 4325 * away then 1/2 our running total 4326 * so factor it in. 4327 */ 4328 addpart = bytes_ps * utim; 4329 addpart /= (srtt * 8); 4330 } else { 4331 /* 4332 * Don't allow a single measurement 4333 * to account for more than 1/2 of the 4334 * WMA. This could happen on a retransmission 4335 * where utim becomes huge compared to 4336 * srtt (multiple retransmissions when using 4337 * the sending rate which factors in all the 4338 * transmissions from the first one). 4339 */ 4340 subpart = rack->r_ctl.gp_bw / 2; 4341 addpart = bytes_ps / 2; 4342 } 4343 resid_bw = rack->r_ctl.gp_bw - subpart; 4344 rack->r_ctl.gp_bw = resid_bw + addpart; 4345 did_add = 1; 4346 } else { 4347 if ((utim / srtt) <= 1) { 4348 /* 4349 * The b/w update was over a small period 4350 * of time. The idea here is to prevent a small 4351 * measurement time period from counting 4352 * too much. So we scale it based on the 4353 * time so it attributes less than 1/rack_wma_divisor 4354 * of its measurement. 4355 */ 4356 subpart = rack->r_ctl.gp_bw * utim; 4357 subpart /= (srtt * rack_wma_divisor); 4358 addpart = bytes_ps * utim; 4359 addpart /= (srtt * rack_wma_divisor); 4360 } else { 4361 /* 4362 * The scaled measurement was long 4363 * enough so lets just add in the 4364 * portion of the measurement i.e. 1/rack_wma_divisor 4365 */ 4366 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4367 addpart = bytes_ps / rack_wma_divisor; 4368 } 4369 if ((rack->measure_saw_probe_rtt == 0) || 4370 (bytes_ps > rack->r_ctl.gp_bw)) { 4371 /* 4372 * For probe-rtt we only add it in 4373 * if its larger, all others we just 4374 * add in. 4375 */ 4376 did_add = 1; 4377 resid_bw = rack->r_ctl.gp_bw - subpart; 4378 rack->r_ctl.gp_bw = resid_bw + addpart; 4379 } 4380 } 4381 } 4382 if ((rack->gp_ready == 0) && 4383 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4384 /* We have enough measurements now */ 4385 rack->gp_ready = 1; 4386 rack_set_cc_pacing(rack); 4387 if (rack->defer_options) 4388 rack_apply_deferred_options(rack); 4389 } 4390 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4391 rack_get_bw(rack), 22, did_add, NULL, quality); 4392 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4393 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4394 rack_update_multiplier(rack, timely_says, bytes_ps, 4395 rack->r_ctl.rc_gp_srtt, 4396 rack->r_ctl.rc_rtt_diff); 4397 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4398 rack_get_bw(rack), 3, line, NULL, quality); 4399 /* reset the gp srtt and setup the new prev */ 4400 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4401 /* Record the lost count for the next measurement */ 4402 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4403 /* 4404 * We restart our diffs based on the gpsrtt in the 4405 * measurement window. 4406 */ 4407 rack->rc_gp_rtt_set = 0; 4408 rack->rc_gp_saw_rec = 0; 4409 rack->rc_gp_saw_ca = 0; 4410 rack->rc_gp_saw_ss = 0; 4411 rack->rc_dragged_bottom = 0; 4412 skip_measurement: 4413 4414 #ifdef STATS 4415 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4416 gput); 4417 /* 4418 * XXXLAS: This is a temporary hack, and should be 4419 * chained off VOI_TCP_GPUT when stats(9) grows an 4420 * API to deal with chained VOIs. 
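 * Until that exists, the normalised value recorded below is simply
 * the percent change of this gput sample relative to
 * t_stats_gput_prev.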
4421 */ 4422 if (tp->t_stats_gput_prev > 0) 4423 stats_voi_update_abs_s32(tp->t_stats, 4424 VOI_TCP_GPUT_ND, 4425 ((gput - tp->t_stats_gput_prev) * 100) / 4426 tp->t_stats_gput_prev); 4427 #endif 4428 tp->t_flags &= ~TF_GPUTINPROG; 4429 tp->t_stats_gput_prev = gput; 4430 /* 4431 * Now are we app limited now and there is space from where we 4432 * were to where we want to go? 4433 * 4434 * We don't do the other case i.e. non-applimited here since 4435 * the next send will trigger us picking up the missing data. 4436 */ 4437 if (rack->r_ctl.rc_first_appl && 4438 TCPS_HAVEESTABLISHED(tp->t_state) && 4439 rack->r_ctl.rc_app_limited_cnt && 4440 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4441 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4442 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4443 /* 4444 * Yep there is enough outstanding to make a measurement here. 4445 */ 4446 struct rack_sendmap *rsm, fe; 4447 4448 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4449 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4450 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4451 rack->app_limited_needs_set = 0; 4452 tp->gput_seq = th_ack; 4453 if (rack->in_probe_rtt) 4454 rack->measure_saw_probe_rtt = 1; 4455 else if ((rack->measure_saw_probe_rtt) && 4456 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4457 rack->measure_saw_probe_rtt = 0; 4458 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4459 /* There is a full window to gain info from */ 4460 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4461 } else { 4462 /* We can only measure up to the applimited point */ 4463 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4464 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4465 /* 4466 * We don't have enough to make a measurement. 4467 */ 4468 tp->t_flags &= ~TF_GPUTINPROG; 4469 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4470 0, 0, 0, 6, __LINE__, NULL, quality); 4471 return; 4472 } 4473 } 4474 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4475 /* 4476 * We will get no more data into the SB 4477 * this means we need to have the data available 4478 * before we start a measurement. 4479 */ 4480 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4481 /* Nope not enough data. */ 4482 return; 4483 } 4484 } 4485 tp->t_flags |= TF_GPUTINPROG; 4486 /* 4487 * Now we need to find the timestamp of the send at tp->gput_seq 4488 * for the send based measurement. 4489 */ 4490 fe.r_start = tp->gput_seq; 4491 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4492 if (rsm) { 4493 /* Ok send-based limit is set */ 4494 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4495 /* 4496 * Move back to include the earlier part 4497 * so our ack time lines up right (this may 4498 * make an overlapping measurement but thats 4499 * ok). 4500 */ 4501 tp->gput_seq = rsm->r_start; 4502 } 4503 if (rsm->r_flags & RACK_ACKED) 4504 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4505 else 4506 rack->app_limited_needs_set = 1; 4507 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4508 } else { 4509 /* 4510 * If we don't find the rsm due to some 4511 * send-limit set the current time, which 4512 * basically disables the send-limit. 
4513 */ 4514 struct timeval tv; 4515 4516 microuptime(&tv); 4517 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4518 } 4519 rack_log_pacing_delay_calc(rack, 4520 tp->gput_seq, 4521 tp->gput_ack, 4522 (uint64_t)rsm, 4523 tp->gput_ts, 4524 rack->r_ctl.rc_app_limited_cnt, 4525 9, 4526 __LINE__, NULL, quality); 4527 } 4528 } 4529 4530 /* 4531 * CC wrapper hook functions 4532 */ 4533 static void 4534 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4535 uint16_t type, int32_t recovery) 4536 { 4537 uint32_t prior_cwnd, acked; 4538 struct tcp_log_buffer *lgb = NULL; 4539 uint8_t labc_to_use, quality; 4540 4541 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4542 tp->t_ccv.nsegs = nsegs; 4543 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 4544 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4545 uint32_t max; 4546 4547 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4548 if (tp->t_ccv.bytes_this_ack > max) { 4549 tp->t_ccv.bytes_this_ack = max; 4550 } 4551 } 4552 #ifdef STATS 4553 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4554 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4555 #endif 4556 quality = RACK_QUALITY_NONE; 4557 if ((tp->t_flags & TF_GPUTINPROG) && 4558 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4559 /* Measure the Goodput */ 4560 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4561 #ifdef NETFLIX_PEAKRATE 4562 if ((type == CC_ACK) && 4563 (tp->t_maxpeakrate)) { 4564 /* 4565 * We update t_peakrate_thr. This gives us roughly 4566 * one update per round trip time. Note 4567 * it will only be used if pace_always is off i.e 4568 * we don't do this for paced flows. 4569 */ 4570 rack_update_peakrate_thr(tp); 4571 } 4572 #endif 4573 } 4574 /* Which way our we limited, if not cwnd limited no advance in CA */ 4575 if (tp->snd_cwnd <= tp->snd_wnd) 4576 tp->t_ccv.flags |= CCF_CWND_LIMITED; 4577 else 4578 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 4579 if (tp->snd_cwnd > tp->snd_ssthresh) { 4580 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 4581 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4582 /* For the setting of a window past use the actual scwnd we are using */ 4583 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4584 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4585 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 4586 } 4587 } else { 4588 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4589 tp->t_bytes_acked = 0; 4590 } 4591 prior_cwnd = tp->snd_cwnd; 4592 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4593 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4594 labc_to_use = rack->rc_labc; 4595 else 4596 labc_to_use = rack_max_abc_post_recovery; 4597 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4598 union tcp_log_stackspecific log; 4599 struct timeval tv; 4600 4601 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4602 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4603 log.u_bbr.flex1 = th_ack; 4604 log.u_bbr.flex2 = tp->t_ccv.flags; 4605 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4606 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4607 log.u_bbr.flex5 = labc_to_use; 4608 log.u_bbr.flex6 = prior_cwnd; 4609 log.u_bbr.flex7 = V_tcp_do_newsack; 4610 log.u_bbr.flex8 = 1; 4611 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4612 0, &log, false, NULL, NULL, 0, &tv); 4613 } 4614 if (CC_ALGO(tp)->ack_received != NULL) { 4615 /* XXXLAS: Find a way to live without this */ 4616 tp->t_ccv.curack = th_ack; 
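		/*
		 * Hand the CC module the ABC limit chosen above (rack's
		 * own labc, or the post-recovery cap) via CCF_USE_LOCAL_ABC.
		 */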
4617 tp->t_ccv.labc = labc_to_use; 4618 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 4619 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 4620 } 4621 if (lgb) { 4622 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4623 } 4624 if (rack->r_must_retran) { 4625 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4626 /* 4627 * We now are beyond the rxt point so lets disable 4628 * the flag. 4629 */ 4630 rack->r_ctl.rc_out_at_rto = 0; 4631 rack->r_must_retran = 0; 4632 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4633 /* 4634 * Only decrement the rc_out_at_rto if the cwnd advances 4635 * at least a whole segment. Otherwise next time the peer 4636 * acks, we won't be able to send this generaly happens 4637 * when we are in Congestion Avoidance. 4638 */ 4639 if (acked <= rack->r_ctl.rc_out_at_rto){ 4640 rack->r_ctl.rc_out_at_rto -= acked; 4641 } else { 4642 rack->r_ctl.rc_out_at_rto = 0; 4643 } 4644 } 4645 } 4646 #ifdef STATS 4647 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4648 #endif 4649 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4650 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4651 } 4652 #ifdef NETFLIX_PEAKRATE 4653 /* we enforce max peak rate if it is set and we are not pacing */ 4654 if ((rack->rc_always_pace == 0) && 4655 tp->t_peakrate_thr && 4656 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4657 tp->snd_cwnd = tp->t_peakrate_thr; 4658 } 4659 #endif 4660 } 4661 4662 static void 4663 tcp_rack_partialack(struct tcpcb *tp) 4664 { 4665 struct tcp_rack *rack; 4666 4667 rack = (struct tcp_rack *)tp->t_fb_ptr; 4668 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4669 /* 4670 * If we are doing PRR and have enough 4671 * room to send <or> we are pacing and prr 4672 * is disabled we will want to see if we 4673 * can send data (by setting r_wanted_output to 4674 * true). 4675 */ 4676 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4677 rack->rack_no_prr) 4678 rack->r_wanted_output = 1; 4679 } 4680 4681 static void 4682 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4683 { 4684 struct tcp_rack *rack; 4685 uint32_t orig_cwnd; 4686 4687 orig_cwnd = tp->snd_cwnd; 4688 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4689 rack = (struct tcp_rack *)tp->t_fb_ptr; 4690 /* only alert CC if we alerted when we entered */ 4691 if (CC_ALGO(tp)->post_recovery != NULL) { 4692 tp->t_ccv.curack = th_ack; 4693 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 4694 if (tp->snd_cwnd < tp->snd_ssthresh) { 4695 /* 4696 * Rack has burst control and pacing 4697 * so lets not set this any lower than 4698 * snd_ssthresh per RFC-6582 (option 2). 
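			 *
			 * Illustrative numbers: with snd_ssthresh at 100 segments and
			 * a CC module whose post_recovery() would leave cwnd at, say,
			 * 70 segments, cwnd is raised back to the 100 segment
			 * ssthresh; rack's pacing and burst control keep that bump
			 * from turning into a burst.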
4699 */ 4700 tp->snd_cwnd = tp->snd_ssthresh; 4701 } 4702 } 4703 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4704 union tcp_log_stackspecific log; 4705 struct timeval tv; 4706 4707 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4708 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4709 log.u_bbr.flex1 = th_ack; 4710 log.u_bbr.flex2 = tp->t_ccv.flags; 4711 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4712 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4713 log.u_bbr.flex5 = V_tcp_abc_l_var; 4714 log.u_bbr.flex6 = orig_cwnd; 4715 log.u_bbr.flex7 = V_tcp_do_newsack; 4716 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4717 log.u_bbr.flex8 = 2; 4718 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4719 0, &log, false, NULL, NULL, 0, &tv); 4720 } 4721 if ((rack->rack_no_prr == 0) && 4722 (rack->no_prr_addback == 0) && 4723 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4724 /* 4725 * Suck the next prr cnt back into cwnd, but 4726 * only do that if we are not application limited. 4727 */ 4728 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 4729 /* 4730 * We are allowed to add back to the cwnd the amount we did 4731 * not get out if: 4732 * a) no_prr_addback is off. 4733 * b) we are not app limited 4734 * c) we are doing prr 4735 * <and> 4736 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4737 */ 4738 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4739 rack->r_ctl.rc_prr_sndcnt); 4740 } 4741 rack->r_ctl.rc_prr_sndcnt = 0; 4742 rack_log_to_prr(rack, 1, 0, __LINE__); 4743 } 4744 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4745 tp->snd_recover = tp->snd_una; 4746 if (rack->r_ctl.dsack_persist) { 4747 rack->r_ctl.dsack_persist--; 4748 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4749 rack->r_ctl.num_dsack = 0; 4750 } 4751 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4752 } 4753 EXIT_RECOVERY(tp->t_flags); 4754 } 4755 4756 static void 4757 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4758 { 4759 struct tcp_rack *rack; 4760 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4761 4762 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4763 #ifdef STATS 4764 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4765 #endif 4766 if (IN_RECOVERY(tp->t_flags) == 0) { 4767 in_rec_at_entry = 0; 4768 ssthresh_enter = tp->snd_ssthresh; 4769 cwnd_enter = tp->snd_cwnd; 4770 } else 4771 in_rec_at_entry = 1; 4772 rack = (struct tcp_rack *)tp->t_fb_ptr; 4773 switch (type) { 4774 case CC_NDUPACK: 4775 tp->t_flags &= ~TF_WASFRECOVERY; 4776 tp->t_flags &= ~TF_WASCRECOVERY; 4777 if (!IN_FASTRECOVERY(tp->t_flags)) { 4778 rack->r_ctl.rc_prr_delivered = 0; 4779 rack->r_ctl.rc_prr_out = 0; 4780 if (rack->rack_no_prr == 0) { 4781 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4782 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4783 } 4784 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4785 tp->snd_recover = tp->snd_max; 4786 if (tp->t_flags2 & TF2_ECN_PERMIT) 4787 tp->t_flags2 |= TF2_ECN_SND_CWR; 4788 } 4789 break; 4790 case CC_ECN: 4791 if (!IN_CONGRECOVERY(tp->t_flags) || 4792 /* 4793 * Allow ECN reaction on ACK to CWR, if 4794 * that data segment was also CE marked. 
4795 */ 4796 SEQ_GEQ(ack, tp->snd_recover)) { 4797 EXIT_CONGRECOVERY(tp->t_flags); 4798 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4799 tp->snd_recover = tp->snd_max + 1; 4800 if (tp->t_flags2 & TF2_ECN_PERMIT) 4801 tp->t_flags2 |= TF2_ECN_SND_CWR; 4802 } 4803 break; 4804 case CC_RTO: 4805 tp->t_dupacks = 0; 4806 tp->t_bytes_acked = 0; 4807 EXIT_RECOVERY(tp->t_flags); 4808 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4809 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4810 orig_cwnd = tp->snd_cwnd; 4811 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4812 rack_log_to_prr(rack, 16, orig_cwnd, line); 4813 if (tp->t_flags2 & TF2_ECN_PERMIT) 4814 tp->t_flags2 |= TF2_ECN_SND_CWR; 4815 break; 4816 case CC_RTO_ERR: 4817 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4818 /* RTO was unnecessary, so reset everything. */ 4819 tp->snd_cwnd = tp->snd_cwnd_prev; 4820 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4821 tp->snd_recover = tp->snd_recover_prev; 4822 if (tp->t_flags & TF_WASFRECOVERY) { 4823 ENTER_FASTRECOVERY(tp->t_flags); 4824 tp->t_flags &= ~TF_WASFRECOVERY; 4825 } 4826 if (tp->t_flags & TF_WASCRECOVERY) { 4827 ENTER_CONGRECOVERY(tp->t_flags); 4828 tp->t_flags &= ~TF_WASCRECOVERY; 4829 } 4830 tp->snd_nxt = tp->snd_max; 4831 tp->t_badrxtwin = 0; 4832 break; 4833 } 4834 if ((CC_ALGO(tp)->cong_signal != NULL) && 4835 (type != CC_RTO)){ 4836 tp->t_ccv.curack = ack; 4837 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 4838 } 4839 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4840 rack_log_to_prr(rack, 15, cwnd_enter, line); 4841 rack->r_ctl.dsack_byte_cnt = 0; 4842 rack->r_ctl.retran_during_recovery = 0; 4843 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4844 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4845 rack->r_ent_rec_ns = 1; 4846 } 4847 } 4848 4849 static inline void 4850 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4851 { 4852 uint32_t i_cwnd; 4853 4854 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4855 4856 #ifdef NETFLIX_STATS 4857 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4858 if (tp->t_state == TCPS_ESTABLISHED) 4859 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4860 #endif 4861 if (CC_ALGO(tp)->after_idle != NULL) 4862 CC_ALGO(tp)->after_idle(&tp->t_ccv); 4863 4864 if (tp->snd_cwnd == 1) 4865 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4866 else 4867 i_cwnd = rc_init_window(rack); 4868 4869 /* 4870 * Being idle is no different than the initial window. If the cc 4871 * clamps it down below the initial window raise it to the initial 4872 * window. 4873 */ 4874 if (tp->snd_cwnd < i_cwnd) { 4875 tp->snd_cwnd = i_cwnd; 4876 } 4877 } 4878 4879 /* 4880 * Indicate whether this ack should be delayed. We can delay the ack if 4881 * following conditions are met: 4882 * - There is no delayed ack timer in progress. 4883 * - Our last ack wasn't a 0-sized window. We never want to delay 4884 * the ack that opens up a 0-sized window. 4885 * - LRO wasn't used for this segment. We make sure by checking that the 4886 * segment size is not larger than the MSS. 4887 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4888 * connection. 4889 */ 4890 #define DELAY_ACK(tp, tlen) \ 4891 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4892 ((tp->t_flags & TF_DELACK) == 0) && \ 4893 (tlen <= tp->t_maxseg) && \ 4894 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4895 4896 static struct rack_sendmap * 4897 rack_find_lowest_rsm(struct tcp_rack *rack) 4898 { 4899 struct rack_sendmap *rsm; 4900 4901 /* 4902 * Walk the time-order transmitted list looking for an rsm that is 4903 * not acked. 
This will be the one that was sent the longest time 4904 * ago that is still outstanding. 4905 */ 4906 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4907 if (rsm->r_flags & RACK_ACKED) { 4908 continue; 4909 } 4910 goto finish; 4911 } 4912 finish: 4913 return (rsm); 4914 } 4915 4916 static struct rack_sendmap * 4917 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 4918 { 4919 struct rack_sendmap *prsm; 4920 4921 /* 4922 * Walk the sequence order list backward until we hit and arrive at 4923 * the highest seq not acked. In theory when this is called it 4924 * should be the last segment (which it was not). 4925 */ 4926 prsm = rsm; 4927 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 4928 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 4929 continue; 4930 } 4931 return (prsm); 4932 } 4933 return (NULL); 4934 } 4935 4936 static uint32_t 4937 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 4938 { 4939 int32_t lro; 4940 uint32_t thresh; 4941 4942 /* 4943 * lro is the flag we use to determine if we have seen reordering. 4944 * If it gets set we have seen reordering. The reorder logic either 4945 * works in one of two ways: 4946 * 4947 * If reorder-fade is configured, then we track the last time we saw 4948 * re-ordering occur. If we reach the point where enough time as 4949 * passed we no longer consider reordering has occuring. 4950 * 4951 * Or if reorder-face is 0, then once we see reordering we consider 4952 * the connection to alway be subject to reordering and just set lro 4953 * to 1. 4954 * 4955 * In the end if lro is non-zero we add the extra time for 4956 * reordering in. 4957 */ 4958 if (srtt == 0) 4959 srtt = 1; 4960 if (rack->r_ctl.rc_reorder_ts) { 4961 if (rack->r_ctl.rc_reorder_fade) { 4962 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 4963 lro = cts - rack->r_ctl.rc_reorder_ts; 4964 if (lro == 0) { 4965 /* 4966 * No time as passed since the last 4967 * reorder, mark it as reordering. 4968 */ 4969 lro = 1; 4970 } 4971 } else { 4972 /* Negative time? */ 4973 lro = 0; 4974 } 4975 if (lro > rack->r_ctl.rc_reorder_fade) { 4976 /* Turn off reordering seen too */ 4977 rack->r_ctl.rc_reorder_ts = 0; 4978 lro = 0; 4979 } 4980 } else { 4981 /* Reodering does not fade */ 4982 lro = 1; 4983 } 4984 } else { 4985 lro = 0; 4986 } 4987 if (rack->rc_rack_tmr_std_based == 0) { 4988 thresh = srtt + rack->r_ctl.rc_pkt_delay; 4989 } else { 4990 /* Standards based pkt-delay is 1/4 srtt */ 4991 thresh = srtt + (srtt >> 2); 4992 } 4993 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 4994 /* It must be set, if not you get 1/4 rtt */ 4995 if (rack->r_ctl.rc_reorder_shift) 4996 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 4997 else 4998 thresh += (srtt >> 2); 4999 } 5000 if (rack->rc_rack_use_dsack && 5001 lro && 5002 (rack->r_ctl.num_dsack > 0)) { 5003 /* 5004 * We only increase the reordering window if we 5005 * have seen reordering <and> we have a DSACK count. 
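		 *
		 * Illustrative numbers: with srtt = 40000 usecs and num_dsack = 2
		 * this adds 2 * (40000 >> 2) = 20000 usecs to the threshold,
		 * before the caps below of srtt * 2 and rack_rto_max are applied.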
5006 */ 5007 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5008 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5009 } 5010 /* SRTT * 2 is the ceiling */ 5011 if (thresh > (srtt * 2)) { 5012 thresh = srtt * 2; 5013 } 5014 /* And we don't want it above the RTO max either */ 5015 if (thresh > rack_rto_max) { 5016 thresh = rack_rto_max; 5017 } 5018 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5019 return (thresh); 5020 } 5021 5022 static uint32_t 5023 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5024 struct rack_sendmap *rsm, uint32_t srtt) 5025 { 5026 struct rack_sendmap *prsm; 5027 uint32_t thresh, len; 5028 int segsiz; 5029 5030 if (srtt == 0) 5031 srtt = 1; 5032 if (rack->r_ctl.rc_tlp_threshold) 5033 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5034 else 5035 thresh = (srtt * 2); 5036 5037 /* Get the previous sent packet, if any */ 5038 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5039 len = rsm->r_end - rsm->r_start; 5040 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5041 /* Exactly like the ID */ 5042 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5043 uint32_t alt_thresh; 5044 /* 5045 * Compensate for delayed-ack with the d-ack time. 5046 */ 5047 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5048 if (alt_thresh > thresh) 5049 thresh = alt_thresh; 5050 } 5051 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5052 /* 2.1 behavior */ 5053 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5054 if (prsm && (len <= segsiz)) { 5055 /* 5056 * Two packets outstanding, thresh should be (2*srtt) + 5057 * possible inter-packet delay (if any). 5058 */ 5059 uint32_t inter_gap = 0; 5060 int idx, nidx; 5061 5062 idx = rsm->r_rtr_cnt - 1; 5063 nidx = prsm->r_rtr_cnt - 1; 5064 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5065 /* Yes it was sent later (or at the same time) */ 5066 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5067 } 5068 thresh += inter_gap; 5069 } else if (len <= segsiz) { 5070 /* 5071 * Possibly compensate for delayed-ack. 5072 */ 5073 uint32_t alt_thresh; 5074 5075 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5076 if (alt_thresh > thresh) 5077 thresh = alt_thresh; 5078 } 5079 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5080 /* 2.2 behavior */ 5081 if (len <= segsiz) { 5082 uint32_t alt_thresh; 5083 /* 5084 * Compensate for delayed-ack with the d-ack time. 5085 */ 5086 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5087 if (alt_thresh > thresh) 5088 thresh = alt_thresh; 5089 } 5090 } 5091 /* Not above an RTO */ 5092 if (thresh > tp->t_rxtcur) { 5093 thresh = tp->t_rxtcur; 5094 } 5095 /* Not above a RTO max */ 5096 if (thresh > rack_rto_max) { 5097 thresh = rack_rto_max; 5098 } 5099 /* Apply user supplied min TLP */ 5100 if (thresh < rack_tlp_min) { 5101 thresh = rack_tlp_min; 5102 } 5103 return (thresh); 5104 } 5105 5106 static uint32_t 5107 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5108 { 5109 /* 5110 * We want the rack_rtt which is the 5111 * last rtt we measured. However if that 5112 * does not exist we fallback to the srtt (which 5113 * we probably will never do) and then as a last 5114 * resort we use RACK_INITIAL_RTO if no srtt is 5115 * yet set. 
5116 */ 5117 if (rack->rc_rack_rtt) 5118 return (rack->rc_rack_rtt); 5119 else if (tp->t_srtt == 0) 5120 return (RACK_INITIAL_RTO); 5121 return (tp->t_srtt); 5122 } 5123 5124 static struct rack_sendmap * 5125 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5126 { 5127 /* 5128 * Check to see that we don't need to fall into recovery. We will 5129 * need to do so if our oldest transmit is past the time we should 5130 * have had an ack. 5131 */ 5132 struct tcp_rack *rack; 5133 struct rack_sendmap *rsm; 5134 int32_t idx; 5135 uint32_t srtt, thresh; 5136 5137 rack = (struct tcp_rack *)tp->t_fb_ptr; 5138 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5139 return (NULL); 5140 } 5141 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5142 if (rsm == NULL) 5143 return (NULL); 5144 5145 5146 if (rsm->r_flags & RACK_ACKED) { 5147 rsm = rack_find_lowest_rsm(rack); 5148 if (rsm == NULL) 5149 return (NULL); 5150 } 5151 idx = rsm->r_rtr_cnt - 1; 5152 srtt = rack_grab_rtt(tp, rack); 5153 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5154 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5155 return (NULL); 5156 } 5157 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5158 return (NULL); 5159 } 5160 /* Ok if we reach here we are over-due and this guy can be sent */ 5161 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5162 return (rsm); 5163 } 5164 5165 static uint32_t 5166 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5167 { 5168 int32_t t; 5169 int32_t tt; 5170 uint32_t ret_val; 5171 5172 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5173 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5174 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5175 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5176 ret_val = (uint32_t)tt; 5177 return (ret_val); 5178 } 5179 5180 static uint32_t 5181 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5182 { 5183 /* 5184 * Start the FR timer, we do this based on getting the first one in 5185 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5186 * events we need to stop the running timer (if its running) before 5187 * starting the new one. 5188 */ 5189 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5190 uint32_t srtt_cur; 5191 int32_t idx; 5192 int32_t is_tlp_timer = 0; 5193 struct rack_sendmap *rsm; 5194 5195 if (rack->t_timers_stopped) { 5196 /* All timers have been stopped none are to run */ 5197 return (0); 5198 } 5199 if (rack->rc_in_persist) { 5200 /* We can't start any timer in persists */ 5201 return (rack_get_persists_timer_val(tp, rack)); 5202 } 5203 rack->rc_on_min_to = 0; 5204 if ((tp->t_state < TCPS_ESTABLISHED) || 5205 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5206 goto activate_rxt; 5207 } 5208 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5209 if ((rsm == NULL) || sup_rack) { 5210 /* Nothing on the send map or no rack */ 5211 activate_rxt: 5212 time_since_sent = 0; 5213 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5214 if (rsm) { 5215 /* 5216 * Should we discount the RTX timer any? 5217 * 5218 * We want to discount it the smallest amount. 5219 * If a timer (Rack/TLP or RXT) has gone off more 5220 * recently thats the discount we want to use (now - timer time). 5221 * If the retransmit of the oldest packet was more recent then 5222 * we want to use that (now - oldest-packet-last_transmit_time). 
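			 *
			 * Illustrative numbers: if t_rxtcur is 400000 usecs and the
			 * more recent of those two events was 150000 usecs ago, the
			 * RXT timer below is armed for 250000 usecs; if the discount
			 * would consume the whole value we fall back to rc_min_to.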
5223 * 5224 */ 5225 idx = rsm->r_rtr_cnt - 1; 5226 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5227 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5228 else 5229 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5230 if (TSTMP_GT(cts, tstmp_touse)) 5231 time_since_sent = cts - tstmp_touse; 5232 } 5233 if (SEQ_LT(tp->snd_una, tp->snd_max) || 5234 sbavail(&tptosocket(tp)->so_snd)) { 5235 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5236 to = tp->t_rxtcur; 5237 if (to > time_since_sent) 5238 to -= time_since_sent; 5239 else 5240 to = rack->r_ctl.rc_min_to; 5241 if (to == 0) 5242 to = 1; 5243 /* Special case for KEEPINIT */ 5244 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5245 (TP_KEEPINIT(tp) != 0) && 5246 rsm) { 5247 /* 5248 * We have to put a ceiling on the rxt timer 5249 * of the keep-init timeout. 5250 */ 5251 uint32_t max_time, red; 5252 5253 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5254 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5255 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5256 if (red < max_time) 5257 max_time -= red; 5258 else 5259 max_time = 1; 5260 } 5261 /* Reduce timeout to the keep value if needed */ 5262 if (max_time < to) 5263 to = max_time; 5264 } 5265 return (to); 5266 } 5267 return (0); 5268 } 5269 if (rsm->r_flags & RACK_ACKED) { 5270 rsm = rack_find_lowest_rsm(rack); 5271 if (rsm == NULL) { 5272 /* No lowest? */ 5273 goto activate_rxt; 5274 } 5275 } 5276 if (rack->sack_attack_disable) { 5277 /* 5278 * We don't want to do 5279 * any TLP's if you are an attacker. 5280 * Though if you are doing what 5281 * is expected you may still have 5282 * SACK-PASSED marks. 5283 */ 5284 goto activate_rxt; 5285 } 5286 /* Convert from ms to usecs */ 5287 if ((rsm->r_flags & RACK_SACK_PASSED) || 5288 (rsm->r_flags & RACK_RWND_COLLAPSED) || 5289 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5290 if ((tp->t_flags & TF_SENTFIN) && 5291 ((tp->snd_max - tp->snd_una) == 1) && 5292 (rsm->r_flags & RACK_HAS_FIN)) { 5293 /* 5294 * We don't start a rack timer if all we have is a 5295 * FIN outstanding. 5296 */ 5297 goto activate_rxt; 5298 } 5299 if ((rack->use_rack_rr == 0) && 5300 (IN_FASTRECOVERY(tp->t_flags)) && 5301 (rack->rack_no_prr == 0) && 5302 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5303 /* 5304 * We are not cheating, in recovery and 5305 * not enough ack's to yet get our next 5306 * retransmission out. 5307 * 5308 * Note that classified attackers do not 5309 * get to use the rack-cheat. 5310 */ 5311 goto activate_tlp; 5312 } 5313 srtt = rack_grab_rtt(tp, rack); 5314 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5315 idx = rsm->r_rtr_cnt - 1; 5316 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5317 if (SEQ_GEQ(exp, cts)) { 5318 to = exp - cts; 5319 if (to < rack->r_ctl.rc_min_to) { 5320 to = rack->r_ctl.rc_min_to; 5321 if (rack->r_rr_config == 3) 5322 rack->rc_on_min_to = 1; 5323 } 5324 } else { 5325 to = rack->r_ctl.rc_min_to; 5326 if (rack->r_rr_config == 3) 5327 rack->rc_on_min_to = 1; 5328 } 5329 } else { 5330 /* Ok we need to do a TLP not RACK */ 5331 activate_tlp: 5332 if ((rack->rc_tlp_in_progress != 0) && 5333 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5334 /* 5335 * The previous send was a TLP and we have sent 5336 * N TLP's without sending new data. 5337 */ 5338 goto activate_rxt; 5339 } 5340 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5341 if (rsm == NULL) { 5342 /* We found no rsm to TLP with. 
*/ 5343 goto activate_rxt; 5344 } 5345 if (rsm->r_flags & RACK_HAS_FIN) { 5346 /* If its a FIN we dont do TLP */ 5347 rsm = NULL; 5348 goto activate_rxt; 5349 } 5350 idx = rsm->r_rtr_cnt - 1; 5351 time_since_sent = 0; 5352 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5353 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5354 else 5355 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5356 if (TSTMP_GT(cts, tstmp_touse)) 5357 time_since_sent = cts - tstmp_touse; 5358 is_tlp_timer = 1; 5359 if (tp->t_srtt) { 5360 if ((rack->rc_srtt_measure_made == 0) && 5361 (tp->t_srtt == 1)) { 5362 /* 5363 * If another stack as run and set srtt to 1, 5364 * then the srtt was 0, so lets use the initial. 5365 */ 5366 srtt = RACK_INITIAL_RTO; 5367 } else { 5368 srtt_cur = tp->t_srtt; 5369 srtt = srtt_cur; 5370 } 5371 } else 5372 srtt = RACK_INITIAL_RTO; 5373 /* 5374 * If the SRTT is not keeping up and the 5375 * rack RTT has spiked we want to use 5376 * the last RTT not the smoothed one. 5377 */ 5378 if (rack_tlp_use_greater && 5379 tp->t_srtt && 5380 (srtt < rack_grab_rtt(tp, rack))) { 5381 srtt = rack_grab_rtt(tp, rack); 5382 } 5383 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5384 if (thresh > time_since_sent) { 5385 to = thresh - time_since_sent; 5386 } else { 5387 to = rack->r_ctl.rc_min_to; 5388 rack_log_alt_to_to_cancel(rack, 5389 thresh, /* flex1 */ 5390 time_since_sent, /* flex2 */ 5391 tstmp_touse, /* flex3 */ 5392 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5393 (uint32_t)rsm->r_tim_lastsent[idx], 5394 srtt, 5395 idx, 99); 5396 } 5397 if (to < rack_tlp_min) { 5398 to = rack_tlp_min; 5399 } 5400 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5401 /* 5402 * If the TLP time works out to larger than the max 5403 * RTO lets not do TLP.. just RTO. 5404 */ 5405 goto activate_rxt; 5406 } 5407 } 5408 if (is_tlp_timer == 0) { 5409 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5410 } else { 5411 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5412 } 5413 if (to == 0) 5414 to = 1; 5415 return (to); 5416 } 5417 5418 static void 5419 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5420 { 5421 if (rack->rc_in_persist == 0) { 5422 if (tp->t_flags & TF_GPUTINPROG) { 5423 /* 5424 * Stop the goodput now, the calling of the 5425 * measurement function clears the flag. 
5426 */ 5427 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5428 RACK_QUALITY_PERSIST); 5429 } 5430 #ifdef NETFLIX_SHARED_CWND 5431 if (rack->r_ctl.rc_scw) { 5432 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5433 rack->rack_scwnd_is_idle = 1; 5434 } 5435 #endif 5436 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5437 if (rack->r_ctl.rc_went_idle_time == 0) 5438 rack->r_ctl.rc_went_idle_time = 1; 5439 rack_timer_cancel(tp, rack, cts, __LINE__); 5440 rack->r_ctl.persist_lost_ends = 0; 5441 rack->probe_not_answered = 0; 5442 rack->forced_ack = 0; 5443 tp->t_rxtshift = 0; 5444 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5445 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5446 rack->rc_in_persist = 1; 5447 } 5448 } 5449 5450 static void 5451 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5452 { 5453 if (tcp_in_hpts(rack->rc_inp)) { 5454 tcp_hpts_remove(rack->rc_inp); 5455 rack->r_ctl.rc_hpts_flags = 0; 5456 } 5457 #ifdef NETFLIX_SHARED_CWND 5458 if (rack->r_ctl.rc_scw) { 5459 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5460 rack->rack_scwnd_is_idle = 0; 5461 } 5462 #endif 5463 if (rack->rc_gp_dyn_mul && 5464 (rack->use_fixed_rate == 0) && 5465 (rack->rc_always_pace)) { 5466 /* 5467 * Do we count this as if a probe-rtt just 5468 * finished? 5469 */ 5470 uint32_t time_idle, idle_min; 5471 5472 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5473 idle_min = rack_min_probertt_hold; 5474 if (rack_probertt_gpsrtt_cnt_div) { 5475 uint64_t extra; 5476 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5477 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5478 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5479 idle_min += (uint32_t)extra; 5480 } 5481 if (time_idle >= idle_min) { 5482 /* Yes, we count it as a probe-rtt. 
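			 *
			 * Illustrative numbers (not the sysctl defaults): with
			 * rack_min_probertt_hold = 200000 usecs, rc_gp_srtt = 40000
			 * usecs and a gpsrtt cnt_mul/cnt_div of 4/1, idle_min works
			 * out to 200000 + 160000 = 360000 usecs, so only an idle
			 * spell at least that long is treated as a completed
			 * probe-rtt.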
*/ 5483 uint32_t us_cts; 5484 5485 us_cts = tcp_get_usecs(NULL); 5486 if (rack->in_probe_rtt == 0) { 5487 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5488 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5489 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5490 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5491 } else { 5492 rack_exit_probertt(rack, us_cts); 5493 } 5494 } 5495 } 5496 rack->rc_in_persist = 0; 5497 rack->r_ctl.rc_went_idle_time = 0; 5498 tp->t_rxtshift = 0; 5499 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5500 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5501 rack->r_ctl.rc_agg_delayed = 0; 5502 rack->r_early = 0; 5503 rack->r_late = 0; 5504 rack->r_ctl.rc_agg_early = 0; 5505 } 5506 5507 static void 5508 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5509 struct hpts_diag *diag, struct timeval *tv) 5510 { 5511 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5512 union tcp_log_stackspecific log; 5513 5514 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5515 log.u_bbr.flex1 = diag->p_nxt_slot; 5516 log.u_bbr.flex2 = diag->p_cur_slot; 5517 log.u_bbr.flex3 = diag->slot_req; 5518 log.u_bbr.flex4 = diag->inp_hptsslot; 5519 log.u_bbr.flex5 = diag->slot_remaining; 5520 log.u_bbr.flex6 = diag->need_new_to; 5521 log.u_bbr.flex7 = diag->p_hpts_active; 5522 log.u_bbr.flex8 = diag->p_on_min_sleep; 5523 /* Hijack other fields as needed */ 5524 log.u_bbr.epoch = diag->have_slept; 5525 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5526 log.u_bbr.pkts_out = diag->co_ret; 5527 log.u_bbr.applimited = diag->hpts_sleep_time; 5528 log.u_bbr.delivered = diag->p_prev_slot; 5529 log.u_bbr.inflight = diag->p_runningslot; 5530 log.u_bbr.bw_inuse = diag->wheel_slot; 5531 log.u_bbr.rttProp = diag->wheel_cts; 5532 log.u_bbr.timeStamp = cts; 5533 log.u_bbr.delRate = diag->maxslots; 5534 log.u_bbr.cur_del_rate = diag->p_curtick; 5535 log.u_bbr.cur_del_rate <<= 32; 5536 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5537 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5538 &rack->rc_inp->inp_socket->so_rcv, 5539 &rack->rc_inp->inp_socket->so_snd, 5540 BBR_LOG_HPTSDIAG, 0, 5541 0, &log, false, tv); 5542 } 5543 5544 } 5545 5546 static void 5547 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5548 { 5549 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5550 union tcp_log_stackspecific log; 5551 struct timeval tv; 5552 5553 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5554 log.u_bbr.flex1 = sb->sb_flags; 5555 log.u_bbr.flex2 = len; 5556 log.u_bbr.flex3 = sb->sb_state; 5557 log.u_bbr.flex8 = type; 5558 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5559 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5560 &rack->rc_inp->inp_socket->so_rcv, 5561 &rack->rc_inp->inp_socket->so_snd, 5562 TCP_LOG_SB_WAKE, 0, 5563 len, &log, false, &tv); 5564 } 5565 } 5566 5567 static void 5568 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5569 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5570 { 5571 struct hpts_diag diag; 5572 struct inpcb *inp = tptoinpcb(tp); 5573 struct timeval tv; 5574 uint32_t delayed_ack = 0; 5575 uint32_t hpts_timeout; 5576 uint32_t entry_slot = slot; 5577 uint8_t stopped; 5578 uint32_t left = 0; 5579 uint32_t us_cts; 5580 5581 if ((tp->t_state == TCPS_CLOSED) || 5582 (tp->t_state == TCPS_LISTEN)) { 5583 return; 5584 } 5585 if (tcp_in_hpts(inp)) { 5586 /* Already on the pacer */ 5587 return; 5588 } 5589 stopped = 
rack->rc_tmr_stopped; 5590 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5591 left = rack->r_ctl.rc_timer_exp - cts; 5592 } 5593 rack->r_ctl.rc_timer_exp = 0; 5594 rack->r_ctl.rc_hpts_flags = 0; 5595 us_cts = tcp_get_usecs(&tv); 5596 /* Now early/late accounting */ 5597 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5598 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5599 /* 5600 * We have a early carry over set, 5601 * we can always add more time so we 5602 * can always make this compensation. 5603 * 5604 * Note if ack's are allowed to wake us do not 5605 * penalize the next timer for being awoke 5606 * by an ack aka the rc_agg_early (non-paced mode). 5607 */ 5608 slot += rack->r_ctl.rc_agg_early; 5609 rack->r_early = 0; 5610 rack->r_ctl.rc_agg_early = 0; 5611 } 5612 if (rack->r_late) { 5613 /* 5614 * This is harder, we can 5615 * compensate some but it 5616 * really depends on what 5617 * the current pacing time is. 5618 */ 5619 if (rack->r_ctl.rc_agg_delayed >= slot) { 5620 /* 5621 * We can't compensate for it all. 5622 * And we have to have some time 5623 * on the clock. We always have a min 5624 * 10 slots (10 x 10 i.e. 100 usecs). 5625 */ 5626 if (slot <= HPTS_TICKS_PER_SLOT) { 5627 /* We gain delay */ 5628 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5629 slot = HPTS_TICKS_PER_SLOT; 5630 } else { 5631 /* We take off some */ 5632 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5633 slot = HPTS_TICKS_PER_SLOT; 5634 } 5635 } else { 5636 slot -= rack->r_ctl.rc_agg_delayed; 5637 rack->r_ctl.rc_agg_delayed = 0; 5638 /* Make sure we have 100 useconds at minimum */ 5639 if (slot < HPTS_TICKS_PER_SLOT) { 5640 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5641 slot = HPTS_TICKS_PER_SLOT; 5642 } 5643 if (rack->r_ctl.rc_agg_delayed == 0) 5644 rack->r_late = 0; 5645 } 5646 } 5647 if (slot) { 5648 /* We are pacing too */ 5649 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5650 } 5651 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5652 #ifdef NETFLIX_EXP_DETECTION 5653 if (rack->sack_attack_disable && 5654 (slot < tcp_sad_pacing_interval)) { 5655 /* 5656 * We have a potential attacker on 5657 * the line. We have possibly some 5658 * (or now) pacing time set. We want to 5659 * slow down the processing of sacks by some 5660 * amount (if it is an attacker). Set the default 5661 * slot for attackers in place (unless the orginal 5662 * interval is longer). Its stored in 5663 * micro-seconds, so lets convert to msecs. 5664 */ 5665 slot = tcp_sad_pacing_interval; 5666 } 5667 #endif 5668 if (tp->t_flags & TF_DELACK) { 5669 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5670 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5671 } 5672 if (delayed_ack && ((hpts_timeout == 0) || 5673 (delayed_ack < hpts_timeout))) 5674 hpts_timeout = delayed_ack; 5675 else 5676 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5677 /* 5678 * If no timers are going to run and we will fall off the hptsi 5679 * wheel, we resort to a keep-alive timer if its configured. 5680 */ 5681 if ((hpts_timeout == 0) && 5682 (slot == 0)) { 5683 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5684 (tp->t_state <= TCPS_CLOSING)) { 5685 /* 5686 * Ok we have no timer (persists, rack, tlp, rxt or 5687 * del-ack), we don't have segments being paced. So 5688 * all that is left is the keepalive timer. 
5689 */ 5690 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5691 /* Get the established keep-alive time */ 5692 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5693 } else { 5694 /* 5695 * Get the initial setup keep-alive time, 5696 * note that this is probably not going to 5697 * happen, since rack will be running a rxt timer 5698 * if a SYN of some sort is outstanding. It is 5699 * actually handled in rack_timeout_rxt(). 5700 */ 5701 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5702 } 5703 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5704 if (rack->in_probe_rtt) { 5705 /* 5706 * We want to instead not wake up a long time from 5707 * now but to wake up about the time we would 5708 * exit probe-rtt and initiate a keep-alive ack. 5709 * This will get us out of probe-rtt and update 5710 * our min-rtt. 5711 */ 5712 hpts_timeout = rack_min_probertt_hold; 5713 } 5714 } 5715 } 5716 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5717 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5718 /* 5719 * RACK, TLP, persists and RXT timers all are restartable 5720 * based on actions input .. i.e we received a packet (ack 5721 * or sack) and that changes things (rw, or snd_una etc). 5722 * Thus we can restart them with a new value. For 5723 * keep-alive, delayed_ack we keep track of what was left 5724 * and restart the timer with a smaller value. 5725 */ 5726 if (left < hpts_timeout) 5727 hpts_timeout = left; 5728 } 5729 if (hpts_timeout) { 5730 /* 5731 * Hack alert for now we can't time-out over 2,147,483 5732 * seconds (a bit more than 596 hours), which is probably ok 5733 * :). 5734 */ 5735 if (hpts_timeout > 0x7ffffffe) 5736 hpts_timeout = 0x7ffffffe; 5737 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5738 } 5739 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5740 if ((rack->gp_ready == 0) && 5741 (rack->use_fixed_rate == 0) && 5742 (hpts_timeout < slot) && 5743 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5744 /* 5745 * We have no good estimate yet for the 5746 * old clunky burst mitigation or the 5747 * real pacing. And the tlp or rxt is smaller 5748 * than the pacing calculation. Lets not 5749 * pace that long since we know the calculation 5750 * so far is not accurate. 5751 */ 5752 slot = hpts_timeout; 5753 } 5754 /** 5755 * Turn off all the flags for queuing by default. The 5756 * flags have important meanings to what happens when 5757 * LRO interacts with the transport. Most likely (by default now) 5758 * mbuf_queueing and ack compression are on. So the transport 5759 * has a couple of flags that control what happens (if those 5760 * are not on then these flags won't have any effect since it 5761 * won't go through the queuing LRO path). 5762 * 5763 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5764 * pacing output, so don't disturb. But 5765 * it also means LRO can wake me if there 5766 * is a SACK arrival. 5767 * 5768 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5769 * with the above flag (QUEUE_READY) and 5770 * when present it says don't even wake me 5771 * if a SACK arrives. 5772 * 5773 * The idea behind these flags is that if we are pacing we 5774 * set the MBUF_QUEUE_READY and only get woken up if 5775 * a SACK arrives (which could change things) or if 5776 * our pacing timer expires. If, however, we have a rack 5777 * timer running, then we don't even want a sack to wake 5778 * us since the rack timer has to expire before we can send. 
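	 *
	 * A rough summary of what the code below does with them (assuming
	 * mbuf queueing / compressed acks are enabled):
	 *   pacing slot only                          -> INP_MBUF_QUEUE_READY
	 *   pacing slot + rack timer, r_rr_config != 3 -> both flags set
	 *   rc_ack_can_sendout_data                   -> both cleared again
	 *   timer only, no pacing slot                -> neither flag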
5779 * 5780 * Other cases should usually have none of the flags set 5781 * so LRO can call into us. 5782 */ 5783 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5784 if (slot) { 5785 rack->r_ctl.rc_last_output_to = us_cts + slot; 5786 /* 5787 * A pacing timer (slot) is being set, in 5788 * such a case we cannot send (we are blocked by 5789 * the timer). So lets tell LRO that it should not 5790 * wake us unless there is a SACK. Note this only 5791 * will be effective if mbuf queueing is on or 5792 * compressed acks are being processed. 5793 */ 5794 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5795 /* 5796 * But wait if we have a Rack timer running 5797 * even a SACK should not disturb us (with 5798 * the exception of r_rr_config 3). 5799 */ 5800 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5801 (rack->r_rr_config != 3)) 5802 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5803 if (rack->rc_ack_can_sendout_data) { 5804 /* 5805 * Ahh but wait, this is that special case 5806 * where the pacing timer can be disturbed 5807 * backout the changes (used for non-paced 5808 * burst limiting). 5809 */ 5810 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5811 } 5812 if ((rack->use_rack_rr) && 5813 (rack->r_rr_config < 2) && 5814 ((hpts_timeout) && (hpts_timeout < slot))) { 5815 /* 5816 * Arrange for the hpts to kick back in after the 5817 * t-o if the t-o does not cause a send. 5818 */ 5819 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5820 __LINE__, &diag); 5821 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5822 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5823 } else { 5824 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), 5825 __LINE__, &diag); 5826 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5827 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5828 } 5829 } else if (hpts_timeout) { 5830 /* 5831 * With respect to inp_flags2 here, lets let any new acks wake 5832 * us up here. Since we are not pacing (no pacing timer), output 5833 * can happen so we should let it. If its a Rack timer, then any inbound 5834 * packet probably won't change the sending (we will be blocked) 5835 * but it may change the prr stats so letting it in (the set defaults 5836 * at the start of this block) are good enough. 5837 */ 5838 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5839 __LINE__, &diag); 5840 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5841 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5842 } else { 5843 /* No timer starting */ 5844 #ifdef INVARIANTS 5845 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5846 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5847 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5848 } 5849 #endif 5850 } 5851 rack->rc_tmr_stopped = 0; 5852 if (slot) 5853 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5854 } 5855 5856 /* 5857 * RACK Timer, here we simply do logging and house keeping. 5858 * the normal rack_output() function will call the 5859 * appropriate thing to check if we need to do a RACK retransmit. 5860 * We return 1, saying don't proceed with rack_output only 5861 * when all timers have been stopped (destroyed PCB?). 5862 */ 5863 static int 5864 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5865 { 5866 /* 5867 * This timer simply provides an internal trigger to send out data. 5868 * The check_recovery_mode call will see if there are needed 5869 * retransmissions, if so we will enter fast-recovery. 
The output 5870 * call may or may not do the same thing depending on sysctl 5871 * settings. 5872 */ 5873 struct rack_sendmap *rsm; 5874 5875 counter_u64_add(rack_to_tot, 1); 5876 if (rack->r_state && (rack->r_state != tp->t_state)) 5877 rack_set_state(tp, rack); 5878 rack->rc_on_min_to = 0; 5879 rsm = rack_check_recovery_mode(tp, cts); 5880 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5881 if (rsm) { 5882 rack->r_ctl.rc_resend = rsm; 5883 rack->r_timer_override = 1; 5884 if (rack->use_rack_rr) { 5885 /* 5886 * Don't accumulate extra pacing delay 5887 * we are allowing the rack timer to 5888 * over-ride pacing i.e. rrr takes precedence 5889 * if the pacing interval is longer than the rrr 5890 * time (in other words we get the min pacing 5891 * time versus rrr pacing time). 5892 */ 5893 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5894 } 5895 } 5896 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5897 if (rsm == NULL) { 5898 /* restart a timer and return 1 */ 5899 rack_start_hpts_timer(rack, tp, cts, 5900 0, 0, 0); 5901 return (1); 5902 } 5903 return (0); 5904 } 5905 5906 static void 5907 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5908 { 5909 if (rsm->m->m_len > rsm->orig_m_len) { 5910 /* 5911 * Mbuf grew, caused by sbcompress, our offset does 5912 * not change. 5913 */ 5914 rsm->orig_m_len = rsm->m->m_len; 5915 } else if (rsm->m->m_len < rsm->orig_m_len) { 5916 /* 5917 * Mbuf shrank, trimmed off the top by an ack, our 5918 * offset changes. 5919 */ 5920 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5921 rsm->orig_m_len = rsm->m->m_len; 5922 } 5923 } 5924 5925 static void 5926 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5927 { 5928 struct mbuf *m; 5929 uint32_t soff; 5930 5931 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5932 /* Fix up the orig_m_len and possibly the mbuf offset */ 5933 rack_adjust_orig_mlen(src_rsm); 5934 } 5935 m = src_rsm->m; 5936 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5937 while (soff >= m->m_len) { 5938 /* Move out past this mbuf */ 5939 soff -= m->m_len; 5940 m = m->m_next; 5941 KASSERT((m != NULL), 5942 ("rsm:%p nrsm:%p hit at soff:%u null m", 5943 src_rsm, rsm, soff)); 5944 } 5945 rsm->m = m; 5946 rsm->soff = soff; 5947 rsm->orig_m_len = m->m_len; 5948 } 5949 5950 static __inline void 5951 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5952 struct rack_sendmap *rsm, uint32_t start) 5953 { 5954 int idx; 5955 5956 nrsm->r_start = start; 5957 nrsm->r_end = rsm->r_end; 5958 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 5959 nrsm->r_flags = rsm->r_flags; 5960 nrsm->r_dupack = rsm->r_dupack; 5961 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 5962 nrsm->r_rtr_bytes = 0; 5963 nrsm->r_fas = rsm->r_fas; 5964 rsm->r_end = nrsm->r_start; 5965 nrsm->r_just_ret = rsm->r_just_ret; 5966 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 5967 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 5968 } 5969 /* Now if we have SYN flag we keep it on the left edge */ 5970 if (nrsm->r_flags & RACK_HAS_SYN) 5971 nrsm->r_flags &= ~RACK_HAS_SYN; 5972 /* Now if we have a FIN flag we keep it on the right edge */ 5973 if (rsm->r_flags & RACK_HAS_FIN) 5974 rsm->r_flags &= ~RACK_HAS_FIN; 5975 /* Push bit must go to the right edge as well */ 5976 if (rsm->r_flags & RACK_HAD_PUSH) 5977 rsm->r_flags &= ~RACK_HAD_PUSH; 5978 /* Clone over the state of the hw_tls flag */ 5979 nrsm->r_hw_tls = rsm->r_hw_tls; 5980 /* 5981 * Now we need to find nrsm's new location in the mbuf chain 5982 * we basically calculate a 
new offset, which is soff + 5983 * how much is left in original rsm. Then we walk out the mbuf 5984 * chain to find the righ position, it may be the same mbuf 5985 * or maybe not. 5986 */ 5987 KASSERT(((rsm->m != NULL) || 5988 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 5989 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 5990 if (rsm->m) 5991 rack_setup_offset_for_rsm(rsm, nrsm); 5992 } 5993 5994 static struct rack_sendmap * 5995 rack_merge_rsm(struct tcp_rack *rack, 5996 struct rack_sendmap *l_rsm, 5997 struct rack_sendmap *r_rsm) 5998 { 5999 /* 6000 * We are merging two ack'd RSM's, 6001 * the l_rsm is on the left (lower seq 6002 * values) and the r_rsm is on the right 6003 * (higher seq value). The simplest way 6004 * to merge these is to move the right 6005 * one into the left. I don't think there 6006 * is any reason we need to try to find 6007 * the oldest (or last oldest retransmitted). 6008 */ 6009 #ifdef INVARIANTS 6010 struct rack_sendmap *rm; 6011 #endif 6012 rack_log_map_chg(rack->rc_tp, rack, NULL, 6013 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6014 l_rsm->r_end = r_rsm->r_end; 6015 if (l_rsm->r_dupack < r_rsm->r_dupack) 6016 l_rsm->r_dupack = r_rsm->r_dupack; 6017 if (r_rsm->r_rtr_bytes) 6018 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6019 if (r_rsm->r_in_tmap) { 6020 /* This really should not happen */ 6021 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6022 r_rsm->r_in_tmap = 0; 6023 } 6024 6025 /* Now the flags */ 6026 if (r_rsm->r_flags & RACK_HAS_FIN) 6027 l_rsm->r_flags |= RACK_HAS_FIN; 6028 if (r_rsm->r_flags & RACK_TLP) 6029 l_rsm->r_flags |= RACK_TLP; 6030 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6031 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6032 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6033 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6034 /* 6035 * If both are app-limited then let the 6036 * free lower the count. If right is app 6037 * limited and left is not, transfer. 6038 */ 6039 l_rsm->r_flags |= RACK_APP_LIMITED; 6040 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6041 if (r_rsm == rack->r_ctl.rc_first_appl) 6042 rack->r_ctl.rc_first_appl = l_rsm; 6043 } 6044 #ifndef INVARIANTS 6045 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6046 #else 6047 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6048 if (rm != r_rsm) { 6049 panic("removing head in rack:%p rsm:%p rm:%p", 6050 rack, r_rsm, rm); 6051 } 6052 #endif 6053 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6054 /* Transfer the split limit to the map we free */ 6055 r_rsm->r_limit_type = l_rsm->r_limit_type; 6056 l_rsm->r_limit_type = 0; 6057 } 6058 rack_free(rack, r_rsm); 6059 return (l_rsm); 6060 } 6061 6062 /* 6063 * TLP Timer, here we simply setup what segment we want to 6064 * have the TLP expire on, the normal rack_output() will then 6065 * send it out. 6066 * 6067 * We return 1, saying don't proceed with rack_output only 6068 * when all timers have been stopped (destroyed PCB?). 6069 */ 6070 static int 6071 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6072 { 6073 /* 6074 * Tail Loss Probe. 
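	 *
	 * Roughly: the probe sends one new MSS of data when the socket
	 * buffer and receive window allow it; otherwise it re-sends the
	 * highest outstanding segment (splitting it first if it is larger
	 * than one MSS) and lets rack_output() do the actual transmit.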
6075 */ 6076 struct rack_sendmap *rsm = NULL; 6077 #ifdef INVARIANTS 6078 struct rack_sendmap *insret; 6079 #endif 6080 struct socket *so = tptosocket(tp); 6081 uint32_t amm; 6082 uint32_t out, avail; 6083 int collapsed_win = 0; 6084 6085 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6086 /* Its not time yet */ 6087 return (0); 6088 } 6089 if (ctf_progress_timeout_check(tp, true)) { 6090 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6091 return (-ETIMEDOUT); /* tcp_drop() */ 6092 } 6093 /* 6094 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6095 * need to figure out how to force a full MSS segment out. 6096 */ 6097 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6098 rack->r_ctl.retran_during_recovery = 0; 6099 rack->r_ctl.dsack_byte_cnt = 0; 6100 counter_u64_add(rack_tlp_tot, 1); 6101 if (rack->r_state && (rack->r_state != tp->t_state)) 6102 rack_set_state(tp, rack); 6103 avail = sbavail(&so->so_snd); 6104 out = tp->snd_max - tp->snd_una; 6105 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6106 /* special case, we need a retransmission */ 6107 collapsed_win = 1; 6108 goto need_retran; 6109 } 6110 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6111 rack->r_ctl.dsack_persist--; 6112 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6113 rack->r_ctl.num_dsack = 0; 6114 } 6115 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6116 } 6117 if ((tp->t_flags & TF_GPUTINPROG) && 6118 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6119 /* 6120 * If this is the second in a row 6121 * TLP and we are doing a measurement 6122 * its time to abandon the measurement. 6123 * Something is likely broken on 6124 * the clients network and measuring a 6125 * broken network does us no good. 6126 */ 6127 tp->t_flags &= ~TF_GPUTINPROG; 6128 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6129 rack->r_ctl.rc_gp_srtt /*flex1*/, 6130 tp->gput_seq, 6131 0, 0, 18, __LINE__, NULL, 0); 6132 } 6133 /* 6134 * Check our send oldest always settings, and if 6135 * there is an oldest to send jump to the need_retran. 6136 */ 6137 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6138 goto need_retran; 6139 6140 if (avail > out) { 6141 /* New data is available */ 6142 amm = avail - out; 6143 if (amm > ctf_fixed_maxseg(tp)) { 6144 amm = ctf_fixed_maxseg(tp); 6145 if ((amm + out) > tp->snd_wnd) { 6146 /* We are rwnd limited */ 6147 goto need_retran; 6148 } 6149 } else if (amm < ctf_fixed_maxseg(tp)) { 6150 /* not enough to fill a MTU */ 6151 goto need_retran; 6152 } 6153 if (IN_FASTRECOVERY(tp->t_flags)) { 6154 /* Unlikely */ 6155 if (rack->rack_no_prr == 0) { 6156 if (out + amm <= tp->snd_wnd) { 6157 rack->r_ctl.rc_prr_sndcnt = amm; 6158 rack->r_ctl.rc_tlp_new_data = amm; 6159 rack_log_to_prr(rack, 4, 0, __LINE__); 6160 } 6161 } else 6162 goto need_retran; 6163 } else { 6164 /* Set the send-new override */ 6165 if (out + amm <= tp->snd_wnd) 6166 rack->r_ctl.rc_tlp_new_data = amm; 6167 else 6168 goto need_retran; 6169 } 6170 rack->r_ctl.rc_tlpsend = NULL; 6171 counter_u64_add(rack_tlp_newdata, 1); 6172 goto send; 6173 } 6174 need_retran: 6175 /* 6176 * Ok we need to arrange the last un-acked segment to be re-sent, or 6177 * optionally the first un-acked segment. 
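	 *
	 * When the peer's window has collapsed we instead pick the highest
	 * segment the peer still had room for (the last rsm not marked
	 * RACK_RWND_COLLAPSED), falling back to the very first rsm if none
	 * qualifies.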
6178 */ 6179 if (collapsed_win == 0) { 6180 if (rack_always_send_oldest) 6181 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6182 else { 6183 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6184 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6185 rsm = rack_find_high_nonack(rack, rsm); 6186 } 6187 } 6188 if (rsm == NULL) { 6189 #ifdef TCP_BLACKBOX 6190 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6191 #endif 6192 goto out; 6193 } 6194 } else { 6195 /* 6196 * We must find the last segment 6197 * that was acceptable by the client. 6198 */ 6199 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6200 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6201 /* Found one */ 6202 break; 6203 } 6204 } 6205 if (rsm == NULL) { 6206 /* None? if so send the first */ 6207 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6208 if (rsm == NULL) { 6209 #ifdef TCP_BLACKBOX 6210 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6211 #endif 6212 goto out; 6213 } 6214 } 6215 } 6216 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6217 /* 6218 * We need to split this the last segment in two. 6219 */ 6220 struct rack_sendmap *nrsm; 6221 6222 nrsm = rack_alloc_full_limit(rack); 6223 if (nrsm == NULL) { 6224 /* 6225 * No memory to split, we will just exit and punt 6226 * off to the RXT timer. 6227 */ 6228 goto out; 6229 } 6230 rack_clone_rsm(rack, nrsm, rsm, 6231 (rsm->r_end - ctf_fixed_maxseg(tp))); 6232 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6233 #ifndef INVARIANTS 6234 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6235 #else 6236 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6237 if (insret != NULL) { 6238 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6239 nrsm, insret, rack, rsm); 6240 } 6241 #endif 6242 if (rsm->r_in_tmap) { 6243 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6244 nrsm->r_in_tmap = 1; 6245 } 6246 rsm = nrsm; 6247 } 6248 rack->r_ctl.rc_tlpsend = rsm; 6249 send: 6250 /* Make sure output path knows we are doing a TLP */ 6251 *doing_tlp = 1; 6252 rack->r_timer_override = 1; 6253 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6254 return (0); 6255 out: 6256 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6257 return (0); 6258 } 6259 6260 /* 6261 * Delayed ack Timer, here we simply need to setup the 6262 * ACK_NOW flag and remove the DELACK flag. From there 6263 * the output routine will send the ack out. 6264 * 6265 * We only return 1, saying don't proceed, if all timers 6266 * are stopped (destroyed PCB?). 6267 */ 6268 static int 6269 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6270 { 6271 6272 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6273 tp->t_flags &= ~TF_DELACK; 6274 tp->t_flags |= TF_ACKNOW; 6275 KMOD_TCPSTAT_INC(tcps_delack); 6276 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6277 return (0); 6278 } 6279 6280 /* 6281 * Persists timer, here we simply send the 6282 * same thing as a keepalive will. 6283 * the one byte send. 6284 * 6285 * We only return 1, saying don't proceed, if all timers 6286 * are stopped (destroyed PCB?). 
6287 */ 6288 static int 6289 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6290 { 6291 struct tcptemp *t_template; 6292 int32_t retval = 1; 6293 6294 if (rack->rc_in_persist == 0) 6295 return (0); 6296 if (ctf_progress_timeout_check(tp, false)) { 6297 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6298 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6299 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6300 return (-ETIMEDOUT); /* tcp_drop() */ 6301 } 6302 /* 6303 * Persistence timer into zero window. Force a byte to be output, if 6304 * possible. 6305 */ 6306 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6307 /* 6308 * Hack: if the peer is dead/unreachable, we do not time out if the 6309 * window is closed. After a full backoff, drop the connection if 6310 * the idle time (no responses to probes) reaches the maximum 6311 * backoff that we would use if retransmitting. 6312 */ 6313 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6314 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6315 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6316 KMOD_TCPSTAT_INC(tcps_persistdrop); 6317 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6318 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6319 retval = -ETIMEDOUT; /* tcp_drop() */ 6320 goto out; 6321 } 6322 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6323 tp->snd_una == tp->snd_max) 6324 rack_exit_persist(tp, rack, cts); 6325 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6326 /* 6327 * If the user has closed the socket then drop a persisting 6328 * connection after a much reduced timeout. 6329 */ 6330 if (tp->t_state > TCPS_CLOSE_WAIT && 6331 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6332 KMOD_TCPSTAT_INC(tcps_persistdrop); 6333 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6334 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6335 retval = -ETIMEDOUT; /* tcp_drop() */ 6336 goto out; 6337 } 6338 t_template = tcpip_maketemplate(rack->rc_inp); 6339 if (t_template) { 6340 /* only set it if we were answered */ 6341 if (rack->forced_ack == 0) { 6342 rack->forced_ack = 1; 6343 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6344 } else { 6345 rack->probe_not_answered = 1; 6346 counter_u64_add(rack_persists_loss, 1); 6347 rack->r_ctl.persist_lost_ends++; 6348 } 6349 counter_u64_add(rack_persists_sends, 1); 6350 tcp_respond(tp, t_template->tt_ipgen, 6351 &t_template->tt_t, (struct mbuf *)NULL, 6352 tp->rcv_nxt, tp->snd_una - 1, 0); 6353 /* This sends an ack */ 6354 if (tp->t_flags & TF_DELACK) 6355 tp->t_flags &= ~TF_DELACK; 6356 free(t_template, M_TEMP); 6357 } 6358 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6359 tp->t_rxtshift++; 6360 out: 6361 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6362 rack_start_hpts_timer(rack, tp, cts, 6363 0, 0, 0); 6364 return (retval); 6365 } 6366 6367 /* 6368 * If a keepalive goes off, we had no other timers 6369 * happening. We always return 1 here since this 6370 * routine either drops the connection or sends 6371 * out a segment with respond. 6372 */ 6373 static int 6374 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6375 { 6376 struct tcptemp *t_template; 6377 struct inpcb *inp = tptoinpcb(tp); 6378 6379 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6380 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6381 /* 6382 * Keep-alive timer went off; send something or drop connection if 6383 * idle for too long. 
6384 */ 6385 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6386 if (tp->t_state < TCPS_ESTABLISHED) 6387 goto dropit; 6388 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6389 tp->t_state <= TCPS_CLOSING) { 6390 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6391 goto dropit; 6392 /* 6393 * Send a packet designed to force a response if the peer is 6394 * up and reachable: either an ACK if the connection is 6395 * still alive, or an RST if the peer has closed the 6396 * connection due to timeout or reboot. Using sequence 6397 * number tp->snd_una-1 causes the transmitted zero-length 6398 * segment to lie outside the receive window; by the 6399 * protocol spec, this requires the correspondent TCP to 6400 * respond. 6401 */ 6402 KMOD_TCPSTAT_INC(tcps_keepprobe); 6403 t_template = tcpip_maketemplate(inp); 6404 if (t_template) { 6405 if (rack->forced_ack == 0) { 6406 rack->forced_ack = 1; 6407 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6408 } else { 6409 rack->probe_not_answered = 1; 6410 } 6411 tcp_respond(tp, t_template->tt_ipgen, 6412 &t_template->tt_t, (struct mbuf *)NULL, 6413 tp->rcv_nxt, tp->snd_una - 1, 0); 6414 free(t_template, M_TEMP); 6415 } 6416 } 6417 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6418 return (1); 6419 dropit: 6420 KMOD_TCPSTAT_INC(tcps_keepdrops); 6421 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6422 return (-ETIMEDOUT); /* tcp_drop() */ 6423 } 6424 6425 /* 6426 * Retransmit helper function, clear up all the ack 6427 * flags and take care of important book keeping. 6428 */ 6429 static void 6430 rack_remxt_tmr(struct tcpcb *tp) 6431 { 6432 /* 6433 * The retransmit timer went off, all sack'd blocks must be 6434 * un-acked. 6435 */ 6436 struct rack_sendmap *rsm, *trsm = NULL; 6437 struct tcp_rack *rack; 6438 6439 rack = (struct tcp_rack *)tp->t_fb_ptr; 6440 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6441 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6442 if (rack->r_state && (rack->r_state != tp->t_state)) 6443 rack_set_state(tp, rack); 6444 /* 6445 * Ideally we would like to be able to 6446 * mark SACK-PASS on anything not acked here. 6447 * 6448 * However, if we do that we would burst out 6449 * all that data 1ms apart. This would be unwise, 6450 * so for now we will just let the normal rxt timer 6451 * and tlp timer take care of it. 6452 * 6453 * Also we really need to stick them back in sequence 6454 * order. This way we send in the proper order and any 6455 * sacks that come floating in will "re-ack" the data. 6456 * To do this we zap the tmap with an INIT and then 6457 * walk through and place every rsm in the RB tree 6458 * back in its seq ordered place. 
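 * After the walk below the tmap mirrors the RB (sequence) order, every rsm
 * carries RACK_MUST_RXT, and the sacked/early/late aggregates are cleared so
 * the whole outstanding window is treated as un-acked again.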
6459 */ 6460 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6461 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6462 rsm->r_dupack = 0; 6463 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6464 /* We must re-add it back to the tlist */ 6465 if (trsm == NULL) { 6466 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6467 } else { 6468 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6469 } 6470 rsm->r_in_tmap = 1; 6471 trsm = rsm; 6472 if (rsm->r_flags & RACK_ACKED) 6473 rsm->r_flags |= RACK_WAS_ACKED; 6474 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 6475 rsm->r_flags |= RACK_MUST_RXT; 6476 } 6477 /* Clear the count (we just un-acked them) */ 6478 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6479 rack->r_ctl.rc_sacked = 0; 6480 rack->r_ctl.rc_sacklast = NULL; 6481 rack->r_ctl.rc_agg_delayed = 0; 6482 rack->r_early = 0; 6483 rack->r_ctl.rc_agg_early = 0; 6484 rack->r_late = 0; 6485 /* Clear the tlp rtx mark */ 6486 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6487 if (rack->r_ctl.rc_resend != NULL) 6488 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6489 rack->r_ctl.rc_prr_sndcnt = 0; 6490 rack_log_to_prr(rack, 6, 0, __LINE__); 6491 rack->r_timer_override = 1; 6492 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6493 #ifdef NETFLIX_EXP_DETECTION 6494 || (rack->sack_attack_disable != 0) 6495 #endif 6496 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6497 /* 6498 * For non-sack customers new data 6499 * needs to go out as retransmits until 6500 * we retransmit up to snd_max. 6501 */ 6502 rack->r_must_retran = 1; 6503 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6504 rack->r_ctl.rc_sacked); 6505 } 6506 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6507 } 6508 6509 static void 6510 rack_convert_rtts(struct tcpcb *tp) 6511 { 6512 if (tp->t_srtt > 1) { 6513 uint32_t val, frac; 6514 6515 val = tp->t_srtt >> TCP_RTT_SHIFT; 6516 frac = tp->t_srtt & 0x1f; 6517 tp->t_srtt = TICKS_2_USEC(val); 6518 /* 6519 * frac is the fractional part of the srtt (if any) 6520 * but its in ticks and every bit represents 6521 * 1/32nd of a hz. 6522 */ 6523 if (frac) { 6524 if (hz == 1000) { 6525 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6526 } else { 6527 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6528 } 6529 tp->t_srtt += frac; 6530 } 6531 } 6532 if (tp->t_rttvar) { 6533 uint32_t val, frac; 6534 6535 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6536 frac = tp->t_rttvar & 0x1f; 6537 tp->t_rttvar = TICKS_2_USEC(val); 6538 /* 6539 * frac is the fractional part of the srtt (if any) 6540 * but its in ticks and every bit represents 6541 * 1/32nd of a hz. 
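 * Concretely, with hz == 1000 (one tick per millisecond) a fractional
 * value of 16 converts to 16 * 1000 / 32 = 500 extra microseconds on top
 * of the TICKS_2_USEC()'d whole-tick part.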
6542 */ 6543 if (frac) { 6544 if (hz == 1000) { 6545 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6546 } else { 6547 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6548 } 6549 tp->t_rttvar += frac; 6550 } 6551 } 6552 tp->t_rxtcur = RACK_REXMTVAL(tp); 6553 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6554 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6555 } 6556 if (tp->t_rxtcur > rack_rto_max) { 6557 tp->t_rxtcur = rack_rto_max; 6558 } 6559 } 6560 6561 static void 6562 rack_cc_conn_init(struct tcpcb *tp) 6563 { 6564 struct tcp_rack *rack; 6565 uint32_t srtt; 6566 6567 rack = (struct tcp_rack *)tp->t_fb_ptr; 6568 srtt = tp->t_srtt; 6569 cc_conn_init(tp); 6570 /* 6571 * Now convert to rack's internal format, 6572 * if required. 6573 */ 6574 if ((srtt == 0) && (tp->t_srtt != 0)) 6575 rack_convert_rtts(tp); 6576 /* 6577 * We want a chance to stay in slowstart as 6578 * we create a connection. TCP spec says that 6579 * initially ssthresh is infinite. For our 6580 * purposes that is the snd_wnd. 6581 */ 6582 if (tp->snd_ssthresh < tp->snd_wnd) { 6583 tp->snd_ssthresh = tp->snd_wnd; 6584 } 6585 /* 6586 * We also want to assure a IW worth of 6587 * data can get inflight. 6588 */ 6589 if (rc_init_window(rack) < tp->snd_cwnd) 6590 tp->snd_cwnd = rc_init_window(rack); 6591 } 6592 6593 /* 6594 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6595 * we will setup to retransmit the lowest seq number outstanding. 6596 */ 6597 static int 6598 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6599 { 6600 struct inpcb *inp = tptoinpcb(tp); 6601 int32_t rexmt; 6602 int32_t retval = 0; 6603 bool isipv6; 6604 6605 if ((tp->t_flags & TF_GPUTINPROG) && 6606 (tp->t_rxtshift)) { 6607 /* 6608 * We have had a second timeout 6609 * measurements on successive rxt's are not profitable. 6610 * It is unlikely to be of any use (the network is 6611 * broken or the client went away). 6612 */ 6613 tp->t_flags &= ~TF_GPUTINPROG; 6614 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6615 rack->r_ctl.rc_gp_srtt /*flex1*/, 6616 tp->gput_seq, 6617 0, 0, 18, __LINE__, NULL, 0); 6618 } 6619 if (ctf_progress_timeout_check(tp, false)) { 6620 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6621 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6622 return (-ETIMEDOUT); /* tcp_drop() */ 6623 } 6624 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6625 rack->r_ctl.retran_during_recovery = 0; 6626 rack->rc_ack_required = 1; 6627 rack->r_ctl.dsack_byte_cnt = 0; 6628 if (IN_FASTRECOVERY(tp->t_flags)) 6629 tp->t_flags |= TF_WASFRECOVERY; 6630 else 6631 tp->t_flags &= ~TF_WASFRECOVERY; 6632 if (IN_CONGRECOVERY(tp->t_flags)) 6633 tp->t_flags |= TF_WASCRECOVERY; 6634 else 6635 tp->t_flags &= ~TF_WASCRECOVERY; 6636 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6637 (tp->snd_una == tp->snd_max)) { 6638 /* Nothing outstanding .. nothing to do */ 6639 return (0); 6640 } 6641 if (rack->r_ctl.dsack_persist) { 6642 rack->r_ctl.dsack_persist--; 6643 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6644 rack->r_ctl.num_dsack = 0; 6645 } 6646 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6647 } 6648 /* 6649 * Rack can only run one timer at a time, so we cannot 6650 * run a KEEPINIT (gating SYN sending) and a retransmit 6651 * timer for the SYN. 
So if we are in a front state and 6652 * have a KEEPINIT timer we need to check the first transmit 6653 * against now to see if we have exceeded the KEEPINIT time 6654 * (if one is set). 6655 */ 6656 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6657 (TP_KEEPINIT(tp) != 0)) { 6658 struct rack_sendmap *rsm; 6659 6660 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6661 if (rsm) { 6662 /* Ok we have something outstanding to test keepinit with */ 6663 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6664 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6665 /* We have exceeded the KEEPINIT time */ 6666 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6667 goto drop_it; 6668 } 6669 } 6670 } 6671 /* 6672 * Retransmission timer went off. Message has not been acked within 6673 * retransmit interval. Back off to a longer retransmit interval 6674 * and retransmit one segment. 6675 */ 6676 rack_remxt_tmr(tp); 6677 if ((rack->r_ctl.rc_resend == NULL) || 6678 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6679 /* 6680 * If the rwnd collapsed on 6681 * the one we are retransmitting 6682 * it does not count against the 6683 * rxt count. 6684 */ 6685 tp->t_rxtshift++; 6686 } 6687 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6688 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6689 drop_it: 6690 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6691 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6692 /* XXXGL: previously t_softerror was casted to uint16_t */ 6693 MPASS(tp->t_softerror >= 0); 6694 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6695 goto out; /* tcp_drop() */ 6696 } 6697 if (tp->t_state == TCPS_SYN_SENT) { 6698 /* 6699 * If the SYN was retransmitted, indicate CWND to be limited 6700 * to 1 segment in cc_conn_init(). 6701 */ 6702 tp->snd_cwnd = 1; 6703 } else if (tp->t_rxtshift == 1) { 6704 /* 6705 * first retransmit; record ssthresh and cwnd so they can be 6706 * recovered if this turns out to be a "bad" retransmit. A 6707 * retransmit is considered "bad" if an ACK for this segment 6708 * is received within RTT/2 interval; the assumption here is 6709 * that the ACK was already in flight. See "On Estimating 6710 * End-to-End Network Path Properties" by Allman and Paxson 6711 * for more details. 6712 */ 6713 tp->snd_cwnd_prev = tp->snd_cwnd; 6714 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6715 tp->snd_recover_prev = tp->snd_recover; 6716 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6717 tp->t_flags |= TF_PREVVALID; 6718 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6719 tp->t_flags &= ~TF_PREVVALID; 6720 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6721 if ((tp->t_state == TCPS_SYN_SENT) || 6722 (tp->t_state == TCPS_SYN_RECEIVED)) 6723 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6724 else 6725 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6726 6727 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6728 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6729 /* 6730 * We enter the path for PLMTUD if connection is established or, if 6731 * connection is FIN_WAIT_1 status, reason for the last is that if 6732 * amount of data we send is very small, we could send it in couple 6733 * of packets and process straight to FIN. In that case we won't 6734 * catch ESTABLISHED state. 6735 */ 6736 #ifdef INET6 6737 isipv6 = (inp->inp_vflag & INP_IPV6) ? 
true : false; 6738 #else 6739 isipv6 = false; 6740 #endif 6741 if (((V_tcp_pmtud_blackhole_detect == 1) || 6742 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6743 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6744 ((tp->t_state == TCPS_ESTABLISHED) || 6745 (tp->t_state == TCPS_FIN_WAIT_1))) { 6746 /* 6747 * Idea here is that at each stage of mtu probe (usually, 6748 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6749 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6750 * should take care of that. 6751 */ 6752 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6753 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6754 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6755 tp->t_rxtshift % 2 == 0)) { 6756 /* 6757 * Enter Path MTU Black-hole Detection mechanism: - 6758 * Disable Path MTU Discovery (IP "DF" bit). - 6759 * Reduce MTU to lower value than what we negotiated 6760 * with peer. 6761 */ 6762 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6763 /* Record that we may have found a black hole. */ 6764 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6765 /* Keep track of previous MSS. */ 6766 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6767 } 6768 6769 /* 6770 * Reduce the MSS to blackhole value or to the 6771 * default in an attempt to retransmit. 6772 */ 6773 #ifdef INET6 6774 if (isipv6 && 6775 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6776 /* Use the sysctl tuneable blackhole MSS. */ 6777 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6778 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6779 } else if (isipv6) { 6780 /* Use the default MSS. */ 6781 tp->t_maxseg = V_tcp_v6mssdflt; 6782 /* 6783 * Disable Path MTU Discovery when we switch 6784 * to minmss. 6785 */ 6786 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6787 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6788 } 6789 #endif 6790 #if defined(INET6) && defined(INET) 6791 else 6792 #endif 6793 #ifdef INET 6794 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6795 /* Use the sysctl tuneable blackhole MSS. */ 6796 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6797 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6798 } else { 6799 /* Use the default MSS. */ 6800 tp->t_maxseg = V_tcp_mssdflt; 6801 /* 6802 * Disable Path MTU Discovery when we switch 6803 * to minmss. 6804 */ 6805 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6806 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6807 } 6808 #endif 6809 } else { 6810 /* 6811 * If further retransmissions are still unsuccessful 6812 * with a lowered MTU, maybe this isn't a blackhole 6813 * and we restore the previous MSS and blackhole 6814 * detection flags. The limit '6' is determined by 6815 * giving each probe stage (1448, 1188, 524) 2 6816 * chances to recover. 6817 */ 6818 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6819 (tp->t_rxtshift >= 6)) { 6820 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6821 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6822 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6823 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6824 } 6825 } 6826 } 6827 /* 6828 * Disable RFC1323 and SACK if we haven't got any response to 6829 * our third SYN to work-around some broken terminal servers 6830 * (most of which have hopefully been retired) that have bad VJ 6831 * header compression code which trashes TCP segments containing 6832 * unknown-to-them TCP options. 
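 * This only fires while we are still in SYN_SENT, on the third retransmit
 * of the SYN, and is gated by the tcp_rexmit_drop_options sysctl.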
6833 */ 6834 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6835 (tp->t_rxtshift == 3)) 6836 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6837 /* 6838 * If we backed off this far, our srtt estimate is probably bogus. 6839 * Clobber it so we'll take the next rtt measurement as our srtt; 6840 * move the current srtt into rttvar to keep the current retransmit 6841 * times until then. 6842 */ 6843 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6844 #ifdef INET6 6845 if ((inp->inp_vflag & INP_IPV6) != 0) 6846 in6_losing(inp); 6847 else 6848 #endif 6849 in_losing(inp); 6850 tp->t_rttvar += tp->t_srtt; 6851 tp->t_srtt = 0; 6852 } 6853 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6854 tp->snd_recover = tp->snd_max; 6855 tp->t_flags |= TF_ACKNOW; 6856 tp->t_rtttime = 0; 6857 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6858 out: 6859 return (retval); 6860 } 6861 6862 static int 6863 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6864 { 6865 int32_t ret = 0; 6866 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6867 6868 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6869 (tp->t_flags & TF_GPUTINPROG)) { 6870 /* 6871 * We have a goodput in progress 6872 * and we have entered a late state. 6873 * Do we have enough data in the sb 6874 * to handle the GPUT request? 6875 */ 6876 uint32_t bytes; 6877 6878 bytes = tp->gput_ack - tp->gput_seq; 6879 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6880 bytes += tp->gput_seq - tp->snd_una; 6881 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 6882 /* 6883 * There are not enough bytes in the socket 6884 * buffer that have been sent to cover this 6885 * measurement. Cancel it. 6886 */ 6887 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6888 rack->r_ctl.rc_gp_srtt /*flex1*/, 6889 tp->gput_seq, 6890 0, 0, 18, __LINE__, NULL, 0); 6891 tp->t_flags &= ~TF_GPUTINPROG; 6892 } 6893 } 6894 if (timers == 0) { 6895 return (0); 6896 } 6897 if (tp->t_state == TCPS_LISTEN) { 6898 /* no timers on listen sockets */ 6899 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6900 return (0); 6901 return (1); 6902 } 6903 if ((timers & PACE_TMR_RACK) && 6904 rack->rc_on_min_to) { 6905 /* 6906 * For the rack timer when we 6907 * are on a min-timeout (which means rrr_conf = 3) 6908 * we don't want to check the timer. It may 6909 * be going off for a pace and thats ok we 6910 * want to send the retransmit (if its ready). 6911 * 6912 * If its on a normal rack timer (non-min) then 6913 * we will check if its expired. 6914 */ 6915 goto skip_time_check; 6916 } 6917 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6918 uint32_t left; 6919 6920 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6921 ret = -1; 6922 rack_log_to_processing(rack, cts, ret, 0); 6923 return (0); 6924 } 6925 if (hpts_calling == 0) { 6926 /* 6927 * A user send or queued mbuf (sack) has called us? We 6928 * return 0 and let the pacing guards 6929 * deal with it if they should or 6930 * should not cause a send. 6931 */ 6932 ret = -2; 6933 rack_log_to_processing(rack, cts, ret, 0); 6934 return (0); 6935 } 6936 /* 6937 * Ok our timer went off early and we are not paced false 6938 * alarm, go back to sleep. 
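 * We compute how much time remains until rc_timer_exp, re-insert ourselves
 * into the hpts wheel for that remaining time and return 1 so the caller
 * does not proceed; the -3 is only recorded for the log.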
6939 */ 6940 ret = -3; 6941 left = rack->r_ctl.rc_timer_exp - cts; 6942 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left)); 6943 rack_log_to_processing(rack, cts, ret, left); 6944 return (1); 6945 } 6946 skip_time_check: 6947 rack->rc_tmr_stopped = 0; 6948 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6949 if (timers & PACE_TMR_DELACK) { 6950 ret = rack_timeout_delack(tp, rack, cts); 6951 } else if (timers & PACE_TMR_RACK) { 6952 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6953 rack->r_fast_output = 0; 6954 ret = rack_timeout_rack(tp, rack, cts); 6955 } else if (timers & PACE_TMR_TLP) { 6956 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6957 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 6958 } else if (timers & PACE_TMR_RXT) { 6959 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6960 rack->r_fast_output = 0; 6961 ret = rack_timeout_rxt(tp, rack, cts); 6962 } else if (timers & PACE_TMR_PERSIT) { 6963 ret = rack_timeout_persist(tp, rack, cts); 6964 } else if (timers & PACE_TMR_KEEP) { 6965 ret = rack_timeout_keepalive(tp, rack, cts); 6966 } 6967 rack_log_to_processing(rack, cts, ret, timers); 6968 return (ret); 6969 } 6970 6971 static void 6972 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6973 { 6974 struct timeval tv; 6975 uint32_t us_cts, flags_on_entry; 6976 uint8_t hpts_removed = 0; 6977 6978 flags_on_entry = rack->r_ctl.rc_hpts_flags; 6979 us_cts = tcp_get_usecs(&tv); 6980 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 6981 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 6982 ((tp->snd_max - tp->snd_una) == 0))) { 6983 tcp_hpts_remove(rack->rc_inp); 6984 hpts_removed = 1; 6985 /* If we were not delayed cancel out the flag. */ 6986 if ((tp->snd_max - tp->snd_una) == 0) 6987 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6988 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6989 } 6990 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 6991 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6992 if (tcp_in_hpts(rack->rc_inp) && 6993 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 6994 /* 6995 * Canceling timer's when we have no output being 6996 * paced. We also must remove ourselves from the 6997 * hpts. 6998 */ 6999 tcp_hpts_remove(rack->rc_inp); 7000 hpts_removed = 1; 7001 } 7002 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7003 } 7004 if (hpts_removed == 0) 7005 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7006 } 7007 7008 static int 7009 rack_stopall(struct tcpcb *tp) 7010 { 7011 struct tcp_rack *rack; 7012 rack = (struct tcp_rack *)tp->t_fb_ptr; 7013 rack->t_timers_stopped = 1; 7014 return (0); 7015 } 7016 7017 static void 7018 rack_stop_all_timers(struct tcpcb *tp) 7019 { 7020 struct tcp_rack *rack; 7021 7022 /* 7023 * Assure no timers are running. 
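 * The only timer we actually look at is a previously armed persist timer;
 * if one is active we note it so rack starts out with rc_in_persist set
 * (presumably we are taking over a connection that was already probing a
 * zero window).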
7024 */ 7025 if (tcp_timer_active(tp, TT_PERSIST)) { 7026 /* We enter in persists, set the flag appropriately */ 7027 rack = (struct tcp_rack *)tp->t_fb_ptr; 7028 rack->rc_in_persist = 1; 7029 } 7030 } 7031 7032 static void 7033 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7034 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7035 { 7036 int32_t idx; 7037 7038 rsm->r_rtr_cnt++; 7039 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7040 rsm->r_dupack = 0; 7041 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7042 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7043 rsm->r_flags |= RACK_OVERMAX; 7044 } 7045 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7046 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7047 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7048 } 7049 idx = rsm->r_rtr_cnt - 1; 7050 rsm->r_tim_lastsent[idx] = ts; 7051 /* 7052 * Here we don't add in the len of send, since its already 7053 * in snduna <->snd_max. 7054 */ 7055 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7056 rack->r_ctl.rc_sacked); 7057 if (rsm->r_flags & RACK_ACKED) { 7058 /* Problably MTU discovery messing with us */ 7059 rsm->r_flags &= ~RACK_ACKED; 7060 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7061 } 7062 if (rsm->r_in_tmap) { 7063 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7064 rsm->r_in_tmap = 0; 7065 } 7066 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7067 rsm->r_in_tmap = 1; 7068 /* Take off the must retransmit flag, if its on */ 7069 if (rsm->r_flags & RACK_MUST_RXT) { 7070 if (rack->r_must_retran) 7071 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7072 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7073 /* 7074 * We have retransmitted all we need. Clear 7075 * any must retransmit flags. 7076 */ 7077 rack->r_must_retran = 0; 7078 rack->r_ctl.rc_out_at_rto = 0; 7079 } 7080 rsm->r_flags &= ~RACK_MUST_RXT; 7081 } 7082 if (rsm->r_flags & RACK_SACK_PASSED) { 7083 /* We have retransmitted due to the SACK pass */ 7084 rsm->r_flags &= ~RACK_SACK_PASSED; 7085 rsm->r_flags |= RACK_WAS_SACKPASS; 7086 } 7087 } 7088 7089 static uint32_t 7090 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7091 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7092 { 7093 /* 7094 * We (re-)transmitted starting at rsm->r_start for some length 7095 * (possibly less than r_end. 7096 */ 7097 struct rack_sendmap *nrsm; 7098 #ifdef INVARIANTS 7099 struct rack_sendmap *insret; 7100 #endif 7101 uint32_t c_end; 7102 int32_t len; 7103 7104 len = *lenp; 7105 c_end = rsm->r_start + len; 7106 if (SEQ_GEQ(c_end, rsm->r_end)) { 7107 /* 7108 * We retransmitted the whole piece or more than the whole 7109 * slopping into the next rsm. 7110 */ 7111 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7112 if (c_end == rsm->r_end) { 7113 *lenp = 0; 7114 return (0); 7115 } else { 7116 int32_t act_len; 7117 7118 /* Hangs over the end return whats left */ 7119 act_len = rsm->r_end - rsm->r_start; 7120 *lenp = (len - act_len); 7121 return (rsm->r_end); 7122 } 7123 /* We don't get out of this block. */ 7124 } 7125 /* 7126 * Here we retransmitted less than the whole thing which means we 7127 * have to split this into what was transmitted and what was not. 7128 */ 7129 nrsm = rack_alloc_full_limit(rack); 7130 if (nrsm == NULL) { 7131 /* 7132 * We can't get memory, so lets not proceed. 7133 */ 7134 *lenp = 0; 7135 return (0); 7136 } 7137 /* 7138 * So here we are going to take the original rsm and make it what we 7139 * retransmitted. 
nrsm will be the tail portion we did not 7140 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7141 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7142 * 1, 6 and the new piece will be 6, 11. 7143 */ 7144 rack_clone_rsm(rack, nrsm, rsm, c_end); 7145 nrsm->r_dupack = 0; 7146 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7147 #ifndef INVARIANTS 7148 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7149 #else 7150 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7151 if (insret != NULL) { 7152 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7153 nrsm, insret, rack, rsm); 7154 } 7155 #endif 7156 if (rsm->r_in_tmap) { 7157 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7158 nrsm->r_in_tmap = 1; 7159 } 7160 rsm->r_flags &= (~RACK_HAS_FIN); 7161 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7162 /* Log a split of rsm into rsm and nrsm */ 7163 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7164 *lenp = 0; 7165 return (0); 7166 } 7167 7168 static void 7169 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7170 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7171 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7172 { 7173 struct tcp_rack *rack; 7174 struct rack_sendmap *rsm, *nrsm, fe; 7175 #ifdef INVARIANTS 7176 struct rack_sendmap *insret; 7177 #endif 7178 register uint32_t snd_max, snd_una; 7179 7180 /* 7181 * Add to the RACK log of packets in flight or retransmitted. If 7182 * there is a TS option we will use the TS echoed, if not we will 7183 * grab a TS. 7184 * 7185 * Retransmissions will increment the count and move the ts to its 7186 * proper place. Note that if options do not include TS's then we 7187 * won't be able to effectively use the ACK for an RTT on a retran. 7188 * 7189 * Notes about r_start and r_end. Lets consider a send starting at 7190 * sequence 1 for 10 bytes. In such an example the r_start would be 7191 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7192 * This means that r_end is actually the first sequence for the next 7193 * slot (11). 7194 * 7195 */ 7196 /* 7197 * If err is set what do we do XXXrrs? should we not add the thing? 7198 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7199 * i.e. proceed with add ** do this for now. 7200 */ 7201 INP_WLOCK_ASSERT(tptoinpcb(tp)); 7202 if (err) 7203 /* 7204 * We don't log errors -- we could but snd_max does not 7205 * advance in this case either. 7206 */ 7207 return; 7208 7209 if (th_flags & TH_RST) { 7210 /* 7211 * We don't log resets and we return immediately from 7212 * sending 7213 */ 7214 return; 7215 } 7216 rack = (struct tcp_rack *)tp->t_fb_ptr; 7217 snd_una = tp->snd_una; 7218 snd_max = tp->snd_max; 7219 if (th_flags & (TH_SYN | TH_FIN)) { 7220 /* 7221 * The call to rack_log_output is made before bumping 7222 * snd_max. This means we can record one extra byte on a SYN 7223 * or FIN if seq_out is adding more on and a FIN is present 7224 * (and we are not resending). 7225 */ 7226 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7227 len++; 7228 if (th_flags & TH_FIN) 7229 len++; 7230 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7231 /* 7232 * The add/update as not been done for the FIN/SYN 7233 * yet. 7234 */ 7235 snd_max = tp->snd_nxt; 7236 } 7237 } 7238 if (SEQ_LEQ((seq_out + len), snd_una)) { 7239 /* Are sending an old segment to induce an ack (keep-alive)? 
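 * Everything at or below snd_una is already acknowledged, so there is
 * nothing new to enter into the map; just return.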
*/ 7240 return; 7241 } 7242 if (SEQ_LT(seq_out, snd_una)) { 7243 /* huh? should we panic? */ 7244 uint32_t end; 7245 7246 end = seq_out + len; 7247 seq_out = snd_una; 7248 if (SEQ_GEQ(end, seq_out)) 7249 len = end - seq_out; 7250 else 7251 len = 0; 7252 } 7253 if (len == 0) { 7254 /* We don't log zero window probes */ 7255 return; 7256 } 7257 if (IN_FASTRECOVERY(tp->t_flags)) { 7258 rack->r_ctl.rc_prr_out += len; 7259 } 7260 /* First question is it a retransmission or new? */ 7261 if (seq_out == snd_max) { 7262 /* Its new */ 7263 again: 7264 rsm = rack_alloc(rack); 7265 if (rsm == NULL) { 7266 /* 7267 * Hmm out of memory and the tcb got destroyed while 7268 * we tried to wait. 7269 */ 7270 return; 7271 } 7272 if (th_flags & TH_FIN) { 7273 rsm->r_flags = RACK_HAS_FIN|add_flag; 7274 } else { 7275 rsm->r_flags = add_flag; 7276 } 7277 if (hw_tls) 7278 rsm->r_hw_tls = 1; 7279 rsm->r_tim_lastsent[0] = cts; 7280 rsm->r_rtr_cnt = 1; 7281 rsm->r_rtr_bytes = 0; 7282 if (th_flags & TH_SYN) { 7283 /* The data space is one beyond snd_una */ 7284 rsm->r_flags |= RACK_HAS_SYN; 7285 } 7286 rsm->r_start = seq_out; 7287 rsm->r_end = rsm->r_start + len; 7288 rsm->r_dupack = 0; 7289 /* 7290 * save off the mbuf location that 7291 * sndmbuf_noadv returned (which is 7292 * where we started copying from).. 7293 */ 7294 rsm->m = s_mb; 7295 rsm->soff = s_moff; 7296 /* 7297 * Here we do add in the len of send, since its not yet 7298 * reflected in in snduna <->snd_max 7299 */ 7300 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7301 rack->r_ctl.rc_sacked) + 7302 (rsm->r_end - rsm->r_start)); 7303 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7304 if (rsm->m) { 7305 if (rsm->m->m_len <= rsm->soff) { 7306 /* 7307 * XXXrrs Question, will this happen? 7308 * 7309 * If sbsndptr is set at the correct place 7310 * then s_moff should always be somewhere 7311 * within rsm->m. But if the sbsndptr was 7312 * off then that won't be true. If it occurs 7313 * we need to walkout to the correct location. 7314 */ 7315 struct mbuf *lm; 7316 7317 lm = rsm->m; 7318 while (lm->m_len <= rsm->soff) { 7319 rsm->soff -= lm->m_len; 7320 lm = lm->m_next; 7321 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7322 __func__, rack, s_moff, s_mb, rsm->soff)); 7323 } 7324 rsm->m = lm; 7325 } 7326 rsm->orig_m_len = rsm->m->m_len; 7327 } else 7328 rsm->orig_m_len = 0; 7329 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7330 /* Log a new rsm */ 7331 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7332 #ifndef INVARIANTS 7333 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7334 #else 7335 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7336 if (insret != NULL) { 7337 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7338 nrsm, insret, rack, rsm); 7339 } 7340 #endif 7341 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7342 rsm->r_in_tmap = 1; 7343 /* 7344 * Special case detection, is there just a single 7345 * packet outstanding when we are not in recovery? 7346 * 7347 * If this is true mark it so. 7348 */ 7349 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7350 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7351 struct rack_sendmap *prsm; 7352 7353 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7354 if (prsm) 7355 prsm->r_one_out_nr = 1; 7356 } 7357 return; 7358 } 7359 /* 7360 * If we reach here its a retransmission and we need to find it. 
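 * We first try the caller's hint (hintrsm), then fall back to an RB_FIND
 * keyed on seq_out; if the retransmission starts in the middle of an
 * existing rsm we split the front piece off and update that.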
7361 */ 7362 memset(&fe, 0, sizeof(fe)); 7363 more: 7364 if (hintrsm && (hintrsm->r_start == seq_out)) { 7365 rsm = hintrsm; 7366 hintrsm = NULL; 7367 } else { 7368 /* No hints sorry */ 7369 rsm = NULL; 7370 } 7371 if ((rsm) && (rsm->r_start == seq_out)) { 7372 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7373 if (len == 0) { 7374 return; 7375 } else { 7376 goto more; 7377 } 7378 } 7379 /* Ok it was not the last pointer go through it the hard way. */ 7380 refind: 7381 fe.r_start = seq_out; 7382 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7383 if (rsm) { 7384 if (rsm->r_start == seq_out) { 7385 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7386 if (len == 0) { 7387 return; 7388 } else { 7389 goto refind; 7390 } 7391 } 7392 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7393 /* Transmitted within this piece */ 7394 /* 7395 * Ok we must split off the front and then let the 7396 * update do the rest 7397 */ 7398 nrsm = rack_alloc_full_limit(rack); 7399 if (nrsm == NULL) { 7400 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7401 return; 7402 } 7403 /* 7404 * copy rsm to nrsm and then trim the front of rsm 7405 * to not include this part. 7406 */ 7407 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7408 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7409 #ifndef INVARIANTS 7410 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7411 #else 7412 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7413 if (insret != NULL) { 7414 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7415 nrsm, insret, rack, rsm); 7416 } 7417 #endif 7418 if (rsm->r_in_tmap) { 7419 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7420 nrsm->r_in_tmap = 1; 7421 } 7422 rsm->r_flags &= (~RACK_HAS_FIN); 7423 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7424 if (len == 0) { 7425 return; 7426 } else if (len > 0) 7427 goto refind; 7428 } 7429 } 7430 /* 7431 * Hmm not found in map did they retransmit both old and on into the 7432 * new? 7433 */ 7434 if (seq_out == tp->snd_max) { 7435 goto again; 7436 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7437 #ifdef INVARIANTS 7438 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7439 seq_out, len, tp->snd_una, tp->snd_max); 7440 printf("Starting Dump of all rack entries\n"); 7441 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7442 printf("rsm:%p start:%u end:%u\n", 7443 rsm, rsm->r_start, rsm->r_end); 7444 } 7445 printf("Dump complete\n"); 7446 panic("seq_out not found rack:%p tp:%p", 7447 rack, tp); 7448 #endif 7449 } else { 7450 #ifdef INVARIANTS 7451 /* 7452 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7453 * flag) 7454 */ 7455 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7456 seq_out, len, tp->snd_max, tp); 7457 #endif 7458 } 7459 } 7460 7461 /* 7462 * Record one of the RTT updates from an ack into 7463 * our sample structure. 
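 * The per-ack sample lives in r_ctl.rack_rs: the lowest/highest/total rtt
 * and a count, plus the preferred microsecond rtt (rs_us_rtt) and its
 * confidence; tcp_rack_xmit_timer_commit() later decides which of these
 * actually updates the tcb.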
7464 */ 7465 7466 static void 7467 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7468 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7469 { 7470 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7471 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7472 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7473 } 7474 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7475 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7476 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7477 } 7478 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7479 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7480 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7481 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7482 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7483 } 7484 if ((confidence == 1) && 7485 ((rsm == NULL) || 7486 (rsm->r_just_ret) || 7487 (rsm->r_one_out_nr && 7488 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7489 /* 7490 * If the rsm had a just-return hit, then we can't trust the 7491 * rtt measurement for buffer determination. 7492 * Note that a confidence of 2 indicates 7493 * SACK'd, which overrides the r_just_ret or 7494 * the r_one_out_nr. If it was a CUM-ACK and 7495 * we had only two outstanding, but got an 7496 * ack for only one, then that also 7497 * lowers our 7498 * confidence. 7499 */ 7500 confidence = 0; 7501 } 7502 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7503 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7504 if (rack->r_ctl.rack_rs.confidence == 0) { 7505 /* 7506 * We take anything with no current confidence 7507 * saved. 7508 */ 7509 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7510 rack->r_ctl.rack_rs.confidence = confidence; 7511 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7512 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7513 /* 7514 * Once we have a confident number, 7515 * we can update it with a smaller 7516 * value since this confident number 7517 * may include the DSACK time until 7518 * the next segment (the second one) arrived. 7519 */ 7520 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7521 rack->r_ctl.rack_rs.confidence = confidence; 7522 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7523 } 7524 } 7525 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7526 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7527 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7528 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7529 } 7530 7531 /* 7532 * Collect new round-trip time estimate 7533 * and update averages and current timeout.
7534 */ 7535 static void 7536 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7537 { 7538 int32_t delta; 7539 int32_t rtt; 7540 7541 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7542 /* No valid sample */ 7543 return; 7544 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7545 /* We are to use the lowest RTT seen in a single ack */ 7546 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7547 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7548 /* We are to use the highest RTT seen in a single ack */ 7549 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7550 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7551 /* We are to use the average RTT seen in a single ack */ 7552 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7553 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7554 } else { 7555 #ifdef INVARIANTS 7556 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7557 #endif 7558 return; 7559 } 7560 if (rtt == 0) 7561 rtt = 1; 7562 if (rack->rc_gp_rtt_set == 0) { 7563 /* 7564 * With no RTT we have to accept 7565 * even one we are not confident of. 7566 */ 7567 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7568 rack->rc_gp_rtt_set = 1; 7569 } else if (rack->r_ctl.rack_rs.confidence) { 7570 /* update the running gp srtt */ 7571 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7572 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7573 } 7574 if (rack->r_ctl.rack_rs.confidence) { 7575 /* 7576 * record the low and high for highly buffered path computation, 7577 * we only do this if we are confident (not a retransmission). 7578 */ 7579 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7580 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7581 } 7582 if (rack->rc_highly_buffered == 0) { 7583 /* 7584 * Currently once we declare a path has 7585 * highly buffered there is no going 7586 * back, which may be a problem... 7587 */ 7588 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7589 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7590 rack->r_ctl.rc_highest_us_rtt, 7591 rack->r_ctl.rc_lowest_us_rtt, 7592 RACK_RTTS_SEEHBP); 7593 rack->rc_highly_buffered = 1; 7594 } 7595 } 7596 } 7597 if ((rack->r_ctl.rack_rs.confidence) || 7598 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7599 /* 7600 * If we are highly confident of it <or> it was 7601 * never retransmitted we accept it as the last us_rtt. 7602 */ 7603 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7604 /* The lowest rtt can be set if its was not retransmited */ 7605 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7606 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7607 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7608 rack->r_ctl.rc_lowest_us_rtt = 1; 7609 } 7610 } 7611 rack = (struct tcp_rack *)tp->t_fb_ptr; 7612 if (tp->t_srtt != 0) { 7613 /* 7614 * We keep a simple srtt in microseconds, like our rtt 7615 * measurement. We don't need to do any tricks with shifting 7616 * etc. Instead we just add in 1/8th of the new measurement 7617 * and subtract out 1/8 of the old srtt. We do the same with 7618 * the variance after finding the absolute value of the 7619 * difference between this sample and the current srtt. 
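 * For example, with srtt = 10000 usec and a new sample of 18000 usec:
 * srtt becomes 10000 - (10000 >> 3) + (18000 >> 3) = 10000 - 1250 + 2250
 * = 11000 usec, and |delta| = 8000 feeds the rttvar update the same way.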
7620 */ 7621 delta = tp->t_srtt - rtt; 7622 /* Take off 1/8th of the current sRTT */ 7623 tp->t_srtt -= (tp->t_srtt >> 3); 7624 /* Add in 1/8th of the new RTT just measured */ 7625 tp->t_srtt += (rtt >> 3); 7626 if (tp->t_srtt <= 0) 7627 tp->t_srtt = 1; 7628 /* Now lets make the absolute value of the variance */ 7629 if (delta < 0) 7630 delta = -delta; 7631 /* Subtract out 1/8th */ 7632 tp->t_rttvar -= (tp->t_rttvar >> 3); 7633 /* Add in 1/8th of the new variance we just saw */ 7634 tp->t_rttvar += (delta >> 3); 7635 if (tp->t_rttvar <= 0) 7636 tp->t_rttvar = 1; 7637 } else { 7638 /* 7639 * No rtt measurement yet - use the unsmoothed rtt. Set the 7640 * variance to half the rtt (so our first retransmit happens 7641 * at 3*rtt). 7642 */ 7643 tp->t_srtt = rtt; 7644 tp->t_rttvar = rtt >> 1; 7645 } 7646 rack->rc_srtt_measure_made = 1; 7647 KMOD_TCPSTAT_INC(tcps_rttupdated); 7648 tp->t_rttupdated++; 7649 #ifdef STATS 7650 if (rack_stats_gets_ms_rtt == 0) { 7651 /* Send in the microsecond rtt used for rxt timeout purposes */ 7652 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7653 } else if (rack_stats_gets_ms_rtt == 1) { 7654 /* Send in the millisecond rtt used for rxt timeout purposes */ 7655 int32_t ms_rtt; 7656 7657 /* Round up */ 7658 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7659 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7660 } else if (rack_stats_gets_ms_rtt == 2) { 7661 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7662 int32_t ms_rtt; 7663 7664 /* Round up */ 7665 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7666 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7667 } else { 7668 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7669 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7670 } 7671 7672 #endif 7673 /* 7674 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7675 * way we do the smoothing, srtt and rttvar will each average +1/2 7676 * tick of bias. When we compute the retransmit timer, we want 1/2 7677 * tick of rounding and 1 extra tick because of +-1/2 tick 7678 * uncertainty in the firing of the timer. The bias will give us 7679 * exactly the 1.5 tick we need. But, because the bias is 7680 * statistical, we have to test that we don't drop below the minimum 7681 * feasible timer (which is 2 ticks). 7682 */ 7683 tp->t_rxtshift = 0; 7684 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7685 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7686 rack_log_rtt_sample(rack, rtt); 7687 tp->t_softerror = 0; 7688 } 7689 7690 7691 static void 7692 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7693 { 7694 /* 7695 * Apply to filter the inbound us-rtt at us_cts. 7696 */ 7697 uint32_t old_rtt; 7698 7699 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7700 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7701 us_rtt, us_cts); 7702 if (old_rtt > us_rtt) { 7703 /* We just hit a new lower rtt time */ 7704 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7705 __LINE__, RACK_RTTS_NEWRTT); 7706 /* 7707 * Only count it if its lower than what we saw within our 7708 * calculated range. 
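 * "Within our calculated range" means the drop must exceed
 * rack_min_rtt_movement. If it does, and we are pacing with the dynamic
 * multiplier (not fixed rate) and are already within the last
 * rack_probertt_lower_within percent of the interval between probe-rtts
 * (val = rack_probertt_lower_within * rack_time_between_probertt / 100),
 * we enter probe-rtt right away on the theory that a competing flow just did.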
7709 */ 7710 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7711 if (rack_probertt_lower_within && 7712 rack->rc_gp_dyn_mul && 7713 (rack->use_fixed_rate == 0) && 7714 (rack->rc_always_pace)) { 7715 /* 7716 * We are seeing a new lower rtt very close 7717 * to the time that we would have entered probe-rtt. 7718 * This is probably due to the fact that a peer flow 7719 * has entered probe-rtt. Lets go in now too. 7720 */ 7721 uint32_t val; 7722 7723 val = rack_probertt_lower_within * rack_time_between_probertt; 7724 val /= 100; 7725 if ((rack->in_probe_rtt == 0) && 7726 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7727 rack_enter_probertt(rack, us_cts); 7728 } 7729 } 7730 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7731 } 7732 } 7733 } 7734 7735 static int 7736 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7737 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7738 { 7739 uint32_t us_rtt; 7740 int32_t i, all; 7741 uint32_t t, len_acked; 7742 7743 if ((rsm->r_flags & RACK_ACKED) || 7744 (rsm->r_flags & RACK_WAS_ACKED)) 7745 /* Already done */ 7746 return (0); 7747 if (rsm->r_no_rtt_allowed) { 7748 /* Not allowed */ 7749 return (0); 7750 } 7751 if (ack_type == CUM_ACKED) { 7752 if (SEQ_GT(th_ack, rsm->r_end)) { 7753 len_acked = rsm->r_end - rsm->r_start; 7754 all = 1; 7755 } else { 7756 len_acked = th_ack - rsm->r_start; 7757 all = 0; 7758 } 7759 } else { 7760 len_acked = rsm->r_end - rsm->r_start; 7761 all = 0; 7762 } 7763 if (rsm->r_rtr_cnt == 1) { 7764 7765 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7766 if ((int)t <= 0) 7767 t = 1; 7768 if (!tp->t_rttlow || tp->t_rttlow > t) 7769 tp->t_rttlow = t; 7770 if (!rack->r_ctl.rc_rack_min_rtt || 7771 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7772 rack->r_ctl.rc_rack_min_rtt = t; 7773 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7774 rack->r_ctl.rc_rack_min_rtt = 1; 7775 } 7776 } 7777 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7778 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7779 else 7780 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7781 if (us_rtt == 0) 7782 us_rtt = 1; 7783 if (CC_ALGO(tp)->rttsample != NULL) { 7784 /* Kick the RTT to the CC */ 7785 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7786 } 7787 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7788 if (ack_type == SACKED) { 7789 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7790 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7791 } else { 7792 /* 7793 * We need to setup what our confidence 7794 * is in this ack. 7795 * 7796 * If the rsm was app limited and it is 7797 * less than a mss in length (the end 7798 * of the send) then we have a gap. If we 7799 * were app limited but say we were sending 7800 * multiple MSS's then we are more confident 7801 * int it. 7802 * 7803 * When we are not app-limited then we see if 7804 * the rsm is being included in the current 7805 * measurement, we tell this by the app_limited_needs_set 7806 * flag. 7807 * 7808 * Note that being cwnd blocked is not applimited 7809 * as well as the pacing delay between packets which 7810 * are sending only 1 or 2 MSS's also will show up 7811 * in the RTT. 
We probably need to examine this algorithm 7812 * a bit more and enhance it to account for the delay 7813 * between rsm's. We could do that by saving off the 7814 * pacing delay of each rsm (in an rsm) and then 7815 * factoring that in somehow though for now I am 7816 * not sure how :) 7817 */ 7818 int calc_conf = 0; 7819 7820 if (rsm->r_flags & RACK_APP_LIMITED) { 7821 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7822 calc_conf = 0; 7823 else 7824 calc_conf = 1; 7825 } else if (rack->app_limited_needs_set == 0) { 7826 calc_conf = 1; 7827 } else { 7828 calc_conf = 0; 7829 } 7830 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7831 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7832 calc_conf, rsm, rsm->r_rtr_cnt); 7833 } 7834 if ((rsm->r_flags & RACK_TLP) && 7835 (!IN_FASTRECOVERY(tp->t_flags))) { 7836 /* Segment was a TLP and our retrans matched */ 7837 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7838 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7839 } 7840 } 7841 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7842 /* New more recent rack_tmit_time */ 7843 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7844 rack->rc_rack_rtt = t; 7845 } 7846 return (1); 7847 } 7848 /* 7849 * We clear the soft/rxtshift since we got an ack. 7850 * There is no assurance we will call the commit() function 7851 * so we need to clear these to avoid incorrect handling. 7852 */ 7853 tp->t_rxtshift = 0; 7854 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7855 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7856 tp->t_softerror = 0; 7857 if (to && (to->to_flags & TOF_TS) && 7858 (ack_type == CUM_ACKED) && 7859 (to->to_tsecr) && 7860 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7861 /* 7862 * Now which timestamp does it match? In this block the ACK 7863 * must be coming from a previous transmission. 7864 */ 7865 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7866 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7867 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7868 if ((int)t <= 0) 7869 t = 1; 7870 if (CC_ALGO(tp)->rttsample != NULL) { 7871 /* 7872 * Kick the RTT to the CC, here 7873 * we lie a bit in that we know the 7874 * retransmission is correct even though 7875 * we retransmitted. This is because 7876 * we match the timestamps. 7877 */ 7878 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7879 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7880 else 7881 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7882 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7883 } 7884 if ((i + 1) < rsm->r_rtr_cnt) { 7885 /* 7886 * The peer ack'd from our previous 7887 * transmission. We have a spurious 7888 * retransmission and thus we dont 7889 * want to update our rack_rtt. 7890 * 7891 * Hmm should there be a CC revert here? 
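 * (We do no reverting here; the sample is simply discarded and
 * rack_rtt is left as is.)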
7892 * 7893 */ 7894 return (0); 7895 } 7896 if (!tp->t_rttlow || tp->t_rttlow > t) 7897 tp->t_rttlow = t; 7898 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7899 rack->r_ctl.rc_rack_min_rtt = t; 7900 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7901 rack->r_ctl.rc_rack_min_rtt = 1; 7902 } 7903 } 7904 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7905 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7906 /* New more recent rack_tmit_time */ 7907 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7908 rack->rc_rack_rtt = t; 7909 } 7910 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7911 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7912 rsm->r_rtr_cnt); 7913 return (1); 7914 } 7915 } 7916 goto ts_not_found; 7917 } else { 7918 /* 7919 * Ok its a SACK block that we retransmitted. or a windows 7920 * machine without timestamps. We can tell nothing from the 7921 * time-stamp since its not there or the time the peer last 7922 * recieved a segment that moved forward its cum-ack point. 7923 */ 7924 ts_not_found: 7925 i = rsm->r_rtr_cnt - 1; 7926 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7927 if ((int)t <= 0) 7928 t = 1; 7929 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7930 /* 7931 * We retransmitted and the ack came back in less 7932 * than the smallest rtt we have observed. We most 7933 * likely did an improper retransmit as outlined in 7934 * 6.2 Step 2 point 2 in the rack-draft so we 7935 * don't want to update our rack_rtt. We in 7936 * theory (in future) might want to think about reverting our 7937 * cwnd state but we won't for now. 7938 */ 7939 return (0); 7940 } else if (rack->r_ctl.rc_rack_min_rtt) { 7941 /* 7942 * We retransmitted it and the retransmit did the 7943 * job. 7944 */ 7945 if (!rack->r_ctl.rc_rack_min_rtt || 7946 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7947 rack->r_ctl.rc_rack_min_rtt = t; 7948 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7949 rack->r_ctl.rc_rack_min_rtt = 1; 7950 } 7951 } 7952 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 7953 /* New more recent rack_tmit_time */ 7954 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 7955 rack->rc_rack_rtt = t; 7956 } 7957 return (1); 7958 } 7959 } 7960 return (0); 7961 } 7962 7963 /* 7964 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 7965 */ 7966 static void 7967 rack_log_sack_passed(struct tcpcb *tp, 7968 struct tcp_rack *rack, struct rack_sendmap *rsm) 7969 { 7970 struct rack_sendmap *nrsm; 7971 7972 nrsm = rsm; 7973 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 7974 rack_head, r_tnext) { 7975 if (nrsm == rsm) { 7976 /* Skip orginal segment he is acked */ 7977 continue; 7978 } 7979 if (nrsm->r_flags & RACK_ACKED) { 7980 /* 7981 * Skip ack'd segments, though we 7982 * should not see these, since tmap 7983 * should not have ack'd segments. 7984 */ 7985 continue; 7986 } 7987 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 7988 /* 7989 * If the peer dropped the rwnd on 7990 * these then we don't worry about them. 7991 */ 7992 continue; 7993 } 7994 if (nrsm->r_flags & RACK_SACK_PASSED) { 7995 /* 7996 * We found one that is already marked 7997 * passed, we have been here before and 7998 * so all others below this are marked. 
7999 */ 8000 break; 8001 } 8002 nrsm->r_flags |= RACK_SACK_PASSED; 8003 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8004 } 8005 } 8006 8007 static void 8008 rack_need_set_test(struct tcpcb *tp, 8009 struct tcp_rack *rack, 8010 struct rack_sendmap *rsm, 8011 tcp_seq th_ack, 8012 int line, 8013 int use_which) 8014 { 8015 8016 if ((tp->t_flags & TF_GPUTINPROG) && 8017 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8018 /* 8019 * We were app limited, and this ack 8020 * butts up or goes beyond the point where we want 8021 * to start our next measurement. We need 8022 * to record the new gput_ts as here and 8023 * possibly update the start sequence. 8024 */ 8025 uint32_t seq, ts; 8026 8027 if (rsm->r_rtr_cnt > 1) { 8028 /* 8029 * This is a retransmit, can we 8030 * really make any assessment at this 8031 * point? We are not really sure of 8032 * the timestamp, is it this or the 8033 * previous transmission? 8034 * 8035 * Lets wait for something better that 8036 * is not retransmitted. 8037 */ 8038 return; 8039 } 8040 seq = tp->gput_seq; 8041 ts = tp->gput_ts; 8042 rack->app_limited_needs_set = 0; 8043 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8044 /* Do we start at a new end? */ 8045 if ((use_which == RACK_USE_BEG) && 8046 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8047 /* 8048 * When we get an ACK that just eats 8049 * up some of the rsm, we set RACK_USE_BEG 8050 * since whats at r_start (i.e. th_ack) 8051 * is left unacked and thats where the 8052 * measurement not starts. 8053 */ 8054 tp->gput_seq = rsm->r_start; 8055 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8056 } 8057 if ((use_which == RACK_USE_END) && 8058 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8059 /* 8060 * We use the end when the cumack 8061 * is moving forward and completely 8062 * deleting the rsm passed so basically 8063 * r_end holds th_ack. 8064 * 8065 * For SACK's we also want to use the end 8066 * since this piece just got sacked and 8067 * we want to target anything after that 8068 * in our measurement. 8069 */ 8070 tp->gput_seq = rsm->r_end; 8071 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8072 } 8073 if (use_which == RACK_USE_END_OR_THACK) { 8074 /* 8075 * special case for ack moving forward, 8076 * not a sack, we need to move all the 8077 * way up to where this ack cum-ack moves 8078 * to. 8079 */ 8080 if (SEQ_GT(th_ack, rsm->r_end)) 8081 tp->gput_seq = th_ack; 8082 else 8083 tp->gput_seq = rsm->r_end; 8084 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8085 } 8086 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8087 /* 8088 * We moved beyond this guy's range, re-calculate 8089 * the new end point. 8090 */ 8091 if (rack->rc_gp_filled == 0) { 8092 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8093 } else { 8094 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8095 } 8096 } 8097 /* 8098 * We are moving the goal post, we may be able to clear the 8099 * measure_saw_probe_rtt flag. 
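 * Once gput_seq has advanced past the snd_max recorded when we left
 * probe-rtt (rc_probertt_sndmax_atexit), none of the probe-rtt era data is
 * inside the measurement any more, so the flag can be cleared (provided we
 * are not currently in probe-rtt).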
8100 */ 8101 if ((rack->in_probe_rtt == 0) && 8102 (rack->measure_saw_probe_rtt) && 8103 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8104 rack->measure_saw_probe_rtt = 0; 8105 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8106 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8107 if (rack->rc_gp_filled && 8108 ((tp->gput_ack - tp->gput_seq) < 8109 max(rc_init_window(rack), (MIN_GP_WIN * 8110 ctf_fixed_maxseg(tp))))) { 8111 uint32_t ideal_amount; 8112 8113 ideal_amount = rack_get_measure_window(tp, rack); 8114 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 8115 /* 8116 * There is no sense of continuing this measurement 8117 * because its too small to gain us anything we 8118 * trust. Skip it and that way we can start a new 8119 * measurement quicker. 8120 */ 8121 tp->t_flags &= ~TF_GPUTINPROG; 8122 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8123 0, 0, 0, 6, __LINE__, NULL, 0); 8124 } else { 8125 /* 8126 * Reset the window further out. 8127 */ 8128 tp->gput_ack = tp->gput_seq + ideal_amount; 8129 } 8130 } 8131 } 8132 } 8133 8134 static inline int 8135 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8136 { 8137 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8138 /* Behind our TLP definition or right at */ 8139 return (0); 8140 } 8141 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8142 /* The start is beyond or right at our end of TLP definition */ 8143 return (0); 8144 } 8145 /* It has to be a sub-part of the original TLP recorded */ 8146 return (1); 8147 } 8148 8149 8150 static uint32_t 8151 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8152 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8153 { 8154 uint32_t start, end, changed = 0; 8155 struct rack_sendmap stack_map; 8156 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8157 #ifdef INVARIANTS 8158 struct rack_sendmap *insret; 8159 #endif 8160 int32_t used_ref = 1; 8161 int moved = 0; 8162 8163 start = sack->start; 8164 end = sack->end; 8165 rsm = *prsm; 8166 memset(&fe, 0, sizeof(fe)); 8167 do_rest_ofb: 8168 if ((rsm == NULL) || 8169 (SEQ_LT(end, rsm->r_start)) || 8170 (SEQ_GEQ(start, rsm->r_end)) || 8171 (SEQ_LT(start, rsm->r_start))) { 8172 /* 8173 * We are not in the right spot, 8174 * find the correct spot in the tree. 8175 */ 8176 used_ref = 0; 8177 fe.r_start = start; 8178 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8179 moved++; 8180 } 8181 if (rsm == NULL) { 8182 /* TSNH */ 8183 goto out; 8184 } 8185 /* Ok we have an ACK for some piece of this rsm */ 8186 if (rsm->r_start != start) { 8187 if ((rsm->r_flags & RACK_ACKED) == 0) { 8188 /* 8189 * Before any splitting or hookery is 8190 * done is it a TLP of interest i.e. rxt? 8191 */ 8192 if ((rsm->r_flags & RACK_TLP) && 8193 (rsm->r_rtr_cnt > 1)) { 8194 /* 8195 * We are splitting a rxt TLP, check 8196 * if we need to save off the start/end 8197 */ 8198 if (rack->rc_last_tlp_acked_set && 8199 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8200 /* 8201 * We already turned this on since we are inside 8202 * the previous one was a partially sack now we 8203 * are getting another one (maybe all of it). 8204 * 8205 */ 8206 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8207 /* 8208 * Lets make sure we have all of it though. 
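 *
 * Small numeric illustration (made-up sequence numbers): if the
 * recorded TLP block is last_tlp_acked_start = 5000,
 * last_tlp_acked_end = 6000 and this rsm spans [4500,6500), the two
 * checks just below widen the record to [4500,6500), so a later
 * DSACK landing anywhere inside the retransmitted TLP is still
 * attributed to the TLP rather than treated as reordering.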
8209 */ 8210 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8211 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8212 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8213 rack->r_ctl.last_tlp_acked_end); 8214 } 8215 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8216 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8217 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8218 rack->r_ctl.last_tlp_acked_end); 8219 } 8220 } else { 8221 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8222 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8223 rack->rc_last_tlp_past_cumack = 0; 8224 rack->rc_last_tlp_acked_set = 1; 8225 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8226 } 8227 } 8228 /** 8229 * Need to split this in two pieces the before and after, 8230 * the before remains in the map, the after must be 8231 * added. In other words we have: 8232 * rsm |--------------| 8233 * sackblk |-------> 8234 * rsm will become 8235 * rsm |---| 8236 * and nrsm will be the sacked piece 8237 * nrsm |----------| 8238 * 8239 * But before we start down that path lets 8240 * see if the sack spans over on top of 8241 * the next guy and it is already sacked. 8242 * 8243 */ 8244 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8245 if (next && (next->r_flags & RACK_ACKED) && 8246 SEQ_GEQ(end, next->r_start)) { 8247 /** 8248 * So the next one is already acked, and 8249 * we can thus by hookery use our stack_map 8250 * to reflect the piece being sacked and 8251 * then adjust the two tree entries moving 8252 * the start and ends around. So we start like: 8253 * rsm |------------| (not-acked) 8254 * next |-----------| (acked) 8255 * sackblk |--------> 8256 * We want to end like so: 8257 * rsm |------| (not-acked) 8258 * next |-----------------| (acked) 8259 * nrsm |-----| 8260 * Where nrsm is a temporary stack piece we 8261 * use to update all the gizmos. 8262 */ 8263 /* Copy up our fudge block */ 8264 nrsm = &stack_map; 8265 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8266 /* Now adjust our tree blocks */ 8267 rsm->r_end = start; 8268 next->r_start = start; 8269 /* Now we must adjust back where next->m is */ 8270 rack_setup_offset_for_rsm(rsm, next); 8271 8272 /* We don't need to adjust rsm, it did not change */ 8273 /* Clear out the dup ack count of the remainder */ 8274 rsm->r_dupack = 0; 8275 rsm->r_just_ret = 0; 8276 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8277 /* Now lets make sure our fudge block is right */ 8278 nrsm->r_start = start; 8279 /* Now lets update all the stats and such */ 8280 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8281 if (rack->app_limited_needs_set) 8282 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8283 changed += (nrsm->r_end - nrsm->r_start); 8284 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8285 if (nrsm->r_flags & RACK_SACK_PASSED) { 8286 rack->r_ctl.rc_reorder_ts = cts; 8287 } 8288 /* 8289 * Now we want to go up from rsm (the 8290 * one left un-acked) to the next one 8291 * in the tmap. We do this so when 8292 * we walk backwards we include marking 8293 * sack-passed on rsm (The one passed in 8294 * is skipped since it is generally called 8295 * on something sacked before removing it 8296 * from the tmap). 8297 */ 8298 if (rsm->r_in_tmap) { 8299 nrsm = TAILQ_NEXT(rsm, r_tnext); 8300 /* 8301 * Now that we have the next 8302 * one walk backwards from there. 
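 *
 * Example of why the walk starts at TAILQ_NEXT(rsm) (hypothetical
 * numbers): if rsm covered [1000,2000) and the SACK began at 1500,
 * rsm now holds the still-missing [1000,1500) while next absorbed
 * the sacked piece.  Starting the backwards walk one entry beyond
 * rsm lets the walk visit rsm itself and mark [1000,1500) with
 * RACK_SACK_PASSED, which is what arms RACK's reordering and loss
 * checks for that data.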
8303 */ 8304 if (nrsm && nrsm->r_in_tmap) 8305 rack_log_sack_passed(tp, rack, nrsm); 8306 } 8307 /* Now are we done? */ 8308 if (SEQ_LT(end, next->r_end) || 8309 (end == next->r_end)) { 8310 /* Done with block */ 8311 goto out; 8312 } 8313 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8314 counter_u64_add(rack_sack_used_next_merge, 1); 8315 /* Postion for the next block */ 8316 start = next->r_end; 8317 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8318 if (rsm == NULL) 8319 goto out; 8320 } else { 8321 /** 8322 * We can't use any hookery here, so we 8323 * need to split the map. We enter like 8324 * so: 8325 * rsm |--------| 8326 * sackblk |-----> 8327 * We will add the new block nrsm and 8328 * that will be the new portion, and then 8329 * fall through after reseting rsm. So we 8330 * split and look like this: 8331 * rsm |----| 8332 * sackblk |-----> 8333 * nrsm |---| 8334 * We then fall through reseting 8335 * rsm to nrsm, so the next block 8336 * picks it up. 8337 */ 8338 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8339 if (nrsm == NULL) { 8340 /* 8341 * failed XXXrrs what can we do but loose the sack 8342 * info? 8343 */ 8344 goto out; 8345 } 8346 counter_u64_add(rack_sack_splits, 1); 8347 rack_clone_rsm(rack, nrsm, rsm, start); 8348 rsm->r_just_ret = 0; 8349 #ifndef INVARIANTS 8350 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8351 #else 8352 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8353 if (insret != NULL) { 8354 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8355 nrsm, insret, rack, rsm); 8356 } 8357 #endif 8358 if (rsm->r_in_tmap) { 8359 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8360 nrsm->r_in_tmap = 1; 8361 } 8362 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8363 rsm->r_flags &= (~RACK_HAS_FIN); 8364 /* Position us to point to the new nrsm that starts the sack blk */ 8365 rsm = nrsm; 8366 } 8367 } else { 8368 /* Already sacked this piece */ 8369 counter_u64_add(rack_sack_skipped_acked, 1); 8370 moved++; 8371 if (end == rsm->r_end) { 8372 /* Done with block */ 8373 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8374 goto out; 8375 } else if (SEQ_LT(end, rsm->r_end)) { 8376 /* A partial sack to a already sacked block */ 8377 moved++; 8378 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8379 goto out; 8380 } else { 8381 /* 8382 * The end goes beyond this guy 8383 * reposition the start to the 8384 * next block. 8385 */ 8386 start = rsm->r_end; 8387 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8388 if (rsm == NULL) 8389 goto out; 8390 } 8391 } 8392 } 8393 if (SEQ_GEQ(end, rsm->r_end)) { 8394 /** 8395 * The end of this block is either beyond this guy or right 8396 * at this guy. I.e.: 8397 * rsm --- |-----| 8398 * end |-----| 8399 * <or> 8400 * end |---------| 8401 */ 8402 if ((rsm->r_flags & RACK_ACKED) == 0) { 8403 /* 8404 * Is it a TLP of interest? 8405 */ 8406 if ((rsm->r_flags & RACK_TLP) && 8407 (rsm->r_rtr_cnt > 1)) { 8408 /* 8409 * We are splitting a rxt TLP, check 8410 * if we need to save off the start/end 8411 */ 8412 if (rack->rc_last_tlp_acked_set && 8413 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8414 /* 8415 * We already turned this on since we are inside 8416 * the previous one was a partially sack now we 8417 * are getting another one (maybe all of it). 
8418 */ 8419 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8420 /* 8421 * Lets make sure we have all of it though. 8422 */ 8423 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8424 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8425 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8426 rack->r_ctl.last_tlp_acked_end); 8427 } 8428 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8429 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8430 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8431 rack->r_ctl.last_tlp_acked_end); 8432 } 8433 } else { 8434 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8435 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8436 rack->rc_last_tlp_past_cumack = 0; 8437 rack->rc_last_tlp_acked_set = 1; 8438 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8439 } 8440 } 8441 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8442 changed += (rsm->r_end - rsm->r_start); 8443 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8444 if (rsm->r_in_tmap) /* should be true */ 8445 rack_log_sack_passed(tp, rack, rsm); 8446 /* Is Reordering occuring? */ 8447 if (rsm->r_flags & RACK_SACK_PASSED) { 8448 rsm->r_flags &= ~RACK_SACK_PASSED; 8449 rack->r_ctl.rc_reorder_ts = cts; 8450 } 8451 if (rack->app_limited_needs_set) 8452 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8453 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8454 rsm->r_flags |= RACK_ACKED; 8455 if (rsm->r_in_tmap) { 8456 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8457 rsm->r_in_tmap = 0; 8458 } 8459 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8460 } else { 8461 counter_u64_add(rack_sack_skipped_acked, 1); 8462 moved++; 8463 } 8464 if (end == rsm->r_end) { 8465 /* This block only - done, setup for next */ 8466 goto out; 8467 } 8468 /* 8469 * There is more not coverend by this rsm move on 8470 * to the next block in the RB tree. 8471 */ 8472 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8473 start = rsm->r_end; 8474 rsm = nrsm; 8475 if (rsm == NULL) 8476 goto out; 8477 goto do_rest_ofb; 8478 } 8479 /** 8480 * The end of this sack block is smaller than 8481 * our rsm i.e.: 8482 * rsm --- |-----| 8483 * end |--| 8484 */ 8485 if ((rsm->r_flags & RACK_ACKED) == 0) { 8486 /* 8487 * Is it a TLP of interest? 8488 */ 8489 if ((rsm->r_flags & RACK_TLP) && 8490 (rsm->r_rtr_cnt > 1)) { 8491 /* 8492 * We are splitting a rxt TLP, check 8493 * if we need to save off the start/end 8494 */ 8495 if (rack->rc_last_tlp_acked_set && 8496 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8497 /* 8498 * We already turned this on since we are inside 8499 * the previous one was a partially sack now we 8500 * are getting another one (maybe all of it). 8501 */ 8502 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8503 /* 8504 * Lets make sure we have all of it though. 
8505 */ 8506 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8507 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8508 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8509 rack->r_ctl.last_tlp_acked_end); 8510 } 8511 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8512 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8513 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8514 rack->r_ctl.last_tlp_acked_end); 8515 } 8516 } else { 8517 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8518 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8519 rack->rc_last_tlp_past_cumack = 0; 8520 rack->rc_last_tlp_acked_set = 1; 8521 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8522 } 8523 } 8524 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8525 if (prev && 8526 (prev->r_flags & RACK_ACKED)) { 8527 /** 8528 * Goal, we want the right remainder of rsm to shrink 8529 * in place and span from (rsm->r_start = end) to rsm->r_end. 8530 * We want to expand prev to go all the way 8531 * to prev->r_end <- end. 8532 * so in the tree we have before: 8533 * prev |--------| (acked) 8534 * rsm |-------| (non-acked) 8535 * sackblk |-| 8536 * We churn it so we end up with 8537 * prev |----------| (acked) 8538 * rsm |-----| (non-acked) 8539 * nrsm |-| (temporary) 8540 * 8541 * Note if either prev/rsm is a TLP we don't 8542 * do this. 8543 */ 8544 nrsm = &stack_map; 8545 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8546 prev->r_end = end; 8547 rsm->r_start = end; 8548 /* Now adjust nrsm (stack copy) to be 8549 * the one that is the small 8550 * piece that was "sacked". 8551 */ 8552 nrsm->r_end = end; 8553 rsm->r_dupack = 0; 8554 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8555 /* 8556 * Now that the rsm has had its start moved forward 8557 * lets go ahead and get its new place in the world. 8558 */ 8559 rack_setup_offset_for_rsm(prev, rsm); 8560 /* 8561 * Now nrsm is our new little piece 8562 * that is acked (which was merged 8563 * to prev). Update the rtt and changed 8564 * based on that. Also check for reordering. 8565 */ 8566 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8567 if (rack->app_limited_needs_set) 8568 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8569 changed += (nrsm->r_end - nrsm->r_start); 8570 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8571 if (nrsm->r_flags & RACK_SACK_PASSED) { 8572 rack->r_ctl.rc_reorder_ts = cts; 8573 } 8574 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8575 rsm = prev; 8576 counter_u64_add(rack_sack_used_prev_merge, 1); 8577 } else { 8578 /** 8579 * This is the case where our previous 8580 * block is not acked either, so we must 8581 * split the block in two. 8582 */ 8583 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8584 if (nrsm == NULL) { 8585 /* failed rrs what can we do but loose the sack info? */ 8586 goto out; 8587 } 8588 if ((rsm->r_flags & RACK_TLP) && 8589 (rsm->r_rtr_cnt > 1)) { 8590 /* 8591 * We are splitting a rxt TLP, check 8592 * if we need to save off the start/end 8593 */ 8594 if (rack->rc_last_tlp_acked_set && 8595 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8596 /* 8597 * We already turned this on since this block is inside 8598 * the previous one was a partially sack now we 8599 * are getting another one (maybe all of it). 8600 */ 8601 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8602 /* 8603 * Lets make sure we have all of it though. 
8604 */ 8605 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8606 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8607 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8608 rack->r_ctl.last_tlp_acked_end); 8609 } 8610 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8611 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8612 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8613 rack->r_ctl.last_tlp_acked_end); 8614 } 8615 } else { 8616 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8617 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8618 rack->rc_last_tlp_acked_set = 1; 8619 rack->rc_last_tlp_past_cumack = 0; 8620 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8621 } 8622 } 8623 /** 8624 * In this case nrsm becomes 8625 * nrsm->r_start = end; 8626 * nrsm->r_end = rsm->r_end; 8627 * which is un-acked. 8628 * <and> 8629 * rsm->r_end = nrsm->r_start; 8630 * i.e. the remaining un-acked 8631 * piece is left on the left 8632 * hand side. 8633 * 8634 * So we start like this 8635 * rsm |----------| (not acked) 8636 * sackblk |---| 8637 * build it so we have 8638 * rsm |---| (acked) 8639 * nrsm |------| (not acked) 8640 */ 8641 counter_u64_add(rack_sack_splits, 1); 8642 rack_clone_rsm(rack, nrsm, rsm, end); 8643 rsm->r_flags &= (~RACK_HAS_FIN); 8644 rsm->r_just_ret = 0; 8645 #ifndef INVARIANTS 8646 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8647 #else 8648 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8649 if (insret != NULL) { 8650 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8651 nrsm, insret, rack, rsm); 8652 } 8653 #endif 8654 if (rsm->r_in_tmap) { 8655 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8656 nrsm->r_in_tmap = 1; 8657 } 8658 nrsm->r_dupack = 0; 8659 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8660 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8661 changed += (rsm->r_end - rsm->r_start); 8662 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8663 if (rsm->r_in_tmap) /* should be true */ 8664 rack_log_sack_passed(tp, rack, rsm); 8665 /* Is Reordering occuring? */ 8666 if (rsm->r_flags & RACK_SACK_PASSED) { 8667 rsm->r_flags &= ~RACK_SACK_PASSED; 8668 rack->r_ctl.rc_reorder_ts = cts; 8669 } 8670 if (rack->app_limited_needs_set) 8671 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8672 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8673 rsm->r_flags |= RACK_ACKED; 8674 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8675 if (rsm->r_in_tmap) { 8676 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8677 rsm->r_in_tmap = 0; 8678 } 8679 } 8680 } else if (start != end){ 8681 /* 8682 * The block was already acked. 8683 */ 8684 counter_u64_add(rack_sack_skipped_acked, 1); 8685 moved++; 8686 } 8687 out: 8688 if (rsm && 8689 ((rsm->r_flags & RACK_TLP) == 0) && 8690 (rsm->r_flags & RACK_ACKED)) { 8691 /* 8692 * Now can we merge where we worked 8693 * with either the previous or 8694 * next block? 8695 */ 8696 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8697 while (next) { 8698 if (next->r_flags & RACK_TLP) 8699 break; 8700 if (next->r_flags & RACK_ACKED) { 8701 /* yep this and next can be merged */ 8702 rsm = rack_merge_rsm(rack, rsm, next); 8703 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8704 } else 8705 break; 8706 } 8707 /* Now what about the previous? 
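 *
 * For example (made-up ranges): if the tree now holds acked entries
 * [3000,4000) and [4000,5500) back to back, the loop above folds
 * them into a single [3000,5500) entry; the loop below does the
 * same toward lower sequence space.  Collapsing adjacent acked
 * entries keeps the sendmap small, which bounds the cost of the
 * RB-tree lookups performed for every later SACK block.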
*/ 8708 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8709 while (prev) { 8710 if (prev->r_flags & RACK_TLP) 8711 break; 8712 if (prev->r_flags & RACK_ACKED) { 8713 /* yep the previous and this can be merged */ 8714 rsm = rack_merge_rsm(rack, prev, rsm); 8715 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8716 } else 8717 break; 8718 } 8719 } 8720 if (used_ref == 0) { 8721 counter_u64_add(rack_sack_proc_all, 1); 8722 } else { 8723 counter_u64_add(rack_sack_proc_short, 1); 8724 } 8725 /* Save off the next one for quick reference. */ 8726 if (rsm) 8727 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8728 else 8729 nrsm = NULL; 8730 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8731 /* Pass back the moved. */ 8732 *moved_two = moved; 8733 return (changed); 8734 } 8735 8736 static void inline 8737 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8738 { 8739 struct rack_sendmap *tmap; 8740 8741 tmap = NULL; 8742 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8743 /* Its no longer sacked, mark it so */ 8744 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8745 #ifdef INVARIANTS 8746 if (rsm->r_in_tmap) { 8747 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8748 rack, rsm, rsm->r_flags); 8749 } 8750 #endif 8751 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8752 /* Rebuild it into our tmap */ 8753 if (tmap == NULL) { 8754 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8755 tmap = rsm; 8756 } else { 8757 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8758 tmap = rsm; 8759 } 8760 tmap->r_in_tmap = 1; 8761 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8762 } 8763 /* 8764 * Now lets possibly clear the sack filter so we start 8765 * recognizing sacks that cover this area. 8766 */ 8767 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8768 8769 } 8770 8771 static void 8772 rack_do_decay(struct tcp_rack *rack) 8773 { 8774 struct timeval res; 8775 8776 #define timersub(tvp, uvp, vvp) \ 8777 do { \ 8778 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8779 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8780 if ((vvp)->tv_usec < 0) { \ 8781 (vvp)->tv_sec--; \ 8782 (vvp)->tv_usec += 1000000; \ 8783 } \ 8784 } while (0) 8785 8786 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8787 #undef timersub 8788 8789 rack->r_ctl.input_pkt++; 8790 if ((rack->rc_in_persist) || 8791 (res.tv_sec >= 1) || 8792 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8793 /* 8794 * Check for decay of non-SAD, 8795 * we want all SAD detection metrics to 8796 * decay 1/4 per second (or more) passed. 8797 */ 8798 #ifdef NETFLIX_EXP_DETECTION 8799 uint32_t pkt_delta; 8800 8801 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8802 #endif 8803 /* Update our saved tracking values */ 8804 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8805 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8806 /* Now do we escape without decay? */ 8807 #ifdef NETFLIX_EXP_DETECTION 8808 if (rack->rc_in_persist || 8809 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8810 (pkt_delta < tcp_sad_low_pps)){ 8811 /* 8812 * We don't decay idle connections 8813 * or ones that have a low input pps. 
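 *
 * Minimal sketch of the decay cadence (values are illustrative
 * only): with act_rcv_time = {105 s, 250000 us} and
 * rc_last_time_decay = {104 s, 100000 us}, the timersub above gives
 * res = {1 s, 150000 us}, so res.tv_sec >= 1 and the SAD counters
 * become eligible for decay, provided the connection is neither
 * idle nor below the pps floor noted above.  A burst of ACKs all
 * arriving within the same second leaves the counters untouched.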
8814 */ 8815 return; 8816 } 8817 /* Decay the counters */ 8818 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8819 tcp_sad_decay_val); 8820 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8821 tcp_sad_decay_val); 8822 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8823 tcp_sad_decay_val); 8824 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8825 tcp_sad_decay_val); 8826 #endif 8827 } 8828 } 8829 8830 static void 8831 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8832 { 8833 struct rack_sendmap *rsm; 8834 #ifdef INVARIANTS 8835 struct rack_sendmap *rm; 8836 #endif 8837 8838 /* 8839 * The ACK point is advancing to th_ack, we must drop off 8840 * the packets in the rack log and calculate any eligble 8841 * RTT's. 8842 */ 8843 rack->r_wanted_output = 1; 8844 8845 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 8846 if ((rack->rc_last_tlp_acked_set == 1)&& 8847 (rack->rc_last_tlp_past_cumack == 1) && 8848 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8849 /* 8850 * We have reached the point where our last rack 8851 * tlp retransmit sequence is ahead of the cum-ack. 8852 * This can only happen when the cum-ack moves all 8853 * the way around (its been a full 2^^31+1 bytes 8854 * or more since we sent a retransmitted TLP). Lets 8855 * turn off the valid flag since its not really valid. 8856 * 8857 * Note since sack's also turn on this event we have 8858 * a complication, we have to wait to age it out until 8859 * the cum-ack is by the TLP before checking which is 8860 * what the next else clause does. 8861 */ 8862 rack_log_dsack_event(rack, 9, __LINE__, 8863 rack->r_ctl.last_tlp_acked_start, 8864 rack->r_ctl.last_tlp_acked_end); 8865 rack->rc_last_tlp_acked_set = 0; 8866 rack->rc_last_tlp_past_cumack = 0; 8867 } else if ((rack->rc_last_tlp_acked_set == 1) && 8868 (rack->rc_last_tlp_past_cumack == 0) && 8869 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8870 /* 8871 * It is safe to start aging TLP's out. 8872 */ 8873 rack->rc_last_tlp_past_cumack = 1; 8874 } 8875 /* We do the same for the tlp send seq as well */ 8876 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8877 (rack->rc_last_sent_tlp_past_cumack == 1) && 8878 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8879 rack_log_dsack_event(rack, 9, __LINE__, 8880 rack->r_ctl.last_sent_tlp_seq, 8881 (rack->r_ctl.last_sent_tlp_seq + 8882 rack->r_ctl.last_sent_tlp_len)); 8883 rack->rc_last_sent_tlp_seq_valid = 0; 8884 rack->rc_last_sent_tlp_past_cumack = 0; 8885 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8886 (rack->rc_last_sent_tlp_past_cumack == 0) && 8887 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8888 /* 8889 * It is safe to start aging TLP's send. 8890 */ 8891 rack->rc_last_sent_tlp_past_cumack = 1; 8892 } 8893 more: 8894 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8895 if (rsm == NULL) { 8896 if ((th_ack - 1) == tp->iss) { 8897 /* 8898 * For the SYN incoming case we will not 8899 * have called tcp_output for the sending of 8900 * the SYN, so there will be no map. All 8901 * other cases should probably be a panic. 
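 *
 * Concrete case of the SYN check above (illustrative iss): if our
 * iss was 1000, the SYN consumed sequence 1000 and the first ACK
 * of the handshake carries th_ack == 1001, so
 * (th_ack - 1) == tp->iss matches and there is legitimately no
 * sendmap entry to process.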
8902 */ 8903 return; 8904 } 8905 if (tp->t_flags & TF_SENTFIN) { 8906 /* if we sent a FIN we often will not have map */ 8907 return; 8908 } 8909 #ifdef INVARIANTS 8910 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8911 tp, 8912 tp->t_state, th_ack, rack, 8913 tp->snd_una, tp->snd_max, tp->snd_nxt); 8914 #endif 8915 return; 8916 } 8917 if (SEQ_LT(th_ack, rsm->r_start)) { 8918 /* Huh map is missing this */ 8919 #ifdef INVARIANTS 8920 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 8921 rsm->r_start, 8922 th_ack, tp->t_state, rack->r_state); 8923 #endif 8924 return; 8925 } 8926 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 8927 8928 /* Now was it a retransmitted TLP? */ 8929 if ((rsm->r_flags & RACK_TLP) && 8930 (rsm->r_rtr_cnt > 1)) { 8931 /* 8932 * Yes, this rsm was a TLP and retransmitted, remember that 8933 * since if a DSACK comes back on this we don't want 8934 * to think of it as a reordered segment. This may 8935 * get updated again with possibly even other TLPs 8936 * in flight, but thats ok. Only when we don't send 8937 * a retransmitted TLP for 1/2 the sequences space 8938 * will it get turned off (above). 8939 */ 8940 if (rack->rc_last_tlp_acked_set && 8941 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8942 /* 8943 * We already turned this on since the end matches, 8944 * the previous one was a partially ack now we 8945 * are getting another one (maybe all of it). 8946 */ 8947 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8948 /* 8949 * Lets make sure we have all of it though. 8950 */ 8951 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8952 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8953 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8954 rack->r_ctl.last_tlp_acked_end); 8955 } 8956 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8957 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8958 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8959 rack->r_ctl.last_tlp_acked_end); 8960 } 8961 } else { 8962 rack->rc_last_tlp_past_cumack = 1; 8963 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8964 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8965 rack->rc_last_tlp_acked_set = 1; 8966 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8967 } 8968 } 8969 /* Now do we consume the whole thing? */ 8970 if (SEQ_GEQ(th_ack, rsm->r_end)) { 8971 /* Its all consumed. 
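 *
 * Worked example (illustrative numbers): with sendmap entries
 * [1000,2448) and [2448,3896) outstanding and th_ack = 3000, the
 * first entry is wholly consumed and freed here, left becomes
 * 3000 - 2448 = 552, and we loop back to "more:" where the second
 * entry is only head-trimmed rather than freed.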
*/ 8972 uint32_t left; 8973 uint8_t newly_acked; 8974 8975 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8976 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8977 rsm->r_rtr_bytes = 0; 8978 /* Record the time of highest cumack sent */ 8979 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8980 #ifndef INVARIANTS 8981 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8982 #else 8983 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8984 if (rm != rsm) { 8985 panic("removing head in rack:%p rsm:%p rm:%p", 8986 rack, rsm, rm); 8987 } 8988 #endif 8989 if (rsm->r_in_tmap) { 8990 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8991 rsm->r_in_tmap = 0; 8992 } 8993 newly_acked = 1; 8994 if (rsm->r_flags & RACK_ACKED) { 8995 /* 8996 * It was acked on the scoreboard -- remove 8997 * it from total 8998 */ 8999 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9000 newly_acked = 0; 9001 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9002 /* 9003 * There are segments ACKED on the 9004 * scoreboard further up. We are seeing 9005 * reordering. 9006 */ 9007 rsm->r_flags &= ~RACK_SACK_PASSED; 9008 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9009 rsm->r_flags |= RACK_ACKED; 9010 rack->r_ctl.rc_reorder_ts = cts; 9011 if (rack->r_ent_rec_ns) { 9012 /* 9013 * We have sent no more, and we saw an sack 9014 * then ack arrive. 9015 */ 9016 rack->r_might_revert = 1; 9017 } 9018 } 9019 if ((rsm->r_flags & RACK_TO_REXT) && 9020 (tp->t_flags & TF_RCVD_TSTMP) && 9021 (to->to_flags & TOF_TS) && 9022 (to->to_tsecr != 0) && 9023 (tp->t_flags & TF_PREVVALID)) { 9024 /* 9025 * We can use the timestamp to see 9026 * if this retransmission was from the 9027 * first transmit. If so we made a mistake. 9028 */ 9029 tp->t_flags &= ~TF_PREVVALID; 9030 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9031 /* The first transmit is what this ack is for */ 9032 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 9033 } 9034 } 9035 left = th_ack - rsm->r_end; 9036 if (rack->app_limited_needs_set && newly_acked) 9037 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9038 /* Free back to zone */ 9039 rack_free(rack, rsm); 9040 if (left) { 9041 goto more; 9042 } 9043 /* Check for reneging */ 9044 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9045 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9046 /* 9047 * The peer has moved snd_una up to 9048 * the edge of this send, i.e. one 9049 * that it had previously acked. The only 9050 * way that can be true if the peer threw 9051 * away data (space issues) that it had 9052 * previously sacked (else it would have 9053 * given us snd_una up to (rsm->r_end). 9054 * We need to undo the acked markings here. 9055 * 9056 * Note we have to look to make sure th_ack is 9057 * our rsm->r_start in case we get an old ack 9058 * where th_ack is behind snd_una. 9059 */ 9060 rack_peer_reneges(rack, rsm, th_ack); 9061 } 9062 return; 9063 } 9064 if (rsm->r_flags & RACK_ACKED) { 9065 /* 9066 * It was acked on the scoreboard -- remove it from 9067 * total for the part being cum-acked. 9068 */ 9069 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9070 } 9071 /* 9072 * Clear the dup ack count for 9073 * the piece that remains. 9074 */ 9075 rsm->r_dupack = 0; 9076 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9077 if (rsm->r_rtr_bytes) { 9078 /* 9079 * It was retransmitted adjust the 9080 * sack holes for what was acked. 
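 *
 * Numeric sketch of the head-trim further below (made-up sizes):
 * if this rsm started at r_start = 2448 with soff = 0 into a
 * 1448 byte mbuf followed by a 2048 byte mbuf, then th_ack = 3000
 * leaves soff = 552 inside the first mbuf and only r_start moves,
 * while th_ack = 4000 pushes soff to 1552, the while loop steps to
 * the second mbuf with soff = 104 and orig_m_len is refreshed to
 * that mbuf's length.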
9081 */ 9082 int ack_am; 9083 9084 ack_am = (th_ack - rsm->r_start); 9085 if (ack_am >= rsm->r_rtr_bytes) { 9086 rack->r_ctl.rc_holes_rxt -= ack_am; 9087 rsm->r_rtr_bytes -= ack_am; 9088 } 9089 } 9090 /* 9091 * Update where the piece starts and record 9092 * the time of send of highest cumack sent. 9093 */ 9094 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9095 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9096 /* Now we need to move our offset forward too */ 9097 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9098 /* Fix up the orig_m_len and possibly the mbuf offset */ 9099 rack_adjust_orig_mlen(rsm); 9100 } 9101 rsm->soff += (th_ack - rsm->r_start); 9102 rsm->r_start = th_ack; 9103 /* Now do we need to move the mbuf fwd too? */ 9104 if (rsm->m) { 9105 while (rsm->soff >= rsm->m->m_len) { 9106 rsm->soff -= rsm->m->m_len; 9107 rsm->m = rsm->m->m_next; 9108 KASSERT((rsm->m != NULL), 9109 (" nrsm:%p hit at soff:%u null m", 9110 rsm, rsm->soff)); 9111 } 9112 rsm->orig_m_len = rsm->m->m_len; 9113 } 9114 if (rack->app_limited_needs_set) 9115 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9116 } 9117 9118 static void 9119 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9120 { 9121 struct rack_sendmap *rsm; 9122 int sack_pass_fnd = 0; 9123 9124 if (rack->r_might_revert) { 9125 /* 9126 * Ok we have reordering, have not sent anything, we 9127 * might want to revert the congestion state if nothing 9128 * further has SACK_PASSED on it. Lets check. 9129 * 9130 * We also get here when we have DSACKs come in for 9131 * all the data that we FR'd. Note that a rxt or tlp 9132 * timer clears this from happening. 9133 */ 9134 9135 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9136 if (rsm->r_flags & RACK_SACK_PASSED) { 9137 sack_pass_fnd = 1; 9138 break; 9139 } 9140 } 9141 if (sack_pass_fnd == 0) { 9142 /* 9143 * We went into recovery 9144 * incorrectly due to reordering! 9145 */ 9146 int orig_cwnd; 9147 9148 rack->r_ent_rec_ns = 0; 9149 orig_cwnd = tp->snd_cwnd; 9150 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9151 tp->snd_recover = tp->snd_una; 9152 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9153 EXIT_RECOVERY(tp->t_flags); 9154 } 9155 rack->r_might_revert = 0; 9156 } 9157 } 9158 9159 #ifdef NETFLIX_EXP_DETECTION 9160 static void 9161 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9162 { 9163 if ((rack->do_detection || tcp_force_detection) && 9164 tcp_sack_to_ack_thresh && 9165 tcp_sack_to_move_thresh && 9166 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9167 /* 9168 * We have thresholds set to find 9169 * possible attackers and disable sack. 9170 * Check them. 
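 *
 * Worked example of the ratios computed below (illustrative
 * counts): sack_count = 4500 with ack_count = 1000 yields an
 * ackratio of 4500 * 1000 / 1000 = 4500, i.e. 4.5 SACK blocks per
 * ack; sack_moved_extra = 800 with sack_noextra_move = 200 yields
 * a moveratio of 800 * 1000 / 1000 = 800, i.e. 80% of SACKs forced
 * extra map movement.  Only when both ratios exceed their
 * configured thresholds is sack_attack_disable turned on.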
9171 */ 9172 uint64_t ackratio, moveratio, movetotal; 9173 9174 /* Log detecting */ 9175 rack_log_sad(rack, 1); 9176 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9177 ackratio *= (uint64_t)(1000); 9178 if (rack->r_ctl.ack_count) 9179 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9180 else { 9181 /* We really should not hit here */ 9182 ackratio = 1000; 9183 } 9184 if ((rack->sack_attack_disable == 0) && 9185 (ackratio > rack_highest_sack_thresh_seen)) 9186 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9187 movetotal = rack->r_ctl.sack_moved_extra; 9188 movetotal += rack->r_ctl.sack_noextra_move; 9189 moveratio = rack->r_ctl.sack_moved_extra; 9190 moveratio *= (uint64_t)1000; 9191 if (movetotal) 9192 moveratio /= movetotal; 9193 else { 9194 /* No moves, thats pretty good */ 9195 moveratio = 0; 9196 } 9197 if ((rack->sack_attack_disable == 0) && 9198 (moveratio > rack_highest_move_thresh_seen)) 9199 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9200 if (rack->sack_attack_disable == 0) { 9201 if ((ackratio > tcp_sack_to_ack_thresh) && 9202 (moveratio > tcp_sack_to_move_thresh)) { 9203 /* Disable sack processing */ 9204 rack->sack_attack_disable = 1; 9205 if (rack->r_rep_attack == 0) { 9206 rack->r_rep_attack = 1; 9207 counter_u64_add(rack_sack_attacks_detected, 1); 9208 } 9209 if (tcp_attack_on_turns_on_logging) { 9210 /* 9211 * Turn on logging, used for debugging 9212 * false positives. 9213 */ 9214 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9215 } 9216 /* Clamp the cwnd at flight size */ 9217 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9218 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9219 rack_log_sad(rack, 2); 9220 } 9221 } else { 9222 /* We are sack-disabled check for false positives */ 9223 if ((ackratio <= tcp_restoral_thresh) || 9224 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9225 rack->sack_attack_disable = 0; 9226 rack_log_sad(rack, 3); 9227 /* Restart counting */ 9228 rack->r_ctl.sack_count = 0; 9229 rack->r_ctl.sack_moved_extra = 0; 9230 rack->r_ctl.sack_noextra_move = 1; 9231 rack->r_ctl.ack_count = max(1, 9232 (bytes_this_ack / segsiz)); 9233 9234 if (rack->r_rep_reverse == 0) { 9235 rack->r_rep_reverse = 1; 9236 counter_u64_add(rack_sack_attacks_reversed, 1); 9237 } 9238 /* Restore the cwnd */ 9239 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9240 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9241 } 9242 } 9243 } 9244 } 9245 #endif 9246 9247 static int 9248 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9249 { 9250 9251 uint32_t am, l_end; 9252 int was_tlp = 0; 9253 9254 if (SEQ_GT(end, start)) 9255 am = end - start; 9256 else 9257 am = 0; 9258 if ((rack->rc_last_tlp_acked_set ) && 9259 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9260 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9261 /* 9262 * The DSACK is because of a TLP which we don't 9263 * do anything with the reordering window over since 9264 * it was not reordering that caused the DSACK but 9265 * our previous retransmit TLP. 9266 */ 9267 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9268 was_tlp = 1; 9269 goto skip_dsack_round; 9270 } 9271 if (rack->rc_last_sent_tlp_seq_valid) { 9272 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9273 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9274 (SEQ_LEQ(end, l_end))) { 9275 /* 9276 * This dsack is from the last sent TLP, ignore it 9277 * for reordering purposes. 
9278 */ 9279 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9280 was_tlp = 1; 9281 goto skip_dsack_round; 9282 } 9283 } 9284 if (rack->rc_dsack_round_seen == 0) { 9285 rack->rc_dsack_round_seen = 1; 9286 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9287 rack->r_ctl.num_dsack++; 9288 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9289 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9290 } 9291 skip_dsack_round: 9292 /* 9293 * We keep track of how many DSACK blocks we get 9294 * after a recovery incident. 9295 */ 9296 rack->r_ctl.dsack_byte_cnt += am; 9297 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9298 rack->r_ctl.retran_during_recovery && 9299 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9300 /* 9301 * False recovery most likely culprit is reordering. If 9302 * nothing else is missing we need to revert. 9303 */ 9304 rack->r_might_revert = 1; 9305 rack_handle_might_revert(rack->rc_tp, rack); 9306 rack->r_might_revert = 0; 9307 rack->r_ctl.retran_during_recovery = 0; 9308 rack->r_ctl.dsack_byte_cnt = 0; 9309 } 9310 return (was_tlp); 9311 } 9312 9313 static uint32_t 9314 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 9315 { 9316 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 9317 } 9318 9319 static int32_t 9320 rack_compute_pipe(struct tcpcb *tp) 9321 { 9322 return ((int32_t)do_rack_compute_pipe(tp, 9323 (struct tcp_rack *)tp->t_fb_ptr, 9324 tp->snd_una)); 9325 } 9326 9327 static void 9328 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9329 { 9330 /* Deal with changed and PRR here (in recovery only) */ 9331 uint32_t pipe, snd_una; 9332 9333 rack->r_ctl.rc_prr_delivered += changed; 9334 9335 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9336 /* 9337 * It is all outstanding, we are application limited 9338 * and thus we don't need more room to send anything. 9339 * Note we use tp->snd_una here and not th_ack because 9340 * the data as yet not been cut from the sb. 
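 *
 * Worked example of the proportional send count computed further
 * below, in the spirit of RFC 6937 PRR (illustrative numbers):
 * with rc_prr_delivered = 3000, snd_ssthresh = 50000,
 * rc_prr_recovery_fs = 100000 and rc_prr_out = 0, a pipe above
 * ssthresh gives sndcnt = 3000 * 50000 / 100000 + 1 = 1501 bytes,
 * i.e. roughly one byte of new send credit for every two bytes
 * delivered while the pipe drains toward ssthresh.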
9341 */ 9342 rack->r_ctl.rc_prr_sndcnt = 0; 9343 return; 9344 } 9345 /* Compute prr_sndcnt */ 9346 if (SEQ_GT(tp->snd_una, th_ack)) { 9347 snd_una = tp->snd_una; 9348 } else { 9349 snd_una = th_ack; 9350 } 9351 pipe = do_rack_compute_pipe(tp, rack, snd_una); 9352 if (pipe > tp->snd_ssthresh) { 9353 long sndcnt; 9354 9355 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9356 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9357 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9358 else { 9359 rack->r_ctl.rc_prr_sndcnt = 0; 9360 rack_log_to_prr(rack, 9, 0, __LINE__); 9361 sndcnt = 0; 9362 } 9363 sndcnt++; 9364 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9365 sndcnt -= rack->r_ctl.rc_prr_out; 9366 else 9367 sndcnt = 0; 9368 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9369 rack_log_to_prr(rack, 10, 0, __LINE__); 9370 } else { 9371 uint32_t limit; 9372 9373 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9374 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9375 else 9376 limit = 0; 9377 if (changed > limit) 9378 limit = changed; 9379 limit += ctf_fixed_maxseg(tp); 9380 if (tp->snd_ssthresh > pipe) { 9381 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9382 rack_log_to_prr(rack, 11, 0, __LINE__); 9383 } else { 9384 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9385 rack_log_to_prr(rack, 12, 0, __LINE__); 9386 } 9387 } 9388 } 9389 9390 static void 9391 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9392 { 9393 uint32_t changed; 9394 struct tcp_rack *rack; 9395 struct rack_sendmap *rsm; 9396 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9397 register uint32_t th_ack; 9398 int32_t i, j, k, num_sack_blks = 0; 9399 uint32_t cts, acked, ack_point; 9400 int loop_start = 0, moved_two = 0; 9401 uint32_t tsused; 9402 9403 9404 INP_WLOCK_ASSERT(tptoinpcb(tp)); 9405 if (tcp_get_flags(th) & TH_RST) { 9406 /* We don't log resets */ 9407 return; 9408 } 9409 rack = (struct tcp_rack *)tp->t_fb_ptr; 9410 cts = tcp_get_usecs(NULL); 9411 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9412 changed = 0; 9413 th_ack = th->th_ack; 9414 if (rack->sack_attack_disable == 0) 9415 rack_do_decay(rack); 9416 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9417 /* 9418 * You only get credit for 9419 * MSS and greater (and you get extra 9420 * credit for larger cum-ack moves). 9421 */ 9422 int ac; 9423 9424 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9425 rack->r_ctl.ack_count += ac; 9426 counter_u64_add(rack_ack_total, ac); 9427 } 9428 if (rack->r_ctl.ack_count > 0xfff00000) { 9429 /* 9430 * reduce the number to keep us under 9431 * a uint32_t. 9432 */ 9433 rack->r_ctl.ack_count /= 2; 9434 rack->r_ctl.sack_count /= 2; 9435 } 9436 if (SEQ_GT(th_ack, tp->snd_una)) { 9437 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9438 tp->t_acktime = ticks; 9439 } 9440 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9441 changed = th_ack - rsm->r_start; 9442 if (changed) { 9443 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9444 } 9445 if ((to->to_flags & TOF_SACK) == 0) { 9446 /* We are done nothing left and no sack. */ 9447 rack_handle_might_revert(tp, rack); 9448 /* 9449 * For cases where we struck a dup-ack 9450 * with no SACK, add to the changes so 9451 * PRR will work right. 
9452 */ 9453 if (dup_ack_struck && (changed == 0)) { 9454 changed += ctf_fixed_maxseg(rack->rc_tp); 9455 } 9456 goto out; 9457 } 9458 /* Sack block processing */ 9459 if (SEQ_GT(th_ack, tp->snd_una)) 9460 ack_point = th_ack; 9461 else 9462 ack_point = tp->snd_una; 9463 for (i = 0; i < to->to_nsacks; i++) { 9464 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9465 &sack, sizeof(sack)); 9466 sack.start = ntohl(sack.start); 9467 sack.end = ntohl(sack.end); 9468 if (SEQ_GT(sack.end, sack.start) && 9469 SEQ_GT(sack.start, ack_point) && 9470 SEQ_LT(sack.start, tp->snd_max) && 9471 SEQ_GT(sack.end, ack_point) && 9472 SEQ_LEQ(sack.end, tp->snd_max)) { 9473 sack_blocks[num_sack_blks] = sack; 9474 num_sack_blks++; 9475 } else if (SEQ_LEQ(sack.start, th_ack) && 9476 SEQ_LEQ(sack.end, th_ack)) { 9477 int was_tlp; 9478 9479 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9480 /* 9481 * Its a D-SACK block. 9482 */ 9483 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9484 } 9485 } 9486 if (rack->rc_dsack_round_seen) { 9487 /* Is the dsack roound over? */ 9488 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9489 /* Yes it is */ 9490 rack->rc_dsack_round_seen = 0; 9491 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9492 } 9493 } 9494 /* 9495 * Sort the SACK blocks so we can update the rack scoreboard with 9496 * just one pass. 9497 */ 9498 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9499 num_sack_blks, th->th_ack); 9500 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9501 if (num_sack_blks == 0) { 9502 /* Nothing to sack (DSACKs?) */ 9503 goto out_with_totals; 9504 } 9505 if (num_sack_blks < 2) { 9506 /* Only one, we don't need to sort */ 9507 goto do_sack_work; 9508 } 9509 /* Sort the sacks */ 9510 for (i = 0; i < num_sack_blks; i++) { 9511 for (j = i + 1; j < num_sack_blks; j++) { 9512 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9513 sack = sack_blocks[i]; 9514 sack_blocks[i] = sack_blocks[j]; 9515 sack_blocks[j] = sack; 9516 } 9517 } 9518 } 9519 /* 9520 * Now are any of the sack block ends the same (yes some 9521 * implementations send these)? 9522 */ 9523 again: 9524 if (num_sack_blks == 0) 9525 goto out_with_totals; 9526 if (num_sack_blks > 1) { 9527 for (i = 0; i < num_sack_blks; i++) { 9528 for (j = i + 1; j < num_sack_blks; j++) { 9529 if (sack_blocks[i].end == sack_blocks[j].end) { 9530 /* 9531 * Ok these two have the same end we 9532 * want the smallest end and then 9533 * throw away the larger and start 9534 * again. 9535 */ 9536 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9537 /* 9538 * The second block covers 9539 * more area use that 9540 */ 9541 sack_blocks[i].start = sack_blocks[j].start; 9542 } 9543 /* 9544 * Now collapse out the dup-sack and 9545 * lower the count 9546 */ 9547 for (k = (j + 1); k < num_sack_blks; k++) { 9548 sack_blocks[j].start = sack_blocks[k].start; 9549 sack_blocks[j].end = sack_blocks[k].end; 9550 j++; 9551 } 9552 num_sack_blks--; 9553 goto again; 9554 } 9555 } 9556 } 9557 } 9558 do_sack_work: 9559 /* 9560 * First lets look to see if 9561 * we have retransmitted and 9562 * can use the transmit next? 9563 */ 9564 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9565 if (rsm && 9566 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9567 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9568 /* 9569 * We probably did the FR and the next 9570 * SACK in continues as we would expect. 
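 *
 * The pattern anticipated here (hypothetical sequences): after the
 * fast retransmit each new ACK tends to carry a single SACK block
 * that simply extends, e.g. [2448,5344) and then [2448,8240), and
 * the sack filter has already collapsed the option to that one
 * block.  When only one block is present the bytes it newly covers
 * are also credited to ack_count below, which helps unwind a false
 * sack-attack classification.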
9571 */ 9572 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9573 if (acked) { 9574 rack->r_wanted_output = 1; 9575 changed += acked; 9576 } 9577 if (num_sack_blks == 1) { 9578 /* 9579 * This is what we would expect from 9580 * a normal implementation to happen 9581 * after we have retransmitted the FR, 9582 * i.e the sack-filter pushes down 9583 * to 1 block and the next to be retransmitted 9584 * is the sequence in the sack block (has more 9585 * are acked). Count this as ACK'd data to boost 9586 * up the chances of recovering any false positives. 9587 */ 9588 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9589 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9590 counter_u64_add(rack_express_sack, 1); 9591 if (rack->r_ctl.ack_count > 0xfff00000) { 9592 /* 9593 * reduce the number to keep us under 9594 * a uint32_t. 9595 */ 9596 rack->r_ctl.ack_count /= 2; 9597 rack->r_ctl.sack_count /= 2; 9598 } 9599 goto out_with_totals; 9600 } else { 9601 /* 9602 * Start the loop through the 9603 * rest of blocks, past the first block. 9604 */ 9605 moved_two = 0; 9606 loop_start = 1; 9607 } 9608 } 9609 /* Its a sack of some sort */ 9610 rack->r_ctl.sack_count++; 9611 if (rack->r_ctl.sack_count > 0xfff00000) { 9612 /* 9613 * reduce the number to keep us under 9614 * a uint32_t. 9615 */ 9616 rack->r_ctl.ack_count /= 2; 9617 rack->r_ctl.sack_count /= 2; 9618 } 9619 counter_u64_add(rack_sack_total, 1); 9620 if (rack->sack_attack_disable) { 9621 /* An attacker disablement is in place */ 9622 if (num_sack_blks > 1) { 9623 rack->r_ctl.sack_count += (num_sack_blks - 1); 9624 rack->r_ctl.sack_moved_extra++; 9625 counter_u64_add(rack_move_some, 1); 9626 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9627 rack->r_ctl.sack_moved_extra /= 2; 9628 rack->r_ctl.sack_noextra_move /= 2; 9629 } 9630 } 9631 goto out; 9632 } 9633 rsm = rack->r_ctl.rc_sacklast; 9634 for (i = loop_start; i < num_sack_blks; i++) { 9635 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9636 if (acked) { 9637 rack->r_wanted_output = 1; 9638 changed += acked; 9639 } 9640 if (moved_two) { 9641 /* 9642 * If we did not get a SACK for at least a MSS and 9643 * had to move at all, or if we moved more than our 9644 * threshold, it counts against the "extra" move. 9645 */ 9646 rack->r_ctl.sack_moved_extra += moved_two; 9647 counter_u64_add(rack_move_some, 1); 9648 } else { 9649 /* 9650 * else we did not have to move 9651 * any more than we would expect. 9652 */ 9653 rack->r_ctl.sack_noextra_move++; 9654 counter_u64_add(rack_move_none, 1); 9655 } 9656 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9657 /* 9658 * If the SACK was not a full MSS then 9659 * we add to sack_count the number of 9660 * MSS's (or possibly more than 9661 * a MSS if its a TSO send) we had to skip by. 9662 */ 9663 rack->r_ctl.sack_count += moved_two; 9664 counter_u64_add(rack_sack_total, moved_two); 9665 } 9666 /* 9667 * Now we need to setup for the next 9668 * round. First we make sure we won't 9669 * exceed the size of our uint32_t on 9670 * the various counts, and then clear out 9671 * moved_two. 
9672 */ 9673 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9674 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9675 rack->r_ctl.sack_moved_extra /= 2; 9676 rack->r_ctl.sack_noextra_move /= 2; 9677 } 9678 if (rack->r_ctl.sack_count > 0xfff00000) { 9679 rack->r_ctl.ack_count /= 2; 9680 rack->r_ctl.sack_count /= 2; 9681 } 9682 moved_two = 0; 9683 } 9684 out_with_totals: 9685 if (num_sack_blks > 1) { 9686 /* 9687 * You get an extra stroke if 9688 * you have more than one sack-blk, this 9689 * could be where we are skipping forward 9690 * and the sack-filter is still working, or 9691 * it could be an attacker constantly 9692 * moving us. 9693 */ 9694 rack->r_ctl.sack_moved_extra++; 9695 counter_u64_add(rack_move_some, 1); 9696 } 9697 out: 9698 #ifdef NETFLIX_EXP_DETECTION 9699 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9700 #endif 9701 if (changed) { 9702 /* Something changed cancel the rack timer */ 9703 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9704 } 9705 tsused = tcp_get_usecs(NULL); 9706 rsm = tcp_rack_output(tp, rack, tsused); 9707 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9708 rsm && 9709 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9710 /* Enter recovery */ 9711 entered_recovery = 1; 9712 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9713 /* 9714 * When we enter recovery we need to assure we send 9715 * one packet. 9716 */ 9717 if (rack->rack_no_prr == 0) { 9718 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9719 rack_log_to_prr(rack, 8, 0, __LINE__); 9720 } 9721 rack->r_timer_override = 1; 9722 rack->r_early = 0; 9723 rack->r_ctl.rc_agg_early = 0; 9724 } else if (IN_FASTRECOVERY(tp->t_flags) && 9725 rsm && 9726 (rack->r_rr_config == 3)) { 9727 /* 9728 * Assure we can output and we get no 9729 * remembered pace time except the retransmit. 9730 */ 9731 rack->r_timer_override = 1; 9732 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9733 rack->r_ctl.rc_resend = rsm; 9734 } 9735 if (IN_FASTRECOVERY(tp->t_flags) && 9736 (rack->rack_no_prr == 0) && 9737 (entered_recovery == 0)) { 9738 rack_update_prr(tp, rack, changed, th_ack); 9739 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9740 ((tcp_in_hpts(rack->rc_inp) == 0) && 9741 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9742 /* 9743 * If you are pacing output you don't want 9744 * to override. 9745 */ 9746 rack->r_early = 0; 9747 rack->r_ctl.rc_agg_early = 0; 9748 rack->r_timer_override = 1; 9749 } 9750 } 9751 } 9752 9753 static void 9754 rack_strike_dupack(struct tcp_rack *rack) 9755 { 9756 struct rack_sendmap *rsm; 9757 9758 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9759 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9760 rsm = TAILQ_NEXT(rsm, r_tnext); 9761 if (rsm->r_flags & RACK_MUST_RXT) { 9762 /* Sendmap entries that are marked to 9763 * be retransmitted do not need dupack's 9764 * struck. We get these marks for a number 9765 * of reasons (rxt timeout with no sack, 9766 * mtu change, or rwnd collapses). When 9767 * these events occur, we know we must retransmit 9768 * them and mark the sendmap entries. Dupack counting 9769 * is not needed since we are already set to retransmit 9770 * it as soon as we can. 9771 */ 9772 continue; 9773 } 9774 } 9775 if (rsm && (rsm->r_dupack < 0xff)) { 9776 rsm->r_dupack++; 9777 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9778 struct timeval tv; 9779 uint32_t cts; 9780 /* 9781 * Here we see if we need to retransmit. 
For 9782 * a SACK type connection if enough time has passed 9783 * we will get a return of the rsm. For a non-sack 9784 * connection we will get the rsm returned if the 9785 * dupack value is 3 or more. 9786 */ 9787 cts = tcp_get_usecs(&tv); 9788 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9789 if (rack->r_ctl.rc_resend != NULL) { 9790 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9791 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9792 rack->rc_tp->snd_una, __LINE__); 9793 } 9794 rack->r_wanted_output = 1; 9795 rack->r_timer_override = 1; 9796 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9797 } 9798 } else { 9799 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9800 } 9801 } 9802 } 9803 9804 static void 9805 rack_check_bottom_drag(struct tcpcb *tp, 9806 struct tcp_rack *rack, 9807 struct socket *so, int32_t acked) 9808 { 9809 uint32_t segsiz, minseg; 9810 9811 segsiz = ctf_fixed_maxseg(tp); 9812 minseg = segsiz; 9813 9814 if (tp->snd_max == tp->snd_una) { 9815 /* 9816 * We are doing dynamic pacing and we are way 9817 * under. Basically everything got acked while 9818 * we were still waiting on the pacer to expire. 9819 * 9820 * This means we need to boost the b/w in 9821 * addition to any earlier boosting of 9822 * the multiplier. 9823 */ 9824 rack->rc_dragged_bottom = 1; 9825 rack_validate_multipliers_at_or_above100(rack); 9826 /* 9827 * Lets use the segment bytes acked plus 9828 * the lowest RTT seen as the basis to 9829 * form a b/w estimate. This will be off 9830 * due to the fact that the true estimate 9831 * should be around 1/2 the time of the RTT 9832 * but we can settle for that. 9833 */ 9834 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9835 acked) { 9836 uint64_t bw, calc_bw, rtt; 9837 9838 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9839 if (rtt == 0) { 9840 /* no us sample is there a ms one? */ 9841 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9842 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9843 } else { 9844 goto no_measurement; 9845 } 9846 } 9847 bw = acked; 9848 calc_bw = bw * 1000000; 9849 calc_bw /= rtt; 9850 if (rack->r_ctl.last_max_bw && 9851 (rack->r_ctl.last_max_bw < calc_bw)) { 9852 /* 9853 * If we have a last calculated max bw 9854 * enforce it. 9855 */ 9856 calc_bw = rack->r_ctl.last_max_bw; 9857 } 9858 /* now plop it in */ 9859 if (rack->rc_gp_filled == 0) { 9860 if (calc_bw > ONE_POINT_TWO_MEG) { 9861 /* 9862 * If we have no measurement 9863 * don't let us set in more than 9864 * 1.2Mbps. If we are still too 9865 * low after pacing with this we 9866 * will hopefully have a max b/w 9867 * available to sanity check things. 
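 *
 * Worked example of the estimate formed above (illustrative
 * values): acked = 14480 bytes against a lowest us RTT of 20000
 * gives calc_bw = 14480 * 1000000 / 20000 = 724000 bytes/sec,
 * about 5.8 Mbps, which is then clamped by last_max_bw if one has
 * been recorded; with no prior measurement at all the cap applied
 * just below limits the seed value to roughly 1.2 Mbps as
 * described above.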
9868 */ 9869 calc_bw = ONE_POINT_TWO_MEG; 9870 } 9871 rack->r_ctl.rc_rtt_diff = 0; 9872 rack->r_ctl.gp_bw = calc_bw; 9873 rack->rc_gp_filled = 1; 9874 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9875 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9876 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9877 } else if (calc_bw > rack->r_ctl.gp_bw) { 9878 rack->r_ctl.rc_rtt_diff = 0; 9879 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9880 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9881 rack->r_ctl.gp_bw = calc_bw; 9882 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9883 } else 9884 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9885 if ((rack->gp_ready == 0) && 9886 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9887 /* We have enough measurements now */ 9888 rack->gp_ready = 1; 9889 rack_set_cc_pacing(rack); 9890 if (rack->defer_options) 9891 rack_apply_deferred_options(rack); 9892 } 9893 /* 9894 * For acks over 1mss we do a extra boost to simulate 9895 * where we would get 2 acks (we want 110 for the mul). 9896 */ 9897 if (acked > segsiz) 9898 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9899 } else { 9900 /* 9901 * zero rtt possibly?, settle for just an old increase. 9902 */ 9903 no_measurement: 9904 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9905 } 9906 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9907 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9908 minseg)) && 9909 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9910 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9911 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9912 (segsiz * rack_req_segs))) { 9913 /* 9914 * We are doing dynamic GP pacing and 9915 * we have everything except 1MSS or less 9916 * bytes left out. We are still pacing away. 9917 * And there is data that could be sent, This 9918 * means we are inserting delayed ack time in 9919 * our measurements because we are pacing too slow. 9920 */ 9921 rack_validate_multipliers_at_or_above100(rack); 9922 rack->rc_dragged_bottom = 1; 9923 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9924 } 9925 } 9926 9927 9928 9929 static void 9930 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9931 { 9932 /* 9933 * The fast output path is enabled and we 9934 * have moved the cumack forward. Lets see if 9935 * we can expand forward the fast path length by 9936 * that amount. What we would ideally like to 9937 * do is increase the number of bytes in the 9938 * fast path block (left_to_send) by the 9939 * acked amount. However we have to gate that 9940 * by two factors: 9941 * 1) The amount outstanding and the rwnd of the peer 9942 * (i.e. we don't want to exceed the rwnd of the peer). 9943 * <and> 9944 * 2) The amount of data left in the socket buffer (i.e. 9945 * we can't send beyond what is in the buffer). 9946 * 9947 * Note that this does not take into account any increase 9948 * in the cwnd. We will only extend the fast path by 9949 * what was acked. 
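 *
 * A small worked example (made-up values): with left_to_send = 10000,
 * acked_amount = 5000, sbavail = 100000, snd_max - snd_una = 20000
 * and snd_wnd = 50000 the code below computes
 *
 *	new_total  = 10000 + 5000 = 15000
 *	gating_val = min(100000 - 20000, 50000 - 20000) = 30000
 *
 * so left_to_send grows to 15000. Had the peer only offered a 22000
 * byte window, gating_val would be 2000 and left_to_send would be
 * left alone.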
9950 */ 9951 uint32_t new_total, gating_val; 9952 9953 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 9954 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 9955 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 9956 if (new_total <= gating_val) { 9957 /* We can increase left_to_send by the acked amount */ 9958 counter_u64_add(rack_extended_rfo, 1); 9959 rack->r_ctl.fsb.left_to_send = new_total; 9960 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 9961 ("rack:%p left_to_send:%u sbavail:%u out:%u", 9962 rack, rack->r_ctl.fsb.left_to_send, 9963 sbavail(&rack->rc_inp->inp_socket->so_snd), 9964 (tp->snd_max - tp->snd_una))); 9965 9966 } 9967 } 9968 9969 static void 9970 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 9971 { 9972 /* 9973 * Here any sendmap entry that points to the 9974 * beginning mbuf must be adjusted to the correct 9975 * offset. This must be called with: 9976 * 1) The socket buffer locked 9977 * 2) snd_una adjusted to its new postion. 9978 * 9979 * Note that (2) implies rack_ack_received has also 9980 * been called. 9981 * 9982 * We grab the first mbuf in the socket buffer and 9983 * then go through the front of the sendmap, recalculating 9984 * the stored offset for any sendmap entry that has 9985 * that mbuf. We must use the sb functions to do this 9986 * since its possible an add was done has well as 9987 * the subtraction we may have just completed. This should 9988 * not be a penalty though, since we just referenced the sb 9989 * to go in and trim off the mbufs that we freed (of course 9990 * there will be a penalty for the sendmap references though). 9991 */ 9992 struct mbuf *m; 9993 struct rack_sendmap *rsm; 9994 9995 SOCKBUF_LOCK_ASSERT(sb); 9996 m = sb->sb_mb; 9997 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9998 if ((rsm == NULL) || (m == NULL)) { 9999 /* Nothing outstanding */ 10000 return; 10001 } 10002 while (rsm->m && (rsm->m == m)) { 10003 /* one to adjust */ 10004 #ifdef INVARIANTS 10005 struct mbuf *tm; 10006 uint32_t soff; 10007 10008 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 10009 if (rsm->orig_m_len != m->m_len) { 10010 rack_adjust_orig_mlen(rsm); 10011 } 10012 if (rsm->soff != soff) { 10013 /* 10014 * This is not a fatal error, we anticipate it 10015 * might happen (the else code), so we count it here 10016 * so that under invariant we can see that it really 10017 * does happen. 10018 */ 10019 counter_u64_add(rack_adjust_map_bw, 1); 10020 } 10021 rsm->m = tm; 10022 rsm->soff = soff; 10023 if (tm) 10024 rsm->orig_m_len = rsm->m->m_len; 10025 else 10026 rsm->orig_m_len = 0; 10027 #else 10028 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10029 if (rsm->m) 10030 rsm->orig_m_len = rsm->m->m_len; 10031 else 10032 rsm->orig_m_len = 0; 10033 #endif 10034 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10035 rsm); 10036 if (rsm == NULL) 10037 break; 10038 } 10039 } 10040 10041 /* 10042 * Return value of 1, we do not need to call rack_process_data(). 10043 * return value of 0, rack_process_data can be called. 10044 * For ret_val if its 0 the TCP is locked, if its non-zero 10045 * its unlocked and probably unsafe to touch the TCB. 
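 *
 * The per-state handlers below consume these return values roughly
 * like this (a sketch mirroring rack_do_syn_sent):
 *
 *	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen,
 *	    &ourfinisacked, thflags, &ret_val))
 *		return (ret_val);
 *	...
 *	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
 *	    tiwin, thflags, nxt_pkt));
 *
 * i.e. a non-zero return short-circuits rack_process_data(), and a
 * non-zero ret_val additionally means the TCB has been unlocked and
 * must not be touched again.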
10046 */ 10047 static int 10048 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10049 struct tcpcb *tp, struct tcpopt *to, 10050 uint32_t tiwin, int32_t tlen, 10051 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10052 { 10053 int32_t ourfinisacked = 0; 10054 int32_t nsegs, acked_amount; 10055 int32_t acked; 10056 struct mbuf *mfree; 10057 struct tcp_rack *rack; 10058 int32_t under_pacing = 0; 10059 int32_t recovery = 0; 10060 10061 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10062 10063 rack = (struct tcp_rack *)tp->t_fb_ptr; 10064 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10065 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10066 &rack->r_ctl.challenge_ack_ts, 10067 &rack->r_ctl.challenge_ack_cnt); 10068 rack->r_wanted_output = 1; 10069 return (1); 10070 } 10071 if (rack->gp_ready && 10072 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10073 under_pacing = 1; 10074 } 10075 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10076 int in_rec, dup_ack_struck = 0; 10077 10078 in_rec = IN_FASTRECOVERY(tp->t_flags); 10079 if (rack->rc_in_persist) { 10080 tp->t_rxtshift = 0; 10081 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10082 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10083 } 10084 if ((th->th_ack == tp->snd_una) && 10085 (tiwin == tp->snd_wnd) && 10086 ((to->to_flags & TOF_SACK) == 0)) { 10087 rack_strike_dupack(rack); 10088 dup_ack_struck = 1; 10089 } 10090 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10091 } 10092 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10093 /* 10094 * Old ack, behind (or duplicate to) the last one rcv'd 10095 * Note: We mark reordering is occuring if its 10096 * less than and we have not closed our window. 10097 */ 10098 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10099 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10100 } 10101 return (0); 10102 } 10103 /* 10104 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10105 * something we sent. 10106 */ 10107 if (tp->t_flags & TF_NEEDSYN) { 10108 /* 10109 * T/TCP: Connection was half-synchronized, and our SYN has 10110 * been ACK'd (so connection is now fully synchronized). Go 10111 * to non-starred state, increment snd_una for ACK of SYN, 10112 * and check if we can do window scaling. 10113 */ 10114 tp->t_flags &= ~TF_NEEDSYN; 10115 tp->snd_una++; 10116 /* Do window scaling? */ 10117 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10118 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10119 tp->rcv_scale = tp->request_r_scale; 10120 /* Send window already scaled. */ 10121 } 10122 } 10123 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10124 10125 acked = BYTES_THIS_ACK(tp, th); 10126 if (acked) { 10127 /* 10128 * Any time we move the cum-ack forward clear 10129 * keep-alive tied probe-not-answered. The 10130 * persists clears its own on entry. 10131 */ 10132 rack->probe_not_answered = 0; 10133 } 10134 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10135 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10136 /* 10137 * If we just performed our first retransmit, and the ACK arrives 10138 * within our recovery window, then it was a mistake to do the 10139 * retransmit in the first place. Recover our original cwnd and 10140 * ssthresh, and proceed to transmit where we left off. 
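 *
 * Concretely, the check below only applies when timestamps are not in
 * use (TF_RCVD_TSTMP clear), and it requires that we are still on the
 * first backoff (t_rxtshift == 1) and that ticks has not yet passed
 * t_badrxtwin, which was armed when the retransmit was sent. For
 * illustration, ticks = 1002 against t_badrxtwin = 1005 gives
 * (int)(1002 - 1005) = -3 < 0, so the retransmit is judged spurious
 * and CC_RTO_ERR lets rack_cong_signal() restore the saved state.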
10141 */ 10142 if ((tp->t_flags & TF_PREVVALID) && 10143 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10144 tp->t_flags &= ~TF_PREVVALID; 10145 if (tp->t_rxtshift == 1 && 10146 (int)(ticks - tp->t_badrxtwin) < 0) 10147 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10148 } 10149 if (acked) { 10150 /* assure we are not backed off */ 10151 tp->t_rxtshift = 0; 10152 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10153 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10154 rack->rc_tlp_in_progress = 0; 10155 rack->r_ctl.rc_tlp_cnt_out = 0; 10156 /* 10157 * If it is the RXT timer we want to 10158 * stop it, so we can restart a TLP. 10159 */ 10160 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10161 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10162 #ifdef NETFLIX_HTTP_LOGGING 10163 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10164 #endif 10165 } 10166 /* 10167 * If we have a timestamp reply, update smoothed round trip time. If 10168 * no timestamp is present but transmit timer is running and timed 10169 * sequence number was acked, update smoothed round trip time. Since 10170 * we now have an rtt measurement, cancel the timer backoff (cf., 10171 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10172 * timer. 10173 * 10174 * Some boxes send broken timestamp replies during the SYN+ACK 10175 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10176 * and blow up the retransmit timer. 10177 */ 10178 /* 10179 * If all outstanding data is acked, stop retransmit timer and 10180 * remember to restart (more output or persist). If there is more 10181 * data to be acked, restart retransmit timer, using current 10182 * (possibly backed-off) value. 10183 */ 10184 if (acked == 0) { 10185 if (ofia) 10186 *ofia = ourfinisacked; 10187 return (0); 10188 } 10189 if (IN_RECOVERY(tp->t_flags)) { 10190 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10191 (SEQ_LT(th->th_ack, tp->snd_max))) { 10192 tcp_rack_partialack(tp); 10193 } else { 10194 rack_post_recovery(tp, th->th_ack); 10195 recovery = 1; 10196 } 10197 } 10198 /* 10199 * Let the congestion control algorithm update congestion control 10200 * related information. This typically means increasing the 10201 * congestion window. 10202 */ 10203 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10204 SOCKBUF_LOCK(&so->so_snd); 10205 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10206 tp->snd_wnd -= acked_amount; 10207 mfree = sbcut_locked(&so->so_snd, acked_amount); 10208 if ((sbused(&so->so_snd) == 0) && 10209 (acked > acked_amount) && 10210 (tp->t_state >= TCPS_FIN_WAIT_1) && 10211 (tp->t_flags & TF_SENTFIN)) { 10212 /* 10213 * We must be sure our fin 10214 * was sent and acked (we can be 10215 * in FIN_WAIT_1 without having 10216 * sent the fin). 10217 */ 10218 ourfinisacked = 1; 10219 } 10220 tp->snd_una = th->th_ack; 10221 if (acked_amount && sbavail(&so->so_snd)) 10222 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10223 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10224 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10225 sowwakeup_locked(so); 10226 m_freem(mfree); 10227 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10228 tp->snd_recover = tp->snd_una; 10229 10230 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10231 tp->snd_nxt = tp->snd_una; 10232 } 10233 if (under_pacing && 10234 (rack->use_fixed_rate == 0) && 10235 (rack->in_probe_rtt == 0) && 10236 rack->rc_gp_dyn_mul && 10237 rack->rc_always_pace) { 10238 /* Check if we are dragging bottom */ 10239 rack_check_bottom_drag(tp, rack, so, acked); 10240 } 10241 if (tp->snd_una == tp->snd_max) { 10242 /* Nothing left outstanding */ 10243 tp->t_flags &= ~TF_PREVVALID; 10244 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10245 rack->r_ctl.retran_during_recovery = 0; 10246 rack->r_ctl.dsack_byte_cnt = 0; 10247 if (rack->r_ctl.rc_went_idle_time == 0) 10248 rack->r_ctl.rc_went_idle_time = 1; 10249 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10250 if (sbavail(&tptosocket(tp)->so_snd) == 0) 10251 tp->t_acktime = 0; 10252 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10253 /* Set need output so persist might get set */ 10254 rack->r_wanted_output = 1; 10255 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10256 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10257 (sbavail(&so->so_snd) == 0) && 10258 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10259 /* 10260 * The socket was gone and the 10261 * peer sent data (now or in the past), time to 10262 * reset him. 10263 */ 10264 *ret_val = 1; 10265 /* tcp_close will kill the inp pre-log the Reset */ 10266 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10267 tp = tcp_close(tp); 10268 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10269 return (1); 10270 } 10271 } 10272 if (ofia) 10273 *ofia = ourfinisacked; 10274 return (0); 10275 } 10276 10277 10278 static void 10279 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 10280 int dir, uint32_t flags, struct rack_sendmap *rsm) 10281 { 10282 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 10283 union tcp_log_stackspecific log; 10284 struct timeval tv; 10285 10286 memset(&log, 0, sizeof(log)); 10287 log.u_bbr.flex1 = cnt; 10288 log.u_bbr.flex2 = split; 10289 log.u_bbr.flex3 = out; 10290 log.u_bbr.flex4 = line; 10291 log.u_bbr.flex5 = rack->r_must_retran; 10292 log.u_bbr.flex6 = flags; 10293 log.u_bbr.flex7 = rack->rc_has_collapsed; 10294 log.u_bbr.flex8 = dir; /* 10295 * 1 is collapsed, 0 is uncollapsed, 10296 * 2 is log of a rsm being marked, 3 is a split. 10297 */ 10298 if (rsm == NULL) 10299 log.u_bbr.rttProp = 0; 10300 else 10301 log.u_bbr.rttProp = (uint64_t)rsm; 10302 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10303 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10304 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10305 &rack->rc_inp->inp_socket->so_rcv, 10306 &rack->rc_inp->inp_socket->so_snd, 10307 TCP_RACK_LOG_COLLAPSE, 0, 10308 0, &log, false, &tv); 10309 } 10310 } 10311 10312 static void 10313 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line) 10314 { 10315 /* 10316 * Here all we do is mark the collapsed point and set the flag. 10317 * This may happen again and again, but there is no 10318 * sense splitting our map until we know where the 10319 * peer finally lands in the collapse. 
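 *
 * For illustration (made-up sequence numbers): with snd_una = 1000,
 * snd_wnd = 5000 and snd_max = 9000 the code below records
 *
 *	last_collapse_point = 1000 + 5000 = 6000
 *	high_collapse_point = 9000
 *
 * i.e. bytes 6000..8999 were sent but now lie beyond the offered
 * window; rack_un_collapse_window() later splits at 6000 if needed
 * and marks everything from there forward RACK_RWND_COLLAPSED.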
10320 */ 10321 rack_trace_point(rack, RACK_TP_COLLAPSED_WND); 10322 if ((rack->rc_has_collapsed == 0) || 10323 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd))) 10324 counter_u64_add(rack_collapsed_win_seen, 1); 10325 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10326 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 10327 rack->rc_has_collapsed = 1; 10328 rack->r_collapse_point_valid = 1; 10329 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 10330 } 10331 10332 static void 10333 rack_un_collapse_window(struct tcp_rack *rack, int line) 10334 { 10335 struct rack_sendmap *nrsm, *rsm, fe; 10336 int cnt = 0, split = 0; 10337 #ifdef INVARIANTS 10338 struct rack_sendmap *insret; 10339 #endif 10340 10341 memset(&fe, 0, sizeof(fe)); 10342 rack->rc_has_collapsed = 0; 10343 fe.r_start = rack->r_ctl.last_collapse_point; 10344 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10345 if (rsm == NULL) { 10346 /* Nothing to do maybe the peer ack'ed it all */ 10347 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10348 return; 10349 } 10350 /* Now do we need to split this one? */ 10351 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 10352 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 10353 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 10354 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10355 if (nrsm == NULL) { 10356 /* We can't get a rsm, mark all? */ 10357 nrsm = rsm; 10358 goto no_split; 10359 } 10360 /* Clone it */ 10361 split = 1; 10362 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 10363 #ifndef INVARIANTS 10364 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10365 #else 10366 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10367 if (insret != NULL) { 10368 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10369 nrsm, insret, rack, rsm); 10370 } 10371 #endif 10372 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 10373 rack->r_ctl.last_collapse_point, __LINE__); 10374 if (rsm->r_in_tmap) { 10375 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10376 nrsm->r_in_tmap = 1; 10377 } 10378 /* 10379 * Set in the new RSM as the 10380 * collapsed starting point 10381 */ 10382 rsm = nrsm; 10383 } 10384 no_split: 10385 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10386 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10387 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 10388 cnt++; 10389 } 10390 if (cnt) { 10391 counter_u64_add(rack_collapsed_win, 1); 10392 } 10393 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10394 } 10395 10396 static void 10397 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10398 int32_t tlen, int32_t tfo_syn) 10399 { 10400 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10401 if (rack->rc_dack_mode && 10402 (tlen > 500) && 10403 (rack->rc_dack_toggle == 1)) { 10404 goto no_delayed_ack; 10405 } 10406 rack_timer_cancel(tp, rack, 10407 rack->r_ctl.rc_rcvtime, __LINE__); 10408 tp->t_flags |= TF_DELACK; 10409 } else { 10410 no_delayed_ack: 10411 rack->r_wanted_output = 1; 10412 tp->t_flags |= TF_ACKNOW; 10413 if (rack->rc_dack_mode) { 10414 if (tp->t_flags & TF_DELACK) 10415 rack->rc_dack_toggle = 1; 10416 else 10417 rack->rc_dack_toggle = 0; 10418 } 10419 } 10420 } 10421 10422 static void 10423 rack_validate_fo_sendwin_up(struct tcpcb 
*tp, struct tcp_rack *rack) 10424 { 10425 /* 10426 * If fast output is in progress, lets validate that 10427 * the new window did not shrink on us and make it 10428 * so fast output should end. 10429 */ 10430 if (rack->r_fast_output) { 10431 uint32_t out; 10432 10433 /* 10434 * Calculate what we will send if left as is 10435 * and compare that to our send window. 10436 */ 10437 out = ctf_outstanding(tp); 10438 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10439 /* ok we have an issue */ 10440 if (out >= tp->snd_wnd) { 10441 /* Turn off fast output the window is met or collapsed */ 10442 rack->r_fast_output = 0; 10443 } else { 10444 /* we have some room left */ 10445 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10446 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10447 /* If not at least 1 full segment never mind */ 10448 rack->r_fast_output = 0; 10449 } 10450 } 10451 } 10452 } 10453 } 10454 10455 10456 /* 10457 * Return value of 1, the TCB is unlocked and most 10458 * likely gone, return value of 0, the TCP is still 10459 * locked. 10460 */ 10461 static int 10462 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10463 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10464 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10465 { 10466 /* 10467 * Update window information. Don't look at window if no ACK: TAC's 10468 * send garbage on first SYN. 10469 */ 10470 int32_t nsegs; 10471 int32_t tfo_syn; 10472 struct tcp_rack *rack; 10473 10474 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10475 10476 rack = (struct tcp_rack *)tp->t_fb_ptr; 10477 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10478 if ((thflags & TH_ACK) && 10479 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10480 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10481 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10482 /* keep track of pure window updates */ 10483 if (tlen == 0 && 10484 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10485 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10486 tp->snd_wnd = tiwin; 10487 rack_validate_fo_sendwin_up(tp, rack); 10488 tp->snd_wl1 = th->th_seq; 10489 tp->snd_wl2 = th->th_ack; 10490 if (tp->snd_wnd > tp->max_sndwnd) 10491 tp->max_sndwnd = tp->snd_wnd; 10492 rack->r_wanted_output = 1; 10493 } else if (thflags & TH_ACK) { 10494 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10495 tp->snd_wnd = tiwin; 10496 rack_validate_fo_sendwin_up(tp, rack); 10497 tp->snd_wl1 = th->th_seq; 10498 tp->snd_wl2 = th->th_ack; 10499 } 10500 } 10501 if (tp->snd_wnd < ctf_outstanding(tp)) 10502 /* The peer collapsed the window */ 10503 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10504 else if (rack->rc_has_collapsed) 10505 rack_un_collapse_window(rack, __LINE__); 10506 if ((rack->r_collapse_point_valid) && 10507 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 10508 rack->r_collapse_point_valid = 0; 10509 /* Was persist timer active and now we have window space? */ 10510 if ((rack->rc_in_persist != 0) && 10511 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10512 rack->r_ctl.rc_pace_min_segs))) { 10513 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10514 tp->snd_nxt = tp->snd_max; 10515 /* Make sure we output to start the timer */ 10516 rack->r_wanted_output = 1; 10517 } 10518 /* Do we enter persists? 
*/ 10519 if ((rack->rc_in_persist == 0) && 10520 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10521 TCPS_HAVEESTABLISHED(tp->t_state) && 10522 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10523 sbavail(&tptosocket(tp)->so_snd) && 10524 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10525 /* 10526 * Here the rwnd is less than 10527 * the pacing size, we are established, 10528 * nothing is outstanding, and there is 10529 * data to send. Enter persists. 10530 */ 10531 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10532 } 10533 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10534 m_freem(m); 10535 return (0); 10536 } 10537 /* 10538 * don't process the URG bit, ignore them drag 10539 * along the up. 10540 */ 10541 tp->rcv_up = tp->rcv_nxt; 10542 10543 /* 10544 * Process the segment text, merging it into the TCP sequencing 10545 * queue, and arranging for acknowledgment of receipt if necessary. 10546 * This process logically involves adjusting tp->rcv_wnd as data is 10547 * presented to the user (this happens in tcp_usrreq.c, case 10548 * PRU_RCVD). If a FIN has already been received on this connection 10549 * then we just ignore the text. 10550 */ 10551 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10552 IS_FASTOPEN(tp->t_flags)); 10553 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10554 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10555 tcp_seq save_start = th->th_seq; 10556 tcp_seq save_rnxt = tp->rcv_nxt; 10557 int save_tlen = tlen; 10558 10559 m_adj(m, drop_hdrlen); /* delayed header drop */ 10560 /* 10561 * Insert segment which includes th into TCP reassembly 10562 * queue with control block tp. Set thflags to whether 10563 * reassembly now includes a segment with FIN. This handles 10564 * the common case inline (segment is the next to be 10565 * received on an established connection, and the queue is 10566 * empty), avoiding linkage into and removal from the queue 10567 * and repetition of various conversions. Set DELACK for 10568 * segments received in order, but ack immediately when 10569 * segments are out of order (so fast retransmit can work). 10570 */ 10571 if (th->th_seq == tp->rcv_nxt && 10572 SEGQ_EMPTY(tp) && 10573 (TCPS_HAVEESTABLISHED(tp->t_state) || 10574 tfo_syn)) { 10575 #ifdef NETFLIX_SB_LIMITS 10576 u_int mcnt, appended; 10577 10578 if (so->so_rcv.sb_shlim) { 10579 mcnt = m_memcnt(m); 10580 appended = 0; 10581 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10582 CFO_NOSLEEP, NULL) == false) { 10583 counter_u64_add(tcp_sb_shlim_fails, 1); 10584 m_freem(m); 10585 return (0); 10586 } 10587 } 10588 #endif 10589 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10590 tp->rcv_nxt += tlen; 10591 if (tlen && 10592 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10593 (tp->t_fbyte_in == 0)) { 10594 tp->t_fbyte_in = ticks; 10595 if (tp->t_fbyte_in == 0) 10596 tp->t_fbyte_in = 1; 10597 if (tp->t_fbyte_out && tp->t_fbyte_in) 10598 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10599 } 10600 thflags = tcp_get_flags(th) & TH_FIN; 10601 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10602 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10603 SOCKBUF_LOCK(&so->so_rcv); 10604 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10605 m_freem(m); 10606 } else 10607 #ifdef NETFLIX_SB_LIMITS 10608 appended = 10609 #endif 10610 sbappendstream_locked(&so->so_rcv, m, 0); 10611 10612 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10613 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10614 sorwakeup_locked(so); 10615 #ifdef NETFLIX_SB_LIMITS 10616 if (so->so_rcv.sb_shlim && appended != mcnt) 10617 counter_fo_release(so->so_rcv.sb_shlim, 10618 mcnt - appended); 10619 #endif 10620 } else { 10621 /* 10622 * XXX: Due to the header drop above "th" is 10623 * theoretically invalid by now. Fortunately 10624 * m_adj() doesn't actually frees any mbufs when 10625 * trimming from the head. 10626 */ 10627 tcp_seq temp = save_start; 10628 10629 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10630 tp->t_flags |= TF_ACKNOW; 10631 if (tp->t_flags & TF_WAKESOR) { 10632 tp->t_flags &= ~TF_WAKESOR; 10633 /* NB: sorwakeup_locked() does an implicit unlock. */ 10634 sorwakeup_locked(so); 10635 } 10636 } 10637 if ((tp->t_flags & TF_SACK_PERMIT) && 10638 (save_tlen > 0) && 10639 TCPS_HAVEESTABLISHED(tp->t_state)) { 10640 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10641 /* 10642 * DSACK actually handled in the fastpath 10643 * above. 10644 */ 10645 RACK_OPTS_INC(tcp_sack_path_1); 10646 tcp_update_sack_list(tp, save_start, 10647 save_start + save_tlen); 10648 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10649 if ((tp->rcv_numsacks >= 1) && 10650 (tp->sackblks[0].end == save_start)) { 10651 /* 10652 * Partial overlap, recorded at todrop 10653 * above. 10654 */ 10655 RACK_OPTS_INC(tcp_sack_path_2a); 10656 tcp_update_sack_list(tp, 10657 tp->sackblks[0].start, 10658 tp->sackblks[0].end); 10659 } else { 10660 RACK_OPTS_INC(tcp_sack_path_2b); 10661 tcp_update_dsack_list(tp, save_start, 10662 save_start + save_tlen); 10663 } 10664 } else if (tlen >= save_tlen) { 10665 /* Update of sackblks. */ 10666 RACK_OPTS_INC(tcp_sack_path_3); 10667 tcp_update_dsack_list(tp, save_start, 10668 save_start + save_tlen); 10669 } else if (tlen > 0) { 10670 RACK_OPTS_INC(tcp_sack_path_4); 10671 tcp_update_dsack_list(tp, save_start, 10672 save_start + tlen); 10673 } 10674 } 10675 } else { 10676 m_freem(m); 10677 thflags &= ~TH_FIN; 10678 } 10679 10680 /* 10681 * If FIN is received ACK the FIN and let the user know that the 10682 * connection is closing. 10683 */ 10684 if (thflags & TH_FIN) { 10685 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10686 /* The socket upcall is handled by socantrcvmore. */ 10687 socantrcvmore(so); 10688 /* 10689 * If connection is half-synchronized (ie NEEDSYN 10690 * flag on) then delay ACK, so it may be piggybacked 10691 * when SYN is sent. Otherwise, since we received a 10692 * FIN then no more input can be expected, send ACK 10693 * now. 10694 */ 10695 if (tp->t_flags & TF_NEEDSYN) { 10696 rack_timer_cancel(tp, rack, 10697 rack->r_ctl.rc_rcvtime, __LINE__); 10698 tp->t_flags |= TF_DELACK; 10699 } else { 10700 tp->t_flags |= TF_ACKNOW; 10701 } 10702 tp->rcv_nxt++; 10703 } 10704 switch (tp->t_state) { 10705 /* 10706 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10707 * CLOSE_WAIT state. 10708 */ 10709 case TCPS_SYN_RECEIVED: 10710 tp->t_starttime = ticks; 10711 /* FALLTHROUGH */ 10712 case TCPS_ESTABLISHED: 10713 rack_timer_cancel(tp, rack, 10714 rack->r_ctl.rc_rcvtime, __LINE__); 10715 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10716 break; 10717 10718 /* 10719 * If still in FIN_WAIT_1 STATE FIN has not been 10720 * acked so enter the CLOSING state. 10721 */ 10722 case TCPS_FIN_WAIT_1: 10723 rack_timer_cancel(tp, rack, 10724 rack->r_ctl.rc_rcvtime, __LINE__); 10725 tcp_state_change(tp, TCPS_CLOSING); 10726 break; 10727 10728 /* 10729 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10730 * starting the time-wait timer, turning off the 10731 * other standard timers. 
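 *
 * Note that this case hands the connection over to the time-wait
 * handling via tcp_twstart() and then returns 1, following the
 * convention documented above this function that the TCB is then
 * unlocked and most likely gone, rather than falling through to the
 * r_wanted_output check at the end of this function.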
10732 */ 10733 case TCPS_FIN_WAIT_2: 10734 rack_timer_cancel(tp, rack, 10735 rack->r_ctl.rc_rcvtime, __LINE__); 10736 tcp_twstart(tp); 10737 return (1); 10738 } 10739 } 10740 /* 10741 * Return any desired output. 10742 */ 10743 if ((tp->t_flags & TF_ACKNOW) || 10744 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10745 rack->r_wanted_output = 1; 10746 } 10747 return (0); 10748 } 10749 10750 /* 10751 * Here nothing is really faster, its just that we 10752 * have broken out the fast-data path also just like 10753 * the fast-ack. 10754 */ 10755 static int 10756 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10757 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10758 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10759 { 10760 int32_t nsegs; 10761 int32_t newsize = 0; /* automatic sockbuf scaling */ 10762 struct tcp_rack *rack; 10763 #ifdef NETFLIX_SB_LIMITS 10764 u_int mcnt, appended; 10765 #endif 10766 10767 /* 10768 * If last ACK falls within this segment's sequence numbers, record 10769 * the timestamp. NOTE that the test is modified according to the 10770 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10771 */ 10772 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10773 return (0); 10774 } 10775 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10776 return (0); 10777 } 10778 if (tiwin && tiwin != tp->snd_wnd) { 10779 return (0); 10780 } 10781 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10782 return (0); 10783 } 10784 if (__predict_false((to->to_flags & TOF_TS) && 10785 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10786 return (0); 10787 } 10788 if (__predict_false((th->th_ack != tp->snd_una))) { 10789 return (0); 10790 } 10791 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10792 return (0); 10793 } 10794 if ((to->to_flags & TOF_TS) != 0 && 10795 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10796 tp->ts_recent_age = tcp_ts_getticks(); 10797 tp->ts_recent = to->to_tsval; 10798 } 10799 rack = (struct tcp_rack *)tp->t_fb_ptr; 10800 /* 10801 * This is a pure, in-sequence data packet with nothing on the 10802 * reassembly queue and we have enough buffer space to take it. 10803 */ 10804 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10805 10806 #ifdef NETFLIX_SB_LIMITS 10807 if (so->so_rcv.sb_shlim) { 10808 mcnt = m_memcnt(m); 10809 appended = 0; 10810 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10811 CFO_NOSLEEP, NULL) == false) { 10812 counter_u64_add(tcp_sb_shlim_fails, 1); 10813 m_freem(m); 10814 return (1); 10815 } 10816 } 10817 #endif 10818 /* Clean receiver SACK report if present */ 10819 if (tp->rcv_numsacks) 10820 tcp_clean_sackreport(tp); 10821 KMOD_TCPSTAT_INC(tcps_preddat); 10822 tp->rcv_nxt += tlen; 10823 if (tlen && 10824 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10825 (tp->t_fbyte_in == 0)) { 10826 tp->t_fbyte_in = ticks; 10827 if (tp->t_fbyte_in == 0) 10828 tp->t_fbyte_in = 1; 10829 if (tp->t_fbyte_out && tp->t_fbyte_in) 10830 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10831 } 10832 /* 10833 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10834 */ 10835 tp->snd_wl1 = th->th_seq; 10836 /* 10837 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10838 */ 10839 tp->rcv_up = tp->rcv_nxt; 10840 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10841 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10842 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10843 10844 /* Add data to socket buffer. 
*/ 10845 SOCKBUF_LOCK(&so->so_rcv); 10846 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10847 m_freem(m); 10848 } else { 10849 /* 10850 * Set new socket buffer size. Give up when limit is 10851 * reached. 10852 */ 10853 if (newsize) 10854 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 10855 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10856 m_adj(m, drop_hdrlen); /* delayed header drop */ 10857 #ifdef NETFLIX_SB_LIMITS 10858 appended = 10859 #endif 10860 sbappendstream_locked(&so->so_rcv, m, 0); 10861 ctf_calc_rwin(so, tp); 10862 } 10863 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10864 /* NB: sorwakeup_locked() does an implicit unlock. */ 10865 sorwakeup_locked(so); 10866 #ifdef NETFLIX_SB_LIMITS 10867 if (so->so_rcv.sb_shlim && mcnt != appended) 10868 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10869 #endif 10870 rack_handle_delayed_ack(tp, rack, tlen, 0); 10871 if (tp->snd_una == tp->snd_max) 10872 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10873 return (1); 10874 } 10875 10876 /* 10877 * This subfunction is used to try to highly optimize the 10878 * fast path. We again allow window updates that are 10879 * in sequence to remain in the fast-path. We also add 10880 * in the __predict's to attempt to help the compiler. 10881 * Note that if we return a 0, then we can *not* process 10882 * it and the caller should push the packet into the 10883 * slow-path. 10884 */ 10885 static int 10886 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10887 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10888 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10889 { 10890 int32_t acked; 10891 int32_t nsegs; 10892 int32_t under_pacing = 0; 10893 struct tcp_rack *rack; 10894 10895 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10896 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10897 return (0); 10898 } 10899 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10900 /* Above what we have sent? */ 10901 return (0); 10902 } 10903 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10904 /* We are retransmitting */ 10905 return (0); 10906 } 10907 if (__predict_false(tiwin == 0)) { 10908 /* zero window */ 10909 return (0); 10910 } 10911 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10912 /* We need a SYN or a FIN, unlikely.. */ 10913 return (0); 10914 } 10915 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10916 /* Timestamp is behind .. old ack with seq wrap? */ 10917 return (0); 10918 } 10919 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10920 /* Still recovering */ 10921 return (0); 10922 } 10923 rack = (struct tcp_rack *)tp->t_fb_ptr; 10924 if (rack->r_ctl.rc_sacked) { 10925 /* We have sack holes on our scoreboard */ 10926 return (0); 10927 } 10928 /* Ok if we reach here, we can process a fast-ack */ 10929 if (rack->gp_ready && 10930 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10931 under_pacing = 1; 10932 } 10933 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10934 rack_log_ack(tp, to, th, 0, 0); 10935 /* Did the window get updated? */ 10936 if (tiwin != tp->snd_wnd) { 10937 tp->snd_wnd = tiwin; 10938 rack_validate_fo_sendwin_up(tp, rack); 10939 tp->snd_wl1 = th->th_seq; 10940 if (tp->snd_wnd > tp->max_sndwnd) 10941 tp->max_sndwnd = tp->snd_wnd; 10942 } 10943 /* Do we exit persists? 
*/ 10944 if ((rack->rc_in_persist != 0) && 10945 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10946 rack->r_ctl.rc_pace_min_segs))) { 10947 rack_exit_persist(tp, rack, cts); 10948 } 10949 /* Do we enter persists? */ 10950 if ((rack->rc_in_persist == 0) && 10951 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10952 TCPS_HAVEESTABLISHED(tp->t_state) && 10953 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10954 sbavail(&tptosocket(tp)->so_snd) && 10955 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10956 /* 10957 * Here the rwnd is less than 10958 * the pacing size, we are established, 10959 * nothing is outstanding, and there is 10960 * data to send. Enter persists. 10961 */ 10962 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10963 } 10964 /* 10965 * If last ACK falls within this segment's sequence numbers, record 10966 * the timestamp. NOTE that the test is modified according to the 10967 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10968 */ 10969 if ((to->to_flags & TOF_TS) != 0 && 10970 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10971 tp->ts_recent_age = tcp_ts_getticks(); 10972 tp->ts_recent = to->to_tsval; 10973 } 10974 /* 10975 * This is a pure ack for outstanding data. 10976 */ 10977 KMOD_TCPSTAT_INC(tcps_predack); 10978 10979 /* 10980 * "bad retransmit" recovery. 10981 */ 10982 if ((tp->t_flags & TF_PREVVALID) && 10983 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10984 tp->t_flags &= ~TF_PREVVALID; 10985 if (tp->t_rxtshift == 1 && 10986 (int)(ticks - tp->t_badrxtwin) < 0) 10987 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10988 } 10989 /* 10990 * Recalculate the transmit timer / rtt. 10991 * 10992 * Some boxes send broken timestamp replies during the SYN+ACK 10993 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10994 * and blow up the retransmit timer. 10995 */ 10996 acked = BYTES_THIS_ACK(tp, th); 10997 10998 #ifdef TCP_HHOOK 10999 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11000 hhook_run_tcp_est_in(tp, th, to); 11001 #endif 11002 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11003 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11004 if (acked) { 11005 struct mbuf *mfree; 11006 11007 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11008 SOCKBUF_LOCK(&so->so_snd); 11009 mfree = sbcut_locked(&so->so_snd, acked); 11010 tp->snd_una = th->th_ack; 11011 /* Note we want to hold the sb lock through the sendmap adjust */ 11012 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11013 /* Wake up the socket if we have room to write more */ 11014 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11015 sowwakeup_locked(so); 11016 m_freem(mfree); 11017 tp->t_rxtshift = 0; 11018 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11019 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11020 rack->rc_tlp_in_progress = 0; 11021 rack->r_ctl.rc_tlp_cnt_out = 0; 11022 /* 11023 * If it is the RXT timer we want to 11024 * stop it, so we can restart a TLP. 11025 */ 11026 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11027 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11028 #ifdef NETFLIX_HTTP_LOGGING 11029 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11030 #endif 11031 } 11032 /* 11033 * Let the congestion control algorithm update congestion control 11034 * related information. This typically means increasing the 11035 * congestion window. 
11036 */ 11037 if (tp->snd_wnd < ctf_outstanding(tp)) { 11038 /* The peer collapsed the window */ 11039 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 11040 } else if (rack->rc_has_collapsed) 11041 rack_un_collapse_window(rack, __LINE__); 11042 if ((rack->r_collapse_point_valid) && 11043 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 11044 rack->r_collapse_point_valid = 0; 11045 /* 11046 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11047 */ 11048 tp->snd_wl2 = th->th_ack; 11049 tp->t_dupacks = 0; 11050 m_freem(m); 11051 /* ND6_HINT(tp); *//* Some progress has been made. */ 11052 11053 /* 11054 * If all outstanding data are acked, stop retransmit timer, 11055 * otherwise restart timer using current (possibly backed-off) 11056 * value. If process is waiting for space, wakeup/selwakeup/signal. 11057 * If data are ready to send, let tcp_output decide between more 11058 * output or persist. 11059 */ 11060 if (under_pacing && 11061 (rack->use_fixed_rate == 0) && 11062 (rack->in_probe_rtt == 0) && 11063 rack->rc_gp_dyn_mul && 11064 rack->rc_always_pace) { 11065 /* Check if we are dragging bottom */ 11066 rack_check_bottom_drag(tp, rack, so, acked); 11067 } 11068 if (tp->snd_una == tp->snd_max) { 11069 tp->t_flags &= ~TF_PREVVALID; 11070 rack->r_ctl.retran_during_recovery = 0; 11071 rack->r_ctl.dsack_byte_cnt = 0; 11072 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11073 if (rack->r_ctl.rc_went_idle_time == 0) 11074 rack->r_ctl.rc_went_idle_time = 1; 11075 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11076 if (sbavail(&tptosocket(tp)->so_snd) == 0) 11077 tp->t_acktime = 0; 11078 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11079 } 11080 if (acked && rack->r_fast_output) 11081 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11082 if (sbavail(&so->so_snd)) { 11083 rack->r_wanted_output = 1; 11084 } 11085 return (1); 11086 } 11087 11088 /* 11089 * Return value of 1, the TCB is unlocked and most 11090 * likely gone, return value of 0, the TCP is still 11091 * locked. 11092 */ 11093 static int 11094 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11095 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11096 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11097 { 11098 int32_t ret_val = 0; 11099 int32_t todrop; 11100 int32_t ourfinisacked = 0; 11101 struct tcp_rack *rack; 11102 11103 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11104 11105 ctf_calc_rwin(so, tp); 11106 /* 11107 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11108 * SYN, drop the input. if seg contains a RST, then drop the 11109 * connection. if seg does not contain SYN, then drop it. Otherwise 11110 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11111 * tp->irs if seg contains ack then advance tp->snd_una if seg 11112 * contains an ECE and ECN support is enabled, the stream is ECN 11113 * capable. if SYN has been acked change to ESTABLISHED else 11114 * SYN_RCVD state arrange for segment to be acked (eventually) 11115 * continue processing rest of data/controls. 
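 *
 * In outline, the checks below come down to (illustrative summary):
 *
 *	ACK outside (iss, snd_max]        -> drop with reset
 *	ACK together with RST             -> connection refused, drop
 *	RST alone                         -> drop
 *	no SYN                            -> drop
 *	SYN with ACK                      -> ESTABLISHED, or FIN_WAIT_1
 *	                                     when TF_NEEDFIN is set
 *	SYN alone (simultaneous open)     -> SYN_RECEIVED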
11116 */ 11117 if ((thflags & TH_ACK) && 11118 (SEQ_LEQ(th->th_ack, tp->iss) || 11119 SEQ_GT(th->th_ack, tp->snd_max))) { 11120 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11121 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11122 return (1); 11123 } 11124 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11125 TCP_PROBE5(connect__refused, NULL, tp, 11126 mtod(m, const char *), tp, th); 11127 tp = tcp_drop(tp, ECONNREFUSED); 11128 ctf_do_drop(m, tp); 11129 return (1); 11130 } 11131 if (thflags & TH_RST) { 11132 ctf_do_drop(m, tp); 11133 return (1); 11134 } 11135 if (!(thflags & TH_SYN)) { 11136 ctf_do_drop(m, tp); 11137 return (1); 11138 } 11139 tp->irs = th->th_seq; 11140 tcp_rcvseqinit(tp); 11141 rack = (struct tcp_rack *)tp->t_fb_ptr; 11142 if (thflags & TH_ACK) { 11143 int tfo_partial = 0; 11144 11145 KMOD_TCPSTAT_INC(tcps_connects); 11146 soisconnected(so); 11147 #ifdef MAC 11148 mac_socketpeer_set_from_mbuf(m, so); 11149 #endif 11150 /* Do window scaling on this connection? */ 11151 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11152 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11153 tp->rcv_scale = tp->request_r_scale; 11154 } 11155 tp->rcv_adv += min(tp->rcv_wnd, 11156 TCP_MAXWIN << tp->rcv_scale); 11157 /* 11158 * If not all the data that was sent in the TFO SYN 11159 * has been acked, resend the remainder right away. 11160 */ 11161 if (IS_FASTOPEN(tp->t_flags) && 11162 (tp->snd_una != tp->snd_max)) { 11163 tp->snd_nxt = th->th_ack; 11164 tfo_partial = 1; 11165 } 11166 /* 11167 * If there's data, delay ACK; if there's also a FIN ACKNOW 11168 * will be turned on later. 11169 */ 11170 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11171 rack_timer_cancel(tp, rack, 11172 rack->r_ctl.rc_rcvtime, __LINE__); 11173 tp->t_flags |= TF_DELACK; 11174 } else { 11175 rack->r_wanted_output = 1; 11176 tp->t_flags |= TF_ACKNOW; 11177 rack->rc_dack_toggle = 0; 11178 } 11179 11180 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11181 11182 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11183 /* 11184 * We advance snd_una for the 11185 * fast open case. If th_ack is 11186 * acknowledging data beyond 11187 * snd_una we can't just call 11188 * ack-processing since the 11189 * data stream in our send-map 11190 * will start at snd_una + 1 (one 11191 * beyond the SYN). If its just 11192 * equal we don't need to do that 11193 * and there is no send_map. 11194 */ 11195 tp->snd_una++; 11196 } 11197 /* 11198 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11199 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11200 */ 11201 tp->t_starttime = ticks; 11202 if (tp->t_flags & TF_NEEDFIN) { 11203 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11204 tp->t_flags &= ~TF_NEEDFIN; 11205 thflags &= ~TH_SYN; 11206 } else { 11207 tcp_state_change(tp, TCPS_ESTABLISHED); 11208 TCP_PROBE5(connect__established, NULL, tp, 11209 mtod(m, const char *), tp, th); 11210 rack_cc_conn_init(tp); 11211 } 11212 } else { 11213 /* 11214 * Received initial SYN in SYN-SENT[*] state => simultaneous 11215 * open. If segment contains CC option and there is a 11216 * cached CC, apply TAO test. If it succeeds, connection is * 11217 * half-synchronized. Otherwise, do 3-way handshake: 11218 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11219 * there was no CC option, clear cached CC value. 11220 */ 11221 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 11222 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11223 } 11224 /* 11225 * Advance th->th_seq to correspond to first data byte. 
If data, 11226 * trim to stay within window, dropping FIN if necessary. 11227 */ 11228 th->th_seq++; 11229 if (tlen > tp->rcv_wnd) { 11230 todrop = tlen - tp->rcv_wnd; 11231 m_adj(m, -todrop); 11232 tlen = tp->rcv_wnd; 11233 thflags &= ~TH_FIN; 11234 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11235 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11236 } 11237 tp->snd_wl1 = th->th_seq - 1; 11238 tp->rcv_up = th->th_seq; 11239 /* 11240 * Client side of transaction: already sent SYN and data. If the 11241 * remote host used T/TCP to validate the SYN, our data will be 11242 * ACK'd; if so, enter normal data segment processing in the middle 11243 * of step 5, ack processing. Otherwise, goto step 6. 11244 */ 11245 if (thflags & TH_ACK) { 11246 /* For syn-sent we need to possibly update the rtt */ 11247 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11248 uint32_t t, mcts; 11249 11250 mcts = tcp_ts_getticks(); 11251 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11252 if (!tp->t_rttlow || tp->t_rttlow > t) 11253 tp->t_rttlow = t; 11254 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11255 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11256 tcp_rack_xmit_timer_commit(rack, tp); 11257 } 11258 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11259 return (ret_val); 11260 /* We may have changed to FIN_WAIT_1 above */ 11261 if (tp->t_state == TCPS_FIN_WAIT_1) { 11262 /* 11263 * In FIN_WAIT_1 STATE in addition to the processing 11264 * for the ESTABLISHED state if our FIN is now 11265 * acknowledged then enter FIN_WAIT_2. 11266 */ 11267 if (ourfinisacked) { 11268 /* 11269 * If we can't receive any more data, then 11270 * closing user can proceed. Starting the 11271 * timer is contrary to the specification, 11272 * but if we don't get a FIN we'll hang 11273 * forever. 11274 * 11275 * XXXjl: we should release the tp also, and 11276 * use a compressed state. 11277 */ 11278 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11279 soisdisconnected(so); 11280 tcp_timer_activate(tp, TT_2MSL, 11281 (tcp_fast_finwait2_recycle ? 11282 tcp_finwait2_timeout : 11283 TP_MAXIDLE(tp))); 11284 } 11285 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11286 } 11287 } 11288 } 11289 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11290 tiwin, thflags, nxt_pkt)); 11291 } 11292 11293 /* 11294 * Return value of 1, the TCB is unlocked and most 11295 * likely gone, return value of 0, the TCP is still 11296 * locked. 11297 */ 11298 static int 11299 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11300 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11301 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11302 { 11303 struct tcp_rack *rack; 11304 int32_t ret_val = 0; 11305 int32_t ourfinisacked = 0; 11306 11307 ctf_calc_rwin(so, tp); 11308 if ((thflags & TH_ACK) && 11309 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11310 SEQ_GT(th->th_ack, tp->snd_max))) { 11311 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11312 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11313 return (1); 11314 } 11315 rack = (struct tcp_rack *)tp->t_fb_ptr; 11316 if (IS_FASTOPEN(tp->t_flags)) { 11317 /* 11318 * When a TFO connection is in SYN_RECEIVED, the 11319 * only valid packets are the initial SYN, a 11320 * retransmit/copy of the initial SYN (possibly with 11321 * a subset of the original data), a valid ACK, a 11322 * FIN, or a RST. 
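 *
 * Sketching the checks that follow:
 *
 *	SYN and ACK both set                    -> reset the connection
 *	bare SYN while a retransmit, TLP or
 *	RACK timer is pending                   -> silently dropped
 *	none of ACK, FIN or RST                 -> silently dropped
 *	anything else                           -> normal SYN_RECEIVED
 *	                                           processing below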
11323 */ 11324 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11325 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11326 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11327 return (1); 11328 } else if (thflags & TH_SYN) { 11329 /* non-initial SYN is ignored */ 11330 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11331 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11332 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11333 ctf_do_drop(m, NULL); 11334 return (0); 11335 } 11336 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11337 ctf_do_drop(m, NULL); 11338 return (0); 11339 } 11340 } 11341 11342 if ((thflags & TH_RST) || 11343 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11344 return (__ctf_process_rst(m, th, so, tp, 11345 &rack->r_ctl.challenge_ack_ts, 11346 &rack->r_ctl.challenge_ack_cnt)); 11347 /* 11348 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11349 * it's less than ts_recent, drop it. 11350 */ 11351 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11352 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11353 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11354 return (ret_val); 11355 } 11356 /* 11357 * In the SYN-RECEIVED state, validate that the packet belongs to 11358 * this connection before trimming the data to fit the receive 11359 * window. Check the sequence number versus IRS since we know the 11360 * sequence numbers haven't wrapped. This is a partial fix for the 11361 * "LAND" DoS attack. 11362 */ 11363 if (SEQ_LT(th->th_seq, tp->irs)) { 11364 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11365 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11366 return (1); 11367 } 11368 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11369 &rack->r_ctl.challenge_ack_ts, 11370 &rack->r_ctl.challenge_ack_cnt)) { 11371 return (ret_val); 11372 } 11373 /* 11374 * If last ACK falls within this segment's sequence numbers, record 11375 * its timestamp. NOTE: 1) That the test incorporates suggestions 11376 * from the latest proposal of the tcplw@cray.com list (Braden 11377 * 1993/04/26). 2) That updating only on newer timestamps interferes 11378 * with our earlier PAWS tests, so this check should be solely 11379 * predicated on the sequence space of this segment. 3) That we 11380 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11381 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11382 * SEG.Len, This modified check allows us to overcome RFC1323's 11383 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11384 * p.869. In such cases, we can still calculate the RTT correctly 11385 * when RCV.NXT == Last.ACK.Sent. 11386 */ 11387 if ((to->to_flags & TOF_TS) != 0 && 11388 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11389 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11390 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11391 tp->ts_recent_age = tcp_ts_getticks(); 11392 tp->ts_recent = to->to_tsval; 11393 } 11394 tp->snd_wnd = tiwin; 11395 rack_validate_fo_sendwin_up(tp, rack); 11396 /* 11397 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11398 * is on (half-synchronized state), then queue data for later 11399 * processing; else drop segment and return. 
11400 */ 11401 if ((thflags & TH_ACK) == 0) { 11402 if (IS_FASTOPEN(tp->t_flags)) { 11403 rack_cc_conn_init(tp); 11404 } 11405 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11406 tiwin, thflags, nxt_pkt)); 11407 } 11408 KMOD_TCPSTAT_INC(tcps_connects); 11409 if (tp->t_flags & TF_SONOTCONN) { 11410 tp->t_flags &= ~TF_SONOTCONN; 11411 soisconnected(so); 11412 } 11413 /* Do window scaling? */ 11414 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11415 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11416 tp->rcv_scale = tp->request_r_scale; 11417 } 11418 /* 11419 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11420 * FIN-WAIT-1 11421 */ 11422 tp->t_starttime = ticks; 11423 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11424 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11425 tp->t_tfo_pending = NULL; 11426 } 11427 if (tp->t_flags & TF_NEEDFIN) { 11428 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11429 tp->t_flags &= ~TF_NEEDFIN; 11430 } else { 11431 tcp_state_change(tp, TCPS_ESTABLISHED); 11432 TCP_PROBE5(accept__established, NULL, tp, 11433 mtod(m, const char *), tp, th); 11434 /* 11435 * TFO connections call cc_conn_init() during SYN 11436 * processing. Calling it again here for such connections 11437 * is not harmless as it would undo the snd_cwnd reduction 11438 * that occurs when a TFO SYN|ACK is retransmitted. 11439 */ 11440 if (!IS_FASTOPEN(tp->t_flags)) 11441 rack_cc_conn_init(tp); 11442 } 11443 /* 11444 * Account for the ACK of our SYN prior to 11445 * regular ACK processing below, except for 11446 * simultaneous SYN, which is handled later. 11447 */ 11448 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11449 tp->snd_una++; 11450 /* 11451 * If segment contains data or ACK, will call tcp_reass() later; if 11452 * not, do so now to pass queued data to user. 11453 */ 11454 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11455 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11456 (struct mbuf *)0); 11457 if (tp->t_flags & TF_WAKESOR) { 11458 tp->t_flags &= ~TF_WAKESOR; 11459 /* NB: sorwakeup_locked() does an implicit unlock. */ 11460 sorwakeup_locked(so); 11461 } 11462 } 11463 tp->snd_wl1 = th->th_seq - 1; 11464 /* For syn-recv we need to possibly update the rtt */ 11465 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11466 uint32_t t, mcts; 11467 11468 mcts = tcp_ts_getticks(); 11469 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11470 if (!tp->t_rttlow || tp->t_rttlow > t) 11471 tp->t_rttlow = t; 11472 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11473 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11474 tcp_rack_xmit_timer_commit(rack, tp); 11475 } 11476 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11477 return (ret_val); 11478 } 11479 if (tp->t_state == TCPS_FIN_WAIT_1) { 11480 /* We could have went to FIN_WAIT_1 (or EST) above */ 11481 /* 11482 * In FIN_WAIT_1 STATE in addition to the processing for the 11483 * ESTABLISHED state if our FIN is now acknowledged then 11484 * enter FIN_WAIT_2. 11485 */ 11486 if (ourfinisacked) { 11487 /* 11488 * If we can't receive any more data, then closing 11489 * user can proceed. Starting the timer is contrary 11490 * to the specification, but if we don't get a FIN 11491 * we'll hang forever. 11492 * 11493 * XXXjl: we should release the tp also, and use a 11494 * compressed state. 
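 *
 * In other words, once the receive side has been shut down we arm the
 * FIN_WAIT_2 watchdog below: a short tcp_finwait2_timeout when
 * tcp_fast_finwait2_recycle is enabled, otherwise the full
 * TP_MAXIDLE() idle limit, so a peer that never sends its FIN cannot
 * hold the connection open indefinitely.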
11495 */ 11496 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11497 soisdisconnected(so); 11498 tcp_timer_activate(tp, TT_2MSL, 11499 (tcp_fast_finwait2_recycle ? 11500 tcp_finwait2_timeout : 11501 TP_MAXIDLE(tp))); 11502 } 11503 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11504 } 11505 } 11506 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11507 tiwin, thflags, nxt_pkt)); 11508 } 11509 11510 /* 11511 * Return value of 1, the TCB is unlocked and most 11512 * likely gone, return value of 0, the TCP is still 11513 * locked. 11514 */ 11515 static int 11516 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11517 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11518 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11519 { 11520 int32_t ret_val = 0; 11521 struct tcp_rack *rack; 11522 11523 /* 11524 * Header prediction: check for the two common cases of a 11525 * uni-directional data xfer. If the packet has no control flags, 11526 * is in-sequence, the window didn't change and we're not 11527 * retransmitting, it's a candidate. If the length is zero and the 11528 * ack moved forward, we're the sender side of the xfer. Just free 11529 * the data acked & wake any higher level process that was blocked 11530 * waiting for space. If the length is non-zero and the ack didn't 11531 * move, we're the receiver side. If we're getting packets in-order 11532 * (the reassembly queue is empty), add the data toc The socket 11533 * buffer and note that we need a delayed ack. Make sure that the 11534 * hidden state-flags are also off. Since we check for 11535 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11536 */ 11537 rack = (struct tcp_rack *)tp->t_fb_ptr; 11538 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11539 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11540 __predict_true(SEGQ_EMPTY(tp)) && 11541 __predict_true(th->th_seq == tp->rcv_nxt)) { 11542 if (tlen == 0) { 11543 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11544 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11545 return (0); 11546 } 11547 } else { 11548 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11549 tiwin, nxt_pkt, iptos)) { 11550 return (0); 11551 } 11552 } 11553 } 11554 ctf_calc_rwin(so, tp); 11555 11556 if ((thflags & TH_RST) || 11557 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11558 return (__ctf_process_rst(m, th, so, tp, 11559 &rack->r_ctl.challenge_ack_ts, 11560 &rack->r_ctl.challenge_ack_cnt)); 11561 11562 /* 11563 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11564 * synchronized state. 11565 */ 11566 if (thflags & TH_SYN) { 11567 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11568 return (ret_val); 11569 } 11570 /* 11571 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11572 * it's less than ts_recent, drop it. 11573 */ 11574 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11575 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11576 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11577 return (ret_val); 11578 } 11579 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11580 &rack->r_ctl.challenge_ack_ts, 11581 &rack->r_ctl.challenge_ack_cnt)) { 11582 return (ret_val); 11583 } 11584 /* 11585 * If last ACK falls within this segment's sequence numbers, record 11586 * its timestamp. NOTE: 1) That the test incorporates suggestions 11587 * from the latest proposal of the tcplw@cray.com list (Braden 11588 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11589 * with our earlier PAWS tests, so this check should be solely 11590 * predicated on the sequence space of this segment. 3) That we 11591 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11592 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11593 * SEG.Len, This modified check allows us to overcome RFC1323's 11594 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11595 * p.869. In such cases, we can still calculate the RTT correctly 11596 * when RCV.NXT == Last.ACK.Sent. 11597 */ 11598 if ((to->to_flags & TOF_TS) != 0 && 11599 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11600 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11601 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11602 tp->ts_recent_age = tcp_ts_getticks(); 11603 tp->ts_recent = to->to_tsval; 11604 } 11605 /* 11606 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11607 * is on (half-synchronized state), then queue data for later 11608 * processing; else drop segment and return. 11609 */ 11610 if ((thflags & TH_ACK) == 0) { 11611 if (tp->t_flags & TF_NEEDSYN) { 11612 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11613 tiwin, thflags, nxt_pkt)); 11614 11615 } else if (tp->t_flags & TF_ACKNOW) { 11616 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11617 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11618 return (ret_val); 11619 } else { 11620 ctf_do_drop(m, NULL); 11621 return (0); 11622 } 11623 } 11624 /* 11625 * Ack processing. 11626 */ 11627 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11628 return (ret_val); 11629 } 11630 if (sbavail(&so->so_snd)) { 11631 if (ctf_progress_timeout_check(tp, true)) { 11632 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11633 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11634 return (1); 11635 } 11636 } 11637 /* State changes only happen in rack_process_data() */ 11638 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11639 tiwin, thflags, nxt_pkt)); 11640 } 11641 11642 /* 11643 * Return value of 1, the TCB is unlocked and most 11644 * likely gone, return value of 0, the TCP is still 11645 * locked. 11646 */ 11647 static int 11648 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11649 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11650 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11651 { 11652 int32_t ret_val = 0; 11653 struct tcp_rack *rack; 11654 11655 rack = (struct tcp_rack *)tp->t_fb_ptr; 11656 ctf_calc_rwin(so, tp); 11657 if ((thflags & TH_RST) || 11658 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11659 return (__ctf_process_rst(m, th, so, tp, 11660 &rack->r_ctl.challenge_ack_ts, 11661 &rack->r_ctl.challenge_ack_cnt)); 11662 /* 11663 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11664 * synchronized state. 11665 */ 11666 if (thflags & TH_SYN) { 11667 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11668 return (ret_val); 11669 } 11670 /* 11671 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11672 * it's less than ts_recent, drop it. 
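 *
 * For example, if ts_recent is 1000 and a delayed segment shows
 * up carrying a tsval of 990, the test below matches and
 * ctf_ts_check() settles its fate: a ts_recent that has sat
 * idle past the PAWS limit (roughly 24 days) is invalidated and
 * the segment accepted, otherwise the segment is acked and
 * dropped as a stale duplicate.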
11673 */ 11674 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11675 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11676 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11677 return (ret_val); 11678 } 11679 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11680 &rack->r_ctl.challenge_ack_ts, 11681 &rack->r_ctl.challenge_ack_cnt)) { 11682 return (ret_val); 11683 } 11684 /* 11685 * If last ACK falls within this segment's sequence numbers, record 11686 * its timestamp. NOTE: 1) That the test incorporates suggestions 11687 * from the latest proposal of the tcplw@cray.com list (Braden 11688 * 1993/04/26). 2) That updating only on newer timestamps interferes 11689 * with our earlier PAWS tests, so this check should be solely 11690 * predicated on the sequence space of this segment. 3) That we 11691 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11692 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11693 * SEG.Len, This modified check allows us to overcome RFC1323's 11694 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11695 * p.869. In such cases, we can still calculate the RTT correctly 11696 * when RCV.NXT == Last.ACK.Sent. 11697 */ 11698 if ((to->to_flags & TOF_TS) != 0 && 11699 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11700 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11701 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11702 tp->ts_recent_age = tcp_ts_getticks(); 11703 tp->ts_recent = to->to_tsval; 11704 } 11705 /* 11706 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11707 * is on (half-synchronized state), then queue data for later 11708 * processing; else drop segment and return. 11709 */ 11710 if ((thflags & TH_ACK) == 0) { 11711 if (tp->t_flags & TF_NEEDSYN) { 11712 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11713 tiwin, thflags, nxt_pkt)); 11714 11715 } else if (tp->t_flags & TF_ACKNOW) { 11716 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11717 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11718 return (ret_val); 11719 } else { 11720 ctf_do_drop(m, NULL); 11721 return (0); 11722 } 11723 } 11724 /* 11725 * Ack processing. 
11726 */ 11727 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11728 return (ret_val); 11729 } 11730 if (sbavail(&so->so_snd)) { 11731 if (ctf_progress_timeout_check(tp, true)) { 11732 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11733 tp, tick, PROGRESS_DROP, __LINE__); 11734 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11735 return (1); 11736 } 11737 } 11738 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11739 tiwin, thflags, nxt_pkt)); 11740 } 11741 11742 static int 11743 rack_check_data_after_close(struct mbuf *m, 11744 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11745 { 11746 struct tcp_rack *rack; 11747 11748 rack = (struct tcp_rack *)tp->t_fb_ptr; 11749 if (rack->rc_allow_data_af_clo == 0) { 11750 close_now: 11751 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11752 /* tcp_close will kill the inp pre-log the Reset */ 11753 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11754 tp = tcp_close(tp); 11755 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11756 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11757 return (1); 11758 } 11759 if (sbavail(&so->so_snd) == 0) 11760 goto close_now; 11761 /* Ok we allow data that is ignored and a followup reset */ 11762 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11763 tp->rcv_nxt = th->th_seq + *tlen; 11764 tp->t_flags2 |= TF2_DROP_AF_DATA; 11765 rack->r_wanted_output = 1; 11766 *tlen = 0; 11767 return (0); 11768 } 11769 11770 /* 11771 * Return value of 1, the TCB is unlocked and most 11772 * likely gone, return value of 0, the TCP is still 11773 * locked. 11774 */ 11775 static int 11776 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11777 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11778 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11779 { 11780 int32_t ret_val = 0; 11781 int32_t ourfinisacked = 0; 11782 struct tcp_rack *rack; 11783 11784 rack = (struct tcp_rack *)tp->t_fb_ptr; 11785 ctf_calc_rwin(so, tp); 11786 11787 if ((thflags & TH_RST) || 11788 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11789 return (__ctf_process_rst(m, th, so, tp, 11790 &rack->r_ctl.challenge_ack_ts, 11791 &rack->r_ctl.challenge_ack_cnt)); 11792 /* 11793 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11794 * synchronized state. 11795 */ 11796 if (thflags & TH_SYN) { 11797 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11798 return (ret_val); 11799 } 11800 /* 11801 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11802 * it's less than ts_recent, drop it. 11803 */ 11804 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11805 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11806 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11807 return (ret_val); 11808 } 11809 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11810 &rack->r_ctl.challenge_ack_ts, 11811 &rack->r_ctl.challenge_ack_cnt)) { 11812 return (ret_val); 11813 } 11814 /* 11815 * If new data are received on a connection after the user processes 11816 * are gone, then RST the other end. 11817 */ 11818 if ((tp->t_flags & TF_CLOSED) && tlen && 11819 rack_check_data_after_close(m, tp, &tlen, th, so)) 11820 return (1); 11821 /* 11822 * If last ACK falls within this segment's sequence numbers, record 11823 * its timestamp. NOTE: 1) That the test incorporates suggestions 11824 * from the latest proposal of the tcplw@cray.com list (Braden 11825 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11826 * with our earlier PAWS tests, so this check should be solely 11827 * predicated on the sequence space of this segment. 3) That we 11828 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11829 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11830 * SEG.Len, This modified check allows us to overcome RFC1323's 11831 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11832 * p.869. In such cases, we can still calculate the RTT correctly 11833 * when RCV.NXT == Last.ACK.Sent. 11834 */ 11835 if ((to->to_flags & TOF_TS) != 0 && 11836 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11837 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11838 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11839 tp->ts_recent_age = tcp_ts_getticks(); 11840 tp->ts_recent = to->to_tsval; 11841 } 11842 /* 11843 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11844 * is on (half-synchronized state), then queue data for later 11845 * processing; else drop segment and return. 11846 */ 11847 if ((thflags & TH_ACK) == 0) { 11848 if (tp->t_flags & TF_NEEDSYN) { 11849 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11850 tiwin, thflags, nxt_pkt)); 11851 } else if (tp->t_flags & TF_ACKNOW) { 11852 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11853 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11854 return (ret_val); 11855 } else { 11856 ctf_do_drop(m, NULL); 11857 return (0); 11858 } 11859 } 11860 /* 11861 * Ack processing. 11862 */ 11863 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11864 return (ret_val); 11865 } 11866 if (ourfinisacked) { 11867 /* 11868 * If we can't receive any more data, then closing user can 11869 * proceed. Starting the timer is contrary to the 11870 * specification, but if we don't get a FIN we'll hang 11871 * forever. 11872 * 11873 * XXXjl: we should release the tp also, and use a 11874 * compressed state. 11875 */ 11876 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11877 soisdisconnected(so); 11878 tcp_timer_activate(tp, TT_2MSL, 11879 (tcp_fast_finwait2_recycle ? 11880 tcp_finwait2_timeout : 11881 TP_MAXIDLE(tp))); 11882 } 11883 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11884 } 11885 if (sbavail(&so->so_snd)) { 11886 if (ctf_progress_timeout_check(tp, true)) { 11887 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11888 tp, tick, PROGRESS_DROP, __LINE__); 11889 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11890 return (1); 11891 } 11892 } 11893 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11894 tiwin, thflags, nxt_pkt)); 11895 } 11896 11897 /* 11898 * Return value of 1, the TCB is unlocked and most 11899 * likely gone, return value of 0, the TCP is still 11900 * locked. 
11901 */ 11902 static int 11903 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11904 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11905 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11906 { 11907 int32_t ret_val = 0; 11908 int32_t ourfinisacked = 0; 11909 struct tcp_rack *rack; 11910 11911 rack = (struct tcp_rack *)tp->t_fb_ptr; 11912 ctf_calc_rwin(so, tp); 11913 11914 if ((thflags & TH_RST) || 11915 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11916 return (__ctf_process_rst(m, th, so, tp, 11917 &rack->r_ctl.challenge_ack_ts, 11918 &rack->r_ctl.challenge_ack_cnt)); 11919 /* 11920 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11921 * synchronized state. 11922 */ 11923 if (thflags & TH_SYN) { 11924 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11925 return (ret_val); 11926 } 11927 /* 11928 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11929 * it's less than ts_recent, drop it. 11930 */ 11931 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11932 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11933 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11934 return (ret_val); 11935 } 11936 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11937 &rack->r_ctl.challenge_ack_ts, 11938 &rack->r_ctl.challenge_ack_cnt)) { 11939 return (ret_val); 11940 } 11941 /* 11942 * If new data are received on a connection after the user processes 11943 * are gone, then RST the other end. 11944 */ 11945 if ((tp->t_flags & TF_CLOSED) && tlen && 11946 rack_check_data_after_close(m, tp, &tlen, th, so)) 11947 return (1); 11948 /* 11949 * If last ACK falls within this segment's sequence numbers, record 11950 * its timestamp. NOTE: 1) That the test incorporates suggestions 11951 * from the latest proposal of the tcplw@cray.com list (Braden 11952 * 1993/04/26). 2) That updating only on newer timestamps interferes 11953 * with our earlier PAWS tests, so this check should be solely 11954 * predicated on the sequence space of this segment. 3) That we 11955 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11956 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11957 * SEG.Len, This modified check allows us to overcome RFC1323's 11958 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11959 * p.869. In such cases, we can still calculate the RTT correctly 11960 * when RCV.NXT == Last.ACK.Sent. 11961 */ 11962 if ((to->to_flags & TOF_TS) != 0 && 11963 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11964 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11965 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11966 tp->ts_recent_age = tcp_ts_getticks(); 11967 tp->ts_recent = to->to_tsval; 11968 } 11969 /* 11970 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11971 * is on (half-synchronized state), then queue data for later 11972 * processing; else drop segment and return. 11973 */ 11974 if ((thflags & TH_ACK) == 0) { 11975 if (tp->t_flags & TF_NEEDSYN) { 11976 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11977 tiwin, thflags, nxt_pkt)); 11978 } else if (tp->t_flags & TF_ACKNOW) { 11979 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11980 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11981 return (ret_val); 11982 } else { 11983 ctf_do_drop(m, NULL); 11984 return (0); 11985 } 11986 } 11987 /* 11988 * Ack processing. 
11989 */ 11990 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11991 return (ret_val); 11992 } 11993 if (ourfinisacked) { 11994 tcp_twstart(tp); 11995 m_freem(m); 11996 return (1); 11997 } 11998 if (sbavail(&so->so_snd)) { 11999 if (ctf_progress_timeout_check(tp, true)) { 12000 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12001 tp, tick, PROGRESS_DROP, __LINE__); 12002 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12003 return (1); 12004 } 12005 } 12006 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12007 tiwin, thflags, nxt_pkt)); 12008 } 12009 12010 /* 12011 * Return value of 1, the TCB is unlocked and most 12012 * likely gone, return value of 0, the TCP is still 12013 * locked. 12014 */ 12015 static int 12016 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12017 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12018 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12019 { 12020 int32_t ret_val = 0; 12021 int32_t ourfinisacked = 0; 12022 struct tcp_rack *rack; 12023 12024 rack = (struct tcp_rack *)tp->t_fb_ptr; 12025 ctf_calc_rwin(so, tp); 12026 12027 if ((thflags & TH_RST) || 12028 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12029 return (__ctf_process_rst(m, th, so, tp, 12030 &rack->r_ctl.challenge_ack_ts, 12031 &rack->r_ctl.challenge_ack_cnt)); 12032 /* 12033 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12034 * synchronized state. 12035 */ 12036 if (thflags & TH_SYN) { 12037 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12038 return (ret_val); 12039 } 12040 /* 12041 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12042 * it's less than ts_recent, drop it. 12043 */ 12044 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12045 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12046 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12047 return (ret_val); 12048 } 12049 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12050 &rack->r_ctl.challenge_ack_ts, 12051 &rack->r_ctl.challenge_ack_cnt)) { 12052 return (ret_val); 12053 } 12054 /* 12055 * If new data are received on a connection after the user processes 12056 * are gone, then RST the other end. 12057 */ 12058 if ((tp->t_flags & TF_CLOSED) && tlen && 12059 rack_check_data_after_close(m, tp, &tlen, th, so)) 12060 return (1); 12061 /* 12062 * If last ACK falls within this segment's sequence numbers, record 12063 * its timestamp. NOTE: 1) That the test incorporates suggestions 12064 * from the latest proposal of the tcplw@cray.com list (Braden 12065 * 1993/04/26). 2) That updating only on newer timestamps interferes 12066 * with our earlier PAWS tests, so this check should be solely 12067 * predicated on the sequence space of this segment. 3) That we 12068 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12069 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12070 * SEG.Len, This modified check allows us to overcome RFC1323's 12071 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12072 * p.869. In such cases, we can still calculate the RTT correctly 12073 * when RCV.NXT == Last.ACK.Sent. 
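 *
 * Concretely, with last_ack_sent == 100 and an incoming pure
 * ACK carrying th_seq == 100 and tlen == 0, the strict RFC1323
 * form (Last.ACK.Sent < SEG.SEQ + SEG.LEN) would refuse the
 * timestamp, while the <= form used below accepts it, so
 * ts_recent keeps tracking the peer's clock even on a purely
 * unidirectional transfer.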
12074 */ 12075 if ((to->to_flags & TOF_TS) != 0 && 12076 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12077 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12078 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12079 tp->ts_recent_age = tcp_ts_getticks(); 12080 tp->ts_recent = to->to_tsval; 12081 } 12082 /* 12083 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12084 * is on (half-synchronized state), then queue data for later 12085 * processing; else drop segment and return. 12086 */ 12087 if ((thflags & TH_ACK) == 0) { 12088 if (tp->t_flags & TF_NEEDSYN) { 12089 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12090 tiwin, thflags, nxt_pkt)); 12091 } else if (tp->t_flags & TF_ACKNOW) { 12092 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12093 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12094 return (ret_val); 12095 } else { 12096 ctf_do_drop(m, NULL); 12097 return (0); 12098 } 12099 } 12100 /* 12101 * case TCPS_LAST_ACK: Ack processing. 12102 */ 12103 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12104 return (ret_val); 12105 } 12106 if (ourfinisacked) { 12107 tp = tcp_close(tp); 12108 ctf_do_drop(m, tp); 12109 return (1); 12110 } 12111 if (sbavail(&so->so_snd)) { 12112 if (ctf_progress_timeout_check(tp, true)) { 12113 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12114 tp, tick, PROGRESS_DROP, __LINE__); 12115 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12116 return (1); 12117 } 12118 } 12119 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12120 tiwin, thflags, nxt_pkt)); 12121 } 12122 12123 /* 12124 * Return value of 1, the TCB is unlocked and most 12125 * likely gone, return value of 0, the TCP is still 12126 * locked. 12127 */ 12128 static int 12129 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12130 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12131 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12132 { 12133 int32_t ret_val = 0; 12134 int32_t ourfinisacked = 0; 12135 struct tcp_rack *rack; 12136 12137 rack = (struct tcp_rack *)tp->t_fb_ptr; 12138 ctf_calc_rwin(so, tp); 12139 12140 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12141 if ((thflags & TH_RST) || 12142 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12143 return (__ctf_process_rst(m, th, so, tp, 12144 &rack->r_ctl.challenge_ack_ts, 12145 &rack->r_ctl.challenge_ack_cnt)); 12146 /* 12147 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12148 * synchronized state. 12149 */ 12150 if (thflags & TH_SYN) { 12151 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12152 return (ret_val); 12153 } 12154 /* 12155 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12156 * it's less than ts_recent, drop it. 12157 */ 12158 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12159 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12160 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12161 return (ret_val); 12162 } 12163 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12164 &rack->r_ctl.challenge_ack_ts, 12165 &rack->r_ctl.challenge_ack_cnt)) { 12166 return (ret_val); 12167 } 12168 /* 12169 * If new data are received on a connection after the user processes 12170 * are gone, then RST the other end. 
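 *
 * The rack_check_data_after_close() call below implements the
 * policy: with rc_allow_data_af_clo clear, or with nothing left
 * in our send buffer, the connection is closed and the segment
 * answered with a RST; otherwise the arriving payload is
 * swallowed (TF2_DROP_AF_DATA, tlen zeroed) so our remaining
 * output and the follow-up reset can still be sent.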
12171 */ 12172 if ((tp->t_flags & TF_CLOSED) && tlen && 12173 rack_check_data_after_close(m, tp, &tlen, th, so)) 12174 return (1); 12175 /* 12176 * If last ACK falls within this segment's sequence numbers, record 12177 * its timestamp. NOTE: 1) That the test incorporates suggestions 12178 * from the latest proposal of the tcplw@cray.com list (Braden 12179 * 1993/04/26). 2) That updating only on newer timestamps interferes 12180 * with our earlier PAWS tests, so this check should be solely 12181 * predicated on the sequence space of this segment. 3) That we 12182 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12183 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12184 * SEG.Len, This modified check allows us to overcome RFC1323's 12185 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12186 * p.869. In such cases, we can still calculate the RTT correctly 12187 * when RCV.NXT == Last.ACK.Sent. 12188 */ 12189 if ((to->to_flags & TOF_TS) != 0 && 12190 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12191 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12192 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12193 tp->ts_recent_age = tcp_ts_getticks(); 12194 tp->ts_recent = to->to_tsval; 12195 } 12196 /* 12197 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12198 * is on (half-synchronized state), then queue data for later 12199 * processing; else drop segment and return. 12200 */ 12201 if ((thflags & TH_ACK) == 0) { 12202 if (tp->t_flags & TF_NEEDSYN) { 12203 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12204 tiwin, thflags, nxt_pkt)); 12205 } else if (tp->t_flags & TF_ACKNOW) { 12206 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12207 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12208 return (ret_val); 12209 } else { 12210 ctf_do_drop(m, NULL); 12211 return (0); 12212 } 12213 } 12214 /* 12215 * Ack processing. 
12216 */ 12217 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12218 return (ret_val); 12219 } 12220 if (sbavail(&so->so_snd)) { 12221 if (ctf_progress_timeout_check(tp, true)) { 12222 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12223 tp, tick, PROGRESS_DROP, __LINE__); 12224 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12225 return (1); 12226 } 12227 } 12228 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12229 tiwin, thflags, nxt_pkt)); 12230 } 12231 12232 static void inline 12233 rack_clear_rate_sample(struct tcp_rack *rack) 12234 { 12235 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12236 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12237 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12238 } 12239 12240 static void 12241 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12242 { 12243 uint64_t bw_est, rate_wanted; 12244 int chged = 0; 12245 uint32_t user_max, orig_min, orig_max; 12246 12247 orig_min = rack->r_ctl.rc_pace_min_segs; 12248 orig_max = rack->r_ctl.rc_pace_max_segs; 12249 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12250 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12251 chged = 1; 12252 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12253 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12254 if (user_max != rack->r_ctl.rc_pace_max_segs) 12255 chged = 1; 12256 } 12257 if (rack->rc_force_max_seg) { 12258 rack->r_ctl.rc_pace_max_segs = user_max; 12259 } else if (rack->use_fixed_rate) { 12260 bw_est = rack_get_bw(rack); 12261 if ((rack->r_ctl.crte == NULL) || 12262 (bw_est != rack->r_ctl.crte->rate)) { 12263 rack->r_ctl.rc_pace_max_segs = user_max; 12264 } else { 12265 /* We are pacing right at the hardware rate */ 12266 uint32_t segsiz; 12267 12268 segsiz = min(ctf_fixed_maxseg(tp), 12269 rack->r_ctl.rc_pace_min_segs); 12270 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12271 tp, bw_est, segsiz, 0, 12272 rack->r_ctl.crte, NULL); 12273 } 12274 } else if (rack->rc_always_pace) { 12275 if (rack->r_ctl.gp_bw || 12276 #ifdef NETFLIX_PEAKRATE 12277 rack->rc_tp->t_maxpeakrate || 12278 #endif 12279 rack->r_ctl.init_rate) { 12280 /* We have a rate of some sort set */ 12281 uint32_t orig; 12282 12283 bw_est = rack_get_bw(rack); 12284 orig = rack->r_ctl.rc_pace_max_segs; 12285 if (fill_override) 12286 rate_wanted = *fill_override; 12287 else 12288 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12289 if (rate_wanted) { 12290 /* We have something */ 12291 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12292 rate_wanted, 12293 ctf_fixed_maxseg(rack->rc_tp)); 12294 } else 12295 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12296 if (orig != rack->r_ctl.rc_pace_max_segs) 12297 chged = 1; 12298 } else if ((rack->r_ctl.gp_bw == 0) && 12299 (rack->r_ctl.rc_pace_max_segs == 0)) { 12300 /* 12301 * If we have nothing limit us to bursting 12302 * out IW sized pieces. 
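 *
 * Summarizing the selection above: a forced max-seg takes the
 * user supplied size, a fixed rate takes the user size unless
 * we are pacing right at the hardware rate (then the hardware
 * burst size), an always-paced connection with any rate
 * estimate converts that rate via rack_get_pacing_len(), and
 * only with no rate information at all do we fall back to
 * bursting one initial window, before the final clamp to
 * PACE_MAX_IP_BYTES below.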
12303 */ 12304 chged = 1; 12305 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12306 } 12307 } 12308 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12309 chged = 1; 12310 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12311 } 12312 if (chged) 12313 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12314 } 12315 12316 12317 static void 12318 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12319 { 12320 #ifdef INET6 12321 struct ip6_hdr *ip6 = NULL; 12322 #endif 12323 #ifdef INET 12324 struct ip *ip = NULL; 12325 #endif 12326 struct udphdr *udp = NULL; 12327 12328 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12329 #ifdef INET6 12330 if (rack->r_is_v6) { 12331 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12332 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12333 if (tp->t_port) { 12334 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12335 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12336 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12337 udp->uh_dport = tp->t_port; 12338 rack->r_ctl.fsb.udp = udp; 12339 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12340 } else 12341 { 12342 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12343 rack->r_ctl.fsb.udp = NULL; 12344 } 12345 tcpip_fillheaders(rack->rc_inp, 12346 tp->t_port, 12347 ip6, rack->r_ctl.fsb.th); 12348 } else 12349 #endif /* INET6 */ 12350 { 12351 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12352 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12353 if (tp->t_port) { 12354 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12355 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12356 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12357 udp->uh_dport = tp->t_port; 12358 rack->r_ctl.fsb.udp = udp; 12359 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12360 } else 12361 { 12362 rack->r_ctl.fsb.udp = NULL; 12363 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12364 } 12365 tcpip_fillheaders(rack->rc_inp, 12366 tp->t_port, 12367 ip, rack->r_ctl.fsb.th); 12368 } 12369 rack->r_fsb_inited = 1; 12370 } 12371 12372 static int 12373 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12374 { 12375 /* 12376 * Allocate the larger of spaces V6 if available else just 12377 * V4 and include udphdr (overbook) 12378 */ 12379 #ifdef INET6 12380 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12381 #else 12382 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12383 #endif 12384 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12385 M_TCPFSB, M_NOWAIT|M_ZERO); 12386 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12387 return (ENOMEM); 12388 } 12389 rack->r_fsb_inited = 0; 12390 return (0); 12391 } 12392 12393 static int 12394 rack_init(struct tcpcb *tp) 12395 { 12396 struct inpcb *inp = tptoinpcb(tp); 12397 struct tcp_rack *rack = NULL; 12398 #ifdef INVARIANTS 12399 struct rack_sendmap *insret; 12400 #endif 12401 uint32_t iwin, snt, us_cts; 12402 int err; 12403 12404 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12405 if (tp->t_fb_ptr == NULL) { 12406 /* 12407 * We need to allocate memory but cant. The INP and INP_INFO 12408 * locks and they are recursive (happens during setup. 
So a 12409 * scheme to drop the locks fails :( 12410 * 12411 */ 12412 return (ENOMEM); 12413 } 12414 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12415 12416 rack = (struct tcp_rack *)tp->t_fb_ptr; 12417 RB_INIT(&rack->r_ctl.rc_mtree); 12418 TAILQ_INIT(&rack->r_ctl.rc_free); 12419 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12420 rack->rc_tp = tp; 12421 rack->rc_inp = inp; 12422 /* Set the flag */ 12423 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 12424 /* Probably not needed but lets be sure */ 12425 rack_clear_rate_sample(rack); 12426 /* 12427 * Save off the default values, socket options will poke 12428 * at these if pacing is not on or we have not yet 12429 * reached where pacing is on (gp_ready/fixed enabled). 12430 * When they get set into the CC module (when gp_ready 12431 * is enabled or we enable fixed) then we will set these 12432 * values into the CC and place in here the old values 12433 * so we have a restoral. Then we will set the flag 12434 * rc_pacing_cc_set. That way whenever we turn off pacing 12435 * or switch off this stack, we will know to go restore 12436 * the saved values. 12437 */ 12438 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12439 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12440 /* We want abe like behavior as well */ 12441 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12442 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12443 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12444 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12445 rack->r_ctl.roundends = tp->snd_max; 12446 if (use_rack_rr) 12447 rack->use_rack_rr = 1; 12448 if (V_tcp_delack_enabled) 12449 tp->t_delayed_ack = 1; 12450 else 12451 tp->t_delayed_ack = 0; 12452 #ifdef TCP_ACCOUNTING 12453 if (rack_tcp_accounting) { 12454 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12455 } 12456 #endif 12457 if (rack_enable_shared_cwnd) 12458 rack->rack_enable_scwnd = 1; 12459 rack->rc_user_set_max_segs = rack_hptsi_segments; 12460 rack->rc_force_max_seg = 0; 12461 if (rack_use_imac_dack) 12462 rack->rc_dack_mode = 1; 12463 TAILQ_INIT(&rack->r_ctl.opt_list); 12464 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12465 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12466 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12467 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12468 rack->r_ctl.rc_highest_us_rtt = 0; 12469 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12470 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12471 if (rack_use_cmp_acks) 12472 rack->r_use_cmp_ack = 1; 12473 if (rack_disable_prr) 12474 rack->rack_no_prr = 1; 12475 if (rack_gp_no_rec_chg) 12476 rack->rc_gp_no_rec_chg = 1; 12477 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12478 rack->rc_always_pace = 1; 12479 if (rack->use_fixed_rate || rack->gp_ready) 12480 rack_set_cc_pacing(rack); 12481 } else 12482 rack->rc_always_pace = 0; 12483 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12484 rack->r_mbuf_queue = 1; 12485 else 12486 rack->r_mbuf_queue = 0; 12487 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12488 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12489 else 12490 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12491 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12492 if (rack_limits_scwnd) 12493 rack->r_limit_scw = 1; 12494 else 12495 rack->r_limit_scw = 0; 12496 rack->rc_labc = V_tcp_abc_l_var; 12497 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12498 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12499 rack->r_ctl.rc_rate_sample_method = 
rack_rate_sample_method; 12500 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12501 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12502 rack->r_ctl.rc_min_to = rack_min_to; 12503 microuptime(&rack->r_ctl.act_rcv_time); 12504 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12505 rack->rc_init_win = rack_default_init_window; 12506 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12507 if (rack_hw_up_only) 12508 rack->r_up_only = 1; 12509 if (rack_do_dyn_mul) { 12510 /* When dynamic adjustment is on CA needs to start at 100% */ 12511 rack->rc_gp_dyn_mul = 1; 12512 if (rack_do_dyn_mul >= 100) 12513 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12514 } else 12515 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12516 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12517 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12518 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12519 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12520 rack_probertt_filter_life); 12521 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12522 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12523 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12524 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12525 rack->r_ctl.rc_time_probertt_starts = 0; 12526 if (rack_dsack_std_based & 0x1) { 12527 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12528 rack->rc_rack_tmr_std_based = 1; 12529 } 12530 if (rack_dsack_std_based & 0x2) { 12531 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12532 rack->rc_rack_use_dsack = 1; 12533 } 12534 /* We require at least one measurement, even if the sysctl is 0 */ 12535 if (rack_req_measurements) 12536 rack->r_ctl.req_measurements = rack_req_measurements; 12537 else 12538 rack->r_ctl.req_measurements = 1; 12539 if (rack_enable_hw_pacing) 12540 rack->rack_hdw_pace_ena = 1; 12541 if (rack_hw_rate_caps) 12542 rack->r_rack_hw_rate_caps = 1; 12543 /* Do we force on detection? */ 12544 #ifdef NETFLIX_EXP_DETECTION 12545 if (tcp_force_detection) 12546 rack->do_detection = 1; 12547 else 12548 #endif 12549 rack->do_detection = 0; 12550 if (rack_non_rxt_use_cr) 12551 rack->rack_rec_nonrxt_use_cr = 1; 12552 err = rack_init_fsb(tp, rack); 12553 if (err) { 12554 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12555 tp->t_fb_ptr = NULL; 12556 return (err); 12557 } 12558 if (tp->snd_una != tp->snd_max) { 12559 /* Create a send map for the current outstanding data */ 12560 struct rack_sendmap *rsm; 12561 12562 rsm = rack_alloc(rack); 12563 if (rsm == NULL) { 12564 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12565 tp->t_fb_ptr = NULL; 12566 return (ENOMEM); 12567 } 12568 rsm->r_no_rtt_allowed = 1; 12569 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12570 rsm->r_rtr_cnt = 1; 12571 rsm->r_rtr_bytes = 0; 12572 if (tp->t_flags & TF_SENTFIN) 12573 rsm->r_flags |= RACK_HAS_FIN; 12574 if ((tp->snd_una == tp->iss) && 12575 !TCPS_HAVEESTABLISHED(tp->t_state)) 12576 rsm->r_flags |= RACK_HAS_SYN; 12577 rsm->r_start = tp->snd_una; 12578 rsm->r_end = tp->snd_max; 12579 rsm->r_dupack = 0; 12580 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12581 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12582 if (rsm->m) 12583 rsm->orig_m_len = rsm->m->m_len; 12584 else 12585 rsm->orig_m_len = 0; 12586 } else { 12587 /* 12588 * This can happen if we have a stand-alone FIN or 12589 * SYN. 
12590 */ 12591 rsm->m = NULL; 12592 rsm->orig_m_len = 0; 12593 rsm->soff = 0; 12594 } 12595 #ifndef INVARIANTS 12596 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12597 #else 12598 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12599 if (insret != NULL) { 12600 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12601 insret, rack, rsm); 12602 } 12603 #endif 12604 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12605 rsm->r_in_tmap = 1; 12606 } 12607 /* 12608 * Timers in Rack are kept in microseconds so lets 12609 * convert any initial incoming variables 12610 * from ticks into usecs. Note that we 12611 * also change the values of t_srtt and t_rttvar, if 12612 * they are non-zero. They are kept with a 5 12613 * bit decimal so we have to carefully convert 12614 * these to get the full precision. 12615 */ 12616 rack_convert_rtts(tp); 12617 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12618 if (rack_do_hystart) { 12619 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 12620 if (rack_do_hystart > 1) 12621 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 12622 if (rack_do_hystart > 2) 12623 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 12624 } 12625 if (rack_def_profile) 12626 rack_set_profile(rack, rack_def_profile); 12627 /* Cancel the GP measurement in progress */ 12628 tp->t_flags &= ~TF_GPUTINPROG; 12629 if (SEQ_GT(tp->snd_max, tp->iss)) 12630 snt = tp->snd_max - tp->iss; 12631 else 12632 snt = 0; 12633 iwin = rc_init_window(rack); 12634 if (snt < iwin) { 12635 /* We are not past the initial window 12636 * so we need to make sure cwnd is 12637 * correct. 12638 */ 12639 if (tp->snd_cwnd < iwin) 12640 tp->snd_cwnd = iwin; 12641 /* 12642 * If we are within the initial window 12643 * we want ssthresh to be unlimited. Setting 12644 * it to the rwnd (which the default stack does 12645 * and older racks) is not really a good idea 12646 * since we want to be in SS and grow both the 12647 * cwnd and the rwnd (via dynamic rwnd growth). If 12648 * we set it to the rwnd then as the peer grows its 12649 * rwnd we will be stuck in CA and never hit SS. 12650 * 12651 * Its far better to raise it up high (this takes the 12652 * risk that there as been a loss already, probably 12653 * we should have an indicator in all stacks of loss 12654 * but we don't), but considering the normal use this 12655 * is a risk worth taking. The consequences of not 12656 * hitting SS are far worse than going one more time 12657 * into it early on (before we have sent even a IW). 12658 * It is highly unlikely that we will have had a loss 12659 * before getting the IW out. 12660 */ 12661 tp->snd_ssthresh = 0xffffffff; 12662 } 12663 rack_stop_all_timers(tp); 12664 /* Lets setup the fsb block */ 12665 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12666 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12667 __LINE__, RACK_RTTS_INIT); 12668 return (0); 12669 } 12670 12671 static int 12672 rack_handoff_ok(struct tcpcb *tp) 12673 { 12674 if ((tp->t_state == TCPS_CLOSED) || 12675 (tp->t_state == TCPS_LISTEN)) { 12676 /* Sure no problem though it may not stick */ 12677 return (0); 12678 } 12679 if ((tp->t_state == TCPS_SYN_SENT) || 12680 (tp->t_state == TCPS_SYN_RECEIVED)) { 12681 /* 12682 * We really don't know if you support sack, 12683 * you have to get to ESTAB or beyond to tell. 12684 */ 12685 return (EAGAIN); 12686 } 12687 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12688 /* 12689 * Rack will only send a FIN after all data is acknowledged. 
12690 * So in this case we have more data outstanding. We can't 12691 * switch stacks until either all data and only the FIN 12692 * is left (in which case rack_init() now knows how 12693 * to deal with that) <or> all is acknowledged and we 12694 * are only left with incoming data, though why you 12695 * would want to switch to rack after all data is acknowledged 12696 * I have no idea (rrs)! 12697 */ 12698 return (EAGAIN); 12699 } 12700 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12701 return (0); 12702 } 12703 /* 12704 * If we reach here we don't do SACK on this connection so we can 12705 * never do rack. 12706 */ 12707 return (EINVAL); 12708 } 12709 12710 12711 static void 12712 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12713 { 12714 struct inpcb *inp = tptoinpcb(tp); 12715 12716 if (tp->t_fb_ptr) { 12717 struct tcp_rack *rack; 12718 struct rack_sendmap *rsm, *nrsm; 12719 #ifdef INVARIANTS 12720 struct rack_sendmap *rm; 12721 #endif 12722 12723 rack = (struct tcp_rack *)tp->t_fb_ptr; 12724 if (tp->t_in_pkt) { 12725 /* 12726 * It is unsafe to process the packets since a 12727 * reset may be lurking in them (its rare but it 12728 * can occur). If we were to find a RST, then we 12729 * would end up dropping the connection and the 12730 * INP lock, so when we return the caller (tcp_usrreq) 12731 * will blow up when it trys to unlock the inp. 12732 */ 12733 struct mbuf *save, *m; 12734 12735 m = tp->t_in_pkt; 12736 tp->t_in_pkt = NULL; 12737 tp->t_tail_pkt = NULL; 12738 while (m) { 12739 save = m->m_nextpkt; 12740 m->m_nextpkt = NULL; 12741 m_freem(m); 12742 m = save; 12743 } 12744 } 12745 tp->t_flags &= ~TF_FORCEDATA; 12746 #ifdef NETFLIX_SHARED_CWND 12747 if (rack->r_ctl.rc_scw) { 12748 uint32_t limit; 12749 12750 if (rack->r_limit_scw) 12751 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12752 else 12753 limit = 0; 12754 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12755 rack->r_ctl.rc_scw_index, 12756 limit); 12757 rack->r_ctl.rc_scw = NULL; 12758 } 12759 #endif 12760 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12761 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12762 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12763 rack->r_ctl.fsb.th = NULL; 12764 } 12765 /* Convert back to ticks, with */ 12766 if (tp->t_srtt > 1) { 12767 uint32_t val, frac; 12768 12769 val = USEC_2_TICKS(tp->t_srtt); 12770 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12771 tp->t_srtt = val << TCP_RTT_SHIFT; 12772 /* 12773 * frac is the fractional part here is left 12774 * over from converting to hz and shifting. 12775 * We need to convert this to the 5 bit 12776 * remainder. 12777 */ 12778 if (frac) { 12779 if (hz == 1000) { 12780 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12781 } else { 12782 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12783 } 12784 tp->t_srtt += frac; 12785 } 12786 } 12787 if (tp->t_rttvar) { 12788 uint32_t val, frac; 12789 12790 val = USEC_2_TICKS(tp->t_rttvar); 12791 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12792 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12793 /* 12794 * frac is the fractional part here is left 12795 * over from converting to hz and shifting. 12796 * We need to convert this to the 5 bit 12797 * remainder. 
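 *
 * For example, with hz == 1000 a stored value of 12345 usec
 * yields val = 12 ticks and frac = 345 usec; the fraction is
 * rescaled as 345 * TCP_RTT_SCALE / 1000 = 11, i.e. 11/32 of a
 * tick carried in the low bits of the shifted value.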
12798 */ 12799 if (frac) { 12800 if (hz == 1000) { 12801 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12802 } else { 12803 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12804 } 12805 tp->t_rttvar += frac; 12806 } 12807 } 12808 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12809 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12810 if (rack->rc_always_pace) { 12811 tcp_decrement_paced_conn(); 12812 rack_undo_cc_pacing(rack); 12813 rack->rc_always_pace = 0; 12814 } 12815 /* Clean up any options if they were not applied */ 12816 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12817 struct deferred_opt_list *dol; 12818 12819 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12820 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12821 free(dol, M_TCPDO); 12822 } 12823 /* rack does not use force data but other stacks may clear it */ 12824 if (rack->r_ctl.crte != NULL) { 12825 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12826 rack->rack_hdrw_pacing = 0; 12827 rack->r_ctl.crte = NULL; 12828 } 12829 #ifdef TCP_BLACKBOX 12830 tcp_log_flowend(tp); 12831 #endif 12832 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12833 #ifndef INVARIANTS 12834 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12835 #else 12836 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12837 if (rm != rsm) { 12838 panic("At fini, rack:%p rsm:%p rm:%p", 12839 rack, rsm, rm); 12840 } 12841 #endif 12842 uma_zfree(rack_zone, rsm); 12843 } 12844 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12845 while (rsm) { 12846 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12847 uma_zfree(rack_zone, rsm); 12848 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12849 } 12850 rack->rc_free_cnt = 0; 12851 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12852 tp->t_fb_ptr = NULL; 12853 } 12854 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12855 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12856 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12857 inp->inp_flags2 &= ~INP_MBUF_ACKCMP; 12858 /* Cancel the GP measurement in progress */ 12859 tp->t_flags &= ~TF_GPUTINPROG; 12860 inp->inp_flags2 &= ~INP_MBUF_L_ACKS; 12861 /* Make sure snd_nxt is correctly set */ 12862 tp->snd_nxt = tp->snd_max; 12863 } 12864 12865 static void 12866 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12867 { 12868 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12869 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 12870 } 12871 switch (tp->t_state) { 12872 case TCPS_SYN_SENT: 12873 rack->r_state = TCPS_SYN_SENT; 12874 rack->r_substate = rack_do_syn_sent; 12875 break; 12876 case TCPS_SYN_RECEIVED: 12877 rack->r_state = TCPS_SYN_RECEIVED; 12878 rack->r_substate = rack_do_syn_recv; 12879 break; 12880 case TCPS_ESTABLISHED: 12881 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12882 rack->r_state = TCPS_ESTABLISHED; 12883 rack->r_substate = rack_do_established; 12884 break; 12885 case TCPS_CLOSE_WAIT: 12886 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12887 rack->r_state = TCPS_CLOSE_WAIT; 12888 rack->r_substate = rack_do_close_wait; 12889 break; 12890 case TCPS_FIN_WAIT_1: 12891 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12892 rack->r_state = TCPS_FIN_WAIT_1; 12893 rack->r_substate = rack_do_fin_wait_1; 12894 break; 12895 case TCPS_CLOSING: 12896 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12897 rack->r_state = TCPS_CLOSING; 12898 rack->r_substate = rack_do_closing; 12899 break; 12900 case TCPS_LAST_ACK: 12901 
rack_set_pace_segments(tp, rack, __LINE__, NULL); 12902 rack->r_state = TCPS_LAST_ACK; 12903 rack->r_substate = rack_do_lastack; 12904 break; 12905 case TCPS_FIN_WAIT_2: 12906 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12907 rack->r_state = TCPS_FIN_WAIT_2; 12908 rack->r_substate = rack_do_fin_wait_2; 12909 break; 12910 case TCPS_LISTEN: 12911 case TCPS_CLOSED: 12912 case TCPS_TIME_WAIT: 12913 default: 12914 break; 12915 }; 12916 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12917 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12918 12919 } 12920 12921 static void 12922 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12923 { 12924 /* 12925 * We received an ack, and then did not 12926 * call send or were bounced out due to the 12927 * hpts was running. Now a timer is up as well, is 12928 * it the right timer? 12929 */ 12930 struct rack_sendmap *rsm; 12931 int tmr_up; 12932 12933 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12934 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12935 return; 12936 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12937 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12938 (tmr_up == PACE_TMR_RXT)) { 12939 /* Should be an RXT */ 12940 return; 12941 } 12942 if (rsm == NULL) { 12943 /* Nothing outstanding? */ 12944 if (tp->t_flags & TF_DELACK) { 12945 if (tmr_up == PACE_TMR_DELACK) 12946 /* We are supposed to have delayed ack up and we do */ 12947 return; 12948 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12949 /* 12950 * if we hit enobufs then we would expect the possibility 12951 * of nothing outstanding and the RXT up (and the hptsi timer). 12952 */ 12953 return; 12954 } else if (((V_tcp_always_keepalive || 12955 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12956 (tp->t_state <= TCPS_CLOSING)) && 12957 (tmr_up == PACE_TMR_KEEP) && 12958 (tp->snd_max == tp->snd_una)) { 12959 /* We should have keep alive up and we do */ 12960 return; 12961 } 12962 } 12963 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12964 ((tmr_up == PACE_TMR_TLP) || 12965 (tmr_up == PACE_TMR_RACK) || 12966 (tmr_up == PACE_TMR_RXT))) { 12967 /* 12968 * Either a Rack, TLP or RXT is fine if we 12969 * have outstanding data. 12970 */ 12971 return; 12972 } else if (tmr_up == PACE_TMR_DELACK) { 12973 /* 12974 * If the delayed ack was going to go off 12975 * before the rtx/tlp/rack timer were going to 12976 * expire, then that would be the timer in control. 12977 * Note we don't check the time here trusting the 12978 * code is correct. 12979 */ 12980 return; 12981 } 12982 /* 12983 * Ok the timer originally started is not what we want now. 12984 * We will force the hpts to be stopped if any, and restart 12985 * with the slot set to what was in the saved slot. 
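 *
 * Note that if the pacer still owes us an output
 * (PACE_PKT_OUTPUT set with rc_last_output_to in the future),
 * the time being given up is credited to rc_agg_early below
 * before the connection is pulled off the hpts wheel and the
 * timer is restarted.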
12986 */ 12987 if (tcp_in_hpts(rack->rc_inp)) { 12988 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12989 uint32_t us_cts; 12990 12991 us_cts = tcp_get_usecs(NULL); 12992 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12993 rack->r_early = 1; 12994 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12995 } 12996 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 12997 } 12998 tcp_hpts_remove(rack->rc_inp); 12999 } 13000 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13001 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13002 } 13003 13004 13005 static void 13006 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13007 { 13008 if ((SEQ_LT(tp->snd_wl1, seq) || 13009 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13010 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13011 /* keep track of pure window updates */ 13012 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13013 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13014 tp->snd_wnd = tiwin; 13015 rack_validate_fo_sendwin_up(tp, rack); 13016 tp->snd_wl1 = seq; 13017 tp->snd_wl2 = ack; 13018 if (tp->snd_wnd > tp->max_sndwnd) 13019 tp->max_sndwnd = tp->snd_wnd; 13020 rack->r_wanted_output = 1; 13021 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13022 tp->snd_wnd = tiwin; 13023 rack_validate_fo_sendwin_up(tp, rack); 13024 tp->snd_wl1 = seq; 13025 tp->snd_wl2 = ack; 13026 } else { 13027 /* Not a valid win update */ 13028 return; 13029 } 13030 /* Do we exit persists? */ 13031 if ((rack->rc_in_persist != 0) && 13032 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13033 rack->r_ctl.rc_pace_min_segs))) { 13034 rack_exit_persist(tp, rack, cts); 13035 } 13036 /* Do we enter persists? */ 13037 if ((rack->rc_in_persist == 0) && 13038 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13039 TCPS_HAVEESTABLISHED(tp->t_state) && 13040 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13041 sbavail(&tptosocket(tp)->so_snd) && 13042 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13043 /* 13044 * Here the rwnd is less than 13045 * the pacing size, we are established, 13046 * nothing is outstanding, and there is 13047 * data to send. Enter persists. 
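 *
 * For example, with rc_high_rwnd == 65535 and rc_pace_min_segs
 * == 1448 the persist threshold is min(32767, 1448) = 1448; a
 * peer window of 1000 bytes with a full send buffer and nothing
 * outstanding drops us into persist here, and the exit test
 * above releases us once the advertised window climbs back to
 * 1448 or more.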
13048 */ 13049 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13050 } 13051 } 13052 13053 static void 13054 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13055 { 13056 13057 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13058 struct inpcb *inp = tptoinpcb(tp); 13059 union tcp_log_stackspecific log; 13060 struct timeval ltv; 13061 char tcp_hdr_buf[60]; 13062 struct tcphdr *th; 13063 struct timespec ts; 13064 uint32_t orig_snd_una; 13065 uint8_t xx = 0; 13066 13067 #ifdef NETFLIX_HTTP_LOGGING 13068 struct http_sendfile_track *http_req; 13069 13070 if (SEQ_GT(ae->ack, tp->snd_una)) { 13071 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13072 } else { 13073 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13074 } 13075 #endif 13076 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13077 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13078 if (rack->rack_no_prr == 0) 13079 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13080 else 13081 log.u_bbr.flex1 = 0; 13082 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13083 log.u_bbr.use_lt_bw <<= 1; 13084 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13085 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13086 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13087 log.u_bbr.pkts_out = tp->t_maxseg; 13088 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13089 log.u_bbr.flex7 = 1; 13090 log.u_bbr.lost = ae->flags; 13091 log.u_bbr.cwnd_gain = ackval; 13092 log.u_bbr.pacing_gain = 0x2; 13093 if (ae->flags & TSTMP_HDWR) { 13094 /* Record the hardware timestamp if present */ 13095 log.u_bbr.flex3 = M_TSTMP; 13096 ts.tv_sec = ae->timestamp / 1000000000; 13097 ts.tv_nsec = ae->timestamp % 1000000000; 13098 ltv.tv_sec = ts.tv_sec; 13099 ltv.tv_usec = ts.tv_nsec / 1000; 13100 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13101 } else if (ae->flags & TSTMP_LRO) { 13102 /* Record the LRO the arrival timestamp */ 13103 log.u_bbr.flex3 = M_TSTMP_LRO; 13104 ts.tv_sec = ae->timestamp / 1000000000; 13105 ts.tv_nsec = ae->timestamp % 1000000000; 13106 ltv.tv_sec = ts.tv_sec; 13107 ltv.tv_usec = ts.tv_nsec / 1000; 13108 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13109 } 13110 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13111 /* Log the rcv time */ 13112 log.u_bbr.delRate = ae->timestamp; 13113 #ifdef NETFLIX_HTTP_LOGGING 13114 log.u_bbr.applimited = tp->t_http_closed; 13115 log.u_bbr.applimited <<= 8; 13116 log.u_bbr.applimited |= tp->t_http_open; 13117 log.u_bbr.applimited <<= 8; 13118 log.u_bbr.applimited |= tp->t_http_req; 13119 if (http_req) { 13120 /* Copy out any client req info */ 13121 /* seconds */ 13122 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13123 /* useconds */ 13124 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13125 log.u_bbr.rttProp = http_req->timestamp; 13126 log.u_bbr.cur_del_rate = http_req->start; 13127 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13128 log.u_bbr.flex8 |= 1; 13129 } else { 13130 log.u_bbr.flex8 |= 2; 13131 log.u_bbr.bw_inuse = http_req->end; 13132 } 13133 log.u_bbr.flex6 = http_req->start_seq; 13134 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13135 log.u_bbr.flex8 |= 4; 13136 log.u_bbr.epoch = http_req->end_seq; 13137 } 13138 } 13139 #endif 13140 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13141 th = (struct tcphdr *)tcp_hdr_buf; 13142 th->th_seq = ae->seq; 13143 th->th_ack = ae->ack; 13144 th->th_win = ae->win; 13145 /* Now fill in the ports */ 13146 th->th_sport = inp->inp_fport; 13147 th->th_dport = 
inp->inp_lport; 13148 tcp_set_flags(th, ae->flags); 13149 /* Now do we have a timestamp option? */ 13150 if (ae->flags & HAS_TSTMP) { 13151 u_char *cp; 13152 uint32_t val; 13153 13154 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13155 cp = (u_char *)(th + 1); 13156 *cp = TCPOPT_NOP; 13157 cp++; 13158 *cp = TCPOPT_NOP; 13159 cp++; 13160 *cp = TCPOPT_TIMESTAMP; 13161 cp++; 13162 *cp = TCPOLEN_TIMESTAMP; 13163 cp++; 13164 val = htonl(ae->ts_value); 13165 bcopy((char *)&val, 13166 (char *)cp, sizeof(uint32_t)); 13167 val = htonl(ae->ts_echo); 13168 bcopy((char *)&val, 13169 (char *)(cp + 4), sizeof(uint32_t)); 13170 } else 13171 th->th_off = (sizeof(struct tcphdr) >> 2); 13172 13173 /* 13174 * For sane logging we need to play a little trick. 13175 * If the ack were fully processed we would have moved 13176 * snd_una to high_seq, but since compressed acks are 13177 * processed in two phases, at this point (logging) snd_una 13178 * won't be advanced. So we would see multiple acks showing 13179 * the advancement. We can prevent that by "pretending" that 13180 * snd_una was advanced and then un-advancing it so that the 13181 * logging code has the right value for tlb_snd_una. 13182 */ 13183 if (tp->snd_una != high_seq) { 13184 orig_snd_una = tp->snd_una; 13185 tp->snd_una = high_seq; 13186 xx = 1; 13187 } else 13188 xx = 0; 13189 TCP_LOG_EVENTP(tp, th, 13190 &tptosocket(tp)->so_rcv, 13191 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 13192 0, &log, true, &ltv); 13193 if (xx) { 13194 tp->snd_una = orig_snd_una; 13195 } 13196 } 13197 13198 } 13199 13200 static void 13201 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13202 { 13203 uint32_t us_rtt; 13204 /* 13205 * A persist or keep-alive was forced out, update our 13206 * min rtt time. Note that we now worry about lost responses. 13207 * When a subsequent keep-alive or persist times out 13208 * and forced_ack is still on, then the last probe 13209 * was not responded to. In such cases we have a 13210 * sysctl that controls the behavior. Either we apply 13211 * the rtt but with reduced confidence (0), or we just 13212 * plain don't apply the rtt estimate. Having data flow 13213 * will clear the probe_not_answered flag i.e. cum-ack 13214 * move forward <or> exiting and reentering persists. 13215 */ 13216 13217 rack->forced_ack = 0; 13218 rack->rc_tp->t_rxtshift = 0; 13219 if ((rack->rc_in_persist && 13220 (tiwin == rack->rc_tp->snd_wnd)) || 13221 (rack->rc_in_persist == 0)) { 13222 /* 13223 * In persists only apply the RTT update if this is 13224 * a response to our window probe. And that 13225 * means the rwnd sent must match the current 13226 * snd_wnd. If it does not, then we got a 13227 * window update ack instead. For keepalive 13228 * we allow the answer no matter what the window. 13229 * 13230 * Note that if the probe_not_answered is set then 13231 * the forced_ack_ts is the oldest one i.e. the first 13232 * probe sent that might have been lost. This assures 13233 * us that if we do calculate an RTT it is the longer 13234 * one, not some short value.
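 * Put differently: measuring from the oldest outstanding probe means
 * a computed RTT can only overestimate, never underestimate, the real
 * path RTT, which is the safe direction for the timer math.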
13235 */ 13236 if (rack->rc_in_persist) 13237 counter_u64_add(rack_persists_acks, 1); 13238 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13239 if (us_rtt == 0) 13240 us_rtt = 1; 13241 if (rack->probe_not_answered == 0) { 13242 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13243 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13244 } else { 13245 /* We have a retransmitted probe here too */ 13246 if (rack_apply_rtt_with_reduced_conf) { 13247 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13248 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13249 } 13250 } 13251 } 13252 } 13253 13254 static int 13255 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13256 { 13257 /* 13258 * Handle a "special" compressed ack mbuf. Each incoming 13259 * ack has only four possible dispositions: 13260 * 13261 * A) It moves the cum-ack forward 13262 * B) It is behind the cum-ack. 13263 * C) It is a window-update ack. 13264 * D) It is a dup-ack. 13265 * 13266 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13267 * in the incoming mbuf. We also need to still pay attention 13268 * to nxt_pkt since there may be another packet after this 13269 * one. 13270 */ 13271 #ifdef TCP_ACCOUNTING 13272 uint64_t ts_val; 13273 uint64_t rdstc; 13274 #endif 13275 int segsiz; 13276 struct timespec ts; 13277 struct tcp_rack *rack; 13278 struct tcp_ackent *ae; 13279 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13280 int cnt, i, did_out, ourfinisacked = 0; 13281 struct tcpopt to_holder, *to = NULL; 13282 #ifdef TCP_ACCOUNTING 13283 int win_up_req = 0; 13284 #endif 13285 int nsegs = 0; 13286 int under_pacing = 1; 13287 int recovery = 0; 13288 #ifdef TCP_ACCOUNTING 13289 sched_pin(); 13290 #endif 13291 rack = (struct tcp_rack *)tp->t_fb_ptr; 13292 if (rack->gp_ready && 13293 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13294 under_pacing = 0; 13295 else 13296 under_pacing = 1; 13297 13298 if (rack->r_state != tp->t_state) 13299 rack_set_state(tp, rack); 13300 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13301 (tp->t_flags & TF_GPUTINPROG)) { 13302 /* 13303 * We have a goodput in progress 13304 * and we have entered a late state. 13305 * Do we have enough data in the sb 13306 * to handle the GPUT request? 13307 */ 13308 uint32_t bytes; 13309 13310 bytes = tp->gput_ack - tp->gput_seq; 13311 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13312 bytes += tp->gput_seq - tp->snd_una; 13313 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 13314 /* 13315 * There are not enough bytes in the socket 13316 * buffer that have been sent to cover this 13317 * measurement. Cancel it. 
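 * (The non-compressed input path performs this same cancel-or-keep
 * check in rack_do_segment_nounlock() further below.)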
13318 */ 13319 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13320 rack->r_ctl.rc_gp_srtt /*flex1*/, 13321 tp->gput_seq, 13322 0, 0, 18, __LINE__, NULL, 0); 13323 tp->t_flags &= ~TF_GPUTINPROG; 13324 } 13325 } 13326 to = &to_holder; 13327 to->to_flags = 0; 13328 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13329 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13330 cnt = m->m_len / sizeof(struct tcp_ackent); 13331 counter_u64_add(rack_multi_single_eq, cnt); 13332 high_seq = tp->snd_una; 13333 the_win = tp->snd_wnd; 13334 win_seq = tp->snd_wl1; 13335 win_upd_ack = tp->snd_wl2; 13336 cts = tcp_tv_to_usectick(tv); 13337 ms_cts = tcp_tv_to_mssectick(tv); 13338 rack->r_ctl.rc_rcvtime = cts; 13339 segsiz = ctf_fixed_maxseg(tp); 13340 if ((rack->rc_gp_dyn_mul) && 13341 (rack->use_fixed_rate == 0) && 13342 (rack->rc_always_pace)) { 13343 /* Check in on probertt */ 13344 rack_check_probe_rtt(rack, cts); 13345 } 13346 for (i = 0; i < cnt; i++) { 13347 #ifdef TCP_ACCOUNTING 13348 ts_val = get_cyclecount(); 13349 #endif 13350 rack_clear_rate_sample(rack); 13351 ae = ((mtod(m, struct tcp_ackent *)) + i); 13352 /* Setup the window */ 13353 tiwin = ae->win << tp->snd_scale; 13354 if (tiwin > rack->r_ctl.rc_high_rwnd) 13355 rack->r_ctl.rc_high_rwnd = tiwin; 13356 /* figure out the type of ack */ 13357 if (SEQ_LT(ae->ack, high_seq)) { 13358 /* Case B*/ 13359 ae->ack_val_set = ACK_BEHIND; 13360 } else if (SEQ_GT(ae->ack, high_seq)) { 13361 /* Case A */ 13362 ae->ack_val_set = ACK_CUMACK; 13363 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13364 /* Case D */ 13365 ae->ack_val_set = ACK_DUPACK; 13366 } else { 13367 /* Case C */ 13368 ae->ack_val_set = ACK_RWND; 13369 } 13370 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13371 /* Validate timestamp */ 13372 if (ae->flags & HAS_TSTMP) { 13373 /* Setup for a timestamp */ 13374 to->to_flags = TOF_TS; 13375 ae->ts_echo -= tp->ts_offset; 13376 to->to_tsecr = ae->ts_echo; 13377 to->to_tsval = ae->ts_value; 13378 /* 13379 * If echoed timestamp is later than the current time, fall back to 13380 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13381 * were used when this connection was established. 13382 */ 13383 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13384 to->to_tsecr = 0; 13385 if (tp->ts_recent && 13386 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13387 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13388 #ifdef TCP_ACCOUNTING 13389 rdstc = get_cyclecount(); 13390 if (rdstc > ts_val) { 13391 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13392 (rdstc - ts_val)); 13393 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13394 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13395 } 13396 } 13397 #endif 13398 continue; 13399 } 13400 } 13401 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13402 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13403 tp->ts_recent_age = tcp_ts_getticks(); 13404 tp->ts_recent = ae->ts_value; 13405 } 13406 } else { 13407 /* Setup for a no options */ 13408 to->to_flags = 0; 13409 } 13410 /* Update the rcv time and perform idle reduction possibly */ 13411 if (tp->t_idle_reduce && 13412 (tp->snd_max == tp->snd_una) && 13413 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13414 counter_u64_add(rack_input_idle_reduces, 1); 13415 rack_cc_after_idle(rack, tp); 13416 } 13417 tp->t_rcvtime = ticks; 13418 /* Now what about ECN of a chain of pure ACKs? 
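 * Each compressed ack entry still carries the ECN codepoint from its
 * original header, so a CE mark on any ack in the chain is fed to the
 * congestion signal below just as for a normal segment.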
*/ 13419 if (tcp_ecn_input_segment(tp, ae->flags, 0, 13420 tcp_packets_this_ack(tp, ae->ack), 13421 ae->codepoint)) 13422 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13423 #ifdef TCP_ACCOUNTING 13424 /* Count for the specific type of ack in */ 13425 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13426 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13427 tp->tcp_cnt_counters[ae->ack_val_set]++; 13428 } 13429 #endif 13430 /* 13431 * Note how we could move up these in the determination 13432 * above, but we don't so that way the timestamp checks (and ECN) 13433 * is done first before we do any processing on the ACK. 13434 * The non-compressed path through the code has this 13435 * weakness (noted by @jtl) that it actually does some 13436 * processing before verifying the timestamp information. 13437 * We don't take that path here which is why we set 13438 * the ack_val_set first, do the timestamp and ecn 13439 * processing, and then look at what we have setup. 13440 */ 13441 if (ae->ack_val_set == ACK_BEHIND) { 13442 /* 13443 * Case B flag reordering, if window is not closed 13444 * or it could be a keep-alive or persists 13445 */ 13446 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13447 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13448 } 13449 } else if (ae->ack_val_set == ACK_DUPACK) { 13450 /* Case D */ 13451 rack_strike_dupack(rack); 13452 } else if (ae->ack_val_set == ACK_RWND) { 13453 /* Case C */ 13454 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13455 ts.tv_sec = ae->timestamp / 1000000000; 13456 ts.tv_nsec = ae->timestamp % 1000000000; 13457 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13458 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13459 } else { 13460 rack->r_ctl.act_rcv_time = *tv; 13461 } 13462 if (rack->forced_ack) { 13463 rack_handle_probe_response(rack, tiwin, 13464 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13465 } 13466 #ifdef TCP_ACCOUNTING 13467 win_up_req = 1; 13468 #endif 13469 win_upd_ack = ae->ack; 13470 win_seq = ae->seq; 13471 the_win = tiwin; 13472 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13473 } else { 13474 /* Case A */ 13475 if (SEQ_GT(ae->ack, tp->snd_max)) { 13476 /* 13477 * We just send an ack since the incoming 13478 * ack is beyond the largest seq we sent. 
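 * Such an ack is for data we never sent, so it is only answered
 * subject to the challenge-ACK rate limiting in ctf_ack_war_checks();
 * if that sets TF_ACKNOW we schedule output.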
13479 */ 13480 if ((tp->t_flags & TF_ACKNOW) == 0) { 13481 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13482 if (tp->t_flags & TF_ACKNOW) 13483 rack->r_wanted_output = 1; 13484 } 13485 } else { 13486 nsegs++; 13487 /* If the window changed setup to update */ 13488 if (tiwin != tp->snd_wnd) { 13489 win_upd_ack = ae->ack; 13490 win_seq = ae->seq; 13491 the_win = tiwin; 13492 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13493 } 13494 #ifdef TCP_ACCOUNTING 13495 /* Account for the acks */ 13496 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13497 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13498 } 13499 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13500 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13501 #endif 13502 high_seq = ae->ack; 13503 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13504 union tcp_log_stackspecific log; 13505 struct timeval tv; 13506 13507 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13508 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13509 log.u_bbr.flex1 = high_seq; 13510 log.u_bbr.flex2 = rack->r_ctl.roundends; 13511 log.u_bbr.flex3 = rack->r_ctl.current_round; 13512 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13513 log.u_bbr.flex8 = 8; 13514 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13515 0, &log, false, NULL, NULL, 0, &tv); 13516 } 13517 /* 13518 * The draft (v3) calls for us to use SEQ_GEQ, but that 13519 * causes issues when we are just going app limited. Lets 13520 * instead use SEQ_GT <or> where it's equal but more data 13521 * is outstanding. 13522 */ 13523 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13524 ((high_seq == rack->r_ctl.roundends) && 13525 SEQ_GT(tp->snd_max, tp->snd_una))) { 13526 rack->r_ctl.current_round++; 13527 rack->r_ctl.roundends = tp->snd_max; 13528 if (CC_ALGO(tp)->newround != NULL) { 13529 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 13530 } 13531 } 13532 /* Setup our act_rcv_time */ 13533 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13534 ts.tv_sec = ae->timestamp / 1000000000; 13535 ts.tv_nsec = ae->timestamp % 1000000000; 13536 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13537 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13538 } else { 13539 rack->r_ctl.act_rcv_time = *tv; 13540 } 13541 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13542 if (rack->rc_dsack_round_seen) { 13543 /* Is the dsack round over?
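 * A DSACK round ends once the cum-ack reaches the sequence recorded
 * in dsack_round_end when the round began; clear the flag and log it.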
*/ 13544 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13545 /* Yes it is */ 13546 rack->rc_dsack_round_seen = 0; 13547 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13548 } 13549 } 13550 } 13551 } 13552 /* And lets be sure to commit the rtt measurements for this ack */ 13553 tcp_rack_xmit_timer_commit(rack, tp); 13554 #ifdef TCP_ACCOUNTING 13555 rdstc = get_cyclecount(); 13556 if (rdstc > ts_val) { 13557 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13558 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13559 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13560 if (ae->ack_val_set == ACK_CUMACK) 13561 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13562 } 13563 } 13564 #endif 13565 } 13566 #ifdef TCP_ACCOUNTING 13567 ts_val = get_cyclecount(); 13568 #endif 13569 /* Tend to any collapsed window */ 13570 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 13571 /* The peer collapsed the window */ 13572 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__); 13573 } else if (rack->rc_has_collapsed) 13574 rack_un_collapse_window(rack, __LINE__); 13575 if ((rack->r_collapse_point_valid) && 13576 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 13577 rack->r_collapse_point_valid = 0; 13578 acked_amount = acked = (high_seq - tp->snd_una); 13579 if (acked) { 13580 /* 13581 * Clear the probe not answered flag 13582 * since cum-ack moved forward. 13583 */ 13584 rack->probe_not_answered = 0; 13585 if (rack->sack_attack_disable == 0) 13586 rack_do_decay(rack); 13587 if (acked >= segsiz) { 13588 /* 13589 * You only get credit for 13590 * MSS and greater (and you get extra 13591 * credit for larger cum-ack moves). 13592 */ 13593 int ac; 13594 13595 ac = acked / segsiz; 13596 rack->r_ctl.ack_count += ac; 13597 counter_u64_add(rack_ack_total, ac); 13598 } 13599 if (rack->r_ctl.ack_count > 0xfff00000) { 13600 /* 13601 * reduce the number to keep us under 13602 * a uint32_t. 13603 */ 13604 rack->r_ctl.ack_count /= 2; 13605 rack->r_ctl.sack_count /= 2; 13606 } 13607 if (tp->t_flags & TF_NEEDSYN) { 13608 /* 13609 * T/TCP: Connection was half-synchronized, and our SYN has 13610 * been ACK'd (so connection is now fully synchronized). Go 13611 * to non-starred state, increment snd_una for ACK of SYN, 13612 * and check if we can do window scaling. 13613 */ 13614 tp->t_flags &= ~TF_NEEDSYN; 13615 tp->snd_una++; 13616 acked_amount = acked = (high_seq - tp->snd_una); 13617 } 13618 if (acked > sbavail(&so->so_snd)) 13619 acked_amount = sbavail(&so->so_snd); 13620 #ifdef NETFLIX_EXP_DETECTION 13621 /* 13622 * We only care on a cum-ack move if we are in a sack-disabled 13623 * state. We have already added in to the ack_count, and we never 13624 * would disable on a cum-ack move, so we only care to do the 13625 * detection if it may "undo" it, i.e. we were in disabled already. 
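 * That is, the call below can only "undo" a previous disable and
 * re-enable normal SACK processing; it never newly disables on a
 * pure cum-ack move.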
13626 */ 13627 if (rack->sack_attack_disable) 13628 rack_do_detection(tp, rack, acked_amount, segsiz); 13629 #endif 13630 if (IN_FASTRECOVERY(tp->t_flags) && 13631 (rack->rack_no_prr == 0)) 13632 rack_update_prr(tp, rack, acked_amount, high_seq); 13633 if (IN_RECOVERY(tp->t_flags)) { 13634 if (SEQ_LT(high_seq, tp->snd_recover) && 13635 (SEQ_LT(high_seq, tp->snd_max))) { 13636 tcp_rack_partialack(tp); 13637 } else { 13638 rack_post_recovery(tp, high_seq); 13639 recovery = 1; 13640 } 13641 } 13642 /* Handle the rack-log-ack part (sendmap) */ 13643 if ((sbused(&so->so_snd) == 0) && 13644 (acked > acked_amount) && 13645 (tp->t_state >= TCPS_FIN_WAIT_1) && 13646 (tp->t_flags & TF_SENTFIN)) { 13647 /* 13648 * We must be sure our fin 13649 * was sent and acked (we can be 13650 * in FIN_WAIT_1 without having 13651 * sent the fin). 13652 */ 13653 ourfinisacked = 1; 13654 /* 13655 * Lets make sure snd_una is updated 13656 * since most likely acked_amount = 0 (it 13657 * should be). 13658 */ 13659 tp->snd_una = high_seq; 13660 } 13661 /* Did we make a RTO error? */ 13662 if ((tp->t_flags & TF_PREVVALID) && 13663 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13664 tp->t_flags &= ~TF_PREVVALID; 13665 if (tp->t_rxtshift == 1 && 13666 (int)(ticks - tp->t_badrxtwin) < 0) 13667 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13668 } 13669 /* Handle the data in the socket buffer */ 13670 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13671 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13672 if (acked_amount > 0) { 13673 struct mbuf *mfree; 13674 13675 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13676 SOCKBUF_LOCK(&so->so_snd); 13677 mfree = sbcut_locked(&so->so_snd, acked_amount); 13678 tp->snd_una = high_seq; 13679 /* Note we want to hold the sb lock through the sendmap adjust */ 13680 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13681 /* Wake up the socket if we have room to write more */ 13682 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13683 sowwakeup_locked(so); 13684 m_freem(mfree); 13685 } 13686 /* update progress */ 13687 tp->t_acktime = ticks; 13688 rack_log_progress_event(rack, tp, tp->t_acktime, 13689 PROGRESS_UPDATE, __LINE__); 13690 /* Clear out shifts and such */ 13691 tp->t_rxtshift = 0; 13692 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13693 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13694 rack->rc_tlp_in_progress = 0; 13695 rack->r_ctl.rc_tlp_cnt_out = 0; 13696 /* Send recover and snd_nxt must be dragged along */ 13697 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13698 tp->snd_recover = tp->snd_una; 13699 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13700 tp->snd_nxt = tp->snd_una; 13701 /* 13702 * If the RXT timer is running we want to 13703 * stop it, so we can restart a TLP (or new RXT). 
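 * Note only the retransmit timer (PACE_TMR_RXT) is cancelled here; an
 * armed pacing/output timer is left in place.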
13704 */ 13705 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13706 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13707 #ifdef NETFLIX_HTTP_LOGGING 13708 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13709 #endif 13710 tp->snd_wl2 = high_seq; 13711 tp->t_dupacks = 0; 13712 if (under_pacing && 13713 (rack->use_fixed_rate == 0) && 13714 (rack->in_probe_rtt == 0) && 13715 rack->rc_gp_dyn_mul && 13716 rack->rc_always_pace) { 13717 /* Check if we are dragging bottom */ 13718 rack_check_bottom_drag(tp, rack, so, acked); 13719 } 13720 if (tp->snd_una == tp->snd_max) { 13721 tp->t_flags &= ~TF_PREVVALID; 13722 rack->r_ctl.retran_during_recovery = 0; 13723 rack->r_ctl.dsack_byte_cnt = 0; 13724 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13725 if (rack->r_ctl.rc_went_idle_time == 0) 13726 rack->r_ctl.rc_went_idle_time = 1; 13727 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13728 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13729 tp->t_acktime = 0; 13730 /* Set so we might enter persists... */ 13731 rack->r_wanted_output = 1; 13732 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13733 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13734 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13735 (sbavail(&so->so_snd) == 0) && 13736 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13737 /* 13738 * The socket was gone and the 13739 * peer sent data (not now in the past), time to 13740 * reset him. 13741 */ 13742 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13743 /* tcp_close will kill the inp pre-log the Reset */ 13744 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13745 #ifdef TCP_ACCOUNTING 13746 rdstc = get_cyclecount(); 13747 if (rdstc > ts_val) { 13748 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13749 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13750 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13751 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13752 } 13753 } 13754 #endif 13755 m_freem(m); 13756 tp = tcp_close(tp); 13757 if (tp == NULL) { 13758 #ifdef TCP_ACCOUNTING 13759 sched_unpin(); 13760 #endif 13761 return (1); 13762 } 13763 /* 13764 * We would normally do drop-with-reset which would 13765 * send back a reset. We can't since we don't have 13766 * all the needed bits. Instead lets arrange for 13767 * a call to tcp_output(). That way since we 13768 * are in the closed state we will generate a reset. 13769 * 13770 * Note if tcp_accounting is on we don't unpin since 13771 * we do that after the goto label. 13772 */ 13773 goto send_out_a_rst; 13774 } 13775 if ((sbused(&so->so_snd) == 0) && 13776 (tp->t_state >= TCPS_FIN_WAIT_1) && 13777 (tp->t_flags & TF_SENTFIN)) { 13778 /* 13779 * If we can't receive any more data, then closing user can 13780 * proceed. Starting the timer is contrary to the 13781 * specification, but if we don't get a FIN we'll hang 13782 * forever. 13783 * 13784 */ 13785 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13786 soisdisconnected(so); 13787 tcp_timer_activate(tp, TT_2MSL, 13788 (tcp_fast_finwait2_recycle ? 13789 tcp_finwait2_timeout : 13790 TP_MAXIDLE(tp))); 13791 } 13792 if (ourfinisacked == 0) { 13793 /* 13794 * We don't change to fin-wait-2 if we have our fin acked 13795 * which means we are probably in TCPS_CLOSING. 
13796 */ 13797 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13798 } 13799 } 13800 } 13801 /* Wake up the socket if we have room to write more */ 13802 if (sbavail(&so->so_snd)) { 13803 rack->r_wanted_output = 1; 13804 if (ctf_progress_timeout_check(tp, true)) { 13805 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13806 tp, tick, PROGRESS_DROP, __LINE__); 13807 /* 13808 * We cheat here and don't send a RST, we should send one 13809 * when the pacer drops the connection. 13810 */ 13811 #ifdef TCP_ACCOUNTING 13812 rdstc = get_cyclecount(); 13813 if (rdstc > ts_val) { 13814 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13815 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13816 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13817 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13818 } 13819 } 13820 sched_unpin(); 13821 #endif 13822 (void)tcp_drop(tp, ETIMEDOUT); 13823 m_freem(m); 13824 return (1); 13825 } 13826 } 13827 if (ourfinisacked) { 13828 switch(tp->t_state) { 13829 case TCPS_CLOSING: 13830 #ifdef TCP_ACCOUNTING 13831 rdstc = get_cyclecount(); 13832 if (rdstc > ts_val) { 13833 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13834 (rdstc - ts_val)); 13835 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13836 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13837 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13838 } 13839 } 13840 sched_unpin(); 13841 #endif 13842 tcp_twstart(tp); 13843 m_freem(m); 13844 return (1); 13845 break; 13846 case TCPS_LAST_ACK: 13847 #ifdef TCP_ACCOUNTING 13848 rdstc = get_cyclecount(); 13849 if (rdstc > ts_val) { 13850 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13851 (rdstc - ts_val)); 13852 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13853 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13854 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13855 } 13856 } 13857 sched_unpin(); 13858 #endif 13859 tp = tcp_close(tp); 13860 ctf_do_drop(m, tp); 13861 return (1); 13862 break; 13863 case TCPS_FIN_WAIT_1: 13864 #ifdef TCP_ACCOUNTING 13865 rdstc = get_cyclecount(); 13866 if (rdstc > ts_val) { 13867 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13868 (rdstc - ts_val)); 13869 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13870 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13871 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13872 } 13873 } 13874 #endif 13875 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13876 soisdisconnected(so); 13877 tcp_timer_activate(tp, TT_2MSL, 13878 (tcp_fast_finwait2_recycle ? 13879 tcp_finwait2_timeout : 13880 TP_MAXIDLE(tp))); 13881 } 13882 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13883 break; 13884 default: 13885 break; 13886 } 13887 } 13888 if (rack->r_fast_output) { 13889 /* 13890 * We re doing fast output.. can we expand that? 
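 * (The newly acked bytes may let the prebuilt fast-send block cover
 * more data; rack_gain_for_fastoutput() accounts for that.)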
13891 */ 13892 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13893 } 13894 #ifdef TCP_ACCOUNTING 13895 rdstc = get_cyclecount(); 13896 if (rdstc > ts_val) { 13897 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13899 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13900 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13901 } 13902 } 13903 13904 } else if (win_up_req) { 13905 rdstc = get_cyclecount(); 13906 if (rdstc > ts_val) { 13907 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13908 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13909 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13910 } 13911 } 13912 #endif 13913 } 13914 /* Now is there a next packet, if so we are done */ 13915 m_freem(m); 13916 did_out = 0; 13917 if (nxt_pkt) { 13918 #ifdef TCP_ACCOUNTING 13919 sched_unpin(); 13920 #endif 13921 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13922 return (0); 13923 } 13924 rack_handle_might_revert(tp, rack); 13925 ctf_calc_rwin(so, tp); 13926 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13927 send_out_a_rst: 13928 if (tcp_output(tp) < 0) { 13929 #ifdef TCP_ACCOUNTING 13930 sched_unpin(); 13931 #endif 13932 return (1); 13933 } 13934 did_out = 1; 13935 } 13936 rack_free_trim(rack); 13937 #ifdef TCP_ACCOUNTING 13938 sched_unpin(); 13939 #endif 13940 rack_timer_audit(tp, rack, &so->so_snd); 13941 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13942 return (0); 13943 } 13944 13945 13946 static int 13947 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13948 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13949 int32_t nxt_pkt, struct timeval *tv) 13950 { 13951 struct inpcb *inp = tptoinpcb(tp); 13952 #ifdef TCP_ACCOUNTING 13953 uint64_t ts_val; 13954 #endif 13955 int32_t thflags, retval, did_out = 0; 13956 int32_t way_out = 0; 13957 /* 13958 * cts - is the current time from tv (caller gets ts) in microseconds. 13959 * ms_cts - is the current time from tv in milliseconds. 13960 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13961 */ 13962 uint32_t cts, us_cts, ms_cts; 13963 uint32_t tiwin, high_seq; 13964 struct timespec ts; 13965 struct tcpopt to; 13966 struct tcp_rack *rack; 13967 struct rack_sendmap *rsm; 13968 int32_t prev_state = 0; 13969 #ifdef TCP_ACCOUNTING 13970 int ack_val_set = 0xf; 13971 #endif 13972 int nsegs; 13973 13974 NET_EPOCH_ASSERT(); 13975 INP_WLOCK_ASSERT(inp); 13976 13977 /* 13978 * tv passed from common code is from either M_TSTMP_LRO or 13979 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13980 */ 13981 rack = (struct tcp_rack *)tp->t_fb_ptr; 13982 if (m->m_flags & M_ACKCMP) { 13983 /* 13984 * All compressed ack's are ack's by definition so 13985 * remove any ack required flag and then do the processing. 13986 */ 13987 rack->rc_ack_required = 0; 13988 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13989 } 13990 if (m->m_flags & M_ACKCMP) { 13991 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 13992 } 13993 cts = tcp_tv_to_usectick(tv); 13994 ms_cts = tcp_tv_to_mssectick(tv); 13995 nsegs = m->m_pkthdr.lro_nsegs; 13996 counter_u64_add(rack_proc_non_comp_ack, 1); 13997 thflags = tcp_get_flags(th); 13998 #ifdef TCP_ACCOUNTING 13999 sched_pin(); 14000 if (thflags & TH_ACK) 14001 ts_val = get_cyclecount(); 14002 #endif 14003 if ((m->m_flags & M_TSTMP) || 14004 (m->m_flags & M_TSTMP_LRO)) { 14005 mbuf_tstmp2timespec(m, &ts); 14006 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14007 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14008 } else 14009 rack->r_ctl.act_rcv_time = *tv; 14010 kern_prefetch(rack, &prev_state); 14011 prev_state = 0; 14012 /* 14013 * Unscale the window into a 32-bit value. For the SYN_SENT state 14014 * the scale is zero. 14015 */ 14016 tiwin = th->th_win << tp->snd_scale; 14017 #ifdef TCP_ACCOUNTING 14018 if (thflags & TH_ACK) { 14019 /* 14020 * We have a tradeoff here. We can either do what we are 14021 * doing i.e. pinning to this CPU and then doing the accounting 14022 * <or> we could do a critical enter, setup the rdtsc and cpu 14023 * as in below, and then validate we are on the same CPU on 14024 * exit. I have choosen to not do the critical enter since 14025 * that often will gain you a context switch, and instead lock 14026 * us (line above this if) to the same CPU with sched_pin(). This 14027 * means we may be context switched out for a higher priority 14028 * interupt but we won't be moved to another CPU. 14029 * 14030 * If this occurs (which it won't very often since we most likely 14031 * are running this code in interupt context and only a higher 14032 * priority will bump us ... clock?) we will falsely add in 14033 * to the time the interupt processing time plus the ack processing 14034 * time. This is ok since its a rare event. 14035 */ 14036 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14037 ctf_fixed_maxseg(tp)); 14038 } 14039 #endif 14040 /* 14041 * Parse options on any incoming segment. 14042 */ 14043 memset(&to, 0, sizeof(to)); 14044 tcp_dooptions(&to, (u_char *)(th + 1), 14045 (th->th_off << 2) - sizeof(struct tcphdr), 14046 (thflags & TH_SYN) ? TO_SYN : 0); 14047 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14048 __func__)); 14049 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14050 __func__)); 14051 14052 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14053 (tp->t_flags & TF_GPUTINPROG)) { 14054 /* 14055 * We have a goodput in progress 14056 * and we have entered a late state. 14057 * Do we have enough data in the sb 14058 * to handle the GPUT request? 14059 */ 14060 uint32_t bytes; 14061 14062 bytes = tp->gput_ack - tp->gput_seq; 14063 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14064 bytes += tp->gput_seq - tp->snd_una; 14065 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 14066 /* 14067 * There are not enough bytes in the socket 14068 * buffer that have been sent to cover this 14069 * measurement. Cancel it. 
14070 */ 14071 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14072 rack->r_ctl.rc_gp_srtt /*flex1*/, 14073 tp->gput_seq, 14074 0, 0, 18, __LINE__, NULL, 0); 14075 tp->t_flags &= ~TF_GPUTINPROG; 14076 } 14077 } 14078 high_seq = th->th_ack; 14079 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14080 union tcp_log_stackspecific log; 14081 struct timeval ltv; 14082 #ifdef NETFLIX_HTTP_LOGGING 14083 struct http_sendfile_track *http_req; 14084 14085 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14086 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14087 } else { 14088 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14089 } 14090 #endif 14091 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14092 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14093 if (rack->rack_no_prr == 0) 14094 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14095 else 14096 log.u_bbr.flex1 = 0; 14097 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14098 log.u_bbr.use_lt_bw <<= 1; 14099 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14100 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14101 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14102 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14103 log.u_bbr.flex3 = m->m_flags; 14104 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14105 log.u_bbr.lost = thflags; 14106 log.u_bbr.pacing_gain = 0x1; 14107 #ifdef TCP_ACCOUNTING 14108 log.u_bbr.cwnd_gain = ack_val_set; 14109 #endif 14110 log.u_bbr.flex7 = 2; 14111 if (m->m_flags & M_TSTMP) { 14112 /* Record the hardware timestamp if present */ 14113 mbuf_tstmp2timespec(m, &ts); 14114 ltv.tv_sec = ts.tv_sec; 14115 ltv.tv_usec = ts.tv_nsec / 1000; 14116 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 14117 } else if (m->m_flags & M_TSTMP_LRO) { 14118 /* Record the LRO the arrival timestamp */ 14119 mbuf_tstmp2timespec(m, &ts); 14120 ltv.tv_sec = ts.tv_sec; 14121 ltv.tv_usec = ts.tv_nsec / 1000; 14122 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 14123 } 14124 log.u_bbr.timeStamp = tcp_get_usecs(<v); 14125 /* Log the rcv time */ 14126 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14127 #ifdef NETFLIX_HTTP_LOGGING 14128 log.u_bbr.applimited = tp->t_http_closed; 14129 log.u_bbr.applimited <<= 8; 14130 log.u_bbr.applimited |= tp->t_http_open; 14131 log.u_bbr.applimited <<= 8; 14132 log.u_bbr.applimited |= tp->t_http_req; 14133 if (http_req) { 14134 /* Copy out any client req info */ 14135 /* seconds */ 14136 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14137 /* useconds */ 14138 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14139 log.u_bbr.rttProp = http_req->timestamp; 14140 log.u_bbr.cur_del_rate = http_req->start; 14141 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14142 log.u_bbr.flex8 |= 1; 14143 } else { 14144 log.u_bbr.flex8 |= 2; 14145 log.u_bbr.bw_inuse = http_req->end; 14146 } 14147 log.u_bbr.flex6 = http_req->start_seq; 14148 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14149 log.u_bbr.flex8 |= 4; 14150 log.u_bbr.epoch = http_req->end_seq; 14151 } 14152 } 14153 #endif 14154 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14155 tlen, &log, true, <v); 14156 } 14157 /* Remove ack required flag if set, we have one */ 14158 if (thflags & TH_ACK) 14159 rack->rc_ack_required = 0; 14160 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14161 way_out = 4; 14162 retval = 0; 14163 m_freem(m); 14164 goto done_with_input; 14165 } 14166 /* 14167 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14168 * check SEQ.ACK first as 
described on page 66 of RFC 793, section 3.9. 14169 */ 14170 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14171 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14172 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14173 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14174 #ifdef TCP_ACCOUNTING 14175 sched_unpin(); 14176 #endif 14177 return (1); 14178 } 14179 /* 14180 * If timestamps were negotiated during SYN/ACK and a 14181 * segment without a timestamp is received, silently drop 14182 * the segment, unless it is a RST segment or missing timestamps are 14183 * tolerated. 14184 * See section 3.2 of RFC 7323. 14185 */ 14186 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14187 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14188 way_out = 5; 14189 retval = 0; 14190 m_freem(m); 14191 goto done_with_input; 14192 } 14193 14194 /* 14195 * Segment received on connection. Reset idle time and keep-alive 14196 * timer. XXX: This should be done after segment validation to 14197 * ignore broken/spoofed segs. 14198 */ 14199 if (tp->t_idle_reduce && 14200 (tp->snd_max == tp->snd_una) && 14201 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14202 counter_u64_add(rack_input_idle_reduces, 1); 14203 rack_cc_after_idle(rack, tp); 14204 } 14205 tp->t_rcvtime = ticks; 14206 #ifdef STATS 14207 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14208 #endif 14209 if (tiwin > rack->r_ctl.rc_high_rwnd) 14210 rack->r_ctl.rc_high_rwnd = tiwin; 14211 /* 14212 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14213 * this to occur after we've validated the segment. 14214 */ 14215 if (tcp_ecn_input_segment(tp, thflags, tlen, 14216 tcp_packets_this_ack(tp, th->th_ack), 14217 iptos)) 14218 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14219 14220 /* 14221 * If echoed timestamp is later than the current time, fall back to 14222 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14223 * were used when this connection was established. 14224 */ 14225 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14226 to.to_tsecr -= tp->ts_offset; 14227 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14228 to.to_tsecr = 0; 14229 } 14230 14231 /* 14232 * If its the first time in we need to take care of options and 14233 * verify we can do SACK for rack! 14234 */ 14235 if (rack->r_state == 0) { 14236 /* Should be init'd by rack_init() */ 14237 KASSERT(rack->rc_inp != NULL, 14238 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14239 if (rack->rc_inp == NULL) { 14240 rack->rc_inp = inp; 14241 } 14242 14243 /* 14244 * Process options only when we get SYN/ACK back. The SYN 14245 * case for incoming connections is handled in tcp_syncache. 14246 * According to RFC1323 the window field in a SYN (i.e., a 14247 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14248 * this is traditional behavior, may need to be cleaned up. 14249 */ 14250 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14251 /* Handle parallel SYN for ECN */ 14252 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14253 if ((to.to_flags & TOF_SCALE) && 14254 (tp->t_flags & TF_REQ_SCALE)) { 14255 tp->t_flags |= TF_RCVD_SCALE; 14256 tp->snd_scale = to.to_wscale; 14257 } else 14258 tp->t_flags &= ~TF_REQ_SCALE; 14259 /* 14260 * Initial send window. It will be updated with the 14261 * next incoming segment to the scaled value. 
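 * (Per RFC 1323/7323 the window carried in a SYN is never scaled,
 * which is why the raw th_win is taken here.)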
14262 */ 14263 tp->snd_wnd = th->th_win; 14264 rack_validate_fo_sendwin_up(tp, rack); 14265 if ((to.to_flags & TOF_TS) && 14266 (tp->t_flags & TF_REQ_TSTMP)) { 14267 tp->t_flags |= TF_RCVD_TSTMP; 14268 tp->ts_recent = to.to_tsval; 14269 tp->ts_recent_age = cts; 14270 } else 14271 tp->t_flags &= ~TF_REQ_TSTMP; 14272 if (to.to_flags & TOF_MSS) { 14273 tcp_mss(tp, to.to_mss); 14274 } 14275 if ((tp->t_flags & TF_SACK_PERMIT) && 14276 (to.to_flags & TOF_SACKPERM) == 0) 14277 tp->t_flags &= ~TF_SACK_PERMIT; 14278 if (IS_FASTOPEN(tp->t_flags)) { 14279 if (to.to_flags & TOF_FASTOPEN) { 14280 uint16_t mss; 14281 14282 if (to.to_flags & TOF_MSS) 14283 mss = to.to_mss; 14284 else 14285 if ((inp->inp_vflag & INP_IPV6) != 0) 14286 mss = TCP6_MSS; 14287 else 14288 mss = TCP_MSS; 14289 tcp_fastopen_update_cache(tp, mss, 14290 to.to_tfo_len, to.to_tfo_cookie); 14291 } else 14292 tcp_fastopen_disable_path(tp); 14293 } 14294 } 14295 /* 14296 * At this point we are at the initial call. Here we decide 14297 * if we are doing RACK or not. We do this by seeing if 14298 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14299 * The code now does do dup-ack counting so if you don't 14300 * switch back you won't get rack & TLP, but you will still 14301 * get this stack. 14302 */ 14303 14304 if ((rack_sack_not_required == 0) && 14305 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14306 tcp_switch_back_to_default(tp); 14307 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14308 tlen, iptos); 14309 #ifdef TCP_ACCOUNTING 14310 sched_unpin(); 14311 #endif 14312 return (1); 14313 } 14314 tcp_set_hpts(inp); 14315 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14316 } 14317 if (thflags & TH_FIN) 14318 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14319 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14320 if ((rack->rc_gp_dyn_mul) && 14321 (rack->use_fixed_rate == 0) && 14322 (rack->rc_always_pace)) { 14323 /* Check in on probertt */ 14324 rack_check_probe_rtt(rack, us_cts); 14325 } 14326 rack_clear_rate_sample(rack); 14327 if ((rack->forced_ack) && 14328 ((tcp_get_flags(th) & TH_RST) == 0)) { 14329 rack_handle_probe_response(rack, tiwin, us_cts); 14330 } 14331 /* 14332 * This is the one exception case where we set the rack state 14333 * always. All other times (timers etc) we must have a rack-state 14334 * set (so we assure we have done the checks above for SACK). 14335 */ 14336 rack->r_ctl.rc_rcvtime = cts; 14337 if (rack->r_state != tp->t_state) 14338 rack_set_state(tp, rack); 14339 if (SEQ_GT(th->th_ack, tp->snd_una) && 14340 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14341 kern_prefetch(rsm, &prev_state); 14342 prev_state = rack->r_state; 14343 retval = (*rack->r_substate) (m, th, so, 14344 tp, &to, drop_hdrlen, 14345 tlen, tiwin, thflags, nxt_pkt, iptos); 14346 if (retval == 0) { 14347 /* 14348 * If retval is 1 the tcb is unlocked and most likely the tp 14349 * is gone. 14350 */ 14351 INP_WLOCK_ASSERT(inp); 14352 if ((rack->rc_gp_dyn_mul) && 14353 (rack->rc_always_pace) && 14354 (rack->use_fixed_rate == 0) && 14355 rack->in_probe_rtt && 14356 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14357 /* 14358 * If we are going for target, lets recheck before 14359 * we output. 14360 */ 14361 rack_check_probe_rtt(rack, us_cts); 14362 } 14363 if (rack->set_pacing_done_a_iw == 0) { 14364 /* How much has been acked? 
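 * Once roughly an initial window (10 * maxseg beyond the ISS) has
 * been cumulatively acked we consider the pacing segment size settled
 * and set it up exactly once.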
*/ 14365 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14366 /* We have enough to set in the pacing segment size */ 14367 rack->set_pacing_done_a_iw = 1; 14368 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14369 } 14370 } 14371 tcp_rack_xmit_timer_commit(rack, tp); 14372 #ifdef TCP_ACCOUNTING 14373 /* 14374 * If we set the ack_val_se to what ack processing we are doing 14375 * we also want to track how many cycles we burned. Note 14376 * the bits after tcp_output we let be "free". This is because 14377 * we are also tracking the tcp_output times as well. Note the 14378 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14379 * 0xf cannot be returned and is what we initialize it too to 14380 * indicate we are not doing the tabulations. 14381 */ 14382 if (ack_val_set != 0xf) { 14383 uint64_t crtsc; 14384 14385 crtsc = get_cyclecount(); 14386 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14387 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14388 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14389 } 14390 } 14391 #endif 14392 if (nxt_pkt == 0) { 14393 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14394 do_output_now: 14395 if (tcp_output(tp) < 0) 14396 return (1); 14397 did_out = 1; 14398 } 14399 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14400 rack_free_trim(rack); 14401 } 14402 /* Update any rounds needed */ 14403 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14404 union tcp_log_stackspecific log; 14405 struct timeval tv; 14406 14407 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14408 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14409 log.u_bbr.flex1 = high_seq; 14410 log.u_bbr.flex2 = rack->r_ctl.roundends; 14411 log.u_bbr.flex3 = rack->r_ctl.current_round; 14412 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14413 log.u_bbr.flex8 = 9; 14414 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14415 0, &log, false, NULL, NULL, 0, &tv); 14416 } 14417 /* 14418 * The draft (v3) calls for us to use SEQ_GEQ, but that 14419 * causes issues when we are just going app limited. Lets 14420 * instead use SEQ_GT <or> where its equal but more data 14421 * is outstanding. 14422 */ 14423 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14424 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14425 rack->r_ctl.current_round++; 14426 rack->r_ctl.roundends = tp->snd_max; 14427 if (CC_ALGO(tp)->newround != NULL) { 14428 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 14429 } 14430 } 14431 if ((nxt_pkt == 0) && 14432 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14433 (SEQ_GT(tp->snd_max, tp->snd_una) || 14434 (tp->t_flags & TF_DELACK) || 14435 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14436 (tp->t_state <= TCPS_CLOSING)))) { 14437 /* We could not send (probably in the hpts but stopped the timer earlier)? 
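 * If we are already queued in the hpts waiting to send, nothing more
 * is needed. Otherwise pull ourselves out of the hpts, note whether
 * we are running late, and restart the timer so keep-alive or
 * delayed-ack style timers are still armed.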
*/ 14438 if ((tp->snd_max == tp->snd_una) && 14439 ((tp->t_flags & TF_DELACK) == 0) && 14440 (tcp_in_hpts(rack->rc_inp)) && 14441 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14442 /* keep alive not needed if we are hptsi output yet */ 14443 ; 14444 } else { 14445 int late = 0; 14446 if (tcp_in_hpts(inp)) { 14447 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14448 us_cts = tcp_get_usecs(NULL); 14449 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14450 rack->r_early = 1; 14451 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14452 } else 14453 late = 1; 14454 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14455 } 14456 tcp_hpts_remove(inp); 14457 } 14458 if (late && (did_out == 0)) { 14459 /* 14460 * We are late in the sending 14461 * and we did not call the output 14462 * (this probably should not happen). 14463 */ 14464 goto do_output_now; 14465 } 14466 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14467 } 14468 way_out = 1; 14469 } else if (nxt_pkt == 0) { 14470 /* Do we have the correct timer running? */ 14471 rack_timer_audit(tp, rack, &so->so_snd); 14472 way_out = 2; 14473 } 14474 done_with_input: 14475 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14476 if (did_out) 14477 rack->r_wanted_output = 0; 14478 #ifdef TCP_ACCOUNTING 14479 } else { 14480 /* 14481 * Track the time (see above). 14482 */ 14483 if (ack_val_set != 0xf) { 14484 uint64_t crtsc; 14485 14486 crtsc = get_cyclecount(); 14487 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14488 /* 14489 * Note we *DO NOT* increment the per-tcb counters since 14490 * in the else the TP may be gone!! 14491 */ 14492 } 14493 #endif 14494 } 14495 #ifdef TCP_ACCOUNTING 14496 sched_unpin(); 14497 #endif 14498 return (retval); 14499 } 14500 14501 void 14502 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14503 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14504 { 14505 struct timeval tv; 14506 14507 /* First lets see if we have old packets */ 14508 if (tp->t_in_pkt) { 14509 if (ctf_do_queued_segments(so, tp, 1)) { 14510 m_freem(m); 14511 return; 14512 } 14513 } 14514 if (m->m_flags & M_TSTMP_LRO) { 14515 mbuf_tstmp2timeval(m, &tv); 14516 } else { 14517 /* Should not be should we kassert instead? */ 14518 tcp_get_usecs(&tv); 14519 } 14520 if (rack_do_segment_nounlock(m, th, so, tp, 14521 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14522 INP_WUNLOCK(tptoinpcb(tp)); 14523 } 14524 } 14525 14526 struct rack_sendmap * 14527 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14528 { 14529 struct rack_sendmap *rsm = NULL; 14530 int32_t idx; 14531 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14532 14533 /* Return the next guy to be re-transmitted */ 14534 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14535 return (NULL); 14536 } 14537 if (tp->t_flags & TF_SENTFIN) { 14538 /* retran the end FIN? 
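 * No, once our FIN has been sent we never hand back a retransmit
 * candidate from here.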
*/ 14539 return (NULL); 14540 } 14541 /* ok lets look at this one */ 14542 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14543 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14544 return (rsm); 14545 } 14546 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14547 goto check_it; 14548 } 14549 rsm = rack_find_lowest_rsm(rack); 14550 if (rsm == NULL) { 14551 return (NULL); 14552 } 14553 check_it: 14554 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14555 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14556 /* 14557 * No sack so we automatically do the 3 strikes and 14558 * retransmit (no rack timer would be started). 14559 */ 14560 14561 return (rsm); 14562 } 14563 if (rsm->r_flags & RACK_ACKED) { 14564 return (NULL); 14565 } 14566 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14567 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14568 /* Its not yet ready */ 14569 return (NULL); 14570 } 14571 srtt = rack_grab_rtt(tp, rack); 14572 idx = rsm->r_rtr_cnt - 1; 14573 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14574 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14575 if ((tsused == ts_low) || 14576 (TSTMP_LT(tsused, ts_low))) { 14577 /* No time since sending */ 14578 return (NULL); 14579 } 14580 if ((tsused - ts_low) < thresh) { 14581 /* It has not been long enough yet */ 14582 return (NULL); 14583 } 14584 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14585 ((rsm->r_flags & RACK_SACK_PASSED) && 14586 (rack->sack_attack_disable == 0))) { 14587 /* 14588 * We have passed the dup-ack threshold <or> 14589 * a SACK has indicated this is missing. 14590 * Note that if you are a declared attacker 14591 * it is only the dup-ack threshold that 14592 * will cause retransmits. 14593 */ 14594 /* log retransmit reason */ 14595 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14596 rack->r_fast_output = 0; 14597 return (rsm); 14598 } 14599 return (NULL); 14600 } 14601 14602 static void 14603 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14604 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14605 int line, struct rack_sendmap *rsm, uint8_t quality) 14606 { 14607 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14608 union tcp_log_stackspecific log; 14609 struct timeval tv; 14610 14611 memset(&log, 0, sizeof(log)); 14612 log.u_bbr.flex1 = slot; 14613 log.u_bbr.flex2 = len; 14614 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14615 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14616 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14617 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14618 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14619 log.u_bbr.use_lt_bw <<= 1; 14620 log.u_bbr.use_lt_bw |= rack->r_late; 14621 log.u_bbr.use_lt_bw <<= 1; 14622 log.u_bbr.use_lt_bw |= rack->r_early; 14623 log.u_bbr.use_lt_bw <<= 1; 14624 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14625 log.u_bbr.use_lt_bw <<= 1; 14626 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14627 log.u_bbr.use_lt_bw <<= 1; 14628 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14629 log.u_bbr.use_lt_bw <<= 1; 14630 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14631 log.u_bbr.use_lt_bw <<= 1; 14632 log.u_bbr.use_lt_bw |= rack->gp_ready; 14633 log.u_bbr.pkt_epoch = line; 14634 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14635 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14636 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14637 log.u_bbr.bw_inuse = bw_est; 14638 log.u_bbr.delRate = bw; 14639 if (rack->r_ctl.gp_bw == 0) 14640 log.u_bbr.cur_del_rate = 0; 
14641 else 14642 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14643 log.u_bbr.rttProp = len_time; 14644 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14645 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14646 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14647 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14648 /* We are in slow start */ 14649 log.u_bbr.flex7 = 1; 14650 } else { 14651 /* we are on congestion avoidance */ 14652 log.u_bbr.flex7 = 0; 14653 } 14654 log.u_bbr.flex8 = method; 14655 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14656 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14657 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14658 log.u_bbr.cwnd_gain <<= 1; 14659 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14660 log.u_bbr.cwnd_gain <<= 1; 14661 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14662 log.u_bbr.bbr_substate = quality; 14663 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14664 &rack->rc_inp->inp_socket->so_rcv, 14665 &rack->rc_inp->inp_socket->so_snd, 14666 BBR_LOG_HPTSI_CALC, 0, 14667 0, &log, false, &tv); 14668 } 14669 } 14670 14671 static uint32_t 14672 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14673 { 14674 uint32_t new_tso, user_max; 14675 14676 user_max = rack->rc_user_set_max_segs * mss; 14677 if (rack->rc_force_max_seg) { 14678 return (user_max); 14679 } 14680 if (rack->use_fixed_rate && 14681 ((rack->r_ctl.crte == NULL) || 14682 (bw != rack->r_ctl.crte->rate))) { 14683 /* Use the user mss since we are not exactly matched */ 14684 return (user_max); 14685 } 14686 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14687 if (new_tso > user_max) 14688 new_tso = user_max; 14689 return (new_tso); 14690 } 14691 14692 static int32_t 14693 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14694 { 14695 uint64_t lentim, fill_bw; 14696 14697 /* Lets first see if we are full, if so continue with normal rate */ 14698 rack->r_via_fill_cw = 0; 14699 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14700 return (slot); 14701 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14702 return (slot); 14703 if (rack->r_ctl.rc_last_us_rtt == 0) 14704 return (slot); 14705 if (rack->rc_pace_fill_if_rttin_range && 14706 (rack->r_ctl.rc_last_us_rtt >= 14707 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14708 /* The rtt is huge, N * smallest, lets not fill */ 14709 return (slot); 14710 } 14711 /* 14712 * first lets calculate the b/w based on the last us-rtt 14713 * and the sndwnd. 14714 */ 14715 fill_bw = rack->r_ctl.cwnd_to_use; 14716 /* Take the rwnd if its smaller */ 14717 if (fill_bw > rack->rc_tp->snd_wnd) 14718 fill_bw = rack->rc_tp->snd_wnd; 14719 if (rack->r_fill_less_agg) { 14720 /* 14721 * Now take away the inflight (this will reduce our 14722 * aggressiveness and yeah, if we get that much out in 1RTT 14723 * we will have had acks come back and still be behind). 
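 * Worked example (illustrative numbers only): cwnd_to_use = 100000
 * bytes, inflight = 40000 and rc_last_us_rtt = 20000 usec gives
 * fill_bw = 60000 bytes per RTT after the subtraction below, which
 * the conversion that follows turns into 60000 * 1000000 / 20000 =
 * 3,000,000 bytes/sec.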
14724 */ 14725 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14726 } 14727 /* Now lets make it into a b/w */ 14728 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14729 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14730 /* We are below the min b/w */ 14731 if (non_paced) 14732 *rate_wanted = fill_bw; 14733 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14734 return (slot); 14735 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14736 fill_bw = rack->r_ctl.bw_rate_cap; 14737 rack->r_via_fill_cw = 1; 14738 if (rack->r_rack_hw_rate_caps && 14739 (rack->r_ctl.crte != NULL)) { 14740 uint64_t high_rate; 14741 14742 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14743 if (fill_bw > high_rate) { 14744 /* We are capping bw at the highest rate table entry */ 14745 if (*rate_wanted > high_rate) { 14746 /* The original rate was also capped */ 14747 rack->r_via_fill_cw = 0; 14748 } 14749 rack_log_hdwr_pacing(rack, 14750 fill_bw, high_rate, __LINE__, 14751 0, 3); 14752 fill_bw = high_rate; 14753 if (capped) 14754 *capped = 1; 14755 } 14756 } else if ((rack->r_ctl.crte == NULL) && 14757 (rack->rack_hdrw_pacing == 0) && 14758 (rack->rack_hdw_pace_ena) && 14759 rack->r_rack_hw_rate_caps && 14760 (rack->rack_attempt_hdwr_pace == 0) && 14761 (rack->rc_inp->inp_route.ro_nh != NULL) && 14762 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14763 /* 14764 * Ok we may have a first attempt that is greater than our top rate 14765 * lets check. 14766 */ 14767 uint64_t high_rate; 14768 14769 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14770 if (high_rate) { 14771 if (fill_bw > high_rate) { 14772 fill_bw = high_rate; 14773 if (capped) 14774 *capped = 1; 14775 } 14776 } 14777 } 14778 /* 14779 * Ok fill_bw holds our mythical b/w to fill the cwnd 14780 * in a rtt, what does that time wise equate too? 14781 */ 14782 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14783 lentim /= fill_bw; 14784 *rate_wanted = fill_bw; 14785 if (non_paced || (lentim < slot)) { 14786 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14787 0, lentim, 12, __LINE__, NULL, 0); 14788 return ((int32_t)lentim); 14789 } else 14790 return (slot); 14791 } 14792 14793 static int32_t 14794 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14795 { 14796 uint64_t srtt; 14797 int32_t slot = 0; 14798 int can_start_hw_pacing = 1; 14799 int err; 14800 14801 if (rack->rc_always_pace == 0) { 14802 /* 14803 * We use the most optimistic possible cwnd/srtt for 14804 * sending calculations. This will make our 14805 * calculation anticipate getting more through 14806 * quicker then possible. But thats ok we don't want 14807 * the peer to have a gap in data sending. 14808 */ 14809 uint64_t cwnd, tr_perms = 0; 14810 int32_t reduce = 0; 14811 14812 old_method: 14813 /* 14814 * We keep no precise pacing with the old method 14815 * instead we use the pacer to mitigate bursts. 
14816 */ 14817 if (rack->r_ctl.rc_rack_min_rtt) 14818 srtt = rack->r_ctl.rc_rack_min_rtt; 14819 else 14820 srtt = max(tp->t_srtt, 1); 14821 if (rack->r_ctl.rc_rack_largest_cwnd) 14822 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14823 else 14824 cwnd = rack->r_ctl.cwnd_to_use; 14825 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14826 tr_perms = (cwnd * 1000) / srtt; 14827 if (tr_perms == 0) { 14828 tr_perms = ctf_fixed_maxseg(tp); 14829 } 14830 /* 14831 * Calculate how long this will take to drain, if 14832 * the calculation comes out to zero, thats ok we 14833 * will use send_a_lot to possibly spin around for 14834 * more increasing tot_len_this_send to the point 14835 * that its going to require a pace, or we hit the 14836 * cwnd. Which in that case we are just waiting for 14837 * a ACK. 14838 */ 14839 slot = len / tr_perms; 14840 /* Now do we reduce the time so we don't run dry? */ 14841 if (slot && rack_slot_reduction) { 14842 reduce = (slot / rack_slot_reduction); 14843 if (reduce < slot) { 14844 slot -= reduce; 14845 } else 14846 slot = 0; 14847 } 14848 slot *= HPTS_USEC_IN_MSEC; 14849 if (rack->rc_pace_to_cwnd) { 14850 uint64_t rate_wanted = 0; 14851 14852 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14853 rack->rc_ack_can_sendout_data = 1; 14854 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14855 } else 14856 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14857 } else { 14858 uint64_t bw_est, res, lentim, rate_wanted; 14859 uint32_t orig_val, segs, oh; 14860 int capped = 0; 14861 int prev_fill; 14862 14863 if ((rack->r_rr_config == 1) && rsm) { 14864 return (rack->r_ctl.rc_min_to); 14865 } 14866 if (rack->use_fixed_rate) { 14867 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14868 } else if ((rack->r_ctl.init_rate == 0) && 14869 #ifdef NETFLIX_PEAKRATE 14870 (rack->rc_tp->t_maxpeakrate == 0) && 14871 #endif 14872 (rack->r_ctl.gp_bw == 0)) { 14873 /* no way to yet do an estimate */ 14874 bw_est = rate_wanted = 0; 14875 } else { 14876 bw_est = rack_get_bw(rack); 14877 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14878 } 14879 if ((bw_est == 0) || (rate_wanted == 0) || 14880 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14881 /* 14882 * No way yet to make a b/w estimate or 14883 * our raise is set incorrectly. 14884 */ 14885 goto old_method; 14886 } 14887 /* We need to account for all the overheads */ 14888 segs = (len + segsiz - 1) / segsiz; 14889 /* 14890 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14891 * and how much data we put in each packet. Yes this 14892 * means we may be off if we are larger than 1500 bytes 14893 * or smaller. But this just makes us more conservative. 14894 */ 14895 if (rack_hw_rate_min && 14896 (bw_est < rack_hw_rate_min)) 14897 can_start_hw_pacing = 0; 14898 if (ETHERNET_SEGMENT_SIZE > segsiz) 14899 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14900 else 14901 oh = 0; 14902 segs *= oh; 14903 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14904 res = lentim / rate_wanted; 14905 slot = (uint32_t)res; 14906 orig_val = rack->r_ctl.rc_pace_max_segs; 14907 if (rack->r_ctl.crte == NULL) { 14908 /* 14909 * Only do this if we are not hardware pacing 14910 * since if we are doing hw-pacing below we will 14911 * set make a call after setting up or changing 14912 * the rate. 
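 * (As a reference point for the software slot computed just above, with
 * purely illustrative numbers: segsiz = 1448 and rate_wanted =
 * 12,500,000 bytes/sec (about 100 Mbps), a 14,480 byte send is 10
 * segments, the assumed per-segment overhead is 1514 - 1448 = 66 bytes,
 * and slot = (14480 + 660) * 1,000,000 / 12,500,000 is about 1,211 usec.)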
14913 */ 14914 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14915 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14916 /* 14917 * We lost our rate somehow, this can happen 14918 * if the interface changed underneath us. 14919 */ 14920 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14921 rack->r_ctl.crte = NULL; 14922 /* Lets re-allow attempting to setup pacing */ 14923 rack->rack_hdrw_pacing = 0; 14924 rack->rack_attempt_hdwr_pace = 0; 14925 rack_log_hdwr_pacing(rack, 14926 rate_wanted, bw_est, __LINE__, 14927 0, 6); 14928 } 14929 /* Did we change the TSO size, if so log it */ 14930 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14931 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14932 prev_fill = rack->r_via_fill_cw; 14933 if ((rack->rc_pace_to_cwnd) && 14934 (capped == 0) && 14935 (rack->use_fixed_rate == 0) && 14936 (rack->in_probe_rtt == 0) && 14937 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14938 /* 14939 * We want to pace at our rate *or* faster to 14940 * fill the cwnd to the max if its not full. 14941 */ 14942 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14943 } 14944 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14945 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14946 if ((rack->rack_hdw_pace_ena) && 14947 (can_start_hw_pacing > 0) && 14948 (rack->rack_hdrw_pacing == 0) && 14949 (rack->rack_attempt_hdwr_pace == 0)) { 14950 /* 14951 * Lets attempt to turn on hardware pacing 14952 * if we can. 14953 */ 14954 rack->rack_attempt_hdwr_pace = 1; 14955 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14956 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14957 rate_wanted, 14958 RS_PACING_GEQ, 14959 &err, &rack->r_ctl.crte_prev_rate); 14960 if (rack->r_ctl.crte) { 14961 rack->rack_hdrw_pacing = 1; 14962 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14963 0, rack->r_ctl.crte, 14964 NULL); 14965 rack_log_hdwr_pacing(rack, 14966 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14967 err, 0); 14968 rack->r_ctl.last_hw_bw_req = rate_wanted; 14969 } else { 14970 counter_u64_add(rack_hw_pace_init_fail, 1); 14971 } 14972 } else if (rack->rack_hdrw_pacing && 14973 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14974 /* Do we need to adjust our rate? */ 14975 const struct tcp_hwrate_limit_table *nrte; 14976 14977 if (rack->r_up_only && 14978 (rate_wanted < rack->r_ctl.crte->rate)) { 14979 /** 14980 * We have four possible states here 14981 * having to do with the previous time 14982 * and this time. 14983 * previous | this-time 14984 * A) 0 | 0 -- fill_cw not in the picture 14985 * B) 1 | 0 -- we were doing a fill-cw but now are not 14986 * C) 1 | 1 -- all rates from fill_cw 14987 * D) 0 | 1 -- we were doing non-fill and now we are filling 14988 * 14989 * For case A, C and D we don't allow a drop. But for 14990 * case B where we now our on our steady rate we do 14991 * allow a drop. 14992 * 14993 */ 14994 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14995 goto done_w_hdwr; 14996 } 14997 if ((rate_wanted > rack->r_ctl.crte->rate) || 14998 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 14999 if (rack_hw_rate_to_low && 15000 (bw_est < rack_hw_rate_to_low)) { 15001 /* 15002 * The pacing rate is too low for hardware, but 15003 * do allow hardware pacing to be restarted. 
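 * For example (values are illustrative only): if rack_hw_rate_to_low
 * were 1,500,000 bytes/sec and bw_est dropped to 900,000 bytes/sec, we
 * release the hardware rate entry here and fall back to software pacing;
 * because rack_attempt_hdwr_pace is cleared, a later pass can try to
 * re-establish hardware pacing once the estimate recovers.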
15004 */ 15005 rack_log_hdwr_pacing(rack, 15006 bw_est, rack->r_ctl.crte->rate, __LINE__, 15007 0, 5); 15008 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15009 rack->r_ctl.crte = NULL; 15010 rack->rack_attempt_hdwr_pace = 0; 15011 rack->rack_hdrw_pacing = 0; 15012 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15013 goto done_w_hdwr; 15014 } 15015 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15016 rack->rc_tp, 15017 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15018 rate_wanted, 15019 RS_PACING_GEQ, 15020 &err, &rack->r_ctl.crte_prev_rate); 15021 if (nrte == NULL) { 15022 /* Lost the rate */ 15023 rack->rack_hdrw_pacing = 0; 15024 rack->r_ctl.crte = NULL; 15025 rack_log_hdwr_pacing(rack, 15026 rate_wanted, 0, __LINE__, 15027 err, 1); 15028 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15029 counter_u64_add(rack_hw_pace_lost, 1); 15030 } else if (nrte != rack->r_ctl.crte) { 15031 rack->r_ctl.crte = nrte; 15032 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15033 segsiz, 0, 15034 rack->r_ctl.crte, 15035 NULL); 15036 rack_log_hdwr_pacing(rack, 15037 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15038 err, 2); 15039 rack->r_ctl.last_hw_bw_req = rate_wanted; 15040 } 15041 } else { 15042 /* We just need to adjust the segment size */ 15043 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15044 rack_log_hdwr_pacing(rack, 15045 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15046 0, 4); 15047 rack->r_ctl.last_hw_bw_req = rate_wanted; 15048 } 15049 } 15050 } 15051 if ((rack->r_ctl.crte != NULL) && 15052 (rack->r_ctl.crte->rate == rate_wanted)) { 15053 /* 15054 * We need to add a extra if the rates 15055 * are exactly matched. The idea is 15056 * we want the software to make sure the 15057 * queue is empty before adding more, this 15058 * gives us N MSS extra pace times where 15059 * N is our sysctl 15060 */ 15061 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15062 } 15063 done_w_hdwr: 15064 if (rack_limit_time_with_srtt && 15065 (rack->use_fixed_rate == 0) && 15066 #ifdef NETFLIX_PEAKRATE 15067 (rack->rc_tp->t_maxpeakrate == 0) && 15068 #endif 15069 (rack->rack_hdrw_pacing == 0)) { 15070 /* 15071 * Sanity check, we do not allow the pacing delay 15072 * to be longer than the SRTT of the path. If it is 15073 * a slow path, then adding a packet should increase 15074 * the RTT and compensate for this i.e. the srtt will 15075 * be greater so the allowed pacing time will be greater. 15076 * 15077 * Note this restriction is not for where a peak rate 15078 * is set, we are doing fixed pacing or hardware pacing. 15079 */ 15080 if (rack->rc_tp->t_srtt) 15081 srtt = rack->rc_tp->t_srtt; 15082 else 15083 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15084 if (srtt < (uint64_t)slot) { 15085 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15086 slot = srtt; 15087 } 15088 } 15089 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15090 } 15091 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15092 /* 15093 * If this rate is seeing enobufs when it 15094 * goes to send then either the nic is out 15095 * of gas or we are mis-estimating the time 15096 * somehow and not letting the queue empty 15097 * completely. Lets add to the pacing time. 
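 * Example with illustrative numbers: if the hardware rate entry reports
 * time_between = 120 usec per segment and rack_enobuf_hw_boost_mult is 2,
 * we add 240 usec to the pacing slot, subject to the
 * rack_enobuf_hw_min/rack_enobuf_hw_max clamp below.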
15098 */ 15099 int hw_boost_delay; 15100 15101 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15102 if (hw_boost_delay > rack_enobuf_hw_max) 15103 hw_boost_delay = rack_enobuf_hw_max; 15104 else if (hw_boost_delay < rack_enobuf_hw_min) 15105 hw_boost_delay = rack_enobuf_hw_min; 15106 slot += hw_boost_delay; 15107 } 15108 return (slot); 15109 } 15110 15111 static void 15112 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15113 tcp_seq startseq, uint32_t sb_offset) 15114 { 15115 struct rack_sendmap *my_rsm = NULL; 15116 struct rack_sendmap fe; 15117 15118 if (tp->t_state < TCPS_ESTABLISHED) { 15119 /* 15120 * We don't start any measurements if we are 15121 * not at least established. 15122 */ 15123 return; 15124 } 15125 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15126 /* 15127 * We will get no more data into the SB 15128 * this means we need to have the data available 15129 * before we start a measurement. 15130 */ 15131 15132 if (sbavail(&tptosocket(tp)->so_snd) < 15133 max(rc_init_window(rack), 15134 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15135 /* Nope not enough data */ 15136 return; 15137 } 15138 } 15139 tp->t_flags |= TF_GPUTINPROG; 15140 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15141 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15142 tp->gput_seq = startseq; 15143 rack->app_limited_needs_set = 0; 15144 if (rack->in_probe_rtt) 15145 rack->measure_saw_probe_rtt = 1; 15146 else if ((rack->measure_saw_probe_rtt) && 15147 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15148 rack->measure_saw_probe_rtt = 0; 15149 if (rack->rc_gp_filled) 15150 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15151 else { 15152 /* Special case initial measurement */ 15153 struct timeval tv; 15154 15155 tp->gput_ts = tcp_get_usecs(&tv); 15156 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15157 } 15158 /* 15159 * We take a guess out into the future, 15160 * if we have no measurement and no 15161 * initial rate, we measure the first 15162 * initial-windows worth of data to 15163 * speed up getting some GP measurement and 15164 * thus start pacing. 15165 */ 15166 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15167 rack->app_limited_needs_set = 1; 15168 tp->gput_ack = startseq + max(rc_init_window(rack), 15169 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15170 rack_log_pacing_delay_calc(rack, 15171 tp->gput_seq, 15172 tp->gput_ack, 15173 0, 15174 tp->gput_ts, 15175 rack->r_ctl.rc_app_limited_cnt, 15176 9, 15177 __LINE__, NULL, 0); 15178 return; 15179 } 15180 if (sb_offset) { 15181 /* 15182 * We are out somewhere in the sb 15183 * can we use the already outstanding data? 15184 */ 15185 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15186 /* 15187 * Yes first one is good and in this case 15188 * the tp->gput_ts is correctly set based on 15189 * the last ack that arrived (no need to 15190 * set things up when an ack comes in). 15191 */ 15192 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15193 if ((my_rsm == NULL) || 15194 (my_rsm->r_rtr_cnt != 1)) { 15195 /* retransmission? */ 15196 goto use_latest; 15197 } 15198 } else { 15199 if (rack->r_ctl.rc_first_appl == NULL) { 15200 /* 15201 * If rc_first_appl is NULL 15202 * then the cnt should be 0. 15203 * This is probably an error, maybe 15204 * a KASSERT would be approprate. 
15205 */ 15206 goto use_latest; 15207 } 15208 /* 15209 * If we have a marker pointer to the last one that is 15210 * app limited we can use that, but we need to set 15211 * things up so that when it gets ack'ed we record 15212 * the ack time (if its not already acked). 15213 */ 15214 rack->app_limited_needs_set = 1; 15215 /* 15216 * We want to get to the rsm that is either 15217 * next with space i.e. over 1 MSS or the one 15218 * after that (after the app-limited). 15219 */ 15220 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15221 rack->r_ctl.rc_first_appl); 15222 if (my_rsm) { 15223 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15224 /* Have to use the next one */ 15225 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15226 my_rsm); 15227 else { 15228 /* Use after the first MSS of it is acked */ 15229 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15230 goto start_set; 15231 } 15232 } 15233 if ((my_rsm == NULL) || 15234 (my_rsm->r_rtr_cnt != 1)) { 15235 /* 15236 * Either its a retransmit or 15237 * the last is the app-limited one. 15238 */ 15239 goto use_latest; 15240 } 15241 } 15242 tp->gput_seq = my_rsm->r_start; 15243 start_set: 15244 if (my_rsm->r_flags & RACK_ACKED) { 15245 /* 15246 * This one has been acked use the arrival ack time 15247 */ 15248 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15249 rack->app_limited_needs_set = 0; 15250 } 15251 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15252 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15253 rack_log_pacing_delay_calc(rack, 15254 tp->gput_seq, 15255 tp->gput_ack, 15256 (uint64_t)my_rsm, 15257 tp->gput_ts, 15258 rack->r_ctl.rc_app_limited_cnt, 15259 9, 15260 __LINE__, NULL, 0); 15261 return; 15262 } 15263 15264 use_latest: 15265 /* 15266 * We don't know how long we may have been 15267 * idle or if this is the first-send. Lets 15268 * setup the flag so we will trim off 15269 * the first ack'd data so we get a true 15270 * measurement. 15271 */ 15272 rack->app_limited_needs_set = 1; 15273 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15274 /* Find this guy so we can pull the send time */ 15275 fe.r_start = startseq; 15276 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15277 if (my_rsm) { 15278 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15279 if (my_rsm->r_flags & RACK_ACKED) { 15280 /* 15281 * Unlikely since its probably what was 15282 * just transmitted (but I am paranoid). 15283 */ 15284 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15285 rack->app_limited_needs_set = 0; 15286 } 15287 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15288 /* This also is unlikely */ 15289 tp->gput_seq = my_rsm->r_start; 15290 } 15291 } else { 15292 /* 15293 * TSNH unless we have some send-map limit, 15294 * and even at that it should not be hitting 15295 * that limit (we should have stopped sending). 
15296 */ 15297 struct timeval tv; 15298 15299 microuptime(&tv); 15300 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15301 } 15302 rack_log_pacing_delay_calc(rack, 15303 tp->gput_seq, 15304 tp->gput_ack, 15305 (uint64_t)my_rsm, 15306 tp->gput_ts, 15307 rack->r_ctl.rc_app_limited_cnt, 15308 9, __LINE__, NULL, 0); 15309 } 15310 15311 static inline uint32_t 15312 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15313 uint32_t avail, int32_t sb_offset) 15314 { 15315 uint32_t len; 15316 uint32_t sendwin; 15317 15318 if (tp->snd_wnd > cwnd_to_use) 15319 sendwin = cwnd_to_use; 15320 else 15321 sendwin = tp->snd_wnd; 15322 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15323 /* We never want to go over our peers rcv-window */ 15324 len = 0; 15325 } else { 15326 uint32_t flight; 15327 15328 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15329 if (flight >= sendwin) { 15330 /* 15331 * We have in flight what we are allowed by cwnd (if 15332 * it was rwnd blocking it would have hit above out 15333 * >= tp->snd_wnd). 15334 */ 15335 return (0); 15336 } 15337 len = sendwin - flight; 15338 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15339 /* We would send too much (beyond the rwnd) */ 15340 len = tp->snd_wnd - ctf_outstanding(tp); 15341 } 15342 if ((len + sb_offset) > avail) { 15343 /* 15344 * We don't have that much in the SB, how much is 15345 * there? 15346 */ 15347 len = avail - sb_offset; 15348 } 15349 } 15350 return (len); 15351 } 15352 15353 static void 15354 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15355 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15356 int rsm_is_null, int optlen, int line, uint16_t mode) 15357 { 15358 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15359 union tcp_log_stackspecific log; 15360 struct timeval tv; 15361 15362 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15363 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15364 log.u_bbr.flex1 = error; 15365 log.u_bbr.flex2 = flags; 15366 log.u_bbr.flex3 = rsm_is_null; 15367 log.u_bbr.flex4 = ipoptlen; 15368 log.u_bbr.flex5 = tp->rcv_numsacks; 15369 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15370 log.u_bbr.flex7 = optlen; 15371 log.u_bbr.flex8 = rack->r_fsb_inited; 15372 log.u_bbr.applimited = rack->r_fast_output; 15373 log.u_bbr.bw_inuse = rack_get_bw(rack); 15374 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15375 log.u_bbr.cwnd_gain = mode; 15376 log.u_bbr.pkts_out = orig_len; 15377 log.u_bbr.lt_epoch = len; 15378 log.u_bbr.delivered = line; 15379 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15380 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15381 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15382 len, &log, false, NULL, NULL, 0, &tv); 15383 } 15384 } 15385 15386 15387 static struct mbuf * 15388 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15389 struct rack_fast_send_blk *fsb, 15390 int32_t seglimit, int32_t segsize, int hw_tls) 15391 { 15392 #ifdef KERN_TLS 15393 struct ktls_session *tls, *ntls; 15394 #ifdef INVARIANTS 15395 struct mbuf *start; 15396 #endif 15397 #endif 15398 struct mbuf *m, *n, **np, *smb; 15399 struct mbuf *top; 15400 int32_t off, soff; 15401 int32_t len = *plen; 15402 int32_t fragsize; 15403 int32_t len_cp = 0; 15404 uint32_t mlen, frags; 15405 15406 soff = off = the_off; 15407 smb = m = the_m; 15408 np = ⊤ 15409 top = NULL; 15410 #ifdef KERN_TLS 15411 if (hw_tls && (m->m_flags & M_EXTPG)) 15412 tls = m->m_epg_tls; 
15413 else 15414 tls = NULL; 15415 #ifdef INVARIANTS 15416 start = m; 15417 #endif 15418 #endif 15419 while (len > 0) { 15420 if (m == NULL) { 15421 *plen = len_cp; 15422 break; 15423 } 15424 #ifdef KERN_TLS 15425 if (hw_tls) { 15426 if (m->m_flags & M_EXTPG) 15427 ntls = m->m_epg_tls; 15428 else 15429 ntls = NULL; 15430 15431 /* 15432 * Avoid mixing TLS records with handshake 15433 * data or TLS records from different 15434 * sessions. 15435 */ 15436 if (tls != ntls) { 15437 MPASS(m != start); 15438 *plen = len_cp; 15439 break; 15440 } 15441 } 15442 #endif 15443 mlen = min(len, m->m_len - off); 15444 if (seglimit) { 15445 /* 15446 * For M_EXTPG mbufs, add 3 segments 15447 * + 1 in case we are crossing page boundaries 15448 * + 2 in case the TLS hdr/trailer are used 15449 * It is cheaper to just add the segments 15450 * than it is to take the cache miss to look 15451 * at the mbuf ext_pgs state in detail. 15452 */ 15453 if (m->m_flags & M_EXTPG) { 15454 fragsize = min(segsize, PAGE_SIZE); 15455 frags = 3; 15456 } else { 15457 fragsize = segsize; 15458 frags = 0; 15459 } 15460 15461 /* Break if we really can't fit anymore. */ 15462 if ((frags + 1) >= seglimit) { 15463 *plen = len_cp; 15464 break; 15465 } 15466 15467 /* 15468 * Reduce size if you can't copy the whole 15469 * mbuf. If we can't copy the whole mbuf, also 15470 * adjust len so the loop will end after this 15471 * mbuf. 15472 */ 15473 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15474 mlen = (seglimit - frags - 1) * fragsize; 15475 len = mlen; 15476 *plen = len_cp + len; 15477 } 15478 frags += howmany(mlen, fragsize); 15479 if (frags == 0) 15480 frags++; 15481 seglimit -= frags; 15482 KASSERT(seglimit > 0, 15483 ("%s: seglimit went too low", __func__)); 15484 } 15485 n = m_get(M_NOWAIT, m->m_type); 15486 *np = n; 15487 if (n == NULL) 15488 goto nospace; 15489 n->m_len = mlen; 15490 soff += mlen; 15491 len_cp += n->m_len; 15492 if (m->m_flags & (M_EXT|M_EXTPG)) { 15493 n->m_data = m->m_data + off; 15494 mb_dupcl(n, m); 15495 } else { 15496 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15497 (u_int)n->m_len); 15498 } 15499 len -= n->m_len; 15500 off = 0; 15501 m = m->m_next; 15502 np = &n->m_next; 15503 if (len || (soff == smb->m_len)) { 15504 /* 15505 * We have more so we move forward or 15506 * we have consumed the entire mbuf and 15507 * len has fell to 0. 15508 */ 15509 soff = 0; 15510 smb = m; 15511 } 15512 15513 } 15514 if (fsb != NULL) { 15515 fsb->m = smb; 15516 fsb->off = soff; 15517 if (smb) { 15518 /* 15519 * Save off the size of the mbuf. We do 15520 * this so that we can recognize when it 15521 * has been trimmed by sbcut() as acks 15522 * come in. 15523 */ 15524 fsb->o_m_len = smb->m_len; 15525 } else { 15526 /* 15527 * This is the case where the next mbuf went to NULL. This 15528 * means with this copy we have sent everything in the sb. 15529 * In theory we could clear the fast_output flag, but lets 15530 * not since its possible that we could get more added 15531 * and acks that call the extend function which would let 15532 * us send more. 15533 */ 15534 fsb->o_m_len = 0; 15535 } 15536 } 15537 return (top); 15538 nospace: 15539 if (top) 15540 m_freem(top); 15541 return (NULL); 15542 15543 } 15544 15545 /* 15546 * This is a copy of m_copym(), taking the TSO segment size/limit 15547 * constraints into account, and advancing the sndptr as it goes. 
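 * The seglimit accounting in rack_fo_base_copym() above is deliberately
 * conservative: an M_EXTPG (e.g. TLS) mbuf is budgeted three extra
 * descriptors plus one per fragsize worth of data. To illustrate with
 * made-up numbers, copying a 9,000 byte M_EXTPG mbuf with fragsize = 1448
 * and seglimit = 16 charges 3 + howmany(9000, 1448) = 10 descriptors,
 * leaving 6 for the rest of the chain.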
15548 */
15549 static struct mbuf *
15550 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
15551 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
15552 {
15553 struct mbuf *m, *n;
15554 int32_t soff;
15555
15556 soff = rack->r_ctl.fsb.off;
15557 m = rack->r_ctl.fsb.m;
15558 if (rack->r_ctl.fsb.o_m_len > m->m_len) {
15559 /*
15560 * The mbuf had the front of it chopped off by an ack;
15561 * we need to adjust the soff/off by that difference.
15562 */
15563 uint32_t delta;
15564
15565 delta = rack->r_ctl.fsb.o_m_len - m->m_len;
15566 soff -= delta;
15567 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
15568 /*
15569 * The mbuf was expanded, probably by
15570 * an m_compress. Just update o_m_len.
15571 */
15572 rack->r_ctl.fsb.o_m_len = m->m_len;
15573 }
15574 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
15575 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
15576 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
15577 __FUNCTION__,
15578 rack, *plen, m, m->m_len));
15579 /* Save off the right location before we copy and advance */
15580 *s_soff = soff;
15581 *s_mb = rack->r_ctl.fsb.m;
15582 n = rack_fo_base_copym(m, soff, plen,
15583 &rack->r_ctl.fsb,
15584 seglimit, segsize, rack->r_ctl.fsb.hw_tls);
15585 return (n);
15586 }
15587
15588 static int
15589 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
15590 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
15591 {
15592 /*
15593 * Enter the fast retransmit path. We are given that a sched_pin is
15594 * in place (if accounting is compiled in) and the cycle count taken
15595 * at the entry is in the ts_val. The concept here is that the rsm
15596 * now holds the mbuf offsets and such so we can directly transmit
15597 * without a lot of overhead; the len field is already set to
15598 * prohibit us from sending too much (usually it's 1 MSS).
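 * One sizing detail worth noting: when TSO is used the length is further
 * clamped by what the interface reports, roughly
 * len <= if_hw_tsomax - hdrlen - max_linkhdr (so with, say, a 65,535 byte
 * if_hw_tsomax and on the order of 70-80 bytes of headers the chunk tops
 * out a little under 64 KB), and if the clamp leaves no more than one
 * segment TSO is simply turned back off.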
15599 */ 15600 struct ip *ip = NULL; 15601 struct udphdr *udp = NULL; 15602 struct tcphdr *th = NULL; 15603 struct mbuf *m = NULL; 15604 struct inpcb *inp; 15605 uint8_t *cpto; 15606 struct tcp_log_buffer *lgb; 15607 #ifdef TCP_ACCOUNTING 15608 uint64_t crtsc; 15609 int cnt_thru = 1; 15610 #endif 15611 struct tcpopt to; 15612 u_char opt[TCP_MAXOLEN]; 15613 uint32_t hdrlen, optlen; 15614 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15615 uint16_t flags; 15616 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15617 uint32_t if_hw_tsomaxsegsize; 15618 15619 #ifdef INET6 15620 struct ip6_hdr *ip6 = NULL; 15621 15622 if (rack->r_is_v6) { 15623 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15624 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15625 } else 15626 #endif /* INET6 */ 15627 { 15628 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15629 hdrlen = sizeof(struct tcpiphdr); 15630 } 15631 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15632 goto failed; 15633 } 15634 if (doing_tlp) { 15635 /* Its a TLP add the flag, it may already be there but be sure */ 15636 rsm->r_flags |= RACK_TLP; 15637 } else { 15638 /* If it was a TLP it is not not on this retransmit */ 15639 rsm->r_flags &= ~RACK_TLP; 15640 } 15641 startseq = rsm->r_start; 15642 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15643 inp = rack->rc_inp; 15644 to.to_flags = 0; 15645 flags = tcp_outflags[tp->t_state]; 15646 if (flags & (TH_SYN|TH_RST)) { 15647 goto failed; 15648 } 15649 if (rsm->r_flags & RACK_HAS_FIN) { 15650 /* We can't send a FIN here */ 15651 goto failed; 15652 } 15653 if (flags & TH_FIN) { 15654 /* We never send a FIN */ 15655 flags &= ~TH_FIN; 15656 } 15657 if (tp->t_flags & TF_RCVD_TSTMP) { 15658 to.to_tsval = ms_cts + tp->ts_offset; 15659 to.to_tsecr = tp->ts_recent; 15660 to.to_flags = TOF_TS; 15661 } 15662 optlen = tcp_addoptions(&to, opt); 15663 hdrlen += optlen; 15664 udp = rack->r_ctl.fsb.udp; 15665 if (udp) 15666 hdrlen += sizeof(struct udphdr); 15667 if (rack->r_ctl.rc_pace_max_segs) 15668 max_val = rack->r_ctl.rc_pace_max_segs; 15669 else if (rack->rc_user_set_max_segs) 15670 max_val = rack->rc_user_set_max_segs * segsiz; 15671 else 15672 max_val = len; 15673 if ((tp->t_flags & TF_TSO) && 15674 V_tcp_do_tso && 15675 (len > segsiz) && 15676 (tp->t_port == 0)) 15677 tso = 1; 15678 #ifdef INET6 15679 if (MHLEN < hdrlen + max_linkhdr) 15680 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15681 else 15682 #endif 15683 m = m_gethdr(M_NOWAIT, MT_DATA); 15684 if (m == NULL) 15685 goto failed; 15686 m->m_data += max_linkhdr; 15687 m->m_len = hdrlen; 15688 th = rack->r_ctl.fsb.th; 15689 /* Establish the len to send */ 15690 if (len > max_val) 15691 len = max_val; 15692 if ((tso) && (len + optlen > tp->t_maxseg)) { 15693 uint32_t if_hw_tsomax; 15694 int32_t max_len; 15695 15696 /* extract TSO information */ 15697 if_hw_tsomax = tp->t_tsomax; 15698 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15699 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15700 /* 15701 * Check if we should limit by maximum payload 15702 * length: 15703 */ 15704 if (if_hw_tsomax != 0) { 15705 /* compute maximum TSO length */ 15706 max_len = (if_hw_tsomax - hdrlen - 15707 max_linkhdr); 15708 if (max_len <= 0) { 15709 goto failed; 15710 } else if (len > max_len) { 15711 len = max_len; 15712 } 15713 } 15714 if (len <= segsiz) { 15715 /* 15716 * In case there are too many small fragments don't 15717 * use TSO: 15718 */ 15719 tso = 0; 15720 } 15721 } else { 15722 tso = 0; 15723 } 15724 if ((tso == 0) && (len > 
segsiz)) 15725 len = segsiz; 15726 if ((len == 0) || 15727 (len <= MHLEN - hdrlen - max_linkhdr)) { 15728 goto failed; 15729 } 15730 th->th_seq = htonl(rsm->r_start); 15731 th->th_ack = htonl(tp->rcv_nxt); 15732 /* 15733 * The PUSH bit should only be applied 15734 * if the full retransmission is made. If 15735 * we are sending less than this is the 15736 * left hand edge and should not have 15737 * the PUSH bit. 15738 */ 15739 if ((rsm->r_flags & RACK_HAD_PUSH) && 15740 (len == (rsm->r_end - rsm->r_start))) 15741 flags |= TH_PUSH; 15742 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15743 if (th->th_win == 0) { 15744 tp->t_sndzerowin++; 15745 tp->t_flags |= TF_RXWIN0SENT; 15746 } else 15747 tp->t_flags &= ~TF_RXWIN0SENT; 15748 if (rsm->r_flags & RACK_TLP) { 15749 /* 15750 * TLP should not count in retran count, but 15751 * in its own bin 15752 */ 15753 counter_u64_add(rack_tlp_retran, 1); 15754 counter_u64_add(rack_tlp_retran_bytes, len); 15755 } else { 15756 tp->t_sndrexmitpack++; 15757 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15758 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15759 } 15760 #ifdef STATS 15761 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15762 len); 15763 #endif 15764 if (rsm->m == NULL) 15765 goto failed; 15766 if (rsm->orig_m_len != rsm->m->m_len) { 15767 /* Fix up the orig_m_len and possibly the mbuf offset */ 15768 rack_adjust_orig_mlen(rsm); 15769 } 15770 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15771 if (len <= segsiz) { 15772 /* 15773 * Must have ran out of mbufs for the copy 15774 * shorten it to no longer need tso. Lets 15775 * not put on sendalot since we are low on 15776 * mbufs. 15777 */ 15778 tso = 0; 15779 } 15780 if ((m->m_next == NULL) || (len <= 0)){ 15781 goto failed; 15782 } 15783 if (udp) { 15784 if (rack->r_is_v6) 15785 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15786 else 15787 ulen = hdrlen + len - sizeof(struct ip); 15788 udp->uh_ulen = htons(ulen); 15789 } 15790 m->m_pkthdr.rcvif = (struct ifnet *)0; 15791 if (TCPS_HAVERCVDSYN(tp->t_state) && 15792 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 15793 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15794 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15795 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15796 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15797 #ifdef INET6 15798 if (rack->r_is_v6) { 15799 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15800 ip6->ip6_flow |= htonl(ect << 20); 15801 } 15802 else 15803 #endif 15804 { 15805 ip->ip_tos &= ~IPTOS_ECN_MASK; 15806 ip->ip_tos |= ect; 15807 } 15808 } 15809 tcp_set_flags(th, flags); 15810 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15811 #ifdef INET6 15812 if (rack->r_is_v6) { 15813 if (tp->t_port) { 15814 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15815 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15816 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15817 th->th_sum = htons(0); 15818 UDPSTAT_INC(udps_opackets); 15819 } else { 15820 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15821 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15822 th->th_sum = in6_cksum_pseudo(ip6, 15823 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15824 0); 15825 } 15826 } 15827 #endif 15828 #if defined(INET6) && defined(INET) 15829 else 15830 #endif 15831 #ifdef INET 15832 { 15833 if (tp->t_port) { 15834 m->m_pkthdr.csum_flags = CSUM_UDP; 15835 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15836 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15837 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15838 th->th_sum = htons(0); 15839 UDPSTAT_INC(udps_opackets); 15840 } else { 15841 m->m_pkthdr.csum_flags = CSUM_TCP; 15842 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15843 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15844 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15845 IPPROTO_TCP + len + optlen)); 15846 } 15847 /* IP version must be set here for ipv4/ipv6 checking later */ 15848 KASSERT(ip->ip_v == IPVERSION, 15849 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15850 } 15851 #endif 15852 if (tso) { 15853 KASSERT(len > tp->t_maxseg - optlen, 15854 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15855 m->m_pkthdr.csum_flags |= CSUM_TSO; 15856 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15857 } 15858 #ifdef INET6 15859 if (rack->r_is_v6) { 15860 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15861 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15862 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15863 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15864 else 15865 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15866 } 15867 #endif 15868 #if defined(INET) && defined(INET6) 15869 else 15870 #endif 15871 #ifdef INET 15872 { 15873 ip->ip_len = htons(m->m_pkthdr.len); 15874 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15875 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15876 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15877 if (tp->t_port == 0 || len < V_tcp_minmss) { 15878 ip->ip_off |= htons(IP_DF); 15879 } 15880 } else { 15881 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15882 } 15883 } 15884 #endif 15885 /* Time to copy in our header */ 15886 cpto = mtod(m, uint8_t *); 15887 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15888 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15889 if (optlen) { 15890 bcopy(opt, th + 1, optlen); 15891 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15892 } else { 15893 th->th_off = sizeof(struct tcphdr) >> 2; 15894 } 15895 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15896 union tcp_log_stackspecific log; 15897 15898 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 15899 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 15900 counter_u64_add(rack_collapsed_win_rxt, 1); 15901 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 15902 } 15903 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15904 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15905 if (rack->rack_no_prr) 15906 log.u_bbr.flex1 = 0; 15907 else 15908 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15909 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15910 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15911 log.u_bbr.flex4 = max_val; 15912 log.u_bbr.flex5 = 0; 15913 /* Save off the early/late values */ 15914 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15915 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15916 log.u_bbr.bw_inuse = rack_get_bw(rack); 15917 if (doing_tlp == 0) 15918 log.u_bbr.flex8 = 1; 15919 else 15920 log.u_bbr.flex8 = 2; 15921 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15922 log.u_bbr.flex7 = 55; 15923 log.u_bbr.pkts_out = tp->t_maxseg; 15924 log.u_bbr.timeStamp = cts; 15925 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15926 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15927 log.u_bbr.delivered = 0; 15928 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15929 len, &log, false, NULL, NULL, 0, tv); 15930 } else 15931 
lgb = NULL; 15932 #ifdef INET6 15933 if (rack->r_is_v6) { 15934 error = ip6_output(m, NULL, 15935 &inp->inp_route6, 15936 0, NULL, NULL, inp); 15937 } 15938 #endif 15939 #if defined(INET) && defined(INET6) 15940 else 15941 #endif 15942 #ifdef INET 15943 { 15944 error = ip_output(m, NULL, 15945 &inp->inp_route, 15946 0, 0, inp); 15947 } 15948 #endif 15949 m = NULL; 15950 if (lgb) { 15951 lgb->tlb_errno = error; 15952 lgb = NULL; 15953 } 15954 if (error) { 15955 goto failed; 15956 } 15957 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15958 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15959 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15960 rack->rc_tlp_in_progress = 1; 15961 rack->r_ctl.rc_tlp_cnt_out++; 15962 } 15963 if (error == 0) { 15964 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15965 if (doing_tlp) { 15966 rack->rc_last_sent_tlp_past_cumack = 0; 15967 rack->rc_last_sent_tlp_seq_valid = 1; 15968 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15969 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15970 } 15971 } 15972 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15973 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15974 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15975 rack->r_ctl.retran_during_recovery += len; 15976 { 15977 int idx; 15978 15979 idx = (len / segsiz) + 3; 15980 if (idx >= TCP_MSS_ACCT_ATIMER) 15981 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15982 else 15983 counter_u64_add(rack_out_size[idx], 1); 15984 } 15985 if (tp->t_rtttime == 0) { 15986 tp->t_rtttime = ticks; 15987 tp->t_rtseq = startseq; 15988 KMOD_TCPSTAT_INC(tcps_segstimed); 15989 } 15990 counter_u64_add(rack_fto_rsm_send, 1); 15991 if (error && (error == ENOBUFS)) { 15992 if (rack->r_ctl.crte != NULL) { 15993 rack_trace_point(rack, RACK_TP_HWENOBUF); 15994 } else 15995 rack_trace_point(rack, RACK_TP_ENOBUF); 15996 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15997 if (rack->rc_enobuf < 0x7f) 15998 rack->rc_enobuf++; 15999 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16000 slot = 10 * HPTS_USEC_IN_MSEC; 16001 } else 16002 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16003 if ((slot == 0) || 16004 (rack->rc_always_pace == 0) || 16005 (rack->r_rr_config == 1)) { 16006 /* 16007 * We have no pacing set or we 16008 * are using old-style rack or 16009 * we are overridden to use the old 1ms pacing. 16010 */ 16011 slot = rack->r_ctl.rc_min_to; 16012 } 16013 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16014 #ifdef TCP_ACCOUNTING 16015 crtsc = get_cyclecount(); 16016 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16017 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16018 } 16019 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16020 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16021 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16022 } 16023 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16024 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16025 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16026 } 16027 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16028 sched_unpin(); 16029 #endif 16030 return (0); 16031 failed: 16032 if (m) 16033 m_free(m); 16034 return (-1); 16035 } 16036 16037 static void 16038 rack_sndbuf_autoscale(struct tcp_rack *rack) 16039 { 16040 /* 16041 * Automatic sizing of send socket buffer. 
Often the send buffer
16042 * size is not optimally adjusted to the actual network conditions
16043 * at hand (delay bandwidth product). Setting the buffer size too
16044 * small limits throughput on links with high bandwidth and high
16045 * delay (e.g. trans-continental/oceanic links). Setting the
16046 * buffer size too big consumes too much real kernel memory,
16047 * especially with many connections on busy servers.
16048 *
16049 * The criteria to step up the send buffer one notch are:
16050 * 1. receive window of remote host is larger than send buffer
16051 * (with a fudge factor of 5/4th);
16052 * 2. send buffer is filled to 7/8th with data (so we actually
16053 * have data to make use of it);
16054 * 3. send buffer fill has not hit maximal automatic size;
16055 * 4. our send window (slow start and congestion controlled) is
16056 * larger than sent but unacknowledged data in send buffer.
16057 *
16058 * Note that the rack version moves things much faster since
16059 * we want to avoid hitting cache lines in the rack_fast_output()
16060 * path, so this is called much less often and thus moves
16061 * the SB forward by a percentage.
16062 */
16063 struct socket *so;
16064 struct tcpcb *tp;
16065 uint32_t sendwin, scaleup;
16066
16067 tp = rack->rc_tp;
16068 so = rack->rc_inp->inp_socket;
16069 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
16070 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
16071 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
16072 sbused(&so->so_snd) >=
16073 (so->so_snd.sb_hiwat / 8 * 7) &&
16074 sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
16075 sendwin >= (sbused(&so->so_snd) -
16076 (tp->snd_nxt - tp->snd_una))) {
16077 if (rack_autosndbuf_inc)
16078 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
16079 else
16080 scaleup = V_tcp_autosndbuf_inc;
16081 if (scaleup < V_tcp_autosndbuf_inc)
16082 scaleup = V_tcp_autosndbuf_inc;
16083 scaleup += so->so_snd.sb_hiwat;
16084 if (scaleup > V_tcp_autosndbuf_max)
16085 scaleup = V_tcp_autosndbuf_max;
16086 if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
16087 so->so_snd.sb_flags &= ~SB_AUTOSIZE;
16088 }
16089 }
16090 }
16091
16092 static int
16093 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
16094 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
16095 {
16096 /*
16097 * Enter to do fast output. We are given that the sched_pin is
16098 * in place (if accounting is compiled in) and the cycle count taken
16099 * at entry is in place in ts_val. The idea here is that
16100 * we know how many more bytes need to be sent (presumably either
16101 * during pacing or to fill the cwnd, and that was greater than
16102 * the max-burst). We have how much to send and all the info we
16103 * need to just send.
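 * To sketch the flow with made-up sizes: with fsb.left_to_send = 43,440
 * bytes, segsiz = 1448 and TSO off, each pass through the again: label
 * below emits one segsiz-sized segment until the burst allowance
 * (max_val) is used up; once less than one segsiz remains to send,
 * r_fast_output is cleared and the send-buffer autoscale check runs.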
16104 */ 16105 struct ip *ip = NULL; 16106 struct udphdr *udp = NULL; 16107 struct tcphdr *th = NULL; 16108 struct mbuf *m, *s_mb; 16109 struct inpcb *inp; 16110 uint8_t *cpto; 16111 struct tcp_log_buffer *lgb; 16112 #ifdef TCP_ACCOUNTING 16113 uint64_t crtsc; 16114 #endif 16115 struct tcpopt to; 16116 u_char opt[TCP_MAXOLEN]; 16117 uint32_t hdrlen, optlen; 16118 #ifdef TCP_ACCOUNTING 16119 int cnt_thru = 1; 16120 #endif 16121 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16122 uint16_t flags; 16123 uint32_t s_soff; 16124 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16125 uint32_t if_hw_tsomaxsegsize; 16126 uint16_t add_flag = RACK_SENT_FP; 16127 #ifdef INET6 16128 struct ip6_hdr *ip6 = NULL; 16129 16130 if (rack->r_is_v6) { 16131 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16132 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16133 } else 16134 #endif /* INET6 */ 16135 { 16136 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16137 hdrlen = sizeof(struct tcpiphdr); 16138 } 16139 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16140 m = NULL; 16141 goto failed; 16142 } 16143 startseq = tp->snd_max; 16144 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16145 inp = rack->rc_inp; 16146 len = rack->r_ctl.fsb.left_to_send; 16147 to.to_flags = 0; 16148 flags = rack->r_ctl.fsb.tcp_flags; 16149 if (tp->t_flags & TF_RCVD_TSTMP) { 16150 to.to_tsval = ms_cts + tp->ts_offset; 16151 to.to_tsecr = tp->ts_recent; 16152 to.to_flags = TOF_TS; 16153 } 16154 optlen = tcp_addoptions(&to, opt); 16155 hdrlen += optlen; 16156 udp = rack->r_ctl.fsb.udp; 16157 if (udp) 16158 hdrlen += sizeof(struct udphdr); 16159 if (rack->r_ctl.rc_pace_max_segs) 16160 max_val = rack->r_ctl.rc_pace_max_segs; 16161 else if (rack->rc_user_set_max_segs) 16162 max_val = rack->rc_user_set_max_segs * segsiz; 16163 else 16164 max_val = len; 16165 if ((tp->t_flags & TF_TSO) && 16166 V_tcp_do_tso && 16167 (len > segsiz) && 16168 (tp->t_port == 0)) 16169 tso = 1; 16170 again: 16171 #ifdef INET6 16172 if (MHLEN < hdrlen + max_linkhdr) 16173 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16174 else 16175 #endif 16176 m = m_gethdr(M_NOWAIT, MT_DATA); 16177 if (m == NULL) 16178 goto failed; 16179 m->m_data += max_linkhdr; 16180 m->m_len = hdrlen; 16181 th = rack->r_ctl.fsb.th; 16182 /* Establish the len to send */ 16183 if (len > max_val) 16184 len = max_val; 16185 if ((tso) && (len + optlen > tp->t_maxseg)) { 16186 uint32_t if_hw_tsomax; 16187 int32_t max_len; 16188 16189 /* extract TSO information */ 16190 if_hw_tsomax = tp->t_tsomax; 16191 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16192 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16193 /* 16194 * Check if we should limit by maximum payload 16195 * length: 16196 */ 16197 if (if_hw_tsomax != 0) { 16198 /* compute maximum TSO length */ 16199 max_len = (if_hw_tsomax - hdrlen - 16200 max_linkhdr); 16201 if (max_len <= 0) { 16202 goto failed; 16203 } else if (len > max_len) { 16204 len = max_len; 16205 } 16206 } 16207 if (len <= segsiz) { 16208 /* 16209 * In case there are too many small fragments don't 16210 * use TSO: 16211 */ 16212 tso = 0; 16213 } 16214 } else { 16215 tso = 0; 16216 } 16217 if ((tso == 0) && (len > segsiz)) 16218 len = segsiz; 16219 if ((len == 0) || 16220 (len <= MHLEN - hdrlen - max_linkhdr)) { 16221 goto failed; 16222 } 16223 sb_offset = tp->snd_max - tp->snd_una; 16224 th->th_seq = htonl(tp->snd_max); 16225 th->th_ack = htonl(tp->rcv_nxt); 16226 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> 
tp->rcv_scale)); 16227 if (th->th_win == 0) { 16228 tp->t_sndzerowin++; 16229 tp->t_flags |= TF_RXWIN0SENT; 16230 } else 16231 tp->t_flags &= ~TF_RXWIN0SENT; 16232 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16233 KMOD_TCPSTAT_INC(tcps_sndpack); 16234 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16235 #ifdef STATS 16236 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16237 len); 16238 #endif 16239 if (rack->r_ctl.fsb.m == NULL) 16240 goto failed; 16241 16242 /* s_mb and s_soff are saved for rack_log_output */ 16243 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16244 &s_mb, &s_soff); 16245 if (len <= segsiz) { 16246 /* 16247 * Must have ran out of mbufs for the copy 16248 * shorten it to no longer need tso. Lets 16249 * not put on sendalot since we are low on 16250 * mbufs. 16251 */ 16252 tso = 0; 16253 } 16254 if (rack->r_ctl.fsb.rfo_apply_push && 16255 (len == rack->r_ctl.fsb.left_to_send)) { 16256 flags |= TH_PUSH; 16257 add_flag |= RACK_HAD_PUSH; 16258 } 16259 if ((m->m_next == NULL) || (len <= 0)){ 16260 goto failed; 16261 } 16262 if (udp) { 16263 if (rack->r_is_v6) 16264 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16265 else 16266 ulen = hdrlen + len - sizeof(struct ip); 16267 udp->uh_ulen = htons(ulen); 16268 } 16269 m->m_pkthdr.rcvif = (struct ifnet *)0; 16270 if (TCPS_HAVERCVDSYN(tp->t_state) && 16271 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 16272 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16273 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16274 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16275 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16276 #ifdef INET6 16277 if (rack->r_is_v6) { 16278 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16279 ip6->ip6_flow |= htonl(ect << 20); 16280 } 16281 else 16282 #endif 16283 { 16284 ip->ip_tos &= ~IPTOS_ECN_MASK; 16285 ip->ip_tos |= ect; 16286 } 16287 } 16288 tcp_set_flags(th, flags); 16289 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16290 #ifdef INET6 16291 if (rack->r_is_v6) { 16292 if (tp->t_port) { 16293 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16294 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16295 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16296 th->th_sum = htons(0); 16297 UDPSTAT_INC(udps_opackets); 16298 } else { 16299 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16300 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16301 th->th_sum = in6_cksum_pseudo(ip6, 16302 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16303 0); 16304 } 16305 } 16306 #endif 16307 #if defined(INET6) && defined(INET) 16308 else 16309 #endif 16310 #ifdef INET 16311 { 16312 if (tp->t_port) { 16313 m->m_pkthdr.csum_flags = CSUM_UDP; 16314 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16315 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16316 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16317 th->th_sum = htons(0); 16318 UDPSTAT_INC(udps_opackets); 16319 } else { 16320 m->m_pkthdr.csum_flags = CSUM_TCP; 16321 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16322 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16323 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16324 IPPROTO_TCP + len + optlen)); 16325 } 16326 /* IP version must be set here for ipv4/ipv6 checking later */ 16327 KASSERT(ip->ip_v == IPVERSION, 16328 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16329 } 16330 #endif 16331 if (tso) { 16332 KASSERT(len > tp->t_maxseg - optlen, 16333 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16334 m->m_pkthdr.csum_flags |= CSUM_TSO; 
16335 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16336 } 16337 #ifdef INET6 16338 if (rack->r_is_v6) { 16339 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16340 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16341 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16342 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16343 else 16344 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16345 } 16346 #endif 16347 #if defined(INET) && defined(INET6) 16348 else 16349 #endif 16350 #ifdef INET 16351 { 16352 ip->ip_len = htons(m->m_pkthdr.len); 16353 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16354 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16355 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16356 if (tp->t_port == 0 || len < V_tcp_minmss) { 16357 ip->ip_off |= htons(IP_DF); 16358 } 16359 } else { 16360 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16361 } 16362 } 16363 #endif 16364 /* Time to copy in our header */ 16365 cpto = mtod(m, uint8_t *); 16366 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16367 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16368 if (optlen) { 16369 bcopy(opt, th + 1, optlen); 16370 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16371 } else { 16372 th->th_off = sizeof(struct tcphdr) >> 2; 16373 } 16374 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16375 union tcp_log_stackspecific log; 16376 16377 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16378 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16379 if (rack->rack_no_prr) 16380 log.u_bbr.flex1 = 0; 16381 else 16382 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16383 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16384 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16385 log.u_bbr.flex4 = max_val; 16386 log.u_bbr.flex5 = 0; 16387 /* Save off the early/late values */ 16388 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16389 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16390 log.u_bbr.bw_inuse = rack_get_bw(rack); 16391 log.u_bbr.flex8 = 0; 16392 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16393 log.u_bbr.flex7 = 44; 16394 log.u_bbr.pkts_out = tp->t_maxseg; 16395 log.u_bbr.timeStamp = cts; 16396 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16397 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16398 log.u_bbr.delivered = 0; 16399 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16400 len, &log, false, NULL, NULL, 0, tv); 16401 } else 16402 lgb = NULL; 16403 #ifdef INET6 16404 if (rack->r_is_v6) { 16405 error = ip6_output(m, NULL, 16406 &inp->inp_route6, 16407 0, NULL, NULL, inp); 16408 } 16409 #endif 16410 #if defined(INET) && defined(INET6) 16411 else 16412 #endif 16413 #ifdef INET 16414 { 16415 error = ip_output(m, NULL, 16416 &inp->inp_route, 16417 0, 0, inp); 16418 } 16419 #endif 16420 if (lgb) { 16421 lgb->tlb_errno = error; 16422 lgb = NULL; 16423 } 16424 if (error) { 16425 *send_err = error; 16426 m = NULL; 16427 goto failed; 16428 } 16429 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16430 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16431 m = NULL; 16432 if (tp->snd_una == tp->snd_max) { 16433 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16434 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16435 tp->t_acktime = ticks; 16436 } 16437 if (error == 0) 16438 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16439 16440 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16441 tot_len += len; 16442 if ((tp->t_flags & TF_GPUTINPROG) == 
0) 16443 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16444 tp->snd_max += len; 16445 tp->snd_nxt = tp->snd_max; 16446 { 16447 int idx; 16448 16449 idx = (len / segsiz) + 3; 16450 if (idx >= TCP_MSS_ACCT_ATIMER) 16451 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16452 else 16453 counter_u64_add(rack_out_size[idx], 1); 16454 } 16455 if (len <= rack->r_ctl.fsb.left_to_send) 16456 rack->r_ctl.fsb.left_to_send -= len; 16457 else 16458 rack->r_ctl.fsb.left_to_send = 0; 16459 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16460 rack->r_fast_output = 0; 16461 rack->r_ctl.fsb.left_to_send = 0; 16462 /* At the end of fast_output scale up the sb */ 16463 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16464 rack_sndbuf_autoscale(rack); 16465 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16466 } 16467 if (tp->t_rtttime == 0) { 16468 tp->t_rtttime = ticks; 16469 tp->t_rtseq = startseq; 16470 KMOD_TCPSTAT_INC(tcps_segstimed); 16471 } 16472 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16473 (max_val > len) && 16474 (tso == 0)) { 16475 max_val -= len; 16476 len = segsiz; 16477 th = rack->r_ctl.fsb.th; 16478 #ifdef TCP_ACCOUNTING 16479 cnt_thru++; 16480 #endif 16481 goto again; 16482 } 16483 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16484 counter_u64_add(rack_fto_send, 1); 16485 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16486 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16487 #ifdef TCP_ACCOUNTING 16488 crtsc = get_cyclecount(); 16489 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16490 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16491 } 16492 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16493 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16494 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16495 } 16496 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16497 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16498 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16499 } 16500 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16501 sched_unpin(); 16502 #endif 16503 return (0); 16504 failed: 16505 if (m) 16506 m_free(m); 16507 rack->r_fast_output = 0; 16508 return (-1); 16509 } 16510 16511 static struct rack_sendmap * 16512 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 16513 { 16514 struct rack_sendmap *rsm = NULL; 16515 struct rack_sendmap fe; 16516 int thresh; 16517 16518 restart: 16519 fe.r_start = rack->r_ctl.last_collapse_point; 16520 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 16521 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 16522 /* Nothing, strange turn off validity */ 16523 rack->r_collapse_point_valid = 0; 16524 return (NULL); 16525 } 16526 /* Can we send it yet? */ 16527 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 16528 /* 16529 * Receiver window has not grown enough for 16530 * the segment to be put on the wire. 16531 */ 16532 return (NULL); 16533 } 16534 if (rsm->r_flags & RACK_ACKED) { 16535 /* 16536 * It has been sacked, lets move to the 16537 * next one if possible. 16538 */ 16539 rack->r_ctl.last_collapse_point = rsm->r_end; 16540 /* Are we done? */ 16541 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16542 rack->r_ctl.high_collapse_point)) { 16543 rack->r_collapse_point_valid = 0; 16544 return (NULL); 16545 } 16546 goto restart; 16547 } 16548 /* Now has it been long enough ? 
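 * (long enough here means that more time has passed since this segment's
 * last (re)transmission than the RACK threshold computed below, which is
 * roughly the current RTT plus a reordering allowance; if so the rsm is
 * handed back for retransmission, otherwise we just log and keep waiting)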
*/ 16549 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 16550 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 16551 rack_log_collapse(rack, rsm->r_start, 16552 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16553 thresh, __LINE__, 6, rsm->r_flags, rsm); 16554 return (rsm); 16555 } 16556 /* Not enough time */ 16557 rack_log_collapse(rack, rsm->r_start, 16558 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16559 thresh, __LINE__, 7, rsm->r_flags, rsm); 16560 return (NULL); 16561 } 16562 16563 static int 16564 rack_output(struct tcpcb *tp) 16565 { 16566 struct socket *so; 16567 uint32_t recwin; 16568 uint32_t sb_offset, s_moff = 0; 16569 int32_t len, error = 0; 16570 uint16_t flags; 16571 struct mbuf *m, *s_mb = NULL; 16572 struct mbuf *mb; 16573 uint32_t if_hw_tsomaxsegcount = 0; 16574 uint32_t if_hw_tsomaxsegsize; 16575 int32_t segsiz, minseg; 16576 long tot_len_this_send = 0; 16577 #ifdef INET 16578 struct ip *ip = NULL; 16579 #endif 16580 struct udphdr *udp = NULL; 16581 struct tcp_rack *rack; 16582 struct tcphdr *th; 16583 uint8_t pass = 0; 16584 uint8_t mark = 0; 16585 uint8_t wanted_cookie = 0; 16586 u_char opt[TCP_MAXOLEN]; 16587 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16588 uint32_t rack_seq; 16589 16590 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16591 unsigned ipsec_optlen = 0; 16592 16593 #endif 16594 int32_t idle, sendalot; 16595 int32_t sub_from_prr = 0; 16596 volatile int32_t sack_rxmit; 16597 struct rack_sendmap *rsm = NULL; 16598 int32_t tso, mtu; 16599 struct tcpopt to; 16600 int32_t slot = 0; 16601 int32_t sup_rack = 0; 16602 uint32_t cts, ms_cts, delayed, early; 16603 uint16_t add_flag = RACK_SENT_SP; 16604 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16605 uint8_t hpts_calling, doing_tlp = 0; 16606 uint32_t cwnd_to_use, pace_max_seg; 16607 int32_t do_a_prefetch = 0; 16608 int32_t prefetch_rsm = 0; 16609 int32_t orig_len = 0; 16610 struct timeval tv; 16611 int32_t prefetch_so_done = 0; 16612 struct tcp_log_buffer *lgb; 16613 struct inpcb *inp = tptoinpcb(tp); 16614 struct sockbuf *sb; 16615 uint64_t ts_val = 0; 16616 #ifdef TCP_ACCOUNTING 16617 uint64_t crtsc; 16618 #endif 16619 #ifdef INET6 16620 struct ip6_hdr *ip6 = NULL; 16621 int32_t isipv6; 16622 #endif 16623 bool hw_tls = false; 16624 16625 NET_EPOCH_ASSERT(); 16626 INP_WLOCK_ASSERT(inp); 16627 16628 /* setup and take the cache hits here */ 16629 rack = (struct tcp_rack *)tp->t_fb_ptr; 16630 #ifdef TCP_ACCOUNTING 16631 sched_pin(); 16632 ts_val = get_cyclecount(); 16633 #endif 16634 hpts_calling = inp->inp_hpts_calls; 16635 #ifdef TCP_OFFLOAD 16636 if (tp->t_flags & TF_TOE) { 16637 #ifdef TCP_ACCOUNTING 16638 sched_unpin(); 16639 #endif 16640 return (tcp_offload_output(tp)); 16641 } 16642 #endif 16643 /* 16644 * For TFO connections in SYN_RECEIVED, only allow the initial 16645 * SYN|ACK and those sent by the retransmit timer. 
16646 */ 16647 if (IS_FASTOPEN(tp->t_flags) && 16648 (tp->t_state == TCPS_SYN_RECEIVED) && 16649 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16650 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16651 #ifdef TCP_ACCOUNTING 16652 sched_unpin(); 16653 #endif 16654 return (0); 16655 } 16656 #ifdef INET6 16657 if (rack->r_state) { 16658 /* Use the cache line loaded if possible */ 16659 isipv6 = rack->r_is_v6; 16660 } else { 16661 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16662 } 16663 #endif 16664 early = 0; 16665 cts = tcp_get_usecs(&tv); 16666 ms_cts = tcp_tv_to_mssectick(&tv); 16667 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16668 tcp_in_hpts(rack->rc_inp)) { 16669 /* 16670 * We are on the hpts for some timer but not hptsi output. 16671 * Remove from the hpts unconditionally. 16672 */ 16673 rack_timer_cancel(tp, rack, cts, __LINE__); 16674 } 16675 /* Are we pacing and late? */ 16676 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16677 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16678 /* We are delayed */ 16679 delayed = cts - rack->r_ctl.rc_last_output_to; 16680 } else { 16681 delayed = 0; 16682 } 16683 /* Do the timers, which may override the pacer */ 16684 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16685 int retval; 16686 16687 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16688 &doing_tlp); 16689 if (retval != 0) { 16690 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16691 #ifdef TCP_ACCOUNTING 16692 sched_unpin(); 16693 #endif 16694 /* 16695 * If timers want tcp_drop(), then pass error out, 16696 * otherwise suppress it. 16697 */ 16698 return (retval < 0 ? retval : 0); 16699 } 16700 } 16701 if (rack->rc_in_persist) { 16702 if (tcp_in_hpts(rack->rc_inp) == 0) { 16703 /* Timer is not running */ 16704 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16705 } 16706 #ifdef TCP_ACCOUNTING 16707 sched_unpin(); 16708 #endif 16709 return (0); 16710 } 16711 if ((rack->rc_ack_required == 1) && 16712 (rack->r_timer_override == 0)){ 16713 /* A timeout occurred and no ack has arrived */ 16714 if (tcp_in_hpts(rack->rc_inp) == 0) { 16715 /* Timer is not running */ 16716 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16717 } 16718 #ifdef TCP_ACCOUNTING 16719 sched_unpin(); 16720 #endif 16721 return (0); 16722 } 16723 if ((rack->r_timer_override) || 16724 (rack->rc_ack_can_sendout_data) || 16725 (delayed) || 16726 (tp->t_state < TCPS_ESTABLISHED)) { 16727 rack->rc_ack_can_sendout_data = 0; 16728 if (tcp_in_hpts(rack->rc_inp)) 16729 tcp_hpts_remove(rack->rc_inp); 16730 } else if (tcp_in_hpts(rack->rc_inp)) { 16731 /* 16732 * On the hpts you can't pass even if ACKNOW is on, we will 16733 * when the hpts fires. 
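 * (that is, nothing is sent from here; output is deferred until the hpts timer fires and calls us back, so we only account the time as blocked and return).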
16734 */ 16735 #ifdef TCP_ACCOUNTING 16736 crtsc = get_cyclecount(); 16737 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16738 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16739 } 16740 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16741 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16742 tp->tcp_cnt_counters[SND_BLOCKED]++; 16743 } 16744 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16745 sched_unpin(); 16746 #endif 16747 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16748 return (0); 16749 } 16750 rack->rc_inp->inp_hpts_calls = 0; 16751 /* Finish out both pacing early and late accounting */ 16752 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16753 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16754 early = rack->r_ctl.rc_last_output_to - cts; 16755 } else 16756 early = 0; 16757 if (delayed) { 16758 rack->r_ctl.rc_agg_delayed += delayed; 16759 rack->r_late = 1; 16760 } else if (early) { 16761 rack->r_ctl.rc_agg_early += early; 16762 rack->r_early = 1; 16763 } 16764 /* Now that early/late accounting is done turn off the flag */ 16765 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16766 rack->r_wanted_output = 0; 16767 rack->r_timer_override = 0; 16768 if ((tp->t_state != rack->r_state) && 16769 TCPS_HAVEESTABLISHED(tp->t_state)) { 16770 rack_set_state(tp, rack); 16771 } 16772 if ((rack->r_fast_output) && 16773 (doing_tlp == 0) && 16774 (tp->rcv_numsacks == 0)) { 16775 int ret; 16776 16777 error = 0; 16778 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16779 if (ret >= 0) 16780 return(ret); 16781 else if (error) { 16782 inp = rack->rc_inp; 16783 so = inp->inp_socket; 16784 sb = &so->so_snd; 16785 goto nomore; 16786 } 16787 } 16788 inp = rack->rc_inp; 16789 /* 16790 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16791 * only allow the initial SYN or SYN|ACK and those sent 16792 * by the retransmit timer. 16793 */ 16794 if (IS_FASTOPEN(tp->t_flags) && 16795 ((tp->t_state == TCPS_SYN_RECEIVED) || 16796 (tp->t_state == TCPS_SYN_SENT)) && 16797 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16798 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16799 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16800 so = inp->inp_socket; 16801 sb = &so->so_snd; 16802 goto just_return_nolock; 16803 } 16804 /* 16805 * Determine length of data that should be transmitted, and flags 16806 * that will be used. If there is some data or critical controls 16807 * (SYN, RST) to send, then transmit; otherwise, investigate 16808 * further. 
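 * The selection below is ordered: a retransmit handed to us by the timer (rc_resend) is taken first, then a collapsed-window rsm whose time has come, then a RACK-detected loss from tcp_rack_output(), then a tail loss probe, and finally the must-retransmit sweep used after a timeout or an MTU shrink.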
16809 */ 16810 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16811 if (tp->t_idle_reduce) { 16812 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16813 rack_cc_after_idle(rack, tp); 16814 } 16815 tp->t_flags &= ~TF_LASTIDLE; 16816 if (idle) { 16817 if (tp->t_flags & TF_MORETOCOME) { 16818 tp->t_flags |= TF_LASTIDLE; 16819 idle = 0; 16820 } 16821 } 16822 if ((tp->snd_una == tp->snd_max) && 16823 rack->r_ctl.rc_went_idle_time && 16824 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16825 idle = cts - rack->r_ctl.rc_went_idle_time; 16826 if (idle > rack_min_probertt_hold) { 16827 /* Count as a probe rtt */ 16828 if (rack->in_probe_rtt == 0) { 16829 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16830 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16831 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16832 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16833 } else { 16834 rack_exit_probertt(rack, cts); 16835 } 16836 } 16837 idle = 0; 16838 } 16839 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16840 rack_init_fsb_block(tp, rack); 16841 again: 16842 /* 16843 * If we've recently taken a timeout, snd_max will be greater than 16844 * snd_nxt. There may be SACK information that allows us to avoid 16845 * resending already delivered data. Adjust snd_nxt accordingly. 16846 */ 16847 sendalot = 0; 16848 cts = tcp_get_usecs(&tv); 16849 ms_cts = tcp_tv_to_mssectick(&tv); 16850 tso = 0; 16851 mtu = 0; 16852 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16853 minseg = segsiz; 16854 if (rack->r_ctl.rc_pace_max_segs == 0) 16855 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16856 else 16857 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16858 sb_offset = tp->snd_max - tp->snd_una; 16859 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16860 flags = tcp_outflags[tp->t_state]; 16861 while (rack->rc_free_cnt < rack_free_cache) { 16862 rsm = rack_alloc(rack); 16863 if (rsm == NULL) { 16864 if (inp->inp_hpts_calls) 16865 /* Retry in a ms */ 16866 slot = (1 * HPTS_USEC_IN_MSEC); 16867 so = inp->inp_socket; 16868 sb = &so->so_snd; 16869 goto just_return_nolock; 16870 } 16871 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16872 rack->rc_free_cnt++; 16873 rsm = NULL; 16874 } 16875 if (inp->inp_hpts_calls) 16876 inp->inp_hpts_calls = 0; 16877 sack_rxmit = 0; 16878 len = 0; 16879 rsm = NULL; 16880 if (flags & TH_RST) { 16881 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16882 so = inp->inp_socket; 16883 sb = &so->so_snd; 16884 goto send; 16885 } 16886 if (rack->r_ctl.rc_resend) { 16887 /* Retransmit timer */ 16888 rsm = rack->r_ctl.rc_resend; 16889 rack->r_ctl.rc_resend = NULL; 16890 len = rsm->r_end - rsm->r_start; 16891 sack_rxmit = 1; 16892 sendalot = 0; 16893 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16894 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16895 __func__, __LINE__, 16896 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16897 sb_offset = rsm->r_start - tp->snd_una; 16898 if (len >= segsiz) 16899 len = segsiz; 16900 } else if (rack->r_collapse_point_valid && 16901 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 16902 /* 16903 * If an RSM is returned then enough time has passed 16904 * for us to retransmit it. Move up the collapse point, 16905 * since this rsm has its chance to retransmit now. 
16906 */ 16907 rack_trace_point(rack, RACK_TP_COLLAPSED_RXT); 16908 rack->r_ctl.last_collapse_point = rsm->r_end; 16909 /* Are we done? */ 16910 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16911 rack->r_ctl.high_collapse_point)) 16912 rack->r_collapse_point_valid = 0; 16913 sack_rxmit = 1; 16914 /* We are not doing a TLP */ 16915 doing_tlp = 0; 16916 len = rsm->r_end - rsm->r_start; 16917 sb_offset = rsm->r_start - tp->snd_una; 16918 sendalot = 0; 16919 if ((rack->full_size_rxt == 0) && 16920 (rack->shape_rxt_to_pacing_min == 0) && 16921 (len >= segsiz)) 16922 len = segsiz; 16923 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16924 /* We have a retransmit that takes precedence */ 16925 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16926 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 16927 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16928 /* Enter recovery if not induced by a time-out */ 16929 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 16930 } 16931 #ifdef INVARIANTS 16932 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16933 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16934 tp, rack, rsm, rsm->r_start, tp->snd_una); 16935 } 16936 #endif 16937 len = rsm->r_end - rsm->r_start; 16938 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16939 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16940 __func__, __LINE__, 16941 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16942 sb_offset = rsm->r_start - tp->snd_una; 16943 sendalot = 0; 16944 if (len >= segsiz) 16945 len = segsiz; 16946 if (len > 0) { 16947 sack_rxmit = 1; 16948 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16949 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16950 min(len, segsiz)); 16951 } 16952 } else if (rack->r_ctl.rc_tlpsend) { 16953 /* Tail loss probe */ 16954 long cwin; 16955 long tlen; 16956 16957 /* 16958 * Check if we can do a TLP with a RACK'd packet 16959 * this can happen if we are not doing the rack 16960 * cheat and we skipped to a TLP and it 16961 * went off. 16962 */ 16963 rsm = rack->r_ctl.rc_tlpsend; 16964 /* We are doing a TLP make sure the flag is preent */ 16965 rsm->r_flags |= RACK_TLP; 16966 rack->r_ctl.rc_tlpsend = NULL; 16967 sack_rxmit = 1; 16968 tlen = rsm->r_end - rsm->r_start; 16969 if (tlen > segsiz) 16970 tlen = segsiz; 16971 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16972 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16973 __func__, __LINE__, 16974 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16975 sb_offset = rsm->r_start - tp->snd_una; 16976 cwin = min(tp->snd_wnd, tlen); 16977 len = cwin; 16978 } 16979 if (rack->r_must_retran && 16980 (doing_tlp == 0) && 16981 (SEQ_GT(tp->snd_max, tp->snd_una)) && 16982 (rsm == NULL)) { 16983 /* 16984 * There are two different ways that we 16985 * can get into this block: 16986 * a) This is a non-sack connection, we had a time-out 16987 * and thus r_must_retran was set and everything 16988 * left outstanding as been marked for retransmit. 16989 * b) The MTU of the path shrank, so that everything 16990 * was marked to be retransmitted with the smaller 16991 * mtu and r_must_retran was set. 16992 * 16993 * This means that we expect the sendmap (outstanding) 16994 * to all be marked must. We can use the tmap to 16995 * look at them. 16996 * 16997 */ 16998 int sendwin, flight; 16999 17000 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 17001 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 17002 if (flight >= sendwin) { 17003 /* 17004 * We can't send yet. 
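 * (everything still outstanding was marked RACK_MUST_RXT after the timeout or MTU change, and what is already in flight covers the smaller of the send window and the cwnd, so we wait for acks to open space before resending any of it).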
17005 */ 17006 so = inp->inp_socket; 17007 sb = &so->so_snd; 17008 goto just_return_nolock; 17009 } 17010 /* 17011 * This is the case a/b mentioned above. All 17012 * outstanding/not-acked should be marked. 17013 * We can use the tmap to find them. 17014 */ 17015 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17016 if (rsm == NULL) { 17017 /* TSNH */ 17018 rack->r_must_retran = 0; 17019 rack->r_ctl.rc_out_at_rto = 0; 17020 so = inp->inp_socket; 17021 sb = &so->so_snd; 17022 goto just_return_nolock; 17023 } 17024 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 17025 /* 17026 * The first one does not have the flag, did we collapse 17027 * further up in our list? 17028 */ 17029 rack->r_must_retran = 0; 17030 rack->r_ctl.rc_out_at_rto = 0; 17031 rsm = NULL; 17032 sack_rxmit = 0; 17033 } else { 17034 sack_rxmit = 1; 17035 len = rsm->r_end - rsm->r_start; 17036 sb_offset = rsm->r_start - tp->snd_una; 17037 sendalot = 0; 17038 if ((rack->full_size_rxt == 0) && 17039 (rack->shape_rxt_to_pacing_min == 0) && 17040 (len >= segsiz)) 17041 len = segsiz; 17042 /* 17043 * Delay removing the flag RACK_MUST_RXT so 17044 * that the fastpath for retransmit will 17045 * work with this rsm. 17046 */ 17047 } 17048 } 17049 /* 17050 * Enforce a connection sendmap count limit if set 17051 * as long as we are not retransmiting. 17052 */ 17053 if ((rsm == NULL) && 17054 (rack->do_detection == 0) && 17055 (V_tcp_map_entries_limit > 0) && 17056 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17057 counter_u64_add(rack_to_alloc_limited, 1); 17058 if (!rack->alloc_limit_reported) { 17059 rack->alloc_limit_reported = 1; 17060 counter_u64_add(rack_alloc_limited_conns, 1); 17061 } 17062 so = inp->inp_socket; 17063 sb = &so->so_snd; 17064 goto just_return_nolock; 17065 } 17066 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17067 /* we are retransmitting the fin */ 17068 len--; 17069 if (len) { 17070 /* 17071 * When retransmitting data do *not* include the 17072 * FIN. This could happen from a TLP probe. 
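 * (the FIN consumes one sequence number at the end of this rsm, which is why len was decremented above; when payload remains we clear TH_FIN and resend only the data).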
17073 */ 17074 flags &= ~TH_FIN; 17075 } 17076 } 17077 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17078 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17079 int ret; 17080 17081 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17082 if (ret == 0) 17083 return (0); 17084 } 17085 so = inp->inp_socket; 17086 sb = &so->so_snd; 17087 if (do_a_prefetch == 0) { 17088 kern_prefetch(sb, &do_a_prefetch); 17089 do_a_prefetch = 1; 17090 } 17091 #ifdef NETFLIX_SHARED_CWND 17092 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17093 rack->rack_enable_scwnd) { 17094 /* We are doing cwnd sharing */ 17095 if (rack->gp_ready && 17096 (rack->rack_attempted_scwnd == 0) && 17097 (rack->r_ctl.rc_scw == NULL) && 17098 tp->t_lib) { 17099 /* The pcbid is in, lets make an attempt */ 17100 counter_u64_add(rack_try_scwnd, 1); 17101 rack->rack_attempted_scwnd = 1; 17102 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17103 &rack->r_ctl.rc_scw_index, 17104 segsiz); 17105 } 17106 if (rack->r_ctl.rc_scw && 17107 (rack->rack_scwnd_is_idle == 1) && 17108 sbavail(&so->so_snd)) { 17109 /* we are no longer out of data */ 17110 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17111 rack->rack_scwnd_is_idle = 0; 17112 } 17113 if (rack->r_ctl.rc_scw) { 17114 /* First lets update and get the cwnd */ 17115 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17116 rack->r_ctl.rc_scw_index, 17117 tp->snd_cwnd, tp->snd_wnd, segsiz); 17118 } 17119 } 17120 #endif 17121 /* 17122 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17123 * state flags. 17124 */ 17125 if (tp->t_flags & TF_NEEDFIN) 17126 flags |= TH_FIN; 17127 if (tp->t_flags & TF_NEEDSYN) 17128 flags |= TH_SYN; 17129 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17130 void *end_rsm; 17131 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17132 if (end_rsm) 17133 kern_prefetch(end_rsm, &prefetch_rsm); 17134 prefetch_rsm = 1; 17135 } 17136 SOCKBUF_LOCK(sb); 17137 /* 17138 * If snd_nxt == snd_max and we have transmitted a FIN, the 17139 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17140 * negative length. This can also occur when TCP opens up its 17141 * congestion window while receiving additional duplicate acks after 17142 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17143 * the fast-retransmit. 17144 * 17145 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17146 * set to snd_una, the sb_offset will be 0, and the length may wind 17147 * up 0. 17148 * 17149 * If sack_rxmit is true we are retransmitting from the scoreboard 17150 * in which case len is already set. 
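 * Otherwise len is computed here: outside of recovery, or with PRR disabled, we normally send whatever rack_what_can_we_send() allows (a pending TLP can force out new data instead), while inside recovery with PRR the new data is limited to rc_prr_sndcnt and a short final segment is held back unless it would empty the socket buffer.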
17151 */ 17152 if ((sack_rxmit == 0) && 17153 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17154 uint32_t avail; 17155 17156 avail = sbavail(sb); 17157 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17158 sb_offset = tp->snd_nxt - tp->snd_una; 17159 else 17160 sb_offset = 0; 17161 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17162 if (rack->r_ctl.rc_tlp_new_data) { 17163 /* TLP is forcing out new data */ 17164 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17165 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17166 } 17167 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17168 if (tp->snd_wnd > sb_offset) 17169 len = tp->snd_wnd - sb_offset; 17170 else 17171 len = 0; 17172 } else { 17173 len = rack->r_ctl.rc_tlp_new_data; 17174 } 17175 rack->r_ctl.rc_tlp_new_data = 0; 17176 } else { 17177 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17178 } 17179 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17180 /* 17181 * For prr=off, we need to send only 1 MSS 17182 * at a time. We do this because another sack could 17183 * be arriving that causes us to send retransmits and 17184 * we don't want to be on a long pace due to a larger send 17185 * that keeps us from sending out the retransmit. 17186 */ 17187 len = segsiz; 17188 } 17189 } else { 17190 uint32_t outstanding; 17191 /* 17192 * We are inside of a Fast recovery episode, this 17193 * is caused by a SACK or 3 dup acks. At this point 17194 * we have sent all the retransmissions and we rely 17195 * on PRR to dictate what we will send in the form of 17196 * new data. 17197 */ 17198 17199 outstanding = tp->snd_max - tp->snd_una; 17200 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17201 if (tp->snd_wnd > outstanding) { 17202 len = tp->snd_wnd - outstanding; 17203 /* Check to see if we have the data */ 17204 if ((sb_offset + len) > avail) { 17205 /* It does not all fit */ 17206 if (avail > sb_offset) 17207 len = avail - sb_offset; 17208 else 17209 len = 0; 17210 } 17211 } else { 17212 len = 0; 17213 } 17214 } else if (avail > sb_offset) { 17215 len = avail - sb_offset; 17216 } else { 17217 len = 0; 17218 } 17219 if (len > 0) { 17220 if (len > rack->r_ctl.rc_prr_sndcnt) { 17221 len = rack->r_ctl.rc_prr_sndcnt; 17222 } 17223 if (len > 0) { 17224 sub_from_prr = 1; 17225 } 17226 } 17227 if (len > segsiz) { 17228 /* 17229 * We should never send more than a MSS when 17230 * retransmitting or sending new data in prr 17231 * mode unless the override flag is on. Most 17232 * likely the PRR algorithm is not going to 17233 * let us send a lot as well :-) 17234 */ 17235 if (rack->r_ctl.rc_prr_sendalot == 0) { 17236 len = segsiz; 17237 } 17238 } else if (len < segsiz) { 17239 /* 17240 * Do we send any? The idea here is if the 17241 * send empty's the socket buffer we want to 17242 * do it. However if not then lets just wait 17243 * for our prr_sndcnt to get bigger. 17244 */ 17245 long leftinsb; 17246 17247 leftinsb = sbavail(sb) - sb_offset; 17248 if (leftinsb > len) { 17249 /* This send does not empty the sb */ 17250 len = 0; 17251 } 17252 } 17253 } 17254 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17255 /* 17256 * If you have not established 17257 * and are not doing FAST OPEN 17258 * no data please. 
17259 */ 17260 if ((sack_rxmit == 0) && 17261 (!IS_FASTOPEN(tp->t_flags))){ 17262 len = 0; 17263 sb_offset = 0; 17264 } 17265 } 17266 if (prefetch_so_done == 0) { 17267 kern_prefetch(so, &prefetch_so_done); 17268 prefetch_so_done = 1; 17269 } 17270 /* 17271 * Lop off SYN bit if it has already been sent. However, if this is 17272 * SYN-SENT state and if segment contains data and if we don't know 17273 * that foreign host supports TAO, suppress sending segment. 17274 */ 17275 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17276 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17277 /* 17278 * When sending additional segments following a TFO SYN|ACK, 17279 * do not include the SYN bit. 17280 */ 17281 if (IS_FASTOPEN(tp->t_flags) && 17282 (tp->t_state == TCPS_SYN_RECEIVED)) 17283 flags &= ~TH_SYN; 17284 } 17285 /* 17286 * Be careful not to send data and/or FIN on SYN segments. This 17287 * measure is needed to prevent interoperability problems with not 17288 * fully conformant TCP implementations. 17289 */ 17290 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17291 len = 0; 17292 flags &= ~TH_FIN; 17293 } 17294 /* 17295 * On TFO sockets, ensure no data is sent in the following cases: 17296 * 17297 * - When retransmitting SYN|ACK on a passively-created socket 17298 * 17299 * - When retransmitting SYN on an actively created socket 17300 * 17301 * - When sending a zero-length cookie (cookie request) on an 17302 * actively created socket 17303 * 17304 * - When the socket is in the CLOSED state (RST is being sent) 17305 */ 17306 if (IS_FASTOPEN(tp->t_flags) && 17307 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17308 ((tp->t_state == TCPS_SYN_SENT) && 17309 (tp->t_tfo_client_cookie_len == 0)) || 17310 (flags & TH_RST))) { 17311 sack_rxmit = 0; 17312 len = 0; 17313 } 17314 /* Without fast-open there should never be data sent on a SYN */ 17315 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17316 tp->snd_nxt = tp->iss; 17317 len = 0; 17318 } 17319 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17320 /* We only send 1 MSS if we have a DSACK block */ 17321 add_flag |= RACK_SENT_W_DSACK; 17322 len = segsiz; 17323 } 17324 orig_len = len; 17325 if (len <= 0) { 17326 /* 17327 * If FIN has been sent but not acked, but we haven't been 17328 * called to retransmit, len will be < 0. Otherwise, window 17329 * shrank after we sent into it. If window shrank to 0, 17330 * cancel pending retransmit, pull snd_nxt back to (closed) 17331 * window, and set the persist timer if it isn't already 17332 * going. If the window didn't close completely, just wait 17333 * for an ACK. 17334 * 17335 * We also do a general check here to ensure that we will 17336 * set the persist timer when we have data to send, but a 17337 * 0-byte window. This makes sure the persist timer is set 17338 * even if the packet hits one of the "goto send" lines 17339 * below. 17340 */ 17341 len = 0; 17342 if ((tp->snd_wnd == 0) && 17343 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17344 (tp->snd_una == tp->snd_max) && 17345 (sb_offset < (int)sbavail(sb))) { 17346 rack_enter_persist(tp, rack, cts); 17347 } 17348 } else if ((rsm == NULL) && 17349 (doing_tlp == 0) && 17350 (len < pace_max_seg)) { 17351 /* 17352 * We are not sending a maximum sized segment for 17353 * some reason. Should we not send anything (think 17354 * sws or persists)? 
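 * The cases below hold the send back: the peer's window is smaller than the minimum pacing size (and we may enter persists if nothing is outstanding), the cwnd or the remaining send window cannot fit even a minimum pacing segment while two or more segments are already in flight, or hardware pacing is in use and we wait for more acks so that a full pacing burst can go out; in each case the data being held back is not the tail of the socket buffer.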
17355 */ 17356 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17357 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17358 (len < minseg) && 17359 (len < (int)(sbavail(sb) - sb_offset))) { 17360 /* 17361 * Here the rwnd is less than 17362 * the minimum pacing size, this is not a retransmit, 17363 * we are established and 17364 * the send is not the last in the socket buffer 17365 * we send nothing, and we may enter persists 17366 * if nothing is outstanding. 17367 */ 17368 len = 0; 17369 if (tp->snd_max == tp->snd_una) { 17370 /* 17371 * Nothing out we can 17372 * go into persists. 17373 */ 17374 rack_enter_persist(tp, rack, cts); 17375 } 17376 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17377 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17378 (len < (int)(sbavail(sb) - sb_offset)) && 17379 (len < minseg)) { 17380 /* 17381 * Here we are not retransmitting, and 17382 * the cwnd is not so small that we could 17383 * not send at least a min size (rxt timer 17384 * not having gone off), We have 2 segments or 17385 * more already in flight, its not the tail end 17386 * of the socket buffer and the cwnd is blocking 17387 * us from sending out a minimum pacing segment size. 17388 * Lets not send anything. 17389 */ 17390 len = 0; 17391 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17392 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17393 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17394 (len < (int)(sbavail(sb) - sb_offset)) && 17395 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17396 /* 17397 * Here we have a send window but we have 17398 * filled it up and we can't send another pacing segment. 17399 * We also have in flight more than 2 segments 17400 * and we are not completing the sb i.e. we allow 17401 * the last bytes of the sb to go out even if 17402 * its not a full pacing segment. 17403 */ 17404 len = 0; 17405 } else if ((rack->r_ctl.crte != NULL) && 17406 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17407 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17408 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17409 (len < (int)(sbavail(sb) - sb_offset))) { 17410 /* 17411 * Here we are doing hardware pacing, this is not a TLP, 17412 * we are not sending a pace max segment size, there is rwnd 17413 * room to send at least N pace_max_seg, the cwnd is greater 17414 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17415 * more segments in flight and its not the tail of the socket buffer. 17416 * 17417 * We don't want to send instead we need to get more ack's in to 17418 * allow us to send a full pacing segment. Normally, if we are pacing 17419 * about the right speed, we should have finished our pacing 17420 * send as most of the acks have come back if we are at the 17421 * right rate. This is a bit fuzzy since return path delay 17422 * can delay the acks, which is why we want to make sure we 17423 * have cwnd space to have a bit more than a max pace segments in flight. 17424 * 17425 * If we have not gotten our acks back we are pacing at too high a 17426 * rate delaying will not hurt and will bring our GP estimate down by 17427 * injecting the delay. If we don't do this we will send 17428 * 2 MSS out in response to the acks being clocked in which 17429 * defeats the point of hw-pacing (i.e. to help us get 17430 * larger TSO's out). 17431 */ 17432 len = 0; 17433 17434 } 17435 17436 } 17437 /* len will be >= 0 after this point. 
*/ 17438 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17439 rack_sndbuf_autoscale(rack); 17440 /* 17441 * Decide if we can use TCP Segmentation Offloading (if supported by 17442 * hardware). 17443 * 17444 * TSO may only be used if we are in a pure bulk sending state. The 17445 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17446 * options prevent using TSO. With TSO the TCP header is the same 17447 * (except for the sequence number) for all generated packets. This 17448 * makes it impossible to transmit any options which vary per 17449 * generated segment or packet. 17450 * 17451 * IPv4 handling has a clear separation of ip options and ip header 17452 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17453 * the right thing below to provide length of just ip options and thus 17454 * checking for ipoptlen is enough to decide if ip options are present. 17455 */ 17456 ipoptlen = 0; 17457 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17458 /* 17459 * Pre-calculate here as we save another lookup into the darknesses 17460 * of IPsec that way and can actually decide if TSO is ok. 17461 */ 17462 #ifdef INET6 17463 if (isipv6 && IPSEC_ENABLED(ipv6)) 17464 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 17465 #ifdef INET 17466 else 17467 #endif 17468 #endif /* INET6 */ 17469 #ifdef INET 17470 if (IPSEC_ENABLED(ipv4)) 17471 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 17472 #endif /* INET */ 17473 #endif 17474 17475 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17476 ipoptlen += ipsec_optlen; 17477 #endif 17478 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17479 (tp->t_port == 0) && 17480 ((tp->t_flags & TF_SIGNATURE) == 0) && 17481 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17482 ipoptlen == 0) 17483 tso = 1; 17484 { 17485 uint32_t outstanding __unused; 17486 17487 outstanding = tp->snd_max - tp->snd_una; 17488 if (tp->t_flags & TF_SENTFIN) { 17489 /* 17490 * If we sent a fin, snd_max is 1 higher than 17491 * snd_una 17492 */ 17493 outstanding--; 17494 } 17495 if (sack_rxmit) { 17496 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17497 flags &= ~TH_FIN; 17498 } else { 17499 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17500 sbused(sb))) 17501 flags &= ~TH_FIN; 17502 } 17503 } 17504 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17505 (long)TCP_MAXWIN << tp->rcv_scale); 17506 17507 /* 17508 * Sender silly window avoidance. We transmit under the following 17509 * conditions when len is non-zero: 17510 * 17511 * - We have a full segment (or more with TSO) - This is the last 17512 * buffer in a write()/send() and we are either idle or running 17513 * NODELAY - we've timed out (e.g. persist timer) - we have more 17514 * then 1/2 the maximum send window's worth of data (receiver may be 17515 * limited the window size) - we need to retransmit 17516 */ 17517 if (len) { 17518 if (len >= segsiz) { 17519 goto send; 17520 } 17521 /* 17522 * NOTE! on localhost connections an 'ack' from the remote 17523 * end may occur synchronously with the output and cause us 17524 * to flush a buffer queued with moretocome. 
XXX 17525 * 17526 */ 17527 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17528 (idle || (tp->t_flags & TF_NODELAY)) && 17529 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17530 (tp->t_flags & TF_NOPUSH) == 0) { 17531 pass = 2; 17532 goto send; 17533 } 17534 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17535 pass = 22; 17536 goto send; 17537 } 17538 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17539 pass = 4; 17540 goto send; 17541 } 17542 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17543 pass = 5; 17544 goto send; 17545 } 17546 if (sack_rxmit) { 17547 pass = 6; 17548 goto send; 17549 } 17550 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17551 (ctf_outstanding(tp) < (segsiz * 2))) { 17552 /* 17553 * We have less than two MSS outstanding (delayed ack) 17554 * and our rwnd will not let us send a full sized 17555 * MSS. Lets go ahead and let this small segment 17556 * out because we want to try to have at least two 17557 * packets inflight to not be caught by delayed ack. 17558 */ 17559 pass = 12; 17560 goto send; 17561 } 17562 } 17563 /* 17564 * Sending of standalone window updates. 17565 * 17566 * Window updates are important when we close our window due to a 17567 * full socket buffer and are opening it again after the application 17568 * reads data from it. Once the window has opened again and the 17569 * remote end starts to send again the ACK clock takes over and 17570 * provides the most current window information. 17571 * 17572 * We must avoid the silly window syndrome whereas every read from 17573 * the receive buffer, no matter how small, causes a window update 17574 * to be sent. We also should avoid sending a flurry of window 17575 * updates when the socket buffer had queued a lot of data and the 17576 * application is doing small reads. 17577 * 17578 * Prevent a flurry of pointless window updates by only sending an 17579 * update when we can increase the advertized window by more than 17580 * 1/4th of the socket buffer capacity. When the buffer is getting 17581 * full or is very small be more aggressive and send an update 17582 * whenever we can increase by two mss sized segments. In all other 17583 * situations the ACK's to new incoming data will carry further 17584 * window increases. 17585 * 17586 * Don't send an independent window update if a delayed ACK is 17587 * pending (it will get piggy-backed on it) or the remote side 17588 * already has done a half-close and won't send more data. Skip 17589 * this if the connection is in T/TCP half-open state. 17590 */ 17591 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17592 !(tp->t_flags & TF_DELACK) && 17593 !TCPS_HAVERCVDFIN(tp->t_state)) { 17594 /* 17595 * "adv" is the amount we could increase the window, taking 17596 * into account that we are limited by TCP_MAXWIN << 17597 * tp->rcv_scale. 17598 */ 17599 int32_t adv; 17600 int oldwin; 17601 17602 adv = recwin; 17603 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17604 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17605 if (adv > oldwin) 17606 adv -= oldwin; 17607 else { 17608 /* We can't increase the window */ 17609 adv = 0; 17610 } 17611 } else 17612 oldwin = 0; 17613 17614 /* 17615 * If the new window size ends up being the same as or less 17616 * than the old size when it is scaled, then don't force 17617 * a window update. 
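 * For example, with a window scale of 7 an increase of less than 128 bytes can vanish entirely once scaled, in which case no update is forced; the checks that follow then want roughly two segments of newly opened space plus a quarter of the buffer (or a buffer that is small or nearly full), or an increase that at least doubles the advertised window, before one is sent.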
17618 */ 17619 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17620 goto dontupdate; 17621 17622 if (adv >= (int32_t)(2 * segsiz) && 17623 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17624 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17625 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17626 pass = 7; 17627 goto send; 17628 } 17629 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17630 pass = 23; 17631 goto send; 17632 } 17633 } 17634 dontupdate: 17635 17636 /* 17637 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17638 * is also a catch-all for the retransmit timer timeout case. 17639 */ 17640 if (tp->t_flags & TF_ACKNOW) { 17641 pass = 8; 17642 goto send; 17643 } 17644 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17645 pass = 9; 17646 goto send; 17647 } 17648 /* 17649 * If our state indicates that FIN should be sent and we have not 17650 * yet done so, then we need to send. 17651 */ 17652 if ((flags & TH_FIN) && 17653 (tp->snd_nxt == tp->snd_una)) { 17654 pass = 11; 17655 goto send; 17656 } 17657 /* 17658 * No reason to send a segment, just return. 17659 */ 17660 just_return: 17661 SOCKBUF_UNLOCK(sb); 17662 just_return_nolock: 17663 { 17664 int app_limited = CTF_JR_SENT_DATA; 17665 17666 if (tot_len_this_send > 0) { 17667 /* Make sure snd_nxt is up to max */ 17668 rack->r_ctl.fsb.recwin = recwin; 17669 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17670 if ((error == 0) && 17671 rack_use_rfo && 17672 ((flags & (TH_SYN|TH_FIN)) == 0) && 17673 (ipoptlen == 0) && 17674 (tp->snd_nxt == tp->snd_max) && 17675 (tp->rcv_numsacks == 0) && 17676 rack->r_fsb_inited && 17677 TCPS_HAVEESTABLISHED(tp->t_state) && 17678 (rack->r_must_retran == 0) && 17679 ((tp->t_flags & TF_NEEDFIN) == 0) && 17680 (len > 0) && (orig_len > 0) && 17681 (orig_len > len) && 17682 ((orig_len - len) >= segsiz) && 17683 ((optlen == 0) || 17684 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17685 /* We can send at least one more MSS using our fsb */ 17686 17687 rack->r_fast_output = 1; 17688 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17689 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17690 rack->r_ctl.fsb.tcp_flags = flags; 17691 rack->r_ctl.fsb.left_to_send = orig_len - len; 17692 if (hw_tls) 17693 rack->r_ctl.fsb.hw_tls = 1; 17694 else 17695 rack->r_ctl.fsb.hw_tls = 0; 17696 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17697 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17698 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17699 (tp->snd_max - tp->snd_una))); 17700 if (rack->r_ctl.fsb.left_to_send < segsiz) 17701 rack->r_fast_output = 0; 17702 else { 17703 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17704 rack->r_ctl.fsb.rfo_apply_push = 1; 17705 else 17706 rack->r_ctl.fsb.rfo_apply_push = 0; 17707 } 17708 } else 17709 rack->r_fast_output = 0; 17710 17711 17712 rack_log_fsb(rack, tp, so, flags, 17713 ipoptlen, orig_len, len, 0, 17714 1, optlen, __LINE__, 1); 17715 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17716 tp->snd_nxt = tp->snd_max; 17717 } else { 17718 int end_window = 0; 17719 uint32_t seq = tp->gput_ack; 17720 17721 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17722 if (rsm) { 17723 /* 17724 * Mark the last sent that we just-returned (hinting 17725 * that delayed ack may play a role in any rtt measurement). 
17726 */ 17727 rsm->r_just_ret = 1; 17728 } 17729 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17730 rack->r_ctl.rc_agg_delayed = 0; 17731 rack->r_early = 0; 17732 rack->r_late = 0; 17733 rack->r_ctl.rc_agg_early = 0; 17734 if ((ctf_outstanding(tp) + 17735 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17736 minseg)) >= tp->snd_wnd) { 17737 /* We are limited by the rwnd */ 17738 app_limited = CTF_JR_RWND_LIMITED; 17739 if (IN_FASTRECOVERY(tp->t_flags)) 17740 rack->r_ctl.rc_prr_sndcnt = 0; 17741 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17742 /* We are limited by whats available -- app limited */ 17743 app_limited = CTF_JR_APP_LIMITED; 17744 if (IN_FASTRECOVERY(tp->t_flags)) 17745 rack->r_ctl.rc_prr_sndcnt = 0; 17746 } else if ((idle == 0) && 17747 ((tp->t_flags & TF_NODELAY) == 0) && 17748 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17749 (len < segsiz)) { 17750 /* 17751 * No delay is not on and the 17752 * user is sending less than 1MSS. This 17753 * brings out SWS avoidance so we 17754 * don't send. Another app-limited case. 17755 */ 17756 app_limited = CTF_JR_APP_LIMITED; 17757 } else if (tp->t_flags & TF_NOPUSH) { 17758 /* 17759 * The user has requested no push of 17760 * the last segment and we are 17761 * at the last segment. Another app 17762 * limited case. 17763 */ 17764 app_limited = CTF_JR_APP_LIMITED; 17765 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17766 /* Its the cwnd */ 17767 app_limited = CTF_JR_CWND_LIMITED; 17768 } else if (IN_FASTRECOVERY(tp->t_flags) && 17769 (rack->rack_no_prr == 0) && 17770 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17771 app_limited = CTF_JR_PRR; 17772 } else { 17773 /* Now why here are we not sending? */ 17774 #ifdef NOW 17775 #ifdef INVARIANTS 17776 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17777 #endif 17778 #endif 17779 app_limited = CTF_JR_ASSESSING; 17780 } 17781 /* 17782 * App limited in some fashion, for our pacing GP 17783 * measurements we don't want any gap (even cwnd). 17784 * Close down the measurement window. 17785 */ 17786 if (rack_cwnd_block_ends_measure && 17787 ((app_limited == CTF_JR_CWND_LIMITED) || 17788 (app_limited == CTF_JR_PRR))) { 17789 /* 17790 * The reason we are not sending is 17791 * the cwnd (or prr). We have been configured 17792 * to end the measurement window in 17793 * this case. 17794 */ 17795 end_window = 1; 17796 } else if (rack_rwnd_block_ends_measure && 17797 (app_limited == CTF_JR_RWND_LIMITED)) { 17798 /* 17799 * We are rwnd limited and have been 17800 * configured to end the measurement 17801 * window in this case. 17802 */ 17803 end_window = 1; 17804 } else if (app_limited == CTF_JR_APP_LIMITED) { 17805 /* 17806 * A true application limited period, we have 17807 * ran out of data. 17808 */ 17809 end_window = 1; 17810 } else if (app_limited == CTF_JR_ASSESSING) { 17811 /* 17812 * In the assessing case we hit the end of 17813 * the if/else and had no known reason 17814 * This will panic us under invariants.. 17815 * 17816 * If we get this out in logs we need to 17817 * investagate which reason we missed. 17818 */ 17819 end_window = 1; 17820 } 17821 if (end_window) { 17822 uint8_t log = 0; 17823 17824 /* Adjust the Gput measurement */ 17825 if ((tp->t_flags & TF_GPUTINPROG) && 17826 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17827 tp->gput_ack = tp->snd_max; 17828 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17829 /* 17830 * There is not enough to measure. 
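 * (the goodput window was clipped back to snd_max and now spans fewer than MIN_GP_WIN segments, so the in-progress measurement is abandoned and the event logged rather than producing a bogus sample).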
17831 */ 17832 tp->t_flags &= ~TF_GPUTINPROG; 17833 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17834 rack->r_ctl.rc_gp_srtt /*flex1*/, 17835 tp->gput_seq, 17836 0, 0, 18, __LINE__, NULL, 0); 17837 } else 17838 log = 1; 17839 } 17840 /* Mark the last packet has app limited */ 17841 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17842 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17843 if (rack->r_ctl.rc_app_limited_cnt == 0) 17844 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17845 else { 17846 /* 17847 * Go out to the end app limited and mark 17848 * this new one as next and move the end_appl up 17849 * to this guy. 17850 */ 17851 if (rack->r_ctl.rc_end_appl) 17852 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17853 rack->r_ctl.rc_end_appl = rsm; 17854 } 17855 rsm->r_flags |= RACK_APP_LIMITED; 17856 rack->r_ctl.rc_app_limited_cnt++; 17857 } 17858 if (log) 17859 rack_log_pacing_delay_calc(rack, 17860 rack->r_ctl.rc_app_limited_cnt, seq, 17861 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17862 } 17863 } 17864 /* Check if we need to go into persists or not */ 17865 if ((tp->snd_max == tp->snd_una) && 17866 TCPS_HAVEESTABLISHED(tp->t_state) && 17867 sbavail(sb) && 17868 (sbavail(sb) > tp->snd_wnd) && 17869 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17870 /* Yes lets make sure to move to persist before timer-start */ 17871 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17872 } 17873 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17874 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17875 } 17876 #ifdef NETFLIX_SHARED_CWND 17877 if ((sbavail(sb) == 0) && 17878 rack->r_ctl.rc_scw) { 17879 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17880 rack->rack_scwnd_is_idle = 1; 17881 } 17882 #endif 17883 #ifdef TCP_ACCOUNTING 17884 if (tot_len_this_send > 0) { 17885 crtsc = get_cyclecount(); 17886 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17887 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17888 } 17889 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17890 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17891 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17892 } 17893 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17894 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17895 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17896 } 17897 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17898 } else { 17899 crtsc = get_cyclecount(); 17900 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17901 tp->tcp_cnt_counters[SND_LIMITED]++; 17902 } 17903 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17904 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17905 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17906 } 17907 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17908 } 17909 sched_unpin(); 17910 #endif 17911 return (0); 17912 17913 send: 17914 if (rsm || sack_rxmit) 17915 counter_u64_add(rack_nfto_resend, 1); 17916 else 17917 counter_u64_add(rack_non_fto_send, 1); 17918 if ((flags & TH_FIN) && 17919 sbavail(sb)) { 17920 /* 17921 * We do not transmit a FIN 17922 * with data outstanding. We 17923 * need to make it so all data 17924 * is acked first. 
17925 */ 17926 flags &= ~TH_FIN; 17927 } 17928 /* Enforce stack imposed max seg size if we have one */ 17929 if (rack->r_ctl.rc_pace_max_segs && 17930 (len > rack->r_ctl.rc_pace_max_segs)) { 17931 mark = 1; 17932 len = rack->r_ctl.rc_pace_max_segs; 17933 } 17934 SOCKBUF_LOCK_ASSERT(sb); 17935 if (len > 0) { 17936 if (len >= segsiz) 17937 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17938 else 17939 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17940 } 17941 /* 17942 * Before ESTABLISHED, force sending of initial options unless TCP 17943 * set not to do any options. NOTE: we assume that the IP/TCP header 17944 * plus TCP options always fit in a single mbuf, leaving room for a 17945 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17946 * + optlen <= MCLBYTES 17947 */ 17948 optlen = 0; 17949 #ifdef INET6 17950 if (isipv6) 17951 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17952 else 17953 #endif 17954 hdrlen = sizeof(struct tcpiphdr); 17955 17956 /* 17957 * Compute options for segment. We only have to care about SYN and 17958 * established connection segments. Options for SYN-ACK segments 17959 * are handled in TCP syncache. 17960 */ 17961 to.to_flags = 0; 17962 if ((tp->t_flags & TF_NOOPT) == 0) { 17963 /* Maximum segment size. */ 17964 if (flags & TH_SYN) { 17965 tp->snd_nxt = tp->iss; 17966 to.to_mss = tcp_mssopt(&inp->inp_inc); 17967 if (tp->t_port) 17968 to.to_mss -= V_tcp_udp_tunneling_overhead; 17969 to.to_flags |= TOF_MSS; 17970 17971 /* 17972 * On SYN or SYN|ACK transmits on TFO connections, 17973 * only include the TFO option if it is not a 17974 * retransmit, as the presence of the TFO option may 17975 * have caused the original SYN or SYN|ACK to have 17976 * been dropped by a middlebox. 17977 */ 17978 if (IS_FASTOPEN(tp->t_flags) && 17979 (tp->t_rxtshift == 0)) { 17980 if (tp->t_state == TCPS_SYN_RECEIVED) { 17981 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17982 to.to_tfo_cookie = 17983 (u_int8_t *)&tp->t_tfo_cookie.server; 17984 to.to_flags |= TOF_FASTOPEN; 17985 wanted_cookie = 1; 17986 } else if (tp->t_state == TCPS_SYN_SENT) { 17987 to.to_tfo_len = 17988 tp->t_tfo_client_cookie_len; 17989 to.to_tfo_cookie = 17990 tp->t_tfo_cookie.client; 17991 to.to_flags |= TOF_FASTOPEN; 17992 wanted_cookie = 1; 17993 /* 17994 * If we wind up having more data to 17995 * send with the SYN than can fit in 17996 * one segment, don't send any more 17997 * until the SYN|ACK comes back from 17998 * the other end. 17999 */ 18000 sendalot = 0; 18001 } 18002 } 18003 } 18004 /* Window scaling. */ 18005 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18006 to.to_wscale = tp->request_r_scale; 18007 to.to_flags |= TOF_SCALE; 18008 } 18009 /* Timestamps. */ 18010 if ((tp->t_flags & TF_RCVD_TSTMP) || 18011 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18012 to.to_tsval = ms_cts + tp->ts_offset; 18013 to.to_tsecr = tp->ts_recent; 18014 to.to_flags |= TOF_TS; 18015 } 18016 /* Set receive buffer autosizing timestamp. */ 18017 if (tp->rfbuf_ts == 0 && 18018 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18019 tp->rfbuf_ts = tcp_ts_getticks(); 18020 /* Selective ACK's. */ 18021 if (tp->t_flags & TF_SACK_PERMIT) { 18022 if (flags & TH_SYN) 18023 to.to_flags |= TOF_SACKPERM; 18024 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18025 tp->rcv_numsacks > 0) { 18026 to.to_flags |= TOF_SACK; 18027 to.to_nsacks = tp->rcv_numsacks; 18028 to.to_sacks = (u_char *)tp->sackblks; 18029 } 18030 } 18031 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18032 /* TCP-MD5 (RFC2385). 
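 * Setting TOF_SIGNATURE here only reserves room in the option block for the digest; the MD5 itself is computed further down, after the headers have been copied into the mbuf.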
*/ 18033 if (tp->t_flags & TF_SIGNATURE) 18034 to.to_flags |= TOF_SIGNATURE; 18035 #endif /* TCP_SIGNATURE */ 18036 18037 /* Processing the options. */ 18038 hdrlen += optlen = tcp_addoptions(&to, opt); 18039 /* 18040 * If we wanted a TFO option to be added, but it was unable 18041 * to fit, ensure no data is sent. 18042 */ 18043 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18044 !(to.to_flags & TOF_FASTOPEN)) 18045 len = 0; 18046 } 18047 if (tp->t_port) { 18048 if (V_tcp_udp_tunneling_port == 0) { 18049 /* The port was removed?? */ 18050 SOCKBUF_UNLOCK(&so->so_snd); 18051 #ifdef TCP_ACCOUNTING 18052 crtsc = get_cyclecount(); 18053 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18054 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18055 } 18056 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18057 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18058 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18059 } 18060 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18061 sched_unpin(); 18062 #endif 18063 return (EHOSTUNREACH); 18064 } 18065 hdrlen += sizeof(struct udphdr); 18066 } 18067 #ifdef INET6 18068 if (isipv6) 18069 ipoptlen = ip6_optlen(inp); 18070 else 18071 #endif 18072 if (inp->inp_options) 18073 ipoptlen = inp->inp_options->m_len - 18074 offsetof(struct ipoption, ipopt_list); 18075 else 18076 ipoptlen = 0; 18077 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18078 ipoptlen += ipsec_optlen; 18079 #endif 18080 18081 /* 18082 * Adjust data length if insertion of options will bump the packet 18083 * length beyond the t_maxseg length. Clear the FIN bit because we 18084 * cut off the tail of the segment. 18085 */ 18086 if (len + optlen + ipoptlen > tp->t_maxseg) { 18087 if (tso) { 18088 uint32_t if_hw_tsomax; 18089 uint32_t moff; 18090 int32_t max_len; 18091 18092 /* extract TSO information */ 18093 if_hw_tsomax = tp->t_tsomax; 18094 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18095 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18096 KASSERT(ipoptlen == 0, 18097 ("%s: TSO can't do IP options", __func__)); 18098 18099 /* 18100 * Check if we should limit by maximum payload 18101 * length: 18102 */ 18103 if (if_hw_tsomax != 0) { 18104 /* compute maximum TSO length */ 18105 max_len = (if_hw_tsomax - hdrlen - 18106 max_linkhdr); 18107 if (max_len <= 0) { 18108 len = 0; 18109 } else if (len > max_len) { 18110 sendalot = 1; 18111 len = max_len; 18112 mark = 2; 18113 } 18114 } 18115 /* 18116 * Prevent the last segment from being fractional 18117 * unless the send sockbuf can be emptied: 18118 */ 18119 max_len = (tp->t_maxseg - optlen); 18120 if ((sb_offset + len) < sbavail(sb)) { 18121 moff = len % (u_int)max_len; 18122 if (moff != 0) { 18123 mark = 3; 18124 len -= moff; 18125 } 18126 } 18127 /* 18128 * In case there are too many small fragments don't 18129 * use TSO: 18130 */ 18131 if (len <= segsiz) { 18132 mark = 4; 18133 tso = 0; 18134 } 18135 /* 18136 * Send the FIN in a separate segment after the bulk 18137 * sending is done. We don't trust the TSO 18138 * implementations to clear the FIN flag on all but 18139 * the last segment. 18140 */ 18141 if (tp->t_flags & TF_NEEDFIN) { 18142 sendalot = 4; 18143 } 18144 } else { 18145 mark = 5; 18146 if (optlen + ipoptlen >= tp->t_maxseg) { 18147 /* 18148 * Since we don't have enough space to put 18149 * the IP header chain and the TCP header in 18150 * one packet as required by RFC 7112, don't 18151 * send it. Also ensure that at least one 18152 * byte of the payload can be put into the 18153 * TCP segment. 
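 * (if optlen + ipoptlen already reaches t_maxseg there is no room left for even one byte of data, so we fail the send with EMSGSIZE; otherwise len is clipped to t_maxseg - optlen - ipoptlen and we loop again).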
18154 */ 18155 SOCKBUF_UNLOCK(&so->so_snd); 18156 error = EMSGSIZE; 18157 sack_rxmit = 0; 18158 goto out; 18159 } 18160 len = tp->t_maxseg - optlen - ipoptlen; 18161 sendalot = 5; 18162 } 18163 } else { 18164 tso = 0; 18165 mark = 6; 18166 } 18167 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18168 ("%s: len > IP_MAXPACKET", __func__)); 18169 #ifdef DIAGNOSTIC 18170 #ifdef INET6 18171 if (max_linkhdr + hdrlen > MCLBYTES) 18172 #else 18173 if (max_linkhdr + hdrlen > MHLEN) 18174 #endif 18175 panic("tcphdr too big"); 18176 #endif 18177 18178 /* 18179 * This KASSERT is here to catch edge cases at a well defined place. 18180 * Before, those had triggered (random) panic conditions further 18181 * down. 18182 */ 18183 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18184 if ((len == 0) && 18185 (flags & TH_FIN) && 18186 (sbused(sb))) { 18187 /* 18188 * We have outstanding data, don't send a fin by itself!. 18189 */ 18190 goto just_return; 18191 } 18192 /* 18193 * Grab a header mbuf, attaching a copy of data to be transmitted, 18194 * and initialize the header from the template for sends on this 18195 * connection. 18196 */ 18197 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18198 if (len) { 18199 uint32_t max_val; 18200 uint32_t moff; 18201 18202 if (rack->r_ctl.rc_pace_max_segs) 18203 max_val = rack->r_ctl.rc_pace_max_segs; 18204 else if (rack->rc_user_set_max_segs) 18205 max_val = rack->rc_user_set_max_segs * segsiz; 18206 else 18207 max_val = len; 18208 /* 18209 * We allow a limit on sending with hptsi. 18210 */ 18211 if (len > max_val) { 18212 mark = 7; 18213 len = max_val; 18214 } 18215 #ifdef INET6 18216 if (MHLEN < hdrlen + max_linkhdr) 18217 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18218 else 18219 #endif 18220 m = m_gethdr(M_NOWAIT, MT_DATA); 18221 18222 if (m == NULL) { 18223 SOCKBUF_UNLOCK(sb); 18224 error = ENOBUFS; 18225 sack_rxmit = 0; 18226 goto out; 18227 } 18228 m->m_data += max_linkhdr; 18229 m->m_len = hdrlen; 18230 18231 /* 18232 * Start the m_copy functions from the closest mbuf to the 18233 * sb_offset in the socket buffer chain. 18234 */ 18235 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18236 s_mb = mb; 18237 s_moff = moff; 18238 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18239 m_copydata(mb, moff, (int)len, 18240 mtod(m, caddr_t)+hdrlen); 18241 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18242 sbsndptr_adv(sb, mb, len); 18243 m->m_len += len; 18244 } else { 18245 struct sockbuf *msb; 18246 18247 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18248 msb = NULL; 18249 else 18250 msb = sb; 18251 m->m_next = tcp_m_copym( 18252 mb, moff, &len, 18253 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18254 ((rsm == NULL) ? hw_tls : 0) 18255 #ifdef NETFLIX_COPY_ARGS 18256 , &s_mb, &s_moff 18257 #endif 18258 ); 18259 if (len <= (tp->t_maxseg - optlen)) { 18260 /* 18261 * Must have ran out of mbufs for the copy 18262 * shorten it to no longer need tso. Lets 18263 * not put on sendalot since we are low on 18264 * mbufs. 
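 * (tcp_m_copym() handed back less than a full segment, i.e. len is now at most t_maxseg - optlen, so TSO buys nothing; and if it returned no chain at all we fail the send with ENOBUFS just below).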
18265 */ 18266 tso = 0; 18267 } 18268 if (m->m_next == NULL) { 18269 SOCKBUF_UNLOCK(sb); 18270 (void)m_free(m); 18271 error = ENOBUFS; 18272 sack_rxmit = 0; 18273 goto out; 18274 } 18275 } 18276 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18277 if (rsm && (rsm->r_flags & RACK_TLP)) { 18278 /* 18279 * TLP should not count in retran count, but 18280 * in its own bin 18281 */ 18282 counter_u64_add(rack_tlp_retran, 1); 18283 counter_u64_add(rack_tlp_retran_bytes, len); 18284 } else { 18285 tp->t_sndrexmitpack++; 18286 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18287 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18288 } 18289 #ifdef STATS 18290 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18291 len); 18292 #endif 18293 } else { 18294 KMOD_TCPSTAT_INC(tcps_sndpack); 18295 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18296 #ifdef STATS 18297 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18298 len); 18299 #endif 18300 } 18301 /* 18302 * If we're sending everything we've got, set PUSH. (This 18303 * will keep happy those implementations which only give 18304 * data to the user when a buffer fills or a PUSH comes in.) 18305 */ 18306 if (sb_offset + len == sbused(sb) && 18307 sbused(sb) && 18308 !(flags & TH_SYN)) { 18309 flags |= TH_PUSH; 18310 add_flag |= RACK_HAD_PUSH; 18311 } 18312 18313 SOCKBUF_UNLOCK(sb); 18314 } else { 18315 SOCKBUF_UNLOCK(sb); 18316 if (tp->t_flags & TF_ACKNOW) 18317 KMOD_TCPSTAT_INC(tcps_sndacks); 18318 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18319 KMOD_TCPSTAT_INC(tcps_sndctrl); 18320 else 18321 KMOD_TCPSTAT_INC(tcps_sndwinup); 18322 18323 m = m_gethdr(M_NOWAIT, MT_DATA); 18324 if (m == NULL) { 18325 error = ENOBUFS; 18326 sack_rxmit = 0; 18327 goto out; 18328 } 18329 #ifdef INET6 18330 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18331 MHLEN >= hdrlen) { 18332 M_ALIGN(m, hdrlen); 18333 } else 18334 #endif 18335 m->m_data += max_linkhdr; 18336 m->m_len = hdrlen; 18337 } 18338 SOCKBUF_UNLOCK_ASSERT(sb); 18339 m->m_pkthdr.rcvif = (struct ifnet *)0; 18340 #ifdef MAC 18341 mac_inpcb_create_mbuf(inp, m); 18342 #endif 18343 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18344 #ifdef INET6 18345 if (isipv6) 18346 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18347 else 18348 #endif /* INET6 */ 18349 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18350 th = rack->r_ctl.fsb.th; 18351 udp = rack->r_ctl.fsb.udp; 18352 if (udp) { 18353 #ifdef INET6 18354 if (isipv6) 18355 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18356 else 18357 #endif /* INET6 */ 18358 ulen = hdrlen + len - sizeof(struct ip); 18359 udp->uh_ulen = htons(ulen); 18360 } 18361 } else { 18362 #ifdef INET6 18363 if (isipv6) { 18364 ip6 = mtod(m, struct ip6_hdr *); 18365 if (tp->t_port) { 18366 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18367 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18368 udp->uh_dport = tp->t_port; 18369 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18370 udp->uh_ulen = htons(ulen); 18371 th = (struct tcphdr *)(udp + 1); 18372 } else 18373 th = (struct tcphdr *)(ip6 + 1); 18374 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18375 } else 18376 #endif /* INET6 */ 18377 { 18378 ip = mtod(m, struct ip *); 18379 if (tp->t_port) { 18380 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18381 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18382 udp->uh_dport = tp->t_port; 18383 ulen = hdrlen + len - sizeof(struct ip); 18384 udp->uh_ulen = htons(ulen); 18385 th = (struct tcphdr *)(udp + 1); 18386 } else 18387 th = (struct 
tcphdr *)(ip + 1); 18388 tcpip_fillheaders(inp, tp->t_port, ip, th); 18389 } 18390 } 18391 /* 18392 * Fill in fields, remembering maximum advertised window for use in 18393 * delaying messages about window sizes. If resending a FIN, be sure 18394 * not to use a new sequence number. 18395 */ 18396 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18397 tp->snd_nxt == tp->snd_max) 18398 tp->snd_nxt--; 18399 /* 18400 * If we are starting a connection, send ECN setup SYN packet. If we 18401 * are on a retransmit, we may resend those bits a number of times 18402 * as per RFC 3168. 18403 */ 18404 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18405 flags |= tcp_ecn_output_syn_sent(tp); 18406 } 18407 /* Also handle parallel SYN for ECN */ 18408 if (TCPS_HAVERCVDSYN(tp->t_state) && 18409 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18410 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18411 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18412 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18413 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18414 #ifdef INET6 18415 if (isipv6) { 18416 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18417 ip6->ip6_flow |= htonl(ect << 20); 18418 } 18419 else 18420 #endif 18421 { 18422 ip->ip_tos &= ~IPTOS_ECN_MASK; 18423 ip->ip_tos |= ect; 18424 } 18425 } 18426 /* 18427 * If we are doing retransmissions, then snd_nxt will not reflect 18428 * the first unsent octet. For ACK only packets, we do not want the 18429 * sequence number of the retransmitted packet, we want the sequence 18430 * number of the next unsent octet. So, if there is no data (and no 18431 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18432 * ti_seq. But if we are in persist state, snd_max might reflect 18433 * one byte beyond the right edge of the window, so use snd_nxt in 18434 * that case, since we know we aren't doing a retransmission. 18435 * (retransmit and persist are mutually exclusive...) 18436 */ 18437 if (sack_rxmit == 0) { 18438 if (len || (flags & (TH_SYN | TH_FIN))) { 18439 th->th_seq = htonl(tp->snd_nxt); 18440 rack_seq = tp->snd_nxt; 18441 } else { 18442 th->th_seq = htonl(tp->snd_max); 18443 rack_seq = tp->snd_max; 18444 } 18445 } else { 18446 th->th_seq = htonl(rsm->r_start); 18447 rack_seq = rsm->r_start; 18448 } 18449 th->th_ack = htonl(tp->rcv_nxt); 18450 tcp_set_flags(th, flags); 18451 /* 18452 * Calculate receive window. Don't shrink window, but avoid silly 18453 * window syndrome. 18454 * If a RST segment is sent, advertise a window of zero. 18455 */ 18456 if (flags & TH_RST) { 18457 recwin = 0; 18458 } else { 18459 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18460 recwin < (long)segsiz) { 18461 recwin = 0; 18462 } 18463 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18464 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18465 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18466 } 18467 18468 /* 18469 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18470 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18471 * handled in syncache. 18472 */ 18473 if (flags & TH_SYN) 18474 th->th_win = htons((u_short) 18475 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18476 else { 18477 /* Avoid shrinking window with window scaling. */ 18478 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18479 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18480 } 18481 /* 18482 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18483 * window. This may cause the remote transmitter to stall. 
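 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged userland sketch of the receive-window clamping done just above:
 * silly-window avoidance, never shrinking below what was already advertised,
 * and rounding up to window-scale granularity so the scaled value cannot
 * shrink either.  The RST/SYN special cases and sequence-number wrap-around
 * are ignored, and every name below is invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

static uint32_t
example_recwin(long recwin, long sb_hiwat, uint32_t rcv_adv, uint32_t rcv_nxt,
    int rcv_scale, long segsiz)
{
	/* Silly-window avoidance: do not offer a uselessly small window. */
	if (recwin < sb_hiwat / 4 && recwin < segsiz)
		recwin = 0;
	/* Never shrink below what the peer has already been offered. */
	if (rcv_adv > rcv_nxt && recwin < (long)(rcv_adv - rcv_nxt))
		recwin = (long)(rcv_adv - rcv_nxt);
	/* Round up so that scaling truncation cannot shrink it either. */
	recwin = (recwin + (1L << rcv_scale) - 1) & ~((1L << rcv_scale) - 1);
	return ((uint32_t)(recwin >> rcv_scale));
}
#endif

/*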
This 18484 * flag tells soreceive() to disable delayed acknowledgements when 18485 * draining the buffer. This can occur if the receiver is 18486 * attempting to read more data than can be buffered prior to 18487 * transmitting on the connection. 18488 */ 18489 if (th->th_win == 0) { 18490 tp->t_sndzerowin++; 18491 tp->t_flags |= TF_RXWIN0SENT; 18492 } else 18493 tp->t_flags &= ~TF_RXWIN0SENT; 18494 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18495 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18496 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18497 uint8_t *cpto; 18498 18499 cpto = mtod(m, uint8_t *); 18500 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18501 /* 18502 * We have just copied in: 18503 * IP/IP6 18504 * <optional udphdr> 18505 * tcphdr (no options) 18506 * 18507 * We need to grab the correct pointers into the mbuf 18508 * for both the tcp header, and possibly the udp header (if tunneling). 18509 * We do this by using the offset in the copy buffer and adding it 18510 * to the mbuf base pointer (cpto). 18511 */ 18512 #ifdef INET6 18513 if (isipv6) 18514 ip6 = mtod(m, struct ip6_hdr *); 18515 else 18516 #endif /* INET6 */ 18517 ip = mtod(m, struct ip *); 18518 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18519 /* If we have a udp header lets set it into the mbuf as well */ 18520 if (udp) 18521 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18522 } 18523 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18524 if (to.to_flags & TOF_SIGNATURE) { 18525 /* 18526 * Calculate MD5 signature and put it into the place 18527 * determined before. 18528 * NOTE: since TCP options buffer doesn't point into 18529 * mbuf's data, calculate offset and use it. 18530 */ 18531 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18532 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18533 /* 18534 * Do not send segment if the calculation of MD5 18535 * digest has failed. 18536 */ 18537 goto out; 18538 } 18539 } 18540 #endif 18541 if (optlen) { 18542 bcopy(opt, th + 1, optlen); 18543 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18544 } 18545 /* 18546 * Put TCP length in extended header, and then checksum extended 18547 * header and data. 18548 */ 18549 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18550 #ifdef INET6 18551 if (isipv6) { 18552 /* 18553 * ip6_plen is not need to be filled now, and will be filled 18554 * in ip6_output. 
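 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged sketch of the offset-preserving pointer fix-up used above when the
 * cached fast-send-block header image is memcpy'd into the front of the new
 * mbuf: the TCP (and optional UDP) header pointers are recomputed as the
 * same byte offsets into the copy.  The struct and names are invented for
 * the example; this is not the kernel's fsb layout.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>
#include <string.h>

struct example_tmpl {
	uint8_t	*base;	/* start of the cached IP(+UDP)+TCP header image */
	uint8_t	*th;	/* points at the TCP header inside 'base' */
	size_t	 len;	/* total template length */
};

static uint8_t *
example_copy_template(const struct example_tmpl *t, uint8_t *dst)
{
	memcpy(dst, t->base, t->len);
	/* Same offset into the copy as 'th' had into the template. */
	return (dst + (t->th - t->base));
}
#endif

/*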
18555 */ 18556 if (tp->t_port) { 18557 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18558 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18559 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18560 th->th_sum = htons(0); 18561 UDPSTAT_INC(udps_opackets); 18562 } else { 18563 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18564 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18565 th->th_sum = in6_cksum_pseudo(ip6, 18566 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18567 0); 18568 } 18569 } 18570 #endif 18571 #if defined(INET6) && defined(INET) 18572 else 18573 #endif 18574 #ifdef INET 18575 { 18576 if (tp->t_port) { 18577 m->m_pkthdr.csum_flags = CSUM_UDP; 18578 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18579 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18580 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18581 th->th_sum = htons(0); 18582 UDPSTAT_INC(udps_opackets); 18583 } else { 18584 m->m_pkthdr.csum_flags = CSUM_TCP; 18585 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18586 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18587 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18588 IPPROTO_TCP + len + optlen)); 18589 } 18590 /* IP version must be set here for ipv4/ipv6 checking later */ 18591 KASSERT(ip->ip_v == IPVERSION, 18592 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18593 } 18594 #endif 18595 /* 18596 * Enable TSO and specify the size of the segments. The TCP pseudo 18597 * header checksum is always provided. XXX: Fixme: This is currently 18598 * not the case for IPv6. 18599 */ 18600 if (tso) { 18601 KASSERT(len > tp->t_maxseg - optlen, 18602 ("%s: len <= tso_segsz", __func__)); 18603 m->m_pkthdr.csum_flags |= CSUM_TSO; 18604 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18605 } 18606 KASSERT(len + hdrlen == m_length(m, NULL), 18607 ("%s: mbuf chain different than expected: %d + %u != %u", 18608 __func__, len, hdrlen, m_length(m, NULL))); 18609 18610 #ifdef TCP_HHOOK 18611 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18612 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18613 #endif 18614 /* We're getting ready to send; log now. 
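 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Above, only the pseudo-header partial sum is written into th_sum/uh_sum
 * (via in_pseudo()/in6_cksum_pseudo()); the CSUM_TCP/CSUM_UDP flags then ask
 * the NIC, or the TSO engine, to fold in the header and payload bytes.  With
 * TSO, tso_segsz is t_maxseg minus the option length so every generated
 * segment still has room for its options.  For reference only, a generic
 * RFC 1071 one's-complement checksum is sketched below; it is not the
 * kernel's helper and computes a full (complemented) checksum.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

static uint16_t
example_inet_cksum(const uint8_t *buf, int len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];	/* big-endian words */
		buf += 2;
		len -= 2;
	}
	if (len == 1)
		sum += (uint32_t)buf[0] << 8;		/* pad the odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	return ((uint16_t)~sum);
}
#endif

/*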
*/ 18615 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18616 union tcp_log_stackspecific log; 18617 18618 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18619 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18620 if (rack->rack_no_prr) 18621 log.u_bbr.flex1 = 0; 18622 else 18623 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18624 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18625 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18626 log.u_bbr.flex4 = orig_len; 18627 /* Save off the early/late values */ 18628 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18629 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18630 log.u_bbr.bw_inuse = rack_get_bw(rack); 18631 log.u_bbr.flex8 = 0; 18632 if (rsm) { 18633 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18634 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18635 counter_u64_add(rack_collapsed_win_rxt, 1); 18636 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18637 } 18638 if (doing_tlp) 18639 log.u_bbr.flex8 = 2; 18640 else 18641 log.u_bbr.flex8 = 1; 18642 } else { 18643 if (doing_tlp) 18644 log.u_bbr.flex8 = 3; 18645 else 18646 log.u_bbr.flex8 = 0; 18647 } 18648 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18649 log.u_bbr.flex7 = mark; 18650 log.u_bbr.flex7 <<= 8; 18651 log.u_bbr.flex7 |= pass; 18652 log.u_bbr.pkts_out = tp->t_maxseg; 18653 log.u_bbr.timeStamp = cts; 18654 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18655 log.u_bbr.lt_epoch = cwnd_to_use; 18656 log.u_bbr.delivered = sendalot; 18657 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18658 len, &log, false, NULL, NULL, 0, &tv); 18659 } else 18660 lgb = NULL; 18661 18662 /* 18663 * Fill in IP length and desired time to live and send to IP level. 18664 * There should be a better way to handle ttl and tos; we could keep 18665 * them in the template, but need a way to checksum without them. 18666 */ 18667 /* 18668 * m->m_pkthdr.len should have been set before cksum calcuration, 18669 * because in6_cksum() need it. 18670 */ 18671 #ifdef INET6 18672 if (isipv6) { 18673 /* 18674 * we separately set hoplimit for every segment, since the 18675 * user might want to change the value via setsockopt. Also, 18676 * desired default hop limit might be changed via Neighbor 18677 * Discovery. 18678 */ 18679 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18680 18681 /* 18682 * Set the packet size here for the benefit of DTrace 18683 * probes. ip6_output() will set it properly; it's supposed 18684 * to include the option header lengths as well. 18685 */ 18686 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18687 18688 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18689 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18690 else 18691 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18692 18693 if (tp->t_state == TCPS_SYN_SENT) 18694 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18695 18696 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18697 /* TODO: IPv6 IP6TOS_ECT bit on */ 18698 error = ip6_output(m, 18699 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18700 inp->in6p_outputopts, 18701 #else 18702 NULL, 18703 #endif 18704 &inp->inp_route6, 18705 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18706 NULL, NULL, inp); 18707 18708 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18709 mtu = inp->inp_route6.ro_nh->nh_mtu; 18710 } 18711 #endif /* INET6 */ 18712 #if defined(INET) && defined(INET6) 18713 else 18714 #endif 18715 #ifdef INET 18716 { 18717 ip->ip_len = htons(m->m_pkthdr.len); 18718 #ifdef INET6 18719 if (inp->inp_vflag & INP_IPV6PROTO) 18720 ip->ip_ttl = in6_selecthlim(inp, NULL); 18721 #endif /* INET6 */ 18722 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18723 /* 18724 * If we do path MTU discovery, then we set DF on every 18725 * packet. This might not be the best thing to do according 18726 * to RFC3390 Section 2. However the tcp hostcache migitates 18727 * the problem so it affects only the first tcp connection 18728 * with a host. 18729 * 18730 * NB: Don't set DF on small MTU/MSS to have a safe 18731 * fallback. 18732 */ 18733 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18734 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18735 if (tp->t_port == 0 || len < V_tcp_minmss) { 18736 ip->ip_off |= htons(IP_DF); 18737 } 18738 } else { 18739 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18740 } 18741 18742 if (tp->t_state == TCPS_SYN_SENT) 18743 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18744 18745 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18746 18747 error = ip_output(m, 18748 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18749 inp->inp_options, 18750 #else 18751 NULL, 18752 #endif 18753 &inp->inp_route, 18754 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18755 inp); 18756 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18757 mtu = inp->inp_route.ro_nh->nh_mtu; 18758 } 18759 #endif /* INET */ 18760 18761 out: 18762 if (lgb) { 18763 lgb->tlb_errno = error; 18764 lgb = NULL; 18765 } 18766 /* 18767 * In transmit state, time the transmission and arrange for the 18768 * retransmit. In persist state, just set snd_max. 
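 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged restatement of the DF-bit policy used in the INET branch above:
 * DF is only considered when path-MTU discovery is enabled and the MSS is
 * larger than the minimum MSS, and for UDP-tunneled connections it is only
 * set on segments smaller than that minimum so a safe fallback remains.
 * Names are invented; the EMSGSIZE path just above additionally captures the
 * nexthop MTU for the retry handled later in the error switch.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdbool.h>
#include <stdint.h>

static bool
example_set_df(bool pmtud_on, uint32_t maxseg, uint32_t minmss,
    uint16_t tunnel_port, uint32_t len)
{
	if (!pmtud_on || maxseg <= minmss)
		return (false);
	/* Tunneled sessions only carry DF on small segments. */
	return (tunnel_port == 0 || len < minmss);
}
#endif

/*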
18769 */ 18770 if (error == 0) { 18771 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18772 if (rsm && doing_tlp) { 18773 rack->rc_last_sent_tlp_past_cumack = 0; 18774 rack->rc_last_sent_tlp_seq_valid = 1; 18775 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18776 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18777 } 18778 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18779 if (rsm && (doing_tlp == 0)) { 18780 /* Set we retransmitted */ 18781 rack->rc_gp_saw_rec = 1; 18782 } else { 18783 if (cwnd_to_use > tp->snd_ssthresh) { 18784 /* Set we sent in CA */ 18785 rack->rc_gp_saw_ca = 1; 18786 } else { 18787 /* Set we sent in SS */ 18788 rack->rc_gp_saw_ss = 1; 18789 } 18790 } 18791 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18792 (tp->t_flags & TF_SACK_PERMIT) && 18793 tp->rcv_numsacks > 0) 18794 tcp_clean_dsack_blocks(tp); 18795 tot_len_this_send += len; 18796 if (len == 0) 18797 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18798 else if (len == 1) { 18799 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18800 } else if (len > 1) { 18801 int idx; 18802 18803 idx = (len / segsiz) + 3; 18804 if (idx >= TCP_MSS_ACCT_ATIMER) 18805 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18806 else 18807 counter_u64_add(rack_out_size[idx], 1); 18808 } 18809 } 18810 if ((rack->rack_no_prr == 0) && 18811 sub_from_prr && 18812 (error == 0)) { 18813 if (rack->r_ctl.rc_prr_sndcnt >= len) 18814 rack->r_ctl.rc_prr_sndcnt -= len; 18815 else 18816 rack->r_ctl.rc_prr_sndcnt = 0; 18817 } 18818 sub_from_prr = 0; 18819 if (doing_tlp) { 18820 /* Make sure the TLP is added */ 18821 add_flag |= RACK_TLP; 18822 } else if (rsm) { 18823 /* If its a resend without TLP then it must not have the flag */ 18824 rsm->r_flags &= ~RACK_TLP; 18825 } 18826 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18827 rack_to_usec_ts(&tv), 18828 rsm, add_flag, s_mb, s_moff, hw_tls); 18829 18830 18831 if ((error == 0) && 18832 (len > 0) && 18833 (tp->snd_una == tp->snd_max)) 18834 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18835 { 18836 tcp_seq startseq = tp->snd_nxt; 18837 18838 /* Track our lost count */ 18839 if (rsm && (doing_tlp == 0)) 18840 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18841 /* 18842 * Advance snd_nxt over sequence space of this segment. 18843 */ 18844 if (error) 18845 /* We don't log or do anything with errors */ 18846 goto nomore; 18847 if (doing_tlp == 0) { 18848 if (rsm == NULL) { 18849 /* 18850 * Not a retransmission of some 18851 * sort, new data is going out so 18852 * clear our TLP count and flag. 18853 */ 18854 rack->rc_tlp_in_progress = 0; 18855 rack->r_ctl.rc_tlp_cnt_out = 0; 18856 } 18857 } else { 18858 /* 18859 * We have just sent a TLP, mark that it is true 18860 * and make sure our in progress is set so we 18861 * continue to check the count. 18862 */ 18863 rack->rc_tlp_in_progress = 1; 18864 rack->r_ctl.rc_tlp_cnt_out++; 18865 } 18866 if (flags & (TH_SYN | TH_FIN)) { 18867 if (flags & TH_SYN) 18868 tp->snd_nxt++; 18869 if (flags & TH_FIN) { 18870 tp->snd_nxt++; 18871 tp->t_flags |= TF_SENTFIN; 18872 } 18873 } 18874 /* In the ENOBUFS case we do *not* update snd_max */ 18875 if (sack_rxmit) 18876 goto nomore; 18877 18878 tp->snd_nxt += len; 18879 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18880 if (tp->snd_una == tp->snd_max) { 18881 /* 18882 * Update the time we just added data since 18883 * none was outstanding. 
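 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged sketch of two pieces of the post-send bookkeeping above: the send
 * size histogram index (len/segsiz + 3, capped one below TCP_MSS_ACCT_ATIMER)
 * and the PRR allowance decrement.  The EX_* constants are stand-ins -- the
 * real TCP_MSS_ACCT_* values are not shown in this file -- and the helpers
 * themselves are invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

#define	EX_MSS_ACCT_SNDACK	0	/* assumed value: pure ACK bucket */
#define	EX_MSS_ACCT_PERSIST	1	/* assumed value: 1-byte persist probe */
#define	EX_MSS_ACCT_ATIMER	60	/* assumed cap, stands in for TCP_MSS_ACCT_ATIMER */

static int
example_out_size_idx(uint32_t len, uint32_t segsiz)
{
	int idx;

	if (len == 0)
		return (EX_MSS_ACCT_SNDACK);
	if (len == 1)
		return (EX_MSS_ACCT_PERSIST);
	idx = (int)(len / segsiz) + 3;
	return (idx >= EX_MSS_ACCT_ATIMER ? EX_MSS_ACCT_ATIMER - 1 : idx);
}

/* PRR: what was just sent comes out of the PRR send allowance, floored at 0. */
static uint32_t
example_prr_consume(uint32_t prr_sndcnt, uint32_t len)
{
	return (prr_sndcnt >= len ? prr_sndcnt - len : 0);
}
#endif

/*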
18884 */ 18885 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18886 tp->t_acktime = ticks; 18887 } 18888 tp->snd_max = tp->snd_nxt; 18889 /* 18890 * Time this transmission if not a retransmission and 18891 * not currently timing anything. 18892 * This is only relevant in case of switching back to 18893 * the base stack. 18894 */ 18895 if (tp->t_rtttime == 0) { 18896 tp->t_rtttime = ticks; 18897 tp->t_rtseq = startseq; 18898 KMOD_TCPSTAT_INC(tcps_segstimed); 18899 } 18900 if (len && 18901 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18902 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18903 } 18904 /* 18905 * If we are doing FO we need to update the mbuf position and subtract 18906 * this happens when the peer sends us duplicate information and 18907 * we thus want to send a DSACK. 18908 * 18909 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18910 * turned off? If not then we are going to echo multiple DSACK blocks 18911 * out (with the TSO), which we should not be doing. 18912 */ 18913 if (rack->r_fast_output && len) { 18914 if (rack->r_ctl.fsb.left_to_send > len) 18915 rack->r_ctl.fsb.left_to_send -= len; 18916 else 18917 rack->r_ctl.fsb.left_to_send = 0; 18918 if (rack->r_ctl.fsb.left_to_send < segsiz) 18919 rack->r_fast_output = 0; 18920 if (rack->r_fast_output) { 18921 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18922 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18923 } 18924 } 18925 } 18926 nomore: 18927 if (error) { 18928 rack->r_ctl.rc_agg_delayed = 0; 18929 rack->r_early = 0; 18930 rack->r_late = 0; 18931 rack->r_ctl.rc_agg_early = 0; 18932 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18933 /* 18934 * Failures do not advance the seq counter above. For the 18935 * case of ENOBUFS we will fall out and retry in 1ms with 18936 * the hpts. Everything else will just have to retransmit 18937 * with the timer. 18938 * 18939 * In any case, we do not want to loop around for another 18940 * send without a good reason. 18941 */ 18942 sendalot = 0; 18943 switch (error) { 18944 case EPERM: 18945 tp->t_softerror = error; 18946 #ifdef TCP_ACCOUNTING 18947 crtsc = get_cyclecount(); 18948 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18949 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18950 } 18951 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18952 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18953 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18954 } 18955 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18956 sched_unpin(); 18957 #endif 18958 return (error); 18959 case ENOBUFS: 18960 /* 18961 * Pace us right away to retry in a some 18962 * time 18963 */ 18964 if (rack->r_ctl.crte != NULL) { 18965 rack_trace_point(rack, RACK_TP_HWENOBUF); 18966 } else 18967 rack_trace_point(rack, RACK_TP_ENOBUF); 18968 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18969 if (rack->rc_enobuf < 0x7f) 18970 rack->rc_enobuf++; 18971 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18972 slot = 10 * HPTS_USEC_IN_MSEC; 18973 if (rack->r_ctl.crte != NULL) { 18974 counter_u64_add(rack_saw_enobuf_hw, 1); 18975 tcp_rl_log_enobuf(rack->r_ctl.crte); 18976 } 18977 counter_u64_add(rack_saw_enobuf, 1); 18978 goto enobufs; 18979 case EMSGSIZE: 18980 /* 18981 * For some reason the interface we used initially 18982 * to send segments changed to another or lowered 18983 * its MTU. If TSO was active we either got an 18984 * interface without TSO capabilits or TSO was 18985 * turned off. 
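 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged sketch of the ENOBUFS back-off computed above: each consecutive
 * ENOBUFS adds one more millisecond to the retry delay (the counter
 * saturates at 0x7f) and the delay is never below 10ms.  HPTS_USEC_IN_MSEC
 * is simply 1000; the helper and its names are invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

#define	EX_USEC_IN_MSEC	1000

static uint32_t
example_enobuf_slot(uint8_t *enobuf_cnt)
{
	uint32_t slot;

	slot = (1 + *enobuf_cnt) * EX_USEC_IN_MSEC;
	if (*enobuf_cnt < 0x7f)
		(*enobuf_cnt)++;
	if (slot < 10 * EX_USEC_IN_MSEC)
		slot = 10 * EX_USEC_IN_MSEC;
	return (slot);
}
#endif

/*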
If we obtained mtu from ip_output() 18986 * then update it and try again. 18987 */ 18988 if (tso) 18989 tp->t_flags &= ~TF_TSO; 18990 if (mtu != 0) { 18991 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18992 goto again; 18993 } 18994 slot = 10 * HPTS_USEC_IN_MSEC; 18995 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18996 #ifdef TCP_ACCOUNTING 18997 crtsc = get_cyclecount(); 18998 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18999 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19000 } 19001 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19002 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19003 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19004 } 19005 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19006 sched_unpin(); 19007 #endif 19008 return (error); 19009 case ENETUNREACH: 19010 counter_u64_add(rack_saw_enetunreach, 1); 19011 case EHOSTDOWN: 19012 case EHOSTUNREACH: 19013 case ENETDOWN: 19014 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19015 tp->t_softerror = error; 19016 } 19017 /* FALLTHROUGH */ 19018 default: 19019 slot = 10 * HPTS_USEC_IN_MSEC; 19020 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19021 #ifdef TCP_ACCOUNTING 19022 crtsc = get_cyclecount(); 19023 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19024 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19025 } 19026 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19027 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19028 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19029 } 19030 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19031 sched_unpin(); 19032 #endif 19033 return (error); 19034 } 19035 } else { 19036 rack->rc_enobuf = 0; 19037 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19038 rack->r_ctl.retran_during_recovery += len; 19039 } 19040 KMOD_TCPSTAT_INC(tcps_sndtotal); 19041 19042 /* 19043 * Data sent (as far as we can tell). If this advertises a larger 19044 * window than any other segment, then remember the size of the 19045 * advertised window. Any pending ACK has now been sent. 19046 */ 19047 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19048 tp->rcv_adv = tp->rcv_nxt + recwin; 19049 19050 tp->last_ack_sent = tp->rcv_nxt; 19051 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19052 enobufs: 19053 if (sendalot) { 19054 /* Do we need to turn off sendalot? */ 19055 if (rack->r_ctl.rc_pace_max_segs && 19056 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19057 /* We hit our max. */ 19058 sendalot = 0; 19059 } else if ((rack->rc_user_set_max_segs) && 19060 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19061 /* We hit the user defined max */ 19062 sendalot = 0; 19063 } 19064 } 19065 if ((error == 0) && (flags & TH_FIN)) 19066 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19067 if (flags & TH_RST) { 19068 /* 19069 * We don't send again after sending a RST. 19070 */ 19071 slot = 0; 19072 sendalot = 0; 19073 if (error == 0) 19074 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19075 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19076 /* 19077 * Get our pacing rate, if an error 19078 * occurred in sending (ENOBUF) we would 19079 * hit the else if with slot preset. Other 19080 * errors return. 19081 */ 19082 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19083 } 19084 if (rsm && 19085 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19086 rack->use_rack_rr) { 19087 /* Its a retransmit and we use the rack cheat? 
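 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged restatement of the sendalot throttling above: one pass through the
 * output path may loop and emit several segments, but it stops once the
 * burst reaches the pacer's byte limit or the user-configured segment cap.
 * Names are invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdbool.h>
#include <stdint.h>

static bool
example_keep_sending(uint32_t tot_len_this_send, uint32_t pace_max_bytes,
    uint32_t user_set_max_segs, uint32_t segsiz)
{
	if (pace_max_bytes && tot_len_this_send >= pace_max_bytes)
		return (false);		/* pacer burst limit reached */
	if (user_set_max_segs &&
	    tot_len_this_send >= user_set_max_segs * segsiz)
		return (false);		/* user-defined segment cap reached */
	return (true);
}
#endif

/*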
*/ 19088 if ((slot == 0) || 19089 (rack->rc_always_pace == 0) || 19090 (rack->r_rr_config == 1)) { 19091 /* 19092 * We have no pacing set or we 19093 * are using old-style rack or 19094 * we are overridden to use the old 1ms pacing. 19095 */ 19096 slot = rack->r_ctl.rc_min_to; 19097 } 19098 } 19099 /* We have sent clear the flag */ 19100 rack->r_ent_rec_ns = 0; 19101 if (rack->r_must_retran) { 19102 if (rsm) { 19103 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19104 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19105 /* 19106 * We have retransmitted all. 19107 */ 19108 rack->r_must_retran = 0; 19109 rack->r_ctl.rc_out_at_rto = 0; 19110 } 19111 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19112 /* 19113 * Sending new data will also kill 19114 * the loop. 19115 */ 19116 rack->r_must_retran = 0; 19117 rack->r_ctl.rc_out_at_rto = 0; 19118 } 19119 } 19120 rack->r_ctl.fsb.recwin = recwin; 19121 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19122 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19123 /* 19124 * We hit an RTO and now have past snd_max at the RTO 19125 * clear all the WAS flags. 19126 */ 19127 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19128 } 19129 if (slot) { 19130 /* set the rack tcb into the slot N */ 19131 if ((error == 0) && 19132 rack_use_rfo && 19133 ((flags & (TH_SYN|TH_FIN)) == 0) && 19134 (rsm == NULL) && 19135 (tp->snd_nxt == tp->snd_max) && 19136 (ipoptlen == 0) && 19137 (tp->rcv_numsacks == 0) && 19138 rack->r_fsb_inited && 19139 TCPS_HAVEESTABLISHED(tp->t_state) && 19140 (rack->r_must_retran == 0) && 19141 ((tp->t_flags & TF_NEEDFIN) == 0) && 19142 (len > 0) && (orig_len > 0) && 19143 (orig_len > len) && 19144 ((orig_len - len) >= segsiz) && 19145 ((optlen == 0) || 19146 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19147 /* We can send at least one more MSS using our fsb */ 19148 19149 rack->r_fast_output = 1; 19150 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19151 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19152 rack->r_ctl.fsb.tcp_flags = flags; 19153 rack->r_ctl.fsb.left_to_send = orig_len - len; 19154 if (hw_tls) 19155 rack->r_ctl.fsb.hw_tls = 1; 19156 else 19157 rack->r_ctl.fsb.hw_tls = 0; 19158 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19159 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19160 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19161 (tp->snd_max - tp->snd_una))); 19162 if (rack->r_ctl.fsb.left_to_send < segsiz) 19163 rack->r_fast_output = 0; 19164 else { 19165 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19166 rack->r_ctl.fsb.rfo_apply_push = 1; 19167 else 19168 rack->r_ctl.fsb.rfo_apply_push = 0; 19169 } 19170 } else 19171 rack->r_fast_output = 0; 19172 rack_log_fsb(rack, tp, so, flags, 19173 ipoptlen, orig_len, len, error, 19174 (rsm == NULL), optlen, __LINE__, 2); 19175 } else if (sendalot) { 19176 int ret; 19177 19178 sack_rxmit = 0; 19179 if ((error == 0) && 19180 rack_use_rfo && 19181 ((flags & (TH_SYN|TH_FIN)) == 0) && 19182 (rsm == NULL) && 19183 (ipoptlen == 0) && 19184 (tp->rcv_numsacks == 0) && 19185 (tp->snd_nxt == tp->snd_max) && 19186 (rack->r_must_retran == 0) && 19187 rack->r_fsb_inited && 19188 TCPS_HAVEESTABLISHED(tp->t_state) && 19189 ((tp->t_flags & TF_NEEDFIN) == 0) && 19190 (len > 0) && (orig_len > 0) && 19191 (orig_len > len) && 19192 ((orig_len - len) >= segsiz) && 19193 ((optlen == 0) || 19194 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19195 /* we can use fast_output for more */ 19196 19197 rack->r_fast_output = 1; 19198 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19199 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19200 rack->r_ctl.fsb.tcp_flags = flags; 19201 rack->r_ctl.fsb.left_to_send = orig_len - len; 19202 if (hw_tls) 19203 rack->r_ctl.fsb.hw_tls = 1; 19204 else 19205 rack->r_ctl.fsb.hw_tls = 0; 19206 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19207 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19208 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19209 (tp->snd_max - tp->snd_una))); 19210 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19211 rack->r_fast_output = 0; 19212 } 19213 if (rack->r_fast_output) { 19214 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19215 rack->r_ctl.fsb.rfo_apply_push = 1; 19216 else 19217 rack->r_ctl.fsb.rfo_apply_push = 0; 19218 rack_log_fsb(rack, tp, so, flags, 19219 ipoptlen, orig_len, len, error, 19220 (rsm == NULL), optlen, __LINE__, 3); 19221 error = 0; 19222 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19223 if (ret >= 0) 19224 return (ret); 19225 else if (error) 19226 goto nomore; 19227 19228 } 19229 } 19230 goto again; 19231 } 19232 /* Assure when we leave that snd_nxt will point to top */ 19233 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19234 tp->snd_nxt = tp->snd_max; 19235 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19236 #ifdef TCP_ACCOUNTING 19237 crtsc = get_cyclecount() - ts_val; 19238 if (tot_len_this_send) { 19239 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19240 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19241 } 19242 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19243 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19244 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19245 } 19246 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19247 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19248 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19249 } 19250 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19251 } else { 19252 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19253 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19254 } 19255 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19256 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19257 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19258 } 19259 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19260 } 19261 sched_unpin(); 19262 #endif 19263 if (error == ENOBUFS) 19264 error = 0; 19265 return (error); 19266 } 19267 19268 static void 19269 rack_update_seg(struct tcp_rack *rack) 19270 { 19271 uint32_t orig_val; 19272 19273 orig_val = rack->r_ctl.rc_pace_max_segs; 19274 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19275 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19276 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19277 } 19278 19279 static void 19280 rack_mtu_change(struct tcpcb *tp) 19281 { 19282 /* 19283 * The MSS may have changed 19284 */ 19285 struct tcp_rack *rack; 19286 struct rack_sendmap *rsm; 19287 19288 rack = (struct tcp_rack *)tp->t_fb_ptr; 19289 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19290 /* 19291 * The MTU has changed we need to resend everything 19292 * since all we have sent is lost. We first fix 19293 * up the mtu though. 
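 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged sketch of the fast-output arming bookkeeping used in the two large
 * condition blocks above: when a successful send left more data behind than
 * it transmitted (orig_len > len), the remainder can be emitted from the
 * cached header template, provided at least one full segment remains; PUSH
 * is applied when that remainder is exactly everything still unsent in the
 * socket buffer.  The struct and names are invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdbool.h>
#include <stdint.h>

struct example_fsb_state {
	uint32_t left_to_send;
	bool	 fast_output;
	bool	 apply_push;
};

static void
example_arm_fast_output(struct example_fsb_state *fsb, uint32_t orig_len,
    uint32_t len, uint32_t segsiz, uint32_t sb_avail, uint32_t outstanding)
{
	fsb->left_to_send = orig_len - len;
	fsb->fast_output = (fsb->left_to_send >= segsiz);
	fsb->apply_push = fsb->fast_output &&
	    (fsb->left_to_send == sb_avail - outstanding);
}
#endif

/*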
19294 */ 19295 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19296 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19297 rack_remxt_tmr(tp); 19298 rack->r_fast_output = 0; 19299 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19300 rack->r_ctl.rc_sacked); 19301 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19302 rack->r_must_retran = 1; 19303 /* Mark all inflight to needing to be rxt'd */ 19304 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19305 rsm->r_flags |= RACK_MUST_RXT; 19306 } 19307 } 19308 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19309 /* We don't use snd_nxt to retransmit */ 19310 tp->snd_nxt = tp->snd_max; 19311 } 19312 19313 static int 19314 rack_set_profile(struct tcp_rack *rack, int prof) 19315 { 19316 int err = EINVAL; 19317 if (prof == 1) { 19318 /* pace_always=1 */ 19319 if (rack->rc_always_pace == 0) { 19320 if (tcp_can_enable_pacing() == 0) 19321 return (EBUSY); 19322 } 19323 rack->rc_always_pace = 1; 19324 if (rack->use_fixed_rate || rack->gp_ready) 19325 rack_set_cc_pacing(rack); 19326 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19327 rack->rack_attempt_hdwr_pace = 0; 19328 /* cmpack=1 */ 19329 if (rack_use_cmp_acks) 19330 rack->r_use_cmp_ack = 1; 19331 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19332 rack->r_use_cmp_ack) 19333 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19334 /* scwnd=1 */ 19335 rack->rack_enable_scwnd = 1; 19336 /* dynamic=100 */ 19337 rack->rc_gp_dyn_mul = 1; 19338 /* gp_inc_ca */ 19339 rack->r_ctl.rack_per_of_gp_ca = 100; 19340 /* rrr_conf=3 */ 19341 rack->r_rr_config = 3; 19342 /* npush=2 */ 19343 rack->r_ctl.rc_no_push_at_mrtt = 2; 19344 /* fillcw=1 */ 19345 rack->rc_pace_to_cwnd = 1; 19346 rack->rc_pace_fill_if_rttin_range = 0; 19347 rack->rtt_limit_mul = 0; 19348 /* noprr=1 */ 19349 rack->rack_no_prr = 1; 19350 /* lscwnd=1 */ 19351 rack->r_limit_scw = 1; 19352 /* gp_inc_rec */ 19353 rack->r_ctl.rack_per_of_gp_rec = 90; 19354 err = 0; 19355 19356 } else if (prof == 3) { 19357 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19358 /* pace_always=1 */ 19359 if (rack->rc_always_pace == 0) { 19360 if (tcp_can_enable_pacing() == 0) 19361 return (EBUSY); 19362 } 19363 rack->rc_always_pace = 1; 19364 if (rack->use_fixed_rate || rack->gp_ready) 19365 rack_set_cc_pacing(rack); 19366 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19367 rack->rack_attempt_hdwr_pace = 0; 19368 /* cmpack=1 */ 19369 if (rack_use_cmp_acks) 19370 rack->r_use_cmp_ack = 1; 19371 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19372 rack->r_use_cmp_ack) 19373 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19374 /* scwnd=1 */ 19375 rack->rack_enable_scwnd = 1; 19376 /* dynamic=100 */ 19377 rack->rc_gp_dyn_mul = 1; 19378 /* gp_inc_ca */ 19379 rack->r_ctl.rack_per_of_gp_ca = 100; 19380 /* rrr_conf=3 */ 19381 rack->r_rr_config = 3; 19382 /* npush=2 */ 19383 rack->r_ctl.rc_no_push_at_mrtt = 2; 19384 /* fillcw=2 */ 19385 rack->rc_pace_to_cwnd = 1; 19386 rack->r_fill_less_agg = 1; 19387 rack->rc_pace_fill_if_rttin_range = 0; 19388 rack->rtt_limit_mul = 0; 19389 /* noprr=1 */ 19390 rack->rack_no_prr = 1; 19391 /* lscwnd=1 */ 19392 rack->r_limit_scw = 1; 19393 /* gp_inc_rec */ 19394 rack->r_ctl.rack_per_of_gp_rec = 90; 19395 err = 0; 19396 19397 19398 } else if (prof == 2) { 19399 /* cmpack=1 */ 19400 if (rack->rc_always_pace == 0) { 19401 if (tcp_can_enable_pacing() == 0) 19402 return (EBUSY); 19403 } 19404 rack->rc_always_pace = 1; 19405 if (rack->use_fixed_rate || rack->gp_ready) 19406 
rack_set_cc_pacing(rack); 19407 rack->r_use_cmp_ack = 1; 19408 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19409 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19410 /* pace_always=1 */ 19411 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19412 /* scwnd=1 */ 19413 rack->rack_enable_scwnd = 1; 19414 /* dynamic=100 */ 19415 rack->rc_gp_dyn_mul = 1; 19416 rack->r_ctl.rack_per_of_gp_ca = 100; 19417 /* rrr_conf=3 */ 19418 rack->r_rr_config = 3; 19419 /* npush=2 */ 19420 rack->r_ctl.rc_no_push_at_mrtt = 2; 19421 /* fillcw=1 */ 19422 rack->rc_pace_to_cwnd = 1; 19423 rack->rc_pace_fill_if_rttin_range = 0; 19424 rack->rtt_limit_mul = 0; 19425 /* noprr=1 */ 19426 rack->rack_no_prr = 1; 19427 /* lscwnd=0 */ 19428 rack->r_limit_scw = 0; 19429 err = 0; 19430 } else if (prof == 0) { 19431 /* This changes things back to the default settings */ 19432 err = 0; 19433 if (rack->rc_always_pace) { 19434 tcp_decrement_paced_conn(); 19435 rack_undo_cc_pacing(rack); 19436 rack->rc_always_pace = 0; 19437 } 19438 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19439 rack->rc_always_pace = 1; 19440 if (rack->use_fixed_rate || rack->gp_ready) 19441 rack_set_cc_pacing(rack); 19442 } else 19443 rack->rc_always_pace = 0; 19444 if (rack_dsack_std_based & 0x1) { 19445 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19446 rack->rc_rack_tmr_std_based = 1; 19447 } 19448 if (rack_dsack_std_based & 0x2) { 19449 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19450 rack->rc_rack_use_dsack = 1; 19451 } 19452 if (rack_use_cmp_acks) 19453 rack->r_use_cmp_ack = 1; 19454 else 19455 rack->r_use_cmp_ack = 0; 19456 if (rack_disable_prr) 19457 rack->rack_no_prr = 1; 19458 else 19459 rack->rack_no_prr = 0; 19460 if (rack_gp_no_rec_chg) 19461 rack->rc_gp_no_rec_chg = 1; 19462 else 19463 rack->rc_gp_no_rec_chg = 0; 19464 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19465 rack->r_mbuf_queue = 1; 19466 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19467 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19468 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19469 } else { 19470 rack->r_mbuf_queue = 0; 19471 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19472 } 19473 if (rack_enable_shared_cwnd) 19474 rack->rack_enable_scwnd = 1; 19475 else 19476 rack->rack_enable_scwnd = 0; 19477 if (rack_do_dyn_mul) { 19478 /* When dynamic adjustment is on CA needs to start at 100% */ 19479 rack->rc_gp_dyn_mul = 1; 19480 if (rack_do_dyn_mul >= 100) 19481 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19482 } else { 19483 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19484 rack->rc_gp_dyn_mul = 0; 19485 } 19486 rack->r_rr_config = 0; 19487 rack->r_ctl.rc_no_push_at_mrtt = 0; 19488 rack->rc_pace_to_cwnd = 0; 19489 rack->rc_pace_fill_if_rttin_range = 0; 19490 rack->rtt_limit_mul = 0; 19491 19492 if (rack_enable_hw_pacing) 19493 rack->rack_hdw_pace_ena = 1; 19494 else 19495 rack->rack_hdw_pace_ena = 0; 19496 if (rack_disable_prr) 19497 rack->rack_no_prr = 1; 19498 else 19499 rack->rack_no_prr = 0; 19500 if (rack_limits_scwnd) 19501 rack->r_limit_scw = 1; 19502 else 19503 rack->r_limit_scw = 0; 19504 err = 0; 19505 } 19506 return (err); 19507 } 19508 19509 static int 19510 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19511 { 19512 struct deferred_opt_list *dol; 19513 19514 dol = malloc(sizeof(struct deferred_opt_list), 19515 M_TCPFSB, M_NOWAIT|M_ZERO); 19516 if (dol == NULL) { 19517 /* 19518 * No space yikes -- fail out.. 
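 * (aside: an illustrative summary and sketch follow; this comment resumes
 *  after them)
 */

/*
 * Summary of what rack_set_profile() above configures (taken from the code,
 * not from any external documentation):
 *   profile 1: always pace, optional compressed ACKs, shared cwnd, dynamic
 *              goodput multipliers (CA 100%, recovery 90%), rrr_conf=3,
 *              no-push-at-max=2, fill-cw, no PRR, limited shared cwnd.
 *   profile 3: profile 1 plus the less aggressive fill-cw variant.
 *   profile 2: like profile 1 but compressed ACKs unconditionally on, the
 *              shared-cwnd limit off and the recovery multiplier left alone.
 *   profile 0: fall back to the sysctl-driven defaults.
 * A hedged userland usage sketch follows; it assumes TCP_RACK_PROFILE is
 * exported through <netinet/tcp.h> and that the connection is already on
 * the rack stack.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
example_request_profile(int s, int prof)
{
	/* prof is 0-3, as handled by rack_set_profile(). */
	return (setsockopt(s, IPPROTO_TCP, TCP_RACK_PROFILE,
	    &prof, sizeof(prof)));
}
#endif

/*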
19519 */ 19520 return (0); 19521 } 19522 dol->optname = sopt_name; 19523 dol->optval = loptval; 19524 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19525 return (1); 19526 } 19527 19528 static int 19529 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19530 uint32_t optval, uint64_t loptval) 19531 { 19532 struct epoch_tracker et; 19533 struct sockopt sopt; 19534 struct cc_newreno_opts opt; 19535 struct inpcb *inp = tptoinpcb(tp); 19536 uint64_t val; 19537 int error = 0; 19538 uint16_t ca, ss; 19539 19540 switch (sopt_name) { 19541 19542 case TCP_RACK_DSACK_OPT: 19543 RACK_OPTS_INC(tcp_rack_dsack_opt); 19544 if (optval & 0x1) { 19545 rack->rc_rack_tmr_std_based = 1; 19546 } else { 19547 rack->rc_rack_tmr_std_based = 0; 19548 } 19549 if (optval & 0x2) { 19550 rack->rc_rack_use_dsack = 1; 19551 } else { 19552 rack->rc_rack_use_dsack = 0; 19553 } 19554 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19555 break; 19556 case TCP_RACK_PACING_BETA: 19557 RACK_OPTS_INC(tcp_rack_beta); 19558 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19559 /* This only works for newreno. */ 19560 error = EINVAL; 19561 break; 19562 } 19563 if (rack->rc_pacing_cc_set) { 19564 /* 19565 * Set them into the real CC module 19566 * whats in the rack pcb is the old values 19567 * to be used on restoral/ 19568 */ 19569 sopt.sopt_dir = SOPT_SET; 19570 opt.name = CC_NEWRENO_BETA; 19571 opt.val = optval; 19572 if (CC_ALGO(tp)->ctl_output != NULL) 19573 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19574 else { 19575 error = ENOENT; 19576 break; 19577 } 19578 } else { 19579 /* 19580 * Not pacing yet so set it into our local 19581 * rack pcb storage. 19582 */ 19583 rack->r_ctl.rc_saved_beta.beta = optval; 19584 } 19585 break; 19586 case TCP_RACK_TIMER_SLOP: 19587 RACK_OPTS_INC(tcp_rack_timer_slop); 19588 rack->r_ctl.timer_slop = optval; 19589 if (rack->rc_tp->t_srtt) { 19590 /* 19591 * If we have an SRTT lets update t_rxtcur 19592 * to have the new slop. 19593 */ 19594 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19595 rack_rto_min, rack_rto_max, 19596 rack->r_ctl.timer_slop); 19597 } 19598 break; 19599 case TCP_RACK_PACING_BETA_ECN: 19600 RACK_OPTS_INC(tcp_rack_beta_ecn); 19601 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19602 /* This only works for newreno. */ 19603 error = EINVAL; 19604 break; 19605 } 19606 if (rack->rc_pacing_cc_set) { 19607 /* 19608 * Set them into the real CC module 19609 * whats in the rack pcb is the old values 19610 * to be used on restoral/ 19611 */ 19612 sopt.sopt_dir = SOPT_SET; 19613 opt.name = CC_NEWRENO_BETA_ECN; 19614 opt.val = optval; 19615 if (CC_ALGO(tp)->ctl_output != NULL) 19616 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19617 else 19618 error = ENOENT; 19619 } else { 19620 /* 19621 * Not pacing yet so set it into our local 19622 * rack pcb storage. 
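 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * Hedged userland analogue of the deferred-option machinery above: options
 * that arrive before the stack is ready are queued and replayed later, and
 * the replay necessarily discards the per-option error codes.  The list
 * below is a simplified singly-linked LIFO (the stack itself uses a FIFO
 * TAILQ) and every name is invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>
#include <stdlib.h>

struct ex_defer {
	int		 name;
	uint64_t	 val;
	struct ex_defer	*next;
};

static int
ex_defer_add(struct ex_defer **head, int name, uint64_t val)
{
	struct ex_defer *d = malloc(sizeof(*d));

	if (d == NULL)
		return (0);		/* mirrors the "no space" failure */
	d->name = name;
	d->val = val;
	d->next = *head;
	*head = d;
	return (1);
}

static void
ex_defer_apply(struct ex_defer **head, int (*apply)(int, uint64_t))
{
	struct ex_defer *d;

	while ((d = *head) != NULL) {
		*head = d->next;
		(void)apply(d->name, d->val);	/* error return is lost */
		free(d);
	}
}
#endif

/*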
19623 */ 19624 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19625 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19626 } 19627 break; 19628 case TCP_DEFER_OPTIONS: 19629 RACK_OPTS_INC(tcp_defer_opt); 19630 if (optval) { 19631 if (rack->gp_ready) { 19632 /* Too late */ 19633 error = EINVAL; 19634 break; 19635 } 19636 rack->defer_options = 1; 19637 } else 19638 rack->defer_options = 0; 19639 break; 19640 case TCP_RACK_MEASURE_CNT: 19641 RACK_OPTS_INC(tcp_rack_measure_cnt); 19642 if (optval && (optval <= 0xff)) { 19643 rack->r_ctl.req_measurements = optval; 19644 } else 19645 error = EINVAL; 19646 break; 19647 case TCP_REC_ABC_VAL: 19648 RACK_OPTS_INC(tcp_rec_abc_val); 19649 if (optval > 0) 19650 rack->r_use_labc_for_rec = 1; 19651 else 19652 rack->r_use_labc_for_rec = 0; 19653 break; 19654 case TCP_RACK_ABC_VAL: 19655 RACK_OPTS_INC(tcp_rack_abc_val); 19656 if ((optval > 0) && (optval < 255)) 19657 rack->rc_labc = optval; 19658 else 19659 error = EINVAL; 19660 break; 19661 case TCP_HDWR_UP_ONLY: 19662 RACK_OPTS_INC(tcp_pacing_up_only); 19663 if (optval) 19664 rack->r_up_only = 1; 19665 else 19666 rack->r_up_only = 0; 19667 break; 19668 case TCP_PACING_RATE_CAP: 19669 RACK_OPTS_INC(tcp_pacing_rate_cap); 19670 rack->r_ctl.bw_rate_cap = loptval; 19671 break; 19672 case TCP_RACK_PROFILE: 19673 RACK_OPTS_INC(tcp_profile); 19674 error = rack_set_profile(rack, optval); 19675 break; 19676 case TCP_USE_CMP_ACKS: 19677 RACK_OPTS_INC(tcp_use_cmp_acks); 19678 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19679 /* You can't turn it off once its on! */ 19680 error = EINVAL; 19681 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19682 rack->r_use_cmp_ack = 1; 19683 rack->r_mbuf_queue = 1; 19684 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19685 } 19686 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19687 inp->inp_flags2 |= INP_MBUF_ACKCMP; 19688 break; 19689 case TCP_SHARED_CWND_TIME_LIMIT: 19690 RACK_OPTS_INC(tcp_lscwnd); 19691 if (optval) 19692 rack->r_limit_scw = 1; 19693 else 19694 rack->r_limit_scw = 0; 19695 break; 19696 case TCP_RACK_PACE_TO_FILL: 19697 RACK_OPTS_INC(tcp_fillcw); 19698 if (optval == 0) 19699 rack->rc_pace_to_cwnd = 0; 19700 else { 19701 rack->rc_pace_to_cwnd = 1; 19702 if (optval > 1) 19703 rack->r_fill_less_agg = 1; 19704 } 19705 if ((optval >= rack_gp_rtt_maxmul) && 19706 rack_gp_rtt_maxmul && 19707 (optval < 0xf)) { 19708 rack->rc_pace_fill_if_rttin_range = 1; 19709 rack->rtt_limit_mul = optval; 19710 } else { 19711 rack->rc_pace_fill_if_rttin_range = 0; 19712 rack->rtt_limit_mul = 0; 19713 } 19714 break; 19715 case TCP_RACK_NO_PUSH_AT_MAX: 19716 RACK_OPTS_INC(tcp_npush); 19717 if (optval == 0) 19718 rack->r_ctl.rc_no_push_at_mrtt = 0; 19719 else if (optval < 0xff) 19720 rack->r_ctl.rc_no_push_at_mrtt = optval; 19721 else 19722 error = EINVAL; 19723 break; 19724 case TCP_SHARED_CWND_ENABLE: 19725 RACK_OPTS_INC(tcp_rack_scwnd); 19726 if (optval == 0) 19727 rack->rack_enable_scwnd = 0; 19728 else 19729 rack->rack_enable_scwnd = 1; 19730 break; 19731 case TCP_RACK_MBUF_QUEUE: 19732 /* Now do we use the LRO mbuf-queue feature */ 19733 RACK_OPTS_INC(tcp_rack_mbufq); 19734 if (optval || rack->r_use_cmp_ack) 19735 rack->r_mbuf_queue = 1; 19736 else 19737 rack->r_mbuf_queue = 0; 19738 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19739 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19740 else 19741 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19742 break; 19743 case TCP_RACK_NONRXT_CFG_RATE: 19744 
RACK_OPTS_INC(tcp_rack_cfg_rate); 19745 if (optval == 0) 19746 rack->rack_rec_nonrxt_use_cr = 0; 19747 else 19748 rack->rack_rec_nonrxt_use_cr = 1; 19749 break; 19750 case TCP_NO_PRR: 19751 RACK_OPTS_INC(tcp_rack_noprr); 19752 if (optval == 0) 19753 rack->rack_no_prr = 0; 19754 else if (optval == 1) 19755 rack->rack_no_prr = 1; 19756 else if (optval == 2) 19757 rack->no_prr_addback = 1; 19758 else 19759 error = EINVAL; 19760 break; 19761 case TCP_TIMELY_DYN_ADJ: 19762 RACK_OPTS_INC(tcp_timely_dyn); 19763 if (optval == 0) 19764 rack->rc_gp_dyn_mul = 0; 19765 else { 19766 rack->rc_gp_dyn_mul = 1; 19767 if (optval >= 100) { 19768 /* 19769 * If the user sets something 100 or more 19770 * its the gp_ca value. 19771 */ 19772 rack->r_ctl.rack_per_of_gp_ca = optval; 19773 } 19774 } 19775 break; 19776 case TCP_RACK_DO_DETECTION: 19777 RACK_OPTS_INC(tcp_rack_do_detection); 19778 if (optval == 0) 19779 rack->do_detection = 0; 19780 else 19781 rack->do_detection = 1; 19782 break; 19783 case TCP_RACK_TLP_USE: 19784 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19785 error = EINVAL; 19786 break; 19787 } 19788 RACK_OPTS_INC(tcp_tlp_use); 19789 rack->rack_tlp_threshold_use = optval; 19790 break; 19791 case TCP_RACK_TLP_REDUCE: 19792 /* RACK TLP cwnd reduction (bool) */ 19793 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19794 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19795 break; 19796 /* Pacing related ones */ 19797 case TCP_RACK_PACE_ALWAYS: 19798 /* 19799 * zero is old rack method, 1 is new 19800 * method using a pacing rate. 19801 */ 19802 RACK_OPTS_INC(tcp_rack_pace_always); 19803 if (optval > 0) { 19804 if (rack->rc_always_pace) { 19805 error = EALREADY; 19806 break; 19807 } else if (tcp_can_enable_pacing()) { 19808 rack->rc_always_pace = 1; 19809 if (rack->use_fixed_rate || rack->gp_ready) 19810 rack_set_cc_pacing(rack); 19811 } 19812 else { 19813 error = ENOSPC; 19814 break; 19815 } 19816 } else { 19817 if (rack->rc_always_pace) { 19818 tcp_decrement_paced_conn(); 19819 rack->rc_always_pace = 0; 19820 rack_undo_cc_pacing(rack); 19821 } 19822 } 19823 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19824 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19825 else 19826 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19827 /* A rate may be set irate or other, if so set seg size */ 19828 rack_update_seg(rack); 19829 break; 19830 case TCP_BBR_RACK_INIT_RATE: 19831 RACK_OPTS_INC(tcp_initial_rate); 19832 val = optval; 19833 /* Change from kbits per second to bytes per second */ 19834 val *= 1000; 19835 val /= 8; 19836 rack->r_ctl.init_rate = val; 19837 if (rack->rc_init_win != rack_default_init_window) { 19838 uint32_t win, snt; 19839 19840 /* 19841 * Options don't always get applied 19842 * in the order you think. So in order 19843 * to assure we update a cwnd we need 19844 * to check and see if we are still 19845 * where we should raise the cwnd. 
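 * (aside: an illustrative sketch follows; this comment resumes after it)
 */

/*
 * TCP_BBR_RACK_INIT_RATE above is supplied in kilobits per second and stored
 * as bytes per second; the conversion is simply *1000/8, e.g. 10000 kbps
 * maps to 1250000 B/s.  The helper below is an invented illustration of
 * that arithmetic.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

static uint64_t
example_kbps_to_bytes_per_sec(uint32_t kbps)
{
	uint64_t val = kbps;

	val *= 1000;	/* kilobits -> bits */
	val /= 8;	/* bits -> bytes */
	return (val);
}
#endif

/*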
19846 */ 19847 win = rc_init_window(rack); 19848 if (SEQ_GT(tp->snd_max, tp->iss)) 19849 snt = tp->snd_max - tp->iss; 19850 else 19851 snt = 0; 19852 if ((snt < win) && 19853 (tp->snd_cwnd < win)) 19854 tp->snd_cwnd = win; 19855 } 19856 if (rack->rc_always_pace) 19857 rack_update_seg(rack); 19858 break; 19859 case TCP_BBR_IWINTSO: 19860 RACK_OPTS_INC(tcp_initial_win); 19861 if (optval && (optval <= 0xff)) { 19862 uint32_t win, snt; 19863 19864 rack->rc_init_win = optval; 19865 win = rc_init_window(rack); 19866 if (SEQ_GT(tp->snd_max, tp->iss)) 19867 snt = tp->snd_max - tp->iss; 19868 else 19869 snt = 0; 19870 if ((snt < win) && 19871 (tp->t_srtt | 19872 #ifdef NETFLIX_PEAKRATE 19873 tp->t_maxpeakrate | 19874 #endif 19875 rack->r_ctl.init_rate)) { 19876 /* 19877 * We are not past the initial window 19878 * and we have some bases for pacing, 19879 * so we need to possibly adjust up 19880 * the cwnd. Note even if we don't set 19881 * the cwnd, its still ok to raise the rc_init_win 19882 * which can be used coming out of idle when we 19883 * would have a rate. 19884 */ 19885 if (tp->snd_cwnd < win) 19886 tp->snd_cwnd = win; 19887 } 19888 if (rack->rc_always_pace) 19889 rack_update_seg(rack); 19890 } else 19891 error = EINVAL; 19892 break; 19893 case TCP_RACK_FORCE_MSEG: 19894 RACK_OPTS_INC(tcp_rack_force_max_seg); 19895 if (optval) 19896 rack->rc_force_max_seg = 1; 19897 else 19898 rack->rc_force_max_seg = 0; 19899 break; 19900 case TCP_RACK_PACE_MAX_SEG: 19901 /* Max segments size in a pace in bytes */ 19902 RACK_OPTS_INC(tcp_rack_max_seg); 19903 rack->rc_user_set_max_segs = optval; 19904 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19905 break; 19906 case TCP_RACK_PACE_RATE_REC: 19907 /* Set the fixed pacing rate in Bytes per second ca */ 19908 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19909 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19910 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19911 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19912 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19913 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19914 rack->use_fixed_rate = 1; 19915 if (rack->rc_always_pace) 19916 rack_set_cc_pacing(rack); 19917 rack_log_pacing_delay_calc(rack, 19918 rack->r_ctl.rc_fixed_pacing_rate_ss, 19919 rack->r_ctl.rc_fixed_pacing_rate_ca, 19920 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19921 __LINE__, NULL,0); 19922 break; 19923 19924 case TCP_RACK_PACE_RATE_SS: 19925 /* Set the fixed pacing rate in Bytes per second ca */ 19926 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19927 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19928 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19929 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19930 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19931 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19932 rack->use_fixed_rate = 1; 19933 if (rack->rc_always_pace) 19934 rack_set_cc_pacing(rack); 19935 rack_log_pacing_delay_calc(rack, 19936 rack->r_ctl.rc_fixed_pacing_rate_ss, 19937 rack->r_ctl.rc_fixed_pacing_rate_ca, 19938 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19939 __LINE__, NULL, 0); 19940 break; 19941 19942 case TCP_RACK_PACE_RATE_CA: 19943 /* Set the fixed pacing rate in Bytes per second ca */ 19944 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19945 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19946 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19947 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19948 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19949 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19950 rack->use_fixed_rate = 1; 19951 
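/*
 * Hedged sketch of the fixed-pacing-rate pattern used by the three
 * TCP_RACK_PACE_RATE_* cases around here: whichever of the slow-start,
 * congestion-avoidance or recovery rates is set first also seeds the other
 * two if they are still zero, so one setsockopt() yields a usable fixed
 * rate in every phase.  The struct and names are invented for the example.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <stdint.h>

struct ex_fixed_rates {
	uint32_t ss;	/* slow start, bytes/sec */
	uint32_t ca;	/* congestion avoidance, bytes/sec */
	uint32_t rec;	/* recovery, bytes/sec */
};

static void
ex_set_rate_ca(struct ex_fixed_rates *r, uint32_t bps)
{
	r->ca = bps;
	if (r->ss == 0)
		r->ss = bps;
	if (r->rec == 0)
		r->rec = bps;
}
#endif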
if (rack->rc_always_pace) 19952 rack_set_cc_pacing(rack); 19953 rack_log_pacing_delay_calc(rack, 19954 rack->r_ctl.rc_fixed_pacing_rate_ss, 19955 rack->r_ctl.rc_fixed_pacing_rate_ca, 19956 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19957 __LINE__, NULL, 0); 19958 break; 19959 case TCP_RACK_GP_INCREASE_REC: 19960 RACK_OPTS_INC(tcp_gp_inc_rec); 19961 rack->r_ctl.rack_per_of_gp_rec = optval; 19962 rack_log_pacing_delay_calc(rack, 19963 rack->r_ctl.rack_per_of_gp_ss, 19964 rack->r_ctl.rack_per_of_gp_ca, 19965 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19966 __LINE__, NULL, 0); 19967 break; 19968 case TCP_RACK_GP_INCREASE_CA: 19969 RACK_OPTS_INC(tcp_gp_inc_ca); 19970 ca = optval; 19971 if (ca < 100) { 19972 /* 19973 * We don't allow any reduction 19974 * over the GP b/w. 19975 */ 19976 error = EINVAL; 19977 break; 19978 } 19979 rack->r_ctl.rack_per_of_gp_ca = ca; 19980 rack_log_pacing_delay_calc(rack, 19981 rack->r_ctl.rack_per_of_gp_ss, 19982 rack->r_ctl.rack_per_of_gp_ca, 19983 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19984 __LINE__, NULL, 0); 19985 break; 19986 case TCP_RACK_GP_INCREASE_SS: 19987 RACK_OPTS_INC(tcp_gp_inc_ss); 19988 ss = optval; 19989 if (ss < 100) { 19990 /* 19991 * We don't allow any reduction 19992 * over the GP b/w. 19993 */ 19994 error = EINVAL; 19995 break; 19996 } 19997 rack->r_ctl.rack_per_of_gp_ss = ss; 19998 rack_log_pacing_delay_calc(rack, 19999 rack->r_ctl.rack_per_of_gp_ss, 20000 rack->r_ctl.rack_per_of_gp_ca, 20001 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20002 __LINE__, NULL, 0); 20003 break; 20004 case TCP_RACK_RR_CONF: 20005 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20006 if (optval && optval <= 3) 20007 rack->r_rr_config = optval; 20008 else 20009 rack->r_rr_config = 0; 20010 break; 20011 case TCP_HDWR_RATE_CAP: 20012 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20013 if (optval) { 20014 if (rack->r_rack_hw_rate_caps == 0) 20015 rack->r_rack_hw_rate_caps = 1; 20016 else 20017 error = EALREADY; 20018 } else { 20019 rack->r_rack_hw_rate_caps = 0; 20020 } 20021 break; 20022 case TCP_BBR_HDWR_PACE: 20023 RACK_OPTS_INC(tcp_hdwr_pacing); 20024 if (optval){ 20025 if (rack->rack_hdrw_pacing == 0) { 20026 rack->rack_hdw_pace_ena = 1; 20027 rack->rack_attempt_hdwr_pace = 0; 20028 } else 20029 error = EALREADY; 20030 } else { 20031 rack->rack_hdw_pace_ena = 0; 20032 #ifdef RATELIMIT 20033 if (rack->r_ctl.crte != NULL) { 20034 rack->rack_hdrw_pacing = 0; 20035 rack->rack_attempt_hdwr_pace = 0; 20036 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20037 rack->r_ctl.crte = NULL; 20038 } 20039 #endif 20040 } 20041 break; 20042 /* End Pacing related ones */ 20043 case TCP_RACK_PRR_SENDALOT: 20044 /* Allow PRR to send more than one seg */ 20045 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20046 rack->r_ctl.rc_prr_sendalot = optval; 20047 break; 20048 case TCP_RACK_MIN_TO: 20049 /* Minimum time between rack t-o's in ms */ 20050 RACK_OPTS_INC(tcp_rack_min_to); 20051 rack->r_ctl.rc_min_to = optval; 20052 break; 20053 case TCP_RACK_EARLY_SEG: 20054 /* If early recovery max segments */ 20055 RACK_OPTS_INC(tcp_rack_early_seg); 20056 rack->r_ctl.rc_early_recovery_segs = optval; 20057 break; 20058 case TCP_RACK_ENABLE_HYSTART: 20059 { 20060 if (optval) { 20061 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 20062 if (rack_do_hystart > RACK_HYSTART_ON) 20063 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 20064 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 20065 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 20066 } else { 20067 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 20068 
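/*
 * Hedged note on the goodput multipliers handled above: they are percentages
 * of the measured goodput bandwidth, so values below 100 would pace slower
 * than what was measured and are rejected with EINVAL.  The helper below is
 * an invented illustration of that validation.
 */
#if 0	/* illustrative sketch only -- not part of the RACK stack */
#include <errno.h>
#include <stdint.h>

static int
example_set_gp_percent(uint16_t *slot, uint32_t optval)
{
	if (optval < 100)
		return (EINVAL);	/* no pacing below measured goodput */
	*slot = (uint16_t)optval;
	return (0);
}
#endif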
} 20069 } 20070 break; 20071 case TCP_RACK_REORD_THRESH: 20072 /* RACK reorder threshold (shift amount) */ 20073 RACK_OPTS_INC(tcp_rack_reord_thresh); 20074 if ((optval > 0) && (optval < 31)) 20075 rack->r_ctl.rc_reorder_shift = optval; 20076 else 20077 error = EINVAL; 20078 break; 20079 case TCP_RACK_REORD_FADE: 20080 /* Does reordering fade after ms time */ 20081 RACK_OPTS_INC(tcp_rack_reord_fade); 20082 rack->r_ctl.rc_reorder_fade = optval; 20083 break; 20084 case TCP_RACK_TLP_THRESH: 20085 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20086 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20087 if (optval) 20088 rack->r_ctl.rc_tlp_threshold = optval; 20089 else 20090 error = EINVAL; 20091 break; 20092 case TCP_BBR_USE_RACK_RR: 20093 RACK_OPTS_INC(tcp_rack_rr); 20094 if (optval) 20095 rack->use_rack_rr = 1; 20096 else 20097 rack->use_rack_rr = 0; 20098 break; 20099 case TCP_FAST_RSM_HACK: 20100 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20101 if (optval) 20102 rack->fast_rsm_hack = 1; 20103 else 20104 rack->fast_rsm_hack = 0; 20105 break; 20106 case TCP_RACK_PKT_DELAY: 20107 /* RACK added ms i.e. rack-rtt + reord + N */ 20108 RACK_OPTS_INC(tcp_rack_pkt_delay); 20109 rack->r_ctl.rc_pkt_delay = optval; 20110 break; 20111 case TCP_DELACK: 20112 RACK_OPTS_INC(tcp_rack_delayed_ack); 20113 if (optval == 0) 20114 tp->t_delayed_ack = 0; 20115 else 20116 tp->t_delayed_ack = 1; 20117 if (tp->t_flags & TF_DELACK) { 20118 tp->t_flags &= ~TF_DELACK; 20119 tp->t_flags |= TF_ACKNOW; 20120 NET_EPOCH_ENTER(et); 20121 rack_output(tp); 20122 NET_EPOCH_EXIT(et); 20123 } 20124 break; 20125 20126 case TCP_BBR_RACK_RTT_USE: 20127 RACK_OPTS_INC(tcp_rack_rtt_use); 20128 if ((optval != USE_RTT_HIGH) && 20129 (optval != USE_RTT_LOW) && 20130 (optval != USE_RTT_AVG)) 20131 error = EINVAL; 20132 else 20133 rack->r_ctl.rc_rate_sample_method = optval; 20134 break; 20135 case TCP_DATA_AFTER_CLOSE: 20136 RACK_OPTS_INC(tcp_data_after_close); 20137 if (optval) 20138 rack->rc_allow_data_af_clo = 1; 20139 else 20140 rack->rc_allow_data_af_clo = 0; 20141 break; 20142 default: 20143 break; 20144 } 20145 #ifdef NETFLIX_STATS 20146 tcp_log_socket_option(tp, sopt_name, optval, error); 20147 #endif 20148 return (error); 20149 } 20150 20151 20152 static void 20153 rack_apply_deferred_options(struct tcp_rack *rack) 20154 { 20155 struct deferred_opt_list *dol, *sdol; 20156 uint32_t s_optval; 20157 20158 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20159 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20160 /* Disadvantage of deferal is you loose the error return */ 20161 s_optval = (uint32_t)dol->optval; 20162 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20163 free(dol, M_TCPDO); 20164 } 20165 } 20166 20167 static void 20168 rack_hw_tls_change(struct tcpcb *tp, int chg) 20169 { 20170 /* 20171 * HW tls state has changed.. fix all 20172 * rsm's in flight. 
	 */
	struct tcp_rack *rack;
	struct rack_sendmap *rsm;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
		if (chg)
			rsm->r_hw_tls = 1;
		else
			rsm->r_hw_tls = 0;
	}
	if (chg)
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
}

static int
rack_pru_options(struct tcpcb *tp, int flags)
{
	if (flags & PRUS_OOB)
		return (EOPNOTSUPP);
	return (0);
}

static struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_do_queued_segments = ctf_do_queued_segments,
	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok,
	.tfb_tcp_mtu_chg = rack_mtu_change,
	.tfb_pru_options = rack_pru_options,
	.tfb_hwtls_change = rack_hw_tls_change,
	.tfb_compute_pipe = rack_compute_pipe,
	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
};

/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments. When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcpcb *tp;
	struct tcp_rack *rack;
	uint64_t loptval;
	int32_t error = 0, optval;

	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
#ifdef INET6
	ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif
#ifdef INET
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif

	switch (sopt->sopt_level) {
#ifdef INET6
	case IPPROTO_IPV6:
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		case IPV6_USE_MIN_MTU:
			tcp6_use_min_mtu(tp);
			break;
		case IPV6_TCLASS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			    (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK);
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef INET
	case IPPROTO_IP:
		switch (sopt->sopt_name) {
		case IP_TOS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			break;
		case IP_TTL:
			/*
			 * The TTL has changed, update the fsb.
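			 * The fsb holds a pre-built IP/TCP header for the
			 * fast send path, so per-connection IP level option
			 * changes have to be copied into that cached header.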
			 */
			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
	}

	switch (sopt->sopt_name) {
	case TCP_RACK_TLP_REDUCE:		/* URL:tlp_reduce */
	/* Pacing related ones */
	case TCP_RACK_PACE_ALWAYS:		/* URL:pace_always */
	case TCP_BBR_RACK_INIT_RATE:		/* URL:irate */
	case TCP_BBR_IWINTSO:			/* URL:tso_iwin */
	case TCP_RACK_PACE_MAX_SEG:		/* URL:pace_max_seg */
	case TCP_RACK_FORCE_MSEG:		/* URL:force_max_seg */
	case TCP_RACK_PACE_RATE_CA:		/* URL:pr_ca */
	case TCP_RACK_PACE_RATE_SS:		/* URL:pr_ss */
	case TCP_RACK_PACE_RATE_REC:		/* URL:pr_rec */
	case TCP_RACK_GP_INCREASE_CA:		/* URL:gp_inc_ca */
	case TCP_RACK_GP_INCREASE_SS:		/* URL:gp_inc_ss */
	case TCP_RACK_GP_INCREASE_REC:		/* URL:gp_inc_rec */
	case TCP_RACK_RR_CONF:			/* URL:rrr_conf */
	case TCP_BBR_HDWR_PACE:			/* URL:hdwrpace */
	case TCP_HDWR_RATE_CAP:			/* URL:hdwrcap boolean */
	case TCP_PACING_RATE_CAP:		/* URL:cap -- used by side-channel */
	case TCP_HDWR_UP_ONLY:			/* URL:uponly -- hardware pacing boolean */
	/* End pacing related */
	case TCP_FAST_RSM_HACK:			/* URL:frsm_hack */
	case TCP_DELACK:			/* URL:delack (in base TCP i.e. tcp_hints along with cc etc) */
	case TCP_RACK_PRR_SENDALOT:		/* URL:prr_sendalot */
	case TCP_RACK_MIN_TO:			/* URL:min_to */
	case TCP_RACK_EARLY_SEG:		/* URL:early_seg */
	case TCP_RACK_REORD_THRESH:		/* URL:reord_thresh */
	case TCP_RACK_REORD_FADE:		/* URL:reord_fade */
	case TCP_RACK_TLP_THRESH:		/* URL:tlp_thresh */
	case TCP_RACK_PKT_DELAY:		/* URL:pkt_delay */
	case TCP_RACK_TLP_USE:			/* URL:tlp_use */
	case TCP_BBR_RACK_RTT_USE:		/* URL:rttuse */
	case TCP_BBR_USE_RACK_RR:		/* URL:rackrr */
	case TCP_RACK_DO_DETECTION:		/* URL:detect */
	case TCP_NO_PRR:			/* URL:noprr */
	case TCP_TIMELY_DYN_ADJ:		/* URL:dynamic */
	case TCP_DATA_AFTER_CLOSE:		/* no URL */
	case TCP_RACK_NONRXT_CFG_RATE:		/* URL:nonrxtcr */
	case TCP_SHARED_CWND_ENABLE:		/* URL:scwnd */
	case TCP_RACK_MBUF_QUEUE:		/* URL:mqueue */
	case TCP_RACK_NO_PUSH_AT_MAX:		/* URL:npush */
	case TCP_RACK_PACE_TO_FILL:		/* URL:fillcw */
	case TCP_SHARED_CWND_TIME_LIMIT:	/* URL:lscwnd */
	case TCP_RACK_PROFILE:			/* URL:profile */
	case TCP_USE_CMP_ACKS:			/* URL:cmpack */
	case TCP_RACK_ABC_VAL:			/* URL:labc */
	case TCP_REC_ABC_VAL:			/* URL:reclabc */
	case TCP_RACK_MEASURE_CNT:		/* URL:measurecnt */
	case TCP_DEFER_OPTIONS:			/* URL:defer */
	case TCP_RACK_DSACK_OPT:		/* URL:dsack */
	case TCP_RACK_PACING_BETA:		/* URL:pacing_beta */
	case TCP_RACK_PACING_BETA_ECN:		/* URL:pacing_beta_ecn */
	case TCP_RACK_TIMER_SLOP:		/* URL:timer_slop */
	case TCP_RACK_ENABLE_HYSTART:		/* URL:hystart */
		break;
	default:
		/* Filter off all unknown options to the base stack */
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (sopt->sopt_name == TCP_PACING_RATE_CAP) {
		error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval));
		/*
		 * We truncate it down to 32 bits for the socket-option trace;
		 * this means rates > 34Gbps won't show right, but that's
		 * probably ok.
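		 * (A 32 bit byte rate tops out at 4,294,967,295 bytes/sec,
		 * which is roughly 34.4 Gbit/s.)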
20355 */ 20356 optval = (uint32_t)loptval; 20357 } else { 20358 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20359 /* Save it in 64 bit form too */ 20360 loptval = optval; 20361 } 20362 if (error) 20363 return (error); 20364 INP_WLOCK(inp); 20365 if (inp->inp_flags & INP_DROPPED) { 20366 INP_WUNLOCK(inp); 20367 return (ECONNRESET); 20368 } 20369 if (tp->t_fb != &__tcp_rack) { 20370 INP_WUNLOCK(inp); 20371 return (ENOPROTOOPT); 20372 } 20373 if (rack->defer_options && (rack->gp_ready == 0) && 20374 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20375 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20376 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20377 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20378 /* Options are beind deferred */ 20379 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20380 INP_WUNLOCK(inp); 20381 return (0); 20382 } else { 20383 /* No memory to defer, fail */ 20384 INP_WUNLOCK(inp); 20385 return (ENOMEM); 20386 } 20387 } 20388 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20389 INP_WUNLOCK(inp); 20390 return (error); 20391 } 20392 20393 static void 20394 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20395 { 20396 20397 INP_WLOCK_ASSERT(tptoinpcb(tp)); 20398 bzero(ti, sizeof(*ti)); 20399 20400 ti->tcpi_state = tp->t_state; 20401 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20402 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20403 if (tp->t_flags & TF_SACK_PERMIT) 20404 ti->tcpi_options |= TCPI_OPT_SACK; 20405 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20406 ti->tcpi_options |= TCPI_OPT_WSCALE; 20407 ti->tcpi_snd_wscale = tp->snd_scale; 20408 ti->tcpi_rcv_wscale = tp->rcv_scale; 20409 } 20410 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 20411 ti->tcpi_options |= TCPI_OPT_ECN; 20412 if (tp->t_flags & TF_FASTOPEN) 20413 ti->tcpi_options |= TCPI_OPT_TFO; 20414 /* still kept in ticks is t_rcvtime */ 20415 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20416 /* Since we hold everything in precise useconds this is easy */ 20417 ti->tcpi_rtt = tp->t_srtt; 20418 ti->tcpi_rttvar = tp->t_rttvar; 20419 ti->tcpi_rto = tp->t_rxtcur; 20420 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20421 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20422 /* 20423 * FreeBSD-specific extension fields for tcp_info. 20424 */ 20425 ti->tcpi_rcv_space = tp->rcv_wnd; 20426 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20427 ti->tcpi_snd_wnd = tp->snd_wnd; 20428 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
		/*
		 * Beta is the congestion control value for NewReno that influences how
		 * much of a backoff happens when loss is detected. It is normally set
		 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
		 * when you exit recovery.
		 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
		/*
		 * Beta_ecn is the congestion control value for NewReno that influences how
		 * much of a backoff happens when an ECN mark is detected. It is normally set
		 * to 80 for 80%, i.e. the cwnd is reduced by 20% of its previous value when
		 * you exit recovery. Note that classic ECN has a beta of 50; it is only
		 * ABE ECN that uses this "less" value, but we do too with pacing :)
		 */

	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
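			 * (A non-zero rc_pacing_cc_set means rack has already
			 * pushed its own beta values into the CC module's
			 * data, so the live value is the one actually in use.)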
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
	break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

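		/*
		 * Export this stack's tunables as a sysctl tree under
		 * net.inet.tcp.<stack name>; the tree is torn down again
		 * via sysctl_ctx_free() on failure or module unload.
		 */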
		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
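
/*
 * Illustrative userland sketch (not part of this file's build): with the
 * module loaded and "rack" listed in net.inet.tcp.functions_available, an
 * application would normally switch a connected socket onto this stack and
 * then tune it through setsockopt(2). The socket "s" and the choice of
 * TCP_RACK_PACE_ALWAYS below are only examples.
 *
 *	struct tcp_function_set tfs;
 *	int one = 1;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs)) == -1)
 *		err(1, "TCP_FUNCTION_BLK");
 *	if (setsockopt(s, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS, &one, sizeof(one)) == -1)
 *		err(1, "TCP_RACK_PACE_ALWAYS");
 */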