1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_inet.h" 31 #include "opt_inet6.h" 32 #include "opt_ipsec.h" 33 #include "opt_tcpdebug.h" 34 #include "opt_ratelimit.h" 35 #include "opt_kern_tls.h" 36 #include <sys/param.h> 37 #include <sys/arb.h> 38 #include <sys/module.h> 39 #include <sys/kernel.h> 40 #ifdef TCP_HHOOK 41 #include <sys/hhook.h> 42 #endif 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/lock.h> 46 #include <sys/mutex.h> 47 #include <sys/mbuf.h> 48 #include <sys/proc.h> /* for proc0 declaration */ 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sysctl.h> 52 #include <sys/systm.h> 53 #ifdef STATS 54 #include <sys/qmath.h> 55 #include <sys/tree.h> 56 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 57 #else 58 #include <sys/tree.h> 59 #endif 60 #include <sys/refcount.h> 61 #include <sys/queue.h> 62 #include <sys/tim_filter.h> 63 #include <sys/smp.h> 64 #include <sys/kthread.h> 65 #include <sys/kern_prefetch.h> 66 #include <sys/protosw.h> 67 #ifdef TCP_ACCOUNTING 68 #include <sys/sched.h> 69 #include <machine/cpu.h> 70 #endif 71 #include <vm/uma.h> 72 73 #include <net/route.h> 74 #include <net/route/nhop.h> 75 #include <net/vnet.h> 76 77 #define TCPSTATES /* for logging */ 78 79 #include <netinet/in.h> 80 #include <netinet/in_kdtrace.h> 81 #include <netinet/in_pcb.h> 82 #include <netinet/ip.h> 83 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 84 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 85 #include <netinet/ip_var.h> 86 #include <netinet/ip6.h> 87 #include <netinet6/in6_pcb.h> 88 #include <netinet6/ip6_var.h> 89 #include <netinet/tcp.h> 90 #define TCPOUTFLAGS 91 #include <netinet/tcp_fsm.h> 92 #include <netinet/tcp_log_buf.h> 93 #include <netinet/tcp_seq.h> 94 #include <netinet/tcp_timer.h> 95 #include <netinet/tcp_var.h> 96 #include <netinet/tcp_syncache.h> 97 #include <netinet/tcp_hpts.h> 98 #include <netinet/tcp_ratelimit.h> 99 #include <netinet/tcp_accounting.h> 100 #include <netinet/tcpip.h> 101 #include <netinet/cc/cc.h> 102 #include <netinet/cc/cc_newreno.h> 103 #include <netinet/tcp_fastopen.h> 104 #include 
<netinet/tcp_lro.h> 105 #ifdef NETFLIX_SHARED_CWND 106 #include <netinet/tcp_shared_cwnd.h> 107 #endif 108 #ifdef TCPDEBUG 109 #include <netinet/tcp_debug.h> 110 #endif /* TCPDEBUG */ 111 #ifdef TCP_OFFLOAD 112 #include <netinet/tcp_offload.h> 113 #endif 114 #ifdef INET6 115 #include <netinet6/tcp6_var.h> 116 #endif 117 #include <netinet/tcp_ecn.h> 118 119 #include <netipsec/ipsec_support.h> 120 121 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 122 #include <netipsec/ipsec.h> 123 #include <netipsec/ipsec6.h> 124 #endif /* IPSEC */ 125 126 #include <netinet/udp.h> 127 #include <netinet/udp_var.h> 128 #include <machine/in_cksum.h> 129 130 #ifdef MAC 131 #include <security/mac/mac_framework.h> 132 #endif 133 #include "sack_filter.h" 134 #include "tcp_rack.h" 135 #include "rack_bbr_common.h" 136 137 uma_zone_t rack_zone; 138 uma_zone_t rack_pcb_zone; 139 140 #ifndef TICKS2SBT 141 #define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t))) 142 #endif 143 144 VNET_DECLARE(uint32_t, newreno_beta); 145 VNET_DECLARE(uint32_t, newreno_beta_ecn); 146 #define V_newreno_beta VNET(newreno_beta) 147 #define V_newreno_beta_ecn VNET(newreno_beta_ecn) 148 149 150 MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block"); 151 MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options"); 152 153 struct sysctl_ctx_list rack_sysctl_ctx; 154 struct sysctl_oid *rack_sysctl_root; 155 156 #define CUM_ACKED 1 157 #define SACKED 2 158 159 /* 160 * The RACK module incorporates a number of 161 * TCP ideas that have been put out into the IETF 162 * over the last few years: 163 * - Matt Mathis's Rate Halving which slowly drops 164 * the congestion window so that the ack clock can 165 * be maintained during a recovery. 166 * - Yuchung Cheng's RACK TCP (for which its named) that 167 * will stop us using the number of dup acks and instead 168 * use time as the gage of when we retransmit. 169 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft 170 * of Dukkipati et.al. 171 * RACK depends on SACK, so if an endpoint arrives that 172 * cannot do SACK the state machine below will shuttle the 173 * connection back to using the "default" TCP stack that is 174 * in FreeBSD. 175 * 176 * To implement RACK the original TCP stack was first decomposed 177 * into a functional state machine with individual states 178 * for each of the possible TCP connection states. The do_segment 179 * functions role in life is to mandate the connection supports SACK 180 * initially and then assure that the RACK state matches the conenction 181 * state before calling the states do_segment function. Each 182 * state is simplified due to the fact that the original do_segment 183 * has been decomposed and we *know* what state we are in (no 184 * switches on the state) and all tests for SACK are gone. This 185 * greatly simplifies what each state does. 186 * 187 * TCP output is also over-written with a new version since it 188 * must maintain the new rack scoreboard. 
189 * 190 */ 191 static int32_t rack_tlp_thresh = 1; 192 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */ 193 static int32_t rack_tlp_use_greater = 1; 194 static int32_t rack_reorder_thresh = 2; 195 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000 196 * - 60 seconds */ 197 static uint8_t rack_req_measurements = 1; 198 /* Attack threshold detections */ 199 static uint32_t rack_highest_sack_thresh_seen = 0; 200 static uint32_t rack_highest_move_thresh_seen = 0; 201 static int32_t rack_enable_hw_pacing = 0; /* Due to CCSP keep it off by default */ 202 static int32_t rack_hw_pace_extra_slots = 2; /* 2 extra MSS time betweens */ 203 static int32_t rack_hw_rate_caps = 1; /* 1; */ 204 static int32_t rack_hw_rate_min = 0; /* 1500000;*/ 205 static int32_t rack_hw_rate_to_low = 0; /* 1200000; */ 206 static int32_t rack_hw_up_only = 1; 207 static int32_t rack_stats_gets_ms_rtt = 1; 208 static int32_t rack_prr_addbackmax = 2; 209 static int32_t rack_do_hystart = 0; 210 static int32_t rack_apply_rtt_with_reduced_conf = 0; 211 212 static int32_t rack_pkt_delay = 1000; 213 static int32_t rack_send_a_lot_in_prr = 1; 214 static int32_t rack_min_to = 1000; /* Number of microsecond min timeout */ 215 static int32_t rack_verbose_logging = 0; 216 static int32_t rack_ignore_data_after_close = 1; 217 static int32_t rack_enable_shared_cwnd = 1; 218 static int32_t rack_use_cmp_acks = 1; 219 static int32_t rack_use_fsb = 1; 220 static int32_t rack_use_rfo = 1; 221 static int32_t rack_use_rsm_rfo = 1; 222 static int32_t rack_max_abc_post_recovery = 2; 223 static int32_t rack_client_low_buf = 0; 224 static int32_t rack_dsack_std_based = 0x3; /* bit field bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */ 225 #ifdef TCP_ACCOUNTING 226 static int32_t rack_tcp_accounting = 0; 227 #endif 228 static int32_t rack_limits_scwnd = 1; 229 static int32_t rack_enable_mqueue_for_nonpaced = 0; 230 static int32_t rack_disable_prr = 0; 231 static int32_t use_rack_rr = 1; 232 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/ca)? */ 233 static int32_t rack_persist_min = 250000; /* 250usec */ 234 static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */ 235 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */ 236 static int32_t rack_default_init_window = 0; /* Use system default */ 237 static int32_t rack_limit_time_with_srtt = 0; 238 static int32_t rack_autosndbuf_inc = 20; /* In percentage form */ 239 static int32_t rack_enobuf_hw_boost_mult = 2; /* How many times the hw rate we boost slot using time_between */ 240 static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */ 241 static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */ 242 static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */ 243 244 /* 245 * Currently regular tcp has a rto_min of 30ms 246 * the backoff goes 12 times so that ends up 247 * being a total of 122.850 seconds before a 248 * connection is killed. 
249 */ 250 static uint32_t rack_def_data_window = 20; 251 static uint32_t rack_goal_bdp = 2; 252 static uint32_t rack_min_srtts = 1; 253 static uint32_t rack_min_measure_usec = 0; 254 static int32_t rack_tlp_min = 10000; /* 10ms */ 255 static int32_t rack_rto_min = 30000; /* 30,000 usec same as main freebsd */ 256 static int32_t rack_rto_max = 4000000; /* 4 seconds in usec's */ 257 static const int32_t rack_free_cache = 2; 258 static int32_t rack_hptsi_segments = 40; 259 static int32_t rack_rate_sample_method = USE_RTT_LOW; 260 static int32_t rack_pace_every_seg = 0; 261 static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */ 262 static int32_t rack_slot_reduction = 4; 263 static int32_t rack_wma_divisor = 8; /* For WMA calculation */ 264 static int32_t rack_cwnd_block_ends_measure = 0; 265 static int32_t rack_rwnd_block_ends_measure = 0; 266 static int32_t rack_def_profile = 0; 267 268 static int32_t rack_lower_cwnd_at_tlp = 0; 269 static int32_t rack_limited_retran = 0; 270 static int32_t rack_always_send_oldest = 0; 271 static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE; 272 273 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */ 274 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */ 275 static uint16_t rack_per_of_gp_rec = 200; /* 200 % of bw */ 276 277 /* Probertt */ 278 static uint16_t rack_per_of_gp_probertt = 60; /* 60% of bw */ 279 static uint16_t rack_per_of_gp_lowthresh = 40; /* 40% is bottom */ 280 static uint16_t rack_per_of_gp_probertt_reduce = 10; /* 10% reduction */ 281 static uint16_t rack_atexit_prtt_hbp = 130; /* Clamp to 130% on exit prtt if highly buffered path */ 282 static uint16_t rack_atexit_prtt = 130; /* Clamp to 100% on exit prtt if non highly buffered path */ 283 284 static uint32_t rack_max_drain_wait = 2; /* How man gp srtt's before we give up draining */ 285 static uint32_t rack_must_drain = 1; /* How many GP srtt's we *must* wait */ 286 static uint32_t rack_probertt_use_min_rtt_entry = 1; /* Use the min to calculate the goal else gp_srtt */ 287 static uint32_t rack_probertt_use_min_rtt_exit = 0; 288 static uint32_t rack_probe_rtt_sets_cwnd = 0; 289 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */ 290 static uint32_t rack_time_between_probertt = 9600000; /* 9.6 sec in usecs */ 291 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top fraction */ 292 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bottom fraction */ 293 static uint32_t rack_min_probertt_hold = 40000; /* Equal to delayed ack time */ 294 static uint32_t rack_probertt_filter_life = 10000000; 295 static uint32_t rack_probertt_lower_within = 10; 296 static uint32_t rack_min_rtt_movement = 250000; /* Must move at least 250ms (in microseconds) to count as a lowering */ 297 static int32_t rack_pace_one_seg = 0; /* Shall we pace for less than 1.4Meg 1MSS at a time */ 298 static int32_t rack_probertt_clear_is = 1; 299 static int32_t rack_max_drain_hbp = 1; /* Extra drain times gpsrtt for highly buffered paths */ 300 static int32_t rack_hbp_thresh = 3; /* what is the divisor max_rtt/min_rtt to decided a hbp */ 301 302 /* Part of pacing */ 303 static int32_t rack_max_per_above = 30; /* When we go to increment stop if above 100+this% */ 304 305 /* Timely information */ 306 /* Combine these two gives the range of 'no change' to bw */ 307 /* ie the up/down provide the upper and lower bound */ 308 static int32_t 
rack_gp_per_bw_mul_up = 2; /* 2% */ 309 static int32_t rack_gp_per_bw_mul_down = 4; /* 4% */ 310 static int32_t rack_gp_rtt_maxmul = 3; /* 3 x maxmin */ 311 static int32_t rack_gp_rtt_minmul = 1; /* minrtt + (minrtt/mindiv) is lower rtt */ 312 static int32_t rack_gp_rtt_mindiv = 4; /* minrtt + (minrtt * minmul/mindiv) is lower rtt */ 313 static int32_t rack_gp_decrease_per = 20; /* 20% decrease in multiplier */ 314 static int32_t rack_gp_increase_per = 2; /* 2% increase in multiplier */ 315 static int32_t rack_per_lower_bound = 50; /* Don't allow to drop below this multiplier */ 316 static int32_t rack_per_upper_bound_ss = 0; /* Don't allow SS to grow above this */ 317 static int32_t rack_per_upper_bound_ca = 0; /* Don't allow CA to grow above this */ 318 static int32_t rack_do_dyn_mul = 0; /* Are the rack gp multipliers dynamic */ 319 static int32_t rack_gp_no_rec_chg = 1; /* Prohibit recovery from reducing it's multiplier */ 320 static int32_t rack_timely_dec_clear = 6; /* Do we clear decrement count at a value (6)? */ 321 static int32_t rack_timely_max_push_rise = 3; /* One round of pushing */ 322 static int32_t rack_timely_max_push_drop = 3; /* Three round of pushing */ 323 static int32_t rack_timely_min_segs = 4; /* 4 segment minimum */ 324 static int32_t rack_use_max_for_nobackoff = 0; 325 static int32_t rack_timely_int_timely_only = 0; /* do interim timely's only use the timely algo (no b/w changes)? */ 326 static int32_t rack_timely_no_stopping = 0; 327 static int32_t rack_down_raise_thresh = 100; 328 static int32_t rack_req_segs = 1; 329 static uint64_t rack_bw_rate_cap = 0; 330 static uint32_t rack_trace_point_config = 0; 331 static uint32_t rack_trace_point_bb_mode = 4; 332 static int32_t rack_trace_point_count = 0; 333 334 335 /* Weird delayed ack mode */ 336 static int32_t rack_use_imac_dack = 0; 337 /* Rack specific counters */ 338 counter_u64_t rack_saw_enobuf; 339 counter_u64_t rack_saw_enobuf_hw; 340 counter_u64_t rack_saw_enetunreach; 341 counter_u64_t rack_persists_sends; 342 counter_u64_t rack_persists_acks; 343 counter_u64_t rack_persists_loss; 344 counter_u64_t rack_persists_lost_ends; 345 #ifdef INVARIANTS 346 counter_u64_t rack_adjust_map_bw; 347 #endif 348 /* Tail loss probe counters */ 349 counter_u64_t rack_tlp_tot; 350 counter_u64_t rack_tlp_newdata; 351 counter_u64_t rack_tlp_retran; 352 counter_u64_t rack_tlp_retran_bytes; 353 counter_u64_t rack_to_tot; 354 counter_u64_t rack_hot_alloc; 355 counter_u64_t rack_to_alloc; 356 counter_u64_t rack_to_alloc_hard; 357 counter_u64_t rack_to_alloc_emerg; 358 counter_u64_t rack_to_alloc_limited; 359 counter_u64_t rack_alloc_limited_conns; 360 counter_u64_t rack_split_limited; 361 362 counter_u64_t rack_multi_single_eq; 363 counter_u64_t rack_proc_non_comp_ack; 364 365 counter_u64_t rack_fto_send; 366 counter_u64_t rack_fto_rsm_send; 367 counter_u64_t rack_nfto_resend; 368 counter_u64_t rack_non_fto_send; 369 counter_u64_t rack_extended_rfo; 370 371 counter_u64_t rack_sack_proc_all; 372 counter_u64_t rack_sack_proc_short; 373 counter_u64_t rack_sack_proc_restart; 374 counter_u64_t rack_sack_attacks_detected; 375 counter_u64_t rack_sack_attacks_reversed; 376 counter_u64_t rack_sack_used_next_merge; 377 counter_u64_t rack_sack_splits; 378 counter_u64_t rack_sack_used_prev_merge; 379 counter_u64_t rack_sack_skipped_acked; 380 counter_u64_t rack_ack_total; 381 counter_u64_t rack_express_sack; 382 counter_u64_t rack_sack_total; 383 counter_u64_t rack_move_none; 384 counter_u64_t rack_move_some; 385 386 counter_u64_t 
rack_input_idle_reduces; 387 counter_u64_t rack_collapsed_win; 388 counter_u64_t rack_collapsed_win_seen; 389 counter_u64_t rack_collapsed_win_rxt; 390 counter_u64_t rack_collapsed_win_rxt_bytes; 391 counter_u64_t rack_try_scwnd; 392 counter_u64_t rack_hw_pace_init_fail; 393 counter_u64_t rack_hw_pace_lost; 394 395 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 396 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 397 398 399 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 400 401 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 402 (tv) = (value) + slop; \ 403 if ((u_long)(tv) < (u_long)(tvmin)) \ 404 (tv) = (tvmin); \ 405 if ((u_long)(tv) > (u_long)(tvmax)) \ 406 (tv) = (tvmax); \ 407 } while (0) 408 409 static void 410 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 411 412 static int 413 rack_process_ack(struct mbuf *m, struct tcphdr *th, 414 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 415 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 416 static int 417 rack_process_data(struct mbuf *m, struct tcphdr *th, 418 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 419 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 420 static void 421 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 422 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 423 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 424 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 425 uint8_t limit_type); 426 static struct rack_sendmap * 427 rack_check_recovery_mode(struct tcpcb *tp, 428 uint32_t tsused); 429 static void 430 rack_cong_signal(struct tcpcb *tp, 431 uint32_t type, uint32_t ack, int ); 432 static void rack_counter_destroy(void); 433 static int 434 rack_ctloutput(struct inpcb *inp, struct sockopt *sopt); 435 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 436 static void 437 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 438 static void 439 rack_do_segment(struct mbuf *m, struct tcphdr *th, 440 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 441 uint8_t iptos); 442 static void rack_dtor(void *mem, int32_t size, void *arg); 443 static void 444 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 445 uint32_t flex1, uint32_t flex2, 446 uint32_t flex3, uint32_t flex4, 447 uint32_t flex5, uint32_t flex6, 448 uint16_t flex7, uint8_t mod); 449 450 static void 451 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 452 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, 453 struct rack_sendmap *rsm, uint8_t quality); 454 static struct rack_sendmap * 455 rack_find_high_nonack(struct tcp_rack *rack, 456 struct rack_sendmap *rsm); 457 static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack); 458 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 459 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 460 static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt); 461 static void 462 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 463 tcp_seq th_ack, int line, uint8_t quality); 464 static uint32_t 465 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 466 static int32_t rack_handoff_ok(struct tcpcb *tp); 467 static int32_t rack_init(struct tcpcb *tp); 
468 static void rack_init_sysctls(void); 469 static void 470 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 471 struct tcphdr *th, int entered_rec, int dup_ack_struck); 472 static void 473 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 474 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts, 475 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 476 477 static void 478 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 479 struct rack_sendmap *rsm); 480 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 481 static int32_t rack_output(struct tcpcb *tp); 482 483 static uint32_t 484 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 485 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 486 uint32_t cts, int *moved_two); 487 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 488 static void rack_remxt_tmr(struct tcpcb *tp); 489 static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt); 490 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 491 static int32_t rack_stopall(struct tcpcb *tp); 492 static void 493 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 494 uint32_t delta); 495 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 496 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 497 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 498 static uint32_t 499 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 500 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 501 static void 502 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 503 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 504 static int 505 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 506 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 507 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 508 static int 509 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 510 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 511 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 512 static int 513 rack_do_closing(struct mbuf *m, struct tcphdr *th, 514 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 515 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 516 static int 517 rack_do_established(struct mbuf *m, struct tcphdr *th, 518 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 519 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 520 static int 521 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 522 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 523 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 524 static int 525 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 526 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 527 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 528 static int 529 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 530 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 531 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 532 static int 533 
rack_do_lastack(struct mbuf *m, struct tcphdr *th, 534 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 535 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 536 static int 537 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 538 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 539 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 540 static int 541 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 542 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 543 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 544 struct rack_sendmap * 545 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 546 uint32_t tsused); 547 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 548 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 549 static void 550 tcp_rack_partialack(struct tcpcb *tp); 551 static int 552 rack_set_profile(struct tcp_rack *rack, int prof); 553 static void 554 rack_apply_deferred_options(struct tcp_rack *rack); 555 556 int32_t rack_clear_counter=0; 557 558 static inline void 559 rack_trace_point(struct tcp_rack *rack, int num) 560 { 561 if (((rack_trace_point_config == num) || 562 (rack_trace_point_config = 0xffffffff)) && 563 (rack_trace_point_bb_mode != 0) && 564 (rack_trace_point_count > 0) && 565 (rack->rc_tp->t_logstate == 0)) { 566 int res; 567 res = atomic_fetchadd_int(&rack_trace_point_count, -1); 568 if (res > 0) { 569 rack->rc_tp->t_logstate = rack_trace_point_bb_mode; 570 } else { 571 /* Loss a race assure its zero now */ 572 rack_trace_point_count = 0; 573 } 574 } 575 } 576 577 static void 578 rack_set_cc_pacing(struct tcp_rack *rack) 579 { 580 struct sockopt sopt; 581 struct cc_newreno_opts opt; 582 struct newreno old, *ptr; 583 struct tcpcb *tp; 584 int error; 585 586 if (rack->rc_pacing_cc_set) 587 return; 588 589 tp = rack->rc_tp; 590 if (tp->cc_algo == NULL) { 591 /* Tcb is leaving */ 592 return; 593 } 594 rack->rc_pacing_cc_set = 1; 595 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 596 /* Not new-reno we can't play games with beta! */ 597 goto out; 598 } 599 ptr = ((struct newreno *)tp->ccv->cc_data); 600 if (CC_ALGO(tp)->ctl_output == NULL) { 601 /* Huh, why does new_reno no longer have a set function? */ 602 goto out; 603 } 604 if (ptr == NULL) { 605 /* Just the default values */ 606 old.beta = V_newreno_beta_ecn; 607 old.beta_ecn = V_newreno_beta_ecn; 608 old.newreno_flags = 0; 609 } else { 610 old.beta = ptr->beta; 611 old.beta_ecn = ptr->beta_ecn; 612 old.newreno_flags = ptr->newreno_flags; 613 } 614 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 615 sopt.sopt_dir = SOPT_SET; 616 opt.name = CC_NEWRENO_BETA; 617 opt.val = rack->r_ctl.rc_saved_beta.beta; 618 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 619 if (error) { 620 goto out; 621 } 622 /* 623 * Hack alert we need to set in our newreno_flags 624 * so that Abe behavior is also applied. 
625 */ 626 ((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 627 opt.name = CC_NEWRENO_BETA_ECN; 628 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 629 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 630 if (error) { 631 goto out; 632 } 633 /* Save off the original values for restoral */ 634 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 635 out: 636 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 637 union tcp_log_stackspecific log; 638 struct timeval tv; 639 640 ptr = ((struct newreno *)tp->ccv->cc_data); 641 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 642 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 643 if (ptr) { 644 log.u_bbr.flex1 = ptr->beta; 645 log.u_bbr.flex2 = ptr->beta_ecn; 646 log.u_bbr.flex3 = ptr->newreno_flags; 647 } 648 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 649 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 650 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 651 log.u_bbr.flex7 = rack->gp_ready; 652 log.u_bbr.flex7 <<= 1; 653 log.u_bbr.flex7 |= rack->use_fixed_rate; 654 log.u_bbr.flex7 <<= 1; 655 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 656 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 657 log.u_bbr.flex8 = 3; 658 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 659 0, &log, false, NULL, NULL, 0, &tv); 660 } 661 } 662 663 static void 664 rack_undo_cc_pacing(struct tcp_rack *rack) 665 { 666 struct newreno old, *ptr; 667 struct tcpcb *tp; 668 669 if (rack->rc_pacing_cc_set == 0) 670 return; 671 tp = rack->rc_tp; 672 rack->rc_pacing_cc_set = 0; 673 if (tp->cc_algo == NULL) 674 /* Tcb is leaving */ 675 return; 676 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 677 /* Not new-reno nothing to do! */ 678 return; 679 } 680 ptr = ((struct newreno *)tp->ccv->cc_data); 681 if (ptr == NULL) { 682 /* 683 * This happens at rack_fini() if the 684 * cc module gets freed on us. In that 685 * case we loose our "new" settings but 686 * thats ok, since the tcb is going away anyway. 687 */ 688 return; 689 } 690 /* Grab out our set values */ 691 memcpy(&old, ptr, sizeof(struct newreno)); 692 /* Copy back in the original values */ 693 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 694 /* Now save back the values we had set in (for when pacing is restored) */ 695 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 696 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 697 union tcp_log_stackspecific log; 698 struct timeval tv; 699 700 ptr = ((struct newreno *)tp->ccv->cc_data); 701 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 702 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 703 log.u_bbr.flex1 = ptr->beta; 704 log.u_bbr.flex2 = ptr->beta_ecn; 705 log.u_bbr.flex3 = ptr->newreno_flags; 706 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 707 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 708 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 709 log.u_bbr.flex7 = rack->gp_ready; 710 log.u_bbr.flex7 <<= 1; 711 log.u_bbr.flex7 |= rack->use_fixed_rate; 712 log.u_bbr.flex7 <<= 1; 713 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 714 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 715 log.u_bbr.flex8 = 4; 716 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 717 0, &log, false, NULL, NULL, 0, &tv); 718 } 719 } 720 721 #ifdef NETFLIX_PEAKRATE 722 static inline void 723 rack_update_peakrate_thr(struct tcpcb *tp) 724 { 725 /* Keep in mind that t_maxpeakrate is in B/s. 
*/ 726 uint64_t peak; 727 peak = uqmax((tp->t_maxseg * 2), 728 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 729 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 730 } 731 #endif 732 733 static int 734 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 735 { 736 uint32_t stat; 737 int32_t error; 738 739 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 740 if (error || req->newptr == NULL) 741 return error; 742 743 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 744 if (error) 745 return (error); 746 if (stat == 1) { 747 #ifdef INVARIANTS 748 printf("Clearing RACK counters\n"); 749 #endif 750 counter_u64_zero(rack_tlp_tot); 751 counter_u64_zero(rack_tlp_newdata); 752 counter_u64_zero(rack_tlp_retran); 753 counter_u64_zero(rack_tlp_retran_bytes); 754 counter_u64_zero(rack_to_tot); 755 counter_u64_zero(rack_saw_enobuf); 756 counter_u64_zero(rack_saw_enobuf_hw); 757 counter_u64_zero(rack_saw_enetunreach); 758 counter_u64_zero(rack_persists_sends); 759 counter_u64_zero(rack_persists_acks); 760 counter_u64_zero(rack_persists_loss); 761 counter_u64_zero(rack_persists_lost_ends); 762 #ifdef INVARIANTS 763 counter_u64_zero(rack_adjust_map_bw); 764 #endif 765 counter_u64_zero(rack_to_alloc_hard); 766 counter_u64_zero(rack_to_alloc_emerg); 767 counter_u64_zero(rack_sack_proc_all); 768 counter_u64_zero(rack_fto_send); 769 counter_u64_zero(rack_fto_rsm_send); 770 counter_u64_zero(rack_extended_rfo); 771 counter_u64_zero(rack_hw_pace_init_fail); 772 counter_u64_zero(rack_hw_pace_lost); 773 counter_u64_zero(rack_non_fto_send); 774 counter_u64_zero(rack_nfto_resend); 775 counter_u64_zero(rack_sack_proc_short); 776 counter_u64_zero(rack_sack_proc_restart); 777 counter_u64_zero(rack_to_alloc); 778 counter_u64_zero(rack_to_alloc_limited); 779 counter_u64_zero(rack_alloc_limited_conns); 780 counter_u64_zero(rack_split_limited); 781 counter_u64_zero(rack_multi_single_eq); 782 counter_u64_zero(rack_proc_non_comp_ack); 783 counter_u64_zero(rack_sack_attacks_detected); 784 counter_u64_zero(rack_sack_attacks_reversed); 785 counter_u64_zero(rack_sack_used_next_merge); 786 counter_u64_zero(rack_sack_used_prev_merge); 787 counter_u64_zero(rack_sack_splits); 788 counter_u64_zero(rack_sack_skipped_acked); 789 counter_u64_zero(rack_ack_total); 790 counter_u64_zero(rack_express_sack); 791 counter_u64_zero(rack_sack_total); 792 counter_u64_zero(rack_move_none); 793 counter_u64_zero(rack_move_some); 794 counter_u64_zero(rack_try_scwnd); 795 counter_u64_zero(rack_collapsed_win); 796 counter_u64_zero(rack_collapsed_win_rxt); 797 counter_u64_zero(rack_collapsed_win_seen); 798 counter_u64_zero(rack_collapsed_win_rxt_bytes); 799 } 800 rack_clear_counter = 0; 801 return (0); 802 } 803 804 static void 805 rack_init_sysctls(void) 806 { 807 struct sysctl_oid *rack_counters; 808 struct sysctl_oid *rack_attack; 809 struct sysctl_oid *rack_pacing; 810 struct sysctl_oid *rack_timely; 811 struct sysctl_oid *rack_timers; 812 struct sysctl_oid *rack_tlp; 813 struct sysctl_oid *rack_misc; 814 struct sysctl_oid *rack_features; 815 struct sysctl_oid *rack_measure; 816 struct sysctl_oid *rack_probertt; 817 struct sysctl_oid *rack_hw_pacing; 818 struct sysctl_oid *rack_tracepoint; 819 820 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 821 SYSCTL_CHILDREN(rack_sysctl_root), 822 OID_AUTO, 823 "sack_attack", 824 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 825 "Rack Sack Attack Counters and Controls"); 826 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 827 SYSCTL_CHILDREN(rack_sysctl_root), 828 
OID_AUTO, 829 "stats", 830 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 831 "Rack Counters"); 832 SYSCTL_ADD_S32(&rack_sysctl_ctx, 833 SYSCTL_CHILDREN(rack_sysctl_root), 834 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 835 &rack_rate_sample_method , USE_RTT_LOW, 836 "What method should we use for rate sampling 0=high, 1=low "); 837 /* Probe rtt related controls */ 838 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 839 SYSCTL_CHILDREN(rack_sysctl_root), 840 OID_AUTO, 841 "probertt", 842 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 843 "ProbeRTT related Controls"); 844 SYSCTL_ADD_U16(&rack_sysctl_ctx, 845 SYSCTL_CHILDREN(rack_probertt), 846 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 847 &rack_atexit_prtt_hbp, 130, 848 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 849 SYSCTL_ADD_U16(&rack_sysctl_ctx, 850 SYSCTL_CHILDREN(rack_probertt), 851 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 852 &rack_atexit_prtt, 130, 853 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 854 SYSCTL_ADD_U16(&rack_sysctl_ctx, 855 SYSCTL_CHILDREN(rack_probertt), 856 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 857 &rack_per_of_gp_probertt, 60, 858 "What percentage of goodput do we pace at in probertt"); 859 SYSCTL_ADD_U16(&rack_sysctl_ctx, 860 SYSCTL_CHILDREN(rack_probertt), 861 OID_AUTO, "gp_per_reduce", CTLFLAG_RW, 862 &rack_per_of_gp_probertt_reduce, 10, 863 "What percentage of goodput do we reduce every gp_srtt"); 864 SYSCTL_ADD_U16(&rack_sysctl_ctx, 865 SYSCTL_CHILDREN(rack_probertt), 866 OID_AUTO, "gp_per_low", CTLFLAG_RW, 867 &rack_per_of_gp_lowthresh, 40, 868 "What percentage of goodput do we allow the multiplier to fall to"); 869 SYSCTL_ADD_U32(&rack_sysctl_ctx, 870 SYSCTL_CHILDREN(rack_probertt), 871 OID_AUTO, "time_between", CTLFLAG_RW, 872 & rack_time_between_probertt, 96000000, 873 "How many useconds between the lowest rtt falling must past before we enter probertt"); 874 SYSCTL_ADD_U32(&rack_sysctl_ctx, 875 SYSCTL_CHILDREN(rack_probertt), 876 OID_AUTO, "safety", CTLFLAG_RW, 877 &rack_probe_rtt_safety_val, 2000000, 878 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 879 SYSCTL_ADD_U32(&rack_sysctl_ctx, 880 SYSCTL_CHILDREN(rack_probertt), 881 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 882 &rack_probe_rtt_sets_cwnd, 0, 883 "Do we set the cwnd too (if always_lower is on)"); 884 SYSCTL_ADD_U32(&rack_sysctl_ctx, 885 SYSCTL_CHILDREN(rack_probertt), 886 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 887 &rack_max_drain_wait, 2, 888 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 889 SYSCTL_ADD_U32(&rack_sysctl_ctx, 890 SYSCTL_CHILDREN(rack_probertt), 891 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 892 &rack_must_drain, 1, 893 "We must drain this many gp_srtt's waiting for flight to reach goal"); 894 SYSCTL_ADD_U32(&rack_sysctl_ctx, 895 SYSCTL_CHILDREN(rack_probertt), 896 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 897 &rack_probertt_use_min_rtt_entry, 1, 898 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 899 SYSCTL_ADD_U32(&rack_sysctl_ctx, 900 SYSCTL_CHILDREN(rack_probertt), 901 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 902 &rack_probertt_use_min_rtt_exit, 0, 903 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 904 SYSCTL_ADD_U32(&rack_sysctl_ctx, 905 SYSCTL_CHILDREN(rack_probertt), 906 OID_AUTO, "length_div", CTLFLAG_RW, 907 &rack_probertt_gpsrtt_cnt_div, 0, 908 "How many recent goodput srtt periods plus hold tim does probertt last (bottom 
of fraction)"); 909 SYSCTL_ADD_U32(&rack_sysctl_ctx, 910 SYSCTL_CHILDREN(rack_probertt), 911 OID_AUTO, "length_mul", CTLFLAG_RW, 912 &rack_probertt_gpsrtt_cnt_mul, 0, 913 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 914 SYSCTL_ADD_U32(&rack_sysctl_ctx, 915 SYSCTL_CHILDREN(rack_probertt), 916 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 917 &rack_min_probertt_hold, 200000, 918 "What is the minimum time we hold probertt at target"); 919 SYSCTL_ADD_U32(&rack_sysctl_ctx, 920 SYSCTL_CHILDREN(rack_probertt), 921 OID_AUTO, "filter_life", CTLFLAG_RW, 922 &rack_probertt_filter_life, 10000000, 923 "What is the time for the filters life in useconds"); 924 SYSCTL_ADD_U32(&rack_sysctl_ctx, 925 SYSCTL_CHILDREN(rack_probertt), 926 OID_AUTO, "lower_within", CTLFLAG_RW, 927 &rack_probertt_lower_within, 10, 928 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 929 SYSCTL_ADD_U32(&rack_sysctl_ctx, 930 SYSCTL_CHILDREN(rack_probertt), 931 OID_AUTO, "must_move", CTLFLAG_RW, 932 &rack_min_rtt_movement, 250, 933 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 934 SYSCTL_ADD_U32(&rack_sysctl_ctx, 935 SYSCTL_CHILDREN(rack_probertt), 936 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 937 &rack_probertt_clear_is, 1, 938 "Do we clear I/S counts on exiting probe-rtt"); 939 SYSCTL_ADD_S32(&rack_sysctl_ctx, 940 SYSCTL_CHILDREN(rack_probertt), 941 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 942 &rack_max_drain_hbp, 1, 943 "How many extra drain gpsrtt's do we get in highly buffered paths"); 944 SYSCTL_ADD_S32(&rack_sysctl_ctx, 945 SYSCTL_CHILDREN(rack_probertt), 946 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 947 &rack_hbp_thresh, 3, 948 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 949 950 rack_tracepoint = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 951 SYSCTL_CHILDREN(rack_sysctl_root), 952 OID_AUTO, 953 "tp", 954 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 955 "Rack tracepoint facility"); 956 SYSCTL_ADD_U32(&rack_sysctl_ctx, 957 SYSCTL_CHILDREN(rack_tracepoint), 958 OID_AUTO, "number", CTLFLAG_RW, 959 &rack_trace_point_config, 0, 960 "What is the trace point number to activate (0=none, 0xffffffff = all)?"); 961 SYSCTL_ADD_U32(&rack_sysctl_ctx, 962 SYSCTL_CHILDREN(rack_tracepoint), 963 OID_AUTO, "bbmode", CTLFLAG_RW, 964 &rack_trace_point_bb_mode, 4, 965 "What is BB logging mode that is activated?"); 966 SYSCTL_ADD_S32(&rack_sysctl_ctx, 967 SYSCTL_CHILDREN(rack_tracepoint), 968 OID_AUTO, "count", CTLFLAG_RW, 969 &rack_trace_point_count, 0, 970 "How many connections will have BB logging turned on that hit the tracepoint?"); 971 /* Pacing related sysctls */ 972 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 973 SYSCTL_CHILDREN(rack_sysctl_root), 974 OID_AUTO, 975 "pacing", 976 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 977 "Pacing related Controls"); 978 SYSCTL_ADD_S32(&rack_sysctl_ctx, 979 SYSCTL_CHILDREN(rack_pacing), 980 OID_AUTO, "max_pace_over", CTLFLAG_RW, 981 &rack_max_per_above, 30, 982 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 983 SYSCTL_ADD_S32(&rack_sysctl_ctx, 984 SYSCTL_CHILDREN(rack_pacing), 985 OID_AUTO, "pace_to_one", CTLFLAG_RW, 986 &rack_pace_one_seg, 0, 987 "Do we allow low b/w pacing of 1MSS instead of two"); 988 SYSCTL_ADD_S32(&rack_sysctl_ctx, 989 SYSCTL_CHILDREN(rack_pacing), 990 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 991 &rack_limit_time_with_srtt, 0, 992 "Do we limit pacing time based on srtt"); 993 SYSCTL_ADD_S32(&rack_sysctl_ctx, 994 
SYSCTL_CHILDREN(rack_pacing), 995 OID_AUTO, "init_win", CTLFLAG_RW, 996 &rack_default_init_window, 0, 997 "Do we have a rack initial window 0 = system default"); 998 SYSCTL_ADD_U16(&rack_sysctl_ctx, 999 SYSCTL_CHILDREN(rack_pacing), 1000 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 1001 &rack_per_of_gp_ss, 250, 1002 "If non zero, what percentage of goodput to pace at in slow start"); 1003 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1004 SYSCTL_CHILDREN(rack_pacing), 1005 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1006 &rack_per_of_gp_ca, 150, 1007 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1008 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1009 SYSCTL_CHILDREN(rack_pacing), 1010 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1011 &rack_per_of_gp_rec, 200, 1012 "If non zero, what percentage of goodput to pace at in recovery"); 1013 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1014 SYSCTL_CHILDREN(rack_pacing), 1015 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1016 &rack_hptsi_segments, 40, 1017 "What size is the max for TSO segments in pacing and burst mitigation"); 1018 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1019 SYSCTL_CHILDREN(rack_pacing), 1020 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1021 &rack_slot_reduction, 4, 1022 "When doing only burst mitigation what is the reduce divisor"); 1023 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1024 SYSCTL_CHILDREN(rack_sysctl_root), 1025 OID_AUTO, "use_pacing", CTLFLAG_RW, 1026 &rack_pace_every_seg, 0, 1027 "If set we use pacing, if clear we use only the original burst mitigation"); 1028 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1029 SYSCTL_CHILDREN(rack_pacing), 1030 OID_AUTO, "rate_cap", CTLFLAG_RW, 1031 &rack_bw_rate_cap, 0, 1032 "If set we apply this value to the absolute rate cap used by pacing"); 1033 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1034 SYSCTL_CHILDREN(rack_sysctl_root), 1035 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1036 &rack_req_measurements, 1, 1037 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1038 /* Hardware pacing */ 1039 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1040 SYSCTL_CHILDREN(rack_sysctl_root), 1041 OID_AUTO, 1042 "hdwr_pacing", 1043 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1044 "Pacing related Controls"); 1045 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1046 SYSCTL_CHILDREN(rack_hw_pacing), 1047 OID_AUTO, "rwnd_factor", CTLFLAG_RW, 1048 &rack_hw_rwnd_factor, 2, 1049 "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?"); 1050 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1051 SYSCTL_CHILDREN(rack_hw_pacing), 1052 OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW, 1053 &rack_enobuf_hw_boost_mult, 2, 1054 "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?"); 1055 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1056 SYSCTL_CHILDREN(rack_hw_pacing), 1057 OID_AUTO, "pace_enobuf_max", CTLFLAG_RW, 1058 &rack_enobuf_hw_max, 2, 1059 "What is the max boost the pacing time if we see a ENOBUFS?"); 1060 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1061 SYSCTL_CHILDREN(rack_hw_pacing), 1062 OID_AUTO, "pace_enobuf_min", CTLFLAG_RW, 1063 &rack_enobuf_hw_min, 2, 1064 "What is the min boost the pacing time if we see a ENOBUFS?"); 1065 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1066 SYSCTL_CHILDREN(rack_hw_pacing), 1067 OID_AUTO, "enable", CTLFLAG_RW, 1068 &rack_enable_hw_pacing, 0, 1069 "Should RACK attempt to use hw pacing?"); 1070 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1071 SYSCTL_CHILDREN(rack_hw_pacing), 1072 OID_AUTO, "rate_cap", CTLFLAG_RW, 1073 &rack_hw_rate_caps, 1, 1074 "Does the highest hardware pacing rate cap the rate we will send at??"); 1075 
SYSCTL_ADD_S32(&rack_sysctl_ctx, 1076 SYSCTL_CHILDREN(rack_hw_pacing), 1077 OID_AUTO, "rate_min", CTLFLAG_RW, 1078 &rack_hw_rate_min, 0, 1079 "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?"); 1080 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1081 SYSCTL_CHILDREN(rack_hw_pacing), 1082 OID_AUTO, "rate_to_low", CTLFLAG_RW, 1083 &rack_hw_rate_to_low, 0, 1084 "If we fall below this rate, dis-engage hw pacing?"); 1085 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1086 SYSCTL_CHILDREN(rack_hw_pacing), 1087 OID_AUTO, "up_only", CTLFLAG_RW, 1088 &rack_hw_up_only, 1, 1089 "Do we allow hw pacing to lower the rate selected?"); 1090 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1091 SYSCTL_CHILDREN(rack_hw_pacing), 1092 OID_AUTO, "extra_mss_precise", CTLFLAG_RW, 1093 &rack_hw_pace_extra_slots, 2, 1094 "If the rates between software and hardware match precisely how many extra time_betweens do we get?"); 1095 rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1096 SYSCTL_CHILDREN(rack_sysctl_root), 1097 OID_AUTO, 1098 "timely", 1099 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1100 "Rack Timely RTT Controls"); 1101 /* Timely based GP dynmics */ 1102 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1103 SYSCTL_CHILDREN(rack_timely), 1104 OID_AUTO, "upper", CTLFLAG_RW, 1105 &rack_gp_per_bw_mul_up, 2, 1106 "Rack timely upper range for equal b/w (in percentage)"); 1107 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1108 SYSCTL_CHILDREN(rack_timely), 1109 OID_AUTO, "lower", CTLFLAG_RW, 1110 &rack_gp_per_bw_mul_down, 4, 1111 "Rack timely lower range for equal b/w (in percentage)"); 1112 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1113 SYSCTL_CHILDREN(rack_timely), 1114 OID_AUTO, "rtt_max_mul", CTLFLAG_RW, 1115 &rack_gp_rtt_maxmul, 3, 1116 "Rack timely multiplier of lowest rtt for rtt_max"); 1117 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1118 SYSCTL_CHILDREN(rack_timely), 1119 OID_AUTO, "rtt_min_div", CTLFLAG_RW, 1120 &rack_gp_rtt_mindiv, 4, 1121 "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1122 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1123 SYSCTL_CHILDREN(rack_timely), 1124 OID_AUTO, "rtt_min_mul", CTLFLAG_RW, 1125 &rack_gp_rtt_minmul, 1, 1126 "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt"); 1127 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1128 SYSCTL_CHILDREN(rack_timely), 1129 OID_AUTO, "decrease", CTLFLAG_RW, 1130 &rack_gp_decrease_per, 20, 1131 "Rack timely decrease percentage of our GP multiplication factor"); 1132 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1133 SYSCTL_CHILDREN(rack_timely), 1134 OID_AUTO, "increase", CTLFLAG_RW, 1135 &rack_gp_increase_per, 2, 1136 "Rack timely increase perentage of our GP multiplication factor"); 1137 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1138 SYSCTL_CHILDREN(rack_timely), 1139 OID_AUTO, "lowerbound", CTLFLAG_RW, 1140 &rack_per_lower_bound, 50, 1141 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1142 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1143 SYSCTL_CHILDREN(rack_timely), 1144 OID_AUTO, "upperboundss", CTLFLAG_RW, 1145 &rack_per_upper_bound_ss, 0, 1146 "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1147 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1148 SYSCTL_CHILDREN(rack_timely), 1149 OID_AUTO, "upperboundca", CTLFLAG_RW, 1150 &rack_per_upper_bound_ca, 0, 1151 "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1152 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1153 SYSCTL_CHILDREN(rack_timely), 1154 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1155 &rack_do_dyn_mul, 0, 1156 "Rack timely do we enable dynmaic timely 
goodput by default"); 1157 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1158 SYSCTL_CHILDREN(rack_timely), 1159 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1160 &rack_gp_no_rec_chg, 1, 1161 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1162 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1163 SYSCTL_CHILDREN(rack_timely), 1164 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1165 &rack_timely_dec_clear, 6, 1166 "Rack timely what threshold do we count to before another boost during b/w decent"); 1167 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1168 SYSCTL_CHILDREN(rack_timely), 1169 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1170 &rack_timely_max_push_rise, 3, 1171 "Rack timely how many times do we push up with b/w increase"); 1172 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1173 SYSCTL_CHILDREN(rack_timely), 1174 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1175 &rack_timely_max_push_drop, 3, 1176 "Rack timely how many times do we push back on b/w decent"); 1177 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1178 SYSCTL_CHILDREN(rack_timely), 1179 OID_AUTO, "min_segs", CTLFLAG_RW, 1180 &rack_timely_min_segs, 4, 1181 "Rack timely when setting the cwnd what is the min num segments"); 1182 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1183 SYSCTL_CHILDREN(rack_timely), 1184 OID_AUTO, "noback_max", CTLFLAG_RW, 1185 &rack_use_max_for_nobackoff, 0, 1186 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1187 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1188 SYSCTL_CHILDREN(rack_timely), 1189 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1190 &rack_timely_int_timely_only, 0, 1191 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1192 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1193 SYSCTL_CHILDREN(rack_timely), 1194 OID_AUTO, "nonstop", CTLFLAG_RW, 1195 &rack_timely_no_stopping, 0, 1196 "Rack timely don't stop increase"); 1197 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1198 SYSCTL_CHILDREN(rack_timely), 1199 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1200 &rack_down_raise_thresh, 100, 1201 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1202 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1203 SYSCTL_CHILDREN(rack_timely), 1204 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1205 &rack_req_segs, 1, 1206 "Bottom dragging if not these many segments outstanding and room"); 1207 1208 /* TLP and Rack related parameters */ 1209 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1210 SYSCTL_CHILDREN(rack_sysctl_root), 1211 OID_AUTO, 1212 "tlp", 1213 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1214 "TLP and Rack related Controls"); 1215 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1216 SYSCTL_CHILDREN(rack_tlp), 1217 OID_AUTO, "use_rrr", CTLFLAG_RW, 1218 &use_rack_rr, 1, 1219 "Do we use Rack Rapid Recovery"); 1220 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1221 SYSCTL_CHILDREN(rack_tlp), 1222 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1223 &rack_max_abc_post_recovery, 2, 1224 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1225 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1226 SYSCTL_CHILDREN(rack_tlp), 1227 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1228 &rack_non_rxt_use_cr, 0, 1229 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1230 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1231 SYSCTL_CHILDREN(rack_tlp), 1232 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1233 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1234 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1235 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1236 SYSCTL_CHILDREN(rack_tlp), 1237 OID_AUTO, "limit", CTLFLAG_RW, 1238 &rack_tlp_limit, 2, 1239 "How many TLP's 
can be sent without sending new data"); 1240 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1241 SYSCTL_CHILDREN(rack_tlp), 1242 OID_AUTO, "use_greater", CTLFLAG_RW, 1243 &rack_tlp_use_greater, 1, 1244 "Should we use the rack_rtt time if its greater than srtt"); 1245 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1246 SYSCTL_CHILDREN(rack_tlp), 1247 OID_AUTO, "tlpminto", CTLFLAG_RW, 1248 &rack_tlp_min, 10000, 1249 "TLP minimum timeout per the specification (in microseconds)"); 1250 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1251 SYSCTL_CHILDREN(rack_tlp), 1252 OID_AUTO, "send_oldest", CTLFLAG_RW, 1253 &rack_always_send_oldest, 0, 1254 "Should we always send the oldest TLP and RACK-TLP"); 1255 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1256 SYSCTL_CHILDREN(rack_tlp), 1257 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1258 &rack_limited_retran, 0, 1259 "How many times can a rack timeout drive out sends"); 1260 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1261 SYSCTL_CHILDREN(rack_tlp), 1262 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1263 &rack_lower_cwnd_at_tlp, 0, 1264 "When a TLP completes a retran should we enter recovery"); 1265 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1266 SYSCTL_CHILDREN(rack_tlp), 1267 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1268 &rack_reorder_thresh, 2, 1269 "What factor for rack will be added when seeing reordering (shift right)"); 1270 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1271 SYSCTL_CHILDREN(rack_tlp), 1272 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1273 &rack_tlp_thresh, 1, 1274 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1275 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1276 SYSCTL_CHILDREN(rack_tlp), 1277 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1278 &rack_reorder_fade, 60000000, 1279 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1280 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1281 SYSCTL_CHILDREN(rack_tlp), 1282 OID_AUTO, "pktdelay", CTLFLAG_RW, 1283 &rack_pkt_delay, 1000, 1284 "Extra RACK time (in microseconds) besides reordering thresh"); 1285 1286 /* Timer related controls */ 1287 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1288 SYSCTL_CHILDREN(rack_sysctl_root), 1289 OID_AUTO, 1290 "timers", 1291 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1292 "Timer related controls"); 1293 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1294 SYSCTL_CHILDREN(rack_timers), 1295 OID_AUTO, "persmin", CTLFLAG_RW, 1296 &rack_persist_min, 250000, 1297 "What is the minimum time in microseconds between persists"); 1298 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1299 SYSCTL_CHILDREN(rack_timers), 1300 OID_AUTO, "persmax", CTLFLAG_RW, 1301 &rack_persist_max, 2000000, 1302 "What is the largest delay in microseconds between persists"); 1303 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1304 SYSCTL_CHILDREN(rack_timers), 1305 OID_AUTO, "delayed_ack", CTLFLAG_RW, 1306 &rack_delayed_ack_time, 40000, 1307 "Delayed ack time (40ms in microseconds)"); 1308 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1309 SYSCTL_CHILDREN(rack_timers), 1310 OID_AUTO, "minrto", CTLFLAG_RW, 1311 &rack_rto_min, 30000, 1312 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); 1313 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1314 SYSCTL_CHILDREN(rack_timers), 1315 OID_AUTO, "maxrto", CTLFLAG_RW, 1316 &rack_rto_max, 4000000, 1317 "Maximum RTO in microseconds -- should be at least as large as min_rto"); 1318 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1319 SYSCTL_CHILDREN(rack_timers), 1320 OID_AUTO, "minto", CTLFLAG_RW, 1321 &rack_min_to, 1000, 1322 "Minimum rack timeout in microseconds"); 1323 /* Measure controls */ 1324 rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1325 SYSCTL_CHILDREN(rack_sysctl_root), 1326 
OID_AUTO, 1327 "measure", 1328 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1329 "Measure related controls"); 1330 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1331 SYSCTL_CHILDREN(rack_measure), 1332 OID_AUTO, "wma_divisor", CTLFLAG_RW, 1333 &rack_wma_divisor, 8, 1334 "When doing b/w calculation what is the divisor for the WMA"); 1335 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1336 SYSCTL_CHILDREN(rack_measure), 1337 OID_AUTO, "end_cwnd", CTLFLAG_RW, 1338 &rack_cwnd_block_ends_measure, 0, 1339 "Does a cwnd just-return end the measurement window (app limited)"); 1340 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1341 SYSCTL_CHILDREN(rack_measure), 1342 OID_AUTO, "end_rwnd", CTLFLAG_RW, 1343 &rack_rwnd_block_ends_measure, 0, 1344 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); 1345 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1346 SYSCTL_CHILDREN(rack_measure), 1347 OID_AUTO, "min_target", CTLFLAG_RW, 1348 &rack_def_data_window, 20, 1349 "What is the minimum target window (in mss) for a GP measurements"); 1350 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1351 SYSCTL_CHILDREN(rack_measure), 1352 OID_AUTO, "goal_bdp", CTLFLAG_RW, 1353 &rack_goal_bdp, 2, 1354 "What is the goal BDP to measure"); 1355 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1356 SYSCTL_CHILDREN(rack_measure), 1357 OID_AUTO, "min_srtts", CTLFLAG_RW, 1358 &rack_min_srtts, 1, 1359 "What is the goal BDP to measure"); 1360 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1361 SYSCTL_CHILDREN(rack_measure), 1362 OID_AUTO, "min_measure_tim", CTLFLAG_RW, 1363 &rack_min_measure_usec, 0, 1364 "What is the Minimum time time for a measurement if 0, this is off"); 1365 /* Features */ 1366 rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1367 SYSCTL_CHILDREN(rack_sysctl_root), 1368 OID_AUTO, 1369 "features", 1370 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1371 "Feature controls"); 1372 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1373 SYSCTL_CHILDREN(rack_features), 1374 OID_AUTO, "cmpack", CTLFLAG_RW, 1375 &rack_use_cmp_acks, 1, 1376 "Should RACK have LRO send compressed acks"); 1377 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1378 SYSCTL_CHILDREN(rack_features), 1379 OID_AUTO, "fsb", CTLFLAG_RW, 1380 &rack_use_fsb, 1, 1381 "Should RACK use the fast send block?"); 1382 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1383 SYSCTL_CHILDREN(rack_features), 1384 OID_AUTO, "rfo", CTLFLAG_RW, 1385 &rack_use_rfo, 1, 1386 "Should RACK use rack_fast_output()?"); 1387 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1388 SYSCTL_CHILDREN(rack_features), 1389 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1390 &rack_use_rsm_rfo, 1, 1391 "Should RACK use rack_fast_rsm_output()?"); 1392 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1393 SYSCTL_CHILDREN(rack_features), 1394 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1395 &rack_enable_mqueue_for_nonpaced, 0, 1396 "Should RACK use mbuf queuing for non-paced connections"); 1397 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1398 SYSCTL_CHILDREN(rack_features), 1399 OID_AUTO, "hystartplusplus", CTLFLAG_RW, 1400 &rack_do_hystart, 0, 1401 "Should RACK enable HyStart++ on connections?"); 1402 /* Misc rack controls */ 1403 rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1404 SYSCTL_CHILDREN(rack_sysctl_root), 1405 OID_AUTO, 1406 "misc", 1407 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1408 "Misc related controls"); 1409 #ifdef TCP_ACCOUNTING 1410 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1411 SYSCTL_CHILDREN(rack_misc), 1412 OID_AUTO, "tcp_acct", CTLFLAG_RW, 1413 &rack_tcp_accounting, 0, 1414 "Should we turn on TCP accounting for all rack sessions?"); 1415 #endif 1416 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1417 SYSCTL_CHILDREN(rack_misc), 1418 OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW, 1419 
&rack_apply_rtt_with_reduced_conf, 0, 1420 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1421 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1422 SYSCTL_CHILDREN(rack_misc), 1423 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1424 &rack_dsack_std_based, 3, 1425 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1426 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1427 SYSCTL_CHILDREN(rack_misc), 1428 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1429 &rack_prr_addbackmax, 2, 1430 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1431 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_misc), 1433 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1434 &rack_stats_gets_ms_rtt, 1, 1435 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1436 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1437 SYSCTL_CHILDREN(rack_misc), 1438 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1439 &rack_client_low_buf, 0, 1440 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1441 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1442 SYSCTL_CHILDREN(rack_misc), 1443 OID_AUTO, "defprofile", CTLFLAG_RW, 1444 &rack_def_profile, 0, 1445 "Should RACK use a default profile (0=no, num == profile num)?"); 1446 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1447 SYSCTL_CHILDREN(rack_misc), 1448 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1449 &rack_enable_shared_cwnd, 1, 1450 "Should RACK try to use the shared cwnd on connections where allowed"); 1451 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_misc), 1453 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1454 &rack_limits_scwnd, 1, 1455 "Should RACK place low end time limits on the shared cwnd feature"); 1456 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1457 SYSCTL_CHILDREN(rack_misc), 1458 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1459 &rack_use_imac_dack, 0, 1460 "Should RACK try to emulate iMac delayed ack"); 1461 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1462 SYSCTL_CHILDREN(rack_misc), 1463 OID_AUTO, "no_prr", CTLFLAG_RW, 1464 &rack_disable_prr, 0, 1465 "Should RACK not use prr and only pace (must have pacing on)"); 1466 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1467 SYSCTL_CHILDREN(rack_misc), 1468 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1469 &rack_verbose_logging, 0, 1470 "Should RACK black box logging be verbose"); 1471 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_misc), 1473 OID_AUTO, "data_after_close", CTLFLAG_RW, 1474 &rack_ignore_data_after_close, 1, 1475 "Do we hold off sending a RST until all pending data is ack'd"); 1476 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_misc), 1478 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1479 &rack_sack_not_required, 1, 1480 "Do we allow rack to run on connections not supporting SACK"); 1481 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1482 SYSCTL_CHILDREN(rack_misc), 1483 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1484 &rack_send_a_lot_in_prr, 1, 1485 "Send a lot in prr"); 1486 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_misc), 1488 OID_AUTO, "autoscale", CTLFLAG_RW, 1489 &rack_autosndbuf_inc, 20, 1490 "What percentage should rack scale up its snd buffer by?"); 1491 /* Sack Attacker detection stuff */ 1492 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_attack), 1494 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1495 &rack_highest_sack_thresh_seen, 0, 1496 "Highest sack to ack ratio seen"); 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_attack), 1499 OID_AUTO, 
"detect_highmoveratio", CTLFLAG_RW, 1500 &rack_highest_move_thresh_seen, 0, 1501 "Highest move to non-move ratio seen"); 1502 rack_ack_total = counter_u64_alloc(M_WAITOK); 1503 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1504 SYSCTL_CHILDREN(rack_attack), 1505 OID_AUTO, "acktotal", CTLFLAG_RD, 1506 &rack_ack_total, 1507 "Total number of Ack's"); 1508 rack_express_sack = counter_u64_alloc(M_WAITOK); 1509 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1510 SYSCTL_CHILDREN(rack_attack), 1511 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1512 &rack_express_sack, 1513 "Total expresss number of Sack's"); 1514 rack_sack_total = counter_u64_alloc(M_WAITOK); 1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1516 SYSCTL_CHILDREN(rack_attack), 1517 OID_AUTO, "sacktotal", CTLFLAG_RD, 1518 &rack_sack_total, 1519 "Total number of SACKs"); 1520 rack_move_none = counter_u64_alloc(M_WAITOK); 1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_attack), 1523 OID_AUTO, "move_none", CTLFLAG_RD, 1524 &rack_move_none, 1525 "Total number of SACK index reuse of positions under threshold"); 1526 rack_move_some = counter_u64_alloc(M_WAITOK); 1527 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1528 SYSCTL_CHILDREN(rack_attack), 1529 OID_AUTO, "move_some", CTLFLAG_RD, 1530 &rack_move_some, 1531 "Total number of SACK index reuse of positions over threshold"); 1532 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_attack), 1535 OID_AUTO, "attacks", CTLFLAG_RD, 1536 &rack_sack_attacks_detected, 1537 "Total number of SACK attackers that had sack disabled"); 1538 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1539 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_attack), 1541 OID_AUTO, "reversed", CTLFLAG_RD, 1542 &rack_sack_attacks_reversed, 1543 "Total number of SACK attackers that were later determined false positive"); 1544 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1545 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1546 SYSCTL_CHILDREN(rack_attack), 1547 OID_AUTO, "nextmerge", CTLFLAG_RD, 1548 &rack_sack_used_next_merge, 1549 "Total number of times we used the next merge"); 1550 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1551 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1552 SYSCTL_CHILDREN(rack_attack), 1553 OID_AUTO, "prevmerge", CTLFLAG_RD, 1554 &rack_sack_used_prev_merge, 1555 "Total number of times we used the prev merge"); 1556 /* Counters */ 1557 rack_fto_send = counter_u64_alloc(M_WAITOK); 1558 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1559 SYSCTL_CHILDREN(rack_counters), 1560 OID_AUTO, "fto_send", CTLFLAG_RD, 1561 &rack_fto_send, "Total number of rack_fast_output sends"); 1562 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1563 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_counters), 1565 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1566 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1567 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1568 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_counters), 1570 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1571 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1572 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_counters), 1575 OID_AUTO, "nfto_send", CTLFLAG_RD, 1576 &rack_non_fto_send, "Total number of rack_output first sends"); 1577 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1578 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1581 &rack_extended_rfo, "Total number of times we extended rfo"); 1582 1583 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_counters), 1586 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1587 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1588 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1589 1590 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1591 SYSCTL_CHILDREN(rack_counters), 1592 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1593 &rack_hw_pace_lost, "Total number of times we lost hw pacing after initializing it"); 1594 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_counters), 1597 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1598 &rack_tlp_tot, 1599 "Total number of tail loss probe expirations"); 1600 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_counters), 1603 OID_AUTO, "tlp_new", CTLFLAG_RD, 1604 &rack_tlp_newdata, 1605 "Total number of tail loss probes sending new data"); 1606 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1608 SYSCTL_CHILDREN(rack_counters), 1609 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1610 &rack_tlp_retran, 1611 "Total number of tail loss probes sending retransmitted data"); 1612 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_counters), 1615 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1616 &rack_tlp_retran_bytes, 1617 "Total bytes of retransmitted data sent by tail loss probes"); 1618 rack_to_tot = counter_u64_alloc(M_WAITOK); 1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1620 SYSCTL_CHILDREN(rack_counters), 1621 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1622 &rack_to_tot, 1623 "Total number of times the rack timeout expired"); 1624 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_counters), 1627 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1628 &rack_saw_enobuf, 1629 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1630 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1632 SYSCTL_CHILDREN(rack_counters), 1633 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1634 &rack_saw_enobuf_hw, 1635 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1636 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1638 SYSCTL_CHILDREN(rack_counters), 1639 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1640 &rack_saw_enetunreach, 1641 "Total number of times a send received an ENETUNREACH"); 1642 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1644 SYSCTL_CHILDREN(rack_counters), 1645 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1646 &rack_hot_alloc, 1647 "Total allocations from the top of our list"); 1648 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_counters), 1651 OID_AUTO, "allocs", CTLFLAG_RD, 1652 &rack_to_alloc, 1653 "Total allocations of tracking structures"); 1654 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_counters), 1657 OID_AUTO, "allochard",
CTLFLAG_RD, 1658 &rack_to_alloc_hard, 1659 "Total allocations done with sleeping the hard way"); 1660 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1662 SYSCTL_CHILDREN(rack_counters), 1663 OID_AUTO, "allocemerg", CTLFLAG_RD, 1664 &rack_to_alloc_emerg, 1665 "Total allocations done from emergency cache"); 1666 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1668 SYSCTL_CHILDREN(rack_counters), 1669 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1670 &rack_to_alloc_limited, 1671 "Total allocations dropped due to limit"); 1672 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_counters), 1675 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1676 &rack_alloc_limited_conns, 1677 "Connections with allocations dropped due to limit"); 1678 rack_split_limited = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "split_limited", CTLFLAG_RD, 1682 &rack_split_limited, 1683 "Split allocations dropped due to limit"); 1684 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "persist_sends", CTLFLAG_RD, 1688 &rack_persists_sends, 1689 "Number of times we sent a persist probe"); 1690 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1692 SYSCTL_CHILDREN(rack_counters), 1693 OID_AUTO, "persist_acks", CTLFLAG_RD, 1694 &rack_persists_acks, 1695 "Number of times a persist probe was acked"); 1696 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_counters), 1699 OID_AUTO, "persist_loss", CTLFLAG_RD, 1700 &rack_persists_loss, 1701 "Number of times we detected a lost persist probe (no ack)"); 1702 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_counters), 1705 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1706 &rack_persists_lost_ends, 1707 "Number of lost persist probes (no ack) where the run ended with a PERSIST abort"); 1708 #ifdef INVARIANTS 1709 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1711 SYSCTL_CHILDREN(rack_counters), 1712 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1713 &rack_adjust_map_bw, 1714 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1715 #endif 1716 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1717 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1718 SYSCTL_CHILDREN(rack_counters), 1719 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1720 &rack_multi_single_eq, 1721 "Total number of acks represented via compressed acks"); 1722 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1723 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1724 SYSCTL_CHILDREN(rack_counters), 1725 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1726 &rack_proc_non_comp_ack, 1727 "Number of non-compressed acks that we processed"); 1728 1729 1730 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_counters), 1733 OID_AUTO, "sack_long", CTLFLAG_RD, 1734 &rack_sack_proc_all, 1735 "Total times we had to walk whole list for sack processing"); 1736 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters),
1739 OID_AUTO, "sack_restart", CTLFLAG_RD, 1740 &rack_sack_proc_restart, 1741 "Total times we had to walk whole list due to a restart"); 1742 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "sack_short", CTLFLAG_RD, 1746 &rack_sack_proc_short, 1747 "Total times we took shortcut for sack processing"); 1748 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_attack), 1751 OID_AUTO, "skipacked", CTLFLAG_RD, 1752 &rack_sack_skipped_acked, 1753 "Total number of times we skipped previously sacked"); 1754 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_attack), 1757 OID_AUTO, "ofsplit", CTLFLAG_RD, 1758 &rack_sack_splits, 1759 "Total number of times we did the old fashion tree split"); 1760 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_counters), 1763 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1764 &rack_input_idle_reduces, 1765 "Total number of idle reductions on input"); 1766 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_counters), 1769 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1770 &rack_collapsed_win_seen, 1771 "Total number of collapsed window events seen (where our window shrinks)"); 1772 1773 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1775 SYSCTL_CHILDREN(rack_counters), 1776 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1777 &rack_collapsed_win, 1778 "Total number of collapsed window events where we mark packets"); 1779 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1780 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1781 SYSCTL_CHILDREN(rack_counters), 1782 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1783 &rack_collapsed_win_rxt, 1784 "Total number of packets that were retransmitted"); 1785 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1786 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1787 SYSCTL_CHILDREN(rack_counters), 1788 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1789 &rack_collapsed_win_rxt_bytes, 1790 "Total number of bytes that were retransmitted"); 1791 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1792 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1793 SYSCTL_CHILDREN(rack_counters), 1794 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1795 &rack_try_scwnd, 1796 "Total number of scwnd attempts"); 1797 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1798 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1799 OID_AUTO, "outsize", CTLFLAG_RD, 1800 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1801 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1802 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1803 OID_AUTO, "opts", CTLFLAG_RD, 1804 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1805 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1806 SYSCTL_CHILDREN(rack_sysctl_root), 1807 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1808 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1809 } 1810 1811 static __inline int 1812 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1813 { 1814 if (SEQ_GEQ(b->r_start, a->r_start) && 1815 SEQ_LT(b->r_start, a->r_end)) { 1816 /* 1817 * The entry b is within the 
1818 * block a. i.e.: 1819 * a -- |-------------| 1820 * b -- |----| 1821 * <or> 1822 * b -- |------| 1823 * <or> 1824 * b -- |-----------| 1825 */ 1826 return (0); 1827 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1828 /* 1829 * b falls as the next 1830 * sequence block after a, so a 1831 * is said to be smaller than b. 1832 * i.e: 1833 * a -- |------| 1834 * b -- |--------| 1835 * or 1836 * b -- |-----| 1837 */ 1838 return (1); 1839 } 1840 /* 1841 * What's left is where a is 1842 * larger than b. i.e: 1843 * a -- |-------| 1844 * b -- |---| 1845 * or even possibly 1846 * b -- |--------------| 1847 */ 1848 return (-1); 1849 } 1850 1851 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1852 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1853 1854 static uint32_t 1855 rc_init_window(struct tcp_rack *rack) 1856 { 1857 uint32_t win; 1858 1859 if (rack->rc_init_win == 0) { 1860 /* 1861 * Nothing set by the user, use the system stack 1862 * default. 1863 */ 1864 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1865 } 1866 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1867 return (win); 1868 } 1869 1870 static uint64_t 1871 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1872 { 1873 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1874 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1875 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1876 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1877 else 1878 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1879 } 1880 1881 static uint64_t 1882 rack_get_bw(struct tcp_rack *rack) 1883 { 1884 if (rack->use_fixed_rate) { 1885 /* Return the fixed pacing rate */ 1886 return (rack_get_fixed_pacing_bw(rack)); 1887 } 1888 if (rack->r_ctl.gp_bw == 0) { 1889 /* 1890 * We have no b/w measurement yet; 1891 * if we have a user-set initial bw, 1892 * return it. If we don't have that and 1893 * we have an srtt, use the tcp IW (10) to 1894 * calculate a fictional b/w over the SRTT, 1895 * which is more or less a guess. Note 1896 * we deliberately don't use rack's own IW, 1897 * so that e.g. an IW of 30 does not make us 1898 * calculate a "huge" b/w. 1899 */ 1900 uint64_t bw, srtt; 1901 if (rack->r_ctl.init_rate) 1902 return (rack->r_ctl.init_rate); 1903 1904 /* Has the user set a max peak rate? */ 1905 #ifdef NETFLIX_PEAKRATE 1906 if (rack->rc_tp->t_maxpeakrate) 1907 return (rack->rc_tp->t_maxpeakrate); 1908 #endif 1909 /* Ok let's come up with the IW guess, if we have an srtt */ 1910 if (rack->rc_tp->t_srtt == 0) { 1911 /* 1912 * Go with the old pacing method, 1913 * i.e. burst mitigation only. 1914 */ 1915 return (0); 1916 } 1917 /* Ok let's get the initial TCP win (not rack's) */ 1918 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1919 srtt = (uint64_t)rack->rc_tp->t_srtt; 1920 bw *= (uint64_t)USECS_IN_SECOND; 1921 bw /= srtt; 1922 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1923 bw = rack->r_ctl.bw_rate_cap; 1924 return (bw); 1925 } else { 1926 uint64_t bw; 1927 1928 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1929 /* Averaging is done, we can return the value */ 1930 bw = rack->r_ctl.gp_bw; 1931 } else { 1932 /* Still doing the initial average, must calculate */ 1933 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1934 } 1935 #ifdef NETFLIX_PEAKRATE 1936 if ((rack->rc_tp->t_maxpeakrate) && 1937 (bw > rack->rc_tp->t_maxpeakrate)) { 1938 /* The user has set a peak rate to pace at, 1939 * don't allow us to pace faster than that.
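 * (Illustrative numbers only: if the averaged gp_bw worked out to
 * 5,000,000 bytes/sec but t_maxpeakrate is 2,000,000 bytes/sec, the
 * 2,000,000 value is what gets returned.)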
1940 */ 1941 return (rack->rc_tp->t_maxpeakrate); 1942 } 1943 #endif 1944 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1945 bw = rack->r_ctl.bw_rate_cap; 1946 return (bw); 1947 } 1948 } 1949 1950 static uint16_t 1951 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1952 { 1953 if (rack->use_fixed_rate) { 1954 return (100); 1955 } else if (rack->in_probe_rtt && (rsm == NULL)) 1956 return (rack->r_ctl.rack_per_of_gp_probertt); 1957 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1958 rack->r_ctl.rack_per_of_gp_rec)) { 1959 if (rsm) { 1960 /* a retransmission always use the recovery rate */ 1961 return (rack->r_ctl.rack_per_of_gp_rec); 1962 } else if (rack->rack_rec_nonrxt_use_cr) { 1963 /* Directed to use the configured rate */ 1964 goto configured_rate; 1965 } else if (rack->rack_no_prr && 1966 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1967 /* No PRR, lets just use the b/w estimate only */ 1968 return (100); 1969 } else { 1970 /* 1971 * Here we may have a non-retransmit but we 1972 * have no overrides, so just use the recovery 1973 * rate (prr is in effect). 1974 */ 1975 return (rack->r_ctl.rack_per_of_gp_rec); 1976 } 1977 } 1978 configured_rate: 1979 /* For the configured rate we look at our cwnd vs the ssthresh */ 1980 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1981 return (rack->r_ctl.rack_per_of_gp_ss); 1982 else 1983 return (rack->r_ctl.rack_per_of_gp_ca); 1984 } 1985 1986 static void 1987 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1988 { 1989 /* 1990 * Types of logs (mod value) 1991 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1992 * 2 = a dsack round begins, persist is reset to 16. 1993 * 3 = a dsack round ends 1994 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1995 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1996 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
1997 */ 1998 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1999 union tcp_log_stackspecific log; 2000 struct timeval tv; 2001 2002 memset(&log, 0, sizeof(log)); 2003 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2004 log.u_bbr.flex1 <<= 1; 2005 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2006 log.u_bbr.flex1 <<= 1; 2007 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2008 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2009 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2010 log.u_bbr.flex4 = flex4; 2011 log.u_bbr.flex5 = flex5; 2012 log.u_bbr.flex6 = flex6; 2013 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2014 log.u_bbr.flex8 = mod; 2015 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2016 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2017 &rack->rc_inp->inp_socket->so_rcv, 2018 &rack->rc_inp->inp_socket->so_snd, 2019 RACK_DSACK_HANDLING, 0, 2020 0, &log, false, &tv); 2021 } 2022 } 2023 2024 static void 2025 rack_log_hdwr_pacing(struct tcp_rack *rack, 2026 uint64_t rate, uint64_t hw_rate, int line, 2027 int error, uint16_t mod) 2028 { 2029 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2030 union tcp_log_stackspecific log; 2031 struct timeval tv; 2032 const struct ifnet *ifp; 2033 2034 memset(&log, 0, sizeof(log)); 2035 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2036 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2037 if (rack->r_ctl.crte) { 2038 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2039 } else if (rack->rc_inp->inp_route.ro_nh && 2040 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2041 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2042 } else 2043 ifp = NULL; 2044 if (ifp) { 2045 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2046 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2047 } 2048 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2049 log.u_bbr.bw_inuse = rate; 2050 log.u_bbr.flex5 = line; 2051 log.u_bbr.flex6 = error; 2052 log.u_bbr.flex7 = mod; 2053 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2054 log.u_bbr.flex8 = rack->use_fixed_rate; 2055 log.u_bbr.flex8 <<= 1; 2056 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2057 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2058 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2059 if (rack->r_ctl.crte) 2060 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2061 else 2062 log.u_bbr.cur_del_rate = 0; 2063 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2064 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2065 &rack->rc_inp->inp_socket->so_rcv, 2066 &rack->rc_inp->inp_socket->so_snd, 2067 BBR_LOG_HDWR_PACE, 0, 2068 0, &log, false, &tv); 2069 } 2070 } 2071 2072 static uint64_t 2073 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2074 { 2075 /* 2076 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
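 * As a worked example (illustrative values only): with bw = 1,000,000
 * bytes/sec and a gain of 120 returned by rack_get_output_gain(), bw_est
 * below becomes (1,000,000 * 120) / 100 = 1,200,000 bytes/sec, before the
 * RACK_MIN_BW floor and any hardware rate-cap checks are applied.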
2077 */ 2078 uint64_t bw_est, high_rate; 2079 uint64_t gain; 2080 2081 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2082 bw_est = bw * gain; 2083 bw_est /= (uint64_t)100; 2084 /* Never fall below the minimum (def 64kbps) */ 2085 if (bw_est < RACK_MIN_BW) 2086 bw_est = RACK_MIN_BW; 2087 if (rack->r_rack_hw_rate_caps) { 2088 /* Rate caps are in place */ 2089 if (rack->r_ctl.crte != NULL) { 2090 /* We have a hdwr rate already */ 2091 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2092 if (bw_est >= high_rate) { 2093 /* We are capping bw at the highest rate table entry */ 2094 rack_log_hdwr_pacing(rack, 2095 bw_est, high_rate, __LINE__, 2096 0, 3); 2097 bw_est = high_rate; 2098 if (capped) 2099 *capped = 1; 2100 } 2101 } else if ((rack->rack_hdrw_pacing == 0) && 2102 (rack->rack_hdw_pace_ena) && 2103 (rack->rack_attempt_hdwr_pace == 0) && 2104 (rack->rc_inp->inp_route.ro_nh != NULL) && 2105 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2106 /* 2107 * Special case, we have not yet attempted hardware 2108 * pacing, and yet we may, when we do, find out if we are 2109 * above the highest rate. We need to know the maxbw for the interface 2110 * in question (if it supports ratelimiting). We get back 2111 * a 0, if the interface is not found in the RL lists. 2112 */ 2113 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2114 if (high_rate) { 2115 /* Yep, we have a rate is it above this rate? */ 2116 if (bw_est > high_rate) { 2117 bw_est = high_rate; 2118 if (capped) 2119 *capped = 1; 2120 } 2121 } 2122 } 2123 } 2124 return (bw_est); 2125 } 2126 2127 static void 2128 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2129 { 2130 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2131 union tcp_log_stackspecific log; 2132 struct timeval tv; 2133 2134 if ((mod != 1) && (rack_verbose_logging == 0)) { 2135 /* 2136 * We get 3 values currently for mod 2137 * 1 - We are retransmitting and this tells the reason. 2138 * 2 - We are clearing a dup-ack count. 2139 * 3 - We are incrementing a dup-ack count. 2140 * 2141 * The clear/increment are only logged 2142 * if you have BBverbose on. 
2143 */ 2144 return; 2145 } 2146 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2147 log.u_bbr.flex1 = tsused; 2148 log.u_bbr.flex2 = thresh; 2149 log.u_bbr.flex3 = rsm->r_flags; 2150 log.u_bbr.flex4 = rsm->r_dupack; 2151 log.u_bbr.flex5 = rsm->r_start; 2152 log.u_bbr.flex6 = rsm->r_end; 2153 log.u_bbr.flex8 = mod; 2154 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2155 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2156 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2157 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2158 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2159 log.u_bbr.pacing_gain = rack->r_must_retran; 2160 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2161 &rack->rc_inp->inp_socket->so_rcv, 2162 &rack->rc_inp->inp_socket->so_snd, 2163 BBR_LOG_SETTINGS_CHG, 0, 2164 0, &log, false, &tv); 2165 } 2166 } 2167 2168 static void 2169 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2170 { 2171 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2172 union tcp_log_stackspecific log; 2173 struct timeval tv; 2174 2175 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2176 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2177 log.u_bbr.flex2 = to; 2178 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2179 log.u_bbr.flex4 = slot; 2180 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2181 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2182 log.u_bbr.flex7 = rack->rc_in_persist; 2183 log.u_bbr.flex8 = which; 2184 if (rack->rack_no_prr) 2185 log.u_bbr.pkts_out = 0; 2186 else 2187 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2189 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2190 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2191 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2192 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2193 log.u_bbr.pacing_gain = rack->r_must_retran; 2194 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2195 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2196 log.u_bbr.lost = rack_rto_min; 2197 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2198 &rack->rc_inp->inp_socket->so_rcv, 2199 &rack->rc_inp->inp_socket->so_snd, 2200 BBR_LOG_TIMERSTAR, 0, 2201 0, &log, false, &tv); 2202 } 2203 } 2204 2205 static void 2206 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2207 { 2208 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2209 union tcp_log_stackspecific log; 2210 struct timeval tv; 2211 2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2214 log.u_bbr.flex8 = to_num; 2215 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2216 log.u_bbr.flex2 = rack->rc_rack_rtt; 2217 if (rsm == NULL) 2218 log.u_bbr.flex3 = 0; 2219 else 2220 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2221 if (rack->rack_no_prr) 2222 log.u_bbr.flex5 = 0; 2223 else 2224 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2225 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2226 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2227 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2228 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2229 log.u_bbr.pacing_gain = rack->r_must_retran; 2230 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2231 &rack->rc_inp->inp_socket->so_rcv, 2232 &rack->rc_inp->inp_socket->so_snd, 2233 BBR_LOG_RTO, 0, 2234 0, &log, false, &tv); 2235 } 2236 } 2237 2238 static void 2239 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2240 struct rack_sendmap *prev, 2241 struct rack_sendmap *rsm, 2242 
struct rack_sendmap *next, 2243 int flag, uint32_t th_ack, int line) 2244 { 2245 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2246 union tcp_log_stackspecific log; 2247 struct timeval tv; 2248 2249 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2250 log.u_bbr.flex8 = flag; 2251 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2252 log.u_bbr.cur_del_rate = (uint64_t)prev; 2253 log.u_bbr.delRate = (uint64_t)rsm; 2254 log.u_bbr.rttProp = (uint64_t)next; 2255 log.u_bbr.flex7 = 0; 2256 if (prev) { 2257 log.u_bbr.flex1 = prev->r_start; 2258 log.u_bbr.flex2 = prev->r_end; 2259 log.u_bbr.flex7 |= 0x4; 2260 } 2261 if (rsm) { 2262 log.u_bbr.flex3 = rsm->r_start; 2263 log.u_bbr.flex4 = rsm->r_end; 2264 log.u_bbr.flex7 |= 0x2; 2265 } 2266 if (next) { 2267 log.u_bbr.flex5 = next->r_start; 2268 log.u_bbr.flex6 = next->r_end; 2269 log.u_bbr.flex7 |= 0x1; 2270 } 2271 log.u_bbr.applimited = line; 2272 log.u_bbr.pkts_out = th_ack; 2273 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2274 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2275 if (rack->rack_no_prr) 2276 log.u_bbr.lost = 0; 2277 else 2278 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2279 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2280 &rack->rc_inp->inp_socket->so_rcv, 2281 &rack->rc_inp->inp_socket->so_snd, 2282 TCP_LOG_MAPCHG, 0, 2283 0, &log, false, &tv); 2284 } 2285 } 2286 2287 static void 2288 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2289 struct rack_sendmap *rsm, int conf) 2290 { 2291 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2292 union tcp_log_stackspecific log; 2293 struct timeval tv; 2294 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2295 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2296 log.u_bbr.flex1 = t; 2297 log.u_bbr.flex2 = len; 2298 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2299 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2300 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2301 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2302 log.u_bbr.flex7 = conf; 2303 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2304 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2305 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2306 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2307 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2308 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2309 if (rsm) { 2310 log.u_bbr.pkt_epoch = rsm->r_start; 2311 log.u_bbr.lost = rsm->r_end; 2312 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2313 /* We loose any upper of the 24 bits */ 2314 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2315 } else { 2316 /* Its a SYN */ 2317 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2318 log.u_bbr.lost = 0; 2319 log.u_bbr.cwnd_gain = 0; 2320 log.u_bbr.pacing_gain = 0; 2321 } 2322 /* Write out general bits of interest rrs here */ 2323 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2324 log.u_bbr.use_lt_bw <<= 1; 2325 log.u_bbr.use_lt_bw |= rack->forced_ack; 2326 log.u_bbr.use_lt_bw <<= 1; 2327 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2328 log.u_bbr.use_lt_bw <<= 1; 2329 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2330 log.u_bbr.use_lt_bw <<= 1; 2331 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2332 log.u_bbr.use_lt_bw <<= 1; 2333 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2334 log.u_bbr.use_lt_bw <<= 1; 2335 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2336 log.u_bbr.use_lt_bw <<= 1; 2337 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2338 log.u_bbr.applimited = 
rack->r_ctl.rc_target_probertt_flight; 2339 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2340 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2341 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2342 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2343 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2344 log.u_bbr.bw_inuse <<= 32; 2345 if (rsm) 2346 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2347 TCP_LOG_EVENTP(tp, NULL, 2348 &rack->rc_inp->inp_socket->so_rcv, 2349 &rack->rc_inp->inp_socket->so_snd, 2350 BBR_LOG_BBRRTT, 0, 2351 0, &log, false, &tv); 2352 2353 2354 } 2355 } 2356 2357 static void 2358 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2359 { 2360 /* 2361 * Log the rtt sample we are 2362 * applying to the srtt algorithm in 2363 * useconds. 2364 */ 2365 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2366 union tcp_log_stackspecific log; 2367 struct timeval tv; 2368 2369 /* Convert our ms to a microsecond */ 2370 memset(&log, 0, sizeof(log)); 2371 log.u_bbr.flex1 = rtt; 2372 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2373 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2374 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2375 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2376 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2377 log.u_bbr.flex7 = 1; 2378 log.u_bbr.flex8 = rack->sack_attack_disable; 2379 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2380 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2381 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2382 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2383 log.u_bbr.pacing_gain = rack->r_must_retran; 2384 /* 2385 * We capture in delRate the upper 32 bits as 2386 * the confidence level we had declared, and the 2387 * lower 32 bits as the actual RTT using the arrival 2388 * timestamp. 
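 * (So a log post-processor can recover them as confidence = delRate >> 32
 * and the microsecond RTT as delRate & 0xffffffff.)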
2389 */ 2390 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2391 log.u_bbr.delRate <<= 32; 2392 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2393 /* Lets capture all the things that make up t_rtxcur */ 2394 log.u_bbr.applimited = rack_rto_min; 2395 log.u_bbr.epoch = rack_rto_max; 2396 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2397 log.u_bbr.lost = rack_rto_min; 2398 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2399 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2400 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2401 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2402 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2403 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2404 &rack->rc_inp->inp_socket->so_rcv, 2405 &rack->rc_inp->inp_socket->so_snd, 2406 TCP_LOG_RTT, 0, 2407 0, &log, false, &tv); 2408 } 2409 } 2410 2411 static void 2412 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2413 { 2414 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2415 union tcp_log_stackspecific log; 2416 struct timeval tv; 2417 2418 /* Convert our ms to a microsecond */ 2419 memset(&log, 0, sizeof(log)); 2420 log.u_bbr.flex1 = rtt; 2421 log.u_bbr.flex2 = send_time; 2422 log.u_bbr.flex3 = ack_time; 2423 log.u_bbr.flex4 = where; 2424 log.u_bbr.flex7 = 2; 2425 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2426 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2427 &rack->rc_inp->inp_socket->so_rcv, 2428 &rack->rc_inp->inp_socket->so_snd, 2429 TCP_LOG_RTT, 0, 2430 0, &log, false, &tv); 2431 } 2432 } 2433 2434 2435 2436 static inline void 2437 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2438 { 2439 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2440 union tcp_log_stackspecific log; 2441 struct timeval tv; 2442 2443 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2444 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2445 log.u_bbr.flex1 = line; 2446 log.u_bbr.flex2 = tick; 2447 log.u_bbr.flex3 = tp->t_maxunacktime; 2448 log.u_bbr.flex4 = tp->t_acktime; 2449 log.u_bbr.flex8 = event; 2450 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2451 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2452 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2453 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2454 log.u_bbr.pacing_gain = rack->r_must_retran; 2455 TCP_LOG_EVENTP(tp, NULL, 2456 &rack->rc_inp->inp_socket->so_rcv, 2457 &rack->rc_inp->inp_socket->so_snd, 2458 BBR_LOG_PROGRESS, 0, 2459 0, &log, false, &tv); 2460 } 2461 } 2462 2463 static void 2464 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2465 { 2466 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2467 union tcp_log_stackspecific log; 2468 2469 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2470 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2471 log.u_bbr.flex1 = slot; 2472 if (rack->rack_no_prr) 2473 log.u_bbr.flex2 = 0; 2474 else 2475 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2476 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2477 log.u_bbr.flex8 = rack->rc_in_persist; 2478 log.u_bbr.timeStamp = cts; 2479 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2480 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2481 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2482 log.u_bbr.pacing_gain = rack->r_must_retran; 2483 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2484 &rack->rc_inp->inp_socket->so_rcv, 
2485 &rack->rc_inp->inp_socket->so_snd, 2486 BBR_LOG_BBRSND, 0, 2487 0, &log, false, tv); 2488 } 2489 } 2490 2491 static void 2492 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2493 { 2494 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2495 union tcp_log_stackspecific log; 2496 struct timeval tv; 2497 2498 memset(&log, 0, sizeof(log)); 2499 log.u_bbr.flex1 = did_out; 2500 log.u_bbr.flex2 = nxt_pkt; 2501 log.u_bbr.flex3 = way_out; 2502 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2503 if (rack->rack_no_prr) 2504 log.u_bbr.flex5 = 0; 2505 else 2506 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2507 log.u_bbr.flex6 = nsegs; 2508 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2509 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2510 log.u_bbr.flex7 <<= 1; 2511 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2512 log.u_bbr.flex7 <<= 1; 2513 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2514 log.u_bbr.flex8 = rack->rc_in_persist; 2515 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2516 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2517 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2518 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2519 log.u_bbr.use_lt_bw <<= 1; 2520 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2521 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2522 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2523 log.u_bbr.pacing_gain = rack->r_must_retran; 2524 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2525 &rack->rc_inp->inp_socket->so_rcv, 2526 &rack->rc_inp->inp_socket->so_snd, 2527 BBR_LOG_DOSEG_DONE, 0, 2528 0, &log, false, &tv); 2529 } 2530 } 2531 2532 static void 2533 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2534 { 2535 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2536 union tcp_log_stackspecific log; 2537 struct timeval tv; 2538 2539 memset(&log, 0, sizeof(log)); 2540 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2541 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2542 log.u_bbr.flex4 = arg1; 2543 log.u_bbr.flex5 = arg2; 2544 log.u_bbr.flex6 = arg3; 2545 log.u_bbr.flex8 = frm; 2546 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2547 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2548 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2549 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2550 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2551 log.u_bbr.pacing_gain = rack->r_must_retran; 2552 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 2553 &tptosocket(tp)->so_snd, 2554 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 2555 } 2556 } 2557 2558 static void 2559 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2560 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2561 { 2562 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2563 union tcp_log_stackspecific log; 2564 struct timeval tv; 2565 2566 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2567 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2568 log.u_bbr.flex1 = slot; 2569 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2570 log.u_bbr.flex4 = reason; 2571 if (rack->rack_no_prr) 2572 log.u_bbr.flex5 = 0; 2573 else 2574 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2575 log.u_bbr.flex7 = hpts_calling; 2576 log.u_bbr.flex8 = rack->rc_in_persist; 2577 log.u_bbr.lt_epoch = cwnd_to_use; 2578 log.u_bbr.timeStamp = 
tcp_get_usecs(&tv); 2579 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2580 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2581 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2582 log.u_bbr.pacing_gain = rack->r_must_retran; 2583 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2584 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2585 &rack->rc_inp->inp_socket->so_rcv, 2586 &rack->rc_inp->inp_socket->so_snd, 2587 BBR_LOG_JUSTRET, 0, 2588 tlen, &log, false, &tv); 2589 } 2590 } 2591 2592 static void 2593 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2594 struct timeval *tv, uint32_t flags_on_entry) 2595 { 2596 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2597 union tcp_log_stackspecific log; 2598 2599 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2600 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2601 log.u_bbr.flex1 = line; 2602 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2603 log.u_bbr.flex3 = flags_on_entry; 2604 log.u_bbr.flex4 = us_cts; 2605 if (rack->rack_no_prr) 2606 log.u_bbr.flex5 = 0; 2607 else 2608 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2609 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2610 log.u_bbr.flex7 = hpts_removed; 2611 log.u_bbr.flex8 = 1; 2612 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2613 log.u_bbr.timeStamp = us_cts; 2614 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2615 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2616 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2617 log.u_bbr.pacing_gain = rack->r_must_retran; 2618 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2619 &rack->rc_inp->inp_socket->so_rcv, 2620 &rack->rc_inp->inp_socket->so_snd, 2621 BBR_LOG_TIMERCANC, 0, 2622 0, &log, false, tv); 2623 } 2624 } 2625 2626 static void 2627 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2628 uint32_t flex1, uint32_t flex2, 2629 uint32_t flex3, uint32_t flex4, 2630 uint32_t flex5, uint32_t flex6, 2631 uint16_t flex7, uint8_t mod) 2632 { 2633 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2634 union tcp_log_stackspecific log; 2635 struct timeval tv; 2636 2637 if (mod == 1) { 2638 /* No you can't use 1, its for the real to cancel */ 2639 return; 2640 } 2641 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2642 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2643 log.u_bbr.flex1 = flex1; 2644 log.u_bbr.flex2 = flex2; 2645 log.u_bbr.flex3 = flex3; 2646 log.u_bbr.flex4 = flex4; 2647 log.u_bbr.flex5 = flex5; 2648 log.u_bbr.flex6 = flex6; 2649 log.u_bbr.flex7 = flex7; 2650 log.u_bbr.flex8 = mod; 2651 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2652 &rack->rc_inp->inp_socket->so_rcv, 2653 &rack->rc_inp->inp_socket->so_snd, 2654 BBR_LOG_TIMERCANC, 0, 2655 0, &log, false, &tv); 2656 } 2657 } 2658 2659 static void 2660 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2661 { 2662 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2663 union tcp_log_stackspecific log; 2664 struct timeval tv; 2665 2666 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2667 log.u_bbr.flex1 = timers; 2668 log.u_bbr.flex2 = ret; 2669 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2670 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2671 log.u_bbr.flex5 = cts; 2672 if (rack->rack_no_prr) 2673 log.u_bbr.flex6 = 0; 2674 else 2675 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2676 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2677 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2678 log.u_bbr.pacing_gain = rack->r_must_retran; 2679 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2680 
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2681 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2682 &rack->rc_inp->inp_socket->so_rcv, 2683 &rack->rc_inp->inp_socket->so_snd, 2684 BBR_LOG_TO_PROCESS, 0, 2685 0, &log, false, &tv); 2686 } 2687 } 2688 2689 static void 2690 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2691 { 2692 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2693 union tcp_log_stackspecific log; 2694 struct timeval tv; 2695 2696 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2697 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2698 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2699 if (rack->rack_no_prr) 2700 log.u_bbr.flex3 = 0; 2701 else 2702 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2703 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2704 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2705 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2706 log.u_bbr.flex7 = line; 2707 log.u_bbr.flex8 = frm; 2708 log.u_bbr.pkts_out = orig_cwnd; 2709 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2710 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2711 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2712 log.u_bbr.use_lt_bw <<= 1; 2713 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2714 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2715 &rack->rc_inp->inp_socket->so_rcv, 2716 &rack->rc_inp->inp_socket->so_snd, 2717 BBR_LOG_BBRUPD, 0, 2718 0, &log, false, &tv); 2719 } 2720 } 2721 2722 #ifdef NETFLIX_EXP_DETECTION 2723 static void 2724 rack_log_sad(struct tcp_rack *rack, int event) 2725 { 2726 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2727 union tcp_log_stackspecific log; 2728 struct timeval tv; 2729 2730 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2731 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2732 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2733 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2734 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2735 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2736 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2737 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2738 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2739 log.u_bbr.lt_epoch |= rack->do_detection; 2740 log.u_bbr.applimited = tcp_map_minimum; 2741 log.u_bbr.flex7 = rack->sack_attack_disable; 2742 log.u_bbr.flex8 = event; 2743 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2744 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2745 log.u_bbr.delivered = tcp_sad_decay_val; 2746 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2747 &rack->rc_inp->inp_socket->so_rcv, 2748 &rack->rc_inp->inp_socket->so_snd, 2749 TCP_SAD_DETECTION, 0, 2750 0, &log, false, &tv); 2751 } 2752 } 2753 #endif 2754 2755 static void 2756 rack_counter_destroy(void) 2757 { 2758 counter_u64_free(rack_fto_send); 2759 counter_u64_free(rack_fto_rsm_send); 2760 counter_u64_free(rack_nfto_resend); 2761 counter_u64_free(rack_hw_pace_init_fail); 2762 counter_u64_free(rack_hw_pace_lost); 2763 counter_u64_free(rack_non_fto_send); 2764 counter_u64_free(rack_extended_rfo); 2765 counter_u64_free(rack_ack_total); 2766 counter_u64_free(rack_express_sack); 2767 counter_u64_free(rack_sack_total); 2768 counter_u64_free(rack_move_none); 2769 counter_u64_free(rack_move_some); 2770 counter_u64_free(rack_sack_attacks_detected); 2771 counter_u64_free(rack_sack_attacks_reversed); 2772 counter_u64_free(rack_sack_used_next_merge); 2773 counter_u64_free(rack_sack_used_prev_merge); 2774 counter_u64_free(rack_tlp_tot); 2775 counter_u64_free(rack_tlp_newdata); 2776 counter_u64_free(rack_tlp_retran); 2777 
counter_u64_free(rack_tlp_retran_bytes); 2778 counter_u64_free(rack_to_tot); 2779 counter_u64_free(rack_saw_enobuf); 2780 counter_u64_free(rack_saw_enobuf_hw); 2781 counter_u64_free(rack_saw_enetunreach); 2782 counter_u64_free(rack_hot_alloc); 2783 counter_u64_free(rack_to_alloc); 2784 counter_u64_free(rack_to_alloc_hard); 2785 counter_u64_free(rack_to_alloc_emerg); 2786 counter_u64_free(rack_to_alloc_limited); 2787 counter_u64_free(rack_alloc_limited_conns); 2788 counter_u64_free(rack_split_limited); 2789 counter_u64_free(rack_multi_single_eq); 2790 counter_u64_free(rack_proc_non_comp_ack); 2791 counter_u64_free(rack_sack_proc_all); 2792 counter_u64_free(rack_sack_proc_restart); 2793 counter_u64_free(rack_sack_proc_short); 2794 counter_u64_free(rack_sack_skipped_acked); 2795 counter_u64_free(rack_sack_splits); 2796 counter_u64_free(rack_input_idle_reduces); 2797 counter_u64_free(rack_collapsed_win); 2798 counter_u64_free(rack_collapsed_win_rxt); 2799 counter_u64_free(rack_collapsed_win_rxt_bytes); 2800 counter_u64_free(rack_collapsed_win_seen); 2801 counter_u64_free(rack_try_scwnd); 2802 counter_u64_free(rack_persists_sends); 2803 counter_u64_free(rack_persists_acks); 2804 counter_u64_free(rack_persists_loss); 2805 counter_u64_free(rack_persists_lost_ends); 2806 #ifdef INVARIANTS 2807 counter_u64_free(rack_adjust_map_bw); 2808 #endif 2809 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2810 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2811 } 2812 2813 static struct rack_sendmap * 2814 rack_alloc(struct tcp_rack *rack) 2815 { 2816 struct rack_sendmap *rsm; 2817 2818 /* 2819 * First get the top of the list; in 2820 * theory it is the "hottest" rsm we have, 2821 * possibly just freed by ack processing. 2822 */ 2823 if (rack->rc_free_cnt > rack_free_cache) { 2824 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2825 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2826 counter_u64_add(rack_hot_alloc, 1); 2827 rack->rc_free_cnt--; 2828 return (rsm); 2829 } 2830 /* 2831 * Once we get under our free cache we probably 2832 * no longer have a "hot" one available. Let's 2833 * get one from UMA. 2834 */ 2835 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2836 if (rsm) { 2837 rack->r_ctl.rc_num_maps_alloced++; 2838 counter_u64_add(rack_to_alloc, 1); 2839 return (rsm); 2840 } 2841 /* 2842 * Dig into our aux rsm's (the last two) since 2843 * UMA failed to get us one.
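 * (These are the entries that rack_free_trim() deliberately leaves on the
 * free list, so a transient M_NOWAIT failure from UMA does not leave us
 * without any sendmap entry at all.)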
2844 */ 2845 if (rack->rc_free_cnt) { 2846 counter_u64_add(rack_to_alloc_emerg, 1); 2847 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2848 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2849 rack->rc_free_cnt--; 2850 return (rsm); 2851 } 2852 return (NULL); 2853 } 2854 2855 static struct rack_sendmap * 2856 rack_alloc_full_limit(struct tcp_rack *rack) 2857 { 2858 if ((V_tcp_map_entries_limit > 0) && 2859 (rack->do_detection == 0) && 2860 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2861 counter_u64_add(rack_to_alloc_limited, 1); 2862 if (!rack->alloc_limit_reported) { 2863 rack->alloc_limit_reported = 1; 2864 counter_u64_add(rack_alloc_limited_conns, 1); 2865 } 2866 return (NULL); 2867 } 2868 return (rack_alloc(rack)); 2869 } 2870 2871 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2872 static struct rack_sendmap * 2873 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2874 { 2875 struct rack_sendmap *rsm; 2876 2877 if (limit_type) { 2878 /* currently there is only one limit type */ 2879 if (V_tcp_map_split_limit > 0 && 2880 (rack->do_detection == 0) && 2881 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2882 counter_u64_add(rack_split_limited, 1); 2883 if (!rack->alloc_limit_reported) { 2884 rack->alloc_limit_reported = 1; 2885 counter_u64_add(rack_alloc_limited_conns, 1); 2886 } 2887 return (NULL); 2888 } 2889 } 2890 2891 /* allocate and mark in the limit type, if set */ 2892 rsm = rack_alloc(rack); 2893 if (rsm != NULL && limit_type) { 2894 rsm->r_limit_type = limit_type; 2895 rack->r_ctl.rc_num_split_allocs++; 2896 } 2897 return (rsm); 2898 } 2899 2900 static void 2901 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2902 { 2903 if (rsm->r_flags & RACK_APP_LIMITED) { 2904 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2905 rack->r_ctl.rc_app_limited_cnt--; 2906 } 2907 } 2908 if (rsm->r_limit_type) { 2909 /* currently there is only one limit type */ 2910 rack->r_ctl.rc_num_split_allocs--; 2911 } 2912 if (rsm == rack->r_ctl.rc_first_appl) { 2913 if (rack->r_ctl.rc_app_limited_cnt == 0) 2914 rack->r_ctl.rc_first_appl = NULL; 2915 else { 2916 /* Follow the next one out */ 2917 struct rack_sendmap fe; 2918 2919 fe.r_start = rsm->r_nseq_appl; 2920 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2921 } 2922 } 2923 if (rsm == rack->r_ctl.rc_resend) 2924 rack->r_ctl.rc_resend = NULL; 2925 if (rsm == rack->r_ctl.rc_end_appl) 2926 rack->r_ctl.rc_end_appl = NULL; 2927 if (rack->r_ctl.rc_tlpsend == rsm) 2928 rack->r_ctl.rc_tlpsend = NULL; 2929 if (rack->r_ctl.rc_sacklast == rsm) 2930 rack->r_ctl.rc_sacklast = NULL; 2931 memset(rsm, 0, sizeof(struct rack_sendmap)); 2932 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2933 rack->rc_free_cnt++; 2934 } 2935 2936 static void 2937 rack_free_trim(struct tcp_rack *rack) 2938 { 2939 struct rack_sendmap *rsm; 2940 2941 /* 2942 * Free up all the tail entries until 2943 * we get our list down to the limit. 
2944 */ 2945 while (rack->rc_free_cnt > rack_free_cache) { 2946 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2947 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2948 rack->rc_free_cnt--; 2949 uma_zfree(rack_zone, rsm); 2950 } 2951 } 2952 2953 2954 static uint32_t 2955 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2956 { 2957 uint64_t srtt, bw, len, tim; 2958 uint32_t segsiz, def_len, minl; 2959 2960 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2961 def_len = rack_def_data_window * segsiz; 2962 if (rack->rc_gp_filled == 0) { 2963 /* 2964 * We have no measurement (IW is in flight?) so 2965 * we can only guess using our data_window sysctl 2966 * value (usually 20MSS). 2967 */ 2968 return (def_len); 2969 } 2970 /* 2971 * Now we have a number of factors to consider. 2972 * 2973 * 1) We have a desired BDP which is usually 2974 * at least 2. 2975 * 2) We have a minimum number of rtt's, usually 1 SRTT, 2976 * but we allow it to be more. 2977 * 3) We want to make sure a measurement lasts N useconds (if 2978 * we have set rack_min_measure_usec). 2979 * 2980 * We handle the first concern here by trying to create a data 2981 * window of max(rack_def_data_window, DesiredBDP). The 2982 * second concern we handle by not letting the measurement 2983 * window end normally until at least the required SRTTs 2984 * have gone by, which is done further below in 2985 * rack_enough_for_measurement(). Finally the third concern 2986 * we also handle here by calculating how long that time 2987 * would take at the current BW and then returning the 2988 * max of our first calculation and that length. Note 2989 * that if rack_min_measure_usec is 0, we don't deal 2990 * with concern 3. Also for both concern 1 and 3 an 2991 * application limited period could end the measurement 2992 * earlier. 2993 * 2994 * So let's calculate the BDP with the "known" b/w using 2995 * the SRTT as our rtt and then multiply it by the 2996 * goal. 2997 */ 2998 bw = rack_get_bw(rack); 2999 srtt = (uint64_t)tp->t_srtt; 3000 len = bw * srtt; 3001 len /= (uint64_t)HPTS_USEC_IN_SEC; 3002 len *= max(1, rack_goal_bdp); 3003 /* Now we need to round up to the nearest MSS */ 3004 len = roundup(len, segsiz); 3005 if (rack_min_measure_usec) { 3006 /* Now calculate our min length for this b/w */ 3007 tim = rack_min_measure_usec; 3008 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3009 if (minl == 0) 3010 minl = 1; 3011 minl = roundup(minl, segsiz); 3012 if (len < minl) 3013 len = minl; 3014 } 3015 /* 3016 * Now if we have a very small window we want 3017 * to attempt to get the window that is 3018 * as small as possible. This happens on 3019 * low b/w connections and we don't want to 3020 * span huge numbers of rtt's between measurements. 3021 * 3022 * We basically include 2 over our "MIN window" so 3023 * that the measurement can be shortened (possibly) by 3024 * an ack'ed packet. 3025 */ 3026 if (len < def_len) 3027 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3028 else 3029 return (max((uint32_t)len, def_len)); 3030 3031 } 3032 3033 static int 3034 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3035 { 3036 uint32_t tim, srtts, segsiz; 3037 3038 /* 3039 * Has enough time passed for the GP measurement to be valid?
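 * Summarizing the checks below: we accept immediately if everything
 * outstanding has been acked or if we have reached the first app-limited
 * send point; we reject if not enough bytes beyond gput_seq have been
 * acked; otherwise we require that at least rack_min_srtts worth of SRTT
 * time has elapsed since the measurement started.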
3040 */ 3041 if ((tp->snd_max == tp->snd_una) || 3042 (th_ack == tp->snd_max)){ 3043 /* All is acked */ 3044 *quality = RACK_QUALITY_ALLACKED; 3045 return (1); 3046 } 3047 if (SEQ_LT(th_ack, tp->gput_seq)) { 3048 /* Not enough bytes yet */ 3049 return (0); 3050 } 3051 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3052 if (SEQ_LT(th_ack, tp->gput_ack) && 3053 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3054 /* Not enough bytes yet */ 3055 return (0); 3056 } 3057 if (rack->r_ctl.rc_first_appl && 3058 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3059 /* 3060 * We are up to the app limited send point 3061 * we have to measure irrespective of the time.. 3062 */ 3063 *quality = RACK_QUALITY_APPLIMITED; 3064 return (1); 3065 } 3066 /* Now what about time? */ 3067 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3068 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3069 if (tim >= srtts) { 3070 *quality = RACK_QUALITY_HIGH; 3071 return (1); 3072 } 3073 /* Nope not even a full SRTT has passed */ 3074 return (0); 3075 } 3076 3077 static void 3078 rack_log_timely(struct tcp_rack *rack, 3079 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3080 uint64_t up_bnd, int line, uint8_t method) 3081 { 3082 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3083 union tcp_log_stackspecific log; 3084 struct timeval tv; 3085 3086 memset(&log, 0, sizeof(log)); 3087 log.u_bbr.flex1 = logged; 3088 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3089 log.u_bbr.flex2 <<= 4; 3090 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3091 log.u_bbr.flex2 <<= 4; 3092 log.u_bbr.flex2 |= rack->rc_gp_incr; 3093 log.u_bbr.flex2 <<= 4; 3094 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3095 log.u_bbr.flex3 = rack->rc_gp_incr; 3096 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3097 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3098 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3099 log.u_bbr.flex7 = rack->rc_gp_bwred; 3100 log.u_bbr.flex8 = method; 3101 log.u_bbr.cur_del_rate = cur_bw; 3102 log.u_bbr.delRate = low_bnd; 3103 log.u_bbr.bw_inuse = up_bnd; 3104 log.u_bbr.rttProp = rack_get_bw(rack); 3105 log.u_bbr.pkt_epoch = line; 3106 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3107 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3108 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3109 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3110 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3111 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3112 log.u_bbr.cwnd_gain <<= 1; 3113 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3114 log.u_bbr.cwnd_gain <<= 1; 3115 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3116 log.u_bbr.cwnd_gain <<= 1; 3117 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3118 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3119 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3120 &rack->rc_inp->inp_socket->so_rcv, 3121 &rack->rc_inp->inp_socket->so_snd, 3122 TCP_TIMELY_WORK, 0, 3123 0, &log, false, &tv); 3124 } 3125 } 3126 3127 static int 3128 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3129 { 3130 /* 3131 * Before we increase we need to know if 3132 * the estimate just made was less than 3133 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3134 * 3135 * If we already are pacing at a fast enough 3136 * rate to push us faster there is no sense of 3137 * increasing. 3138 * 3139 * We first caculate our actual pacing rate (ss or ca multiplier 3140 * times our cur_bw). 
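* For example, a 120 (percent) multiplier against a 10 Mbps cur_bw
* works out to an actual pacing rate of 12 Mbps.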
3141 * 3142 * Then we take the last measured rate and multiply by our 3143 * maximum pacing overage to give us a max allowable rate. 3144 * 3145 * If our act_rate is smaller than our max_allowable rate 3146 * then we should increase. Else we should hold steady. 3147 * 3148 */ 3149 uint64_t act_rate, max_allow_rate; 3150 3151 if (rack_timely_no_stopping) 3152 return (1); 3153 3154 if ((cur_bw == 0) || (last_bw_est == 0)) { 3155 /* 3156 * Initial startup case or 3157 * everything is acked case. 3158 */ 3159 rack_log_timely(rack, mult, cur_bw, 0, 0, 3160 __LINE__, 9); 3161 return (1); 3162 } 3163 if (mult <= 100) { 3164 /* 3165 * We can always pace at or slightly above our rate. 3166 */ 3167 rack_log_timely(rack, mult, cur_bw, 0, 0, 3168 __LINE__, 9); 3169 return (1); 3170 } 3171 act_rate = cur_bw * (uint64_t)mult; 3172 act_rate /= 100; 3173 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3174 max_allow_rate /= 100; 3175 if (act_rate < max_allow_rate) { 3176 /* 3177 * Here the rate we are actually pacing at 3178 * is smaller than 10% above our last measurement. 3179 * This means we are pacing below what we would 3180 * like to try to achieve (plus some wiggle room). 3181 */ 3182 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3183 __LINE__, 9); 3184 return (1); 3185 } else { 3186 /* 3187 * Here we are already pacing at least rack_max_per_above (10%) 3188 * above what we are getting back. This indicates most likely 3189 * that we are being limited (cwnd/rwnd/app) and can't 3190 * get any more b/w. There is no sense in trying to 3191 * raise the pacing rate; it's not speeding us up 3192 * and we already are pacing faster than we are getting. 3193 */ 3194 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3195 __LINE__, 8); 3196 return (0); 3197 } 3198 } 3199 3200 static void 3201 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3202 { 3203 /* 3204 * When we drag bottom, we want to ensure 3205 * that no multiplier is below 1.0; if one is, 3206 * we restore it to at least that. 3207 */ 3208 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3209 /* This is unlikely; we usually do not touch recovery */ 3210 rack->r_ctl.rack_per_of_gp_rec = 100; 3211 } 3212 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3213 rack->r_ctl.rack_per_of_gp_ca = 100; 3214 } 3215 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3216 rack->r_ctl.rack_per_of_gp_ss = 100; 3217 } 3218 } 3219 3220 static void 3221 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3222 { 3223 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3224 rack->r_ctl.rack_per_of_gp_ca = 100; 3225 } 3226 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3227 rack->r_ctl.rack_per_of_gp_ss = 100; 3228 } 3229 } 3230 3231 static void 3232 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3233 { 3234 int32_t calc, logged, plus; 3235 3236 logged = 0; 3237 3238 if (override) { 3239 /* 3240 * override is passed when we are 3241 * losing b/w and making one last 3242 * gasp at trying not to lose out 3243 * to a new-reno flow. 3244 */ 3245 goto extra_boost; 3246 } 3247 /* In classic timely we boost by 5x if we have 5 increases in a row; let's not */ 3248 if (rack->rc_gp_incr && 3249 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3250 /* 3251 * Reset and get 5 strokes more before the boost. Note 3252 * that the count is 0 based so we have to add one.
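* When the boost does fire, the single bump applied below is
* rack_gp_increase_per * RACK_TIMELY_CNT_BOOST percentage points.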
3253 */ 3254 extra_boost: 3255 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3256 rack->rc_gp_timely_inc_cnt = 0; 3257 } else 3258 plus = (uint32_t)rack_gp_increase_per; 3259 /* Must be at least 1% increase for true timely increases */ 3260 if ((plus < 1) && 3261 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3262 plus = 1; 3263 if (rack->rc_gp_saw_rec && 3264 (rack->rc_gp_no_rec_chg == 0) && 3265 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3266 rack->r_ctl.rack_per_of_gp_rec)) { 3267 /* We have been in recovery, ding it too */ 3268 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3269 if (calc > 0xffff) 3270 calc = 0xffff; 3271 logged |= 1; 3272 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3273 if (rack_per_upper_bound_ss && 3274 (rack->rc_dragged_bottom == 0) && 3275 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3276 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3277 } 3278 if (rack->rc_gp_saw_ca && 3279 (rack->rc_gp_saw_ss == 0) && 3280 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3281 rack->r_ctl.rack_per_of_gp_ca)) { 3282 /* In CA */ 3283 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3284 if (calc > 0xffff) 3285 calc = 0xffff; 3286 logged |= 2; 3287 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3288 if (rack_per_upper_bound_ca && 3289 (rack->rc_dragged_bottom == 0) && 3290 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3291 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3292 } 3293 if (rack->rc_gp_saw_ss && 3294 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3295 rack->r_ctl.rack_per_of_gp_ss)) { 3296 /* In SS */ 3297 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3298 if (calc > 0xffff) 3299 calc = 0xffff; 3300 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3301 if (rack_per_upper_bound_ss && 3302 (rack->rc_dragged_bottom == 0) && 3303 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3304 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3305 logged |= 4; 3306 } 3307 if (logged && 3308 (rack->rc_gp_incr == 0)){ 3309 /* Go into increment mode */ 3310 rack->rc_gp_incr = 1; 3311 rack->rc_gp_timely_inc_cnt = 0; 3312 } 3313 if (rack->rc_gp_incr && 3314 logged && 3315 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3316 rack->rc_gp_timely_inc_cnt++; 3317 } 3318 rack_log_timely(rack, logged, plus, 0, 0, 3319 __LINE__, 1); 3320 } 3321 3322 static uint32_t 3323 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3324 { 3325 /* 3326 * norm_grad = rtt_diff / minrtt; 3327 * new_per = curper * (1 - B * norm_grad) 3328 * 3329 * B = rack_gp_decrease_per (default 10%) 3330 * rtt_diff = input var current rtt-diff 3331 * curper = input var current percentage 3332 * minrtt = from rack filter 3333 * 3334 */ 3335 uint64_t perf; 3336 3337 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3338 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3339 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3340 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3341 (uint64_t)1000000)) / 3342 (uint64_t)1000000); 3343 if (perf > curper) { 3344 /* TSNH */ 3345 perf = curper - 1; 3346 } 3347 return ((uint32_t)perf); 3348 } 3349 3350 static uint32_t 3351 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3352 { 3353 /* 3354 * result = curper * (1 - (B * (1 - highrttthresh / gp_srtt))) 3355 * 3356 * where: 3357 * 3358 * B = rack_gp_decrease_per (default 10%) 3359 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3360 */ 3361 uint64_t perf; 3362 uint32_t
highrttthresh; 3363 3364 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3365 3366 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3367 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3368 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3369 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3370 return (perf); 3371 } 3372 3373 static void 3374 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3375 { 3376 uint64_t logvar, logvar2, logvar3; 3377 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3378 3379 if (rack->rc_gp_incr) { 3380 /* Turn off increment counting */ 3381 rack->rc_gp_incr = 0; 3382 rack->rc_gp_timely_inc_cnt = 0; 3383 } 3384 ss_red = ca_red = rec_red = 0; 3385 logged = 0; 3386 /* Calculate the reduction value */ 3387 if (rtt_diff < 0) { 3388 rtt_diff *= -1; 3389 } 3390 /* Must be at least 1% reduction */ 3391 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3392 /* We have been in recovery ding it too */ 3393 if (timely_says == 2) { 3394 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3395 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3396 if (alt < new_per) 3397 val = alt; 3398 else 3399 val = new_per; 3400 } else 3401 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3402 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3403 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3404 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3405 } else { 3406 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3407 rec_red = 0; 3408 } 3409 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3410 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3411 logged |= 1; 3412 } 3413 if (rack->rc_gp_saw_ss) { 3414 /* Sent in SS */ 3415 if (timely_says == 2) { 3416 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3417 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3418 if (alt < new_per) 3419 val = alt; 3420 else 3421 val = new_per; 3422 } else 3423 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3424 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3425 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3426 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3427 } else { 3428 ss_red = new_per; 3429 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3430 logvar = new_per; 3431 logvar <<= 32; 3432 logvar |= alt; 3433 logvar2 = (uint32_t)rtt; 3434 logvar2 <<= 32; 3435 logvar2 |= (uint32_t)rtt_diff; 3436 logvar3 = rack_gp_rtt_maxmul; 3437 logvar3 <<= 32; 3438 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3439 rack_log_timely(rack, timely_says, 3440 logvar2, logvar3, 3441 logvar, __LINE__, 10); 3442 } 3443 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3444 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3445 logged |= 4; 3446 } else if (rack->rc_gp_saw_ca) { 3447 /* Sent in CA */ 3448 if (timely_says == 2) { 3449 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3450 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3451 if (alt < new_per) 3452 val = alt; 3453 else 3454 val = new_per; 3455 } else 3456 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3457 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3458 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3459 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3460 } else { 3461 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3462 ca_red = 0; 3463 logvar = new_per; 3464 logvar <<= 32; 3465 logvar |= alt; 3466 logvar2 = (uint32_t)rtt; 3467 logvar2 <<= 32; 3468 logvar2 |= (uint32_t)rtt_diff; 3469 logvar3 = rack_gp_rtt_maxmul; 3470 logvar3 <<= 32; 3471 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3472 rack_log_timely(rack, timely_says, 3473 logvar2, logvar3, 3474 logvar, __LINE__, 10); 3475 } 3476 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3477 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3478 logged |= 2; 3479 } 3480 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3481 rack->rc_gp_timely_dec_cnt++; 3482 if (rack_timely_dec_clear && 3483 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3484 rack->rc_gp_timely_dec_cnt = 0; 3485 } 3486 logvar = ss_red; 3487 logvar <<= 32; 3488 logvar |= ca_red; 3489 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3490 __LINE__, 2); 3491 } 3492 3493 static void 3494 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3495 uint32_t rtt, uint32_t line, uint8_t reas) 3496 { 3497 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3498 union tcp_log_stackspecific log; 3499 struct timeval tv; 3500 3501 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3502 log.u_bbr.flex1 = line; 3503 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3504 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3505 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3506 log.u_bbr.flex5 = rtt; 3507 log.u_bbr.flex6 = rack->rc_highly_buffered; 3508 log.u_bbr.flex6 <<= 1; 3509 log.u_bbr.flex6 |= rack->forced_ack; 3510 log.u_bbr.flex6 <<= 1; 3511 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3512 log.u_bbr.flex6 <<= 1; 3513 log.u_bbr.flex6 |= rack->in_probe_rtt; 3514 log.u_bbr.flex6 <<= 1; 3515 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3516 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3517 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3518 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3519 log.u_bbr.flex8 = reas; 3520 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3521 log.u_bbr.delRate = rack_get_bw(rack); 3522 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3523 log.u_bbr.cur_del_rate <<= 32; 3524 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3525 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3526 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3527 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3528 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3529 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3530 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3531 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3532 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3533 log.u_bbr.rttProp = us_cts; 3534 log.u_bbr.rttProp <<= 32; 3535 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3536 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3537 &rack->rc_inp->inp_socket->so_rcv, 3538 &rack->rc_inp->inp_socket->so_snd, 3539 BBR_LOG_RTT_SHRINKS, 0, 3540 0, &log, false, &rack->r_ctl.act_rcv_time); 3541 } 3542 } 3543 3544 static void 3545 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3546 { 3547 uint64_t bwdp; 3548 3549 bwdp = rack_get_bw(rack); 3550 bwdp *= (uint64_t)rtt; 3551 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3552 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3553 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3554 /* 3555 * A window protocol must be able to have 4 packets 3556 * outstanding as the floor in order to function 3557 * (especially considering delayed ack :D). 3558 */ 3559 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3560 } 3561 } 3562 3563 static void 3564 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3565 { 3566 /** 3567 * ProbeRTT is a bit different in rack_pacing than in 3568 * BBR. It is like BBR in that it uses the lowering of 3569 * the RTT as a signal that we saw something new and 3570 * counts from there for how long between. But it is 3571 * different in that its quite simple. It does not 3572 * play with the cwnd and wait until we get down 3573 * to N segments outstanding and hold that for 3574 * 200ms. Instead it just sets the pacing reduction 3575 * rate to a set percentage (70 by default) and hold 3576 * that for a number of recent GP Srtt's. 3577 */ 3578 uint32_t segsiz; 3579 3580 if (rack->rc_gp_dyn_mul == 0) 3581 return; 3582 3583 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3584 /* We are idle */ 3585 return; 3586 } 3587 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3588 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3589 /* 3590 * Stop the goodput now, the idea here is 3591 * that future measurements with in_probe_rtt 3592 * won't register if they are not greater so 3593 * we want to get what info (if any) is available 3594 * now. 3595 */ 3596 rack_do_goodput_measurement(rack->rc_tp, rack, 3597 rack->rc_tp->snd_una, __LINE__, 3598 RACK_QUALITY_PROBERTT); 3599 } 3600 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3601 rack->r_ctl.rc_time_probertt_entered = us_cts; 3602 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3603 rack->r_ctl.rc_pace_min_segs); 3604 rack->in_probe_rtt = 1; 3605 rack->measure_saw_probe_rtt = 1; 3606 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3607 rack->r_ctl.rc_time_probertt_starts = 0; 3608 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3609 if (rack_probertt_use_min_rtt_entry) 3610 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3611 else 3612 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3613 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3614 __LINE__, RACK_RTTS_ENTERPROBE); 3615 } 3616 3617 static void 3618 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3619 { 3620 struct rack_sendmap *rsm; 3621 uint32_t segsiz; 3622 3623 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3624 rack->r_ctl.rc_pace_min_segs); 3625 rack->in_probe_rtt = 0; 3626 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3627 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3628 /* 3629 * Stop the goodput now, the idea here is 3630 * that future measurements with in_probe_rtt 3631 * won't register if they are not greater so 3632 * we want to get what info (if any) is available 3633 * now. 3634 */ 3635 rack_do_goodput_measurement(rack->rc_tp, rack, 3636 rack->rc_tp->snd_una, __LINE__, 3637 RACK_QUALITY_PROBERTT); 3638 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3639 /* 3640 * We don't have enough data to make a measurement. 3641 * So lets just stop and start here after exiting 3642 * probe-rtt. We probably are not interested in 3643 * the results anyway. 3644 */ 3645 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3646 } 3647 /* 3648 * Measurements through the current snd_max are going 3649 * to be limited by the slower pacing rate. 
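* (we were pacing them out at the reduced rack_per_of_gp_probertt
* percentage).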
3650 * 3651 * We need to mark these as app-limited so we 3652 * don't collapse the b/w. 3653 */ 3654 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3655 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3656 if (rack->r_ctl.rc_app_limited_cnt == 0) 3657 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3658 else { 3659 /* 3660 * Go out to the end app limited and mark 3661 * this new one as next and move the end_appl up 3662 * to this guy. 3663 */ 3664 if (rack->r_ctl.rc_end_appl) 3665 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3666 rack->r_ctl.rc_end_appl = rsm; 3667 } 3668 rsm->r_flags |= RACK_APP_LIMITED; 3669 rack->r_ctl.rc_app_limited_cnt++; 3670 } 3671 /* 3672 * Now, we need to examine our pacing rate multipliers. 3673 * If its under 100%, we need to kick it back up to 3674 * 100%. We also don't let it be over our "max" above 3675 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3676 * Note setting clamp_atexit_prtt to 0 has the effect 3677 * of setting CA/SS to 100% always at exit (which is 3678 * the default behavior). 3679 */ 3680 if (rack_probertt_clear_is) { 3681 rack->rc_gp_incr = 0; 3682 rack->rc_gp_bwred = 0; 3683 rack->rc_gp_timely_inc_cnt = 0; 3684 rack->rc_gp_timely_dec_cnt = 0; 3685 } 3686 /* Do we do any clamping at exit? */ 3687 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3688 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3689 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3690 } 3691 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3692 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3693 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3694 } 3695 /* 3696 * Lets set rtt_diff to 0, so that we will get a "boost" 3697 * after exiting. 3698 */ 3699 rack->r_ctl.rc_rtt_diff = 0; 3700 3701 /* Clear all flags so we start fresh */ 3702 rack->rc_tp->t_bytes_acked = 0; 3703 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3704 /* 3705 * If configured to, set the cwnd and ssthresh to 3706 * our targets. 3707 */ 3708 if (rack_probe_rtt_sets_cwnd) { 3709 uint64_t ebdp; 3710 uint32_t setto; 3711 3712 /* Set ssthresh so we get into CA once we hit our target */ 3713 if (rack_probertt_use_min_rtt_exit == 1) { 3714 /* Set to min rtt */ 3715 rack_set_prtt_target(rack, segsiz, 3716 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3717 } else if (rack_probertt_use_min_rtt_exit == 2) { 3718 /* Set to current gp rtt */ 3719 rack_set_prtt_target(rack, segsiz, 3720 rack->r_ctl.rc_gp_srtt); 3721 } else if (rack_probertt_use_min_rtt_exit == 3) { 3722 /* Set to entry gp rtt */ 3723 rack_set_prtt_target(rack, segsiz, 3724 rack->r_ctl.rc_entry_gp_rtt); 3725 } else { 3726 uint64_t sum; 3727 uint32_t setval; 3728 3729 sum = rack->r_ctl.rc_entry_gp_rtt; 3730 sum *= 10; 3731 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3732 if (sum >= 20) { 3733 /* 3734 * A highly buffered path needs 3735 * cwnd space for timely to work. 3736 * Lets set things up as if 3737 * we are heading back here again. 3738 */ 3739 setval = rack->r_ctl.rc_entry_gp_rtt; 3740 } else if (sum >= 15) { 3741 /* 3742 * Lets take the smaller of the 3743 * two since we are just somewhat 3744 * buffered. 3745 */ 3746 setval = rack->r_ctl.rc_gp_srtt; 3747 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3748 setval = rack->r_ctl.rc_entry_gp_rtt; 3749 } else { 3750 /* 3751 * Here we are not highly buffered 3752 * and should pick the min we can to 3753 * keep from causing loss. 
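* That minimum is the filtered min rtt (rc_gp_min_rtt) used just
* below.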
3754 */ 3755 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3756 } 3757 rack_set_prtt_target(rack, segsiz, 3758 setval); 3759 } 3760 if (rack_probe_rtt_sets_cwnd > 1) { 3761 /* There is a percentage here to boost */ 3762 ebdp = rack->r_ctl.rc_target_probertt_flight; 3763 ebdp *= rack_probe_rtt_sets_cwnd; 3764 ebdp /= 100; 3765 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3766 } else 3767 setto = rack->r_ctl.rc_target_probertt_flight; 3768 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3769 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3770 /* Enforce a min */ 3771 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3772 } 3773 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3774 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3775 } 3776 rack_log_rtt_shrinks(rack, us_cts, 3777 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3778 __LINE__, RACK_RTTS_EXITPROBE); 3779 /* Clear times last so log has all the info */ 3780 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3781 rack->r_ctl.rc_time_probertt_entered = us_cts; 3782 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3783 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3784 } 3785 3786 static void 3787 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3788 { 3789 /* Check in on probe-rtt */ 3790 if (rack->rc_gp_filled == 0) { 3791 /* We do not do p-rtt unless we have gp measurements */ 3792 return; 3793 } 3794 if (rack->in_probe_rtt) { 3795 uint64_t no_overflow; 3796 uint32_t endtime, must_stay; 3797 3798 if (rack->r_ctl.rc_went_idle_time && 3799 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3800 /* 3801 * We went idle during prtt, just exit now. 3802 */ 3803 rack_exit_probertt(rack, us_cts); 3804 } else if (rack_probe_rtt_safety_val && 3805 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3806 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3807 /* 3808 * Probe RTT safety value triggered! 3809 */ 3810 rack_log_rtt_shrinks(rack, us_cts, 3811 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3812 __LINE__, RACK_RTTS_SAFETY); 3813 rack_exit_probertt(rack, us_cts); 3814 } 3815 /* Calculate the max we will wait */ 3816 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3817 if (rack->rc_highly_buffered) 3818 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3819 /* Calculate the min we must wait */ 3820 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3821 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3822 TSTMP_LT(us_cts, endtime)) { 3823 uint32_t calc; 3824 /* Do we lower more? 
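* If so, rack_per_of_gp_probertt is dropped from its base value by
* rack_per_of_gp_probertt_reduce for every full gp_srtt we have
* spent in probe-rtt, but never below rack_per_of_gp_lowthresh.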
*/ 3825 no_exit: 3826 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3827 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3828 else 3829 calc = 0; 3830 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3831 if (calc) { 3832 /* Maybe */ 3833 calc *= rack_per_of_gp_probertt_reduce; 3834 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3835 /* Limit it too */ 3836 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3837 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3838 } 3839 /* We must reach target or the time set */ 3840 return; 3841 } 3842 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3843 if ((TSTMP_LT(us_cts, must_stay) && 3844 rack->rc_highly_buffered) || 3845 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3846 rack->r_ctl.rc_target_probertt_flight)) { 3847 /* We are not past the must_stay time */ 3848 goto no_exit; 3849 } 3850 rack_log_rtt_shrinks(rack, us_cts, 3851 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3852 __LINE__, RACK_RTTS_REACHTARGET); 3853 rack->r_ctl.rc_time_probertt_starts = us_cts; 3854 if (rack->r_ctl.rc_time_probertt_starts == 0) 3855 rack->r_ctl.rc_time_probertt_starts = 1; 3856 /* Restore back to our rate we want to pace at in prtt */ 3857 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3858 } 3859 /* 3860 * Setup our end time, some number of gp_srtts plus 200ms. 3861 */ 3862 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3863 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3864 if (rack_probertt_gpsrtt_cnt_div) 3865 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3866 else 3867 endtime = 0; 3868 endtime += rack_min_probertt_hold; 3869 endtime += rack->r_ctl.rc_time_probertt_starts; 3870 if (TSTMP_GEQ(us_cts, endtime)) { 3871 /* yes, exit probertt */ 3872 rack_exit_probertt(rack, us_cts); 3873 } 3874 3875 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3876 /* Go into probertt, its been too long since we went lower */ 3877 rack_enter_probertt(rack, us_cts); 3878 } 3879 } 3880 3881 static void 3882 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3883 uint32_t rtt, int32_t rtt_diff) 3884 { 3885 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3886 uint32_t losses; 3887 3888 if ((rack->rc_gp_dyn_mul == 0) || 3889 (rack->use_fixed_rate) || 3890 (rack->in_probe_rtt) || 3891 (rack->rc_always_pace == 0)) { 3892 /* No dynamic GP multiplier in play */ 3893 return; 3894 } 3895 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3896 cur_bw = rack_get_bw(rack); 3897 /* Calculate our up and down range */ 3898 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3899 up_bnd /= 100; 3900 up_bnd += rack->r_ctl.last_gp_comp_bw; 3901 3902 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3903 subfr /= 100; 3904 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3905 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3906 /* 3907 * This is the case where our RTT is above 3908 * the max target and we have been configured 3909 * to just do timely no bonus up stuff in that case. 3910 * 3911 * There are two configurations, set to 1, and we 3912 * just do timely if we are over our max. If its 3913 * set above 1 then we slam the multipliers down 3914 * to 100 and then decrement per timely. 
3915 */ 3916 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3917 __LINE__, 3); 3918 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3919 rack_validate_multipliers_at_or_below_100(rack); 3920 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3921 } else if ((last_bw_est < low_bnd) && !losses) { 3922 /* 3923 * We are decreasing; this is a bit complicated since it 3924 * means we are losing ground. This could be 3925 * because another flow entered and we are competing 3926 * for b/w with it. This will push the RTT up which 3927 * makes timely unusable unless we want to get shoved 3928 * into a corner and just be backed off (the age 3929 * old problem with delay based CC). 3930 * 3931 * On the other hand if it was a route change we 3932 * would like to stay somewhat contained and not 3933 * blow out the buffers. 3934 */ 3935 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3936 __LINE__, 3); 3937 rack->r_ctl.last_gp_comp_bw = cur_bw; 3938 if (rack->rc_gp_bwred == 0) { 3939 /* Go into reduction counting */ 3940 rack->rc_gp_bwred = 1; 3941 rack->rc_gp_timely_dec_cnt = 0; 3942 } 3943 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3944 (timely_says == 0)) { 3945 /* 3946 * Push another time with faster pacing 3947 * to try to gain it back (we include override to 3948 * get a full raise factor). 3949 */ 3950 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3951 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3952 (timely_says == 0) || 3953 (rack_down_raise_thresh == 0)) { 3954 /* 3955 * Do an override up in b/w if we were 3956 * below the threshold; if the threshold 3957 * is zero we always do the raise. 3958 */ 3959 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3960 } else { 3961 /* Log that it stays the same */ 3962 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3963 __LINE__, 11); 3964 } 3965 rack->rc_gp_timely_dec_cnt++; 3966 /* We are not incrementing, really a no-count */ 3967 rack->rc_gp_incr = 0; 3968 rack->rc_gp_timely_inc_cnt = 0; 3969 } else { 3970 /* 3971 * Let's just use the RTT 3972 * information and give up 3973 * pushing. 3974 */ 3975 goto use_timely; 3976 } 3977 } else if ((timely_says != 2) && 3978 !losses && 3979 (last_bw_est > up_bnd)) { 3980 /* 3981 * We are increasing b/w; let's keep going, updating 3982 * our b/w and ignoring any timely input, unless 3983 * of course we are at our max raise (if there is one). 3984 */ 3985 3986 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3987 __LINE__, 3); 3988 rack->r_ctl.last_gp_comp_bw = cur_bw; 3989 if (rack->rc_gp_saw_ss && 3990 rack_per_upper_bound_ss && 3991 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3992 /* 3993 * In cases where we can't go higher 3994 * we should just use timely. 3995 */ 3996 goto use_timely; 3997 } 3998 if (rack->rc_gp_saw_ca && 3999 rack_per_upper_bound_ca && 4000 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4001 /* 4002 * In cases where we can't go higher 4003 * we should just use timely.
4004 */ 4005 goto use_timely; 4006 } 4007 rack->rc_gp_bwred = 0; 4008 rack->rc_gp_timely_dec_cnt = 0; 4009 /* You get a set number of pushes if timely is trying to reduce */ 4010 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4011 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4012 } else { 4013 /* Log that it stays the same */ 4014 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4015 __LINE__, 12); 4016 } 4017 return; 4018 } else { 4019 /* 4020 * We are staying between the lower and upper range bounds 4021 * so use timely to decide. 4022 */ 4023 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4024 __LINE__, 3); 4025 use_timely: 4026 if (timely_says) { 4027 rack->rc_gp_incr = 0; 4028 rack->rc_gp_timely_inc_cnt = 0; 4029 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4030 !losses && 4031 (last_bw_est < low_bnd)) { 4032 /* We are losing ground */ 4033 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4034 rack->rc_gp_timely_dec_cnt++; 4035 /* We are not incrementing, really a no-count */ 4036 rack->rc_gp_incr = 0; 4037 rack->rc_gp_timely_inc_cnt = 0; 4038 } else 4039 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4040 } else { 4041 rack->rc_gp_bwred = 0; 4042 rack->rc_gp_timely_dec_cnt = 0; 4043 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4044 } 4045 } 4046 } 4047 4048 static int32_t 4049 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4050 { 4051 int32_t timely_says; 4052 uint64_t log_mult, log_rtt_a_diff; 4053 4054 log_rtt_a_diff = rtt; 4055 log_rtt_a_diff <<= 32; 4056 log_rtt_a_diff |= (uint32_t)rtt_diff; 4057 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4058 rack_gp_rtt_maxmul)) { 4059 /* Reduce the b/w multiplier */ 4060 timely_says = 2; 4061 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4062 log_mult <<= 32; 4063 log_mult |= prev_rtt; 4064 rack_log_timely(rack, timely_says, log_mult, 4065 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4066 log_rtt_a_diff, __LINE__, 4); 4067 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4068 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4069 max(rack_gp_rtt_mindiv , 1)))) { 4070 /* Increase the b/w multiplier */ 4071 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4072 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4073 max(rack_gp_rtt_mindiv , 1)); 4074 log_mult <<= 32; 4075 log_mult |= prev_rtt; 4076 timely_says = 0; 4077 rack_log_timely(rack, timely_says, log_mult , 4078 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4079 log_rtt_a_diff, __LINE__, 5); 4080 } else { 4081 /* 4082 * Use a gradient to decide; the timely gradient 4083 * is: 4084 * grad = rc_rtt_diff / min_rtt; 4085 * 4086 * anything below or equal to 0 will be 4087 * an increase indication. Anything above 4088 * zero is a decrease. Note we take care 4089 * of the actual gradient calculation 4090 * in the reduction (it's not needed for 4091 * an increase).
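*
* For example, a gp_srtt that has risen 5000 usec against a 50000
* usec min_rtt gives a gradient of 0.1 and calls for a decrease,
* while a flat or falling rtt_diff calls for an increase.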
4092 */ 4093 log_mult = prev_rtt; 4094 if (rtt_diff <= 0) { 4095 /* 4096 * Rttdiff is less than zero, increase the 4097 * b/w multiplier (its 0 or negative) 4098 */ 4099 timely_says = 0; 4100 rack_log_timely(rack, timely_says, log_mult, 4101 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4102 } else { 4103 /* Reduce the b/w multiplier */ 4104 timely_says = 1; 4105 rack_log_timely(rack, timely_says, log_mult, 4106 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4107 } 4108 } 4109 return (timely_says); 4110 } 4111 4112 static void 4113 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4114 tcp_seq th_ack, int line, uint8_t quality) 4115 { 4116 uint64_t tim, bytes_ps, ltim, stim, utim; 4117 uint32_t segsiz, bytes, reqbytes, us_cts; 4118 int32_t gput, new_rtt_diff, timely_says; 4119 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4120 int did_add = 0; 4121 4122 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4123 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4124 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4125 tim = us_cts - tp->gput_ts; 4126 else 4127 tim = 0; 4128 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4129 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4130 else 4131 stim = 0; 4132 /* 4133 * Use the larger of the send time or ack time. This prevents us 4134 * from being influenced by ack artifacts to come up with too 4135 * high of measurement. Note that since we are spanning over many more 4136 * bytes in most of our measurements hopefully that is less likely to 4137 * occur. 4138 */ 4139 if (tim > stim) 4140 utim = max(tim, 1); 4141 else 4142 utim = max(stim, 1); 4143 /* Lets get a msec time ltim too for the old stuff */ 4144 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4145 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4146 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4147 if ((tim == 0) && (stim == 0)) { 4148 /* 4149 * Invalid measurement time, maybe 4150 * all on one ack/one send? 4151 */ 4152 bytes = 0; 4153 bytes_ps = 0; 4154 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4155 0, 0, 0, 10, __LINE__, NULL, quality); 4156 goto skip_measurement; 4157 } 4158 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4159 /* We never made a us_rtt measurement? */ 4160 bytes = 0; 4161 bytes_ps = 0; 4162 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4163 0, 0, 0, 10, __LINE__, NULL, quality); 4164 goto skip_measurement; 4165 } 4166 /* 4167 * Calculate the maximum possible b/w this connection 4168 * could have. We base our calculation on the lowest 4169 * rtt we have seen during the measurement and the 4170 * largest rwnd the client has given us in that time. This 4171 * forms a BDP that is the maximum that we could ever 4172 * get to the client. Anything larger is not valid. 4173 * 4174 * I originally had code here that rejected measurements 4175 * where the time was less than 1/2 the latest us_rtt. 4176 * But after thinking on that I realized its wrong since 4177 * say you had a 150Mbps or even 1Gbps link, and you 4178 * were a long way away.. example I am in Europe (100ms rtt) 4179 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4180 * bytes my time would be 1.2ms, and yet my rtt would say 4181 * the measurement was invalid the time was < 50ms. The 4182 * same thing is true for 150Mb (8ms of time). 
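* (150,000 bytes in 1.2 ms is exactly a 1Gbps wire rate, so the
* sample is fine even though it is far shorter than the rtt).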
4183 * 4184 * A better way I realized is to look at what the maximum 4185 * the connection could possibly do. This is gated on 4186 * the lowest RTT we have seen and the highest rwnd. 4187 * We should in theory never exceed that, if we are 4188 * then something on the path is storing up packets 4189 * and then feeding them all at once to our endpoint 4190 * messing up our measurement. 4191 */ 4192 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4193 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4194 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4195 if (SEQ_LT(th_ack, tp->gput_seq)) { 4196 /* No measurement can be made */ 4197 bytes = 0; 4198 bytes_ps = 0; 4199 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4200 0, 0, 0, 10, __LINE__, NULL, quality); 4201 goto skip_measurement; 4202 } else 4203 bytes = (th_ack - tp->gput_seq); 4204 bytes_ps = (uint64_t)bytes; 4205 /* 4206 * Don't measure a b/w for pacing unless we have gotten at least 4207 * an initial windows worth of data in this measurement interval. 4208 * 4209 * Small numbers of bytes get badly influenced by delayed ack and 4210 * other artifacts. Note we take the initial window or our 4211 * defined minimum GP (defaulting to 10 which hopefully is the 4212 * IW). 4213 */ 4214 if (rack->rc_gp_filled == 0) { 4215 /* 4216 * The initial estimate is special. We 4217 * have blasted out an IW worth of packets 4218 * without a real valid ack ts results. We 4219 * then setup the app_limited_needs_set flag, 4220 * this should get the first ack in (probably 2 4221 * MSS worth) to be recorded as the timestamp. 4222 * We thus allow a smaller number of bytes i.e. 4223 * IW - 2MSS. 4224 */ 4225 reqbytes -= (2 * segsiz); 4226 /* Also lets fill previous for our first measurement to be neutral */ 4227 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4228 } 4229 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4230 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4231 rack->r_ctl.rc_app_limited_cnt, 4232 0, 0, 10, __LINE__, NULL, quality); 4233 goto skip_measurement; 4234 } 4235 /* 4236 * We now need to calculate the Timely like status so 4237 * we can update (possibly) the b/w multipliers. 4238 */ 4239 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4240 if (rack->rc_gp_filled == 0) { 4241 /* No previous reading */ 4242 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4243 } else { 4244 if (rack->measure_saw_probe_rtt == 0) { 4245 /* 4246 * We don't want a probertt to be counted 4247 * since it will be negative incorrectly. We 4248 * expect to be reducing the RTT when we 4249 * pace at a slower rate. 4250 */ 4251 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4252 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4253 } 4254 } 4255 timely_says = rack_make_timely_judgement(rack, 4256 rack->r_ctl.rc_gp_srtt, 4257 rack->r_ctl.rc_rtt_diff, 4258 rack->r_ctl.rc_prev_gp_srtt 4259 ); 4260 bytes_ps *= HPTS_USEC_IN_SEC; 4261 bytes_ps /= utim; 4262 if (bytes_ps > rack->r_ctl.last_max_bw) { 4263 /* 4264 * Something is on path playing 4265 * since this b/w is not possible based 4266 * on our BDP (highest rwnd and lowest rtt 4267 * we saw in the measurement window). 4268 * 4269 * Another option here would be to 4270 * instead skip the measurement. 
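* For now we keep the measurement but cap bytes_ps at last_max_bw,
* i.e. the highest rwnd over the lowest rtt seen in the window.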
4271 */ 4272 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4273 bytes_ps, rack->r_ctl.last_max_bw, 0, 4274 11, __LINE__, NULL, quality); 4275 bytes_ps = rack->r_ctl.last_max_bw; 4276 } 4277 /* We store gp for b/w in bytes per second */ 4278 if (rack->rc_gp_filled == 0) { 4279 /* Initial measurement */ 4280 if (bytes_ps) { 4281 rack->r_ctl.gp_bw = bytes_ps; 4282 rack->rc_gp_filled = 1; 4283 rack->r_ctl.num_measurements = 1; 4284 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4285 } else { 4286 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4287 rack->r_ctl.rc_app_limited_cnt, 4288 0, 0, 10, __LINE__, NULL, quality); 4289 } 4290 if (tcp_in_hpts(rack->rc_inp) && 4291 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4292 /* 4293 * Ok we can't trust the pacer in this case 4294 * where we transition from un-paced to paced. 4295 * Or for that matter when the burst mitigation 4296 * was making a wild guess and got it wrong. 4297 * Stop the pacer and clear up all the aggregate 4298 * delays etc. 4299 */ 4300 tcp_hpts_remove(rack->rc_inp); 4301 rack->r_ctl.rc_hpts_flags = 0; 4302 rack->r_ctl.rc_last_output_to = 0; 4303 } 4304 did_add = 2; 4305 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4306 /* Still a small number, run an average */ 4307 rack->r_ctl.gp_bw += bytes_ps; 4308 addpart = rack->r_ctl.num_measurements; 4309 rack->r_ctl.num_measurements++; 4310 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4311 /* We have collected enough to move forward */ 4312 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4313 } 4314 did_add = 3; 4315 } else { 4316 /* 4317 * We want to take 1/wma of the goodput and add it to 7/8th 4318 * of the old value, weighted by the srtt. So if your measurement 4319 * period is say 2 SRTT's long you would get 1/4 as the 4320 * value; if it was like 1/2 SRTT then you would get 1/16th. 4321 * 4322 * But we must be careful not to take too much i.e. if the 4323 * srtt is say 20ms and the measurement is taken over 4324 * 400ms our weight would be 400/20 i.e. 20. On the 4325 * other hand if we get a measurement over 1ms with a 4326 * 10ms rtt we only want to take a much smaller portion. 4327 */ 4328 if (rack->r_ctl.num_measurements < 0xff) { 4329 rack->r_ctl.num_measurements++; 4330 } 4331 srtt = (uint64_t)tp->t_srtt; 4332 if (srtt == 0) { 4333 /* 4334 * Strange, why did t_srtt go back to zero? 4335 */ 4336 if (rack->r_ctl.rc_rack_min_rtt) 4337 srtt = rack->r_ctl.rc_rack_min_rtt; 4338 else 4339 srtt = HPTS_USEC_IN_MSEC; 4340 } 4341 /* 4342 * XXXrrs: Note for reviewers, in playing with 4343 * dynamic pacing I discovered this GP calculation 4344 * as done originally leads to some undesired results. 4345 * Basically you can get longer measurements contributing 4346 * too much to the WMA. Thus I changed it, if you are doing 4347 * dynamic adjustments, to only do the apportioned adjustment 4348 * if we have a very small (time wise) measurement. Longer 4349 * measurements just get their weight (defaulting to 1/8) 4350 * added to the WMA. We may want to think about changing 4351 * this to always do that for both sides i.e. dynamic 4352 * and non-dynamic... but considering lots of folks 4353 * were playing with this I did not want to change the 4354 * calculation per se without your thoughts... Lawrence? 4355 * Peter??
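*
* As a concrete example with the default 1/8 weight noted above
* (rack_wma_divisor): a measurement spanning half an srtt replaces
* roughly 1/16th of gp_bw, while one spanning a full srtt or more
* replaces a flat 1/8th.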
4356 */ 4357 if (rack->rc_gp_dyn_mul == 0) { 4358 subpart = rack->r_ctl.gp_bw * utim; 4359 subpart /= (srtt * 8); 4360 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4361 /* 4362 * The b/w update takes no more 4363 * away then 1/2 our running total 4364 * so factor it in. 4365 */ 4366 addpart = bytes_ps * utim; 4367 addpart /= (srtt * 8); 4368 } else { 4369 /* 4370 * Don't allow a single measurement 4371 * to account for more than 1/2 of the 4372 * WMA. This could happen on a retransmission 4373 * where utim becomes huge compared to 4374 * srtt (multiple retransmissions when using 4375 * the sending rate which factors in all the 4376 * transmissions from the first one). 4377 */ 4378 subpart = rack->r_ctl.gp_bw / 2; 4379 addpart = bytes_ps / 2; 4380 } 4381 resid_bw = rack->r_ctl.gp_bw - subpart; 4382 rack->r_ctl.gp_bw = resid_bw + addpart; 4383 did_add = 1; 4384 } else { 4385 if ((utim / srtt) <= 1) { 4386 /* 4387 * The b/w update was over a small period 4388 * of time. The idea here is to prevent a small 4389 * measurement time period from counting 4390 * too much. So we scale it based on the 4391 * time so it attributes less than 1/rack_wma_divisor 4392 * of its measurement. 4393 */ 4394 subpart = rack->r_ctl.gp_bw * utim; 4395 subpart /= (srtt * rack_wma_divisor); 4396 addpart = bytes_ps * utim; 4397 addpart /= (srtt * rack_wma_divisor); 4398 } else { 4399 /* 4400 * The scaled measurement was long 4401 * enough so lets just add in the 4402 * portion of the measurement i.e. 1/rack_wma_divisor 4403 */ 4404 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4405 addpart = bytes_ps / rack_wma_divisor; 4406 } 4407 if ((rack->measure_saw_probe_rtt == 0) || 4408 (bytes_ps > rack->r_ctl.gp_bw)) { 4409 /* 4410 * For probe-rtt we only add it in 4411 * if its larger, all others we just 4412 * add in. 4413 */ 4414 did_add = 1; 4415 resid_bw = rack->r_ctl.gp_bw - subpart; 4416 rack->r_ctl.gp_bw = resid_bw + addpart; 4417 } 4418 } 4419 } 4420 if ((rack->gp_ready == 0) && 4421 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4422 /* We have enough measurements now */ 4423 rack->gp_ready = 1; 4424 rack_set_cc_pacing(rack); 4425 if (rack->defer_options) 4426 rack_apply_deferred_options(rack); 4427 } 4428 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4429 rack_get_bw(rack), 22, did_add, NULL, quality); 4430 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4431 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4432 rack_update_multiplier(rack, timely_says, bytes_ps, 4433 rack->r_ctl.rc_gp_srtt, 4434 rack->r_ctl.rc_rtt_diff); 4435 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4436 rack_get_bw(rack), 3, line, NULL, quality); 4437 /* reset the gp srtt and setup the new prev */ 4438 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4439 /* Record the lost count for the next measurement */ 4440 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4441 /* 4442 * We restart our diffs based on the gpsrtt in the 4443 * measurement window. 4444 */ 4445 rack->rc_gp_rtt_set = 0; 4446 rack->rc_gp_saw_rec = 0; 4447 rack->rc_gp_saw_ca = 0; 4448 rack->rc_gp_saw_ss = 0; 4449 rack->rc_dragged_bottom = 0; 4450 skip_measurement: 4451 4452 #ifdef STATS 4453 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4454 gput); 4455 /* 4456 * XXXLAS: This is a temporary hack, and should be 4457 * chained off VOI_TCP_GPUT when stats(9) grows an 4458 * API to deal with chained VOIs. 
4459 */ 4460 if (tp->t_stats_gput_prev > 0) 4461 stats_voi_update_abs_s32(tp->t_stats, 4462 VOI_TCP_GPUT_ND, 4463 ((gput - tp->t_stats_gput_prev) * 100) / 4464 tp->t_stats_gput_prev); 4465 #endif 4466 tp->t_flags &= ~TF_GPUTINPROG; 4467 tp->t_stats_gput_prev = gput; 4468 /* 4469 * Now are we app limited now and there is space from where we 4470 * were to where we want to go? 4471 * 4472 * We don't do the other case i.e. non-applimited here since 4473 * the next send will trigger us picking up the missing data. 4474 */ 4475 if (rack->r_ctl.rc_first_appl && 4476 TCPS_HAVEESTABLISHED(tp->t_state) && 4477 rack->r_ctl.rc_app_limited_cnt && 4478 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4479 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4480 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4481 /* 4482 * Yep there is enough outstanding to make a measurement here. 4483 */ 4484 struct rack_sendmap *rsm, fe; 4485 4486 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4487 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4488 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4489 rack->app_limited_needs_set = 0; 4490 tp->gput_seq = th_ack; 4491 if (rack->in_probe_rtt) 4492 rack->measure_saw_probe_rtt = 1; 4493 else if ((rack->measure_saw_probe_rtt) && 4494 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4495 rack->measure_saw_probe_rtt = 0; 4496 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4497 /* There is a full window to gain info from */ 4498 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4499 } else { 4500 /* We can only measure up to the applimited point */ 4501 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4502 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4503 /* 4504 * We don't have enough to make a measurement. 4505 */ 4506 tp->t_flags &= ~TF_GPUTINPROG; 4507 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4508 0, 0, 0, 6, __LINE__, NULL, quality); 4509 return; 4510 } 4511 } 4512 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4513 /* 4514 * We will get no more data into the SB 4515 * this means we need to have the data available 4516 * before we start a measurement. 4517 */ 4518 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4519 /* Nope not enough data. */ 4520 return; 4521 } 4522 } 4523 tp->t_flags |= TF_GPUTINPROG; 4524 /* 4525 * Now we need to find the timestamp of the send at tp->gput_seq 4526 * for the send based measurement. 4527 */ 4528 fe.r_start = tp->gput_seq; 4529 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4530 if (rsm) { 4531 /* Ok send-based limit is set */ 4532 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4533 /* 4534 * Move back to include the earlier part 4535 * so our ack time lines up right (this may 4536 * make an overlapping measurement but thats 4537 * ok). 4538 */ 4539 tp->gput_seq = rsm->r_start; 4540 } 4541 if (rsm->r_flags & RACK_ACKED) 4542 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4543 else 4544 rack->app_limited_needs_set = 1; 4545 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4546 } else { 4547 /* 4548 * If we don't find the rsm due to some 4549 * send-limit set the current time, which 4550 * basically disables the send-limit. 
4551 */ 4552 struct timeval tv; 4553 4554 microuptime(&tv); 4555 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4556 } 4557 rack_log_pacing_delay_calc(rack, 4558 tp->gput_seq, 4559 tp->gput_ack, 4560 (uint64_t)rsm, 4561 tp->gput_ts, 4562 rack->r_ctl.rc_app_limited_cnt, 4563 9, 4564 __LINE__, NULL, quality); 4565 } 4566 } 4567 4568 /* 4569 * CC wrapper hook functions 4570 */ 4571 static void 4572 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4573 uint16_t type, int32_t recovery) 4574 { 4575 uint32_t prior_cwnd, acked; 4576 struct tcp_log_buffer *lgb = NULL; 4577 uint8_t labc_to_use, quality; 4578 4579 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4580 tp->ccv->nsegs = nsegs; 4581 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4582 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4583 uint32_t max; 4584 4585 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4586 if (tp->ccv->bytes_this_ack > max) { 4587 tp->ccv->bytes_this_ack = max; 4588 } 4589 } 4590 #ifdef STATS 4591 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4592 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4593 #endif 4594 quality = RACK_QUALITY_NONE; 4595 if ((tp->t_flags & TF_GPUTINPROG) && 4596 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4597 /* Measure the Goodput */ 4598 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4599 #ifdef NETFLIX_PEAKRATE 4600 if ((type == CC_ACK) && 4601 (tp->t_maxpeakrate)) { 4602 /* 4603 * We update t_peakrate_thr. This gives us roughly 4604 * one update per round trip time. Note 4605 * it will only be used if pace_always is off i.e 4606 * we don't do this for paced flows. 4607 */ 4608 rack_update_peakrate_thr(tp); 4609 } 4610 #endif 4611 } 4612 /* Which way our we limited, if not cwnd limited no advance in CA */ 4613 if (tp->snd_cwnd <= tp->snd_wnd) 4614 tp->ccv->flags |= CCF_CWND_LIMITED; 4615 else 4616 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4617 if (tp->snd_cwnd > tp->snd_ssthresh) { 4618 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4619 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4620 /* For the setting of a window past use the actual scwnd we are using */ 4621 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4622 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4623 tp->ccv->flags |= CCF_ABC_SENTAWND; 4624 } 4625 } else { 4626 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4627 tp->t_bytes_acked = 0; 4628 } 4629 prior_cwnd = tp->snd_cwnd; 4630 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4631 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4632 labc_to_use = rack->rc_labc; 4633 else 4634 labc_to_use = rack_max_abc_post_recovery; 4635 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4636 union tcp_log_stackspecific log; 4637 struct timeval tv; 4638 4639 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4640 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4641 log.u_bbr.flex1 = th_ack; 4642 log.u_bbr.flex2 = tp->ccv->flags; 4643 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4644 log.u_bbr.flex4 = tp->ccv->nsegs; 4645 log.u_bbr.flex5 = labc_to_use; 4646 log.u_bbr.flex6 = prior_cwnd; 4647 log.u_bbr.flex7 = V_tcp_do_newsack; 4648 log.u_bbr.flex8 = 1; 4649 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4650 0, &log, false, NULL, NULL, 0, &tv); 4651 } 4652 if (CC_ALGO(tp)->ack_received != NULL) { 4653 /* XXXLAS: Find a way to live without this */ 4654 tp->ccv->curack = th_ack; 4655 
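/* Hand the CC module our local ABC value for this ack. */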
tp->ccv->labc = labc_to_use; 4656 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4657 CC_ALGO(tp)->ack_received(tp->ccv, type); 4658 } 4659 if (lgb) { 4660 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4661 } 4662 if (rack->r_must_retran) { 4663 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4664 /* 4665 * We are now beyond the rxt point, so let's disable 4666 * the flag. 4667 */ 4668 rack->r_ctl.rc_out_at_rto = 0; 4669 rack->r_must_retran = 0; 4670 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4671 /* 4672 * Only decrement rc_out_at_rto if the cwnd advances 4673 * by at least a whole segment. Otherwise the next time 4674 * the peer acks we won't be able to send; this generally 4675 * happens when we are in Congestion Avoidance. 4676 */ 4677 if (acked <= rack->r_ctl.rc_out_at_rto){ 4678 rack->r_ctl.rc_out_at_rto -= acked; 4679 } else { 4680 rack->r_ctl.rc_out_at_rto = 0; 4681 } 4682 } 4683 } 4684 #ifdef STATS 4685 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4686 #endif 4687 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4688 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4689 } 4690 #ifdef NETFLIX_PEAKRATE 4691 /* we enforce max peak rate if it is set and we are not pacing */ 4692 if ((rack->rc_always_pace == 0) && 4693 tp->t_peakrate_thr && 4694 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4695 tp->snd_cwnd = tp->t_peakrate_thr; 4696 } 4697 #endif 4698 } 4699 4700 static void 4701 tcp_rack_partialack(struct tcpcb *tp) 4702 { 4703 struct tcp_rack *rack; 4704 4705 rack = (struct tcp_rack *)tp->t_fb_ptr; 4706 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4707 /* 4708 * If we are doing PRR and have enough 4709 * room to send <or> we are pacing and prr 4710 * is disabled, we will want to see if we 4711 * can send data (by setting r_wanted_output to 4712 * true). 4713 */ 4714 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4715 rack->rack_no_prr) 4716 rack->r_wanted_output = 1; 4717 } 4718 4719 static void 4720 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4721 { 4722 struct tcp_rack *rack; 4723 uint32_t orig_cwnd; 4724 4725 orig_cwnd = tp->snd_cwnd; 4726 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4727 rack = (struct tcp_rack *)tp->t_fb_ptr; 4728 /* only alert CC if we alerted when we entered */ 4729 if (CC_ALGO(tp)->post_recovery != NULL) { 4730 tp->ccv->curack = th_ack; 4731 CC_ALGO(tp)->post_recovery(tp->ccv); 4732 if (tp->snd_cwnd < tp->snd_ssthresh) { 4733 /* 4734 * Rack has burst control and pacing, 4735 * so let's not set this any lower than 4736 * snd_ssthresh per RFC-6582 (option 2).
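 * A small worked example (illustrative numbers only): if the cc module's
 * post_recovery dropped snd_cwnd to 2 * maxseg while snd_ssthresh is
 * 10 * maxseg, the assignment below raises snd_cwnd back to 10 * maxseg;
 * rack's pacing and burst limits keep that from turning into a
 * line-rate burst.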
4737 */ 4738 tp->snd_cwnd = tp->snd_ssthresh; 4739 } 4740 } 4741 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4742 union tcp_log_stackspecific log; 4743 struct timeval tv; 4744 4745 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4746 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4747 log.u_bbr.flex1 = th_ack; 4748 log.u_bbr.flex2 = tp->ccv->flags; 4749 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4750 log.u_bbr.flex4 = tp->ccv->nsegs; 4751 log.u_bbr.flex5 = V_tcp_abc_l_var; 4752 log.u_bbr.flex6 = orig_cwnd; 4753 log.u_bbr.flex7 = V_tcp_do_newsack; 4754 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4755 log.u_bbr.flex8 = 2; 4756 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4757 0, &log, false, NULL, NULL, 0, &tv); 4758 } 4759 if ((rack->rack_no_prr == 0) && 4760 (rack->no_prr_addback == 0) && 4761 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4762 /* 4763 * Suck the next prr cnt back into cwnd, but 4764 * only do that if we are not application limited. 4765 */ 4766 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 4767 /* 4768 * We are allowed to add back to the cwnd the amount we did 4769 * not get out if: 4770 * a) no_prr_addback is off. 4771 * b) we are not app limited 4772 * c) we are doing prr 4773 * <and> 4774 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4775 */ 4776 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4777 rack->r_ctl.rc_prr_sndcnt); 4778 } 4779 rack->r_ctl.rc_prr_sndcnt = 0; 4780 rack_log_to_prr(rack, 1, 0, __LINE__); 4781 } 4782 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4783 tp->snd_recover = tp->snd_una; 4784 if (rack->r_ctl.dsack_persist) { 4785 rack->r_ctl.dsack_persist--; 4786 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4787 rack->r_ctl.num_dsack = 0; 4788 } 4789 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4790 } 4791 EXIT_RECOVERY(tp->t_flags); 4792 } 4793 4794 static void 4795 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4796 { 4797 struct tcp_rack *rack; 4798 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4799 4800 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4801 #ifdef STATS 4802 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4803 #endif 4804 if (IN_RECOVERY(tp->t_flags) == 0) { 4805 in_rec_at_entry = 0; 4806 ssthresh_enter = tp->snd_ssthresh; 4807 cwnd_enter = tp->snd_cwnd; 4808 } else 4809 in_rec_at_entry = 1; 4810 rack = (struct tcp_rack *)tp->t_fb_ptr; 4811 switch (type) { 4812 case CC_NDUPACK: 4813 tp->t_flags &= ~TF_WASFRECOVERY; 4814 tp->t_flags &= ~TF_WASCRECOVERY; 4815 if (!IN_FASTRECOVERY(tp->t_flags)) { 4816 rack->r_ctl.rc_prr_delivered = 0; 4817 rack->r_ctl.rc_prr_out = 0; 4818 if (rack->rack_no_prr == 0) { 4819 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4820 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4821 } 4822 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4823 tp->snd_recover = tp->snd_max; 4824 if (tp->t_flags2 & TF2_ECN_PERMIT) 4825 tp->t_flags2 |= TF2_ECN_SND_CWR; 4826 } 4827 break; 4828 case CC_ECN: 4829 if (!IN_CONGRECOVERY(tp->t_flags) || 4830 /* 4831 * Allow ECN reaction on ACK to CWR, if 4832 * that data segment was also CE marked. 
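 * For example (illustrative numbers): with snd_recover at 5000, an ACK
 * at or above 5000 means the CE mark arrived on data sent after the
 * previous CWR, so we allow a fresh congestion reaction below even
 * though we were already in CONGRECOVERY.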
4833 */ 4834 SEQ_GEQ(ack, tp->snd_recover)) { 4835 EXIT_CONGRECOVERY(tp->t_flags); 4836 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4837 tp->snd_recover = tp->snd_max + 1; 4838 if (tp->t_flags2 & TF2_ECN_PERMIT) 4839 tp->t_flags2 |= TF2_ECN_SND_CWR; 4840 } 4841 break; 4842 case CC_RTO: 4843 tp->t_dupacks = 0; 4844 tp->t_bytes_acked = 0; 4845 EXIT_RECOVERY(tp->t_flags); 4846 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4847 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4848 orig_cwnd = tp->snd_cwnd; 4849 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4850 rack_log_to_prr(rack, 16, orig_cwnd, line); 4851 if (tp->t_flags2 & TF2_ECN_PERMIT) 4852 tp->t_flags2 |= TF2_ECN_SND_CWR; 4853 break; 4854 case CC_RTO_ERR: 4855 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4856 /* RTO was unnecessary, so reset everything. */ 4857 tp->snd_cwnd = tp->snd_cwnd_prev; 4858 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4859 tp->snd_recover = tp->snd_recover_prev; 4860 if (tp->t_flags & TF_WASFRECOVERY) { 4861 ENTER_FASTRECOVERY(tp->t_flags); 4862 tp->t_flags &= ~TF_WASFRECOVERY; 4863 } 4864 if (tp->t_flags & TF_WASCRECOVERY) { 4865 ENTER_CONGRECOVERY(tp->t_flags); 4866 tp->t_flags &= ~TF_WASCRECOVERY; 4867 } 4868 tp->snd_nxt = tp->snd_max; 4869 tp->t_badrxtwin = 0; 4870 break; 4871 } 4872 if ((CC_ALGO(tp)->cong_signal != NULL) && 4873 (type != CC_RTO)){ 4874 tp->ccv->curack = ack; 4875 CC_ALGO(tp)->cong_signal(tp->ccv, type); 4876 } 4877 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4878 rack_log_to_prr(rack, 15, cwnd_enter, line); 4879 rack->r_ctl.dsack_byte_cnt = 0; 4880 rack->r_ctl.retran_during_recovery = 0; 4881 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4882 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4883 rack->r_ent_rec_ns = 1; 4884 } 4885 } 4886 4887 static inline void 4888 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4889 { 4890 uint32_t i_cwnd; 4891 4892 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4893 4894 #ifdef NETFLIX_STATS 4895 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4896 if (tp->t_state == TCPS_ESTABLISHED) 4897 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4898 #endif 4899 if (CC_ALGO(tp)->after_idle != NULL) 4900 CC_ALGO(tp)->after_idle(tp->ccv); 4901 4902 if (tp->snd_cwnd == 1) 4903 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4904 else 4905 i_cwnd = rc_init_window(rack); 4906 4907 /* 4908 * Being idle is no different than the initial window. If the cc 4909 * clamps it down below the initial window raise it to the initial 4910 * window. 4911 */ 4912 if (tp->snd_cwnd < i_cwnd) { 4913 tp->snd_cwnd = i_cwnd; 4914 } 4915 } 4916 4917 /* 4918 * Indicate whether this ack should be delayed. We can delay the ack if 4919 * following conditions are met: 4920 * - There is no delayed ack timer in progress. 4921 * - Our last ack wasn't a 0-sized window. We never want to delay 4922 * the ack that opens up a 0-sized window. 4923 * - LRO wasn't used for this segment. We make sure by checking that the 4924 * segment size is not larger than the MSS. 4925 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4926 * connection. 4927 */ 4928 #define DELAY_ACK(tp, tlen) \ 4929 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4930 ((tp->t_flags & TF_DELACK) == 0) && \ 4931 (tlen <= tp->t_maxseg) && \ 4932 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4933 4934 static struct rack_sendmap * 4935 rack_find_lowest_rsm(struct tcp_rack *rack) 4936 { 4937 struct rack_sendmap *rsm; 4938 4939 /* 4940 * Walk the time-order transmitted list looking for an rsm that is 4941 * not acked. 
This will be the one that was sent the longest time 4942 * ago that is still outstanding. 4943 */ 4944 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4945 if (rsm->r_flags & RACK_ACKED) { 4946 continue; 4947 } 4948 goto finish; 4949 } 4950 finish: 4951 return (rsm); 4952 } 4953 4954 static struct rack_sendmap * 4955 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 4956 { 4957 struct rack_sendmap *prsm; 4958 4959 /* 4960 * Walk the sequence order list backward until we arrive at 4961 * the highest seq not acked. In theory, when this is called the 4962 * passed-in rsm should already be the last segment (though it may not be). 4963 */ 4964 prsm = rsm; 4965 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 4966 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 4967 continue; 4968 } 4969 return (prsm); 4970 } 4971 return (NULL); 4972 } 4973 4974 static uint32_t 4975 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 4976 { 4977 int32_t lro; 4978 uint32_t thresh; 4979 4980 /* 4981 * lro is the flag we use to determine if we have seen reordering. 4982 * If it gets set we have seen reordering. The reorder logic either 4983 * works in one of two ways: 4984 * 4985 * If reorder-fade is configured, then we track the last time we saw 4986 * re-ordering occur. If we reach the point where enough time has 4987 * passed we no longer consider reordering to be occurring. 4988 * 4989 * Or if reorder-fade is 0, then once we see reordering we consider 4990 * the connection to always be subject to reordering and just set lro 4991 * to 1. 4992 * 4993 * In the end if lro is non-zero we add the extra time for 4994 * reordering in. 4995 */ 4996 if (srtt == 0) 4997 srtt = 1; 4998 if (rack->r_ctl.rc_reorder_ts) { 4999 if (rack->r_ctl.rc_reorder_fade) { 5000 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 5001 lro = cts - rack->r_ctl.rc_reorder_ts; 5002 if (lro == 0) { 5003 /* 5004 * No time has passed since the last 5005 * reorder; mark it as reordering. 5006 */ 5007 lro = 1; 5008 } 5009 } else { 5010 /* Negative time? */ 5011 lro = 0; 5012 } 5013 if (lro > rack->r_ctl.rc_reorder_fade) { 5014 /* Turn off reordering seen too */ 5015 rack->r_ctl.rc_reorder_ts = 0; 5016 lro = 0; 5017 } 5018 } else { 5019 /* Reordering does not fade */ 5020 lro = 1; 5021 } 5022 } else { 5023 lro = 0; 5024 } 5025 if (rack->rc_rack_tmr_std_based == 0) { 5026 thresh = srtt + rack->r_ctl.rc_pkt_delay; 5027 } else { 5028 /* Standards based pkt-delay is 1/4 srtt */ 5029 thresh = srtt + (srtt >> 2); 5030 } 5031 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 5032 /* The shift must be set; if not you get 1/4 rtt */ 5033 if (rack->r_ctl.rc_reorder_shift) 5034 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 5035 else 5036 thresh += (srtt >> 2); 5037 } 5038 if (rack->rc_rack_use_dsack && 5039 lro && 5040 (rack->r_ctl.num_dsack > 0)) { 5041 /* 5042 * We only increase the reordering window if we 5043 * have seen reordering <and> we have a DSACK count.
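 * A rough worked example (illustrative numbers): with an srtt of
 * 40000 usec and num_dsack of 2, the line below adds
 * 2 * (40000 >> 2) = 20000 usec to the threshold, before the
 * srtt * 2 and rack_rto_max ceilings that follow are applied.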
5044 */ 5045 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5046 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5047 } 5048 /* SRTT * 2 is the ceiling */ 5049 if (thresh > (srtt * 2)) { 5050 thresh = srtt * 2; 5051 } 5052 /* And we don't want it above the RTO max either */ 5053 if (thresh > rack_rto_max) { 5054 thresh = rack_rto_max; 5055 } 5056 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5057 return (thresh); 5058 } 5059 5060 static uint32_t 5061 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5062 struct rack_sendmap *rsm, uint32_t srtt) 5063 { 5064 struct rack_sendmap *prsm; 5065 uint32_t thresh, len; 5066 int segsiz; 5067 5068 if (srtt == 0) 5069 srtt = 1; 5070 if (rack->r_ctl.rc_tlp_threshold) 5071 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5072 else 5073 thresh = (srtt * 2); 5074 5075 /* Get the previous sent packet, if any */ 5076 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5077 len = rsm->r_end - rsm->r_start; 5078 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5079 /* Exactly like the ID */ 5080 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5081 uint32_t alt_thresh; 5082 /* 5083 * Compensate for delayed-ack with the d-ack time. 5084 */ 5085 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5086 if (alt_thresh > thresh) 5087 thresh = alt_thresh; 5088 } 5089 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5090 /* 2.1 behavior */ 5091 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5092 if (prsm && (len <= segsiz)) { 5093 /* 5094 * Two packets outstanding, thresh should be (2*srtt) + 5095 * possible inter-packet delay (if any). 5096 */ 5097 uint32_t inter_gap = 0; 5098 int idx, nidx; 5099 5100 idx = rsm->r_rtr_cnt - 1; 5101 nidx = prsm->r_rtr_cnt - 1; 5102 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5103 /* Yes it was sent later (or at the same time) */ 5104 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5105 } 5106 thresh += inter_gap; 5107 } else if (len <= segsiz) { 5108 /* 5109 * Possibly compensate for delayed-ack. 5110 */ 5111 uint32_t alt_thresh; 5112 5113 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5114 if (alt_thresh > thresh) 5115 thresh = alt_thresh; 5116 } 5117 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5118 /* 2.2 behavior */ 5119 if (len <= segsiz) { 5120 uint32_t alt_thresh; 5121 /* 5122 * Compensate for delayed-ack with the d-ack time. 5123 */ 5124 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5125 if (alt_thresh > thresh) 5126 thresh = alt_thresh; 5127 } 5128 } 5129 /* Not above an RTO */ 5130 if (thresh > tp->t_rxtcur) { 5131 thresh = tp->t_rxtcur; 5132 } 5133 /* Not above a RTO max */ 5134 if (thresh > rack_rto_max) { 5135 thresh = rack_rto_max; 5136 } 5137 /* Apply user supplied min TLP */ 5138 if (thresh < rack_tlp_min) { 5139 thresh = rack_tlp_min; 5140 } 5141 return (thresh); 5142 } 5143 5144 static uint32_t 5145 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5146 { 5147 /* 5148 * We want the rack_rtt which is the 5149 * last rtt we measured. However if that 5150 * does not exist we fallback to the srtt (which 5151 * we probably will never do) and then as a last 5152 * resort we use RACK_INITIAL_RTO if no srtt is 5153 * yet set. 
5154 */ 5155 if (rack->rc_rack_rtt) 5156 return (rack->rc_rack_rtt); 5157 else if (tp->t_srtt == 0) 5158 return (RACK_INITIAL_RTO); 5159 return (tp->t_srtt); 5160 } 5161 5162 static struct rack_sendmap * 5163 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5164 { 5165 /* 5166 * Check to see that we don't need to fall into recovery. We will 5167 * need to do so if our oldest transmit is past the time we should 5168 * have had an ack. 5169 */ 5170 struct tcp_rack *rack; 5171 struct rack_sendmap *rsm; 5172 int32_t idx; 5173 uint32_t srtt, thresh; 5174 5175 rack = (struct tcp_rack *)tp->t_fb_ptr; 5176 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5177 return (NULL); 5178 } 5179 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5180 if (rsm == NULL) 5181 return (NULL); 5182 5183 5184 if (rsm->r_flags & RACK_ACKED) { 5185 rsm = rack_find_lowest_rsm(rack); 5186 if (rsm == NULL) 5187 return (NULL); 5188 } 5189 idx = rsm->r_rtr_cnt - 1; 5190 srtt = rack_grab_rtt(tp, rack); 5191 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5192 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5193 return (NULL); 5194 } 5195 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5196 return (NULL); 5197 } 5198 /* Ok if we reach here we are over-due and this guy can be sent */ 5199 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5200 return (rsm); 5201 } 5202 5203 static uint32_t 5204 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5205 { 5206 int32_t t; 5207 int32_t tt; 5208 uint32_t ret_val; 5209 5210 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5211 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5212 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5213 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5214 ret_val = (uint32_t)tt; 5215 return (ret_val); 5216 } 5217 5218 static uint32_t 5219 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5220 { 5221 /* 5222 * Start the FR timer, we do this based on getting the first one in 5223 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5224 * events we need to stop the running timer (if its running) before 5225 * starting the new one. 5226 */ 5227 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5228 uint32_t srtt_cur; 5229 int32_t idx; 5230 int32_t is_tlp_timer = 0; 5231 struct rack_sendmap *rsm; 5232 5233 if (rack->t_timers_stopped) { 5234 /* All timers have been stopped none are to run */ 5235 return (0); 5236 } 5237 if (rack->rc_in_persist) { 5238 /* We can't start any timer in persists */ 5239 return (rack_get_persists_timer_val(tp, rack)); 5240 } 5241 rack->rc_on_min_to = 0; 5242 if ((tp->t_state < TCPS_ESTABLISHED) || 5243 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5244 goto activate_rxt; 5245 } 5246 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5247 if ((rsm == NULL) || sup_rack) { 5248 /* Nothing on the send map or no rack */ 5249 activate_rxt: 5250 time_since_sent = 0; 5251 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5252 if (rsm) { 5253 /* 5254 * Should we discount the RTX timer any? 5255 * 5256 * We want to discount it the smallest amount. 5257 * If a timer (Rack/TLP or RXT) has gone off more 5258 * recently thats the discount we want to use (now - timer time). 5259 * If the retransmit of the oldest packet was more recent then 5260 * we want to use that (now - oldest-packet-last_transmit_time). 
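 * A short illustration (made-up numbers): if t_rxtcur is 240000 usec
 * and the more recent of those two events was 60000 usec ago, the RXT
 * timer below is armed for about 180000 usec, and never for less than
 * rc_min_to.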
5261 * 5262 */ 5263 idx = rsm->r_rtr_cnt - 1; 5264 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5265 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5266 else 5267 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5268 if (TSTMP_GT(cts, tstmp_touse)) 5269 time_since_sent = cts - tstmp_touse; 5270 } 5271 if (SEQ_LT(tp->snd_una, tp->snd_max) || 5272 sbavail(&tptosocket(tp)->so_snd)) { 5273 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5274 to = tp->t_rxtcur; 5275 if (to > time_since_sent) 5276 to -= time_since_sent; 5277 else 5278 to = rack->r_ctl.rc_min_to; 5279 if (to == 0) 5280 to = 1; 5281 /* Special case for KEEPINIT */ 5282 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5283 (TP_KEEPINIT(tp) != 0) && 5284 rsm) { 5285 /* 5286 * We have to put a ceiling on the rxt timer 5287 * of the keep-init timeout. 5288 */ 5289 uint32_t max_time, red; 5290 5291 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5292 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5293 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5294 if (red < max_time) 5295 max_time -= red; 5296 else 5297 max_time = 1; 5298 } 5299 /* Reduce timeout to the keep value if needed */ 5300 if (max_time < to) 5301 to = max_time; 5302 } 5303 return (to); 5304 } 5305 return (0); 5306 } 5307 if (rsm->r_flags & RACK_ACKED) { 5308 rsm = rack_find_lowest_rsm(rack); 5309 if (rsm == NULL) { 5310 /* No lowest? */ 5311 goto activate_rxt; 5312 } 5313 } 5314 if (rack->sack_attack_disable) { 5315 /* 5316 * We don't want to do 5317 * any TLP's if you are an attacker. 5318 * Though if you are doing what 5319 * is expected you may still have 5320 * SACK-PASSED marks. 5321 */ 5322 goto activate_rxt; 5323 } 5324 /* Convert from ms to usecs */ 5325 if ((rsm->r_flags & RACK_SACK_PASSED) || 5326 (rsm->r_flags & RACK_RWND_COLLAPSED) || 5327 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5328 if ((tp->t_flags & TF_SENTFIN) && 5329 ((tp->snd_max - tp->snd_una) == 1) && 5330 (rsm->r_flags & RACK_HAS_FIN)) { 5331 /* 5332 * We don't start a rack timer if all we have is a 5333 * FIN outstanding. 5334 */ 5335 goto activate_rxt; 5336 } 5337 if ((rack->use_rack_rr == 0) && 5338 (IN_FASTRECOVERY(tp->t_flags)) && 5339 (rack->rack_no_prr == 0) && 5340 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5341 /* 5342 * We are not cheating, in recovery and 5343 * not enough ack's to yet get our next 5344 * retransmission out. 5345 * 5346 * Note that classified attackers do not 5347 * get to use the rack-cheat. 5348 */ 5349 goto activate_tlp; 5350 } 5351 srtt = rack_grab_rtt(tp, rack); 5352 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5353 idx = rsm->r_rtr_cnt - 1; 5354 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5355 if (SEQ_GEQ(exp, cts)) { 5356 to = exp - cts; 5357 if (to < rack->r_ctl.rc_min_to) { 5358 to = rack->r_ctl.rc_min_to; 5359 if (rack->r_rr_config == 3) 5360 rack->rc_on_min_to = 1; 5361 } 5362 } else { 5363 to = rack->r_ctl.rc_min_to; 5364 if (rack->r_rr_config == 3) 5365 rack->rc_on_min_to = 1; 5366 } 5367 } else { 5368 /* Ok we need to do a TLP not RACK */ 5369 activate_tlp: 5370 if ((rack->rc_tlp_in_progress != 0) && 5371 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5372 /* 5373 * The previous send was a TLP and we have sent 5374 * N TLP's without sending new data. 5375 */ 5376 goto activate_rxt; 5377 } 5378 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5379 if (rsm == NULL) { 5380 /* We found no rsm to TLP with. 
*/ 5381 goto activate_rxt; 5382 } 5383 if (rsm->r_flags & RACK_HAS_FIN) { 5384 /* If its a FIN we dont do TLP */ 5385 rsm = NULL; 5386 goto activate_rxt; 5387 } 5388 idx = rsm->r_rtr_cnt - 1; 5389 time_since_sent = 0; 5390 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5391 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5392 else 5393 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5394 if (TSTMP_GT(cts, tstmp_touse)) 5395 time_since_sent = cts - tstmp_touse; 5396 is_tlp_timer = 1; 5397 if (tp->t_srtt) { 5398 if ((rack->rc_srtt_measure_made == 0) && 5399 (tp->t_srtt == 1)) { 5400 /* 5401 * If another stack as run and set srtt to 1, 5402 * then the srtt was 0, so lets use the initial. 5403 */ 5404 srtt = RACK_INITIAL_RTO; 5405 } else { 5406 srtt_cur = tp->t_srtt; 5407 srtt = srtt_cur; 5408 } 5409 } else 5410 srtt = RACK_INITIAL_RTO; 5411 /* 5412 * If the SRTT is not keeping up and the 5413 * rack RTT has spiked we want to use 5414 * the last RTT not the smoothed one. 5415 */ 5416 if (rack_tlp_use_greater && 5417 tp->t_srtt && 5418 (srtt < rack_grab_rtt(tp, rack))) { 5419 srtt = rack_grab_rtt(tp, rack); 5420 } 5421 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5422 if (thresh > time_since_sent) { 5423 to = thresh - time_since_sent; 5424 } else { 5425 to = rack->r_ctl.rc_min_to; 5426 rack_log_alt_to_to_cancel(rack, 5427 thresh, /* flex1 */ 5428 time_since_sent, /* flex2 */ 5429 tstmp_touse, /* flex3 */ 5430 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5431 (uint32_t)rsm->r_tim_lastsent[idx], 5432 srtt, 5433 idx, 99); 5434 } 5435 if (to < rack_tlp_min) { 5436 to = rack_tlp_min; 5437 } 5438 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5439 /* 5440 * If the TLP time works out to larger than the max 5441 * RTO lets not do TLP.. just RTO. 5442 */ 5443 goto activate_rxt; 5444 } 5445 } 5446 if (is_tlp_timer == 0) { 5447 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5448 } else { 5449 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5450 } 5451 if (to == 0) 5452 to = 1; 5453 return (to); 5454 } 5455 5456 static void 5457 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5458 { 5459 if (rack->rc_in_persist == 0) { 5460 if (tp->t_flags & TF_GPUTINPROG) { 5461 /* 5462 * Stop the goodput now, the calling of the 5463 * measurement function clears the flag. 
5464 */ 5465 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5466 RACK_QUALITY_PERSIST); 5467 } 5468 #ifdef NETFLIX_SHARED_CWND 5469 if (rack->r_ctl.rc_scw) { 5470 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5471 rack->rack_scwnd_is_idle = 1; 5472 } 5473 #endif 5474 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5475 if (rack->r_ctl.rc_went_idle_time == 0) 5476 rack->r_ctl.rc_went_idle_time = 1; 5477 rack_timer_cancel(tp, rack, cts, __LINE__); 5478 rack->r_ctl.persist_lost_ends = 0; 5479 rack->probe_not_answered = 0; 5480 rack->forced_ack = 0; 5481 tp->t_rxtshift = 0; 5482 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5483 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5484 rack->rc_in_persist = 1; 5485 } 5486 } 5487 5488 static void 5489 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5490 { 5491 if (tcp_in_hpts(rack->rc_inp)) { 5492 tcp_hpts_remove(rack->rc_inp); 5493 rack->r_ctl.rc_hpts_flags = 0; 5494 } 5495 #ifdef NETFLIX_SHARED_CWND 5496 if (rack->r_ctl.rc_scw) { 5497 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5498 rack->rack_scwnd_is_idle = 0; 5499 } 5500 #endif 5501 if (rack->rc_gp_dyn_mul && 5502 (rack->use_fixed_rate == 0) && 5503 (rack->rc_always_pace)) { 5504 /* 5505 * Do we count this as if a probe-rtt just 5506 * finished? 5507 */ 5508 uint32_t time_idle, idle_min; 5509 5510 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5511 idle_min = rack_min_probertt_hold; 5512 if (rack_probertt_gpsrtt_cnt_div) { 5513 uint64_t extra; 5514 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5515 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5516 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5517 idle_min += (uint32_t)extra; 5518 } 5519 if (time_idle >= idle_min) { 5520 /* Yes, we count it as a probe-rtt. 
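 * (For the check above, idle_min starts at rack_min_probertt_hold and,
 * when the divisor is configured, grows by
 * rc_gp_srtt * rack_probertt_gpsrtt_cnt_mul / rack_probertt_gpsrtt_cnt_div;
 * e.g. a 30000 usec gp_srtt with a 5/1 mul/div adds 150000 usec of
 * required idle time -- illustrative numbers only.)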
*/ 5521 uint32_t us_cts; 5522 5523 us_cts = tcp_get_usecs(NULL); 5524 if (rack->in_probe_rtt == 0) { 5525 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5526 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5527 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5528 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5529 } else { 5530 rack_exit_probertt(rack, us_cts); 5531 } 5532 } 5533 } 5534 rack->rc_in_persist = 0; 5535 rack->r_ctl.rc_went_idle_time = 0; 5536 tp->t_rxtshift = 0; 5537 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5538 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5539 rack->r_ctl.rc_agg_delayed = 0; 5540 rack->r_early = 0; 5541 rack->r_late = 0; 5542 rack->r_ctl.rc_agg_early = 0; 5543 } 5544 5545 static void 5546 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5547 struct hpts_diag *diag, struct timeval *tv) 5548 { 5549 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5550 union tcp_log_stackspecific log; 5551 5552 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5553 log.u_bbr.flex1 = diag->p_nxt_slot; 5554 log.u_bbr.flex2 = diag->p_cur_slot; 5555 log.u_bbr.flex3 = diag->slot_req; 5556 log.u_bbr.flex4 = diag->inp_hptsslot; 5557 log.u_bbr.flex5 = diag->slot_remaining; 5558 log.u_bbr.flex6 = diag->need_new_to; 5559 log.u_bbr.flex7 = diag->p_hpts_active; 5560 log.u_bbr.flex8 = diag->p_on_min_sleep; 5561 /* Hijack other fields as needed */ 5562 log.u_bbr.epoch = diag->have_slept; 5563 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5564 log.u_bbr.pkts_out = diag->co_ret; 5565 log.u_bbr.applimited = diag->hpts_sleep_time; 5566 log.u_bbr.delivered = diag->p_prev_slot; 5567 log.u_bbr.inflight = diag->p_runningslot; 5568 log.u_bbr.bw_inuse = diag->wheel_slot; 5569 log.u_bbr.rttProp = diag->wheel_cts; 5570 log.u_bbr.timeStamp = cts; 5571 log.u_bbr.delRate = diag->maxslots; 5572 log.u_bbr.cur_del_rate = diag->p_curtick; 5573 log.u_bbr.cur_del_rate <<= 32; 5574 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5575 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5576 &rack->rc_inp->inp_socket->so_rcv, 5577 &rack->rc_inp->inp_socket->so_snd, 5578 BBR_LOG_HPTSDIAG, 0, 5579 0, &log, false, tv); 5580 } 5581 5582 } 5583 5584 static void 5585 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5586 { 5587 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5588 union tcp_log_stackspecific log; 5589 struct timeval tv; 5590 5591 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5592 log.u_bbr.flex1 = sb->sb_flags; 5593 log.u_bbr.flex2 = len; 5594 log.u_bbr.flex3 = sb->sb_state; 5595 log.u_bbr.flex8 = type; 5596 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5597 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5598 &rack->rc_inp->inp_socket->so_rcv, 5599 &rack->rc_inp->inp_socket->so_snd, 5600 TCP_LOG_SB_WAKE, 0, 5601 len, &log, false, &tv); 5602 } 5603 } 5604 5605 static void 5606 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5607 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5608 { 5609 struct hpts_diag diag; 5610 struct inpcb *inp = tptoinpcb(tp); 5611 struct timeval tv; 5612 uint32_t delayed_ack = 0; 5613 uint32_t hpts_timeout; 5614 uint32_t entry_slot = slot; 5615 uint8_t stopped; 5616 uint32_t left = 0; 5617 uint32_t us_cts; 5618 5619 if ((tp->t_state == TCPS_CLOSED) || 5620 (tp->t_state == TCPS_LISTEN)) { 5621 return; 5622 } 5623 if (tcp_in_hpts(inp)) { 5624 /* Already on the pacer */ 5625 return; 5626 } 5627 stopped = 
rack->rc_tmr_stopped; 5628 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5629 left = rack->r_ctl.rc_timer_exp - cts; 5630 } 5631 rack->r_ctl.rc_timer_exp = 0; 5632 rack->r_ctl.rc_hpts_flags = 0; 5633 us_cts = tcp_get_usecs(&tv); 5634 /* Now early/late accounting */ 5635 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5636 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5637 /* 5638 * We have a early carry over set, 5639 * we can always add more time so we 5640 * can always make this compensation. 5641 * 5642 * Note if ack's are allowed to wake us do not 5643 * penalize the next timer for being awoke 5644 * by an ack aka the rc_agg_early (non-paced mode). 5645 */ 5646 slot += rack->r_ctl.rc_agg_early; 5647 rack->r_early = 0; 5648 rack->r_ctl.rc_agg_early = 0; 5649 } 5650 if (rack->r_late) { 5651 /* 5652 * This is harder, we can 5653 * compensate some but it 5654 * really depends on what 5655 * the current pacing time is. 5656 */ 5657 if (rack->r_ctl.rc_agg_delayed >= slot) { 5658 /* 5659 * We can't compensate for it all. 5660 * And we have to have some time 5661 * on the clock. We always have a min 5662 * 10 slots (10 x 10 i.e. 100 usecs). 5663 */ 5664 if (slot <= HPTS_TICKS_PER_SLOT) { 5665 /* We gain delay */ 5666 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5667 slot = HPTS_TICKS_PER_SLOT; 5668 } else { 5669 /* We take off some */ 5670 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5671 slot = HPTS_TICKS_PER_SLOT; 5672 } 5673 } else { 5674 slot -= rack->r_ctl.rc_agg_delayed; 5675 rack->r_ctl.rc_agg_delayed = 0; 5676 /* Make sure we have 100 useconds at minimum */ 5677 if (slot < HPTS_TICKS_PER_SLOT) { 5678 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5679 slot = HPTS_TICKS_PER_SLOT; 5680 } 5681 if (rack->r_ctl.rc_agg_delayed == 0) 5682 rack->r_late = 0; 5683 } 5684 } 5685 if (slot) { 5686 /* We are pacing too */ 5687 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5688 } 5689 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5690 #ifdef NETFLIX_EXP_DETECTION 5691 if (rack->sack_attack_disable && 5692 (slot < tcp_sad_pacing_interval)) { 5693 /* 5694 * We have a potential attacker on 5695 * the line. We have possibly some 5696 * (or now) pacing time set. We want to 5697 * slow down the processing of sacks by some 5698 * amount (if it is an attacker). Set the default 5699 * slot for attackers in place (unless the orginal 5700 * interval is longer). Its stored in 5701 * micro-seconds, so lets convert to msecs. 5702 */ 5703 slot = tcp_sad_pacing_interval; 5704 } 5705 #endif 5706 if (tp->t_flags & TF_DELACK) { 5707 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5708 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5709 } 5710 if (delayed_ack && ((hpts_timeout == 0) || 5711 (delayed_ack < hpts_timeout))) 5712 hpts_timeout = delayed_ack; 5713 else 5714 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5715 /* 5716 * If no timers are going to run and we will fall off the hptsi 5717 * wheel, we resort to a keep-alive timer if its configured. 5718 */ 5719 if ((hpts_timeout == 0) && 5720 (slot == 0)) { 5721 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5722 (tp->t_state <= TCPS_CLOSING)) { 5723 /* 5724 * Ok we have no timer (persists, rack, tlp, rxt or 5725 * del-ack), we don't have segments being paced. So 5726 * all that is left is the keepalive timer. 
5727 */ 5728 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5729 /* Get the established keep-alive time */ 5730 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5731 } else { 5732 /* 5733 * Get the initial setup keep-alive time, 5734 * note that this is probably not going to 5735 * happen, since rack will be running a rxt timer 5736 * if a SYN of some sort is outstanding. It is 5737 * actually handled in rack_timeout_rxt(). 5738 */ 5739 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5740 } 5741 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5742 if (rack->in_probe_rtt) { 5743 /* 5744 * We want to instead not wake up a long time from 5745 * now but to wake up about the time we would 5746 * exit probe-rtt and initiate a keep-alive ack. 5747 * This will get us out of probe-rtt and update 5748 * our min-rtt. 5749 */ 5750 hpts_timeout = rack_min_probertt_hold; 5751 } 5752 } 5753 } 5754 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5755 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5756 /* 5757 * RACK, TLP, persists and RXT timers all are restartable 5758 * based on actions input .. i.e we received a packet (ack 5759 * or sack) and that changes things (rw, or snd_una etc). 5760 * Thus we can restart them with a new value. For 5761 * keep-alive, delayed_ack we keep track of what was left 5762 * and restart the timer with a smaller value. 5763 */ 5764 if (left < hpts_timeout) 5765 hpts_timeout = left; 5766 } 5767 if (hpts_timeout) { 5768 /* 5769 * Hack alert for now we can't time-out over 2,147,483 5770 * seconds (a bit more than 596 hours), which is probably ok 5771 * :). 5772 */ 5773 if (hpts_timeout > 0x7ffffffe) 5774 hpts_timeout = 0x7ffffffe; 5775 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5776 } 5777 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5778 if ((rack->gp_ready == 0) && 5779 (rack->use_fixed_rate == 0) && 5780 (hpts_timeout < slot) && 5781 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5782 /* 5783 * We have no good estimate yet for the 5784 * old clunky burst mitigation or the 5785 * real pacing. And the tlp or rxt is smaller 5786 * than the pacing calculation. Lets not 5787 * pace that long since we know the calculation 5788 * so far is not accurate. 5789 */ 5790 slot = hpts_timeout; 5791 } 5792 /** 5793 * Turn off all the flags for queuing by default. The 5794 * flags have important meanings to what happens when 5795 * LRO interacts with the transport. Most likely (by default now) 5796 * mbuf_queueing and ack compression are on. So the transport 5797 * has a couple of flags that control what happens (if those 5798 * are not on then these flags won't have any effect since it 5799 * won't go through the queuing LRO path). 5800 * 5801 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5802 * pacing output, so don't disturb. But 5803 * it also means LRO can wake me if there 5804 * is a SACK arrival. 5805 * 5806 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5807 * with the above flag (QUEUE_READY) and 5808 * when present it says don't even wake me 5809 * if a SACK arrives. 5810 * 5811 * The idea behind these flags is that if we are pacing we 5812 * set the MBUF_QUEUE_READY and only get woken up if 5813 * a SACK arrives (which could change things) or if 5814 * our pacing timer expires. If, however, we have a rack 5815 * timer running, then we don't even want a sack to wake 5816 * us since the rack timer has to expire before we can send. 
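 * A condensed sketch of how the code below applies them:
 *
 *   pacing timer only                    -> INP_MBUF_QUEUE_READY
 *   pacing + rack timer
 *   (and r_rr_config != 3)               -> INP_MBUF_QUEUE_READY |
 *                                           INP_DONT_SACK_QUEUE
 *   rc_ack_can_sendout_data              -> both flags cleared again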
5817 * 5818 * Other cases should usually have none of the flags set 5819 * so LRO can call into us. 5820 */ 5821 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5822 if (slot) { 5823 rack->r_ctl.rc_last_output_to = us_cts + slot; 5824 /* 5825 * A pacing timer (slot) is being set, in 5826 * such a case we cannot send (we are blocked by 5827 * the timer). So lets tell LRO that it should not 5828 * wake us unless there is a SACK. Note this only 5829 * will be effective if mbuf queueing is on or 5830 * compressed acks are being processed. 5831 */ 5832 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5833 /* 5834 * But wait if we have a Rack timer running 5835 * even a SACK should not disturb us (with 5836 * the exception of r_rr_config 3). 5837 */ 5838 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5839 (rack->r_rr_config != 3)) 5840 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5841 if (rack->rc_ack_can_sendout_data) { 5842 /* 5843 * Ahh but wait, this is that special case 5844 * where the pacing timer can be disturbed 5845 * backout the changes (used for non-paced 5846 * burst limiting). 5847 */ 5848 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5849 } 5850 if ((rack->use_rack_rr) && 5851 (rack->r_rr_config < 2) && 5852 ((hpts_timeout) && (hpts_timeout < slot))) { 5853 /* 5854 * Arrange for the hpts to kick back in after the 5855 * t-o if the t-o does not cause a send. 5856 */ 5857 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5858 __LINE__, &diag); 5859 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5860 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5861 } else { 5862 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), 5863 __LINE__, &diag); 5864 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5865 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5866 } 5867 } else if (hpts_timeout) { 5868 /* 5869 * With respect to inp_flags2 here, lets let any new acks wake 5870 * us up here. Since we are not pacing (no pacing timer), output 5871 * can happen so we should let it. If its a Rack timer, then any inbound 5872 * packet probably won't change the sending (we will be blocked) 5873 * but it may change the prr stats so letting it in (the set defaults 5874 * at the start of this block) are good enough. 5875 */ 5876 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5877 __LINE__, &diag); 5878 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5879 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5880 } else { 5881 /* No timer starting */ 5882 #ifdef INVARIANTS 5883 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5884 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5885 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5886 } 5887 #endif 5888 } 5889 rack->rc_tmr_stopped = 0; 5890 if (slot) 5891 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5892 } 5893 5894 /* 5895 * RACK Timer, here we simply do logging and house keeping. 5896 * the normal rack_output() function will call the 5897 * appropriate thing to check if we need to do a RACK retransmit. 5898 * We return 1, saying don't proceed with rack_output only 5899 * when all timers have been stopped (destroyed PCB?). 5900 */ 5901 static int 5902 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5903 { 5904 /* 5905 * This timer simply provides an internal trigger to send out data. 5906 * The check_recovery_mode call will see if there are needed 5907 * retransmissions, if so we will enter fast-recovery. 
The output 5908 * call may or may not do the same thing depending on sysctl 5909 * settings. 5910 */ 5911 struct rack_sendmap *rsm; 5912 5913 if (tp->t_timers->tt_flags & TT_STOPPED) { 5914 return (1); 5915 } 5916 counter_u64_add(rack_to_tot, 1); 5917 if (rack->r_state && (rack->r_state != tp->t_state)) 5918 rack_set_state(tp, rack); 5919 rack->rc_on_min_to = 0; 5920 rsm = rack_check_recovery_mode(tp, cts); 5921 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5922 if (rsm) { 5923 rack->r_ctl.rc_resend = rsm; 5924 rack->r_timer_override = 1; 5925 if (rack->use_rack_rr) { 5926 /* 5927 * Don't accumulate extra pacing delay 5928 * we are allowing the rack timer to 5929 * over-ride pacing i.e. rrr takes precedence 5930 * if the pacing interval is longer than the rrr 5931 * time (in other words we get the min pacing 5932 * time versus rrr pacing time). 5933 */ 5934 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5935 } 5936 } 5937 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5938 if (rsm == NULL) { 5939 /* restart a timer and return 1 */ 5940 rack_start_hpts_timer(rack, tp, cts, 5941 0, 0, 0); 5942 return (1); 5943 } 5944 return (0); 5945 } 5946 5947 static void 5948 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5949 { 5950 if (rsm->m->m_len > rsm->orig_m_len) { 5951 /* 5952 * Mbuf grew, caused by sbcompress, our offset does 5953 * not change. 5954 */ 5955 rsm->orig_m_len = rsm->m->m_len; 5956 } else if (rsm->m->m_len < rsm->orig_m_len) { 5957 /* 5958 * Mbuf shrank, trimmed off the top by an ack, our 5959 * offset changes. 5960 */ 5961 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5962 rsm->orig_m_len = rsm->m->m_len; 5963 } 5964 } 5965 5966 static void 5967 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5968 { 5969 struct mbuf *m; 5970 uint32_t soff; 5971 5972 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5973 /* Fix up the orig_m_len and possibly the mbuf offset */ 5974 rack_adjust_orig_mlen(src_rsm); 5975 } 5976 m = src_rsm->m; 5977 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5978 while (soff >= m->m_len) { 5979 /* Move out past this mbuf */ 5980 soff -= m->m_len; 5981 m = m->m_next; 5982 KASSERT((m != NULL), 5983 ("rsm:%p nrsm:%p hit at soff:%u null m", 5984 src_rsm, rsm, soff)); 5985 } 5986 rsm->m = m; 5987 rsm->soff = soff; 5988 rsm->orig_m_len = m->m_len; 5989 } 5990 5991 static __inline void 5992 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5993 struct rack_sendmap *rsm, uint32_t start) 5994 { 5995 int idx; 5996 5997 nrsm->r_start = start; 5998 nrsm->r_end = rsm->r_end; 5999 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6000 nrsm->r_flags = rsm->r_flags; 6001 nrsm->r_dupack = rsm->r_dupack; 6002 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6003 nrsm->r_rtr_bytes = 0; 6004 nrsm->r_fas = rsm->r_fas; 6005 rsm->r_end = nrsm->r_start; 6006 nrsm->r_just_ret = rsm->r_just_ret; 6007 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6008 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6009 } 6010 /* Now if we have SYN flag we keep it on the left edge */ 6011 if (nrsm->r_flags & RACK_HAS_SYN) 6012 nrsm->r_flags &= ~RACK_HAS_SYN; 6013 /* Now if we have a FIN flag we keep it on the right edge */ 6014 if (rsm->r_flags & RACK_HAS_FIN) 6015 rsm->r_flags &= ~RACK_HAS_FIN; 6016 /* Push bit must go to the right edge as well */ 6017 if (rsm->r_flags & RACK_HAD_PUSH) 6018 rsm->r_flags &= ~RACK_HAD_PUSH; 6019 /* Clone over the state of the hw_tls flag */ 6020 nrsm->r_hw_tls = rsm->r_hw_tls; 6021 /* 6022 * Now we need to find 
nrsm's new location in the mbuf chain 6023 * we basically calculate a new offset, which is soff + 6024 * how much is left in original rsm. Then we walk out the mbuf 6025 * chain to find the righ position, it may be the same mbuf 6026 * or maybe not. 6027 */ 6028 KASSERT(((rsm->m != NULL) || 6029 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6030 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6031 if (rsm->m) 6032 rack_setup_offset_for_rsm(rsm, nrsm); 6033 } 6034 6035 static struct rack_sendmap * 6036 rack_merge_rsm(struct tcp_rack *rack, 6037 struct rack_sendmap *l_rsm, 6038 struct rack_sendmap *r_rsm) 6039 { 6040 /* 6041 * We are merging two ack'd RSM's, 6042 * the l_rsm is on the left (lower seq 6043 * values) and the r_rsm is on the right 6044 * (higher seq value). The simplest way 6045 * to merge these is to move the right 6046 * one into the left. I don't think there 6047 * is any reason we need to try to find 6048 * the oldest (or last oldest retransmitted). 6049 */ 6050 #ifdef INVARIANTS 6051 struct rack_sendmap *rm; 6052 #endif 6053 rack_log_map_chg(rack->rc_tp, rack, NULL, 6054 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6055 l_rsm->r_end = r_rsm->r_end; 6056 if (l_rsm->r_dupack < r_rsm->r_dupack) 6057 l_rsm->r_dupack = r_rsm->r_dupack; 6058 if (r_rsm->r_rtr_bytes) 6059 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6060 if (r_rsm->r_in_tmap) { 6061 /* This really should not happen */ 6062 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6063 r_rsm->r_in_tmap = 0; 6064 } 6065 6066 /* Now the flags */ 6067 if (r_rsm->r_flags & RACK_HAS_FIN) 6068 l_rsm->r_flags |= RACK_HAS_FIN; 6069 if (r_rsm->r_flags & RACK_TLP) 6070 l_rsm->r_flags |= RACK_TLP; 6071 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6072 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6073 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6074 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6075 /* 6076 * If both are app-limited then let the 6077 * free lower the count. If right is app 6078 * limited and left is not, transfer. 6079 */ 6080 l_rsm->r_flags |= RACK_APP_LIMITED; 6081 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6082 if (r_rsm == rack->r_ctl.rc_first_appl) 6083 rack->r_ctl.rc_first_appl = l_rsm; 6084 } 6085 #ifndef INVARIANTS 6086 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6087 #else 6088 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6089 if (rm != r_rsm) { 6090 panic("removing head in rack:%p rsm:%p rm:%p", 6091 rack, r_rsm, rm); 6092 } 6093 #endif 6094 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6095 /* Transfer the split limit to the map we free */ 6096 r_rsm->r_limit_type = l_rsm->r_limit_type; 6097 l_rsm->r_limit_type = 0; 6098 } 6099 rack_free(rack, r_rsm); 6100 return (l_rsm); 6101 } 6102 6103 /* 6104 * TLP Timer, here we simply setup what segment we want to 6105 * have the TLP expire on, the normal rack_output() will then 6106 * send it out. 6107 * 6108 * We return 1, saying don't proceed with rack_output only 6109 * when all timers have been stopped (destroyed PCB?). 6110 */ 6111 static int 6112 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6113 { 6114 /* 6115 * Tail Loss Probe. 
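 * (Sketch of the decision below: if at least a full segment of new data
 * is available and fits within the peer's advertised window we arm a TLP
 * of new data; otherwise we fall through to need_retran and re-send the
 * highest un-acked segment, splitting it first if it is larger than one
 * MSS.)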
6116 */ 6117 struct rack_sendmap *rsm = NULL; 6118 #ifdef INVARIANTS 6119 struct rack_sendmap *insret; 6120 #endif 6121 struct socket *so = tptosocket(tp); 6122 uint32_t amm; 6123 uint32_t out, avail; 6124 int collapsed_win = 0; 6125 6126 if (tp->t_timers->tt_flags & TT_STOPPED) { 6127 return (1); 6128 } 6129 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6130 /* Its not time yet */ 6131 return (0); 6132 } 6133 if (ctf_progress_timeout_check(tp, true)) { 6134 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6135 return (-ETIMEDOUT); /* tcp_drop() */ 6136 } 6137 /* 6138 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6139 * need to figure out how to force a full MSS segment out. 6140 */ 6141 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6142 rack->r_ctl.retran_during_recovery = 0; 6143 rack->r_ctl.dsack_byte_cnt = 0; 6144 counter_u64_add(rack_tlp_tot, 1); 6145 if (rack->r_state && (rack->r_state != tp->t_state)) 6146 rack_set_state(tp, rack); 6147 avail = sbavail(&so->so_snd); 6148 out = tp->snd_max - tp->snd_una; 6149 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6150 /* special case, we need a retransmission */ 6151 collapsed_win = 1; 6152 goto need_retran; 6153 } 6154 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6155 rack->r_ctl.dsack_persist--; 6156 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6157 rack->r_ctl.num_dsack = 0; 6158 } 6159 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6160 } 6161 if ((tp->t_flags & TF_GPUTINPROG) && 6162 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6163 /* 6164 * If this is the second in a row 6165 * TLP and we are doing a measurement 6166 * its time to abandon the measurement. 6167 * Something is likely broken on 6168 * the clients network and measuring a 6169 * broken network does us no good. 6170 */ 6171 tp->t_flags &= ~TF_GPUTINPROG; 6172 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6173 rack->r_ctl.rc_gp_srtt /*flex1*/, 6174 tp->gput_seq, 6175 0, 0, 18, __LINE__, NULL, 0); 6176 } 6177 /* 6178 * Check our send oldest always settings, and if 6179 * there is an oldest to send jump to the need_retran. 6180 */ 6181 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6182 goto need_retran; 6183 6184 if (avail > out) { 6185 /* New data is available */ 6186 amm = avail - out; 6187 if (amm > ctf_fixed_maxseg(tp)) { 6188 amm = ctf_fixed_maxseg(tp); 6189 if ((amm + out) > tp->snd_wnd) { 6190 /* We are rwnd limited */ 6191 goto need_retran; 6192 } 6193 } else if (amm < ctf_fixed_maxseg(tp)) { 6194 /* not enough to fill a MTU */ 6195 goto need_retran; 6196 } 6197 if (IN_FASTRECOVERY(tp->t_flags)) { 6198 /* Unlikely */ 6199 if (rack->rack_no_prr == 0) { 6200 if (out + amm <= tp->snd_wnd) { 6201 rack->r_ctl.rc_prr_sndcnt = amm; 6202 rack->r_ctl.rc_tlp_new_data = amm; 6203 rack_log_to_prr(rack, 4, 0, __LINE__); 6204 } 6205 } else 6206 goto need_retran; 6207 } else { 6208 /* Set the send-new override */ 6209 if (out + amm <= tp->snd_wnd) 6210 rack->r_ctl.rc_tlp_new_data = amm; 6211 else 6212 goto need_retran; 6213 } 6214 rack->r_ctl.rc_tlpsend = NULL; 6215 counter_u64_add(rack_tlp_newdata, 1); 6216 goto send; 6217 } 6218 need_retran: 6219 /* 6220 * Ok we need to arrange the last un-acked segment to be re-sent, or 6221 * optionally the first un-acked segment. 
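 * E.g. (illustrative sizes) a 4344 byte rsm with a 1448 byte MSS is
 * split below at r_end - 1448 so the probe retransmits only the last
 * full-sized piece.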
6222 */ 6223 if (collapsed_win == 0) { 6224 if (rack_always_send_oldest) 6225 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6226 else { 6227 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6228 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6229 rsm = rack_find_high_nonack(rack, rsm); 6230 } 6231 } 6232 if (rsm == NULL) { 6233 #ifdef TCP_BLACKBOX 6234 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6235 #endif 6236 goto out; 6237 } 6238 } else { 6239 /* 6240 * We must find the last segment 6241 * that was acceptable by the client. 6242 */ 6243 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6244 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6245 /* Found one */ 6246 break; 6247 } 6248 } 6249 if (rsm == NULL) { 6250 /* None? if so send the first */ 6251 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6252 if (rsm == NULL) { 6253 #ifdef TCP_BLACKBOX 6254 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6255 #endif 6256 goto out; 6257 } 6258 } 6259 } 6260 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6261 /* 6262 * We need to split this the last segment in two. 6263 */ 6264 struct rack_sendmap *nrsm; 6265 6266 nrsm = rack_alloc_full_limit(rack); 6267 if (nrsm == NULL) { 6268 /* 6269 * No memory to split, we will just exit and punt 6270 * off to the RXT timer. 6271 */ 6272 goto out; 6273 } 6274 rack_clone_rsm(rack, nrsm, rsm, 6275 (rsm->r_end - ctf_fixed_maxseg(tp))); 6276 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6277 #ifndef INVARIANTS 6278 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6279 #else 6280 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6281 if (insret != NULL) { 6282 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6283 nrsm, insret, rack, rsm); 6284 } 6285 #endif 6286 if (rsm->r_in_tmap) { 6287 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6288 nrsm->r_in_tmap = 1; 6289 } 6290 rsm = nrsm; 6291 } 6292 rack->r_ctl.rc_tlpsend = rsm; 6293 send: 6294 /* Make sure output path knows we are doing a TLP */ 6295 *doing_tlp = 1; 6296 rack->r_timer_override = 1; 6297 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6298 return (0); 6299 out: 6300 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6301 return (0); 6302 } 6303 6304 /* 6305 * Delayed ack Timer, here we simply need to setup the 6306 * ACK_NOW flag and remove the DELACK flag. From there 6307 * the output routine will send the ack out. 6308 * 6309 * We only return 1, saying don't proceed, if all timers 6310 * are stopped (destroyed PCB?). 6311 */ 6312 static int 6313 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6314 { 6315 if (tp->t_timers->tt_flags & TT_STOPPED) { 6316 return (1); 6317 } 6318 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6319 tp->t_flags &= ~TF_DELACK; 6320 tp->t_flags |= TF_ACKNOW; 6321 KMOD_TCPSTAT_INC(tcps_delack); 6322 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6323 return (0); 6324 } 6325 6326 /* 6327 * Persists timer, here we simply send the 6328 * same thing as a keepalive will. 6329 * the one byte send. 6330 * 6331 * We only return 1, saying don't proceed, if all timers 6332 * are stopped (destroyed PCB?). 
6333 */ 6334 static int 6335 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6336 { 6337 struct tcptemp *t_template; 6338 int32_t retval = 1; 6339 6340 if (tp->t_timers->tt_flags & TT_STOPPED) { 6341 return (1); 6342 } 6343 if (rack->rc_in_persist == 0) 6344 return (0); 6345 if (ctf_progress_timeout_check(tp, false)) { 6346 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6347 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6348 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6349 return (-ETIMEDOUT); /* tcp_drop() */ 6350 } 6351 /* 6352 * Persistence timer into zero window. Force a byte to be output, if 6353 * possible. 6354 */ 6355 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6356 /* 6357 * Hack: if the peer is dead/unreachable, we do not time out if the 6358 * window is closed. After a full backoff, drop the connection if 6359 * the idle time (no responses to probes) reaches the maximum 6360 * backoff that we would use if retransmitting. 6361 */ 6362 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6363 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6364 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6365 KMOD_TCPSTAT_INC(tcps_persistdrop); 6366 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6367 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6368 retval = -ETIMEDOUT; /* tcp_drop() */ 6369 goto out; 6370 } 6371 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6372 tp->snd_una == tp->snd_max) 6373 rack_exit_persist(tp, rack, cts); 6374 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6375 /* 6376 * If the user has closed the socket then drop a persisting 6377 * connection after a much reduced timeout. 6378 */ 6379 if (tp->t_state > TCPS_CLOSE_WAIT && 6380 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6381 KMOD_TCPSTAT_INC(tcps_persistdrop); 6382 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6383 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6384 retval = -ETIMEDOUT; /* tcp_drop() */ 6385 goto out; 6386 } 6387 t_template = tcpip_maketemplate(rack->rc_inp); 6388 if (t_template) { 6389 /* only set it if we were answered */ 6390 if (rack->forced_ack == 0) { 6391 rack->forced_ack = 1; 6392 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6393 } else { 6394 rack->probe_not_answered = 1; 6395 counter_u64_add(rack_persists_loss, 1); 6396 rack->r_ctl.persist_lost_ends++; 6397 } 6398 counter_u64_add(rack_persists_sends, 1); 6399 tcp_respond(tp, t_template->tt_ipgen, 6400 &t_template->tt_t, (struct mbuf *)NULL, 6401 tp->rcv_nxt, tp->snd_una - 1, 0); 6402 /* This sends an ack */ 6403 if (tp->t_flags & TF_DELACK) 6404 tp->t_flags &= ~TF_DELACK; 6405 free(t_template, M_TEMP); 6406 } 6407 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6408 tp->t_rxtshift++; 6409 out: 6410 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6411 rack_start_hpts_timer(rack, tp, cts, 6412 0, 0, 0); 6413 return (retval); 6414 } 6415 6416 /* 6417 * If a keepalive goes off, we had no other timers 6418 * happening. We always return 1 here since this 6419 * routine either drops the connection or sends 6420 * out a segment with respond. 
6421 */ 6422 static int 6423 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6424 { 6425 struct tcptemp *t_template; 6426 struct inpcb *inp = tptoinpcb(tp); 6427 6428 if (tp->t_timers->tt_flags & TT_STOPPED) { 6429 return (1); 6430 } 6431 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6432 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6433 /* 6434 * Keep-alive timer went off; send something or drop connection if 6435 * idle for too long. 6436 */ 6437 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6438 if (tp->t_state < TCPS_ESTABLISHED) 6439 goto dropit; 6440 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6441 tp->t_state <= TCPS_CLOSING) { 6442 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6443 goto dropit; 6444 /* 6445 * Send a packet designed to force a response if the peer is 6446 * up and reachable: either an ACK if the connection is 6447 * still alive, or an RST if the peer has closed the 6448 * connection due to timeout or reboot. Using sequence 6449 * number tp->snd_una-1 causes the transmitted zero-length 6450 * segment to lie outside the receive window; by the 6451 * protocol spec, this requires the correspondent TCP to 6452 * respond. 6453 */ 6454 KMOD_TCPSTAT_INC(tcps_keepprobe); 6455 t_template = tcpip_maketemplate(inp); 6456 if (t_template) { 6457 if (rack->forced_ack == 0) { 6458 rack->forced_ack = 1; 6459 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6460 } else { 6461 rack->probe_not_answered = 1; 6462 } 6463 tcp_respond(tp, t_template->tt_ipgen, 6464 &t_template->tt_t, (struct mbuf *)NULL, 6465 tp->rcv_nxt, tp->snd_una - 1, 0); 6466 free(t_template, M_TEMP); 6467 } 6468 } 6469 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6470 return (1); 6471 dropit: 6472 KMOD_TCPSTAT_INC(tcps_keepdrops); 6473 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6474 return (-ETIMEDOUT); /* tcp_drop() */ 6475 } 6476 6477 /* 6478 * Retransmit helper function, clear up all the ack 6479 * flags and take care of important book keeping. 6480 */ 6481 static void 6482 rack_remxt_tmr(struct tcpcb *tp) 6483 { 6484 /* 6485 * The retransmit timer went off, all sack'd blocks must be 6486 * un-acked. 6487 */ 6488 struct rack_sendmap *rsm, *trsm = NULL; 6489 struct tcp_rack *rack; 6490 6491 rack = (struct tcp_rack *)tp->t_fb_ptr; 6492 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6493 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6494 if (rack->r_state && (rack->r_state != tp->t_state)) 6495 rack_set_state(tp, rack); 6496 /* 6497 * Ideally we would like to be able to 6498 * mark SACK-PASS on anything not acked here. 6499 * 6500 * However, if we do that we would burst out 6501 * all that data 1ms apart. This would be unwise, 6502 * so for now we will just let the normal rxt timer 6503 * and tlp timer take care of it. 6504 * 6505 * Also we really need to stick them back in sequence 6506 * order. This way we send in the proper order and any 6507 * sacks that come floating in will "re-ack" the data. 6508 * To do this we zap the tmap with an INIT and then 6509 * walk through and place every rsm in the RB tree 6510 * back in its seq ordered place. 
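 *
 * For example (illustrative sequence numbers): with a scoreboard of
 * [1,1449) [1449,2897) [2897,4345) where the middle block had been
 * SACK'd, the loop below leaves all three blocks in the tmap in that
 * same sequence order, with RACK_ACKED cleared (remembered via
 * RACK_WAS_ACKED) and RACK_MUST_RXT set on each.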
6511 */ 6512 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6513 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6514 rsm->r_dupack = 0; 6515 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6516 /* We must re-add it back to the tlist */ 6517 if (trsm == NULL) { 6518 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6519 } else { 6520 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6521 } 6522 rsm->r_in_tmap = 1; 6523 trsm = rsm; 6524 if (rsm->r_flags & RACK_ACKED) 6525 rsm->r_flags |= RACK_WAS_ACKED; 6526 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 6527 rsm->r_flags |= RACK_MUST_RXT; 6528 } 6529 /* Clear the count (we just un-acked them) */ 6530 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6531 rack->r_ctl.rc_sacked = 0; 6532 rack->r_ctl.rc_sacklast = NULL; 6533 rack->r_ctl.rc_agg_delayed = 0; 6534 rack->r_early = 0; 6535 rack->r_ctl.rc_agg_early = 0; 6536 rack->r_late = 0; 6537 /* Clear the tlp rtx mark */ 6538 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6539 if (rack->r_ctl.rc_resend != NULL) 6540 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6541 rack->r_ctl.rc_prr_sndcnt = 0; 6542 rack_log_to_prr(rack, 6, 0, __LINE__); 6543 rack->r_timer_override = 1; 6544 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6545 #ifdef NETFLIX_EXP_DETECTION 6546 || (rack->sack_attack_disable != 0) 6547 #endif 6548 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6549 /* 6550 * For non-sack customers new data 6551 * needs to go out as retransmits until 6552 * we retransmit up to snd_max. 6553 */ 6554 rack->r_must_retran = 1; 6555 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6556 rack->r_ctl.rc_sacked); 6557 } 6558 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6559 } 6560 6561 static void 6562 rack_convert_rtts(struct tcpcb *tp) 6563 { 6564 if (tp->t_srtt > 1) { 6565 uint32_t val, frac; 6566 6567 val = tp->t_srtt >> TCP_RTT_SHIFT; 6568 frac = tp->t_srtt & 0x1f; 6569 tp->t_srtt = TICKS_2_USEC(val); 6570 /* 6571 * frac is the fractional part of the srtt (if any) 6572 * but its in ticks and every bit represents 6573 * 1/32nd of a hz. 6574 */ 6575 if (frac) { 6576 if (hz == 1000) { 6577 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6578 } else { 6579 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6580 } 6581 tp->t_srtt += frac; 6582 } 6583 } 6584 if (tp->t_rttvar) { 6585 uint32_t val, frac; 6586 6587 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6588 frac = tp->t_rttvar & 0x1f; 6589 tp->t_rttvar = TICKS_2_USEC(val); 6590 /* 6591 * frac is the fractional part of the srtt (if any) 6592 * but its in ticks and every bit represents 6593 * 1/32nd of a hz. 
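 *
 * Worked example (illustrative numbers): with hz = 1000 a
 * fractional part of 15/32 of a tick converts below to
 * 15 * 1000 / 32 = 468 microseconds, which is then added on top
 * of the TICKS_2_USEC() value computed above.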
6594 */ 6595 if (frac) { 6596 if (hz == 1000) { 6597 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6598 } else { 6599 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6600 } 6601 tp->t_rttvar += frac; 6602 } 6603 } 6604 tp->t_rxtcur = RACK_REXMTVAL(tp); 6605 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6606 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6607 } 6608 if (tp->t_rxtcur > rack_rto_max) { 6609 tp->t_rxtcur = rack_rto_max; 6610 } 6611 } 6612 6613 static void 6614 rack_cc_conn_init(struct tcpcb *tp) 6615 { 6616 struct tcp_rack *rack; 6617 uint32_t srtt; 6618 6619 rack = (struct tcp_rack *)tp->t_fb_ptr; 6620 srtt = tp->t_srtt; 6621 cc_conn_init(tp); 6622 /* 6623 * Now convert to rack's internal format, 6624 * if required. 6625 */ 6626 if ((srtt == 0) && (tp->t_srtt != 0)) 6627 rack_convert_rtts(tp); 6628 /* 6629 * We want a chance to stay in slowstart as 6630 * we create a connection. TCP spec says that 6631 * initially ssthresh is infinite. For our 6632 * purposes that is the snd_wnd. 6633 */ 6634 if (tp->snd_ssthresh < tp->snd_wnd) { 6635 tp->snd_ssthresh = tp->snd_wnd; 6636 } 6637 /* 6638 * We also want to assure a IW worth of 6639 * data can get inflight. 6640 */ 6641 if (rc_init_window(rack) < tp->snd_cwnd) 6642 tp->snd_cwnd = rc_init_window(rack); 6643 } 6644 6645 /* 6646 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6647 * we will setup to retransmit the lowest seq number outstanding. 6648 */ 6649 static int 6650 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6651 { 6652 struct inpcb *inp = tptoinpcb(tp); 6653 int32_t rexmt; 6654 int32_t retval = 0; 6655 bool isipv6; 6656 6657 if (tp->t_timers->tt_flags & TT_STOPPED) { 6658 return (1); 6659 } 6660 if ((tp->t_flags & TF_GPUTINPROG) && 6661 (tp->t_rxtshift)) { 6662 /* 6663 * We have had a second timeout 6664 * measurements on successive rxt's are not profitable. 6665 * It is unlikely to be of any use (the network is 6666 * broken or the client went away). 6667 */ 6668 tp->t_flags &= ~TF_GPUTINPROG; 6669 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6670 rack->r_ctl.rc_gp_srtt /*flex1*/, 6671 tp->gput_seq, 6672 0, 0, 18, __LINE__, NULL, 0); 6673 } 6674 if (ctf_progress_timeout_check(tp, false)) { 6675 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6676 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6677 return (-ETIMEDOUT); /* tcp_drop() */ 6678 } 6679 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6680 rack->r_ctl.retran_during_recovery = 0; 6681 rack->rc_ack_required = 1; 6682 rack->r_ctl.dsack_byte_cnt = 0; 6683 if (IN_FASTRECOVERY(tp->t_flags)) 6684 tp->t_flags |= TF_WASFRECOVERY; 6685 else 6686 tp->t_flags &= ~TF_WASFRECOVERY; 6687 if (IN_CONGRECOVERY(tp->t_flags)) 6688 tp->t_flags |= TF_WASCRECOVERY; 6689 else 6690 tp->t_flags &= ~TF_WASCRECOVERY; 6691 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6692 (tp->snd_una == tp->snd_max)) { 6693 /* Nothing outstanding .. nothing to do */ 6694 return (0); 6695 } 6696 if (rack->r_ctl.dsack_persist) { 6697 rack->r_ctl.dsack_persist--; 6698 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6699 rack->r_ctl.num_dsack = 0; 6700 } 6701 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6702 } 6703 /* 6704 * Rack can only run one timer at a time, so we cannot 6705 * run a KEEPINIT (gating SYN sending) and a retransmit 6706 * timer for the SYN. 
So if we are in a front state and 6707 * have a KEEPINIT timer we need to check the first transmit 6708 * against now to see if we have exceeded the KEEPINIT time 6709 * (if one is set). 6710 */ 6711 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6712 (TP_KEEPINIT(tp) != 0)) { 6713 struct rack_sendmap *rsm; 6714 6715 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6716 if (rsm) { 6717 /* Ok we have something outstanding to test keepinit with */ 6718 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6719 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6720 /* We have exceeded the KEEPINIT time */ 6721 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6722 goto drop_it; 6723 } 6724 } 6725 } 6726 /* 6727 * Retransmission timer went off. Message has not been acked within 6728 * retransmit interval. Back off to a longer retransmit interval 6729 * and retransmit one segment. 6730 */ 6731 rack_remxt_tmr(tp); 6732 if ((rack->r_ctl.rc_resend == NULL) || 6733 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6734 /* 6735 * If the rwnd collapsed on 6736 * the one we are retransmitting 6737 * it does not count against the 6738 * rxt count. 6739 */ 6740 tp->t_rxtshift++; 6741 } 6742 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6743 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6744 drop_it: 6745 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6746 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6747 /* XXXGL: previously t_softerror was casted to uint16_t */ 6748 MPASS(tp->t_softerror >= 0); 6749 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6750 goto out; /* tcp_drop() */ 6751 } 6752 if (tp->t_state == TCPS_SYN_SENT) { 6753 /* 6754 * If the SYN was retransmitted, indicate CWND to be limited 6755 * to 1 segment in cc_conn_init(). 6756 */ 6757 tp->snd_cwnd = 1; 6758 } else if (tp->t_rxtshift == 1) { 6759 /* 6760 * first retransmit; record ssthresh and cwnd so they can be 6761 * recovered if this turns out to be a "bad" retransmit. A 6762 * retransmit is considered "bad" if an ACK for this segment 6763 * is received within RTT/2 interval; the assumption here is 6764 * that the ACK was already in flight. See "On Estimating 6765 * End-to-End Network Path Properties" by Allman and Paxson 6766 * for more details. 6767 */ 6768 tp->snd_cwnd_prev = tp->snd_cwnd; 6769 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6770 tp->snd_recover_prev = tp->snd_recover; 6771 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6772 tp->t_flags |= TF_PREVVALID; 6773 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6774 tp->t_flags &= ~TF_PREVVALID; 6775 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6776 if ((tp->t_state == TCPS_SYN_SENT) || 6777 (tp->t_state == TCPS_SYN_RECEIVED)) 6778 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6779 else 6780 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6781 6782 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6783 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6784 /* 6785 * We enter the path for PLMTUD if connection is established or, if 6786 * connection is FIN_WAIT_1 status, reason for the last is that if 6787 * amount of data we send is very small, we could send it in couple 6788 * of packets and process straight to FIN. In that case we won't 6789 * catch ESTABLISHED state. 6790 */ 6791 #ifdef INET6 6792 isipv6 = (inp->inp_vflag & INP_IPV6) ? 
true : false; 6793 #else 6794 isipv6 = false; 6795 #endif 6796 if (((V_tcp_pmtud_blackhole_detect == 1) || 6797 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6798 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6799 ((tp->t_state == TCPS_ESTABLISHED) || 6800 (tp->t_state == TCPS_FIN_WAIT_1))) { 6801 /* 6802 * Idea here is that at each stage of mtu probe (usually, 6803 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6804 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6805 * should take care of that. 6806 */ 6807 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6808 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6809 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6810 tp->t_rxtshift % 2 == 0)) { 6811 /* 6812 * Enter Path MTU Black-hole Detection mechanism: - 6813 * Disable Path MTU Discovery (IP "DF" bit). - 6814 * Reduce MTU to lower value than what we negotiated 6815 * with peer. 6816 */ 6817 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6818 /* Record that we may have found a black hole. */ 6819 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6820 /* Keep track of previous MSS. */ 6821 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6822 } 6823 6824 /* 6825 * Reduce the MSS to blackhole value or to the 6826 * default in an attempt to retransmit. 6827 */ 6828 #ifdef INET6 6829 if (isipv6 && 6830 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6831 /* Use the sysctl tuneable blackhole MSS. */ 6832 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6833 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6834 } else if (isipv6) { 6835 /* Use the default MSS. */ 6836 tp->t_maxseg = V_tcp_v6mssdflt; 6837 /* 6838 * Disable Path MTU Discovery when we switch 6839 * to minmss. 6840 */ 6841 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6842 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6843 } 6844 #endif 6845 #if defined(INET6) && defined(INET) 6846 else 6847 #endif 6848 #ifdef INET 6849 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6850 /* Use the sysctl tuneable blackhole MSS. */ 6851 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6852 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6853 } else { 6854 /* Use the default MSS. */ 6855 tp->t_maxseg = V_tcp_mssdflt; 6856 /* 6857 * Disable Path MTU Discovery when we switch 6858 * to minmss. 6859 */ 6860 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6861 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6862 } 6863 #endif 6864 } else { 6865 /* 6866 * If further retransmissions are still unsuccessful 6867 * with a lowered MTU, maybe this isn't a blackhole 6868 * and we restore the previous MSS and blackhole 6869 * detection flags. The limit '6' is determined by 6870 * giving each probe stage (1448, 1188, 524) 2 6871 * chances to recover. 6872 */ 6873 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6874 (tp->t_rxtshift >= 6)) { 6875 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6876 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6877 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6878 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6879 } 6880 } 6881 } 6882 /* 6883 * Disable RFC1323 and SACK if we haven't got any response to 6884 * our third SYN to work-around some broken terminal servers 6885 * (most of which have hopefully been retired) that have bad VJ 6886 * header compression code which trashes TCP segments containing 6887 * unknown-to-them TCP options. 
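 *
 * Concretely, once t_rxtshift reaches 3 while still in SYN_SENT
 * (and tcp_rexmit_drop_options is enabled) the next SYN we emit
 * carries no window-scale, timestamp or SACK-permitted options,
 * because TF_REQ_SCALE, TF_REQ_TSTMP and TF_SACK_PERMIT are all
 * cleared below.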
6888 */ 6889 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6890 (tp->t_rxtshift == 3)) 6891 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6892 /* 6893 * If we backed off this far, our srtt estimate is probably bogus. 6894 * Clobber it so we'll take the next rtt measurement as our srtt; 6895 * move the current srtt into rttvar to keep the current retransmit 6896 * times until then. 6897 */ 6898 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6899 #ifdef INET6 6900 if ((inp->inp_vflag & INP_IPV6) != 0) 6901 in6_losing(inp); 6902 else 6903 #endif 6904 in_losing(inp); 6905 tp->t_rttvar += tp->t_srtt; 6906 tp->t_srtt = 0; 6907 } 6908 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6909 tp->snd_recover = tp->snd_max; 6910 tp->t_flags |= TF_ACKNOW; 6911 tp->t_rtttime = 0; 6912 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6913 out: 6914 return (retval); 6915 } 6916 6917 static int 6918 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6919 { 6920 int32_t ret = 0; 6921 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6922 6923 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6924 (tp->t_flags & TF_GPUTINPROG)) { 6925 /* 6926 * We have a goodput in progress 6927 * and we have entered a late state. 6928 * Do we have enough data in the sb 6929 * to handle the GPUT request? 6930 */ 6931 uint32_t bytes; 6932 6933 bytes = tp->gput_ack - tp->gput_seq; 6934 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6935 bytes += tp->gput_seq - tp->snd_una; 6936 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 6937 /* 6938 * There are not enough bytes in the socket 6939 * buffer that have been sent to cover this 6940 * measurement. Cancel it. 6941 */ 6942 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6943 rack->r_ctl.rc_gp_srtt /*flex1*/, 6944 tp->gput_seq, 6945 0, 0, 18, __LINE__, NULL, 0); 6946 tp->t_flags &= ~TF_GPUTINPROG; 6947 } 6948 } 6949 if (timers == 0) { 6950 return (0); 6951 } 6952 if (tp->t_state == TCPS_LISTEN) { 6953 /* no timers on listen sockets */ 6954 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6955 return (0); 6956 return (1); 6957 } 6958 if ((timers & PACE_TMR_RACK) && 6959 rack->rc_on_min_to) { 6960 /* 6961 * For the rack timer when we 6962 * are on a min-timeout (which means rrr_conf = 3) 6963 * we don't want to check the timer. It may 6964 * be going off for a pace and thats ok we 6965 * want to send the retransmit (if its ready). 6966 * 6967 * If its on a normal rack timer (non-min) then 6968 * we will check if its expired. 6969 */ 6970 goto skip_time_check; 6971 } 6972 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6973 uint32_t left; 6974 6975 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6976 ret = -1; 6977 rack_log_to_processing(rack, cts, ret, 0); 6978 return (0); 6979 } 6980 if (hpts_calling == 0) { 6981 /* 6982 * A user send or queued mbuf (sack) has called us? We 6983 * return 0 and let the pacing guards 6984 * deal with it if they should or 6985 * should not cause a send. 6986 */ 6987 ret = -2; 6988 rack_log_to_processing(rack, cts, ret, 0); 6989 return (0); 6990 } 6991 /* 6992 * Ok our timer went off early and we are not paced false 6993 * alarm, go back to sleep. 
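 *
 * The remaining time is simply left = rc_timer_exp - cts and we
 * re-insert ourselves into hpts for that remainder, so a timer
 * that wakes us a little ahead of its expiry just sleeps again
 * for the portion still outstanding.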
6994 */ 6995 ret = -3; 6996 left = rack->r_ctl.rc_timer_exp - cts; 6997 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left)); 6998 rack_log_to_processing(rack, cts, ret, left); 6999 return (1); 7000 } 7001 skip_time_check: 7002 rack->rc_tmr_stopped = 0; 7003 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7004 if (timers & PACE_TMR_DELACK) { 7005 ret = rack_timeout_delack(tp, rack, cts); 7006 } else if (timers & PACE_TMR_RACK) { 7007 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7008 rack->r_fast_output = 0; 7009 ret = rack_timeout_rack(tp, rack, cts); 7010 } else if (timers & PACE_TMR_TLP) { 7011 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7012 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7013 } else if (timers & PACE_TMR_RXT) { 7014 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7015 rack->r_fast_output = 0; 7016 ret = rack_timeout_rxt(tp, rack, cts); 7017 } else if (timers & PACE_TMR_PERSIT) { 7018 ret = rack_timeout_persist(tp, rack, cts); 7019 } else if (timers & PACE_TMR_KEEP) { 7020 ret = rack_timeout_keepalive(tp, rack, cts); 7021 } 7022 rack_log_to_processing(rack, cts, ret, timers); 7023 return (ret); 7024 } 7025 7026 static void 7027 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7028 { 7029 struct timeval tv; 7030 uint32_t us_cts, flags_on_entry; 7031 uint8_t hpts_removed = 0; 7032 7033 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7034 us_cts = tcp_get_usecs(&tv); 7035 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7036 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7037 ((tp->snd_max - tp->snd_una) == 0))) { 7038 tcp_hpts_remove(rack->rc_inp); 7039 hpts_removed = 1; 7040 /* If we were not delayed cancel out the flag. */ 7041 if ((tp->snd_max - tp->snd_una) == 0) 7042 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7043 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7044 } 7045 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7046 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7047 if (tcp_in_hpts(rack->rc_inp) && 7048 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7049 /* 7050 * Canceling timer's when we have no output being 7051 * paced. We also must remove ourselves from the 7052 * hpts. 7053 */ 7054 tcp_hpts_remove(rack->rc_inp); 7055 hpts_removed = 1; 7056 } 7057 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7058 } 7059 if (hpts_removed == 0) 7060 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7061 } 7062 7063 static void 7064 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7065 { 7066 return; 7067 } 7068 7069 static int 7070 rack_stopall(struct tcpcb *tp) 7071 { 7072 struct tcp_rack *rack; 7073 rack = (struct tcp_rack *)tp->t_fb_ptr; 7074 rack->t_timers_stopped = 1; 7075 return (0); 7076 } 7077 7078 static void 7079 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7080 { 7081 return; 7082 } 7083 7084 static int 7085 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7086 { 7087 return (0); 7088 } 7089 7090 static void 7091 rack_stop_all_timers(struct tcpcb *tp) 7092 { 7093 struct tcp_rack *rack; 7094 7095 /* 7096 * Assure no timers are running. 
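 *
 * If the default stack already had the persist timer armed we
 * inherit that state by setting rc_in_persist before suspending
 * TT_PERSIST, TT_REXMT, TT_KEEP and TT_DELACK below.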
7097 */ 7098 if (tcp_timer_active(tp, TT_PERSIST)) { 7099 /* We enter in persists, set the flag appropriately */ 7100 rack = (struct tcp_rack *)tp->t_fb_ptr; 7101 rack->rc_in_persist = 1; 7102 } 7103 tcp_timer_suspend(tp, TT_PERSIST); 7104 tcp_timer_suspend(tp, TT_REXMT); 7105 tcp_timer_suspend(tp, TT_KEEP); 7106 tcp_timer_suspend(tp, TT_DELACK); 7107 } 7108 7109 static void 7110 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7111 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7112 { 7113 int32_t idx; 7114 7115 rsm->r_rtr_cnt++; 7116 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7117 rsm->r_dupack = 0; 7118 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7119 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7120 rsm->r_flags |= RACK_OVERMAX; 7121 } 7122 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7123 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7124 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7125 } 7126 idx = rsm->r_rtr_cnt - 1; 7127 rsm->r_tim_lastsent[idx] = ts; 7128 /* 7129 * Here we don't add in the len of send, since its already 7130 * in snduna <->snd_max. 7131 */ 7132 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7133 rack->r_ctl.rc_sacked); 7134 if (rsm->r_flags & RACK_ACKED) { 7135 /* Problably MTU discovery messing with us */ 7136 rsm->r_flags &= ~RACK_ACKED; 7137 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7138 } 7139 if (rsm->r_in_tmap) { 7140 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7141 rsm->r_in_tmap = 0; 7142 } 7143 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7144 rsm->r_in_tmap = 1; 7145 /* Take off the must retransmit flag, if its on */ 7146 if (rsm->r_flags & RACK_MUST_RXT) { 7147 if (rack->r_must_retran) 7148 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7149 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7150 /* 7151 * We have retransmitted all we need. Clear 7152 * any must retransmit flags. 7153 */ 7154 rack->r_must_retran = 0; 7155 rack->r_ctl.rc_out_at_rto = 0; 7156 } 7157 rsm->r_flags &= ~RACK_MUST_RXT; 7158 } 7159 if (rsm->r_flags & RACK_SACK_PASSED) { 7160 /* We have retransmitted due to the SACK pass */ 7161 rsm->r_flags &= ~RACK_SACK_PASSED; 7162 rsm->r_flags |= RACK_WAS_SACKPASS; 7163 } 7164 } 7165 7166 static uint32_t 7167 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7168 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7169 { 7170 /* 7171 * We (re-)transmitted starting at rsm->r_start for some length 7172 * (possibly less than r_end. 7173 */ 7174 struct rack_sendmap *nrsm; 7175 #ifdef INVARIANTS 7176 struct rack_sendmap *insret; 7177 #endif 7178 uint32_t c_end; 7179 int32_t len; 7180 7181 len = *lenp; 7182 c_end = rsm->r_start + len; 7183 if (SEQ_GEQ(c_end, rsm->r_end)) { 7184 /* 7185 * We retransmitted the whole piece or more than the whole 7186 * slopping into the next rsm. 7187 */ 7188 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7189 if (c_end == rsm->r_end) { 7190 *lenp = 0; 7191 return (0); 7192 } else { 7193 int32_t act_len; 7194 7195 /* Hangs over the end return whats left */ 7196 act_len = rsm->r_end - rsm->r_start; 7197 *lenp = (len - act_len); 7198 return (rsm->r_end); 7199 } 7200 /* We don't get out of this block. */ 7201 } 7202 /* 7203 * Here we retransmitted less than the whole thing which means we 7204 * have to split this into what was transmitted and what was not. 7205 */ 7206 nrsm = rack_alloc_full_limit(rack); 7207 if (nrsm == NULL) { 7208 /* 7209 * We can't get memory, so lets not proceed. 
7210 */ 7211 *lenp = 0; 7212 return (0); 7213 } 7214 /* 7215 * So here we are going to take the original rsm and make it what we 7216 * retransmitted. nrsm will be the tail portion we did not 7217 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7218 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7219 * 1, 6 and the new piece will be 6, 11. 7220 */ 7221 rack_clone_rsm(rack, nrsm, rsm, c_end); 7222 nrsm->r_dupack = 0; 7223 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7224 #ifndef INVARIANTS 7225 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7226 #else 7227 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7228 if (insret != NULL) { 7229 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7230 nrsm, insret, rack, rsm); 7231 } 7232 #endif 7233 if (rsm->r_in_tmap) { 7234 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7235 nrsm->r_in_tmap = 1; 7236 } 7237 rsm->r_flags &= (~RACK_HAS_FIN); 7238 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7239 /* Log a split of rsm into rsm and nrsm */ 7240 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7241 *lenp = 0; 7242 return (0); 7243 } 7244 7245 static void 7246 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7247 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7248 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7249 { 7250 struct tcp_rack *rack; 7251 struct rack_sendmap *rsm, *nrsm, fe; 7252 #ifdef INVARIANTS 7253 struct rack_sendmap *insret; 7254 #endif 7255 register uint32_t snd_max, snd_una; 7256 7257 /* 7258 * Add to the RACK log of packets in flight or retransmitted. If 7259 * there is a TS option we will use the TS echoed, if not we will 7260 * grab a TS. 7261 * 7262 * Retransmissions will increment the count and move the ts to its 7263 * proper place. Note that if options do not include TS's then we 7264 * won't be able to effectively use the ACK for an RTT on a retran. 7265 * 7266 * Notes about r_start and r_end. Lets consider a send starting at 7267 * sequence 1 for 10 bytes. In such an example the r_start would be 7268 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7269 * This means that r_end is actually the first sequence for the next 7270 * slot (11). 7271 * 7272 */ 7273 /* 7274 * If err is set what do we do XXXrrs? should we not add the thing? 7275 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7276 * i.e. proceed with add ** do this for now. 7277 */ 7278 INP_WLOCK_ASSERT(tptoinpcb(tp)); 7279 if (err) 7280 /* 7281 * We don't log errors -- we could but snd_max does not 7282 * advance in this case either. 7283 */ 7284 return; 7285 7286 if (th_flags & TH_RST) { 7287 /* 7288 * We don't log resets and we return immediately from 7289 * sending 7290 */ 7291 return; 7292 } 7293 rack = (struct tcp_rack *)tp->t_fb_ptr; 7294 snd_una = tp->snd_una; 7295 snd_max = tp->snd_max; 7296 if (th_flags & (TH_SYN | TH_FIN)) { 7297 /* 7298 * The call to rack_log_output is made before bumping 7299 * snd_max. This means we can record one extra byte on a SYN 7300 * or FIN if seq_out is adding more on and a FIN is present 7301 * (and we are not resending). 7302 */ 7303 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7304 len++; 7305 if (th_flags & TH_FIN) 7306 len++; 7307 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7308 /* 7309 * The add/update as not been done for the FIN/SYN 7310 * yet. 
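 *
 * As an example of the SYN/FIN sequence-space accounting a few
 * lines up (illustrative numbers): an initial SYN with iss = 100
 * and no data is logged as the range [100, 101) because of the
 * len++ for the SYN bit, and the same one byte of sequence space
 * is added for a FIN on the tail of the stream.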
7311 */ 7312 snd_max = tp->snd_nxt; 7313 } 7314 } 7315 if (SEQ_LEQ((seq_out + len), snd_una)) { 7316 /* Are sending an old segment to induce an ack (keep-alive)? */ 7317 return; 7318 } 7319 if (SEQ_LT(seq_out, snd_una)) { 7320 /* huh? should we panic? */ 7321 uint32_t end; 7322 7323 end = seq_out + len; 7324 seq_out = snd_una; 7325 if (SEQ_GEQ(end, seq_out)) 7326 len = end - seq_out; 7327 else 7328 len = 0; 7329 } 7330 if (len == 0) { 7331 /* We don't log zero window probes */ 7332 return; 7333 } 7334 if (IN_FASTRECOVERY(tp->t_flags)) { 7335 rack->r_ctl.rc_prr_out += len; 7336 } 7337 /* First question is it a retransmission or new? */ 7338 if (seq_out == snd_max) { 7339 /* Its new */ 7340 again: 7341 rsm = rack_alloc(rack); 7342 if (rsm == NULL) { 7343 /* 7344 * Hmm out of memory and the tcb got destroyed while 7345 * we tried to wait. 7346 */ 7347 return; 7348 } 7349 if (th_flags & TH_FIN) { 7350 rsm->r_flags = RACK_HAS_FIN|add_flag; 7351 } else { 7352 rsm->r_flags = add_flag; 7353 } 7354 if (hw_tls) 7355 rsm->r_hw_tls = 1; 7356 rsm->r_tim_lastsent[0] = cts; 7357 rsm->r_rtr_cnt = 1; 7358 rsm->r_rtr_bytes = 0; 7359 if (th_flags & TH_SYN) { 7360 /* The data space is one beyond snd_una */ 7361 rsm->r_flags |= RACK_HAS_SYN; 7362 } 7363 rsm->r_start = seq_out; 7364 rsm->r_end = rsm->r_start + len; 7365 rsm->r_dupack = 0; 7366 /* 7367 * save off the mbuf location that 7368 * sndmbuf_noadv returned (which is 7369 * where we started copying from).. 7370 */ 7371 rsm->m = s_mb; 7372 rsm->soff = s_moff; 7373 /* 7374 * Here we do add in the len of send, since its not yet 7375 * reflected in in snduna <->snd_max 7376 */ 7377 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7378 rack->r_ctl.rc_sacked) + 7379 (rsm->r_end - rsm->r_start)); 7380 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7381 if (rsm->m) { 7382 if (rsm->m->m_len <= rsm->soff) { 7383 /* 7384 * XXXrrs Question, will this happen? 7385 * 7386 * If sbsndptr is set at the correct place 7387 * then s_moff should always be somewhere 7388 * within rsm->m. But if the sbsndptr was 7389 * off then that won't be true. If it occurs 7390 * we need to walkout to the correct location. 7391 */ 7392 struct mbuf *lm; 7393 7394 lm = rsm->m; 7395 while (lm->m_len <= rsm->soff) { 7396 rsm->soff -= lm->m_len; 7397 lm = lm->m_next; 7398 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7399 __func__, rack, s_moff, s_mb, rsm->soff)); 7400 } 7401 rsm->m = lm; 7402 } 7403 rsm->orig_m_len = rsm->m->m_len; 7404 } else 7405 rsm->orig_m_len = 0; 7406 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7407 /* Log a new rsm */ 7408 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7409 #ifndef INVARIANTS 7410 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7411 #else 7412 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7413 if (insret != NULL) { 7414 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7415 nrsm, insret, rack, rsm); 7416 } 7417 #endif 7418 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7419 rsm->r_in_tmap = 1; 7420 /* 7421 * Special case detection, is there just a single 7422 * packet outstanding when we are not in recovery? 7423 * 7424 * If this is true mark it so. 
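 *
 * The r_one_out_nr flag set below is consulted later in
 * tcp_rack_xmit_timer(): an RTT sample taken from something that
 * was the only data in flight (outside of recovery) is treated
 * with low confidence, since it tells us little about buffering
 * along the path.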
7425 */ 7426 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7427 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7428 struct rack_sendmap *prsm; 7429 7430 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7431 if (prsm) 7432 prsm->r_one_out_nr = 1; 7433 } 7434 return; 7435 } 7436 /* 7437 * If we reach here its a retransmission and we need to find it. 7438 */ 7439 memset(&fe, 0, sizeof(fe)); 7440 more: 7441 if (hintrsm && (hintrsm->r_start == seq_out)) { 7442 rsm = hintrsm; 7443 hintrsm = NULL; 7444 } else { 7445 /* No hints sorry */ 7446 rsm = NULL; 7447 } 7448 if ((rsm) && (rsm->r_start == seq_out)) { 7449 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7450 if (len == 0) { 7451 return; 7452 } else { 7453 goto more; 7454 } 7455 } 7456 /* Ok it was not the last pointer go through it the hard way. */ 7457 refind: 7458 fe.r_start = seq_out; 7459 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7460 if (rsm) { 7461 if (rsm->r_start == seq_out) { 7462 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7463 if (len == 0) { 7464 return; 7465 } else { 7466 goto refind; 7467 } 7468 } 7469 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7470 /* Transmitted within this piece */ 7471 /* 7472 * Ok we must split off the front and then let the 7473 * update do the rest 7474 */ 7475 nrsm = rack_alloc_full_limit(rack); 7476 if (nrsm == NULL) { 7477 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7478 return; 7479 } 7480 /* 7481 * copy rsm to nrsm and then trim the front of rsm 7482 * to not include this part. 7483 */ 7484 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7485 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7486 #ifndef INVARIANTS 7487 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7488 #else 7489 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7490 if (insret != NULL) { 7491 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7492 nrsm, insret, rack, rsm); 7493 } 7494 #endif 7495 if (rsm->r_in_tmap) { 7496 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7497 nrsm->r_in_tmap = 1; 7498 } 7499 rsm->r_flags &= (~RACK_HAS_FIN); 7500 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7501 if (len == 0) { 7502 return; 7503 } else if (len > 0) 7504 goto refind; 7505 } 7506 } 7507 /* 7508 * Hmm not found in map did they retransmit both old and on into the 7509 * new? 7510 */ 7511 if (seq_out == tp->snd_max) { 7512 goto again; 7513 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7514 #ifdef INVARIANTS 7515 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7516 seq_out, len, tp->snd_una, tp->snd_max); 7517 printf("Starting Dump of all rack entries\n"); 7518 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7519 printf("rsm:%p start:%u end:%u\n", 7520 rsm, rsm->r_start, rsm->r_end); 7521 } 7522 printf("Dump complete\n"); 7523 panic("seq_out not found rack:%p tp:%p", 7524 rack, tp); 7525 #endif 7526 } else { 7527 #ifdef INVARIANTS 7528 /* 7529 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7530 * flag) 7531 */ 7532 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7533 seq_out, len, tp->snd_max, tp); 7534 #endif 7535 } 7536 } 7537 7538 /* 7539 * Record one of the RTT updates from an ack into 7540 * our sample structure. 
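 *
 * A single ack can cover several rsm's; each call here folds one
 * more sample into rack_rs (lowest, highest, running total and
 * the best us_rtt with its confidence), and
 * tcp_rack_xmit_timer_commit() then reduces that set to one RTT
 * according to rc_rate_sample_method.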
7541 */ 7542 7543 static void 7544 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7545 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7546 { 7547 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7548 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7549 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7550 } 7551 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7552 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7553 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7554 } 7555 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7556 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7557 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7558 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7559 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7560 } 7561 if ((confidence == 1) && 7562 ((rsm == NULL) || 7563 (rsm->r_just_ret) || 7564 (rsm->r_one_out_nr && 7565 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7566 /* 7567 * If the rsm had a just return 7568 * hit it then we can't trust the 7569 * rtt measurement for buffer deterimination 7570 * Note that a confidence of 2, indicates 7571 * SACK'd which overrides the r_just_ret or 7572 * the r_one_out_nr. If it was a CUM-ACK and 7573 * we had only two outstanding, but get an 7574 * ack for only 1. Then that also lowers our 7575 * confidence. 7576 */ 7577 confidence = 0; 7578 } 7579 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7580 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7581 if (rack->r_ctl.rack_rs.confidence == 0) { 7582 /* 7583 * We take anything with no current confidence 7584 * saved. 7585 */ 7586 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7587 rack->r_ctl.rack_rs.confidence = confidence; 7588 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7589 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7590 /* 7591 * Once we have a confident number, 7592 * we can update it with a smaller 7593 * value since this confident number 7594 * may include the DSACK time until 7595 * the next segment (the second one) arrived. 7596 */ 7597 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7598 rack->r_ctl.rack_rs.confidence = confidence; 7599 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7600 } 7601 } 7602 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7603 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7604 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7605 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7606 } 7607 7608 /* 7609 * Collect new round-trip time estimate 7610 * and update averages and current timeout. 
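 *
 * For example (illustrative numbers): with USE_RTT_AVG selected
 * and three samples of 4000, 6000 and 11000 usecs folded into one
 * ack, rs_rtt_tot is 21000 over rs_rtt_cnt = 3, so the code below
 * works with a 7000 usec RTT.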
7611 */ 7612 static void 7613 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7614 { 7615 int32_t delta; 7616 int32_t rtt; 7617 7618 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7619 /* No valid sample */ 7620 return; 7621 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7622 /* We are to use the lowest RTT seen in a single ack */ 7623 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7624 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7625 /* We are to use the highest RTT seen in a single ack */ 7626 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7627 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7628 /* We are to use the average RTT seen in a single ack */ 7629 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7630 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7631 } else { 7632 #ifdef INVARIANTS 7633 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7634 #endif 7635 return; 7636 } 7637 if (rtt == 0) 7638 rtt = 1; 7639 if (rack->rc_gp_rtt_set == 0) { 7640 /* 7641 * With no RTT we have to accept 7642 * even one we are not confident of. 7643 */ 7644 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7645 rack->rc_gp_rtt_set = 1; 7646 } else if (rack->r_ctl.rack_rs.confidence) { 7647 /* update the running gp srtt */ 7648 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7649 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7650 } 7651 if (rack->r_ctl.rack_rs.confidence) { 7652 /* 7653 * record the low and high for highly buffered path computation, 7654 * we only do this if we are confident (not a retransmission). 7655 */ 7656 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7657 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7658 } 7659 if (rack->rc_highly_buffered == 0) { 7660 /* 7661 * Currently once we declare a path has 7662 * highly buffered there is no going 7663 * back, which may be a problem... 7664 */ 7665 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7666 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7667 rack->r_ctl.rc_highest_us_rtt, 7668 rack->r_ctl.rc_lowest_us_rtt, 7669 RACK_RTTS_SEEHBP); 7670 rack->rc_highly_buffered = 1; 7671 } 7672 } 7673 } 7674 if ((rack->r_ctl.rack_rs.confidence) || 7675 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7676 /* 7677 * If we are highly confident of it <or> it was 7678 * never retransmitted we accept it as the last us_rtt. 7679 */ 7680 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7681 /* The lowest rtt can be set if its was not retransmited */ 7682 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7683 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7684 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7685 rack->r_ctl.rc_lowest_us_rtt = 1; 7686 } 7687 } 7688 rack = (struct tcp_rack *)tp->t_fb_ptr; 7689 if (tp->t_srtt != 0) { 7690 /* 7691 * We keep a simple srtt in microseconds, like our rtt 7692 * measurement. We don't need to do any tricks with shifting 7693 * etc. Instead we just add in 1/8th of the new measurement 7694 * and subtract out 1/8 of the old srtt. We do the same with 7695 * the variance after finding the absolute value of the 7696 * difference between this sample and the current srtt. 
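 *
 * Worked example (illustrative numbers): with t_srtt = 40000 usecs
 * and a new sample rtt = 48000 usecs, delta is -8000; srtt becomes
 * 40000 - 5000 + 6000 = 41000 usecs, and the absolute delta of
 * 8000 then feeds the rttvar term the same 1/8th at a time.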
7697 */ 7698 delta = tp->t_srtt - rtt; 7699 /* Take off 1/8th of the current sRTT */ 7700 tp->t_srtt -= (tp->t_srtt >> 3); 7701 /* Add in 1/8th of the new RTT just measured */ 7702 tp->t_srtt += (rtt >> 3); 7703 if (tp->t_srtt <= 0) 7704 tp->t_srtt = 1; 7705 /* Now lets make the absolute value of the variance */ 7706 if (delta < 0) 7707 delta = -delta; 7708 /* Subtract out 1/8th */ 7709 tp->t_rttvar -= (tp->t_rttvar >> 3); 7710 /* Add in 1/8th of the new variance we just saw */ 7711 tp->t_rttvar += (delta >> 3); 7712 if (tp->t_rttvar <= 0) 7713 tp->t_rttvar = 1; 7714 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7715 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7716 } else { 7717 /* 7718 * No rtt measurement yet - use the unsmoothed rtt. Set the 7719 * variance to half the rtt (so our first retransmit happens 7720 * at 3*rtt). 7721 */ 7722 tp->t_srtt = rtt; 7723 tp->t_rttvar = rtt >> 1; 7724 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7725 } 7726 rack->rc_srtt_measure_made = 1; 7727 KMOD_TCPSTAT_INC(tcps_rttupdated); 7728 tp->t_rttupdated++; 7729 #ifdef STATS 7730 if (rack_stats_gets_ms_rtt == 0) { 7731 /* Send in the microsecond rtt used for rxt timeout purposes */ 7732 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7733 } else if (rack_stats_gets_ms_rtt == 1) { 7734 /* Send in the millisecond rtt used for rxt timeout purposes */ 7735 int32_t ms_rtt; 7736 7737 /* Round up */ 7738 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7739 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7740 } else if (rack_stats_gets_ms_rtt == 2) { 7741 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7742 int32_t ms_rtt; 7743 7744 /* Round up */ 7745 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7746 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7747 } else { 7748 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7749 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7750 } 7751 7752 #endif 7753 /* 7754 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7755 * way we do the smoothing, srtt and rttvar will each average +1/2 7756 * tick of bias. When we compute the retransmit timer, we want 1/2 7757 * tick of rounding and 1 extra tick because of +-1/2 tick 7758 * uncertainty in the firing of the timer. The bias will give us 7759 * exactly the 1.5 tick we need. But, because the bias is 7760 * statistical, we have to test that we don't drop below the minimum 7761 * feasible timer (which is 2 ticks). 7762 */ 7763 tp->t_rxtshift = 0; 7764 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7765 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7766 rack_log_rtt_sample(rack, rtt); 7767 tp->t_softerror = 0; 7768 } 7769 7770 7771 static void 7772 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7773 { 7774 /* 7775 * Apply to filter the inbound us-rtt at us_cts. 7776 */ 7777 uint32_t old_rtt; 7778 7779 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7780 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7781 us_rtt, us_cts); 7782 if (old_rtt > us_rtt) { 7783 /* We just hit a new lower rtt time */ 7784 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7785 __LINE__, RACK_RTTS_NEWRTT); 7786 /* 7787 * Only count it if its lower than what we saw within our 7788 * calculated range. 
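 *
 * i.e. the new sample has to undercut the previous filtered
 * minimum by more than rack_min_rtt_movement before we act on it.
 * The probe-rtt pull-in below is proportional as well: with
 * rack_probertt_lower_within expressed as a percentage, we are
 * willing to enter probe-rtt up to that fraction of
 * rack_time_between_probertt earlier than normally scheduled.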
7789 */ 7790 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7791 if (rack_probertt_lower_within && 7792 rack->rc_gp_dyn_mul && 7793 (rack->use_fixed_rate == 0) && 7794 (rack->rc_always_pace)) { 7795 /* 7796 * We are seeing a new lower rtt very close 7797 * to the time that we would have entered probe-rtt. 7798 * This is probably due to the fact that a peer flow 7799 * has entered probe-rtt. Lets go in now too. 7800 */ 7801 uint32_t val; 7802 7803 val = rack_probertt_lower_within * rack_time_between_probertt; 7804 val /= 100; 7805 if ((rack->in_probe_rtt == 0) && 7806 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7807 rack_enter_probertt(rack, us_cts); 7808 } 7809 } 7810 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7811 } 7812 } 7813 } 7814 7815 static int 7816 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7817 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7818 { 7819 uint32_t us_rtt; 7820 int32_t i, all; 7821 uint32_t t, len_acked; 7822 7823 if ((rsm->r_flags & RACK_ACKED) || 7824 (rsm->r_flags & RACK_WAS_ACKED)) 7825 /* Already done */ 7826 return (0); 7827 if (rsm->r_no_rtt_allowed) { 7828 /* Not allowed */ 7829 return (0); 7830 } 7831 if (ack_type == CUM_ACKED) { 7832 if (SEQ_GT(th_ack, rsm->r_end)) { 7833 len_acked = rsm->r_end - rsm->r_start; 7834 all = 1; 7835 } else { 7836 len_acked = th_ack - rsm->r_start; 7837 all = 0; 7838 } 7839 } else { 7840 len_acked = rsm->r_end - rsm->r_start; 7841 all = 0; 7842 } 7843 if (rsm->r_rtr_cnt == 1) { 7844 7845 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7846 if ((int)t <= 0) 7847 t = 1; 7848 if (!tp->t_rttlow || tp->t_rttlow > t) 7849 tp->t_rttlow = t; 7850 if (!rack->r_ctl.rc_rack_min_rtt || 7851 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7852 rack->r_ctl.rc_rack_min_rtt = t; 7853 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7854 rack->r_ctl.rc_rack_min_rtt = 1; 7855 } 7856 } 7857 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7858 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7859 else 7860 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7861 if (us_rtt == 0) 7862 us_rtt = 1; 7863 if (CC_ALGO(tp)->rttsample != NULL) { 7864 /* Kick the RTT to the CC */ 7865 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7866 } 7867 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7868 if (ack_type == SACKED) { 7869 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7870 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7871 } else { 7872 /* 7873 * We need to setup what our confidence 7874 * is in this ack. 7875 * 7876 * If the rsm was app limited and it is 7877 * less than a mss in length (the end 7878 * of the send) then we have a gap. If we 7879 * were app limited but say we were sending 7880 * multiple MSS's then we are more confident 7881 * int it. 7882 * 7883 * When we are not app-limited then we see if 7884 * the rsm is being included in the current 7885 * measurement, we tell this by the app_limited_needs_set 7886 * flag. 7887 * 7888 * Note that being cwnd blocked is not applimited 7889 * as well as the pacing delay between packets which 7890 * are sending only 1 or 2 MSS's also will show up 7891 * in the RTT. 
We probably need to examine this algorithm 7892 * a bit more and enhance it to account for the delay 7893 * between rsm's. We could do that by saving off the 7894 * pacing delay of each rsm (in an rsm) and then 7895 * factoring that in somehow though for now I am 7896 * not sure how :) 7897 */ 7898 int calc_conf = 0; 7899 7900 if (rsm->r_flags & RACK_APP_LIMITED) { 7901 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7902 calc_conf = 0; 7903 else 7904 calc_conf = 1; 7905 } else if (rack->app_limited_needs_set == 0) { 7906 calc_conf = 1; 7907 } else { 7908 calc_conf = 0; 7909 } 7910 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7911 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7912 calc_conf, rsm, rsm->r_rtr_cnt); 7913 } 7914 if ((rsm->r_flags & RACK_TLP) && 7915 (!IN_FASTRECOVERY(tp->t_flags))) { 7916 /* Segment was a TLP and our retrans matched */ 7917 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7918 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7919 } 7920 } 7921 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7922 /* New more recent rack_tmit_time */ 7923 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7924 rack->rc_rack_rtt = t; 7925 } 7926 return (1); 7927 } 7928 /* 7929 * We clear the soft/rxtshift since we got an ack. 7930 * There is no assurance we will call the commit() function 7931 * so we need to clear these to avoid incorrect handling. 7932 */ 7933 tp->t_rxtshift = 0; 7934 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7935 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7936 tp->t_softerror = 0; 7937 if (to && (to->to_flags & TOF_TS) && 7938 (ack_type == CUM_ACKED) && 7939 (to->to_tsecr) && 7940 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7941 /* 7942 * Now which timestamp does it match? In this block the ACK 7943 * must be coming from a previous transmission. 7944 */ 7945 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7946 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7947 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7948 if ((int)t <= 0) 7949 t = 1; 7950 if (CC_ALGO(tp)->rttsample != NULL) { 7951 /* 7952 * Kick the RTT to the CC, here 7953 * we lie a bit in that we know the 7954 * retransmission is correct even though 7955 * we retransmitted. This is because 7956 * we match the timestamps. 7957 */ 7958 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7959 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7960 else 7961 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7962 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7963 } 7964 if ((i + 1) < rsm->r_rtr_cnt) { 7965 /* 7966 * The peer ack'd from our previous 7967 * transmission. We have a spurious 7968 * retransmission and thus we dont 7969 * want to update our rack_rtt. 7970 * 7971 * Hmm should there be a CC revert here? 
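 *
 * For now we leave the congestion state alone and only decline
 * to take an RTT sample; if a revert were added it would most
 * naturally restore the snd_cwnd_prev/snd_ssthresh_prev state
 * that rack_timeout_rxt() saved off under TF_PREVVALID.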
7972 * 7973 */ 7974 return (0); 7975 } 7976 if (!tp->t_rttlow || tp->t_rttlow > t) 7977 tp->t_rttlow = t; 7978 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7979 rack->r_ctl.rc_rack_min_rtt = t; 7980 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7981 rack->r_ctl.rc_rack_min_rtt = 1; 7982 } 7983 } 7984 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7985 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7986 /* New more recent rack_tmit_time */ 7987 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7988 rack->rc_rack_rtt = t; 7989 } 7990 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7991 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7992 rsm->r_rtr_cnt); 7993 return (1); 7994 } 7995 } 7996 goto ts_not_found; 7997 } else { 7998 /* 7999 * Ok its a SACK block that we retransmitted. or a windows 8000 * machine without timestamps. We can tell nothing from the 8001 * time-stamp since its not there or the time the peer last 8002 * recieved a segment that moved forward its cum-ack point. 8003 */ 8004 ts_not_found: 8005 i = rsm->r_rtr_cnt - 1; 8006 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8007 if ((int)t <= 0) 8008 t = 1; 8009 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8010 /* 8011 * We retransmitted and the ack came back in less 8012 * than the smallest rtt we have observed. We most 8013 * likely did an improper retransmit as outlined in 8014 * 6.2 Step 2 point 2 in the rack-draft so we 8015 * don't want to update our rack_rtt. We in 8016 * theory (in future) might want to think about reverting our 8017 * cwnd state but we won't for now. 8018 */ 8019 return (0); 8020 } else if (rack->r_ctl.rc_rack_min_rtt) { 8021 /* 8022 * We retransmitted it and the retransmit did the 8023 * job. 8024 */ 8025 if (!rack->r_ctl.rc_rack_min_rtt || 8026 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8027 rack->r_ctl.rc_rack_min_rtt = t; 8028 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8029 rack->r_ctl.rc_rack_min_rtt = 1; 8030 } 8031 } 8032 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8033 /* New more recent rack_tmit_time */ 8034 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8035 rack->rc_rack_rtt = t; 8036 } 8037 return (1); 8038 } 8039 } 8040 return (0); 8041 } 8042 8043 /* 8044 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 8045 */ 8046 static void 8047 rack_log_sack_passed(struct tcpcb *tp, 8048 struct tcp_rack *rack, struct rack_sendmap *rsm) 8049 { 8050 struct rack_sendmap *nrsm; 8051 8052 nrsm = rsm; 8053 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8054 rack_head, r_tnext) { 8055 if (nrsm == rsm) { 8056 /* Skip orginal segment he is acked */ 8057 continue; 8058 } 8059 if (nrsm->r_flags & RACK_ACKED) { 8060 /* 8061 * Skip ack'd segments, though we 8062 * should not see these, since tmap 8063 * should not have ack'd segments. 8064 */ 8065 continue; 8066 } 8067 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 8068 /* 8069 * If the peer dropped the rwnd on 8070 * these then we don't worry about them. 8071 */ 8072 continue; 8073 } 8074 if (nrsm->r_flags & RACK_SACK_PASSED) { 8075 /* 8076 * We found one that is already marked 8077 * passed, we have been here before and 8078 * so all others below this are marked. 
8079 */ 8080 break; 8081 } 8082 nrsm->r_flags |= RACK_SACK_PASSED; 8083 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8084 } 8085 } 8086 8087 static void 8088 rack_need_set_test(struct tcpcb *tp, 8089 struct tcp_rack *rack, 8090 struct rack_sendmap *rsm, 8091 tcp_seq th_ack, 8092 int line, 8093 int use_which) 8094 { 8095 8096 if ((tp->t_flags & TF_GPUTINPROG) && 8097 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8098 /* 8099 * We were app limited, and this ack 8100 * butts up or goes beyond the point where we want 8101 * to start our next measurement. We need 8102 * to record the new gput_ts as here and 8103 * possibly update the start sequence. 8104 */ 8105 uint32_t seq, ts; 8106 8107 if (rsm->r_rtr_cnt > 1) { 8108 /* 8109 * This is a retransmit, can we 8110 * really make any assessment at this 8111 * point? We are not really sure of 8112 * the timestamp, is it this or the 8113 * previous transmission? 8114 * 8115 * Lets wait for something better that 8116 * is not retransmitted. 8117 */ 8118 return; 8119 } 8120 seq = tp->gput_seq; 8121 ts = tp->gput_ts; 8122 rack->app_limited_needs_set = 0; 8123 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8124 /* Do we start at a new end? */ 8125 if ((use_which == RACK_USE_BEG) && 8126 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8127 /* 8128 * When we get an ACK that just eats 8129 * up some of the rsm, we set RACK_USE_BEG 8130 * since whats at r_start (i.e. th_ack) 8131 * is left unacked and thats where the 8132 * measurement not starts. 8133 */ 8134 tp->gput_seq = rsm->r_start; 8135 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8136 } 8137 if ((use_which == RACK_USE_END) && 8138 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8139 /* 8140 * We use the end when the cumack 8141 * is moving forward and completely 8142 * deleting the rsm passed so basically 8143 * r_end holds th_ack. 8144 * 8145 * For SACK's we also want to use the end 8146 * since this piece just got sacked and 8147 * we want to target anything after that 8148 * in our measurement. 8149 */ 8150 tp->gput_seq = rsm->r_end; 8151 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8152 } 8153 if (use_which == RACK_USE_END_OR_THACK) { 8154 /* 8155 * special case for ack moving forward, 8156 * not a sack, we need to move all the 8157 * way up to where this ack cum-ack moves 8158 * to. 8159 */ 8160 if (SEQ_GT(th_ack, rsm->r_end)) 8161 tp->gput_seq = th_ack; 8162 else 8163 tp->gput_seq = rsm->r_end; 8164 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8165 } 8166 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8167 /* 8168 * We moved beyond this guy's range, re-calculate 8169 * the new end point. 8170 */ 8171 if (rack->rc_gp_filled == 0) { 8172 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8173 } else { 8174 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8175 } 8176 } 8177 /* 8178 * We are moving the goal post, we may be able to clear the 8179 * measure_saw_probe_rtt flag. 
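 *
 * The test below clears it once the measurement's starting
 * sequence has moved past everything that was outstanding when
 * probe-rtt exited (rc_probertt_sndmax_atexit); from that point
 * on the samples in this window can no longer have been taken
 * while we were deliberately running at the probe-rtt rate.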
8180 */ 8181 if ((rack->in_probe_rtt == 0) && 8182 (rack->measure_saw_probe_rtt) && 8183 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8184 rack->measure_saw_probe_rtt = 0; 8185 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8186 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8187 if (rack->rc_gp_filled && 8188 ((tp->gput_ack - tp->gput_seq) < 8189 max(rc_init_window(rack), (MIN_GP_WIN * 8190 ctf_fixed_maxseg(tp))))) { 8191 uint32_t ideal_amount; 8192 8193 ideal_amount = rack_get_measure_window(tp, rack); 8194 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 8195 /* 8196 * There is no sense of continuing this measurement 8197 * because its too small to gain us anything we 8198 * trust. Skip it and that way we can start a new 8199 * measurement quicker. 8200 */ 8201 tp->t_flags &= ~TF_GPUTINPROG; 8202 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8203 0, 0, 0, 6, __LINE__, NULL, 0); 8204 } else { 8205 /* 8206 * Reset the window further out. 8207 */ 8208 tp->gput_ack = tp->gput_seq + ideal_amount; 8209 } 8210 } 8211 } 8212 } 8213 8214 static inline int 8215 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8216 { 8217 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8218 /* Behind our TLP definition or right at */ 8219 return (0); 8220 } 8221 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8222 /* The start is beyond or right at our end of TLP definition */ 8223 return (0); 8224 } 8225 /* It has to be a sub-part of the original TLP recorded */ 8226 return (1); 8227 } 8228 8229 8230 static uint32_t 8231 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8232 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8233 { 8234 uint32_t start, end, changed = 0; 8235 struct rack_sendmap stack_map; 8236 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8237 #ifdef INVARIANTS 8238 struct rack_sendmap *insret; 8239 #endif 8240 int32_t used_ref = 1; 8241 int moved = 0; 8242 8243 start = sack->start; 8244 end = sack->end; 8245 rsm = *prsm; 8246 memset(&fe, 0, sizeof(fe)); 8247 do_rest_ofb: 8248 if ((rsm == NULL) || 8249 (SEQ_LT(end, rsm->r_start)) || 8250 (SEQ_GEQ(start, rsm->r_end)) || 8251 (SEQ_LT(start, rsm->r_start))) { 8252 /* 8253 * We are not in the right spot, 8254 * find the correct spot in the tree. 8255 */ 8256 used_ref = 0; 8257 fe.r_start = start; 8258 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8259 moved++; 8260 } 8261 if (rsm == NULL) { 8262 /* TSNH */ 8263 goto out; 8264 } 8265 /* Ok we have an ACK for some piece of this rsm */ 8266 if (rsm->r_start != start) { 8267 if ((rsm->r_flags & RACK_ACKED) == 0) { 8268 /* 8269 * Before any splitting or hookery is 8270 * done is it a TLP of interest i.e. rxt? 8271 */ 8272 if ((rsm->r_flags & RACK_TLP) && 8273 (rsm->r_rtr_cnt > 1)) { 8274 /* 8275 * We are splitting a rxt TLP, check 8276 * if we need to save off the start/end 8277 */ 8278 if (rack->rc_last_tlp_acked_set && 8279 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8280 /* 8281 * We already turned this on since we are inside 8282 * the previous one was a partially sack now we 8283 * are getting another one (maybe all of it). 8284 * 8285 */ 8286 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8287 /* 8288 * Lets make sure we have all of it though. 
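 * The recorded TLP block only ever grows: e.g. (illustrative
 * sequence numbers) if the saved block is [1000, 2000) and this
 * rsm spans [500, 2500), the start is pulled back to 500 and the
 * end pushed out to 2500 by the two checks below.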
8289 */ 8290 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8291 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8292 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8293 rack->r_ctl.last_tlp_acked_end); 8294 } 8295 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8296 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8297 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8298 rack->r_ctl.last_tlp_acked_end); 8299 } 8300 } else { 8301 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8302 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8303 rack->rc_last_tlp_past_cumack = 0; 8304 rack->rc_last_tlp_acked_set = 1; 8305 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8306 } 8307 } 8308 /** 8309 * Need to split this in two pieces the before and after, 8310 * the before remains in the map, the after must be 8311 * added. In other words we have: 8312 * rsm |--------------| 8313 * sackblk |-------> 8314 * rsm will become 8315 * rsm |---| 8316 * and nrsm will be the sacked piece 8317 * nrsm |----------| 8318 * 8319 * But before we start down that path lets 8320 * see if the sack spans over on top of 8321 * the next guy and it is already sacked. 8322 * 8323 */ 8324 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8325 if (next && (next->r_flags & RACK_ACKED) && 8326 SEQ_GEQ(end, next->r_start)) { 8327 /** 8328 * So the next one is already acked, and 8329 * we can thus by hookery use our stack_map 8330 * to reflect the piece being sacked and 8331 * then adjust the two tree entries moving 8332 * the start and ends around. So we start like: 8333 * rsm |------------| (not-acked) 8334 * next |-----------| (acked) 8335 * sackblk |--------> 8336 * We want to end like so: 8337 * rsm |------| (not-acked) 8338 * next |-----------------| (acked) 8339 * nrsm |-----| 8340 * Where nrsm is a temporary stack piece we 8341 * use to update all the gizmos. 8342 */ 8343 /* Copy up our fudge block */ 8344 nrsm = &stack_map; 8345 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8346 /* Now adjust our tree blocks */ 8347 rsm->r_end = start; 8348 next->r_start = start; 8349 /* Now we must adjust back where next->m is */ 8350 rack_setup_offset_for_rsm(rsm, next); 8351 8352 /* We don't need to adjust rsm, it did not change */ 8353 /* Clear out the dup ack count of the remainder */ 8354 rsm->r_dupack = 0; 8355 rsm->r_just_ret = 0; 8356 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8357 /* Now lets make sure our fudge block is right */ 8358 nrsm->r_start = start; 8359 /* Now lets update all the stats and such */ 8360 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8361 if (rack->app_limited_needs_set) 8362 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8363 changed += (nrsm->r_end - nrsm->r_start); 8364 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8365 if (nrsm->r_flags & RACK_SACK_PASSED) { 8366 rack->r_ctl.rc_reorder_ts = cts; 8367 } 8368 /* 8369 * Now we want to go up from rsm (the 8370 * one left un-acked) to the next one 8371 * in the tmap. We do this so when 8372 * we walk backwards we include marking 8373 * sack-passed on rsm (The one passed in 8374 * is skipped since it is generally called 8375 * on something sacked before removing it 8376 * from the tmap). 8377 */ 8378 if (rsm->r_in_tmap) { 8379 nrsm = TAILQ_NEXT(rsm, r_tnext); 8380 /* 8381 * Now that we have the next 8382 * one walk backwards from there. 
8383 */ 8384 if (nrsm && nrsm->r_in_tmap) 8385 rack_log_sack_passed(tp, rack, nrsm); 8386 } 8387 /* Now are we done? */ 8388 if (SEQ_LT(end, next->r_end) || 8389 (end == next->r_end)) { 8390 /* Done with block */ 8391 goto out; 8392 } 8393 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8394 counter_u64_add(rack_sack_used_next_merge, 1); 8395 /* Postion for the next block */ 8396 start = next->r_end; 8397 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8398 if (rsm == NULL) 8399 goto out; 8400 } else { 8401 /** 8402 * We can't use any hookery here, so we 8403 * need to split the map. We enter like 8404 * so: 8405 * rsm |--------| 8406 * sackblk |-----> 8407 * We will add the new block nrsm and 8408 * that will be the new portion, and then 8409 * fall through after reseting rsm. So we 8410 * split and look like this: 8411 * rsm |----| 8412 * sackblk |-----> 8413 * nrsm |---| 8414 * We then fall through reseting 8415 * rsm to nrsm, so the next block 8416 * picks it up. 8417 */ 8418 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8419 if (nrsm == NULL) { 8420 /* 8421 * failed XXXrrs what can we do but loose the sack 8422 * info? 8423 */ 8424 goto out; 8425 } 8426 counter_u64_add(rack_sack_splits, 1); 8427 rack_clone_rsm(rack, nrsm, rsm, start); 8428 rsm->r_just_ret = 0; 8429 #ifndef INVARIANTS 8430 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8431 #else 8432 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8433 if (insret != NULL) { 8434 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8435 nrsm, insret, rack, rsm); 8436 } 8437 #endif 8438 if (rsm->r_in_tmap) { 8439 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8440 nrsm->r_in_tmap = 1; 8441 } 8442 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8443 rsm->r_flags &= (~RACK_HAS_FIN); 8444 /* Position us to point to the new nrsm that starts the sack blk */ 8445 rsm = nrsm; 8446 } 8447 } else { 8448 /* Already sacked this piece */ 8449 counter_u64_add(rack_sack_skipped_acked, 1); 8450 moved++; 8451 if (end == rsm->r_end) { 8452 /* Done with block */ 8453 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8454 goto out; 8455 } else if (SEQ_LT(end, rsm->r_end)) { 8456 /* A partial sack to a already sacked block */ 8457 moved++; 8458 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8459 goto out; 8460 } else { 8461 /* 8462 * The end goes beyond this guy 8463 * reposition the start to the 8464 * next block. 8465 */ 8466 start = rsm->r_end; 8467 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8468 if (rsm == NULL) 8469 goto out; 8470 } 8471 } 8472 } 8473 if (SEQ_GEQ(end, rsm->r_end)) { 8474 /** 8475 * The end of this block is either beyond this guy or right 8476 * at this guy. I.e.: 8477 * rsm --- |-----| 8478 * end |-----| 8479 * <or> 8480 * end |---------| 8481 */ 8482 if ((rsm->r_flags & RACK_ACKED) == 0) { 8483 /* 8484 * Is it a TLP of interest? 8485 */ 8486 if ((rsm->r_flags & RACK_TLP) && 8487 (rsm->r_rtr_cnt > 1)) { 8488 /* 8489 * We are splitting a rxt TLP, check 8490 * if we need to save off the start/end 8491 */ 8492 if (rack->rc_last_tlp_acked_set && 8493 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8494 /* 8495 * We already turned this on since we are inside 8496 * the previous one was a partially sack now we 8497 * are getting another one (maybe all of it). 
8498 */ 8499 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8500 /* 8501 * Lets make sure we have all of it though. 8502 */ 8503 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8504 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8505 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8506 rack->r_ctl.last_tlp_acked_end); 8507 } 8508 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8509 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8510 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8511 rack->r_ctl.last_tlp_acked_end); 8512 } 8513 } else { 8514 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8515 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8516 rack->rc_last_tlp_past_cumack = 0; 8517 rack->rc_last_tlp_acked_set = 1; 8518 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8519 } 8520 } 8521 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8522 changed += (rsm->r_end - rsm->r_start); 8523 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8524 if (rsm->r_in_tmap) /* should be true */ 8525 rack_log_sack_passed(tp, rack, rsm); 8526 /* Is Reordering occuring? */ 8527 if (rsm->r_flags & RACK_SACK_PASSED) { 8528 rsm->r_flags &= ~RACK_SACK_PASSED; 8529 rack->r_ctl.rc_reorder_ts = cts; 8530 } 8531 if (rack->app_limited_needs_set) 8532 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8533 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8534 rsm->r_flags |= RACK_ACKED; 8535 if (rsm->r_in_tmap) { 8536 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8537 rsm->r_in_tmap = 0; 8538 } 8539 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8540 } else { 8541 counter_u64_add(rack_sack_skipped_acked, 1); 8542 moved++; 8543 } 8544 if (end == rsm->r_end) { 8545 /* This block only - done, setup for next */ 8546 goto out; 8547 } 8548 /* 8549 * There is more not coverend by this rsm move on 8550 * to the next block in the RB tree. 8551 */ 8552 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8553 start = rsm->r_end; 8554 rsm = nrsm; 8555 if (rsm == NULL) 8556 goto out; 8557 goto do_rest_ofb; 8558 } 8559 /** 8560 * The end of this sack block is smaller than 8561 * our rsm i.e.: 8562 * rsm --- |-----| 8563 * end |--| 8564 */ 8565 if ((rsm->r_flags & RACK_ACKED) == 0) { 8566 /* 8567 * Is it a TLP of interest? 8568 */ 8569 if ((rsm->r_flags & RACK_TLP) && 8570 (rsm->r_rtr_cnt > 1)) { 8571 /* 8572 * We are splitting a rxt TLP, check 8573 * if we need to save off the start/end 8574 */ 8575 if (rack->rc_last_tlp_acked_set && 8576 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8577 /* 8578 * We already turned this on since we are inside 8579 * the previous one was a partially sack now we 8580 * are getting another one (maybe all of it). 8581 */ 8582 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8583 /* 8584 * Lets make sure we have all of it though. 
8585 */ 8586 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8587 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8588 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8589 rack->r_ctl.last_tlp_acked_end); 8590 } 8591 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8592 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8593 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8594 rack->r_ctl.last_tlp_acked_end); 8595 } 8596 } else { 8597 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8598 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8599 rack->rc_last_tlp_past_cumack = 0; 8600 rack->rc_last_tlp_acked_set = 1; 8601 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8602 } 8603 } 8604 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8605 if (prev && 8606 (prev->r_flags & RACK_ACKED)) { 8607 /** 8608 * Goal, we want the right remainder of rsm to shrink 8609 * in place and span from (rsm->r_start = end) to rsm->r_end. 8610 * We want to expand prev to go all the way 8611 * to prev->r_end <- end. 8612 * so in the tree we have before: 8613 * prev |--------| (acked) 8614 * rsm |-------| (non-acked) 8615 * sackblk |-| 8616 * We churn it so we end up with 8617 * prev |----------| (acked) 8618 * rsm |-----| (non-acked) 8619 * nrsm |-| (temporary) 8620 * 8621 * Note if either prev/rsm is a TLP we don't 8622 * do this. 8623 */ 8624 nrsm = &stack_map; 8625 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8626 prev->r_end = end; 8627 rsm->r_start = end; 8628 /* Now adjust nrsm (stack copy) to be 8629 * the one that is the small 8630 * piece that was "sacked". 8631 */ 8632 nrsm->r_end = end; 8633 rsm->r_dupack = 0; 8634 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8635 /* 8636 * Now that the rsm has had its start moved forward 8637 * lets go ahead and get its new place in the world. 8638 */ 8639 rack_setup_offset_for_rsm(prev, rsm); 8640 /* 8641 * Now nrsm is our new little piece 8642 * that is acked (which was merged 8643 * to prev). Update the rtt and changed 8644 * based on that. Also check for reordering. 8645 */ 8646 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8647 if (rack->app_limited_needs_set) 8648 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8649 changed += (nrsm->r_end - nrsm->r_start); 8650 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8651 if (nrsm->r_flags & RACK_SACK_PASSED) { 8652 rack->r_ctl.rc_reorder_ts = cts; 8653 } 8654 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8655 rsm = prev; 8656 counter_u64_add(rack_sack_used_prev_merge, 1); 8657 } else { 8658 /** 8659 * This is the case where our previous 8660 * block is not acked either, so we must 8661 * split the block in two. 8662 */ 8663 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8664 if (nrsm == NULL) { 8665 /* failed rrs what can we do but loose the sack info? */ 8666 goto out; 8667 } 8668 if ((rsm->r_flags & RACK_TLP) && 8669 (rsm->r_rtr_cnt > 1)) { 8670 /* 8671 * We are splitting a rxt TLP, check 8672 * if we need to save off the start/end 8673 */ 8674 if (rack->rc_last_tlp_acked_set && 8675 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8676 /* 8677 * We already turned this on since this block is inside 8678 * the previous one was a partially sack now we 8679 * are getting another one (maybe all of it). 8680 */ 8681 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8682 /* 8683 * Lets make sure we have all of it though. 
8684 */ 8685 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8686 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8687 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8688 rack->r_ctl.last_tlp_acked_end); 8689 } 8690 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8691 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8692 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8693 rack->r_ctl.last_tlp_acked_end); 8694 } 8695 } else { 8696 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8697 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8698 rack->rc_last_tlp_acked_set = 1; 8699 rack->rc_last_tlp_past_cumack = 0; 8700 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8701 } 8702 } 8703 /** 8704 * In this case nrsm becomes 8705 * nrsm->r_start = end; 8706 * nrsm->r_end = rsm->r_end; 8707 * which is un-acked. 8708 * <and> 8709 * rsm->r_end = nrsm->r_start; 8710 * i.e. the remaining un-acked 8711 * piece is left on the left 8712 * hand side. 8713 * 8714 * So we start like this 8715 * rsm |----------| (not acked) 8716 * sackblk |---| 8717 * build it so we have 8718 * rsm |---| (acked) 8719 * nrsm |------| (not acked) 8720 */ 8721 counter_u64_add(rack_sack_splits, 1); 8722 rack_clone_rsm(rack, nrsm, rsm, end); 8723 rsm->r_flags &= (~RACK_HAS_FIN); 8724 rsm->r_just_ret = 0; 8725 #ifndef INVARIANTS 8726 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8727 #else 8728 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8729 if (insret != NULL) { 8730 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8731 nrsm, insret, rack, rsm); 8732 } 8733 #endif 8734 if (rsm->r_in_tmap) { 8735 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8736 nrsm->r_in_tmap = 1; 8737 } 8738 nrsm->r_dupack = 0; 8739 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8740 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8741 changed += (rsm->r_end - rsm->r_start); 8742 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8743 if (rsm->r_in_tmap) /* should be true */ 8744 rack_log_sack_passed(tp, rack, rsm); 8745 /* Is Reordering occuring? */ 8746 if (rsm->r_flags & RACK_SACK_PASSED) { 8747 rsm->r_flags &= ~RACK_SACK_PASSED; 8748 rack->r_ctl.rc_reorder_ts = cts; 8749 } 8750 if (rack->app_limited_needs_set) 8751 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8752 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8753 rsm->r_flags |= RACK_ACKED; 8754 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8755 if (rsm->r_in_tmap) { 8756 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8757 rsm->r_in_tmap = 0; 8758 } 8759 } 8760 } else if (start != end){ 8761 /* 8762 * The block was already acked. 8763 */ 8764 counter_u64_add(rack_sack_skipped_acked, 1); 8765 moved++; 8766 } 8767 out: 8768 if (rsm && 8769 ((rsm->r_flags & RACK_TLP) == 0) && 8770 (rsm->r_flags & RACK_ACKED)) { 8771 /* 8772 * Now can we merge where we worked 8773 * with either the previous or 8774 * next block? 8775 */ 8776 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8777 while (next) { 8778 if (next->r_flags & RACK_TLP) 8779 break; 8780 if (next->r_flags & RACK_ACKED) { 8781 /* yep this and next can be merged */ 8782 rsm = rack_merge_rsm(rack, rsm, next); 8783 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8784 } else 8785 break; 8786 } 8787 /* Now what about the previous? 
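 * Same idea as the forward merge above, just walking toward the
 * head of the tree: keep folding in any previous block that is
 * already RACK_ACKED and not a TLP, so a run of contiguously
 * sacked data collapses into a single sendmap entry.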
*/ 8788 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8789 while (prev) { 8790 if (prev->r_flags & RACK_TLP) 8791 break; 8792 if (prev->r_flags & RACK_ACKED) { 8793 /* yep the previous and this can be merged */ 8794 rsm = rack_merge_rsm(rack, prev, rsm); 8795 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8796 } else 8797 break; 8798 } 8799 } 8800 if (used_ref == 0) { 8801 counter_u64_add(rack_sack_proc_all, 1); 8802 } else { 8803 counter_u64_add(rack_sack_proc_short, 1); 8804 } 8805 /* Save off the next one for quick reference. */ 8806 if (rsm) 8807 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8808 else 8809 nrsm = NULL; 8810 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8811 /* Pass back the moved. */ 8812 *moved_two = moved; 8813 return (changed); 8814 } 8815 8816 static void inline 8817 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8818 { 8819 struct rack_sendmap *tmap; 8820 8821 tmap = NULL; 8822 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8823 /* Its no longer sacked, mark it so */ 8824 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8825 #ifdef INVARIANTS 8826 if (rsm->r_in_tmap) { 8827 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8828 rack, rsm, rsm->r_flags); 8829 } 8830 #endif 8831 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8832 /* Rebuild it into our tmap */ 8833 if (tmap == NULL) { 8834 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8835 tmap = rsm; 8836 } else { 8837 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8838 tmap = rsm; 8839 } 8840 tmap->r_in_tmap = 1; 8841 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8842 } 8843 /* 8844 * Now lets possibly clear the sack filter so we start 8845 * recognizing sacks that cover this area. 8846 */ 8847 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8848 8849 } 8850 8851 static void 8852 rack_do_decay(struct tcp_rack *rack) 8853 { 8854 struct timeval res; 8855 8856 #define timersub(tvp, uvp, vvp) \ 8857 do { \ 8858 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8859 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8860 if ((vvp)->tv_usec < 0) { \ 8861 (vvp)->tv_sec--; \ 8862 (vvp)->tv_usec += 1000000; \ 8863 } \ 8864 } while (0) 8865 8866 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8867 #undef timersub 8868 8869 rack->r_ctl.input_pkt++; 8870 if ((rack->rc_in_persist) || 8871 (res.tv_sec >= 1) || 8872 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8873 /* 8874 * Check for decay of non-SAD, 8875 * we want all SAD detection metrics to 8876 * decay 1/4 per second (or more) passed. 8877 */ 8878 #ifdef NETFLIX_EXP_DETECTION 8879 uint32_t pkt_delta; 8880 8881 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8882 #endif 8883 /* Update our saved tracking values */ 8884 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8885 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8886 /* Now do we escape without decay? */ 8887 #ifdef NETFLIX_EXP_DETECTION 8888 if (rack->rc_in_persist || 8889 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8890 (pkt_delta < tcp_sad_low_pps)){ 8891 /* 8892 * We don't decay idle connections 8893 * or ones that have a low input pps. 
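 * "Low input pps" means fewer than tcp_sad_low_pps packets have
 * arrived since the last decay pass (pkt_delta above), and a
 * decay pass runs at most about once a second (res.tv_sec >= 1)
 * unless the connection is idle or persisting, so this is roughly
 * a packets-per-second rate check.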
8894 */ 8895 return; 8896 } 8897 /* Decay the counters */ 8898 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8899 tcp_sad_decay_val); 8900 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8901 tcp_sad_decay_val); 8902 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8903 tcp_sad_decay_val); 8904 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8905 tcp_sad_decay_val); 8906 #endif 8907 } 8908 } 8909 8910 static void 8911 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8912 { 8913 struct rack_sendmap *rsm; 8914 #ifdef INVARIANTS 8915 struct rack_sendmap *rm; 8916 #endif 8917 8918 /* 8919 * The ACK point is advancing to th_ack, we must drop off 8920 * the packets in the rack log and calculate any eligble 8921 * RTT's. 8922 */ 8923 rack->r_wanted_output = 1; 8924 8925 /* Tend any TLP that has been marked for 1/2 the seq space (its old) */ 8926 if ((rack->rc_last_tlp_acked_set == 1)&& 8927 (rack->rc_last_tlp_past_cumack == 1) && 8928 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8929 /* 8930 * We have reached the point where our last rack 8931 * tlp retransmit sequence is ahead of the cum-ack. 8932 * This can only happen when the cum-ack moves all 8933 * the way around (its been a full 2^^31+1 bytes 8934 * or more since we sent a retransmitted TLP). Lets 8935 * turn off the valid flag since its not really valid. 8936 * 8937 * Note since sack's also turn on this event we have 8938 * a complication, we have to wait to age it out until 8939 * the cum-ack is by the TLP before checking which is 8940 * what the next else clause does. 8941 */ 8942 rack_log_dsack_event(rack, 9, __LINE__, 8943 rack->r_ctl.last_tlp_acked_start, 8944 rack->r_ctl.last_tlp_acked_end); 8945 rack->rc_last_tlp_acked_set = 0; 8946 rack->rc_last_tlp_past_cumack = 0; 8947 } else if ((rack->rc_last_tlp_acked_set == 1) && 8948 (rack->rc_last_tlp_past_cumack == 0) && 8949 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8950 /* 8951 * It is safe to start aging TLP's out. 8952 */ 8953 rack->rc_last_tlp_past_cumack = 1; 8954 } 8955 /* We do the same for the tlp send seq as well */ 8956 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8957 (rack->rc_last_sent_tlp_past_cumack == 1) && 8958 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8959 rack_log_dsack_event(rack, 9, __LINE__, 8960 rack->r_ctl.last_sent_tlp_seq, 8961 (rack->r_ctl.last_sent_tlp_seq + 8962 rack->r_ctl.last_sent_tlp_len)); 8963 rack->rc_last_sent_tlp_seq_valid = 0; 8964 rack->rc_last_sent_tlp_past_cumack = 0; 8965 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8966 (rack->rc_last_sent_tlp_past_cumack == 0) && 8967 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8968 /* 8969 * It is safe to start aging TLP's send. 8970 */ 8971 rack->rc_last_sent_tlp_past_cumack = 1; 8972 } 8973 more: 8974 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8975 if (rsm == NULL) { 8976 if ((th_ack - 1) == tp->iss) { 8977 /* 8978 * For the SYN incoming case we will not 8979 * have called tcp_output for the sending of 8980 * the SYN, so there will be no map. All 8981 * other cases should probably be a panic. 
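 * The (th_ack - 1) == tp->iss test above catches an ack of just
 * our SYN (th_ack == iss + 1); on a passive open that segment was
 * sent before this stack took over the connection (typically by
 * the syncache), so an empty map is expected and harmless here.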
8982 */ 8983 return; 8984 } 8985 if (tp->t_flags & TF_SENTFIN) { 8986 /* if we sent a FIN we often will not have map */ 8987 return; 8988 } 8989 #ifdef INVARIANTS 8990 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8991 tp, 8992 tp->t_state, th_ack, rack, 8993 tp->snd_una, tp->snd_max, tp->snd_nxt); 8994 #endif 8995 return; 8996 } 8997 if (SEQ_LT(th_ack, rsm->r_start)) { 8998 /* Huh map is missing this */ 8999 #ifdef INVARIANTS 9000 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9001 rsm->r_start, 9002 th_ack, tp->t_state, rack->r_state); 9003 #endif 9004 return; 9005 } 9006 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9007 9008 /* Now was it a retransmitted TLP? */ 9009 if ((rsm->r_flags & RACK_TLP) && 9010 (rsm->r_rtr_cnt > 1)) { 9011 /* 9012 * Yes, this rsm was a TLP and retransmitted, remember that 9013 * since if a DSACK comes back on this we don't want 9014 * to think of it as a reordered segment. This may 9015 * get updated again with possibly even other TLPs 9016 * in flight, but thats ok. Only when we don't send 9017 * a retransmitted TLP for 1/2 the sequences space 9018 * will it get turned off (above). 9019 */ 9020 if (rack->rc_last_tlp_acked_set && 9021 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9022 /* 9023 * We already turned this on since the end matches, 9024 * the previous one was a partially ack now we 9025 * are getting another one (maybe all of it). 9026 */ 9027 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9028 /* 9029 * Lets make sure we have all of it though. 9030 */ 9031 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9032 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9033 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9034 rack->r_ctl.last_tlp_acked_end); 9035 } 9036 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9037 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9038 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9039 rack->r_ctl.last_tlp_acked_end); 9040 } 9041 } else { 9042 rack->rc_last_tlp_past_cumack = 1; 9043 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9044 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9045 rack->rc_last_tlp_acked_set = 1; 9046 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9047 } 9048 } 9049 /* Now do we consume the whole thing? */ 9050 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9051 /* Its all consumed. 
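 * th_ack is at or beyond r_end, so this whole entry can be freed.
 * For example (illustrative numbers) with an rsm of [100, 200)
 * and a th_ack of 350, "left" below becomes 150 and, once the
 * bookkeeping for this entry is done, we loop back via "more" to
 * consume the following map entries as well.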
*/ 9052 uint32_t left; 9053 uint8_t newly_acked; 9054 9055 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9056 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9057 rsm->r_rtr_bytes = 0; 9058 /* Record the time of highest cumack sent */ 9059 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9060 #ifndef INVARIANTS 9061 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9062 #else 9063 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9064 if (rm != rsm) { 9065 panic("removing head in rack:%p rsm:%p rm:%p", 9066 rack, rsm, rm); 9067 } 9068 #endif 9069 if (rsm->r_in_tmap) { 9070 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9071 rsm->r_in_tmap = 0; 9072 } 9073 newly_acked = 1; 9074 if (rsm->r_flags & RACK_ACKED) { 9075 /* 9076 * It was acked on the scoreboard -- remove 9077 * it from total 9078 */ 9079 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9080 newly_acked = 0; 9081 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9082 /* 9083 * There are segments ACKED on the 9084 * scoreboard further up. We are seeing 9085 * reordering. 9086 */ 9087 rsm->r_flags &= ~RACK_SACK_PASSED; 9088 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9089 rsm->r_flags |= RACK_ACKED; 9090 rack->r_ctl.rc_reorder_ts = cts; 9091 if (rack->r_ent_rec_ns) { 9092 /* 9093 * We have sent no more, and we saw an sack 9094 * then ack arrive. 9095 */ 9096 rack->r_might_revert = 1; 9097 } 9098 } 9099 if ((rsm->r_flags & RACK_TO_REXT) && 9100 (tp->t_flags & TF_RCVD_TSTMP) && 9101 (to->to_flags & TOF_TS) && 9102 (to->to_tsecr != 0) && 9103 (tp->t_flags & TF_PREVVALID)) { 9104 /* 9105 * We can use the timestamp to see 9106 * if this retransmission was from the 9107 * first transmit. If so we made a mistake. 9108 */ 9109 tp->t_flags &= ~TF_PREVVALID; 9110 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9111 /* The first transmit is what this ack is for */ 9112 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 9113 } 9114 } 9115 left = th_ack - rsm->r_end; 9116 if (rack->app_limited_needs_set && newly_acked) 9117 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9118 /* Free back to zone */ 9119 rack_free(rack, rsm); 9120 if (left) { 9121 goto more; 9122 } 9123 /* Check for reneging */ 9124 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9125 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9126 /* 9127 * The peer has moved snd_una up to 9128 * the edge of this send, i.e. one 9129 * that it had previously acked. The only 9130 * way that can be true if the peer threw 9131 * away data (space issues) that it had 9132 * previously sacked (else it would have 9133 * given us snd_una up to (rsm->r_end). 9134 * We need to undo the acked markings here. 9135 * 9136 * Note we have to look to make sure th_ack is 9137 * our rsm->r_start in case we get an old ack 9138 * where th_ack is behind snd_una. 9139 */ 9140 rack_peer_reneges(rack, rsm, th_ack); 9141 } 9142 return; 9143 } 9144 if (rsm->r_flags & RACK_ACKED) { 9145 /* 9146 * It was acked on the scoreboard -- remove it from 9147 * total for the part being cum-acked. 9148 */ 9149 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9150 } 9151 /* 9152 * Clear the dup ack count for 9153 * the piece that remains. 9154 */ 9155 rsm->r_dupack = 0; 9156 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9157 if (rsm->r_rtr_bytes) { 9158 /* 9159 * It was retransmitted adjust the 9160 * sack holes for what was acked. 
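 * rc_holes_rxt feeds the pipe estimate,
 * ((snd_max - snd_una) - rc_sacked) + rc_holes_rxt, so as the
 * cum-ack consumes bytes that we had retransmitted, those bytes
 * have to come back out of the hole accounting below.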
9161 */ 9162 int ack_am; 9163 9164 ack_am = (th_ack - rsm->r_start); 9165 if (ack_am >= rsm->r_rtr_bytes) { 9166 rack->r_ctl.rc_holes_rxt -= ack_am; 9167 rsm->r_rtr_bytes -= ack_am; 9168 } 9169 } 9170 /* 9171 * Update where the piece starts and record 9172 * the time of send of highest cumack sent. 9173 */ 9174 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9175 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9176 /* Now we need to move our offset forward too */ 9177 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9178 /* Fix up the orig_m_len and possibly the mbuf offset */ 9179 rack_adjust_orig_mlen(rsm); 9180 } 9181 rsm->soff += (th_ack - rsm->r_start); 9182 rsm->r_start = th_ack; 9183 /* Now do we need to move the mbuf fwd too? */ 9184 if (rsm->m) { 9185 while (rsm->soff >= rsm->m->m_len) { 9186 rsm->soff -= rsm->m->m_len; 9187 rsm->m = rsm->m->m_next; 9188 KASSERT((rsm->m != NULL), 9189 (" nrsm:%p hit at soff:%u null m", 9190 rsm, rsm->soff)); 9191 } 9192 rsm->orig_m_len = rsm->m->m_len; 9193 } 9194 if (rack->app_limited_needs_set) 9195 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9196 } 9197 9198 static void 9199 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9200 { 9201 struct rack_sendmap *rsm; 9202 int sack_pass_fnd = 0; 9203 9204 if (rack->r_might_revert) { 9205 /* 9206 * Ok we have reordering, have not sent anything, we 9207 * might want to revert the congestion state if nothing 9208 * further has SACK_PASSED on it. Lets check. 9209 * 9210 * We also get here when we have DSACKs come in for 9211 * all the data that we FR'd. Note that a rxt or tlp 9212 * timer clears this from happening. 9213 */ 9214 9215 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9216 if (rsm->r_flags & RACK_SACK_PASSED) { 9217 sack_pass_fnd = 1; 9218 break; 9219 } 9220 } 9221 if (sack_pass_fnd == 0) { 9222 /* 9223 * We went into recovery 9224 * incorrectly due to reordering! 9225 */ 9226 int orig_cwnd; 9227 9228 rack->r_ent_rec_ns = 0; 9229 orig_cwnd = tp->snd_cwnd; 9230 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9231 tp->snd_recover = tp->snd_una; 9232 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9233 EXIT_RECOVERY(tp->t_flags); 9234 } 9235 rack->r_might_revert = 0; 9236 } 9237 } 9238 9239 #ifdef NETFLIX_EXP_DETECTION 9240 static void 9241 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9242 { 9243 if ((rack->do_detection || tcp_force_detection) && 9244 tcp_sack_to_ack_thresh && 9245 tcp_sack_to_move_thresh && 9246 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9247 /* 9248 * We have thresholds set to find 9249 * possible attackers and disable sack. 9250 * Check them. 
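 * Both ratios below are scaled by 1000. For example (illustrative
 * counts only): 600 sack blocks against 100 acks gives an
 * ackratio of 6000, and 300 "extra movement" sacks out of 400
 * total moves gives a moveratio of 750; sack processing is only
 * disabled when both ratios exceed their thresholds
 * (tcp_sack_to_ack_thresh / tcp_sack_to_move_thresh).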
9251 */ 9252 uint64_t ackratio, moveratio, movetotal; 9253 9254 /* Log detecting */ 9255 rack_log_sad(rack, 1); 9256 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9257 ackratio *= (uint64_t)(1000); 9258 if (rack->r_ctl.ack_count) 9259 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9260 else { 9261 /* We really should not hit here */ 9262 ackratio = 1000; 9263 } 9264 if ((rack->sack_attack_disable == 0) && 9265 (ackratio > rack_highest_sack_thresh_seen)) 9266 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9267 movetotal = rack->r_ctl.sack_moved_extra; 9268 movetotal += rack->r_ctl.sack_noextra_move; 9269 moveratio = rack->r_ctl.sack_moved_extra; 9270 moveratio *= (uint64_t)1000; 9271 if (movetotal) 9272 moveratio /= movetotal; 9273 else { 9274 /* No moves, thats pretty good */ 9275 moveratio = 0; 9276 } 9277 if ((rack->sack_attack_disable == 0) && 9278 (moveratio > rack_highest_move_thresh_seen)) 9279 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9280 if (rack->sack_attack_disable == 0) { 9281 if ((ackratio > tcp_sack_to_ack_thresh) && 9282 (moveratio > tcp_sack_to_move_thresh)) { 9283 /* Disable sack processing */ 9284 rack->sack_attack_disable = 1; 9285 if (rack->r_rep_attack == 0) { 9286 rack->r_rep_attack = 1; 9287 counter_u64_add(rack_sack_attacks_detected, 1); 9288 } 9289 if (tcp_attack_on_turns_on_logging) { 9290 /* 9291 * Turn on logging, used for debugging 9292 * false positives. 9293 */ 9294 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9295 } 9296 /* Clamp the cwnd at flight size */ 9297 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9298 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9299 rack_log_sad(rack, 2); 9300 } 9301 } else { 9302 /* We are sack-disabled check for false positives */ 9303 if ((ackratio <= tcp_restoral_thresh) || 9304 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9305 rack->sack_attack_disable = 0; 9306 rack_log_sad(rack, 3); 9307 /* Restart counting */ 9308 rack->r_ctl.sack_count = 0; 9309 rack->r_ctl.sack_moved_extra = 0; 9310 rack->r_ctl.sack_noextra_move = 1; 9311 rack->r_ctl.ack_count = max(1, 9312 (bytes_this_ack / segsiz)); 9313 9314 if (rack->r_rep_reverse == 0) { 9315 rack->r_rep_reverse = 1; 9316 counter_u64_add(rack_sack_attacks_reversed, 1); 9317 } 9318 /* Restore the cwnd */ 9319 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9320 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9321 } 9322 } 9323 } 9324 } 9325 #endif 9326 9327 static int 9328 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9329 { 9330 9331 uint32_t am, l_end; 9332 int was_tlp = 0; 9333 9334 if (SEQ_GT(end, start)) 9335 am = end - start; 9336 else 9337 am = 0; 9338 if ((rack->rc_last_tlp_acked_set ) && 9339 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9340 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9341 /* 9342 * The DSACK is because of a TLP which we don't 9343 * do anything with the reordering window over since 9344 * it was not reordering that caused the DSACK but 9345 * our previous retransmit TLP. 9346 */ 9347 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9348 was_tlp = 1; 9349 goto skip_dsack_round; 9350 } 9351 if (rack->rc_last_sent_tlp_seq_valid) { 9352 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9353 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9354 (SEQ_LEQ(end, l_end))) { 9355 /* 9356 * This dsack is from the last sent TLP, ignore it 9357 * for reordering purposes. 
9358 */ 9359 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9360 was_tlp = 1; 9361 goto skip_dsack_round; 9362 } 9363 } 9364 if (rack->rc_dsack_round_seen == 0) { 9365 rack->rc_dsack_round_seen = 1; 9366 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9367 rack->r_ctl.num_dsack++; 9368 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9369 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9370 } 9371 skip_dsack_round: 9372 /* 9373 * We keep track of how many DSACK blocks we get 9374 * after a recovery incident. 9375 */ 9376 rack->r_ctl.dsack_byte_cnt += am; 9377 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9378 rack->r_ctl.retran_during_recovery && 9379 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9380 /* 9381 * False recovery most likely culprit is reordering. If 9382 * nothing else is missing we need to revert. 9383 */ 9384 rack->r_might_revert = 1; 9385 rack_handle_might_revert(rack->rc_tp, rack); 9386 rack->r_might_revert = 0; 9387 rack->r_ctl.retran_during_recovery = 0; 9388 rack->r_ctl.dsack_byte_cnt = 0; 9389 } 9390 return (was_tlp); 9391 } 9392 9393 static uint32_t 9394 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 9395 { 9396 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 9397 } 9398 9399 static int32_t 9400 rack_compute_pipe(struct tcpcb *tp) 9401 { 9402 return ((int32_t)do_rack_compute_pipe(tp, 9403 (struct tcp_rack *)tp->t_fb_ptr, 9404 tp->snd_una)); 9405 } 9406 9407 static void 9408 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9409 { 9410 /* Deal with changed and PRR here (in recovery only) */ 9411 uint32_t pipe, snd_una; 9412 9413 rack->r_ctl.rc_prr_delivered += changed; 9414 9415 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9416 /* 9417 * It is all outstanding, we are application limited 9418 * and thus we don't need more room to send anything. 9419 * Note we use tp->snd_una here and not th_ack because 9420 * the data as yet not been cut from the sb. 
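 * Otherwise we fall through to the PRR (RFC 6937 style)
 * computation below: while pipe is above ssthresh,
 *   sndcnt = (prr_delivered * ssthresh) / recovery_fs + 1 - prr_out
 * clamped at zero. For example (illustrative numbers) with
 * prr_delivered = 6000, ssthresh = 10000, recovery_fs = 20000 and
 * prr_out = 1448 that yields 3001 - 1448 = 1553 bytes we may send.
 * Once pipe is at or below ssthresh we instead grow toward
 * ssthresh, limited by roughly the newly delivered bytes plus one
 * MSS.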
9421 */ 9422 rack->r_ctl.rc_prr_sndcnt = 0; 9423 return; 9424 } 9425 /* Compute prr_sndcnt */ 9426 if (SEQ_GT(tp->snd_una, th_ack)) { 9427 snd_una = tp->snd_una; 9428 } else { 9429 snd_una = th_ack; 9430 } 9431 pipe = do_rack_compute_pipe(tp, rack, snd_una); 9432 if (pipe > tp->snd_ssthresh) { 9433 long sndcnt; 9434 9435 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9436 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9437 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9438 else { 9439 rack->r_ctl.rc_prr_sndcnt = 0; 9440 rack_log_to_prr(rack, 9, 0, __LINE__); 9441 sndcnt = 0; 9442 } 9443 sndcnt++; 9444 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9445 sndcnt -= rack->r_ctl.rc_prr_out; 9446 else 9447 sndcnt = 0; 9448 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9449 rack_log_to_prr(rack, 10, 0, __LINE__); 9450 } else { 9451 uint32_t limit; 9452 9453 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9454 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9455 else 9456 limit = 0; 9457 if (changed > limit) 9458 limit = changed; 9459 limit += ctf_fixed_maxseg(tp); 9460 if (tp->snd_ssthresh > pipe) { 9461 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9462 rack_log_to_prr(rack, 11, 0, __LINE__); 9463 } else { 9464 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9465 rack_log_to_prr(rack, 12, 0, __LINE__); 9466 } 9467 } 9468 } 9469 9470 static void 9471 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9472 { 9473 uint32_t changed; 9474 struct tcp_rack *rack; 9475 struct rack_sendmap *rsm; 9476 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9477 register uint32_t th_ack; 9478 int32_t i, j, k, num_sack_blks = 0; 9479 uint32_t cts, acked, ack_point; 9480 int loop_start = 0, moved_two = 0; 9481 uint32_t tsused; 9482 9483 9484 INP_WLOCK_ASSERT(tptoinpcb(tp)); 9485 if (tcp_get_flags(th) & TH_RST) { 9486 /* We don't log resets */ 9487 return; 9488 } 9489 rack = (struct tcp_rack *)tp->t_fb_ptr; 9490 cts = tcp_get_usecs(NULL); 9491 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9492 changed = 0; 9493 th_ack = th->th_ack; 9494 if (rack->sack_attack_disable == 0) 9495 rack_do_decay(rack); 9496 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9497 /* 9498 * You only get credit for 9499 * MSS and greater (and you get extra 9500 * credit for larger cum-ack moves). 9501 */ 9502 int ac; 9503 9504 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9505 rack->r_ctl.ack_count += ac; 9506 counter_u64_add(rack_ack_total, ac); 9507 } 9508 if (rack->r_ctl.ack_count > 0xfff00000) { 9509 /* 9510 * reduce the number to keep us under 9511 * a uint32_t. 9512 */ 9513 rack->r_ctl.ack_count /= 2; 9514 rack->r_ctl.sack_count /= 2; 9515 } 9516 if (SEQ_GT(th_ack, tp->snd_una)) { 9517 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9518 tp->t_acktime = ticks; 9519 } 9520 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9521 changed = th_ack - rsm->r_start; 9522 if (changed) { 9523 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9524 } 9525 if ((to->to_flags & TOF_SACK) == 0) { 9526 /* We are done nothing left and no sack. */ 9527 rack_handle_might_revert(tp, rack); 9528 /* 9529 * For cases where we struck a dup-ack 9530 * with no SACK, add to the changes so 9531 * PRR will work right. 
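 * Crediting one MSS of "changed" per SACK-less dup-ack lets
 * rack_update_prr() release roughly a segment's worth of send
 * quota per duplicate ack, similar to what classic dup-ack driven
 * recovery would have allowed.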
9532 */ 9533 if (dup_ack_struck && (changed == 0)) { 9534 changed += ctf_fixed_maxseg(rack->rc_tp); 9535 } 9536 goto out; 9537 } 9538 /* Sack block processing */ 9539 if (SEQ_GT(th_ack, tp->snd_una)) 9540 ack_point = th_ack; 9541 else 9542 ack_point = tp->snd_una; 9543 for (i = 0; i < to->to_nsacks; i++) { 9544 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9545 &sack, sizeof(sack)); 9546 sack.start = ntohl(sack.start); 9547 sack.end = ntohl(sack.end); 9548 if (SEQ_GT(sack.end, sack.start) && 9549 SEQ_GT(sack.start, ack_point) && 9550 SEQ_LT(sack.start, tp->snd_max) && 9551 SEQ_GT(sack.end, ack_point) && 9552 SEQ_LEQ(sack.end, tp->snd_max)) { 9553 sack_blocks[num_sack_blks] = sack; 9554 num_sack_blks++; 9555 } else if (SEQ_LEQ(sack.start, th_ack) && 9556 SEQ_LEQ(sack.end, th_ack)) { 9557 int was_tlp; 9558 9559 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9560 /* 9561 * Its a D-SACK block. 9562 */ 9563 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9564 } 9565 } 9566 if (rack->rc_dsack_round_seen) { 9567 /* Is the dsack roound over? */ 9568 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9569 /* Yes it is */ 9570 rack->rc_dsack_round_seen = 0; 9571 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9572 } 9573 } 9574 /* 9575 * Sort the SACK blocks so we can update the rack scoreboard with 9576 * just one pass. 9577 */ 9578 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9579 num_sack_blks, th->th_ack); 9580 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9581 if (num_sack_blks == 0) { 9582 /* Nothing to sack (DSACKs?) */ 9583 goto out_with_totals; 9584 } 9585 if (num_sack_blks < 2) { 9586 /* Only one, we don't need to sort */ 9587 goto do_sack_work; 9588 } 9589 /* Sort the sacks */ 9590 for (i = 0; i < num_sack_blks; i++) { 9591 for (j = i + 1; j < num_sack_blks; j++) { 9592 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9593 sack = sack_blocks[i]; 9594 sack_blocks[i] = sack_blocks[j]; 9595 sack_blocks[j] = sack; 9596 } 9597 } 9598 } 9599 /* 9600 * Now are any of the sack block ends the same (yes some 9601 * implementations send these)? 9602 */ 9603 again: 9604 if (num_sack_blks == 0) 9605 goto out_with_totals; 9606 if (num_sack_blks > 1) { 9607 for (i = 0; i < num_sack_blks; i++) { 9608 for (j = i + 1; j < num_sack_blks; j++) { 9609 if (sack_blocks[i].end == sack_blocks[j].end) { 9610 /* 9611 * Ok these two have the same end we 9612 * want the smallest end and then 9613 * throw away the larger and start 9614 * again. 9615 */ 9616 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9617 /* 9618 * The second block covers 9619 * more area use that 9620 */ 9621 sack_blocks[i].start = sack_blocks[j].start; 9622 } 9623 /* 9624 * Now collapse out the dup-sack and 9625 * lower the count 9626 */ 9627 for (k = (j + 1); k < num_sack_blks; k++) { 9628 sack_blocks[j].start = sack_blocks[k].start; 9629 sack_blocks[j].end = sack_blocks[k].end; 9630 j++; 9631 } 9632 num_sack_blks--; 9633 goto again; 9634 } 9635 } 9636 } 9637 } 9638 do_sack_work: 9639 /* 9640 * First lets look to see if 9641 * we have retransmitted and 9642 * can use the transmit next? 9643 */ 9644 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9645 if (rsm && 9646 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9647 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9648 /* 9649 * We probably did the FR and the next 9650 * SACK in continues as we would expect. 
9651 */ 9652 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9653 if (acked) { 9654 rack->r_wanted_output = 1; 9655 changed += acked; 9656 } 9657 if (num_sack_blks == 1) { 9658 /* 9659 * This is what we would expect from 9660 * a normal implementation to happen 9661 * after we have retransmitted the FR, 9662 * i.e the sack-filter pushes down 9663 * to 1 block and the next to be retransmitted 9664 * is the sequence in the sack block (has more 9665 * are acked). Count this as ACK'd data to boost 9666 * up the chances of recovering any false positives. 9667 */ 9668 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9669 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9670 counter_u64_add(rack_express_sack, 1); 9671 if (rack->r_ctl.ack_count > 0xfff00000) { 9672 /* 9673 * reduce the number to keep us under 9674 * a uint32_t. 9675 */ 9676 rack->r_ctl.ack_count /= 2; 9677 rack->r_ctl.sack_count /= 2; 9678 } 9679 goto out_with_totals; 9680 } else { 9681 /* 9682 * Start the loop through the 9683 * rest of blocks, past the first block. 9684 */ 9685 moved_two = 0; 9686 loop_start = 1; 9687 } 9688 } 9689 /* Its a sack of some sort */ 9690 rack->r_ctl.sack_count++; 9691 if (rack->r_ctl.sack_count > 0xfff00000) { 9692 /* 9693 * reduce the number to keep us under 9694 * a uint32_t. 9695 */ 9696 rack->r_ctl.ack_count /= 2; 9697 rack->r_ctl.sack_count /= 2; 9698 } 9699 counter_u64_add(rack_sack_total, 1); 9700 if (rack->sack_attack_disable) { 9701 /* An attacker disablement is in place */ 9702 if (num_sack_blks > 1) { 9703 rack->r_ctl.sack_count += (num_sack_blks - 1); 9704 rack->r_ctl.sack_moved_extra++; 9705 counter_u64_add(rack_move_some, 1); 9706 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9707 rack->r_ctl.sack_moved_extra /= 2; 9708 rack->r_ctl.sack_noextra_move /= 2; 9709 } 9710 } 9711 goto out; 9712 } 9713 rsm = rack->r_ctl.rc_sacklast; 9714 for (i = loop_start; i < num_sack_blks; i++) { 9715 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9716 if (acked) { 9717 rack->r_wanted_output = 1; 9718 changed += acked; 9719 } 9720 if (moved_two) { 9721 /* 9722 * If we did not get a SACK for at least a MSS and 9723 * had to move at all, or if we moved more than our 9724 * threshold, it counts against the "extra" move. 9725 */ 9726 rack->r_ctl.sack_moved_extra += moved_two; 9727 counter_u64_add(rack_move_some, 1); 9728 } else { 9729 /* 9730 * else we did not have to move 9731 * any more than we would expect. 9732 */ 9733 rack->r_ctl.sack_noextra_move++; 9734 counter_u64_add(rack_move_none, 1); 9735 } 9736 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9737 /* 9738 * If the SACK was not a full MSS then 9739 * we add to sack_count the number of 9740 * MSS's (or possibly more than 9741 * a MSS if its a TSO send) we had to skip by. 9742 */ 9743 rack->r_ctl.sack_count += moved_two; 9744 counter_u64_add(rack_sack_total, moved_two); 9745 } 9746 /* 9747 * Now we need to setup for the next 9748 * round. First we make sure we won't 9749 * exceed the size of our uint32_t on 9750 * the various counts, and then clear out 9751 * moved_two. 
9752 */ 9753 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9754 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9755 rack->r_ctl.sack_moved_extra /= 2; 9756 rack->r_ctl.sack_noextra_move /= 2; 9757 } 9758 if (rack->r_ctl.sack_count > 0xfff00000) { 9759 rack->r_ctl.ack_count /= 2; 9760 rack->r_ctl.sack_count /= 2; 9761 } 9762 moved_two = 0; 9763 } 9764 out_with_totals: 9765 if (num_sack_blks > 1) { 9766 /* 9767 * You get an extra stroke if 9768 * you have more than one sack-blk, this 9769 * could be where we are skipping forward 9770 * and the sack-filter is still working, or 9771 * it could be an attacker constantly 9772 * moving us. 9773 */ 9774 rack->r_ctl.sack_moved_extra++; 9775 counter_u64_add(rack_move_some, 1); 9776 } 9777 out: 9778 #ifdef NETFLIX_EXP_DETECTION 9779 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9780 #endif 9781 if (changed) { 9782 /* Something changed cancel the rack timer */ 9783 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9784 } 9785 tsused = tcp_get_usecs(NULL); 9786 rsm = tcp_rack_output(tp, rack, tsused); 9787 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9788 rsm && 9789 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9790 /* Enter recovery */ 9791 entered_recovery = 1; 9792 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9793 /* 9794 * When we enter recovery we need to assure we send 9795 * one packet. 9796 */ 9797 if (rack->rack_no_prr == 0) { 9798 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9799 rack_log_to_prr(rack, 8, 0, __LINE__); 9800 } 9801 rack->r_timer_override = 1; 9802 rack->r_early = 0; 9803 rack->r_ctl.rc_agg_early = 0; 9804 } else if (IN_FASTRECOVERY(tp->t_flags) && 9805 rsm && 9806 (rack->r_rr_config == 3)) { 9807 /* 9808 * Assure we can output and we get no 9809 * remembered pace time except the retransmit. 9810 */ 9811 rack->r_timer_override = 1; 9812 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9813 rack->r_ctl.rc_resend = rsm; 9814 } 9815 if (IN_FASTRECOVERY(tp->t_flags) && 9816 (rack->rack_no_prr == 0) && 9817 (entered_recovery == 0)) { 9818 rack_update_prr(tp, rack, changed, th_ack); 9819 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9820 ((tcp_in_hpts(rack->rc_inp) == 0) && 9821 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9822 /* 9823 * If you are pacing output you don't want 9824 * to override. 9825 */ 9826 rack->r_early = 0; 9827 rack->r_ctl.rc_agg_early = 0; 9828 rack->r_timer_override = 1; 9829 } 9830 } 9831 } 9832 9833 static void 9834 rack_strike_dupack(struct tcp_rack *rack) 9835 { 9836 struct rack_sendmap *rsm; 9837 9838 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9839 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9840 rsm = TAILQ_NEXT(rsm, r_tnext); 9841 if (rsm->r_flags & RACK_MUST_RXT) { 9842 /* Sendmap entries that are marked to 9843 * be retransmitted do not need dupack's 9844 * struck. We get these marks for a number 9845 * of reasons (rxt timeout with no sack, 9846 * mtu change, or rwnd collapses). When 9847 * these events occur, we know we must retransmit 9848 * them and mark the sendmap entries. Dupack counting 9849 * is not needed since we are already set to retransmit 9850 * it as soon as we can. 9851 */ 9852 continue; 9853 } 9854 } 9855 if (rsm && (rsm->r_dupack < 0xff)) { 9856 rsm->r_dupack++; 9857 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9858 struct timeval tv; 9859 uint32_t cts; 9860 /* 9861 * Here we see if we need to retransmit. 
For 9862 * a SACK type connection if enough time has passed 9863 * we will get a return of the rsm. For a non-sack 9864 * connection we will get the rsm returned if the 9865 * dupack value is 3 or more. 9866 */ 9867 cts = tcp_get_usecs(&tv); 9868 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9869 if (rack->r_ctl.rc_resend != NULL) { 9870 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9871 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9872 rack->rc_tp->snd_una, __LINE__); 9873 } 9874 rack->r_wanted_output = 1; 9875 rack->r_timer_override = 1; 9876 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9877 } 9878 } else { 9879 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9880 } 9881 } 9882 } 9883 9884 static void 9885 rack_check_bottom_drag(struct tcpcb *tp, 9886 struct tcp_rack *rack, 9887 struct socket *so, int32_t acked) 9888 { 9889 uint32_t segsiz, minseg; 9890 9891 segsiz = ctf_fixed_maxseg(tp); 9892 minseg = segsiz; 9893 9894 if (tp->snd_max == tp->snd_una) { 9895 /* 9896 * We are doing dynamic pacing and we are way 9897 * under. Basically everything got acked while 9898 * we were still waiting on the pacer to expire. 9899 * 9900 * This means we need to boost the b/w in 9901 * addition to any earlier boosting of 9902 * the multiplier. 9903 */ 9904 rack->rc_dragged_bottom = 1; 9905 rack_validate_multipliers_at_or_above100(rack); 9906 /* 9907 * Lets use the segment bytes acked plus 9908 * the lowest RTT seen as the basis to 9909 * form a b/w estimate. This will be off 9910 * due to the fact that the true estimate 9911 * should be around 1/2 the time of the RTT 9912 * but we can settle for that. 9913 */ 9914 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9915 acked) { 9916 uint64_t bw, calc_bw, rtt; 9917 9918 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9919 if (rtt == 0) { 9920 /* no us sample is there a ms one? */ 9921 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9922 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9923 } else { 9924 goto no_measurement; 9925 } 9926 } 9927 bw = acked; 9928 calc_bw = bw * 1000000; 9929 calc_bw /= rtt; 9930 if (rack->r_ctl.last_max_bw && 9931 (rack->r_ctl.last_max_bw < calc_bw)) { 9932 /* 9933 * If we have a last calculated max bw 9934 * enforce it. 9935 */ 9936 calc_bw = rack->r_ctl.last_max_bw; 9937 } 9938 /* now plop it in */ 9939 if (rack->rc_gp_filled == 0) { 9940 if (calc_bw > ONE_POINT_TWO_MEG) { 9941 /* 9942 * If we have no measurement 9943 * don't let us set in more than 9944 * 1.2Mbps. If we are still too 9945 * low after pacing with this we 9946 * will hopefully have a max b/w 9947 * available to sanity check things. 
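 * calc_bw here is in bytes per second (acked bytes scaled by
 * 1,000,000 over the rtt in microseconds), so e.g. (illustrative
 * numbers) 1448 bytes acked across a 10000 usec rtt computes to
 * 144,800 bytes/sec, i.e. a little under 1.2 Mbit/s.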
9948 */ 9949 calc_bw = ONE_POINT_TWO_MEG; 9950 } 9951 rack->r_ctl.rc_rtt_diff = 0; 9952 rack->r_ctl.gp_bw = calc_bw; 9953 rack->rc_gp_filled = 1; 9954 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9955 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9956 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9957 } else if (calc_bw > rack->r_ctl.gp_bw) { 9958 rack->r_ctl.rc_rtt_diff = 0; 9959 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9960 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9961 rack->r_ctl.gp_bw = calc_bw; 9962 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9963 } else 9964 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9965 if ((rack->gp_ready == 0) && 9966 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9967 /* We have enough measurements now */ 9968 rack->gp_ready = 1; 9969 rack_set_cc_pacing(rack); 9970 if (rack->defer_options) 9971 rack_apply_deferred_options(rack); 9972 } 9973 /* 9974 * For acks over 1mss we do a extra boost to simulate 9975 * where we would get 2 acks (we want 110 for the mul). 9976 */ 9977 if (acked > segsiz) 9978 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9979 } else { 9980 /* 9981 * zero rtt possibly?, settle for just an old increase. 9982 */ 9983 no_measurement: 9984 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9985 } 9986 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9987 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9988 minseg)) && 9989 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9990 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9991 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9992 (segsiz * rack_req_segs))) { 9993 /* 9994 * We are doing dynamic GP pacing and 9995 * we have everything except 1MSS or less 9996 * bytes left out. We are still pacing away. 9997 * And there is data that could be sent, This 9998 * means we are inserting delayed ack time in 9999 * our measurements because we are pacing too slow. 10000 */ 10001 rack_validate_multipliers_at_or_above100(rack); 10002 rack->rc_dragged_bottom = 1; 10003 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10004 } 10005 } 10006 10007 10008 10009 static void 10010 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 10011 { 10012 /* 10013 * The fast output path is enabled and we 10014 * have moved the cumack forward. Lets see if 10015 * we can expand forward the fast path length by 10016 * that amount. What we would ideally like to 10017 * do is increase the number of bytes in the 10018 * fast path block (left_to_send) by the 10019 * acked amount. However we have to gate that 10020 * by two factors: 10021 * 1) The amount outstanding and the rwnd of the peer 10022 * (i.e. we don't want to exceed the rwnd of the peer). 10023 * <and> 10024 * 2) The amount of data left in the socket buffer (i.e. 10025 * we can't send beyond what is in the buffer). 10026 * 10027 * Note that this does not take into account any increase 10028 * in the cwnd. We will only extend the fast path by 10029 * what was acked. 
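 * In code terms: new_total = left_to_send + acked is applied only when it is <= min(sbavail - outstanding, snd_wnd - outstanding); otherwise left_to_send is left untouched.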
10030 */ 10031 uint32_t new_total, gating_val; 10032 10033 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 10034 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 10035 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 10036 if (new_total <= gating_val) { 10037 /* We can increase left_to_send by the acked amount */ 10038 counter_u64_add(rack_extended_rfo, 1); 10039 rack->r_ctl.fsb.left_to_send = new_total; 10040 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 10041 ("rack:%p left_to_send:%u sbavail:%u out:%u", 10042 rack, rack->r_ctl.fsb.left_to_send, 10043 sbavail(&rack->rc_inp->inp_socket->so_snd), 10044 (tp->snd_max - tp->snd_una))); 10045 10046 } 10047 } 10048 10049 static void 10050 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 10051 { 10052 /* 10053 * Here any sendmap entry that points to the 10054 * beginning mbuf must be adjusted to the correct 10055 * offset. This must be called with: 10056 * 1) The socket buffer locked 10057 * 2) snd_una adjusted to its new postion. 10058 * 10059 * Note that (2) implies rack_ack_received has also 10060 * been called. 10061 * 10062 * We grab the first mbuf in the socket buffer and 10063 * then go through the front of the sendmap, recalculating 10064 * the stored offset for any sendmap entry that has 10065 * that mbuf. We must use the sb functions to do this 10066 * since its possible an add was done has well as 10067 * the subtraction we may have just completed. This should 10068 * not be a penalty though, since we just referenced the sb 10069 * to go in and trim off the mbufs that we freed (of course 10070 * there will be a penalty for the sendmap references though). 10071 */ 10072 struct mbuf *m; 10073 struct rack_sendmap *rsm; 10074 10075 SOCKBUF_LOCK_ASSERT(sb); 10076 m = sb->sb_mb; 10077 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 10078 if ((rsm == NULL) || (m == NULL)) { 10079 /* Nothing outstanding */ 10080 return; 10081 } 10082 while (rsm->m && (rsm->m == m)) { 10083 /* one to adjust */ 10084 #ifdef INVARIANTS 10085 struct mbuf *tm; 10086 uint32_t soff; 10087 10088 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 10089 if (rsm->orig_m_len != m->m_len) { 10090 rack_adjust_orig_mlen(rsm); 10091 } 10092 if (rsm->soff != soff) { 10093 /* 10094 * This is not a fatal error, we anticipate it 10095 * might happen (the else code), so we count it here 10096 * so that under invariant we can see that it really 10097 * does happen. 10098 */ 10099 counter_u64_add(rack_adjust_map_bw, 1); 10100 } 10101 rsm->m = tm; 10102 rsm->soff = soff; 10103 if (tm) 10104 rsm->orig_m_len = rsm->m->m_len; 10105 else 10106 rsm->orig_m_len = 0; 10107 #else 10108 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 10109 if (rsm->m) 10110 rsm->orig_m_len = rsm->m->m_len; 10111 else 10112 rsm->orig_m_len = 0; 10113 #endif 10114 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 10115 rsm); 10116 if (rsm == NULL) 10117 break; 10118 } 10119 } 10120 10121 /* 10122 * Return value of 1, we do not need to call rack_process_data(). 10123 * return value of 0, rack_process_data can be called. 10124 * For ret_val if its 0 the TCP is locked, if its non-zero 10125 * its unlocked and probably unsafe to touch the TCB. 
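 * Callers that get a 1 back simply return *ret_val to their own caller and do not touch the tcpcb again.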
10126 */ 10127 static int 10128 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10129 struct tcpcb *tp, struct tcpopt *to, 10130 uint32_t tiwin, int32_t tlen, 10131 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10132 { 10133 int32_t ourfinisacked = 0; 10134 int32_t nsegs, acked_amount; 10135 int32_t acked; 10136 struct mbuf *mfree; 10137 struct tcp_rack *rack; 10138 int32_t under_pacing = 0; 10139 int32_t recovery = 0; 10140 10141 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10142 10143 rack = (struct tcp_rack *)tp->t_fb_ptr; 10144 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10145 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10146 &rack->r_ctl.challenge_ack_ts, 10147 &rack->r_ctl.challenge_ack_cnt); 10148 rack->r_wanted_output = 1; 10149 return (1); 10150 } 10151 if (rack->gp_ready && 10152 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10153 under_pacing = 1; 10154 } 10155 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10156 int in_rec, dup_ack_struck = 0; 10157 10158 in_rec = IN_FASTRECOVERY(tp->t_flags); 10159 if (rack->rc_in_persist) { 10160 tp->t_rxtshift = 0; 10161 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10162 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10163 } 10164 if ((th->th_ack == tp->snd_una) && 10165 (tiwin == tp->snd_wnd) && 10166 ((to->to_flags & TOF_SACK) == 0)) { 10167 rack_strike_dupack(rack); 10168 dup_ack_struck = 1; 10169 } 10170 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10171 } 10172 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10173 /* 10174 * Old ack, behind (or duplicate to) the last one rcv'd 10175 * Note: We mark reordering is occuring if its 10176 * less than and we have not closed our window. 10177 */ 10178 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10179 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10180 } 10181 return (0); 10182 } 10183 /* 10184 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10185 * something we sent. 10186 */ 10187 if (tp->t_flags & TF_NEEDSYN) { 10188 /* 10189 * T/TCP: Connection was half-synchronized, and our SYN has 10190 * been ACK'd (so connection is now fully synchronized). Go 10191 * to non-starred state, increment snd_una for ACK of SYN, 10192 * and check if we can do window scaling. 10193 */ 10194 tp->t_flags &= ~TF_NEEDSYN; 10195 tp->snd_una++; 10196 /* Do window scaling? */ 10197 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10198 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10199 tp->rcv_scale = tp->request_r_scale; 10200 /* Send window already scaled. */ 10201 } 10202 } 10203 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10204 10205 acked = BYTES_THIS_ACK(tp, th); 10206 if (acked) { 10207 /* 10208 * Any time we move the cum-ack forward clear 10209 * keep-alive tied probe-not-answered. The 10210 * persists clears its own on entry. 10211 */ 10212 rack->probe_not_answered = 0; 10213 } 10214 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10215 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10216 /* 10217 * If we just performed our first retransmit, and the ACK arrives 10218 * within our recovery window, then it was a mistake to do the 10219 * retransmit in the first place. Recover our original cwnd and 10220 * ssthresh, and proceed to transmit where we left off. 
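 * The check below keys off TF_PREVVALID and t_badrxtwin, which are armed when the first retransmission is sent; CC_RTO_ERR restores the cwnd and ssthresh that were saved at that point.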
10221 */ 10222 if ((tp->t_flags & TF_PREVVALID) && 10223 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10224 tp->t_flags &= ~TF_PREVVALID; 10225 if (tp->t_rxtshift == 1 && 10226 (int)(ticks - tp->t_badrxtwin) < 0) 10227 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10228 } 10229 if (acked) { 10230 /* assure we are not backed off */ 10231 tp->t_rxtshift = 0; 10232 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10233 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10234 rack->rc_tlp_in_progress = 0; 10235 rack->r_ctl.rc_tlp_cnt_out = 0; 10236 /* 10237 * If it is the RXT timer we want to 10238 * stop it, so we can restart a TLP. 10239 */ 10240 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10241 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10242 #ifdef NETFLIX_HTTP_LOGGING 10243 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10244 #endif 10245 } 10246 /* 10247 * If we have a timestamp reply, update smoothed round trip time. If 10248 * no timestamp is present but transmit timer is running and timed 10249 * sequence number was acked, update smoothed round trip time. Since 10250 * we now have an rtt measurement, cancel the timer backoff (cf., 10251 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10252 * timer. 10253 * 10254 * Some boxes send broken timestamp replies during the SYN+ACK 10255 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10256 * and blow up the retransmit timer. 10257 */ 10258 /* 10259 * If all outstanding data is acked, stop retransmit timer and 10260 * remember to restart (more output or persist). If there is more 10261 * data to be acked, restart retransmit timer, using current 10262 * (possibly backed-off) value. 10263 */ 10264 if (acked == 0) { 10265 if (ofia) 10266 *ofia = ourfinisacked; 10267 return (0); 10268 } 10269 if (IN_RECOVERY(tp->t_flags)) { 10270 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10271 (SEQ_LT(th->th_ack, tp->snd_max))) { 10272 tcp_rack_partialack(tp); 10273 } else { 10274 rack_post_recovery(tp, th->th_ack); 10275 recovery = 1; 10276 } 10277 } 10278 /* 10279 * Let the congestion control algorithm update congestion control 10280 * related information. This typically means increasing the 10281 * congestion window. 10282 */ 10283 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10284 SOCKBUF_LOCK(&so->so_snd); 10285 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10286 tp->snd_wnd -= acked_amount; 10287 mfree = sbcut_locked(&so->so_snd, acked_amount); 10288 if ((sbused(&so->so_snd) == 0) && 10289 (acked > acked_amount) && 10290 (tp->t_state >= TCPS_FIN_WAIT_1) && 10291 (tp->t_flags & TF_SENTFIN)) { 10292 /* 10293 * We must be sure our fin 10294 * was sent and acked (we can be 10295 * in FIN_WAIT_1 without having 10296 * sent the fin). 10297 */ 10298 ourfinisacked = 1; 10299 } 10300 tp->snd_una = th->th_ack; 10301 if (acked_amount && sbavail(&so->so_snd)) 10302 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10303 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10304 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10305 sowwakeup_locked(so); 10306 m_freem(mfree); 10307 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10308 tp->snd_recover = tp->snd_una; 10309 10310 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10311 tp->snd_nxt = tp->snd_una; 10312 } 10313 if (under_pacing && 10314 (rack->use_fixed_rate == 0) && 10315 (rack->in_probe_rtt == 0) && 10316 rack->rc_gp_dyn_mul && 10317 rack->rc_always_pace) { 10318 /* Check if we are dragging bottom */ 10319 rack_check_bottom_drag(tp, rack, so, acked); 10320 } 10321 if (tp->snd_una == tp->snd_max) { 10322 /* Nothing left outstanding */ 10323 tp->t_flags &= ~TF_PREVVALID; 10324 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10325 rack->r_ctl.retran_during_recovery = 0; 10326 rack->r_ctl.dsack_byte_cnt = 0; 10327 if (rack->r_ctl.rc_went_idle_time == 0) 10328 rack->r_ctl.rc_went_idle_time = 1; 10329 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10330 if (sbavail(&tptosocket(tp)->so_snd) == 0) 10331 tp->t_acktime = 0; 10332 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10333 /* Set need output so persist might get set */ 10334 rack->r_wanted_output = 1; 10335 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10336 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10337 (sbavail(&so->so_snd) == 0) && 10338 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10339 /* 10340 * The socket was gone and the 10341 * peer sent data (now or in the past), time to 10342 * reset him. 10343 */ 10344 *ret_val = 1; 10345 /* tcp_close will kill the inp pre-log the Reset */ 10346 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10347 tp = tcp_close(tp); 10348 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10349 return (1); 10350 } 10351 } 10352 if (ofia) 10353 *ofia = ourfinisacked; 10354 return (0); 10355 } 10356 10357 10358 static void 10359 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 10360 int dir, uint32_t flags, struct rack_sendmap *rsm) 10361 { 10362 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 10363 union tcp_log_stackspecific log; 10364 struct timeval tv; 10365 10366 memset(&log, 0, sizeof(log)); 10367 log.u_bbr.flex1 = cnt; 10368 log.u_bbr.flex2 = split; 10369 log.u_bbr.flex3 = out; 10370 log.u_bbr.flex4 = line; 10371 log.u_bbr.flex5 = rack->r_must_retran; 10372 log.u_bbr.flex6 = flags; 10373 log.u_bbr.flex7 = rack->rc_has_collapsed; 10374 log.u_bbr.flex8 = dir; /* 10375 * 1 is collapsed, 0 is uncollapsed, 10376 * 2 is log of a rsm being marked, 3 is a split. 10377 */ 10378 if (rsm == NULL) 10379 log.u_bbr.rttProp = 0; 10380 else 10381 log.u_bbr.rttProp = (uint64_t)rsm; 10382 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10383 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10384 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10385 &rack->rc_inp->inp_socket->so_rcv, 10386 &rack->rc_inp->inp_socket->so_snd, 10387 TCP_RACK_LOG_COLLAPSE, 0, 10388 0, &log, false, &tv); 10389 } 10390 } 10391 10392 static void 10393 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line) 10394 { 10395 /* 10396 * Here all we do is mark the collapsed point and set the flag. 10397 * This may happen again and again, but there is no 10398 * sense splitting our map until we know where the 10399 * peer finally lands in the collapse. 
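 * The collapse point recorded below is snd_una + snd_wnd, i.e. the first sequence the shrunken window no longer covers; the actual map split is deferred to rack_un_collapse_window().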
10400 */ 10401 rack_trace_point(rack, RACK_TP_COLLAPSED_WND); 10402 if ((rack->rc_has_collapsed == 0) || 10403 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd))) 10404 counter_u64_add(rack_collapsed_win_seen, 1); 10405 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10406 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 10407 rack->rc_has_collapsed = 1; 10408 rack->r_collapse_point_valid = 1; 10409 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 10410 } 10411 10412 static void 10413 rack_un_collapse_window(struct tcp_rack *rack, int line) 10414 { 10415 struct rack_sendmap *nrsm, *rsm, fe; 10416 int cnt = 0, split = 0; 10417 #ifdef INVARIANTS 10418 struct rack_sendmap *insret; 10419 #endif 10420 10421 memset(&fe, 0, sizeof(fe)); 10422 rack->rc_has_collapsed = 0; 10423 fe.r_start = rack->r_ctl.last_collapse_point; 10424 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10425 if (rsm == NULL) { 10426 /* Nothing to do maybe the peer ack'ed it all */ 10427 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10428 return; 10429 } 10430 /* Now do we need to split this one? */ 10431 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 10432 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 10433 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 10434 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10435 if (nrsm == NULL) { 10436 /* We can't get a rsm, mark all? */ 10437 nrsm = rsm; 10438 goto no_split; 10439 } 10440 /* Clone it */ 10441 split = 1; 10442 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 10443 #ifndef INVARIANTS 10444 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10445 #else 10446 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10447 if (insret != NULL) { 10448 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10449 nrsm, insret, rack, rsm); 10450 } 10451 #endif 10452 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 10453 rack->r_ctl.last_collapse_point, __LINE__); 10454 if (rsm->r_in_tmap) { 10455 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10456 nrsm->r_in_tmap = 1; 10457 } 10458 /* 10459 * Set in the new RSM as the 10460 * collapsed starting point 10461 */ 10462 rsm = nrsm; 10463 } 10464 no_split: 10465 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10466 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10467 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 10468 cnt++; 10469 } 10470 if (cnt) { 10471 counter_u64_add(rack_collapsed_win, 1); 10472 } 10473 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10474 } 10475 10476 static void 10477 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10478 int32_t tlen, int32_t tfo_syn) 10479 { 10480 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10481 if (rack->rc_dack_mode && 10482 (tlen > 500) && 10483 (rack->rc_dack_toggle == 1)) { 10484 goto no_delayed_ack; 10485 } 10486 rack_timer_cancel(tp, rack, 10487 rack->r_ctl.rc_rcvtime, __LINE__); 10488 tp->t_flags |= TF_DELACK; 10489 } else { 10490 no_delayed_ack: 10491 rack->r_wanted_output = 1; 10492 tp->t_flags |= TF_ACKNOW; 10493 if (rack->rc_dack_mode) { 10494 if (tp->t_flags & TF_DELACK) 10495 rack->rc_dack_toggle = 1; 10496 else 10497 rack->rc_dack_toggle = 0; 10498 } 10499 } 10500 } 10501 10502 static void 10503 rack_validate_fo_sendwin_up(struct tcpcb 
*tp, struct tcp_rack *rack) 10504 { 10505 /* 10506 * If fast output is in progress, lets validate that 10507 * the new window did not shrink on us and make it 10508 * so fast output should end. 10509 */ 10510 if (rack->r_fast_output) { 10511 uint32_t out; 10512 10513 /* 10514 * Calculate what we will send if left as is 10515 * and compare that to our send window. 10516 */ 10517 out = ctf_outstanding(tp); 10518 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10519 /* ok we have an issue */ 10520 if (out >= tp->snd_wnd) { 10521 /* Turn off fast output the window is met or collapsed */ 10522 rack->r_fast_output = 0; 10523 } else { 10524 /* we have some room left */ 10525 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10526 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10527 /* If not at least 1 full segment never mind */ 10528 rack->r_fast_output = 0; 10529 } 10530 } 10531 } 10532 } 10533 } 10534 10535 10536 /* 10537 * Return value of 1, the TCB is unlocked and most 10538 * likely gone, return value of 0, the TCP is still 10539 * locked. 10540 */ 10541 static int 10542 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10543 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10544 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10545 { 10546 /* 10547 * Update window information. Don't look at window if no ACK: TAC's 10548 * send garbage on first SYN. 10549 */ 10550 int32_t nsegs; 10551 int32_t tfo_syn; 10552 struct tcp_rack *rack; 10553 10554 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10555 10556 rack = (struct tcp_rack *)tp->t_fb_ptr; 10557 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10558 if ((thflags & TH_ACK) && 10559 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10560 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10561 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10562 /* keep track of pure window updates */ 10563 if (tlen == 0 && 10564 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10565 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10566 tp->snd_wnd = tiwin; 10567 rack_validate_fo_sendwin_up(tp, rack); 10568 tp->snd_wl1 = th->th_seq; 10569 tp->snd_wl2 = th->th_ack; 10570 if (tp->snd_wnd > tp->max_sndwnd) 10571 tp->max_sndwnd = tp->snd_wnd; 10572 rack->r_wanted_output = 1; 10573 } else if (thflags & TH_ACK) { 10574 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10575 tp->snd_wnd = tiwin; 10576 rack_validate_fo_sendwin_up(tp, rack); 10577 tp->snd_wl1 = th->th_seq; 10578 tp->snd_wl2 = th->th_ack; 10579 } 10580 } 10581 if (tp->snd_wnd < ctf_outstanding(tp)) 10582 /* The peer collapsed the window */ 10583 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10584 else if (rack->rc_has_collapsed) 10585 rack_un_collapse_window(rack, __LINE__); 10586 if ((rack->r_collapse_point_valid) && 10587 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 10588 rack->r_collapse_point_valid = 0; 10589 /* Was persist timer active and now we have window space? */ 10590 if ((rack->rc_in_persist != 0) && 10591 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10592 rack->r_ctl.rc_pace_min_segs))) { 10593 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10594 tp->snd_nxt = tp->snd_max; 10595 /* Make sure we output to start the timer */ 10596 rack->r_wanted_output = 1; 10597 } 10598 /* Do we enter persists? 
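Only when the offered window is below min(half the largest rwnd we have seen, our minimum pacing segment size), the connection is established, everything is acked (or the window has collapsed), and more data is queued than the window will accept.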
*/ 10599 if ((rack->rc_in_persist == 0) && 10600 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10601 TCPS_HAVEESTABLISHED(tp->t_state) && 10602 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10603 sbavail(&tptosocket(tp)->so_snd) && 10604 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10605 /* 10606 * Here the rwnd is less than 10607 * the pacing size, we are established, 10608 * nothing is outstanding, and there is 10609 * data to send. Enter persists. 10610 */ 10611 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10612 } 10613 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10614 m_freem(m); 10615 return (0); 10616 } 10617 /* 10618 * don't process the URG bit, ignore them drag 10619 * along the up. 10620 */ 10621 tp->rcv_up = tp->rcv_nxt; 10622 10623 /* 10624 * Process the segment text, merging it into the TCP sequencing 10625 * queue, and arranging for acknowledgment of receipt if necessary. 10626 * This process logically involves adjusting tp->rcv_wnd as data is 10627 * presented to the user (this happens in tcp_usrreq.c, case 10628 * PRU_RCVD). If a FIN has already been received on this connection 10629 * then we just ignore the text. 10630 */ 10631 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10632 IS_FASTOPEN(tp->t_flags)); 10633 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10634 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10635 tcp_seq save_start = th->th_seq; 10636 tcp_seq save_rnxt = tp->rcv_nxt; 10637 int save_tlen = tlen; 10638 10639 m_adj(m, drop_hdrlen); /* delayed header drop */ 10640 /* 10641 * Insert segment which includes th into TCP reassembly 10642 * queue with control block tp. Set thflags to whether 10643 * reassembly now includes a segment with FIN. This handles 10644 * the common case inline (segment is the next to be 10645 * received on an established connection, and the queue is 10646 * empty), avoiding linkage into and removal from the queue 10647 * and repetition of various conversions. Set DELACK for 10648 * segments received in order, but ack immediately when 10649 * segments are out of order (so fast retransmit can work). 10650 */ 10651 if (th->th_seq == tp->rcv_nxt && 10652 SEGQ_EMPTY(tp) && 10653 (TCPS_HAVEESTABLISHED(tp->t_state) || 10654 tfo_syn)) { 10655 #ifdef NETFLIX_SB_LIMITS 10656 u_int mcnt, appended; 10657 10658 if (so->so_rcv.sb_shlim) { 10659 mcnt = m_memcnt(m); 10660 appended = 0; 10661 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10662 CFO_NOSLEEP, NULL) == false) { 10663 counter_u64_add(tcp_sb_shlim_fails, 1); 10664 m_freem(m); 10665 return (0); 10666 } 10667 } 10668 #endif 10669 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10670 tp->rcv_nxt += tlen; 10671 if (tlen && 10672 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10673 (tp->t_fbyte_in == 0)) { 10674 tp->t_fbyte_in = ticks; 10675 if (tp->t_fbyte_in == 0) 10676 tp->t_fbyte_in = 1; 10677 if (tp->t_fbyte_out && tp->t_fbyte_in) 10678 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10679 } 10680 thflags = tcp_get_flags(th) & TH_FIN; 10681 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10682 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10683 SOCKBUF_LOCK(&so->so_rcv); 10684 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10685 m_freem(m); 10686 } else 10687 #ifdef NETFLIX_SB_LIMITS 10688 appended = 10689 #endif 10690 sbappendstream_locked(&so->so_rcv, m, 0); 10691 10692 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10693 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10694 sorwakeup_locked(so); 10695 #ifdef NETFLIX_SB_LIMITS 10696 if (so->so_rcv.sb_shlim && appended != mcnt) 10697 counter_fo_release(so->so_rcv.sb_shlim, 10698 mcnt - appended); 10699 #endif 10700 } else { 10701 /* 10702 * XXX: Due to the header drop above "th" is 10703 * theoretically invalid by now. Fortunately 10704 * m_adj() doesn't actually frees any mbufs when 10705 * trimming from the head. 10706 */ 10707 tcp_seq temp = save_start; 10708 10709 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10710 tp->t_flags |= TF_ACKNOW; 10711 if (tp->t_flags & TF_WAKESOR) { 10712 tp->t_flags &= ~TF_WAKESOR; 10713 /* NB: sorwakeup_locked() does an implicit unlock. */ 10714 sorwakeup_locked(so); 10715 } 10716 } 10717 if ((tp->t_flags & TF_SACK_PERMIT) && 10718 (save_tlen > 0) && 10719 TCPS_HAVEESTABLISHED(tp->t_state)) { 10720 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10721 /* 10722 * DSACK actually handled in the fastpath 10723 * above. 10724 */ 10725 RACK_OPTS_INC(tcp_sack_path_1); 10726 tcp_update_sack_list(tp, save_start, 10727 save_start + save_tlen); 10728 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10729 if ((tp->rcv_numsacks >= 1) && 10730 (tp->sackblks[0].end == save_start)) { 10731 /* 10732 * Partial overlap, recorded at todrop 10733 * above. 10734 */ 10735 RACK_OPTS_INC(tcp_sack_path_2a); 10736 tcp_update_sack_list(tp, 10737 tp->sackblks[0].start, 10738 tp->sackblks[0].end); 10739 } else { 10740 RACK_OPTS_INC(tcp_sack_path_2b); 10741 tcp_update_dsack_list(tp, save_start, 10742 save_start + save_tlen); 10743 } 10744 } else if (tlen >= save_tlen) { 10745 /* Update of sackblks. */ 10746 RACK_OPTS_INC(tcp_sack_path_3); 10747 tcp_update_dsack_list(tp, save_start, 10748 save_start + save_tlen); 10749 } else if (tlen > 0) { 10750 RACK_OPTS_INC(tcp_sack_path_4); 10751 tcp_update_dsack_list(tp, save_start, 10752 save_start + tlen); 10753 } 10754 } 10755 } else { 10756 m_freem(m); 10757 thflags &= ~TH_FIN; 10758 } 10759 10760 /* 10761 * If FIN is received ACK the FIN and let the user know that the 10762 * connection is closing. 10763 */ 10764 if (thflags & TH_FIN) { 10765 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10766 /* The socket upcall is handled by socantrcvmore. */ 10767 socantrcvmore(so); 10768 /* 10769 * If connection is half-synchronized (ie NEEDSYN 10770 * flag on) then delay ACK, so it may be piggybacked 10771 * when SYN is sent. Otherwise, since we received a 10772 * FIN then no more input can be expected, send ACK 10773 * now. 10774 */ 10775 if (tp->t_flags & TF_NEEDSYN) { 10776 rack_timer_cancel(tp, rack, 10777 rack->r_ctl.rc_rcvtime, __LINE__); 10778 tp->t_flags |= TF_DELACK; 10779 } else { 10780 tp->t_flags |= TF_ACKNOW; 10781 } 10782 tp->rcv_nxt++; 10783 } 10784 switch (tp->t_state) { 10785 /* 10786 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10787 * CLOSE_WAIT state. 10788 */ 10789 case TCPS_SYN_RECEIVED: 10790 tp->t_starttime = ticks; 10791 /* FALLTHROUGH */ 10792 case TCPS_ESTABLISHED: 10793 rack_timer_cancel(tp, rack, 10794 rack->r_ctl.rc_rcvtime, __LINE__); 10795 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10796 break; 10797 10798 /* 10799 * If still in FIN_WAIT_1 STATE FIN has not been 10800 * acked so enter the CLOSING state. 10801 */ 10802 case TCPS_FIN_WAIT_1: 10803 rack_timer_cancel(tp, rack, 10804 rack->r_ctl.rc_rcvtime, __LINE__); 10805 tcp_state_change(tp, TCPS_CLOSING); 10806 break; 10807 10808 /* 10809 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10810 * starting the time-wait timer, turning off the 10811 * other standard timers. 
10812 */ 10813 case TCPS_FIN_WAIT_2: 10814 rack_timer_cancel(tp, rack, 10815 rack->r_ctl.rc_rcvtime, __LINE__); 10816 tcp_twstart(tp); 10817 return (1); 10818 } 10819 } 10820 /* 10821 * Return any desired output. 10822 */ 10823 if ((tp->t_flags & TF_ACKNOW) || 10824 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10825 rack->r_wanted_output = 1; 10826 } 10827 return (0); 10828 } 10829 10830 /* 10831 * Here nothing is really faster, its just that we 10832 * have broken out the fast-data path also just like 10833 * the fast-ack. 10834 */ 10835 static int 10836 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10837 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10838 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10839 { 10840 int32_t nsegs; 10841 int32_t newsize = 0; /* automatic sockbuf scaling */ 10842 struct tcp_rack *rack; 10843 #ifdef NETFLIX_SB_LIMITS 10844 u_int mcnt, appended; 10845 #endif 10846 #ifdef TCPDEBUG 10847 /* 10848 * The size of tcp_saveipgen must be the size of the max ip header, 10849 * now IPv6. 10850 */ 10851 u_char tcp_saveipgen[IP6_HDR_LEN]; 10852 struct tcphdr tcp_savetcp; 10853 short ostate = 0; 10854 10855 #endif 10856 /* 10857 * If last ACK falls within this segment's sequence numbers, record 10858 * the timestamp. NOTE that the test is modified according to the 10859 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10860 */ 10861 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10862 return (0); 10863 } 10864 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10865 return (0); 10866 } 10867 if (tiwin && tiwin != tp->snd_wnd) { 10868 return (0); 10869 } 10870 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10871 return (0); 10872 } 10873 if (__predict_false((to->to_flags & TOF_TS) && 10874 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10875 return (0); 10876 } 10877 if (__predict_false((th->th_ack != tp->snd_una))) { 10878 return (0); 10879 } 10880 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10881 return (0); 10882 } 10883 if ((to->to_flags & TOF_TS) != 0 && 10884 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10885 tp->ts_recent_age = tcp_ts_getticks(); 10886 tp->ts_recent = to->to_tsval; 10887 } 10888 rack = (struct tcp_rack *)tp->t_fb_ptr; 10889 /* 10890 * This is a pure, in-sequence data packet with nothing on the 10891 * reassembly queue and we have enough buffer space to take it. 10892 */ 10893 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10894 10895 #ifdef NETFLIX_SB_LIMITS 10896 if (so->so_rcv.sb_shlim) { 10897 mcnt = m_memcnt(m); 10898 appended = 0; 10899 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10900 CFO_NOSLEEP, NULL) == false) { 10901 counter_u64_add(tcp_sb_shlim_fails, 1); 10902 m_freem(m); 10903 return (1); 10904 } 10905 } 10906 #endif 10907 /* Clean receiver SACK report if present */ 10908 if (tp->rcv_numsacks) 10909 tcp_clean_sackreport(tp); 10910 KMOD_TCPSTAT_INC(tcps_preddat); 10911 tp->rcv_nxt += tlen; 10912 if (tlen && 10913 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10914 (tp->t_fbyte_in == 0)) { 10915 tp->t_fbyte_in = ticks; 10916 if (tp->t_fbyte_in == 0) 10917 tp->t_fbyte_in = 1; 10918 if (tp->t_fbyte_out && tp->t_fbyte_in) 10919 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10920 } 10921 /* 10922 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10923 */ 10924 tp->snd_wl1 = th->th_seq; 10925 /* 10926 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 
10927 */ 10928 tp->rcv_up = tp->rcv_nxt; 10929 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10930 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10931 #ifdef TCPDEBUG 10932 if (so->so_options & SO_DEBUG) 10933 tcp_trace(TA_INPUT, ostate, tp, 10934 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10935 #endif 10936 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10937 10938 /* Add data to socket buffer. */ 10939 SOCKBUF_LOCK(&so->so_rcv); 10940 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10941 m_freem(m); 10942 } else { 10943 /* 10944 * Set new socket buffer size. Give up when limit is 10945 * reached. 10946 */ 10947 if (newsize) 10948 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 10949 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10950 m_adj(m, drop_hdrlen); /* delayed header drop */ 10951 #ifdef NETFLIX_SB_LIMITS 10952 appended = 10953 #endif 10954 sbappendstream_locked(&so->so_rcv, m, 0); 10955 ctf_calc_rwin(so, tp); 10956 } 10957 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10958 /* NB: sorwakeup_locked() does an implicit unlock. */ 10959 sorwakeup_locked(so); 10960 #ifdef NETFLIX_SB_LIMITS 10961 if (so->so_rcv.sb_shlim && mcnt != appended) 10962 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10963 #endif 10964 rack_handle_delayed_ack(tp, rack, tlen, 0); 10965 if (tp->snd_una == tp->snd_max) 10966 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10967 return (1); 10968 } 10969 10970 /* 10971 * This subfunction is used to try to highly optimize the 10972 * fast path. We again allow window updates that are 10973 * in sequence to remain in the fast-path. We also add 10974 * in the __predict's to attempt to help the compiler. 10975 * Note that if we return a 0, then we can *not* process 10976 * it and the caller should push the packet into the 10977 * slow-path. 10978 */ 10979 static int 10980 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10981 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10982 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10983 { 10984 int32_t acked; 10985 int32_t nsegs; 10986 #ifdef TCPDEBUG 10987 /* 10988 * The size of tcp_saveipgen must be the size of the max ip header, 10989 * now IPv6. 10990 */ 10991 u_char tcp_saveipgen[IP6_HDR_LEN]; 10992 struct tcphdr tcp_savetcp; 10993 short ostate = 0; 10994 #endif 10995 int32_t under_pacing = 0; 10996 struct tcp_rack *rack; 10997 10998 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10999 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11000 return (0); 11001 } 11002 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11003 /* Above what we have sent? */ 11004 return (0); 11005 } 11006 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11007 /* We are retransmitting */ 11008 return (0); 11009 } 11010 if (__predict_false(tiwin == 0)) { 11011 /* zero window */ 11012 return (0); 11013 } 11014 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11015 /* We need a SYN or a FIN, unlikely.. */ 11016 return (0); 11017 } 11018 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11019 /* Timestamp is behind .. old ack with seq wrap? 
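Either way this is not a clean fast-path candidate, so hand it to the slow path.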
*/ 11020 return (0); 11021 } 11022 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11023 /* Still recovering */ 11024 return (0); 11025 } 11026 rack = (struct tcp_rack *)tp->t_fb_ptr; 11027 if (rack->r_ctl.rc_sacked) { 11028 /* We have sack holes on our scoreboard */ 11029 return (0); 11030 } 11031 /* Ok if we reach here, we can process a fast-ack */ 11032 if (rack->gp_ready && 11033 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11034 under_pacing = 1; 11035 } 11036 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11037 rack_log_ack(tp, to, th, 0, 0); 11038 /* Did the window get updated? */ 11039 if (tiwin != tp->snd_wnd) { 11040 tp->snd_wnd = tiwin; 11041 rack_validate_fo_sendwin_up(tp, rack); 11042 tp->snd_wl1 = th->th_seq; 11043 if (tp->snd_wnd > tp->max_sndwnd) 11044 tp->max_sndwnd = tp->snd_wnd; 11045 } 11046 /* Do we exit persists? */ 11047 if ((rack->rc_in_persist != 0) && 11048 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11049 rack->r_ctl.rc_pace_min_segs))) { 11050 rack_exit_persist(tp, rack, cts); 11051 } 11052 /* Do we enter persists? */ 11053 if ((rack->rc_in_persist == 0) && 11054 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11055 TCPS_HAVEESTABLISHED(tp->t_state) && 11056 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 11057 sbavail(&tptosocket(tp)->so_snd) && 11058 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 11059 /* 11060 * Here the rwnd is less than 11061 * the pacing size, we are established, 11062 * nothing is outstanding, and there is 11063 * data to send. Enter persists. 11064 */ 11065 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11066 } 11067 /* 11068 * If last ACK falls within this segment's sequence numbers, record 11069 * the timestamp. NOTE that the test is modified according to the 11070 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 11071 */ 11072 if ((to->to_flags & TOF_TS) != 0 && 11073 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11074 tp->ts_recent_age = tcp_ts_getticks(); 11075 tp->ts_recent = to->to_tsval; 11076 } 11077 /* 11078 * This is a pure ack for outstanding data. 11079 */ 11080 KMOD_TCPSTAT_INC(tcps_predack); 11081 11082 /* 11083 * "bad retransmit" recovery. 11084 */ 11085 if ((tp->t_flags & TF_PREVVALID) && 11086 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11087 tp->t_flags &= ~TF_PREVVALID; 11088 if (tp->t_rxtshift == 1 && 11089 (int)(ticks - tp->t_badrxtwin) < 0) 11090 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11091 } 11092 /* 11093 * Recalculate the transmit timer / rtt. 11094 * 11095 * Some boxes send broken timestamp replies during the SYN+ACK 11096 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11097 * and blow up the retransmit timer. 11098 */ 11099 acked = BYTES_THIS_ACK(tp, th); 11100 11101 #ifdef TCP_HHOOK 11102 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. 
*/ 11103 hhook_run_tcp_est_in(tp, th, to); 11104 #endif 11105 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11106 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11107 if (acked) { 11108 struct mbuf *mfree; 11109 11110 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11111 SOCKBUF_LOCK(&so->so_snd); 11112 mfree = sbcut_locked(&so->so_snd, acked); 11113 tp->snd_una = th->th_ack; 11114 /* Note we want to hold the sb lock through the sendmap adjust */ 11115 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11116 /* Wake up the socket if we have room to write more */ 11117 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11118 sowwakeup_locked(so); 11119 m_freem(mfree); 11120 tp->t_rxtshift = 0; 11121 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11122 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11123 rack->rc_tlp_in_progress = 0; 11124 rack->r_ctl.rc_tlp_cnt_out = 0; 11125 /* 11126 * If it is the RXT timer we want to 11127 * stop it, so we can restart a TLP. 11128 */ 11129 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11130 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11131 #ifdef NETFLIX_HTTP_LOGGING 11132 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11133 #endif 11134 } 11135 /* 11136 * Let the congestion control algorithm update congestion control 11137 * related information. This typically means increasing the 11138 * congestion window. 11139 */ 11140 if (tp->snd_wnd < ctf_outstanding(tp)) { 11141 /* The peer collapsed the window */ 11142 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 11143 } else if (rack->rc_has_collapsed) 11144 rack_un_collapse_window(rack, __LINE__); 11145 if ((rack->r_collapse_point_valid) && 11146 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 11147 rack->r_collapse_point_valid = 0; 11148 /* 11149 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11150 */ 11151 tp->snd_wl2 = th->th_ack; 11152 tp->t_dupacks = 0; 11153 m_freem(m); 11154 /* ND6_HINT(tp); *//* Some progress has been made. */ 11155 11156 /* 11157 * If all outstanding data are acked, stop retransmit timer, 11158 * otherwise restart timer using current (possibly backed-off) 11159 * value. If process is waiting for space, wakeup/selwakeup/signal. 11160 * If data are ready to send, let tcp_output decide between more 11161 * output or persist. 
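 * In this stack that means the rack timer is cancelled below once snd_una == snd_max, and r_wanted_output is set whenever the send buffer still holds data.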
11162 */ 11163 #ifdef TCPDEBUG 11164 if (so->so_options & SO_DEBUG) 11165 tcp_trace(TA_INPUT, ostate, tp, 11166 (void *)tcp_saveipgen, 11167 &tcp_savetcp, 0); 11168 #endif 11169 if (under_pacing && 11170 (rack->use_fixed_rate == 0) && 11171 (rack->in_probe_rtt == 0) && 11172 rack->rc_gp_dyn_mul && 11173 rack->rc_always_pace) { 11174 /* Check if we are dragging bottom */ 11175 rack_check_bottom_drag(tp, rack, so, acked); 11176 } 11177 if (tp->snd_una == tp->snd_max) { 11178 tp->t_flags &= ~TF_PREVVALID; 11179 rack->r_ctl.retran_during_recovery = 0; 11180 rack->r_ctl.dsack_byte_cnt = 0; 11181 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11182 if (rack->r_ctl.rc_went_idle_time == 0) 11183 rack->r_ctl.rc_went_idle_time = 1; 11184 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11185 if (sbavail(&tptosocket(tp)->so_snd) == 0) 11186 tp->t_acktime = 0; 11187 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11188 } 11189 if (acked && rack->r_fast_output) 11190 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11191 if (sbavail(&so->so_snd)) { 11192 rack->r_wanted_output = 1; 11193 } 11194 return (1); 11195 } 11196 11197 /* 11198 * Return value of 1, the TCB is unlocked and most 11199 * likely gone, return value of 0, the TCP is still 11200 * locked. 11201 */ 11202 static int 11203 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11204 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11205 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11206 { 11207 int32_t ret_val = 0; 11208 int32_t todrop; 11209 int32_t ourfinisacked = 0; 11210 struct tcp_rack *rack; 11211 11212 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11213 11214 ctf_calc_rwin(so, tp); 11215 /* 11216 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11217 * SYN, drop the input. if seg contains a RST, then drop the 11218 * connection. if seg does not contain SYN, then drop it. Otherwise 11219 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11220 * tp->irs if seg contains ack then advance tp->snd_una if seg 11221 * contains an ECE and ECN support is enabled, the stream is ECN 11222 * capable. if SYN has been acked change to ESTABLISHED else 11223 * SYN_RCVD state arrange for segment to be acked (eventually) 11224 * continue processing rest of data/controls. 11225 */ 11226 if ((thflags & TH_ACK) && 11227 (SEQ_LEQ(th->th_ack, tp->iss) || 11228 SEQ_GT(th->th_ack, tp->snd_max))) { 11229 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11230 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11231 return (1); 11232 } 11233 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11234 TCP_PROBE5(connect__refused, NULL, tp, 11235 mtod(m, const char *), tp, th); 11236 tp = tcp_drop(tp, ECONNREFUSED); 11237 ctf_do_drop(m, tp); 11238 return (1); 11239 } 11240 if (thflags & TH_RST) { 11241 ctf_do_drop(m, tp); 11242 return (1); 11243 } 11244 if (!(thflags & TH_SYN)) { 11245 ctf_do_drop(m, tp); 11246 return (1); 11247 } 11248 tp->irs = th->th_seq; 11249 tcp_rcvseqinit(tp); 11250 rack = (struct tcp_rack *)tp->t_fb_ptr; 11251 if (thflags & TH_ACK) { 11252 int tfo_partial = 0; 11253 11254 KMOD_TCPSTAT_INC(tcps_connects); 11255 soisconnected(so); 11256 #ifdef MAC 11257 mac_socketpeer_set_from_mbuf(m, so); 11258 #endif 11259 /* Do window scaling on this connection? 
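Only if we requested it and the peer also advertised a window scale, i.e. both TF_REQ_SCALE and TF_RCVD_SCALE are set.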
*/ 11260 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11261 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11262 tp->rcv_scale = tp->request_r_scale; 11263 } 11264 tp->rcv_adv += min(tp->rcv_wnd, 11265 TCP_MAXWIN << tp->rcv_scale); 11266 /* 11267 * If not all the data that was sent in the TFO SYN 11268 * has been acked, resend the remainder right away. 11269 */ 11270 if (IS_FASTOPEN(tp->t_flags) && 11271 (tp->snd_una != tp->snd_max)) { 11272 tp->snd_nxt = th->th_ack; 11273 tfo_partial = 1; 11274 } 11275 /* 11276 * If there's data, delay ACK; if there's also a FIN ACKNOW 11277 * will be turned on later. 11278 */ 11279 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11280 rack_timer_cancel(tp, rack, 11281 rack->r_ctl.rc_rcvtime, __LINE__); 11282 tp->t_flags |= TF_DELACK; 11283 } else { 11284 rack->r_wanted_output = 1; 11285 tp->t_flags |= TF_ACKNOW; 11286 rack->rc_dack_toggle = 0; 11287 } 11288 11289 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11290 11291 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11292 /* 11293 * We advance snd_una for the 11294 * fast open case. If th_ack is 11295 * acknowledging data beyond 11296 * snd_una we can't just call 11297 * ack-processing since the 11298 * data stream in our send-map 11299 * will start at snd_una + 1 (one 11300 * beyond the SYN). If its just 11301 * equal we don't need to do that 11302 * and there is no send_map. 11303 */ 11304 tp->snd_una++; 11305 } 11306 /* 11307 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11308 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11309 */ 11310 tp->t_starttime = ticks; 11311 if (tp->t_flags & TF_NEEDFIN) { 11312 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11313 tp->t_flags &= ~TF_NEEDFIN; 11314 thflags &= ~TH_SYN; 11315 } else { 11316 tcp_state_change(tp, TCPS_ESTABLISHED); 11317 TCP_PROBE5(connect__established, NULL, tp, 11318 mtod(m, const char *), tp, th); 11319 rack_cc_conn_init(tp); 11320 } 11321 } else { 11322 /* 11323 * Received initial SYN in SYN-SENT[*] state => simultaneous 11324 * open. If segment contains CC option and there is a 11325 * cached CC, apply TAO test. If it succeeds, connection is * 11326 * half-synchronized. Otherwise, do 3-way handshake: 11327 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11328 * there was no CC option, clear cached CC value. 11329 */ 11330 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 11331 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11332 } 11333 /* 11334 * Advance th->th_seq to correspond to first data byte. If data, 11335 * trim to stay within window, dropping FIN if necessary. 11336 */ 11337 th->th_seq++; 11338 if (tlen > tp->rcv_wnd) { 11339 todrop = tlen - tp->rcv_wnd; 11340 m_adj(m, -todrop); 11341 tlen = tp->rcv_wnd; 11342 thflags &= ~TH_FIN; 11343 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11344 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11345 } 11346 tp->snd_wl1 = th->th_seq - 1; 11347 tp->rcv_up = th->th_seq; 11348 /* 11349 * Client side of transaction: already sent SYN and data. If the 11350 * remote host used T/TCP to validate the SYN, our data will be 11351 * ACK'd; if so, enter normal data segment processing in the middle 11352 * of step 5, ack processing. Otherwise, goto step 6. 
11353 */ 11354 if (thflags & TH_ACK) { 11355 /* For syn-sent we need to possibly update the rtt */ 11356 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11357 uint32_t t, mcts; 11358 11359 mcts = tcp_ts_getticks(); 11360 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11361 if (!tp->t_rttlow || tp->t_rttlow > t) 11362 tp->t_rttlow = t; 11363 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11364 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11365 tcp_rack_xmit_timer_commit(rack, tp); 11366 } 11367 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11368 return (ret_val); 11369 /* We may have changed to FIN_WAIT_1 above */ 11370 if (tp->t_state == TCPS_FIN_WAIT_1) { 11371 /* 11372 * In FIN_WAIT_1 STATE in addition to the processing 11373 * for the ESTABLISHED state if our FIN is now 11374 * acknowledged then enter FIN_WAIT_2. 11375 */ 11376 if (ourfinisacked) { 11377 /* 11378 * If we can't receive any more data, then 11379 * closing user can proceed. Starting the 11380 * timer is contrary to the specification, 11381 * but if we don't get a FIN we'll hang 11382 * forever. 11383 * 11384 * XXXjl: we should release the tp also, and 11385 * use a compressed state. 11386 */ 11387 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11388 soisdisconnected(so); 11389 tcp_timer_activate(tp, TT_2MSL, 11390 (tcp_fast_finwait2_recycle ? 11391 tcp_finwait2_timeout : 11392 TP_MAXIDLE(tp))); 11393 } 11394 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11395 } 11396 } 11397 } 11398 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11399 tiwin, thflags, nxt_pkt)); 11400 } 11401 11402 /* 11403 * Return value of 1, the TCB is unlocked and most 11404 * likely gone, return value of 0, the TCP is still 11405 * locked. 11406 */ 11407 static int 11408 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11409 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11410 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11411 { 11412 struct tcp_rack *rack; 11413 int32_t ret_val = 0; 11414 int32_t ourfinisacked = 0; 11415 11416 ctf_calc_rwin(so, tp); 11417 if ((thflags & TH_ACK) && 11418 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11419 SEQ_GT(th->th_ack, tp->snd_max))) { 11420 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11421 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11422 return (1); 11423 } 11424 rack = (struct tcp_rack *)tp->t_fb_ptr; 11425 if (IS_FASTOPEN(tp->t_flags)) { 11426 /* 11427 * When a TFO connection is in SYN_RECEIVED, the 11428 * only valid packets are the initial SYN, a 11429 * retransmit/copy of the initial SYN (possibly with 11430 * a subset of the original data), a valid ACK, a 11431 * FIN, or a RST. 
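 * Below, a SYN|ACK is answered with a reset, a re-sent SYN is dropped when a retransmit/TLP/rack timer is already pending, and segments carrying none of ACK, FIN or RST are dropped as well.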
11432 */ 11433 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11434 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11435 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11436 return (1); 11437 } else if (thflags & TH_SYN) { 11438 /* non-initial SYN is ignored */ 11439 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11440 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11441 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11442 ctf_do_drop(m, NULL); 11443 return (0); 11444 } 11445 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11446 ctf_do_drop(m, NULL); 11447 return (0); 11448 } 11449 } 11450 11451 if ((thflags & TH_RST) || 11452 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11453 return (__ctf_process_rst(m, th, so, tp, 11454 &rack->r_ctl.challenge_ack_ts, 11455 &rack->r_ctl.challenge_ack_cnt)); 11456 /* 11457 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11458 * it's less than ts_recent, drop it. 11459 */ 11460 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11461 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11462 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11463 return (ret_val); 11464 } 11465 /* 11466 * In the SYN-RECEIVED state, validate that the packet belongs to 11467 * this connection before trimming the data to fit the receive 11468 * window. Check the sequence number versus IRS since we know the 11469 * sequence numbers haven't wrapped. This is a partial fix for the 11470 * "LAND" DoS attack. 11471 */ 11472 if (SEQ_LT(th->th_seq, tp->irs)) { 11473 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11474 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11475 return (1); 11476 } 11477 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11478 &rack->r_ctl.challenge_ack_ts, 11479 &rack->r_ctl.challenge_ack_cnt)) { 11480 return (ret_val); 11481 } 11482 /* 11483 * If last ACK falls within this segment's sequence numbers, record 11484 * its timestamp. NOTE: 1) That the test incorporates suggestions 11485 * from the latest proposal of the tcplw@cray.com list (Braden 11486 * 1993/04/26). 2) That updating only on newer timestamps interferes 11487 * with our earlier PAWS tests, so this check should be solely 11488 * predicated on the sequence space of this segment. 3) That we 11489 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11490 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11491 * SEG.Len, This modified check allows us to overcome RFC1323's 11492 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11493 * p.869. In such cases, we can still calculate the RTT correctly 11494 * when RCV.NXT == Last.ACK.Sent. 11495 */ 11496 if ((to->to_flags & TOF_TS) != 0 && 11497 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11498 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11499 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11500 tp->ts_recent_age = tcp_ts_getticks(); 11501 tp->ts_recent = to->to_tsval; 11502 } 11503 tp->snd_wnd = tiwin; 11504 rack_validate_fo_sendwin_up(tp, rack); 11505 /* 11506 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11507 * is on (half-synchronized state), then queue data for later 11508 * processing; else drop segment and return. 
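 * Note that a TFO connection still gets rack_cc_conn_init() on this no-ACK path before the segment is handed to rack_process_data().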
11509 */ 11510 if ((thflags & TH_ACK) == 0) { 11511 if (IS_FASTOPEN(tp->t_flags)) { 11512 rack_cc_conn_init(tp); 11513 } 11514 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11515 tiwin, thflags, nxt_pkt)); 11516 } 11517 KMOD_TCPSTAT_INC(tcps_connects); 11518 if (tp->t_flags & TF_SONOTCONN) { 11519 tp->t_flags &= ~TF_SONOTCONN; 11520 soisconnected(so); 11521 } 11522 /* Do window scaling? */ 11523 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11524 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11525 tp->rcv_scale = tp->request_r_scale; 11526 } 11527 /* 11528 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11529 * FIN-WAIT-1 11530 */ 11531 tp->t_starttime = ticks; 11532 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11533 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11534 tp->t_tfo_pending = NULL; 11535 } 11536 if (tp->t_flags & TF_NEEDFIN) { 11537 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11538 tp->t_flags &= ~TF_NEEDFIN; 11539 } else { 11540 tcp_state_change(tp, TCPS_ESTABLISHED); 11541 TCP_PROBE5(accept__established, NULL, tp, 11542 mtod(m, const char *), tp, th); 11543 /* 11544 * TFO connections call cc_conn_init() during SYN 11545 * processing. Calling it again here for such connections 11546 * is not harmless as it would undo the snd_cwnd reduction 11547 * that occurs when a TFO SYN|ACK is retransmitted. 11548 */ 11549 if (!IS_FASTOPEN(tp->t_flags)) 11550 rack_cc_conn_init(tp); 11551 } 11552 /* 11553 * Account for the ACK of our SYN prior to 11554 * regular ACK processing below, except for 11555 * simultaneous SYN, which is handled later. 11556 */ 11557 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11558 tp->snd_una++; 11559 /* 11560 * If segment contains data or ACK, will call tcp_reass() later; if 11561 * not, do so now to pass queued data to user. 11562 */ 11563 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11564 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11565 (struct mbuf *)0); 11566 if (tp->t_flags & TF_WAKESOR) { 11567 tp->t_flags &= ~TF_WAKESOR; 11568 /* NB: sorwakeup_locked() does an implicit unlock. */ 11569 sorwakeup_locked(so); 11570 } 11571 } 11572 tp->snd_wl1 = th->th_seq - 1; 11573 /* For syn-recv we need to possibly update the rtt */ 11574 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11575 uint32_t t, mcts; 11576 11577 mcts = tcp_ts_getticks(); 11578 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11579 if (!tp->t_rttlow || tp->t_rttlow > t) 11580 tp->t_rttlow = t; 11581 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11582 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11583 tcp_rack_xmit_timer_commit(rack, tp); 11584 } 11585 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11586 return (ret_val); 11587 } 11588 if (tp->t_state == TCPS_FIN_WAIT_1) { 11589 /* We could have went to FIN_WAIT_1 (or EST) above */ 11590 /* 11591 * In FIN_WAIT_1 STATE in addition to the processing for the 11592 * ESTABLISHED state if our FIN is now acknowledged then 11593 * enter FIN_WAIT_2. 11594 */ 11595 if (ourfinisacked) { 11596 /* 11597 * If we can't receive any more data, then closing 11598 * user can proceed. Starting the timer is contrary 11599 * to the specification, but if we don't get a FIN 11600 * we'll hang forever. 11601 * 11602 * XXXjl: we should release the tp also, and use a 11603 * compressed state. 
11604 */ 11605 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11606 soisdisconnected(so); 11607 tcp_timer_activate(tp, TT_2MSL, 11608 (tcp_fast_finwait2_recycle ? 11609 tcp_finwait2_timeout : 11610 TP_MAXIDLE(tp))); 11611 } 11612 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11613 } 11614 } 11615 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11616 tiwin, thflags, nxt_pkt)); 11617 } 11618 11619 /* 11620 * Return value of 1, the TCB is unlocked and most 11621 * likely gone, return value of 0, the TCP is still 11622 * locked. 11623 */ 11624 static int 11625 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11626 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11627 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11628 { 11629 int32_t ret_val = 0; 11630 struct tcp_rack *rack; 11631 11632 /* 11633 * Header prediction: check for the two common cases of a 11634 * uni-directional data xfer. If the packet has no control flags, 11635 * is in-sequence, the window didn't change and we're not 11636 * retransmitting, it's a candidate. If the length is zero and the 11637 * ack moved forward, we're the sender side of the xfer. Just free 11638 * the data acked & wake any higher level process that was blocked 11639 * waiting for space. If the length is non-zero and the ack didn't 11640 * move, we're the receiver side. If we're getting packets in-order 11641 * (the reassembly queue is empty), add the data toc The socket 11642 * buffer and note that we need a delayed ack. Make sure that the 11643 * hidden state-flags are also off. Since we check for 11644 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11645 */ 11646 rack = (struct tcp_rack *)tp->t_fb_ptr; 11647 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11648 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11649 __predict_true(SEGQ_EMPTY(tp)) && 11650 __predict_true(th->th_seq == tp->rcv_nxt)) { 11651 if (tlen == 0) { 11652 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11653 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11654 return (0); 11655 } 11656 } else { 11657 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11658 tiwin, nxt_pkt, iptos)) { 11659 return (0); 11660 } 11661 } 11662 } 11663 ctf_calc_rwin(so, tp); 11664 11665 if ((thflags & TH_RST) || 11666 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11667 return (__ctf_process_rst(m, th, so, tp, 11668 &rack->r_ctl.challenge_ack_ts, 11669 &rack->r_ctl.challenge_ack_cnt)); 11670 11671 /* 11672 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11673 * synchronized state. 11674 */ 11675 if (thflags & TH_SYN) { 11676 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11677 return (ret_val); 11678 } 11679 /* 11680 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11681 * it's less than ts_recent, drop it. 11682 */ 11683 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11684 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11685 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11686 return (ret_val); 11687 } 11688 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11689 &rack->r_ctl.challenge_ack_ts, 11690 &rack->r_ctl.challenge_ack_cnt)) { 11691 return (ret_val); 11692 } 11693 /* 11694 * If last ACK falls within this segment's sequence numbers, record 11695 * its timestamp. NOTE: 1) That the test incorporates suggestions 11696 * from the latest proposal of the tcplw@cray.com list (Braden 11697 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11698 * with our earlier PAWS tests, so this check should be solely 11699 * predicated on the sequence space of this segment. 3) That we 11700 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11701 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11702 * SEG.Len, This modified check allows us to overcome RFC1323's 11703 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11704 * p.869. In such cases, we can still calculate the RTT correctly 11705 * when RCV.NXT == Last.ACK.Sent. 11706 */ 11707 if ((to->to_flags & TOF_TS) != 0 && 11708 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11709 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11710 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11711 tp->ts_recent_age = tcp_ts_getticks(); 11712 tp->ts_recent = to->to_tsval; 11713 } 11714 /* 11715 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11716 * is on (half-synchronized state), then queue data for later 11717 * processing; else drop segment and return. 11718 */ 11719 if ((thflags & TH_ACK) == 0) { 11720 if (tp->t_flags & TF_NEEDSYN) { 11721 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11722 tiwin, thflags, nxt_pkt)); 11723 11724 } else if (tp->t_flags & TF_ACKNOW) { 11725 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11726 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11727 return (ret_val); 11728 } else { 11729 ctf_do_drop(m, NULL); 11730 return (0); 11731 } 11732 } 11733 /* 11734 * Ack processing. 11735 */ 11736 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11737 return (ret_val); 11738 } 11739 if (sbavail(&so->so_snd)) { 11740 if (ctf_progress_timeout_check(tp, true)) { 11741 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11742 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11743 return (1); 11744 } 11745 } 11746 /* State changes only happen in rack_process_data() */ 11747 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11748 tiwin, thflags, nxt_pkt)); 11749 } 11750 11751 /* 11752 * Return value of 1, the TCB is unlocked and most 11753 * likely gone, return value of 0, the TCP is still 11754 * locked. 11755 */ 11756 static int 11757 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11758 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11759 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11760 { 11761 int32_t ret_val = 0; 11762 struct tcp_rack *rack; 11763 11764 rack = (struct tcp_rack *)tp->t_fb_ptr; 11765 ctf_calc_rwin(so, tp); 11766 if ((thflags & TH_RST) || 11767 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11768 return (__ctf_process_rst(m, th, so, tp, 11769 &rack->r_ctl.challenge_ack_ts, 11770 &rack->r_ctl.challenge_ack_cnt)); 11771 /* 11772 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11773 * synchronized state. 11774 */ 11775 if (thflags & TH_SYN) { 11776 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11777 return (ret_val); 11778 } 11779 /* 11780 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11781 * it's less than ts_recent, drop it. 
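 *
 * Illustrative sketch with made-up values: if tp->ts_recent is 1000
 * (recorded from the last in-window segment) and an old duplicate
 * arrives carrying TSval = 900, TSTMP_LT(900, 1000) is true and the
 * segment goes to ctf_ts_check(), which normally counts it as a PAWS
 * drop (ACKing it if it carried data); the one exception is when
 * ts_recent_age shows ts_recent is older than TCP_PAWS_IDLE, in which
 * case ts_recent is invalidated and the segment is processed normally.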
11782 */ 11783 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11784 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11785 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11786 return (ret_val); 11787 } 11788 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11789 &rack->r_ctl.challenge_ack_ts, 11790 &rack->r_ctl.challenge_ack_cnt)) { 11791 return (ret_val); 11792 } 11793 /* 11794 * If last ACK falls within this segment's sequence numbers, record 11795 * its timestamp. NOTE: 1) That the test incorporates suggestions 11796 * from the latest proposal of the tcplw@cray.com list (Braden 11797 * 1993/04/26). 2) That updating only on newer timestamps interferes 11798 * with our earlier PAWS tests, so this check should be solely 11799 * predicated on the sequence space of this segment. 3) That we 11800 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11801 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11802 * SEG.Len, This modified check allows us to overcome RFC1323's 11803 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11804 * p.869. In such cases, we can still calculate the RTT correctly 11805 * when RCV.NXT == Last.ACK.Sent. 11806 */ 11807 if ((to->to_flags & TOF_TS) != 0 && 11808 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11809 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11810 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11811 tp->ts_recent_age = tcp_ts_getticks(); 11812 tp->ts_recent = to->to_tsval; 11813 } 11814 /* 11815 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11816 * is on (half-synchronized state), then queue data for later 11817 * processing; else drop segment and return. 11818 */ 11819 if ((thflags & TH_ACK) == 0) { 11820 if (tp->t_flags & TF_NEEDSYN) { 11821 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11822 tiwin, thflags, nxt_pkt)); 11823 11824 } else if (tp->t_flags & TF_ACKNOW) { 11825 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11826 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11827 return (ret_val); 11828 } else { 11829 ctf_do_drop(m, NULL); 11830 return (0); 11831 } 11832 } 11833 /* 11834 * Ack processing. 
11835 */ 11836 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11837 return (ret_val); 11838 } 11839 if (sbavail(&so->so_snd)) { 11840 if (ctf_progress_timeout_check(tp, true)) { 11841 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11842 tp, tick, PROGRESS_DROP, __LINE__); 11843 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11844 return (1); 11845 } 11846 } 11847 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11848 tiwin, thflags, nxt_pkt)); 11849 } 11850 11851 static int 11852 rack_check_data_after_close(struct mbuf *m, 11853 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11854 { 11855 struct tcp_rack *rack; 11856 11857 rack = (struct tcp_rack *)tp->t_fb_ptr; 11858 if (rack->rc_allow_data_af_clo == 0) { 11859 close_now: 11860 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11861 /* tcp_close will kill the inp pre-log the Reset */ 11862 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11863 tp = tcp_close(tp); 11864 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11865 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11866 return (1); 11867 } 11868 if (sbavail(&so->so_snd) == 0) 11869 goto close_now; 11870 /* Ok we allow data that is ignored and a followup reset */ 11871 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11872 tp->rcv_nxt = th->th_seq + *tlen; 11873 tp->t_flags2 |= TF2_DROP_AF_DATA; 11874 rack->r_wanted_output = 1; 11875 *tlen = 0; 11876 return (0); 11877 } 11878 11879 /* 11880 * Return value of 1, the TCB is unlocked and most 11881 * likely gone, return value of 0, the TCP is still 11882 * locked. 11883 */ 11884 static int 11885 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11886 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11887 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11888 { 11889 int32_t ret_val = 0; 11890 int32_t ourfinisacked = 0; 11891 struct tcp_rack *rack; 11892 11893 rack = (struct tcp_rack *)tp->t_fb_ptr; 11894 ctf_calc_rwin(so, tp); 11895 11896 if ((thflags & TH_RST) || 11897 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11898 return (__ctf_process_rst(m, th, so, tp, 11899 &rack->r_ctl.challenge_ack_ts, 11900 &rack->r_ctl.challenge_ack_cnt)); 11901 /* 11902 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11903 * synchronized state. 11904 */ 11905 if (thflags & TH_SYN) { 11906 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11907 return (ret_val); 11908 } 11909 /* 11910 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11911 * it's less than ts_recent, drop it. 11912 */ 11913 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11914 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11915 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11916 return (ret_val); 11917 } 11918 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11919 &rack->r_ctl.challenge_ack_ts, 11920 &rack->r_ctl.challenge_ack_cnt)) { 11921 return (ret_val); 11922 } 11923 /* 11924 * If new data are received on a connection after the user processes 11925 * are gone, then RST the other end. 11926 */ 11927 if ((tp->t_flags & TF_CLOSED) && tlen && 11928 rack_check_data_after_close(m, tp, &tlen, th, so)) 11929 return (1); 11930 /* 11931 * If last ACK falls within this segment's sequence numbers, record 11932 * its timestamp. NOTE: 1) That the test incorporates suggestions 11933 * from the latest proposal of the tcplw@cray.com list (Braden 11934 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11935 * with our earlier PAWS tests, so this check should be solely 11936 * predicated on the sequence space of this segment. 3) That we 11937 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11938 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11939 * SEG.Len, This modified check allows us to overcome RFC1323's 11940 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11941 * p.869. In such cases, we can still calculate the RTT correctly 11942 * when RCV.NXT == Last.ACK.Sent. 11943 */ 11944 if ((to->to_flags & TOF_TS) != 0 && 11945 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11946 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11947 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11948 tp->ts_recent_age = tcp_ts_getticks(); 11949 tp->ts_recent = to->to_tsval; 11950 } 11951 /* 11952 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11953 * is on (half-synchronized state), then queue data for later 11954 * processing; else drop segment and return. 11955 */ 11956 if ((thflags & TH_ACK) == 0) { 11957 if (tp->t_flags & TF_NEEDSYN) { 11958 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11959 tiwin, thflags, nxt_pkt)); 11960 } else if (tp->t_flags & TF_ACKNOW) { 11961 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11962 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11963 return (ret_val); 11964 } else { 11965 ctf_do_drop(m, NULL); 11966 return (0); 11967 } 11968 } 11969 /* 11970 * Ack processing. 11971 */ 11972 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11973 return (ret_val); 11974 } 11975 if (ourfinisacked) { 11976 /* 11977 * If we can't receive any more data, then closing user can 11978 * proceed. Starting the timer is contrary to the 11979 * specification, but if we don't get a FIN we'll hang 11980 * forever. 11981 * 11982 * XXXjl: we should release the tp also, and use a 11983 * compressed state. 11984 */ 11985 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11986 soisdisconnected(so); 11987 tcp_timer_activate(tp, TT_2MSL, 11988 (tcp_fast_finwait2_recycle ? 11989 tcp_finwait2_timeout : 11990 TP_MAXIDLE(tp))); 11991 } 11992 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11993 } 11994 if (sbavail(&so->so_snd)) { 11995 if (ctf_progress_timeout_check(tp, true)) { 11996 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11997 tp, tick, PROGRESS_DROP, __LINE__); 11998 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11999 return (1); 12000 } 12001 } 12002 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12003 tiwin, thflags, nxt_pkt)); 12004 } 12005 12006 /* 12007 * Return value of 1, the TCB is unlocked and most 12008 * likely gone, return value of 0, the TCP is still 12009 * locked. 
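 *
 * A hypothetical caller sketch of this contract (the state handlers
 * are reached through the rack->r_substate pointer set up in
 * rack_set_state()):
 *
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos);
 *	if (retval == 1)
 *		return;			(TCB unlocked, possibly freed)
 *	(retval == 0: tp is still locked and safe to touch)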
12010 */ 12011 static int 12012 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12013 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12014 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12015 { 12016 int32_t ret_val = 0; 12017 int32_t ourfinisacked = 0; 12018 struct tcp_rack *rack; 12019 12020 rack = (struct tcp_rack *)tp->t_fb_ptr; 12021 ctf_calc_rwin(so, tp); 12022 12023 if ((thflags & TH_RST) || 12024 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12025 return (__ctf_process_rst(m, th, so, tp, 12026 &rack->r_ctl.challenge_ack_ts, 12027 &rack->r_ctl.challenge_ack_cnt)); 12028 /* 12029 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12030 * synchronized state. 12031 */ 12032 if (thflags & TH_SYN) { 12033 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12034 return (ret_val); 12035 } 12036 /* 12037 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12038 * it's less than ts_recent, drop it. 12039 */ 12040 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12041 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12042 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12043 return (ret_val); 12044 } 12045 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12046 &rack->r_ctl.challenge_ack_ts, 12047 &rack->r_ctl.challenge_ack_cnt)) { 12048 return (ret_val); 12049 } 12050 /* 12051 * If new data are received on a connection after the user processes 12052 * are gone, then RST the other end. 12053 */ 12054 if ((tp->t_flags & TF_CLOSED) && tlen && 12055 rack_check_data_after_close(m, tp, &tlen, th, so)) 12056 return (1); 12057 /* 12058 * If last ACK falls within this segment's sequence numbers, record 12059 * its timestamp. NOTE: 1) That the test incorporates suggestions 12060 * from the latest proposal of the tcplw@cray.com list (Braden 12061 * 1993/04/26). 2) That updating only on newer timestamps interferes 12062 * with our earlier PAWS tests, so this check should be solely 12063 * predicated on the sequence space of this segment. 3) That we 12064 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12065 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12066 * SEG.Len, This modified check allows us to overcome RFC1323's 12067 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12068 * p.869. In such cases, we can still calculate the RTT correctly 12069 * when RCV.NXT == Last.ACK.Sent. 12070 */ 12071 if ((to->to_flags & TOF_TS) != 0 && 12072 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12073 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12074 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12075 tp->ts_recent_age = tcp_ts_getticks(); 12076 tp->ts_recent = to->to_tsval; 12077 } 12078 /* 12079 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12080 * is on (half-synchronized state), then queue data for later 12081 * processing; else drop segment and return. 12082 */ 12083 if ((thflags & TH_ACK) == 0) { 12084 if (tp->t_flags & TF_NEEDSYN) { 12085 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12086 tiwin, thflags, nxt_pkt)); 12087 } else if (tp->t_flags & TF_ACKNOW) { 12088 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12089 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12090 return (ret_val); 12091 } else { 12092 ctf_do_drop(m, NULL); 12093 return (0); 12094 } 12095 } 12096 /* 12097 * Ack processing. 
12098 */ 12099 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12100 return (ret_val); 12101 } 12102 if (ourfinisacked) { 12103 tcp_twstart(tp); 12104 m_freem(m); 12105 return (1); 12106 } 12107 if (sbavail(&so->so_snd)) { 12108 if (ctf_progress_timeout_check(tp, true)) { 12109 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12110 tp, tick, PROGRESS_DROP, __LINE__); 12111 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12112 return (1); 12113 } 12114 } 12115 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12116 tiwin, thflags, nxt_pkt)); 12117 } 12118 12119 /* 12120 * Return value of 1, the TCB is unlocked and most 12121 * likely gone, return value of 0, the TCP is still 12122 * locked. 12123 */ 12124 static int 12125 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12126 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12127 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12128 { 12129 int32_t ret_val = 0; 12130 int32_t ourfinisacked = 0; 12131 struct tcp_rack *rack; 12132 12133 rack = (struct tcp_rack *)tp->t_fb_ptr; 12134 ctf_calc_rwin(so, tp); 12135 12136 if ((thflags & TH_RST) || 12137 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12138 return (__ctf_process_rst(m, th, so, tp, 12139 &rack->r_ctl.challenge_ack_ts, 12140 &rack->r_ctl.challenge_ack_cnt)); 12141 /* 12142 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12143 * synchronized state. 12144 */ 12145 if (thflags & TH_SYN) { 12146 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12147 return (ret_val); 12148 } 12149 /* 12150 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12151 * it's less than ts_recent, drop it. 12152 */ 12153 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12154 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12155 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12156 return (ret_val); 12157 } 12158 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12159 &rack->r_ctl.challenge_ack_ts, 12160 &rack->r_ctl.challenge_ack_cnt)) { 12161 return (ret_val); 12162 } 12163 /* 12164 * If new data are received on a connection after the user processes 12165 * are gone, then RST the other end. 12166 */ 12167 if ((tp->t_flags & TF_CLOSED) && tlen && 12168 rack_check_data_after_close(m, tp, &tlen, th, so)) 12169 return (1); 12170 /* 12171 * If last ACK falls within this segment's sequence numbers, record 12172 * its timestamp. NOTE: 1) That the test incorporates suggestions 12173 * from the latest proposal of the tcplw@cray.com list (Braden 12174 * 1993/04/26). 2) That updating only on newer timestamps interferes 12175 * with our earlier PAWS tests, so this check should be solely 12176 * predicated on the sequence space of this segment. 3) That we 12177 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12178 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12179 * SEG.Len, This modified check allows us to overcome RFC1323's 12180 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12181 * p.869. In such cases, we can still calculate the RTT correctly 12182 * when RCV.NXT == Last.ACK.Sent. 
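 *
 * Worked example with made-up numbers: if last_ack_sent is 100 and a
 * pure ACK arrives with SEG.SEQ = 100 and SEG.Len = 0, the strict
 * RFC1323 form (100 <= 100 < 100 + 0) fails and the timestamp would
 * be skipped, while the relaxed form used here (100 <= 100 <= 100 + 0)
 * passes, so ts_recent is updated and the echoed value can still feed
 * an RTT sample even though RCV.NXT == Last.ACK.Sent.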
12183 */ 12184 if ((to->to_flags & TOF_TS) != 0 && 12185 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12186 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12187 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12188 tp->ts_recent_age = tcp_ts_getticks(); 12189 tp->ts_recent = to->to_tsval; 12190 } 12191 /* 12192 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12193 * is on (half-synchronized state), then queue data for later 12194 * processing; else drop segment and return. 12195 */ 12196 if ((thflags & TH_ACK) == 0) { 12197 if (tp->t_flags & TF_NEEDSYN) { 12198 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12199 tiwin, thflags, nxt_pkt)); 12200 } else if (tp->t_flags & TF_ACKNOW) { 12201 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12202 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12203 return (ret_val); 12204 } else { 12205 ctf_do_drop(m, NULL); 12206 return (0); 12207 } 12208 } 12209 /* 12210 * case TCPS_LAST_ACK: Ack processing. 12211 */ 12212 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12213 return (ret_val); 12214 } 12215 if (ourfinisacked) { 12216 tp = tcp_close(tp); 12217 ctf_do_drop(m, tp); 12218 return (1); 12219 } 12220 if (sbavail(&so->so_snd)) { 12221 if (ctf_progress_timeout_check(tp, true)) { 12222 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12223 tp, tick, PROGRESS_DROP, __LINE__); 12224 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12225 return (1); 12226 } 12227 } 12228 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12229 tiwin, thflags, nxt_pkt)); 12230 } 12231 12232 /* 12233 * Return value of 1, the TCB is unlocked and most 12234 * likely gone, return value of 0, the TCP is still 12235 * locked. 12236 */ 12237 static int 12238 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12239 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12240 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12241 { 12242 int32_t ret_val = 0; 12243 int32_t ourfinisacked = 0; 12244 struct tcp_rack *rack; 12245 12246 rack = (struct tcp_rack *)tp->t_fb_ptr; 12247 ctf_calc_rwin(so, tp); 12248 12249 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12250 if ((thflags & TH_RST) || 12251 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12252 return (__ctf_process_rst(m, th, so, tp, 12253 &rack->r_ctl.challenge_ack_ts, 12254 &rack->r_ctl.challenge_ack_cnt)); 12255 /* 12256 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12257 * synchronized state. 12258 */ 12259 if (thflags & TH_SYN) { 12260 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12261 return (ret_val); 12262 } 12263 /* 12264 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12265 * it's less than ts_recent, drop it. 12266 */ 12267 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12268 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12269 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12270 return (ret_val); 12271 } 12272 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12273 &rack->r_ctl.challenge_ack_ts, 12274 &rack->r_ctl.challenge_ack_cnt)) { 12275 return (ret_val); 12276 } 12277 /* 12278 * If new data are received on a connection after the user processes 12279 * are gone, then RST the other end. 
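 *
 * rack_check_data_after_close() (above) implements two policies: with
 * rc_allow_data_af_clo clear, or nothing left in so_snd, the
 * connection is closed on the spot and the segment is answered with a
 * reset; otherwise the arriving data is ignored but still consumed
 * from sequence space (rcv_nxt is advanced past it), TF2_DROP_AF_DATA
 * and r_wanted_output are set, and a follow-up reset is expected as
 * that function's own comment describes.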
12280 */ 12281 if ((tp->t_flags & TF_CLOSED) && tlen && 12282 rack_check_data_after_close(m, tp, &tlen, th, so)) 12283 return (1); 12284 /* 12285 * If last ACK falls within this segment's sequence numbers, record 12286 * its timestamp. NOTE: 1) That the test incorporates suggestions 12287 * from the latest proposal of the tcplw@cray.com list (Braden 12288 * 1993/04/26). 2) That updating only on newer timestamps interferes 12289 * with our earlier PAWS tests, so this check should be solely 12290 * predicated on the sequence space of this segment. 3) That we 12291 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12292 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12293 * SEG.Len, This modified check allows us to overcome RFC1323's 12294 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12295 * p.869. In such cases, we can still calculate the RTT correctly 12296 * when RCV.NXT == Last.ACK.Sent. 12297 */ 12298 if ((to->to_flags & TOF_TS) != 0 && 12299 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12300 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12301 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12302 tp->ts_recent_age = tcp_ts_getticks(); 12303 tp->ts_recent = to->to_tsval; 12304 } 12305 /* 12306 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12307 * is on (half-synchronized state), then queue data for later 12308 * processing; else drop segment and return. 12309 */ 12310 if ((thflags & TH_ACK) == 0) { 12311 if (tp->t_flags & TF_NEEDSYN) { 12312 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12313 tiwin, thflags, nxt_pkt)); 12314 } else if (tp->t_flags & TF_ACKNOW) { 12315 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12316 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12317 return (ret_val); 12318 } else { 12319 ctf_do_drop(m, NULL); 12320 return (0); 12321 } 12322 } 12323 /* 12324 * Ack processing. 
12325 */ 12326 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12327 return (ret_val); 12328 } 12329 if (sbavail(&so->so_snd)) { 12330 if (ctf_progress_timeout_check(tp, true)) { 12331 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12332 tp, tick, PROGRESS_DROP, __LINE__); 12333 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12334 return (1); 12335 } 12336 } 12337 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12338 tiwin, thflags, nxt_pkt)); 12339 } 12340 12341 static void inline 12342 rack_clear_rate_sample(struct tcp_rack *rack) 12343 { 12344 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12345 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12346 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12347 } 12348 12349 static void 12350 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12351 { 12352 uint64_t bw_est, rate_wanted; 12353 int chged = 0; 12354 uint32_t user_max, orig_min, orig_max; 12355 12356 orig_min = rack->r_ctl.rc_pace_min_segs; 12357 orig_max = rack->r_ctl.rc_pace_max_segs; 12358 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12359 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12360 chged = 1; 12361 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12362 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12363 if (user_max != rack->r_ctl.rc_pace_max_segs) 12364 chged = 1; 12365 } 12366 if (rack->rc_force_max_seg) { 12367 rack->r_ctl.rc_pace_max_segs = user_max; 12368 } else if (rack->use_fixed_rate) { 12369 bw_est = rack_get_bw(rack); 12370 if ((rack->r_ctl.crte == NULL) || 12371 (bw_est != rack->r_ctl.crte->rate)) { 12372 rack->r_ctl.rc_pace_max_segs = user_max; 12373 } else { 12374 /* We are pacing right at the hardware rate */ 12375 uint32_t segsiz; 12376 12377 segsiz = min(ctf_fixed_maxseg(tp), 12378 rack->r_ctl.rc_pace_min_segs); 12379 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12380 tp, bw_est, segsiz, 0, 12381 rack->r_ctl.crte, NULL); 12382 } 12383 } else if (rack->rc_always_pace) { 12384 if (rack->r_ctl.gp_bw || 12385 #ifdef NETFLIX_PEAKRATE 12386 rack->rc_tp->t_maxpeakrate || 12387 #endif 12388 rack->r_ctl.init_rate) { 12389 /* We have a rate of some sort set */ 12390 uint32_t orig; 12391 12392 bw_est = rack_get_bw(rack); 12393 orig = rack->r_ctl.rc_pace_max_segs; 12394 if (fill_override) 12395 rate_wanted = *fill_override; 12396 else 12397 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12398 if (rate_wanted) { 12399 /* We have something */ 12400 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12401 rate_wanted, 12402 ctf_fixed_maxseg(rack->rc_tp)); 12403 } else 12404 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12405 if (orig != rack->r_ctl.rc_pace_max_segs) 12406 chged = 1; 12407 } else if ((rack->r_ctl.gp_bw == 0) && 12408 (rack->r_ctl.rc_pace_max_segs == 0)) { 12409 /* 12410 * If we have nothing limit us to bursting 12411 * out IW sized pieces. 
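 *
 * A summary sketch of how rc_pace_max_segs is chosen above (before
 * the PACE_MAX_IP_BYTES clamp that follows):
 *
 *	rc_force_max_seg                  -> user_max
 *	use_fixed_rate, no matching crte  -> user_max
 *	use_fixed_rate, hw rate matches   -> tcp_get_pacing_burst_size()
 *	rc_always_pace with a b/w on hand -> rack_get_pacing_len(), or
 *	                                     rc_pace_min_segs if no rate
 *	rc_always_pace, no b/w yet        -> rc_init_window() (IW burst)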
12412 */ 12413 chged = 1; 12414 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12415 } 12416 } 12417 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12418 chged = 1; 12419 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12420 } 12421 if (chged) 12422 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12423 } 12424 12425 12426 static void 12427 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12428 { 12429 #ifdef INET6 12430 struct ip6_hdr *ip6 = NULL; 12431 #endif 12432 #ifdef INET 12433 struct ip *ip = NULL; 12434 #endif 12435 struct udphdr *udp = NULL; 12436 12437 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12438 #ifdef INET6 12439 if (rack->r_is_v6) { 12440 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12441 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12442 if (tp->t_port) { 12443 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12444 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12445 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12446 udp->uh_dport = tp->t_port; 12447 rack->r_ctl.fsb.udp = udp; 12448 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12449 } else 12450 { 12451 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12452 rack->r_ctl.fsb.udp = NULL; 12453 } 12454 tcpip_fillheaders(rack->rc_inp, 12455 tp->t_port, 12456 ip6, rack->r_ctl.fsb.th); 12457 } else 12458 #endif /* INET6 */ 12459 { 12460 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12461 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12462 if (tp->t_port) { 12463 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12464 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12465 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12466 udp->uh_dport = tp->t_port; 12467 rack->r_ctl.fsb.udp = udp; 12468 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12469 } else 12470 { 12471 rack->r_ctl.fsb.udp = NULL; 12472 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12473 } 12474 tcpip_fillheaders(rack->rc_inp, 12475 tp->t_port, 12476 ip, rack->r_ctl.fsb.th); 12477 } 12478 rack->r_fsb_inited = 1; 12479 } 12480 12481 static int 12482 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12483 { 12484 /* 12485 * Allocate the larger of spaces V6 if available else just 12486 * V4 and include udphdr (overbook) 12487 */ 12488 #ifdef INET6 12489 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12490 #else 12491 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12492 #endif 12493 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12494 M_TCPFSB, M_NOWAIT|M_ZERO); 12495 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12496 return (ENOMEM); 12497 } 12498 rack->r_fsb_inited = 0; 12499 return (0); 12500 } 12501 12502 static int 12503 rack_init(struct tcpcb *tp) 12504 { 12505 struct inpcb *inp = tptoinpcb(tp); 12506 struct tcp_rack *rack = NULL; 12507 #ifdef INVARIANTS 12508 struct rack_sendmap *insret; 12509 #endif 12510 uint32_t iwin, snt, us_cts; 12511 int err; 12512 12513 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12514 if (tp->t_fb_ptr == NULL) { 12515 /* 12516 * We need to allocate memory but cant. The INP and INP_INFO 12517 * locks and they are recursive (happens during setup. 
So a 12518 * scheme to drop the locks fails :( 12519 * 12520 */ 12521 return (ENOMEM); 12522 } 12523 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12524 12525 rack = (struct tcp_rack *)tp->t_fb_ptr; 12526 RB_INIT(&rack->r_ctl.rc_mtree); 12527 TAILQ_INIT(&rack->r_ctl.rc_free); 12528 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12529 rack->rc_tp = tp; 12530 rack->rc_inp = inp; 12531 /* Set the flag */ 12532 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 12533 /* Probably not needed but lets be sure */ 12534 rack_clear_rate_sample(rack); 12535 /* 12536 * Save off the default values, socket options will poke 12537 * at these if pacing is not on or we have not yet 12538 * reached where pacing is on (gp_ready/fixed enabled). 12539 * When they get set into the CC module (when gp_ready 12540 * is enabled or we enable fixed) then we will set these 12541 * values into the CC and place in here the old values 12542 * so we have a restoral. Then we will set the flag 12543 * rc_pacing_cc_set. That way whenever we turn off pacing 12544 * or switch off this stack, we will know to go restore 12545 * the saved values. 12546 */ 12547 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12548 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12549 /* We want abe like behavior as well */ 12550 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12551 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12552 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12553 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12554 rack->r_ctl.roundends = tp->snd_max; 12555 if (use_rack_rr) 12556 rack->use_rack_rr = 1; 12557 if (V_tcp_delack_enabled) 12558 tp->t_delayed_ack = 1; 12559 else 12560 tp->t_delayed_ack = 0; 12561 #ifdef TCP_ACCOUNTING 12562 if (rack_tcp_accounting) { 12563 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12564 } 12565 #endif 12566 if (rack_enable_shared_cwnd) 12567 rack->rack_enable_scwnd = 1; 12568 rack->rc_user_set_max_segs = rack_hptsi_segments; 12569 rack->rc_force_max_seg = 0; 12570 if (rack_use_imac_dack) 12571 rack->rc_dack_mode = 1; 12572 TAILQ_INIT(&rack->r_ctl.opt_list); 12573 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12574 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12575 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12576 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12577 rack->r_ctl.rc_highest_us_rtt = 0; 12578 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12579 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12580 if (rack_use_cmp_acks) 12581 rack->r_use_cmp_ack = 1; 12582 if (rack_disable_prr) 12583 rack->rack_no_prr = 1; 12584 if (rack_gp_no_rec_chg) 12585 rack->rc_gp_no_rec_chg = 1; 12586 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12587 rack->rc_always_pace = 1; 12588 if (rack->use_fixed_rate || rack->gp_ready) 12589 rack_set_cc_pacing(rack); 12590 } else 12591 rack->rc_always_pace = 0; 12592 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12593 rack->r_mbuf_queue = 1; 12594 else 12595 rack->r_mbuf_queue = 0; 12596 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12597 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12598 else 12599 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12600 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12601 if (rack_limits_scwnd) 12602 rack->r_limit_scw = 1; 12603 else 12604 rack->r_limit_scw = 0; 12605 rack->rc_labc = V_tcp_abc_l_var; 12606 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12607 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12608 rack->r_ctl.rc_rate_sample_method = 
rack_rate_sample_method; 12609 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12610 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12611 rack->r_ctl.rc_min_to = rack_min_to; 12612 microuptime(&rack->r_ctl.act_rcv_time); 12613 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12614 rack->rc_init_win = rack_default_init_window; 12615 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12616 if (rack_hw_up_only) 12617 rack->r_up_only = 1; 12618 if (rack_do_dyn_mul) { 12619 /* When dynamic adjustment is on CA needs to start at 100% */ 12620 rack->rc_gp_dyn_mul = 1; 12621 if (rack_do_dyn_mul >= 100) 12622 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12623 } else 12624 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12625 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12626 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12627 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12628 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12629 rack_probertt_filter_life); 12630 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12631 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12632 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12633 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12634 rack->r_ctl.rc_time_probertt_starts = 0; 12635 if (rack_dsack_std_based & 0x1) { 12636 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12637 rack->rc_rack_tmr_std_based = 1; 12638 } 12639 if (rack_dsack_std_based & 0x2) { 12640 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12641 rack->rc_rack_use_dsack = 1; 12642 } 12643 /* We require at least one measurement, even if the sysctl is 0 */ 12644 if (rack_req_measurements) 12645 rack->r_ctl.req_measurements = rack_req_measurements; 12646 else 12647 rack->r_ctl.req_measurements = 1; 12648 if (rack_enable_hw_pacing) 12649 rack->rack_hdw_pace_ena = 1; 12650 if (rack_hw_rate_caps) 12651 rack->r_rack_hw_rate_caps = 1; 12652 /* Do we force on detection? */ 12653 #ifdef NETFLIX_EXP_DETECTION 12654 if (tcp_force_detection) 12655 rack->do_detection = 1; 12656 else 12657 #endif 12658 rack->do_detection = 0; 12659 if (rack_non_rxt_use_cr) 12660 rack->rack_rec_nonrxt_use_cr = 1; 12661 err = rack_init_fsb(tp, rack); 12662 if (err) { 12663 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12664 tp->t_fb_ptr = NULL; 12665 return (err); 12666 } 12667 if (tp->snd_una != tp->snd_max) { 12668 /* Create a send map for the current outstanding data */ 12669 struct rack_sendmap *rsm; 12670 12671 rsm = rack_alloc(rack); 12672 if (rsm == NULL) { 12673 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12674 tp->t_fb_ptr = NULL; 12675 return (ENOMEM); 12676 } 12677 rsm->r_no_rtt_allowed = 1; 12678 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12679 rsm->r_rtr_cnt = 1; 12680 rsm->r_rtr_bytes = 0; 12681 if (tp->t_flags & TF_SENTFIN) 12682 rsm->r_flags |= RACK_HAS_FIN; 12683 if ((tp->snd_una == tp->iss) && 12684 !TCPS_HAVEESTABLISHED(tp->t_state)) 12685 rsm->r_flags |= RACK_HAS_SYN; 12686 rsm->r_start = tp->snd_una; 12687 rsm->r_end = tp->snd_max; 12688 rsm->r_dupack = 0; 12689 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12690 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12691 if (rsm->m) 12692 rsm->orig_m_len = rsm->m->m_len; 12693 else 12694 rsm->orig_m_len = 0; 12695 } else { 12696 /* 12697 * This can happen if we have a stand-alone FIN or 12698 * SYN. 
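 *
 * For the common case a sketch with made-up numbers: taking over a
 * connection with snd_una = 1000 and snd_max = 5000 produces a single
 * rsm with r_start = 1000 and r_end = 5000, and rsm->m/rsm->soff set
 * by sbsndmbuf() to reference offset 0 of so_snd, so the data that
 * was already outstanding can be tracked and retransmitted like
 * rack's own sends.  Here, with only a FIN or SYN consuming sequence
 * space, there is no mbuf to map and m, soff and orig_m_len stay 0.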
12699 */ 12700 rsm->m = NULL; 12701 rsm->orig_m_len = 0; 12702 rsm->soff = 0; 12703 } 12704 #ifndef INVARIANTS 12705 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12706 #else 12707 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12708 if (insret != NULL) { 12709 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12710 insret, rack, rsm); 12711 } 12712 #endif 12713 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12714 rsm->r_in_tmap = 1; 12715 } 12716 /* 12717 * Timers in Rack are kept in microseconds so lets 12718 * convert any initial incoming variables 12719 * from ticks into usecs. Note that we 12720 * also change the values of t_srtt and t_rttvar, if 12721 * they are non-zero. They are kept with a 5 12722 * bit decimal so we have to carefully convert 12723 * these to get the full precision. 12724 */ 12725 rack_convert_rtts(tp); 12726 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12727 if (rack_do_hystart) { 12728 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 12729 if (rack_do_hystart > 1) 12730 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 12731 if (rack_do_hystart > 2) 12732 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 12733 } 12734 if (rack_def_profile) 12735 rack_set_profile(rack, rack_def_profile); 12736 /* Cancel the GP measurement in progress */ 12737 tp->t_flags &= ~TF_GPUTINPROG; 12738 if (SEQ_GT(tp->snd_max, tp->iss)) 12739 snt = tp->snd_max - tp->iss; 12740 else 12741 snt = 0; 12742 iwin = rc_init_window(rack); 12743 if (snt < iwin) { 12744 /* We are not past the initial window 12745 * so we need to make sure cwnd is 12746 * correct. 12747 */ 12748 if (tp->snd_cwnd < iwin) 12749 tp->snd_cwnd = iwin; 12750 /* 12751 * If we are within the initial window 12752 * we want ssthresh to be unlimited. Setting 12753 * it to the rwnd (which the default stack does 12754 * and older racks) is not really a good idea 12755 * since we want to be in SS and grow both the 12756 * cwnd and the rwnd (via dynamic rwnd growth). If 12757 * we set it to the rwnd then as the peer grows its 12758 * rwnd we will be stuck in CA and never hit SS. 12759 * 12760 * Its far better to raise it up high (this takes the 12761 * risk that there as been a loss already, probably 12762 * we should have an indicator in all stacks of loss 12763 * but we don't), but considering the normal use this 12764 * is a risk worth taking. The consequences of not 12765 * hitting SS are far worse than going one more time 12766 * into it early on (before we have sent even a IW). 12767 * It is highly unlikely that we will have had a loss 12768 * before getting the IW out. 12769 */ 12770 tp->snd_ssthresh = 0xffffffff; 12771 } 12772 rack_stop_all_timers(tp); 12773 /* Lets setup the fsb block */ 12774 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12775 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12776 __LINE__, RACK_RTTS_INIT); 12777 return (0); 12778 } 12779 12780 static int 12781 rack_handoff_ok(struct tcpcb *tp) 12782 { 12783 if ((tp->t_state == TCPS_CLOSED) || 12784 (tp->t_state == TCPS_LISTEN)) { 12785 /* Sure no problem though it may not stick */ 12786 return (0); 12787 } 12788 if ((tp->t_state == TCPS_SYN_SENT) || 12789 (tp->t_state == TCPS_SYN_RECEIVED)) { 12790 /* 12791 * We really don't know if you support sack, 12792 * you have to get to ESTAB or beyond to tell. 12793 */ 12794 return (EAGAIN); 12795 } 12796 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12797 /* 12798 * Rack will only send a FIN after all data is acknowledged. 
12799 * So in this case we have more data outstanding. We can't 12800 * switch stacks until either all data and only the FIN 12801 * is left (in which case rack_init() now knows how 12802 * to deal with that) <or> all is acknowledged and we 12803 * are only left with incoming data, though why you 12804 * would want to switch to rack after all data is acknowledged 12805 * I have no idea (rrs)! 12806 */ 12807 return (EAGAIN); 12808 } 12809 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12810 return (0); 12811 } 12812 /* 12813 * If we reach here we don't do SACK on this connection so we can 12814 * never do rack. 12815 */ 12816 return (EINVAL); 12817 } 12818 12819 12820 static void 12821 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12822 { 12823 struct inpcb *inp = tptoinpcb(tp); 12824 12825 if (tp->t_fb_ptr) { 12826 struct tcp_rack *rack; 12827 struct rack_sendmap *rsm, *nrsm; 12828 #ifdef INVARIANTS 12829 struct rack_sendmap *rm; 12830 #endif 12831 12832 rack = (struct tcp_rack *)tp->t_fb_ptr; 12833 if (tp->t_in_pkt) { 12834 /* 12835 * It is unsafe to process the packets since a 12836 * reset may be lurking in them (its rare but it 12837 * can occur). If we were to find a RST, then we 12838 * would end up dropping the connection and the 12839 * INP lock, so when we return the caller (tcp_usrreq) 12840 * will blow up when it trys to unlock the inp. 12841 */ 12842 struct mbuf *save, *m; 12843 12844 m = tp->t_in_pkt; 12845 tp->t_in_pkt = NULL; 12846 tp->t_tail_pkt = NULL; 12847 while (m) { 12848 save = m->m_nextpkt; 12849 m->m_nextpkt = NULL; 12850 m_freem(m); 12851 m = save; 12852 } 12853 } 12854 tp->t_flags &= ~TF_FORCEDATA; 12855 #ifdef NETFLIX_SHARED_CWND 12856 if (rack->r_ctl.rc_scw) { 12857 uint32_t limit; 12858 12859 if (rack->r_limit_scw) 12860 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12861 else 12862 limit = 0; 12863 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12864 rack->r_ctl.rc_scw_index, 12865 limit); 12866 rack->r_ctl.rc_scw = NULL; 12867 } 12868 #endif 12869 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12870 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12871 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12872 rack->r_ctl.fsb.th = NULL; 12873 } 12874 /* Convert back to ticks, with */ 12875 if (tp->t_srtt > 1) { 12876 uint32_t val, frac; 12877 12878 val = USEC_2_TICKS(tp->t_srtt); 12879 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12880 tp->t_srtt = val << TCP_RTT_SHIFT; 12881 /* 12882 * frac is the fractional part here is left 12883 * over from converting to hz and shifting. 12884 * We need to convert this to the 5 bit 12885 * remainder. 12886 */ 12887 if (frac) { 12888 if (hz == 1000) { 12889 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12890 } else { 12891 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12892 } 12893 tp->t_srtt += frac; 12894 } 12895 } 12896 if (tp->t_rttvar) { 12897 uint32_t val, frac; 12898 12899 val = USEC_2_TICKS(tp->t_rttvar); 12900 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12901 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12902 /* 12903 * frac is the fractional part here is left 12904 * over from converting to hz and shifting. 12905 * We need to convert this to the 5 bit 12906 * remainder. 
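 *
 * Worked example for the srtt conversion above, assuming hz = 1000:
 * t_srtt = 123456 usecs gives val = 123 ticks and frac = 456; then
 * 456 * TCP_RTT_SCALE / 1000 = 14, so the stored value becomes
 * (123 << TCP_RTT_SHIFT) + 14 = 3950, i.e. 123 + 14/32 ticks (the
 * division truncates rather than rounds).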
12907 */ 12908 if (frac) { 12909 if (hz == 1000) { 12910 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12911 } else { 12912 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12913 } 12914 tp->t_rttvar += frac; 12915 } 12916 } 12917 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12918 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12919 if (rack->rc_always_pace) { 12920 tcp_decrement_paced_conn(); 12921 rack_undo_cc_pacing(rack); 12922 rack->rc_always_pace = 0; 12923 } 12924 /* Clean up any options if they were not applied */ 12925 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12926 struct deferred_opt_list *dol; 12927 12928 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12929 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12930 free(dol, M_TCPDO); 12931 } 12932 /* rack does not use force data but other stacks may clear it */ 12933 if (rack->r_ctl.crte != NULL) { 12934 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12935 rack->rack_hdrw_pacing = 0; 12936 rack->r_ctl.crte = NULL; 12937 } 12938 #ifdef TCP_BLACKBOX 12939 tcp_log_flowend(tp); 12940 #endif 12941 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12942 #ifndef INVARIANTS 12943 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12944 #else 12945 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12946 if (rm != rsm) { 12947 panic("At fini, rack:%p rsm:%p rm:%p", 12948 rack, rsm, rm); 12949 } 12950 #endif 12951 uma_zfree(rack_zone, rsm); 12952 } 12953 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12954 while (rsm) { 12955 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12956 uma_zfree(rack_zone, rsm); 12957 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12958 } 12959 rack->rc_free_cnt = 0; 12960 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12961 tp->t_fb_ptr = NULL; 12962 } 12963 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12964 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12965 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12966 inp->inp_flags2 &= ~INP_MBUF_ACKCMP; 12967 /* Cancel the GP measurement in progress */ 12968 tp->t_flags &= ~TF_GPUTINPROG; 12969 inp->inp_flags2 &= ~INP_MBUF_L_ACKS; 12970 /* Make sure snd_nxt is correctly set */ 12971 tp->snd_nxt = tp->snd_max; 12972 } 12973 12974 static void 12975 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12976 { 12977 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12978 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 12979 } 12980 switch (tp->t_state) { 12981 case TCPS_SYN_SENT: 12982 rack->r_state = TCPS_SYN_SENT; 12983 rack->r_substate = rack_do_syn_sent; 12984 break; 12985 case TCPS_SYN_RECEIVED: 12986 rack->r_state = TCPS_SYN_RECEIVED; 12987 rack->r_substate = rack_do_syn_recv; 12988 break; 12989 case TCPS_ESTABLISHED: 12990 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12991 rack->r_state = TCPS_ESTABLISHED; 12992 rack->r_substate = rack_do_established; 12993 break; 12994 case TCPS_CLOSE_WAIT: 12995 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12996 rack->r_state = TCPS_CLOSE_WAIT; 12997 rack->r_substate = rack_do_close_wait; 12998 break; 12999 case TCPS_FIN_WAIT_1: 13000 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13001 rack->r_state = TCPS_FIN_WAIT_1; 13002 rack->r_substate = rack_do_fin_wait_1; 13003 break; 13004 case TCPS_CLOSING: 13005 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13006 rack->r_state = TCPS_CLOSING; 13007 rack->r_substate = rack_do_closing; 13008 break; 13009 case TCPS_LAST_ACK: 13010 
rack_set_pace_segments(tp, rack, __LINE__, NULL); 13011 rack->r_state = TCPS_LAST_ACK; 13012 rack->r_substate = rack_do_lastack; 13013 break; 13014 case TCPS_FIN_WAIT_2: 13015 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13016 rack->r_state = TCPS_FIN_WAIT_2; 13017 rack->r_substate = rack_do_fin_wait_2; 13018 break; 13019 case TCPS_LISTEN: 13020 case TCPS_CLOSED: 13021 case TCPS_TIME_WAIT: 13022 default: 13023 break; 13024 }; 13025 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13026 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13027 13028 } 13029 13030 static void 13031 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13032 { 13033 /* 13034 * We received an ack, and then did not 13035 * call send or were bounced out due to the 13036 * hpts was running. Now a timer is up as well, is 13037 * it the right timer? 13038 */ 13039 struct rack_sendmap *rsm; 13040 int tmr_up; 13041 13042 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13043 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13044 return; 13045 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13046 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13047 (tmr_up == PACE_TMR_RXT)) { 13048 /* Should be an RXT */ 13049 return; 13050 } 13051 if (rsm == NULL) { 13052 /* Nothing outstanding? */ 13053 if (tp->t_flags & TF_DELACK) { 13054 if (tmr_up == PACE_TMR_DELACK) 13055 /* We are supposed to have delayed ack up and we do */ 13056 return; 13057 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13058 /* 13059 * if we hit enobufs then we would expect the possibility 13060 * of nothing outstanding and the RXT up (and the hptsi timer). 13061 */ 13062 return; 13063 } else if (((V_tcp_always_keepalive || 13064 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13065 (tp->t_state <= TCPS_CLOSING)) && 13066 (tmr_up == PACE_TMR_KEEP) && 13067 (tp->snd_max == tp->snd_una)) { 13068 /* We should have keep alive up and we do */ 13069 return; 13070 } 13071 } 13072 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13073 ((tmr_up == PACE_TMR_TLP) || 13074 (tmr_up == PACE_TMR_RACK) || 13075 (tmr_up == PACE_TMR_RXT))) { 13076 /* 13077 * Either a Rack, TLP or RXT is fine if we 13078 * have outstanding data. 13079 */ 13080 return; 13081 } else if (tmr_up == PACE_TMR_DELACK) { 13082 /* 13083 * If the delayed ack was going to go off 13084 * before the rtx/tlp/rack timer were going to 13085 * expire, then that would be the timer in control. 13086 * Note we don't check the time here trusting the 13087 * code is correct. 13088 */ 13089 return; 13090 } 13091 /* 13092 * Ok the timer originally started is not what we want now. 13093 * We will force the hpts to be stopped if any, and restart 13094 * with the slot set to what was in the saved slot. 
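 *
 * If the timer being torn down is a pacing (PACE_PKT_OUTPUT) timer
 * that has not yet expired, the unused time is remembered first: e.g.
 * if rc_last_output_to is still 1200 usecs in the future when we pull
 * ourselves off the hpts, r_early is set and those 1200 usecs are
 * added to rc_agg_early so later pacing decisions can account for
 * having stopped early.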
13095 */ 13096 if (tcp_in_hpts(rack->rc_inp)) { 13097 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13098 uint32_t us_cts; 13099 13100 us_cts = tcp_get_usecs(NULL); 13101 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13102 rack->r_early = 1; 13103 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13104 } 13105 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13106 } 13107 tcp_hpts_remove(rack->rc_inp); 13108 } 13109 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13110 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13111 } 13112 13113 13114 static void 13115 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13116 { 13117 if ((SEQ_LT(tp->snd_wl1, seq) || 13118 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13119 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13120 /* keep track of pure window updates */ 13121 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13122 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13123 tp->snd_wnd = tiwin; 13124 rack_validate_fo_sendwin_up(tp, rack); 13125 tp->snd_wl1 = seq; 13126 tp->snd_wl2 = ack; 13127 if (tp->snd_wnd > tp->max_sndwnd) 13128 tp->max_sndwnd = tp->snd_wnd; 13129 rack->r_wanted_output = 1; 13130 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13131 tp->snd_wnd = tiwin; 13132 rack_validate_fo_sendwin_up(tp, rack); 13133 tp->snd_wl1 = seq; 13134 tp->snd_wl2 = ack; 13135 } else { 13136 /* Not a valid win update */ 13137 return; 13138 } 13139 /* Do we exit persists? */ 13140 if ((rack->rc_in_persist != 0) && 13141 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13142 rack->r_ctl.rc_pace_min_segs))) { 13143 rack_exit_persist(tp, rack, cts); 13144 } 13145 /* Do we enter persists? */ 13146 if ((rack->rc_in_persist == 0) && 13147 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13148 TCPS_HAVEESTABLISHED(tp->t_state) && 13149 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13150 sbavail(&tptosocket(tp)->so_snd) && 13151 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13152 /* 13153 * Here the rwnd is less than 13154 * the pacing size, we are established, 13155 * nothing is outstanding, and there is 13156 * data to send. Enter persists. 
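 *
 * For instance, with made-up values: rc_high_rwnd = 64000,
 * rc_pace_min_segs = 1448 and 10000 bytes waiting in so_snd; a peer
 * window of snd_wnd = 1000 is below min(64000 / 2, 1448) = 1448, so
 * once snd_max == snd_una we stop trying to pace into the tiny window
 * and arm the persist machinery instead, probing until it reopens.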
13157 */ 13158 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13159 } 13160 } 13161 13162 static void 13163 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13164 { 13165 13166 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13167 struct inpcb *inp = tptoinpcb(tp); 13168 union tcp_log_stackspecific log; 13169 struct timeval ltv; 13170 char tcp_hdr_buf[60]; 13171 struct tcphdr *th; 13172 struct timespec ts; 13173 uint32_t orig_snd_una; 13174 uint8_t xx = 0; 13175 13176 #ifdef NETFLIX_HTTP_LOGGING 13177 struct http_sendfile_track *http_req; 13178 13179 if (SEQ_GT(ae->ack, tp->snd_una)) { 13180 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13181 } else { 13182 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13183 } 13184 #endif 13185 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13186 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13187 if (rack->rack_no_prr == 0) 13188 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13189 else 13190 log.u_bbr.flex1 = 0; 13191 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13192 log.u_bbr.use_lt_bw <<= 1; 13193 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13194 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13195 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13196 log.u_bbr.pkts_out = tp->t_maxseg; 13197 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13198 log.u_bbr.flex7 = 1; 13199 log.u_bbr.lost = ae->flags; 13200 log.u_bbr.cwnd_gain = ackval; 13201 log.u_bbr.pacing_gain = 0x2; 13202 if (ae->flags & TSTMP_HDWR) { 13203 /* Record the hardware timestamp if present */ 13204 log.u_bbr.flex3 = M_TSTMP; 13205 ts.tv_sec = ae->timestamp / 1000000000; 13206 ts.tv_nsec = ae->timestamp % 1000000000; 13207 ltv.tv_sec = ts.tv_sec; 13208 ltv.tv_usec = ts.tv_nsec / 1000; 13209 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13210 } else if (ae->flags & TSTMP_LRO) { 13211 /* Record the LRO the arrival timestamp */ 13212 log.u_bbr.flex3 = M_TSTMP_LRO; 13213 ts.tv_sec = ae->timestamp / 1000000000; 13214 ts.tv_nsec = ae->timestamp % 1000000000; 13215 ltv.tv_sec = ts.tv_sec; 13216 ltv.tv_usec = ts.tv_nsec / 1000; 13217 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13218 } 13219 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13220 /* Log the rcv time */ 13221 log.u_bbr.delRate = ae->timestamp; 13222 #ifdef NETFLIX_HTTP_LOGGING 13223 log.u_bbr.applimited = tp->t_http_closed; 13224 log.u_bbr.applimited <<= 8; 13225 log.u_bbr.applimited |= tp->t_http_open; 13226 log.u_bbr.applimited <<= 8; 13227 log.u_bbr.applimited |= tp->t_http_req; 13228 if (http_req) { 13229 /* Copy out any client req info */ 13230 /* seconds */ 13231 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13232 /* useconds */ 13233 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13234 log.u_bbr.rttProp = http_req->timestamp; 13235 log.u_bbr.cur_del_rate = http_req->start; 13236 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13237 log.u_bbr.flex8 |= 1; 13238 } else { 13239 log.u_bbr.flex8 |= 2; 13240 log.u_bbr.bw_inuse = http_req->end; 13241 } 13242 log.u_bbr.flex6 = http_req->start_seq; 13243 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13244 log.u_bbr.flex8 |= 4; 13245 log.u_bbr.epoch = http_req->end_seq; 13246 } 13247 } 13248 #endif 13249 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13250 th = (struct tcphdr *)tcp_hdr_buf; 13251 th->th_seq = ae->seq; 13252 th->th_ack = ae->ack; 13253 th->th_win = ae->win; 13254 /* Now fill in the ports */ 13255 th->th_sport = inp->inp_fport; 13256 th->th_dport = 
inp->inp_lport; 13257 tcp_set_flags(th, ae->flags); 13258 /* Now do we have a timestamp option? */ 13259 if (ae->flags & HAS_TSTMP) { 13260 u_char *cp; 13261 uint32_t val; 13262 13263 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13264 cp = (u_char *)(th + 1); 13265 *cp = TCPOPT_NOP; 13266 cp++; 13267 *cp = TCPOPT_NOP; 13268 cp++; 13269 *cp = TCPOPT_TIMESTAMP; 13270 cp++; 13271 *cp = TCPOLEN_TIMESTAMP; 13272 cp++; 13273 val = htonl(ae->ts_value); 13274 bcopy((char *)&val, 13275 (char *)cp, sizeof(uint32_t)); 13276 val = htonl(ae->ts_echo); 13277 bcopy((char *)&val, 13278 (char *)(cp + 4), sizeof(uint32_t)); 13279 } else 13280 th->th_off = (sizeof(struct tcphdr) >> 2); 13281 13282 /* 13283 * For sane logging we need to play a little trick. 13284 * If the ack were fully processed we would have moved 13285 * snd_una to high_seq, but since compressed acks are 13286 * processed in two phases, at this point (logging) snd_una 13287 * won't be advanced. So we would see multiple acks showing 13288 * the advancement. We can prevent that by "pretending" that 13289 * snd_una was advanced and then un-advancing it so that the 13290 * logging code has the right value for tlb_snd_una. 13291 */ 13292 if (tp->snd_una != high_seq) { 13293 orig_snd_una = tp->snd_una; 13294 tp->snd_una = high_seq; 13295 xx = 1; 13296 } else 13297 xx = 0; 13298 TCP_LOG_EVENTP(tp, th, 13299 &tptosocket(tp)->so_rcv, 13300 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, 13301 0, &log, true, &ltv); 13302 if (xx) { 13303 tp->snd_una = orig_snd_una; 13304 } 13305 } 13306 13307 } 13308 13309 static void 13310 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13311 { 13312 uint32_t us_rtt; 13313 /* 13314 * A persist or keep-alive was forced out, update our 13315 * min rtt time. Note we now worry about lost responses. 13316 * When a subsequent keep-alive or persist times out 13317 * and forced_ack is still on, then the last probe 13318 * was not responded to. In such cases we have a 13319 * sysctl that controls the behavior. Either we apply 13320 * the rtt but with reduced confidence (0). Or we just 13321 * plain don't apply the rtt estimate. Having data flow 13322 * will clear the probe_not_answered flag i.e. cum-ack 13323 * move forward <or> exiting and reentering persists. 13324 */ 13325 13326 rack->forced_ack = 0; 13327 rack->rc_tp->t_rxtshift = 0; 13328 if ((rack->rc_in_persist && 13329 (tiwin == rack->rc_tp->snd_wnd)) || 13330 (rack->rc_in_persist == 0)) { 13331 /* 13332 * In persists only apply the RTT update if this is 13333 * a response to our window probe. And that 13334 * means the rwnd sent must match the current 13335 * snd_wnd. If it does not, then we got a 13336 * window update ack instead. For keepalive 13337 * we allow the answer no matter what the window. 13338 * 13339 * Note that if the probe_not_answered is set then 13340 * the forced_ack_ts is the oldest one i.e. the first 13341 * probe sent that might have been lost. This assures 13342 * us that if we do calculate an RTT it errs on the long 13343 * side rather than being some artificially short value.
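 * Concretely, the measurement taken below is
 * us_rtt = us_cts - forced_ack_ts (clamped to at least 1 usec).
 * A probe answered on the first try is fed to the RTT machinery
 * with confidence 3; if probe_not_answered is set it is only
 * applied, with confidence 0, when the
 * rack_apply_rtt_with_reduced_conf sysctl allows it.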
13344 */ 13345 if (rack->rc_in_persist) 13346 counter_u64_add(rack_persists_acks, 1); 13347 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13348 if (us_rtt == 0) 13349 us_rtt = 1; 13350 if (rack->probe_not_answered == 0) { 13351 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13352 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13353 } else { 13354 /* We have a retransmitted probe here too */ 13355 if (rack_apply_rtt_with_reduced_conf) { 13356 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13357 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13358 } 13359 } 13360 } 13361 } 13362 13363 static int 13364 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13365 { 13366 /* 13367 * Handle a "special" compressed ack mbuf. Each incoming 13368 * ack has only four possible dispositions: 13369 * 13370 * A) It moves the cum-ack forward 13371 * B) It is behind the cum-ack. 13372 * C) It is a window-update ack. 13373 * D) It is a dup-ack. 13374 * 13375 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13376 * in the incoming mbuf. We also need to still pay attention 13377 * to nxt_pkt since there may be another packet after this 13378 * one. 13379 */ 13380 #ifdef TCP_ACCOUNTING 13381 uint64_t ts_val; 13382 uint64_t rdstc; 13383 #endif 13384 int segsiz; 13385 struct timespec ts; 13386 struct tcp_rack *rack; 13387 struct tcp_ackent *ae; 13388 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13389 int cnt, i, did_out, ourfinisacked = 0; 13390 struct tcpopt to_holder, *to = NULL; 13391 #ifdef TCP_ACCOUNTING 13392 int win_up_req = 0; 13393 #endif 13394 int nsegs = 0; 13395 int under_pacing = 1; 13396 int recovery = 0; 13397 #ifdef TCP_ACCOUNTING 13398 sched_pin(); 13399 #endif 13400 rack = (struct tcp_rack *)tp->t_fb_ptr; 13401 if (rack->gp_ready && 13402 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13403 under_pacing = 0; 13404 else 13405 under_pacing = 1; 13406 13407 if (rack->r_state != tp->t_state) 13408 rack_set_state(tp, rack); 13409 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13410 (tp->t_flags & TF_GPUTINPROG)) { 13411 /* 13412 * We have a goodput in progress 13413 * and we have entered a late state. 13414 * Do we have enough data in the sb 13415 * to handle the GPUT request? 13416 */ 13417 uint32_t bytes; 13418 13419 bytes = tp->gput_ack - tp->gput_seq; 13420 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13421 bytes += tp->gput_seq - tp->snd_una; 13422 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 13423 /* 13424 * There are not enough bytes in the socket 13425 * buffer that have been sent to cover this 13426 * measurement. Cancel it. 
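 * Worked example with illustrative values: gput_seq = 1000,
 * gput_ack = 51000 and snd_una = 1000 means 50000 bytes must still
 * be sitting in the send buffer; if sbavail() only reports 30000
 * the measurement can never complete, so it is cancelled and
 * logged (method 18) below.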
13427 */ 13428 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13429 rack->r_ctl.rc_gp_srtt /*flex1*/, 13430 tp->gput_seq, 13431 0, 0, 18, __LINE__, NULL, 0); 13432 tp->t_flags &= ~TF_GPUTINPROG; 13433 } 13434 } 13435 to = &to_holder; 13436 to->to_flags = 0; 13437 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13438 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13439 cnt = m->m_len / sizeof(struct tcp_ackent); 13440 counter_u64_add(rack_multi_single_eq, cnt); 13441 high_seq = tp->snd_una; 13442 the_win = tp->snd_wnd; 13443 win_seq = tp->snd_wl1; 13444 win_upd_ack = tp->snd_wl2; 13445 cts = tcp_tv_to_usectick(tv); 13446 ms_cts = tcp_tv_to_mssectick(tv); 13447 rack->r_ctl.rc_rcvtime = cts; 13448 segsiz = ctf_fixed_maxseg(tp); 13449 if ((rack->rc_gp_dyn_mul) && 13450 (rack->use_fixed_rate == 0) && 13451 (rack->rc_always_pace)) { 13452 /* Check in on probertt */ 13453 rack_check_probe_rtt(rack, cts); 13454 } 13455 for (i = 0; i < cnt; i++) { 13456 #ifdef TCP_ACCOUNTING 13457 ts_val = get_cyclecount(); 13458 #endif 13459 rack_clear_rate_sample(rack); 13460 ae = ((mtod(m, struct tcp_ackent *)) + i); 13461 /* Setup the window */ 13462 tiwin = ae->win << tp->snd_scale; 13463 if (tiwin > rack->r_ctl.rc_high_rwnd) 13464 rack->r_ctl.rc_high_rwnd = tiwin; 13465 /* figure out the type of ack */ 13466 if (SEQ_LT(ae->ack, high_seq)) { 13467 /* Case B*/ 13468 ae->ack_val_set = ACK_BEHIND; 13469 } else if (SEQ_GT(ae->ack, high_seq)) { 13470 /* Case A */ 13471 ae->ack_val_set = ACK_CUMACK; 13472 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13473 /* Case D */ 13474 ae->ack_val_set = ACK_DUPACK; 13475 } else { 13476 /* Case C */ 13477 ae->ack_val_set = ACK_RWND; 13478 } 13479 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13480 /* Validate timestamp */ 13481 if (ae->flags & HAS_TSTMP) { 13482 /* Setup for a timestamp */ 13483 to->to_flags = TOF_TS; 13484 ae->ts_echo -= tp->ts_offset; 13485 to->to_tsecr = ae->ts_echo; 13486 to->to_tsval = ae->ts_value; 13487 /* 13488 * If echoed timestamp is later than the current time, fall back to 13489 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13490 * were used when this connection was established. 13491 */ 13492 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13493 to->to_tsecr = 0; 13494 if (tp->ts_recent && 13495 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13496 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13497 #ifdef TCP_ACCOUNTING 13498 rdstc = get_cyclecount(); 13499 if (rdstc > ts_val) { 13500 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13501 (rdstc - ts_val)); 13502 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13503 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13504 } 13505 } 13506 #endif 13507 continue; 13508 } 13509 } 13510 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13511 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13512 tp->ts_recent_age = tcp_ts_getticks(); 13513 tp->ts_recent = ae->ts_value; 13514 } 13515 } else { 13516 /* Setup for a no options */ 13517 to->to_flags = 0; 13518 } 13519 /* Update the rcv time and perform idle reduction possibly */ 13520 if (tp->t_idle_reduce && 13521 (tp->snd_max == tp->snd_una) && 13522 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13523 counter_u64_add(rack_input_idle_reduces, 1); 13524 rack_cc_after_idle(rack, tp); 13525 } 13526 tp->t_rcvtime = ticks; 13527 /* Now what about ECN of a chain of pure ACKs? 
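 * Each compressed entry keeps the ECN codepoint it arrived with in
 * ae->codepoint, so every entry is still run through
 * tcp_ecn_input_segment() below and a CE mark anywhere in the chain
 * can raise a CC_ECN congestion signal, just as on the
 * uncompressed path.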
*/ 13528 if (tcp_ecn_input_segment(tp, ae->flags, 0, 13529 tcp_packets_this_ack(tp, ae->ack), 13530 ae->codepoint)) 13531 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13532 #ifdef TCP_ACCOUNTING 13533 /* Count for the specific type of ack in */ 13534 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13535 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13536 tp->tcp_cnt_counters[ae->ack_val_set]++; 13537 } 13538 #endif 13539 /* 13540 * Note how we could move up these in the determination 13541 * above, but we don't so that way the timestamp checks (and ECN) 13542 * is done first before we do any processing on the ACK. 13543 * The non-compressed path through the code has this 13544 * weakness (noted by @jtl) that it actually does some 13545 * processing before verifying the timestamp information. 13546 * We don't take that path here which is why we set 13547 * the ack_val_set first, do the timestamp and ecn 13548 * processing, and then look at what we have setup. 13549 */ 13550 if (ae->ack_val_set == ACK_BEHIND) { 13551 /* 13552 * Case B flag reordering, if window is not closed 13553 * or it could be a keep-alive or persists 13554 */ 13555 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13556 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13557 } 13558 } else if (ae->ack_val_set == ACK_DUPACK) { 13559 /* Case D */ 13560 rack_strike_dupack(rack); 13561 } else if (ae->ack_val_set == ACK_RWND) { 13562 /* Case C */ 13563 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13564 ts.tv_sec = ae->timestamp / 1000000000; 13565 ts.tv_nsec = ae->timestamp % 1000000000; 13566 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13567 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13568 } else { 13569 rack->r_ctl.act_rcv_time = *tv; 13570 } 13571 if (rack->forced_ack) { 13572 rack_handle_probe_response(rack, tiwin, 13573 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13574 } 13575 #ifdef TCP_ACCOUNTING 13576 win_up_req = 1; 13577 #endif 13578 win_upd_ack = ae->ack; 13579 win_seq = ae->seq; 13580 the_win = tiwin; 13581 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13582 } else { 13583 /* Case A */ 13584 if (SEQ_GT(ae->ack, tp->snd_max)) { 13585 /* 13586 * We just send an ack since the incoming 13587 * ack is beyond the largest seq we sent. 
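 * The reply is rate limited: ctf_ack_war_checks() only sets
 * TF_ACKNOW when we are under the ack-war limits tracked in
 * challenge_ack_ts/challenge_ack_cnt, so a flood of acks above
 * snd_max cannot goad us into an ack war.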
13588 */ 13589 if ((tp->t_flags & TF_ACKNOW) == 0) { 13590 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13591 if (tp->t_flags & TF_ACKNOW) 13592 rack->r_wanted_output = 1; 13593 } 13594 } else { 13595 nsegs++; 13596 /* If the window changed setup to update */ 13597 if (tiwin != tp->snd_wnd) { 13598 win_upd_ack = ae->ack; 13599 win_seq = ae->seq; 13600 the_win = tiwin; 13601 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13602 } 13603 #ifdef TCP_ACCOUNTING 13604 /* Account for the acks */ 13605 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13606 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13607 } 13608 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13609 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13610 #endif 13611 high_seq = ae->ack; 13612 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13613 union tcp_log_stackspecific log; 13614 struct timeval tv; 13615 13616 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13617 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13618 log.u_bbr.flex1 = high_seq; 13619 log.u_bbr.flex2 = rack->r_ctl.roundends; 13620 log.u_bbr.flex3 = rack->r_ctl.current_round; 13621 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13622 log.u_bbr.flex8 = 8; 13623 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13624 0, &log, false, NULL, NULL, 0, &tv); 13625 } 13626 /* 13627 * The draft (v3) calls for us to use SEQ_GEQ, but that 13628 * causes issues when we are just going app limited. Lets 13629 * instead use SEQ_GT <or> where its equal but more data 13630 * is outstanding. 13631 */ 13632 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13633 ((high_seq == rack->r_ctl.roundends) && 13634 SEQ_GT(tp->snd_max, tp->snd_una))) { 13635 rack->r_ctl.current_round++; 13636 rack->r_ctl.roundends = tp->snd_max; 13637 if (CC_ALGO(tp)->newround != NULL) { 13638 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13639 } 13640 } 13641 /* Setup our act_rcv_time */ 13642 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13643 ts.tv_sec = ae->timestamp / 1000000000; 13644 ts.tv_nsec = ae->timestamp % 1000000000; 13645 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13646 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13647 } else { 13648 rack->r_ctl.act_rcv_time = *tv; 13649 } 13650 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13651 if (rack->rc_dsack_round_seen) { 13652 /* Is the dsack round over?
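 * (Bookkeeping sketch, as understood here:) the round was opened
 * when the first DSACK was noted and dsack_round_end recorded the
 * then-current snd_max; once the cum-ack reaches that point the
 * round closes so a later DSACK can start a fresh one.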
*/ 13653 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13654 /* Yes it is */ 13655 rack->rc_dsack_round_seen = 0; 13656 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13657 } 13658 } 13659 } 13660 } 13661 /* And lets be sure to commit the rtt measurements for this ack */ 13662 tcp_rack_xmit_timer_commit(rack, tp); 13663 #ifdef TCP_ACCOUNTING 13664 rdstc = get_cyclecount(); 13665 if (rdstc > ts_val) { 13666 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13667 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13668 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13669 if (ae->ack_val_set == ACK_CUMACK) 13670 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13671 } 13672 } 13673 #endif 13674 } 13675 #ifdef TCP_ACCOUNTING 13676 ts_val = get_cyclecount(); 13677 #endif 13678 /* Tend to any collapsed window */ 13679 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 13680 /* The peer collapsed the window */ 13681 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__); 13682 } else if (rack->rc_has_collapsed) 13683 rack_un_collapse_window(rack, __LINE__); 13684 if ((rack->r_collapse_point_valid) && 13685 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 13686 rack->r_collapse_point_valid = 0; 13687 acked_amount = acked = (high_seq - tp->snd_una); 13688 if (acked) { 13689 /* 13690 * Clear the probe not answered flag 13691 * since cum-ack moved forward. 13692 */ 13693 rack->probe_not_answered = 0; 13694 if (rack->sack_attack_disable == 0) 13695 rack_do_decay(rack); 13696 if (acked >= segsiz) { 13697 /* 13698 * You only get credit for 13699 * MSS and greater (and you get extra 13700 * credit for larger cum-ack moves). 13701 */ 13702 int ac; 13703 13704 ac = acked / segsiz; 13705 rack->r_ctl.ack_count += ac; 13706 counter_u64_add(rack_ack_total, ac); 13707 } 13708 if (rack->r_ctl.ack_count > 0xfff00000) { 13709 /* 13710 * reduce the number to keep us under 13711 * a uint32_t. 13712 */ 13713 rack->r_ctl.ack_count /= 2; 13714 rack->r_ctl.sack_count /= 2; 13715 } 13716 if (tp->t_flags & TF_NEEDSYN) { 13717 /* 13718 * T/TCP: Connection was half-synchronized, and our SYN has 13719 * been ACK'd (so connection is now fully synchronized). Go 13720 * to non-starred state, increment snd_una for ACK of SYN, 13721 * and check if we can do window scaling. 13722 */ 13723 tp->t_flags &= ~TF_NEEDSYN; 13724 tp->snd_una++; 13725 acked_amount = acked = (high_seq - tp->snd_una); 13726 } 13727 if (acked > sbavail(&so->so_snd)) 13728 acked_amount = sbavail(&so->so_snd); 13729 #ifdef NETFLIX_EXP_DETECTION 13730 /* 13731 * We only care on a cum-ack move if we are in a sack-disabled 13732 * state. We have already added in to the ack_count, and we never 13733 * would disable on a cum-ack move, so we only care to do the 13734 * detection if it may "undo" it, i.e. we were in disabled already. 
13735 */ 13736 if (rack->sack_attack_disable) 13737 rack_do_detection(tp, rack, acked_amount, segsiz); 13738 #endif 13739 if (IN_FASTRECOVERY(tp->t_flags) && 13740 (rack->rack_no_prr == 0)) 13741 rack_update_prr(tp, rack, acked_amount, high_seq); 13742 if (IN_RECOVERY(tp->t_flags)) { 13743 if (SEQ_LT(high_seq, tp->snd_recover) && 13744 (SEQ_LT(high_seq, tp->snd_max))) { 13745 tcp_rack_partialack(tp); 13746 } else { 13747 rack_post_recovery(tp, high_seq); 13748 recovery = 1; 13749 } 13750 } 13751 /* Handle the rack-log-ack part (sendmap) */ 13752 if ((sbused(&so->so_snd) == 0) && 13753 (acked > acked_amount) && 13754 (tp->t_state >= TCPS_FIN_WAIT_1) && 13755 (tp->t_flags & TF_SENTFIN)) { 13756 /* 13757 * We must be sure our fin 13758 * was sent and acked (we can be 13759 * in FIN_WAIT_1 without having 13760 * sent the fin). 13761 */ 13762 ourfinisacked = 1; 13763 /* 13764 * Lets make sure snd_una is updated 13765 * since most likely acked_amount = 0 (it 13766 * should be). 13767 */ 13768 tp->snd_una = high_seq; 13769 } 13770 /* Did we make a RTO error? */ 13771 if ((tp->t_flags & TF_PREVVALID) && 13772 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13773 tp->t_flags &= ~TF_PREVVALID; 13774 if (tp->t_rxtshift == 1 && 13775 (int)(ticks - tp->t_badrxtwin) < 0) 13776 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13777 } 13778 /* Handle the data in the socket buffer */ 13779 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13780 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13781 if (acked_amount > 0) { 13782 struct mbuf *mfree; 13783 13784 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13785 SOCKBUF_LOCK(&so->so_snd); 13786 mfree = sbcut_locked(&so->so_snd, acked_amount); 13787 tp->snd_una = high_seq; 13788 /* Note we want to hold the sb lock through the sendmap adjust */ 13789 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13790 /* Wake up the socket if we have room to write more */ 13791 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13792 sowwakeup_locked(so); 13793 m_freem(mfree); 13794 } 13795 /* update progress */ 13796 tp->t_acktime = ticks; 13797 rack_log_progress_event(rack, tp, tp->t_acktime, 13798 PROGRESS_UPDATE, __LINE__); 13799 /* Clear out shifts and such */ 13800 tp->t_rxtshift = 0; 13801 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13802 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13803 rack->rc_tlp_in_progress = 0; 13804 rack->r_ctl.rc_tlp_cnt_out = 0; 13805 /* Send recover and snd_nxt must be dragged along */ 13806 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13807 tp->snd_recover = tp->snd_una; 13808 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13809 tp->snd_nxt = tp->snd_una; 13810 /* 13811 * If the RXT timer is running we want to 13812 * stop it, so we can restart a TLP (or new RXT). 
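 * The cum-ack move makes the old RXT baseline stale; after the
 * cancel below, the timer start/audit logic further down re-arms
 * whichever timer is now appropriate (typically a TLP while data
 * remains outstanding).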
13813 */ 13814 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13815 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13816 #ifdef NETFLIX_HTTP_LOGGING 13817 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13818 #endif 13819 tp->snd_wl2 = high_seq; 13820 tp->t_dupacks = 0; 13821 if (under_pacing && 13822 (rack->use_fixed_rate == 0) && 13823 (rack->in_probe_rtt == 0) && 13824 rack->rc_gp_dyn_mul && 13825 rack->rc_always_pace) { 13826 /* Check if we are dragging bottom */ 13827 rack_check_bottom_drag(tp, rack, so, acked); 13828 } 13829 if (tp->snd_una == tp->snd_max) { 13830 tp->t_flags &= ~TF_PREVVALID; 13831 rack->r_ctl.retran_during_recovery = 0; 13832 rack->r_ctl.dsack_byte_cnt = 0; 13833 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13834 if (rack->r_ctl.rc_went_idle_time == 0) 13835 rack->r_ctl.rc_went_idle_time = 1; 13836 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13837 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13838 tp->t_acktime = 0; 13839 /* Set so we might enter persists... */ 13840 rack->r_wanted_output = 1; 13841 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13842 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13843 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13844 (sbavail(&so->so_snd) == 0) && 13845 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13846 /* 13847 * The socket was gone and the 13848 * peer sent data (not now in the past), time to 13849 * reset him. 13850 */ 13851 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13852 /* tcp_close will kill the inp pre-log the Reset */ 13853 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13854 #ifdef TCP_ACCOUNTING 13855 rdstc = get_cyclecount(); 13856 if (rdstc > ts_val) { 13857 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13858 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13859 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13860 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13861 } 13862 } 13863 #endif 13864 m_freem(m); 13865 tp = tcp_close(tp); 13866 if (tp == NULL) { 13867 #ifdef TCP_ACCOUNTING 13868 sched_unpin(); 13869 #endif 13870 return (1); 13871 } 13872 /* 13873 * We would normally do drop-with-reset which would 13874 * send back a reset. We can't since we don't have 13875 * all the needed bits. Instead lets arrange for 13876 * a call to tcp_output(). That way since we 13877 * are in the closed state we will generate a reset. 13878 * 13879 * Note if tcp_accounting is on we don't unpin since 13880 * we do that after the goto label. 13881 */ 13882 goto send_out_a_rst; 13883 } 13884 if ((sbused(&so->so_snd) == 0) && 13885 (tp->t_state >= TCPS_FIN_WAIT_1) && 13886 (tp->t_flags & TF_SENTFIN)) { 13887 /* 13888 * If we can't receive any more data, then closing user can 13889 * proceed. Starting the timer is contrary to the 13890 * specification, but if we don't get a FIN we'll hang 13891 * forever. 13892 * 13893 */ 13894 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13895 soisdisconnected(so); 13896 tcp_timer_activate(tp, TT_2MSL, 13897 (tcp_fast_finwait2_recycle ? 13898 tcp_finwait2_timeout : 13899 TP_MAXIDLE(tp))); 13900 } 13901 if (ourfinisacked == 0) { 13902 /* 13903 * We don't change to fin-wait-2 if we have our fin acked 13904 * which means we are probably in TCPS_CLOSING. 
13905 */ 13906 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13907 } 13908 } 13909 } 13910 /* Wake up the socket if we have room to write more */ 13911 if (sbavail(&so->so_snd)) { 13912 rack->r_wanted_output = 1; 13913 if (ctf_progress_timeout_check(tp, true)) { 13914 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13915 tp, tick, PROGRESS_DROP, __LINE__); 13916 /* 13917 * We cheat here and don't send a RST, we should send one 13918 * when the pacer drops the connection. 13919 */ 13920 #ifdef TCP_ACCOUNTING 13921 rdstc = get_cyclecount(); 13922 if (rdstc > ts_val) { 13923 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13924 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13925 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13926 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13927 } 13928 } 13929 sched_unpin(); 13930 #endif 13931 (void)tcp_drop(tp, ETIMEDOUT); 13932 m_freem(m); 13933 return (1); 13934 } 13935 } 13936 if (ourfinisacked) { 13937 switch(tp->t_state) { 13938 case TCPS_CLOSING: 13939 #ifdef TCP_ACCOUNTING 13940 rdstc = get_cyclecount(); 13941 if (rdstc > ts_val) { 13942 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13943 (rdstc - ts_val)); 13944 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13945 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13946 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13947 } 13948 } 13949 sched_unpin(); 13950 #endif 13951 tcp_twstart(tp); 13952 m_freem(m); 13953 return (1); 13954 break; 13955 case TCPS_LAST_ACK: 13956 #ifdef TCP_ACCOUNTING 13957 rdstc = get_cyclecount(); 13958 if (rdstc > ts_val) { 13959 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13960 (rdstc - ts_val)); 13961 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13962 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13963 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13964 } 13965 } 13966 sched_unpin(); 13967 #endif 13968 tp = tcp_close(tp); 13969 ctf_do_drop(m, tp); 13970 return (1); 13971 break; 13972 case TCPS_FIN_WAIT_1: 13973 #ifdef TCP_ACCOUNTING 13974 rdstc = get_cyclecount(); 13975 if (rdstc > ts_val) { 13976 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13977 (rdstc - ts_val)); 13978 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13979 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13980 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13981 } 13982 } 13983 #endif 13984 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13985 soisdisconnected(so); 13986 tcp_timer_activate(tp, TT_2MSL, 13987 (tcp_fast_finwait2_recycle ? 13988 tcp_finwait2_timeout : 13989 TP_MAXIDLE(tp))); 13990 } 13991 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13992 break; 13993 default: 13994 break; 13995 } 13996 } 13997 if (rack->r_fast_output) { 13998 /* 13999 * We re doing fast output.. can we expand that? 
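 * (Sketch of the intent, as understood here:) the newly cum-acked
 * bytes free both cwnd and peer window, so rack_gain_for_fastoutput()
 * below credits acked_amount to the remaining fast-send budget,
 * letting the fast path keep streaming without rebuilding its send
 * block.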
14000 */ 14001 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 14002 } 14003 #ifdef TCP_ACCOUNTING 14004 rdstc = get_cyclecount(); 14005 if (rdstc > ts_val) { 14006 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 14007 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14008 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14009 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14010 } 14011 } 14012 14013 } else if (win_up_req) { 14014 rdstc = get_cyclecount(); 14015 if (rdstc > ts_val) { 14016 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 14017 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14018 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 14019 } 14020 } 14021 #endif 14022 } 14023 /* Now is there a next packet, if so we are done */ 14024 m_freem(m); 14025 did_out = 0; 14026 if (nxt_pkt) { 14027 #ifdef TCP_ACCOUNTING 14028 sched_unpin(); 14029 #endif 14030 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 14031 return (0); 14032 } 14033 rack_handle_might_revert(tp, rack); 14034 ctf_calc_rwin(so, tp); 14035 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14036 send_out_a_rst: 14037 if (tcp_output(tp) < 0) { 14038 #ifdef TCP_ACCOUNTING 14039 sched_unpin(); 14040 #endif 14041 return (1); 14042 } 14043 did_out = 1; 14044 } 14045 rack_free_trim(rack); 14046 #ifdef TCP_ACCOUNTING 14047 sched_unpin(); 14048 #endif 14049 rack_timer_audit(tp, rack, &so->so_snd); 14050 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 14051 return (0); 14052 } 14053 14054 14055 static int 14056 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 14057 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 14058 int32_t nxt_pkt, struct timeval *tv) 14059 { 14060 struct inpcb *inp = tptoinpcb(tp); 14061 #ifdef TCP_ACCOUNTING 14062 uint64_t ts_val; 14063 #endif 14064 int32_t thflags, retval, did_out = 0; 14065 int32_t way_out = 0; 14066 /* 14067 * cts - is the current time from tv (caller gets ts) in microseconds. 14068 * ms_cts - is the current time from tv in milliseconds. 14069 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 14070 */ 14071 uint32_t cts, us_cts, ms_cts; 14072 uint32_t tiwin, high_seq; 14073 struct timespec ts; 14074 struct tcpopt to; 14075 struct tcp_rack *rack; 14076 struct rack_sendmap *rsm; 14077 int32_t prev_state = 0; 14078 #ifdef TCP_ACCOUNTING 14079 int ack_val_set = 0xf; 14080 #endif 14081 int nsegs; 14082 14083 NET_EPOCH_ASSERT(); 14084 INP_WLOCK_ASSERT(inp); 14085 14086 /* 14087 * tv passed from common code is from either M_TSTMP_LRO or 14088 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14089 */ 14090 rack = (struct tcp_rack *)tp->t_fb_ptr; 14091 if (m->m_flags & M_ACKCMP) { 14092 /* 14093 * All compressed ack's are ack's by definition so 14094 * remove any ack required flag and then do the processing. 14095 */ 14096 rack->rc_ack_required = 0; 14097 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14098 } 14099 if (m->m_flags & M_ACKCMP) { 14100 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 14101 } 14102 cts = tcp_tv_to_usectick(tv); 14103 ms_cts = tcp_tv_to_mssectick(tv); 14104 nsegs = m->m_pkthdr.lro_nsegs; 14105 counter_u64_add(rack_proc_non_comp_ack, 1); 14106 thflags = tcp_get_flags(th); 14107 #ifdef TCP_ACCOUNTING 14108 sched_pin(); 14109 if (thflags & TH_ACK) 14110 ts_val = get_cyclecount(); 14111 #endif 14112 if ((m->m_flags & M_TSTMP) || 14113 (m->m_flags & M_TSTMP_LRO)) { 14114 mbuf_tstmp2timespec(m, &ts); 14115 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14116 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14117 } else 14118 rack->r_ctl.act_rcv_time = *tv; 14119 kern_prefetch(rack, &prev_state); 14120 prev_state = 0; 14121 /* 14122 * Unscale the window into a 32-bit value. For the SYN_SENT state 14123 * the scale is zero. 14124 */ 14125 tiwin = th->th_win << tp->snd_scale; 14126 #ifdef TCP_ACCOUNTING 14127 if (thflags & TH_ACK) { 14128 /* 14129 * We have a tradeoff here. We can either do what we are 14130 * doing i.e. pinning to this CPU and then doing the accounting 14131 * <or> we could do a critical enter, setup the rdtsc and cpu 14132 * as in below, and then validate we are on the same CPU on 14133 * exit. I have chosen to not do the critical enter since 14134 * that often will gain you a context switch, and instead lock 14135 * us (line above this if) to the same CPU with sched_pin(). This 14136 * means we may be context switched out for a higher priority 14137 * interrupt but we won't be moved to another CPU. 14138 * 14139 * If this occurs (which it won't very often since we most likely 14140 * are running this code in interrupt context and only a higher 14141 * priority will bump us ... clock?) we will falsely add in 14142 * to the time the interrupt processing time plus the ack processing 14143 * time. This is ok since its a rare event. 14144 */ 14145 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14146 ctf_fixed_maxseg(tp)); 14147 } 14148 #endif 14149 /* 14150 * Parse options on any incoming segment. 14151 */ 14152 memset(&to, 0, sizeof(to)); 14153 tcp_dooptions(&to, (u_char *)(th + 1), 14154 (th->th_off << 2) - sizeof(struct tcphdr), 14155 (thflags & TH_SYN) ? TO_SYN : 0); 14156 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14157 __func__)); 14158 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14159 __func__)); 14160 14161 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14162 (tp->t_flags & TF_GPUTINPROG)) { 14163 /* 14164 * We have a goodput in progress 14165 * and we have entered a late state. 14166 * Do we have enough data in the sb 14167 * to handle the GPUT request? 14168 */ 14169 uint32_t bytes; 14170 14171 bytes = tp->gput_ack - tp->gput_seq; 14172 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14173 bytes += tp->gput_seq - tp->snd_una; 14174 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 14175 /* 14176 * There are not enough bytes in the socket 14177 * buffer that have been sent to cover this 14178 * measurement. Cancel it.
14179 */ 14180 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14181 rack->r_ctl.rc_gp_srtt /*flex1*/, 14182 tp->gput_seq, 14183 0, 0, 18, __LINE__, NULL, 0); 14184 tp->t_flags &= ~TF_GPUTINPROG; 14185 } 14186 } 14187 high_seq = th->th_ack; 14188 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14189 union tcp_log_stackspecific log; 14190 struct timeval ltv; 14191 #ifdef NETFLIX_HTTP_LOGGING 14192 struct http_sendfile_track *http_req; 14193 14194 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14195 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14196 } else { 14197 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14198 } 14199 #endif 14200 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14201 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14202 if (rack->rack_no_prr == 0) 14203 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14204 else 14205 log.u_bbr.flex1 = 0; 14206 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14207 log.u_bbr.use_lt_bw <<= 1; 14208 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14209 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14210 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14211 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14212 log.u_bbr.flex3 = m->m_flags; 14213 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14214 log.u_bbr.lost = thflags; 14215 log.u_bbr.pacing_gain = 0x1; 14216 #ifdef TCP_ACCOUNTING 14217 log.u_bbr.cwnd_gain = ack_val_set; 14218 #endif 14219 log.u_bbr.flex7 = 2; 14220 if (m->m_flags & M_TSTMP) { 14221 /* Record the hardware timestamp if present */ 14222 mbuf_tstmp2timespec(m, &ts); 14223 ltv.tv_sec = ts.tv_sec; 14224 ltv.tv_usec = ts.tv_nsec / 1000; 14225 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 14226 } else if (m->m_flags & M_TSTMP_LRO) { 14227 /* Record the LRO arrival timestamp */ 14228 mbuf_tstmp2timespec(m, &ts); 14229 ltv.tv_sec = ts.tv_sec; 14230 ltv.tv_usec = ts.tv_nsec / 1000; 14231 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 14232 } 14233 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 14234 /* Log the rcv time */ 14235 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14236 #ifdef NETFLIX_HTTP_LOGGING 14237 log.u_bbr.applimited = tp->t_http_closed; 14238 log.u_bbr.applimited <<= 8; 14239 log.u_bbr.applimited |= tp->t_http_open; 14240 log.u_bbr.applimited <<= 8; 14241 log.u_bbr.applimited |= tp->t_http_req; 14242 if (http_req) { 14243 /* Copy out any client req info */ 14244 /* seconds */ 14245 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14246 /* useconds */ 14247 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14248 log.u_bbr.rttProp = http_req->timestamp; 14249 log.u_bbr.cur_del_rate = http_req->start; 14250 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14251 log.u_bbr.flex8 |= 1; 14252 } else { 14253 log.u_bbr.flex8 |= 2; 14254 log.u_bbr.bw_inuse = http_req->end; 14255 } 14256 log.u_bbr.flex6 = http_req->start_seq; 14257 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14258 log.u_bbr.flex8 |= 4; 14259 log.u_bbr.epoch = http_req->end_seq; 14260 } 14261 } 14262 #endif 14263 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14264 tlen, &log, true, &ltv); 14265 } 14266 /* Remove ack required flag if set, we have one */ 14267 if (thflags & TH_ACK) 14268 rack->rc_ack_required = 0; 14269 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14270 way_out = 4; 14271 retval = 0; 14272 m_freem(m); 14273 goto done_with_input; 14274 } 14275 /* 14276 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14277 * check SEQ.ACK first as
described on page 66 of RFC 793, section 3.9. 14278 */ 14279 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14280 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14281 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14282 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14283 #ifdef TCP_ACCOUNTING 14284 sched_unpin(); 14285 #endif 14286 return (1); 14287 } 14288 /* 14289 * If timestamps were negotiated during SYN/ACK and a 14290 * segment without a timestamp is received, silently drop 14291 * the segment, unless it is a RST segment or missing timestamps are 14292 * tolerated. 14293 * See section 3.2 of RFC 7323. 14294 */ 14295 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14296 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14297 way_out = 5; 14298 retval = 0; 14299 m_freem(m); 14300 goto done_with_input; 14301 } 14302 14303 /* 14304 * Segment received on connection. Reset idle time and keep-alive 14305 * timer. XXX: This should be done after segment validation to 14306 * ignore broken/spoofed segs. 14307 */ 14308 if (tp->t_idle_reduce && 14309 (tp->snd_max == tp->snd_una) && 14310 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14311 counter_u64_add(rack_input_idle_reduces, 1); 14312 rack_cc_after_idle(rack, tp); 14313 } 14314 tp->t_rcvtime = ticks; 14315 #ifdef STATS 14316 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14317 #endif 14318 if (tiwin > rack->r_ctl.rc_high_rwnd) 14319 rack->r_ctl.rc_high_rwnd = tiwin; 14320 /* 14321 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14322 * this to occur after we've validated the segment. 14323 */ 14324 if (tcp_ecn_input_segment(tp, thflags, tlen, 14325 tcp_packets_this_ack(tp, th->th_ack), 14326 iptos)) 14327 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14328 14329 /* 14330 * If echoed timestamp is later than the current time, fall back to 14331 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14332 * were used when this connection was established. 14333 */ 14334 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14335 to.to_tsecr -= tp->ts_offset; 14336 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14337 to.to_tsecr = 0; 14338 } 14339 14340 /* 14341 * If its the first time in we need to take care of options and 14342 * verify we can do SACK for rack! 14343 */ 14344 if (rack->r_state == 0) { 14345 /* Should be init'd by rack_init() */ 14346 KASSERT(rack->rc_inp != NULL, 14347 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14348 if (rack->rc_inp == NULL) { 14349 rack->rc_inp = inp; 14350 } 14351 14352 /* 14353 * Process options only when we get SYN/ACK back. The SYN 14354 * case for incoming connections is handled in tcp_syncache. 14355 * According to RFC1323 the window field in a SYN (i.e., a 14356 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14357 * this is traditional behavior, may need to be cleaned up. 14358 */ 14359 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14360 /* Handle parallel SYN for ECN */ 14361 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14362 if ((to.to_flags & TOF_SCALE) && 14363 (tp->t_flags & TF_REQ_SCALE)) { 14364 tp->t_flags |= TF_RCVD_SCALE; 14365 tp->snd_scale = to.to_wscale; 14366 } else 14367 tp->t_flags &= ~TF_REQ_SCALE; 14368 /* 14369 * Initial send window. It will be updated with the 14370 * next incoming segment to the scaled value. 
14371 */ 14372 tp->snd_wnd = th->th_win; 14373 rack_validate_fo_sendwin_up(tp, rack); 14374 if ((to.to_flags & TOF_TS) && 14375 (tp->t_flags & TF_REQ_TSTMP)) { 14376 tp->t_flags |= TF_RCVD_TSTMP; 14377 tp->ts_recent = to.to_tsval; 14378 tp->ts_recent_age = cts; 14379 } else 14380 tp->t_flags &= ~TF_REQ_TSTMP; 14381 if (to.to_flags & TOF_MSS) { 14382 tcp_mss(tp, to.to_mss); 14383 } 14384 if ((tp->t_flags & TF_SACK_PERMIT) && 14385 (to.to_flags & TOF_SACKPERM) == 0) 14386 tp->t_flags &= ~TF_SACK_PERMIT; 14387 if (IS_FASTOPEN(tp->t_flags)) { 14388 if (to.to_flags & TOF_FASTOPEN) { 14389 uint16_t mss; 14390 14391 if (to.to_flags & TOF_MSS) 14392 mss = to.to_mss; 14393 else 14394 if ((inp->inp_vflag & INP_IPV6) != 0) 14395 mss = TCP6_MSS; 14396 else 14397 mss = TCP_MSS; 14398 tcp_fastopen_update_cache(tp, mss, 14399 to.to_tfo_len, to.to_tfo_cookie); 14400 } else 14401 tcp_fastopen_disable_path(tp); 14402 } 14403 } 14404 /* 14405 * At this point we are at the initial call. Here we decide 14406 * if we are doing RACK or not. We do this by seeing if 14407 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14408 * The code now does do dup-ack counting so if you don't 14409 * switch back you won't get rack & TLP, but you will still 14410 * get this stack. 14411 */ 14412 14413 if ((rack_sack_not_required == 0) && 14414 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14415 tcp_switch_back_to_default(tp); 14416 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14417 tlen, iptos); 14418 #ifdef TCP_ACCOUNTING 14419 sched_unpin(); 14420 #endif 14421 return (1); 14422 } 14423 tcp_set_hpts(inp); 14424 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14425 } 14426 if (thflags & TH_FIN) 14427 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14428 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14429 if ((rack->rc_gp_dyn_mul) && 14430 (rack->use_fixed_rate == 0) && 14431 (rack->rc_always_pace)) { 14432 /* Check in on probertt */ 14433 rack_check_probe_rtt(rack, us_cts); 14434 } 14435 rack_clear_rate_sample(rack); 14436 if ((rack->forced_ack) && 14437 ((tcp_get_flags(th) & TH_RST) == 0)) { 14438 rack_handle_probe_response(rack, tiwin, us_cts); 14439 } 14440 /* 14441 * This is the one exception case where we set the rack state 14442 * always. All other times (timers etc) we must have a rack-state 14443 * set (so we assure we have done the checks above for SACK). 14444 */ 14445 rack->r_ctl.rc_rcvtime = cts; 14446 if (rack->r_state != tp->t_state) 14447 rack_set_state(tp, rack); 14448 if (SEQ_GT(th->th_ack, tp->snd_una) && 14449 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14450 kern_prefetch(rsm, &prev_state); 14451 prev_state = rack->r_state; 14452 retval = (*rack->r_substate) (m, th, so, 14453 tp, &to, drop_hdrlen, 14454 tlen, tiwin, thflags, nxt_pkt, iptos); 14455 if (retval == 0) { 14456 /* 14457 * If retval is 1 the tcb is unlocked and most likely the tp 14458 * is gone. 14459 */ 14460 INP_WLOCK_ASSERT(inp); 14461 if ((rack->rc_gp_dyn_mul) && 14462 (rack->rc_always_pace) && 14463 (rack->use_fixed_rate == 0) && 14464 rack->in_probe_rtt && 14465 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14466 /* 14467 * If we are going for target, lets recheck before 14468 * we output. 14469 */ 14470 rack_check_probe_rtt(rack, us_cts); 14471 } 14472 if (rack->set_pacing_done_a_iw == 0) { 14473 /* How much has been acked? 
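 * We hold off sizing the pacing burst until roughly an initial
 * window of data has been cum-acked: more than 10 * maxseg, e.g.
 * about 14480 bytes with an illustrative 1448-byte segment size,
 * so the first pacing segment-size decision is not made off a
 * cold start.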
*/ 14474 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14475 /* We have enough to set in the pacing segment size */ 14476 rack->set_pacing_done_a_iw = 1; 14477 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14478 } 14479 } 14480 tcp_rack_xmit_timer_commit(rack, tp); 14481 #ifdef TCP_ACCOUNTING 14482 /* 14483 * If we set the ack_val_set to what ack processing we are doing 14484 * we also want to track how many cycles we burned. Note 14485 * the bits after tcp_output we let be "free". This is because 14486 * we are also tracking the tcp_output times as well. Note the 14487 * use of 0xf here since we only have 11 counters (0 - 0xa) and 14488 * 0xf cannot be returned and is what we initialize it to, to 14489 * indicate we are not doing the tabulations. 14490 */ 14491 if (ack_val_set != 0xf) { 14492 uint64_t crtsc; 14493 14494 crtsc = get_cyclecount(); 14495 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14496 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14497 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14498 } 14499 } 14500 #endif 14501 if (nxt_pkt == 0) { 14502 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14503 do_output_now: 14504 if (tcp_output(tp) < 0) 14505 return (1); 14506 did_out = 1; 14507 } 14508 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14509 rack_free_trim(rack); 14510 } 14511 /* Update any rounds needed */ 14512 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14513 union tcp_log_stackspecific log; 14514 struct timeval tv; 14515 14516 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14517 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14518 log.u_bbr.flex1 = high_seq; 14519 log.u_bbr.flex2 = rack->r_ctl.roundends; 14520 log.u_bbr.flex3 = rack->r_ctl.current_round; 14521 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14522 log.u_bbr.flex8 = 9; 14523 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14524 0, &log, false, NULL, NULL, 0, &tv); 14525 } 14526 /* 14527 * The draft (v3) calls for us to use SEQ_GEQ, but that 14528 * causes issues when we are just going app limited. Lets 14529 * instead use SEQ_GT <or> where its equal but more data 14530 * is outstanding. 14531 */ 14532 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14533 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14534 rack->r_ctl.current_round++; 14535 rack->r_ctl.roundends = tp->snd_max; 14536 if (CC_ALGO(tp)->newround != NULL) { 14537 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14538 } 14539 } 14540 if ((nxt_pkt == 0) && 14541 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14542 (SEQ_GT(tp->snd_max, tp->snd_una) || 14543 (tp->t_flags & TF_DELACK) || 14544 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14545 (tp->t_state <= TCPS_CLOSING)))) { 14546 /* We could not send (probably in the hpts but stopped the timer earlier)?
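 * If so, make sure something is still armed: either we are already
 * queued in the hpts for paced output (nothing to do), or we pull
 * ourselves off the hpts, credit any early wakeup we were owed,
 * and restart the hpts timer so delayed-ACK/keep-alive processing
 * is not lost.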
*/ 14547 if ((tp->snd_max == tp->snd_una) && 14548 ((tp->t_flags & TF_DELACK) == 0) && 14549 (tcp_in_hpts(rack->rc_inp)) && 14550 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14551 /* keep alive not needed if we are hptsi output yet */ 14552 ; 14553 } else { 14554 int late = 0; 14555 if (tcp_in_hpts(inp)) { 14556 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14557 us_cts = tcp_get_usecs(NULL); 14558 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14559 rack->r_early = 1; 14560 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14561 } else 14562 late = 1; 14563 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14564 } 14565 tcp_hpts_remove(inp); 14566 } 14567 if (late && (did_out == 0)) { 14568 /* 14569 * We are late in the sending 14570 * and we did not call the output 14571 * (this probably should not happen). 14572 */ 14573 goto do_output_now; 14574 } 14575 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14576 } 14577 way_out = 1; 14578 } else if (nxt_pkt == 0) { 14579 /* Do we have the correct timer running? */ 14580 rack_timer_audit(tp, rack, &so->so_snd); 14581 way_out = 2; 14582 } 14583 done_with_input: 14584 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14585 if (did_out) 14586 rack->r_wanted_output = 0; 14587 #ifdef TCP_ACCOUNTING 14588 } else { 14589 /* 14590 * Track the time (see above). 14591 */ 14592 if (ack_val_set != 0xf) { 14593 uint64_t crtsc; 14594 14595 crtsc = get_cyclecount(); 14596 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14597 /* 14598 * Note we *DO NOT* increment the per-tcb counters since 14599 * in the else the TP may be gone!! 14600 */ 14601 } 14602 #endif 14603 } 14604 #ifdef TCP_ACCOUNTING 14605 sched_unpin(); 14606 #endif 14607 return (retval); 14608 } 14609 14610 void 14611 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14612 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14613 { 14614 struct timeval tv; 14615 14616 /* First lets see if we have old packets */ 14617 if (tp->t_in_pkt) { 14618 if (ctf_do_queued_segments(so, tp, 1)) { 14619 m_freem(m); 14620 return; 14621 } 14622 } 14623 if (m->m_flags & M_TSTMP_LRO) { 14624 mbuf_tstmp2timeval(m, &tv); 14625 } else { 14626 /* Should not be should we kassert instead? */ 14627 tcp_get_usecs(&tv); 14628 } 14629 if (rack_do_segment_nounlock(m, th, so, tp, 14630 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14631 INP_WUNLOCK(tptoinpcb(tp)); 14632 } 14633 } 14634 14635 struct rack_sendmap * 14636 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14637 { 14638 struct rack_sendmap *rsm = NULL; 14639 int32_t idx; 14640 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14641 14642 /* Return the next guy to be re-transmitted */ 14643 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14644 return (NULL); 14645 } 14646 if (tp->t_flags & TF_SENTFIN) { 14647 /* retran the end FIN? 
*/ 14648 return (NULL); 14649 } 14650 /* ok lets look at this one */ 14651 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14652 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14653 return (rsm); 14654 } 14655 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14656 goto check_it; 14657 } 14658 rsm = rack_find_lowest_rsm(rack); 14659 if (rsm == NULL) { 14660 return (NULL); 14661 } 14662 check_it: 14663 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14664 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14665 /* 14666 * No sack so we automatically do the 3 strikes and 14667 * retransmit (no rack timer would be started). 14668 */ 14669 14670 return (rsm); 14671 } 14672 if (rsm->r_flags & RACK_ACKED) { 14673 return (NULL); 14674 } 14675 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14676 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14677 /* Its not yet ready */ 14678 return (NULL); 14679 } 14680 srtt = rack_grab_rtt(tp, rack); 14681 idx = rsm->r_rtr_cnt - 1; 14682 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14683 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14684 if ((tsused == ts_low) || 14685 (TSTMP_LT(tsused, ts_low))) { 14686 /* No time since sending */ 14687 return (NULL); 14688 } 14689 if ((tsused - ts_low) < thresh) { 14690 /* It has not been long enough yet */ 14691 return (NULL); 14692 } 14693 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14694 ((rsm->r_flags & RACK_SACK_PASSED) && 14695 (rack->sack_attack_disable == 0))) { 14696 /* 14697 * We have passed the dup-ack threshold <or> 14698 * a SACK has indicated this is missing. 14699 * Note that if you are a declared attacker 14700 * it is only the dup-ack threshold that 14701 * will cause retransmits. 14702 */ 14703 /* log retransmit reason */ 14704 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14705 rack->r_fast_output = 0; 14706 return (rsm); 14707 } 14708 return (NULL); 14709 } 14710 14711 static void 14712 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14713 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14714 int line, struct rack_sendmap *rsm, uint8_t quality) 14715 { 14716 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14717 union tcp_log_stackspecific log; 14718 struct timeval tv; 14719 14720 memset(&log, 0, sizeof(log)); 14721 log.u_bbr.flex1 = slot; 14722 log.u_bbr.flex2 = len; 14723 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14724 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14725 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14726 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14727 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14728 log.u_bbr.use_lt_bw <<= 1; 14729 log.u_bbr.use_lt_bw |= rack->r_late; 14730 log.u_bbr.use_lt_bw <<= 1; 14731 log.u_bbr.use_lt_bw |= rack->r_early; 14732 log.u_bbr.use_lt_bw <<= 1; 14733 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14734 log.u_bbr.use_lt_bw <<= 1; 14735 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14736 log.u_bbr.use_lt_bw <<= 1; 14737 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14738 log.u_bbr.use_lt_bw <<= 1; 14739 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14740 log.u_bbr.use_lt_bw <<= 1; 14741 log.u_bbr.use_lt_bw |= rack->gp_ready; 14742 log.u_bbr.pkt_epoch = line; 14743 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14744 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14745 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14746 log.u_bbr.bw_inuse = bw_est; 14747 log.u_bbr.delRate = bw; 14748 if (rack->r_ctl.gp_bw == 0) 14749 log.u_bbr.cur_del_rate = 0; 
14750 else 14751 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14752 log.u_bbr.rttProp = len_time; 14753 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14754 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14755 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14756 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14757 /* We are in slow start */ 14758 log.u_bbr.flex7 = 1; 14759 } else { 14760 /* we are on congestion avoidance */ 14761 log.u_bbr.flex7 = 0; 14762 } 14763 log.u_bbr.flex8 = method; 14764 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14765 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14766 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14767 log.u_bbr.cwnd_gain <<= 1; 14768 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14769 log.u_bbr.cwnd_gain <<= 1; 14770 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14771 log.u_bbr.bbr_substate = quality; 14772 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14773 &rack->rc_inp->inp_socket->so_rcv, 14774 &rack->rc_inp->inp_socket->so_snd, 14775 BBR_LOG_HPTSI_CALC, 0, 14776 0, &log, false, &tv); 14777 } 14778 } 14779 14780 static uint32_t 14781 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14782 { 14783 uint32_t new_tso, user_max; 14784 14785 user_max = rack->rc_user_set_max_segs * mss; 14786 if (rack->rc_force_max_seg) { 14787 return (user_max); 14788 } 14789 if (rack->use_fixed_rate && 14790 ((rack->r_ctl.crte == NULL) || 14791 (bw != rack->r_ctl.crte->rate))) { 14792 /* Use the user mss since we are not exactly matched */ 14793 return (user_max); 14794 } 14795 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14796 if (new_tso > user_max) 14797 new_tso = user_max; 14798 return (new_tso); 14799 } 14800 14801 static int32_t 14802 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14803 { 14804 uint64_t lentim, fill_bw; 14805 14806 /* Lets first see if we are full, if so continue with normal rate */ 14807 rack->r_via_fill_cw = 0; 14808 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14809 return (slot); 14810 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14811 return (slot); 14812 if (rack->r_ctl.rc_last_us_rtt == 0) 14813 return (slot); 14814 if (rack->rc_pace_fill_if_rttin_range && 14815 (rack->r_ctl.rc_last_us_rtt >= 14816 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14817 /* The rtt is huge, N * smallest, lets not fill */ 14818 return (slot); 14819 } 14820 /* 14821 * first lets calculate the b/w based on the last us-rtt 14822 * and the sndwnd. 14823 */ 14824 fill_bw = rack->r_ctl.cwnd_to_use; 14825 /* Take the rwnd if its smaller */ 14826 if (fill_bw > rack->rc_tp->snd_wnd) 14827 fill_bw = rack->rc_tp->snd_wnd; 14828 if (rack->r_fill_less_agg) { 14829 /* 14830 * Now take away the inflight (this will reduce our 14831 * aggressiveness and yeah, if we get that much out in 1RTT 14832 * we will have had acks come back and still be behind). 
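 * Rough numbers for the conversion that follows (values made up,
 * and the optional inflight subtraction is ignored here): with
 * cwnd_to_use = 100000 but snd_wnd = 80000 the fill target is
 * 80000 bytes per RTT; with rc_last_us_rtt = 25000 usec that is
 * 80000 * 1000000 / 25000 = 3,200,000 bytes/sec, which is then
 * checked against RACK_MIN_BW, bw_rate_cap and any hardware rate
 * table limit before being used.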
14833 */ 14834 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14835 } 14836 /* Now lets make it into a b/w */ 14837 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14838 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14839 /* We are below the min b/w */ 14840 if (non_paced) 14841 *rate_wanted = fill_bw; 14842 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14843 return (slot); 14844 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14845 fill_bw = rack->r_ctl.bw_rate_cap; 14846 rack->r_via_fill_cw = 1; 14847 if (rack->r_rack_hw_rate_caps && 14848 (rack->r_ctl.crte != NULL)) { 14849 uint64_t high_rate; 14850 14851 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14852 if (fill_bw > high_rate) { 14853 /* We are capping bw at the highest rate table entry */ 14854 if (*rate_wanted > high_rate) { 14855 /* The original rate was also capped */ 14856 rack->r_via_fill_cw = 0; 14857 } 14858 rack_log_hdwr_pacing(rack, 14859 fill_bw, high_rate, __LINE__, 14860 0, 3); 14861 fill_bw = high_rate; 14862 if (capped) 14863 *capped = 1; 14864 } 14865 } else if ((rack->r_ctl.crte == NULL) && 14866 (rack->rack_hdrw_pacing == 0) && 14867 (rack->rack_hdw_pace_ena) && 14868 rack->r_rack_hw_rate_caps && 14869 (rack->rack_attempt_hdwr_pace == 0) && 14870 (rack->rc_inp->inp_route.ro_nh != NULL) && 14871 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14872 /* 14873 * Ok we may have a first attempt that is greater than our top rate 14874 * lets check. 14875 */ 14876 uint64_t high_rate; 14877 14878 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14879 if (high_rate) { 14880 if (fill_bw > high_rate) { 14881 fill_bw = high_rate; 14882 if (capped) 14883 *capped = 1; 14884 } 14885 } 14886 } 14887 /* 14888 * Ok fill_bw holds our mythical b/w to fill the cwnd 14889 * in a rtt, what does that time wise equate too? 14890 */ 14891 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14892 lentim /= fill_bw; 14893 *rate_wanted = fill_bw; 14894 if (non_paced || (lentim < slot)) { 14895 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14896 0, lentim, 12, __LINE__, NULL, 0); 14897 return ((int32_t)lentim); 14898 } else 14899 return (slot); 14900 } 14901 14902 static int32_t 14903 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14904 { 14905 uint64_t srtt; 14906 int32_t slot = 0; 14907 int can_start_hw_pacing = 1; 14908 int err; 14909 14910 if (rack->rc_always_pace == 0) { 14911 /* 14912 * We use the most optimistic possible cwnd/srtt for 14913 * sending calculations. This will make our 14914 * calculation anticipate getting more through 14915 * quicker then possible. But thats ok we don't want 14916 * the peer to have a gap in data sending. 14917 */ 14918 uint64_t cwnd, tr_perms = 0; 14919 int32_t reduce = 0; 14920 14921 old_method: 14922 /* 14923 * We keep no precise pacing with the old method 14924 * instead we use the pacer to mitigate bursts. 
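 * Rough example of the arithmetic below (illustrative values):
 * with cwnd = 50000 bytes and srtt = 40000 usec, tr_perms =
 * 50000 * 1000 / 40000 = 1250 bytes per msec, so a 10000 byte send
 * gets slot = 8 msec; the rack_slot_reduction trim is then applied
 * and the result is scaled to usec with HPTS_USEC_IN_MSEC.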
14925 */ 14926 if (rack->r_ctl.rc_rack_min_rtt) 14927 srtt = rack->r_ctl.rc_rack_min_rtt; 14928 else 14929 srtt = max(tp->t_srtt, 1); 14930 if (rack->r_ctl.rc_rack_largest_cwnd) 14931 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14932 else 14933 cwnd = rack->r_ctl.cwnd_to_use; 14934 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14935 tr_perms = (cwnd * 1000) / srtt; 14936 if (tr_perms == 0) { 14937 tr_perms = ctf_fixed_maxseg(tp); 14938 } 14939 /* 14940 * Calculate how long this will take to drain, if 14941 * the calculation comes out to zero, thats ok we 14942 * will use send_a_lot to possibly spin around for 14943 * more increasing tot_len_this_send to the point 14944 * that its going to require a pace, or we hit the 14945 * cwnd. Which in that case we are just waiting for 14946 * a ACK. 14947 */ 14948 slot = len / tr_perms; 14949 /* Now do we reduce the time so we don't run dry? */ 14950 if (slot && rack_slot_reduction) { 14951 reduce = (slot / rack_slot_reduction); 14952 if (reduce < slot) { 14953 slot -= reduce; 14954 } else 14955 slot = 0; 14956 } 14957 slot *= HPTS_USEC_IN_MSEC; 14958 if (rack->rc_pace_to_cwnd) { 14959 uint64_t rate_wanted = 0; 14960 14961 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14962 rack->rc_ack_can_sendout_data = 1; 14963 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14964 } else 14965 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14966 } else { 14967 uint64_t bw_est, res, lentim, rate_wanted; 14968 uint32_t orig_val, segs, oh; 14969 int capped = 0; 14970 int prev_fill; 14971 14972 if ((rack->r_rr_config == 1) && rsm) { 14973 return (rack->r_ctl.rc_min_to); 14974 } 14975 if (rack->use_fixed_rate) { 14976 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14977 } else if ((rack->r_ctl.init_rate == 0) && 14978 #ifdef NETFLIX_PEAKRATE 14979 (rack->rc_tp->t_maxpeakrate == 0) && 14980 #endif 14981 (rack->r_ctl.gp_bw == 0)) { 14982 /* no way to yet do an estimate */ 14983 bw_est = rate_wanted = 0; 14984 } else { 14985 bw_est = rack_get_bw(rack); 14986 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14987 } 14988 if ((bw_est == 0) || (rate_wanted == 0) || 14989 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14990 /* 14991 * No way yet to make a b/w estimate or 14992 * our raise is set incorrectly. 14993 */ 14994 goto old_method; 14995 } 14996 /* We need to account for all the overheads */ 14997 segs = (len + segsiz - 1) / segsiz; 14998 /* 14999 * We need the diff between 1514 bytes (e-mtu with e-hdr) 15000 * and how much data we put in each packet. Yes this 15001 * means we may be off if we are larger than 1500 bytes 15002 * or smaller. But this just makes us more conservative. 15003 */ 15004 if (rack_hw_rate_min && 15005 (bw_est < rack_hw_rate_min)) 15006 can_start_hw_pacing = 0; 15007 if (ETHERNET_SEGMENT_SIZE > segsiz) 15008 oh = ETHERNET_SEGMENT_SIZE - segsiz; 15009 else 15010 oh = 0; 15011 segs *= oh; 15012 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 15013 res = lentim / rate_wanted; 15014 slot = (uint32_t)res; 15015 orig_val = rack->r_ctl.rc_pace_max_segs; 15016 if (rack->r_ctl.crte == NULL) { 15017 /* 15018 * Only do this if we are not hardware pacing 15019 * since if we are doing hw-pacing below we will 15020 * set make a call after setting up or changing 15021 * the rate. 
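/*
 * Illustrative sketches of the two delay computations in
 * rack_get_pacing_delay() above (not part of the stack; the names are
 * made up, assumes <stdint.h>).  The first is the legacy burst-mitigation
 * path: cwnd is inflated by 1000 so that dividing by a microsecond srtt
 * yields bytes per millisecond, and len divided by that gives a slot in
 * milliseconds.  The second is the rate-based path, which charges every
 * segment the difference between the wire size and the payload size
 * before dividing by the wanted rate; 1514 stands in for
 * ETHERNET_SEGMENT_SIZE here.
 */
static int32_t
old_method_slot_example(uint64_t cwnd, uint64_t srtt_us, uint32_t len,
    uint32_t maxseg, int32_t slot_reduction)
{
	uint64_t tr_perms;
	int32_t slot, reduce;

	tr_perms = (cwnd * 1000) / (srtt_us ? srtt_us : 1);	/* bytes per ms */
	if (tr_perms == 0)
		tr_perms = maxseg;
	slot = len / tr_perms;			/* milliseconds to drain len */
	if (slot && slot_reduction) {
		reduce = slot / slot_reduction;	/* shave a bit so we never run dry */
		slot = (reduce < slot) ? (slot - reduce) : 0;
	}
	return (slot * 1000);			/* HPTS_USEC_IN_MSEC */
}

static uint32_t
rate_slot_example(uint64_t rate_bytes_per_sec, uint32_t len, uint32_t segsiz)
{
	uint64_t lentim;
	uint32_t segs, oh;

	if (rate_bytes_per_sec == 0)
		return (0);
	segs = (len + segsiz - 1) / segsiz;		/* segments we will emit */
	oh = (segsiz < 1514) ? (1514 - segsiz) : 0;	/* per-segment header cost */
	lentim = ((uint64_t)len + (uint64_t)segs * oh) * 1000000;
	return ((uint32_t)(lentim / rate_bytes_per_sec));
}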
15022 */ 15023 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 15024 } else if (rack->rc_inp->inp_snd_tag == NULL) { 15025 /* 15026 * We lost our rate somehow, this can happen 15027 * if the interface changed underneath us. 15028 */ 15029 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15030 rack->r_ctl.crte = NULL; 15031 /* Lets re-allow attempting to setup pacing */ 15032 rack->rack_hdrw_pacing = 0; 15033 rack->rack_attempt_hdwr_pace = 0; 15034 rack_log_hdwr_pacing(rack, 15035 rate_wanted, bw_est, __LINE__, 15036 0, 6); 15037 } 15038 /* Did we change the TSO size, if so log it */ 15039 if (rack->r_ctl.rc_pace_max_segs != orig_val) 15040 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 15041 prev_fill = rack->r_via_fill_cw; 15042 if ((rack->rc_pace_to_cwnd) && 15043 (capped == 0) && 15044 (rack->use_fixed_rate == 0) && 15045 (rack->in_probe_rtt == 0) && 15046 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 15047 /* 15048 * We want to pace at our rate *or* faster to 15049 * fill the cwnd to the max if its not full. 15050 */ 15051 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 15052 } 15053 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 15054 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15055 if ((rack->rack_hdw_pace_ena) && 15056 (can_start_hw_pacing > 0) && 15057 (rack->rack_hdrw_pacing == 0) && 15058 (rack->rack_attempt_hdwr_pace == 0)) { 15059 /* 15060 * Lets attempt to turn on hardware pacing 15061 * if we can. 15062 */ 15063 rack->rack_attempt_hdwr_pace = 1; 15064 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 15065 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15066 rate_wanted, 15067 RS_PACING_GEQ, 15068 &err, &rack->r_ctl.crte_prev_rate); 15069 if (rack->r_ctl.crte) { 15070 rack->rack_hdrw_pacing = 1; 15071 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15072 0, rack->r_ctl.crte, 15073 NULL); 15074 rack_log_hdwr_pacing(rack, 15075 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15076 err, 0); 15077 rack->r_ctl.last_hw_bw_req = rate_wanted; 15078 } else { 15079 counter_u64_add(rack_hw_pace_init_fail, 1); 15080 } 15081 } else if (rack->rack_hdrw_pacing && 15082 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15083 /* Do we need to adjust our rate? */ 15084 const struct tcp_hwrate_limit_table *nrte; 15085 15086 if (rack->r_up_only && 15087 (rate_wanted < rack->r_ctl.crte->rate)) { 15088 /** 15089 * We have four possible states here 15090 * having to do with the previous time 15091 * and this time. 15092 * previous | this-time 15093 * A) 0 | 0 -- fill_cw not in the picture 15094 * B) 1 | 0 -- we were doing a fill-cw but now are not 15095 * C) 1 | 1 -- all rates from fill_cw 15096 * D) 0 | 1 -- we were doing non-fill and now we are filling 15097 * 15098 * For case A, C and D we don't allow a drop. But for 15099 * case B where we now our on our steady rate we do 15100 * allow a drop. 15101 * 15102 */ 15103 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15104 goto done_w_hdwr; 15105 } 15106 if ((rate_wanted > rack->r_ctl.crte->rate) || 15107 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15108 if (rack_hw_rate_to_low && 15109 (bw_est < rack_hw_rate_to_low)) { 15110 /* 15111 * The pacing rate is too low for hardware, but 15112 * do allow hardware pacing to be restarted. 
15113 */ 15114 rack_log_hdwr_pacing(rack, 15115 bw_est, rack->r_ctl.crte->rate, __LINE__, 15116 0, 5); 15117 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15118 rack->r_ctl.crte = NULL; 15119 rack->rack_attempt_hdwr_pace = 0; 15120 rack->rack_hdrw_pacing = 0; 15121 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15122 goto done_w_hdwr; 15123 } 15124 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15125 rack->rc_tp, 15126 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15127 rate_wanted, 15128 RS_PACING_GEQ, 15129 &err, &rack->r_ctl.crte_prev_rate); 15130 if (nrte == NULL) { 15131 /* Lost the rate */ 15132 rack->rack_hdrw_pacing = 0; 15133 rack->r_ctl.crte = NULL; 15134 rack_log_hdwr_pacing(rack, 15135 rate_wanted, 0, __LINE__, 15136 err, 1); 15137 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15138 counter_u64_add(rack_hw_pace_lost, 1); 15139 } else if (nrte != rack->r_ctl.crte) { 15140 rack->r_ctl.crte = nrte; 15141 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15142 segsiz, 0, 15143 rack->r_ctl.crte, 15144 NULL); 15145 rack_log_hdwr_pacing(rack, 15146 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15147 err, 2); 15148 rack->r_ctl.last_hw_bw_req = rate_wanted; 15149 } 15150 } else { 15151 /* We just need to adjust the segment size */ 15152 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15153 rack_log_hdwr_pacing(rack, 15154 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15155 0, 4); 15156 rack->r_ctl.last_hw_bw_req = rate_wanted; 15157 } 15158 } 15159 } 15160 if ((rack->r_ctl.crte != NULL) && 15161 (rack->r_ctl.crte->rate == rate_wanted)) { 15162 /* 15163 * We need to add a extra if the rates 15164 * are exactly matched. The idea is 15165 * we want the software to make sure the 15166 * queue is empty before adding more, this 15167 * gives us N MSS extra pace times where 15168 * N is our sysctl 15169 */ 15170 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15171 } 15172 done_w_hdwr: 15173 if (rack_limit_time_with_srtt && 15174 (rack->use_fixed_rate == 0) && 15175 #ifdef NETFLIX_PEAKRATE 15176 (rack->rc_tp->t_maxpeakrate == 0) && 15177 #endif 15178 (rack->rack_hdrw_pacing == 0)) { 15179 /* 15180 * Sanity check, we do not allow the pacing delay 15181 * to be longer than the SRTT of the path. If it is 15182 * a slow path, then adding a packet should increase 15183 * the RTT and compensate for this i.e. the srtt will 15184 * be greater so the allowed pacing time will be greater. 15185 * 15186 * Note this restriction is not for where a peak rate 15187 * is set, we are doing fixed pacing or hardware pacing. 15188 */ 15189 if (rack->rc_tp->t_srtt) 15190 srtt = rack->rc_tp->t_srtt; 15191 else 15192 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15193 if (srtt < (uint64_t)slot) { 15194 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15195 slot = srtt; 15196 } 15197 } 15198 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15199 } 15200 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15201 /* 15202 * If this rate is seeing enobufs when it 15203 * goes to send then either the nic is out 15204 * of gas or we are mis-estimating the time 15205 * somehow and not letting the queue empty 15206 * completely. Lets add to the pacing time. 
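/*
 * Illustrative sketch of the SRTT sanity cap above (not part of the
 * stack; the name is made up): when neither fixed-rate nor hardware
 * pacing is in use, the computed delay is never allowed to exceed the
 * smoothed RTT, on the theory that a genuinely slower path will raise
 * the SRTT and thereby earn itself a longer allowance.
 */
static int32_t
srtt_cap_slot_example(int32_t slot, uint64_t srtt_us, uint64_t initial_rto_ms)
{
	uint64_t srtt;

	/* Fall back to the initial RTO (given in ms) if no SRTT sample exists yet. */
	srtt = srtt_us ? srtt_us : (initial_rto_ms * 1000);
	if (srtt < (uint64_t)slot)
		slot = (int32_t)srtt;
	return (slot);
}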
15207 */ 15208 int hw_boost_delay; 15209 15210 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15211 if (hw_boost_delay > rack_enobuf_hw_max) 15212 hw_boost_delay = rack_enobuf_hw_max; 15213 else if (hw_boost_delay < rack_enobuf_hw_min) 15214 hw_boost_delay = rack_enobuf_hw_min; 15215 slot += hw_boost_delay; 15216 } 15217 return (slot); 15218 } 15219 15220 static void 15221 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15222 tcp_seq startseq, uint32_t sb_offset) 15223 { 15224 struct rack_sendmap *my_rsm = NULL; 15225 struct rack_sendmap fe; 15226 15227 if (tp->t_state < TCPS_ESTABLISHED) { 15228 /* 15229 * We don't start any measurements if we are 15230 * not at least established. 15231 */ 15232 return; 15233 } 15234 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15235 /* 15236 * We will get no more data into the SB 15237 * this means we need to have the data available 15238 * before we start a measurement. 15239 */ 15240 15241 if (sbavail(&tptosocket(tp)->so_snd) < 15242 max(rc_init_window(rack), 15243 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15244 /* Nope not enough data */ 15245 return; 15246 } 15247 } 15248 tp->t_flags |= TF_GPUTINPROG; 15249 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15250 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15251 tp->gput_seq = startseq; 15252 rack->app_limited_needs_set = 0; 15253 if (rack->in_probe_rtt) 15254 rack->measure_saw_probe_rtt = 1; 15255 else if ((rack->measure_saw_probe_rtt) && 15256 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15257 rack->measure_saw_probe_rtt = 0; 15258 if (rack->rc_gp_filled) 15259 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15260 else { 15261 /* Special case initial measurement */ 15262 struct timeval tv; 15263 15264 tp->gput_ts = tcp_get_usecs(&tv); 15265 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15266 } 15267 /* 15268 * We take a guess out into the future, 15269 * if we have no measurement and no 15270 * initial rate, we measure the first 15271 * initial-windows worth of data to 15272 * speed up getting some GP measurement and 15273 * thus start pacing. 15274 */ 15275 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15276 rack->app_limited_needs_set = 1; 15277 tp->gput_ack = startseq + max(rc_init_window(rack), 15278 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15279 rack_log_pacing_delay_calc(rack, 15280 tp->gput_seq, 15281 tp->gput_ack, 15282 0, 15283 tp->gput_ts, 15284 rack->r_ctl.rc_app_limited_cnt, 15285 9, 15286 __LINE__, NULL, 0); 15287 return; 15288 } 15289 if (sb_offset) { 15290 /* 15291 * We are out somewhere in the sb 15292 * can we use the already outstanding data? 15293 */ 15294 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15295 /* 15296 * Yes first one is good and in this case 15297 * the tp->gput_ts is correctly set based on 15298 * the last ack that arrived (no need to 15299 * set things up when an ack comes in). 15300 */ 15301 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15302 if ((my_rsm == NULL) || 15303 (my_rsm->r_rtr_cnt != 1)) { 15304 /* retransmission? */ 15305 goto use_latest; 15306 } 15307 } else { 15308 if (rack->r_ctl.rc_first_appl == NULL) { 15309 /* 15310 * If rc_first_appl is NULL 15311 * then the cnt should be 0. 15312 * This is probably an error, maybe 15313 * a KASSERT would be approprate. 
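/*
 * Illustrative sketch of the initial goodput-measurement window chosen
 * above (not part of the stack; the name and min_gp_win_segs parameter
 * are stand-ins for MIN_GP_WIN and friends): with no bandwidth estimate
 * yet, only an initial-window's worth of data is measured so a usable
 * estimate, and therefore pacing, arrives as quickly as possible.
 */
static uint32_t
first_gp_ack_example(uint32_t startseq, uint32_t init_window,
    uint32_t maxseg, uint32_t min_gp_win_segs)
{
	uint32_t span;

	span = min_gp_win_segs * maxseg;	/* the floor for any measurement */
	if (init_window > span)
		span = init_window;
	return (startseq + span);		/* measure until this sequence is acked */
}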
15314 */ 15315 goto use_latest; 15316 } 15317 /* 15318 * If we have a marker pointer to the last one that is 15319 * app limited we can use that, but we need to set 15320 * things up so that when it gets ack'ed we record 15321 * the ack time (if its not already acked). 15322 */ 15323 rack->app_limited_needs_set = 1; 15324 /* 15325 * We want to get to the rsm that is either 15326 * next with space i.e. over 1 MSS or the one 15327 * after that (after the app-limited). 15328 */ 15329 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15330 rack->r_ctl.rc_first_appl); 15331 if (my_rsm) { 15332 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15333 /* Have to use the next one */ 15334 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15335 my_rsm); 15336 else { 15337 /* Use after the first MSS of it is acked */ 15338 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15339 goto start_set; 15340 } 15341 } 15342 if ((my_rsm == NULL) || 15343 (my_rsm->r_rtr_cnt != 1)) { 15344 /* 15345 * Either its a retransmit or 15346 * the last is the app-limited one. 15347 */ 15348 goto use_latest; 15349 } 15350 } 15351 tp->gput_seq = my_rsm->r_start; 15352 start_set: 15353 if (my_rsm->r_flags & RACK_ACKED) { 15354 /* 15355 * This one has been acked use the arrival ack time 15356 */ 15357 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15358 rack->app_limited_needs_set = 0; 15359 } 15360 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15361 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15362 rack_log_pacing_delay_calc(rack, 15363 tp->gput_seq, 15364 tp->gput_ack, 15365 (uint64_t)my_rsm, 15366 tp->gput_ts, 15367 rack->r_ctl.rc_app_limited_cnt, 15368 9, 15369 __LINE__, NULL, 0); 15370 return; 15371 } 15372 15373 use_latest: 15374 /* 15375 * We don't know how long we may have been 15376 * idle or if this is the first-send. Lets 15377 * setup the flag so we will trim off 15378 * the first ack'd data so we get a true 15379 * measurement. 15380 */ 15381 rack->app_limited_needs_set = 1; 15382 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15383 /* Find this guy so we can pull the send time */ 15384 fe.r_start = startseq; 15385 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15386 if (my_rsm) { 15387 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15388 if (my_rsm->r_flags & RACK_ACKED) { 15389 /* 15390 * Unlikely since its probably what was 15391 * just transmitted (but I am paranoid). 15392 */ 15393 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15394 rack->app_limited_needs_set = 0; 15395 } 15396 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15397 /* This also is unlikely */ 15398 tp->gput_seq = my_rsm->r_start; 15399 } 15400 } else { 15401 /* 15402 * TSNH unless we have some send-map limit, 15403 * and even at that it should not be hitting 15404 * that limit (we should have stopped sending). 
15405 */ 15406 struct timeval tv; 15407 15408 microuptime(&tv); 15409 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15410 } 15411 rack_log_pacing_delay_calc(rack, 15412 tp->gput_seq, 15413 tp->gput_ack, 15414 (uint64_t)my_rsm, 15415 tp->gput_ts, 15416 rack->r_ctl.rc_app_limited_cnt, 15417 9, __LINE__, NULL, 0); 15418 } 15419 15420 static inline uint32_t 15421 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15422 uint32_t avail, int32_t sb_offset) 15423 { 15424 uint32_t len; 15425 uint32_t sendwin; 15426 15427 if (tp->snd_wnd > cwnd_to_use) 15428 sendwin = cwnd_to_use; 15429 else 15430 sendwin = tp->snd_wnd; 15431 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15432 /* We never want to go over our peers rcv-window */ 15433 len = 0; 15434 } else { 15435 uint32_t flight; 15436 15437 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15438 if (flight >= sendwin) { 15439 /* 15440 * We have in flight what we are allowed by cwnd (if 15441 * it was rwnd blocking it would have hit above out 15442 * >= tp->snd_wnd). 15443 */ 15444 return (0); 15445 } 15446 len = sendwin - flight; 15447 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15448 /* We would send too much (beyond the rwnd) */ 15449 len = tp->snd_wnd - ctf_outstanding(tp); 15450 } 15451 if ((len + sb_offset) > avail) { 15452 /* 15453 * We don't have that much in the SB, how much is 15454 * there? 15455 */ 15456 len = avail - sb_offset; 15457 } 15458 } 15459 return (len); 15460 } 15461 15462 static void 15463 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15464 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15465 int rsm_is_null, int optlen, int line, uint16_t mode) 15466 { 15467 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15468 union tcp_log_stackspecific log; 15469 struct timeval tv; 15470 15471 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15472 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15473 log.u_bbr.flex1 = error; 15474 log.u_bbr.flex2 = flags; 15475 log.u_bbr.flex3 = rsm_is_null; 15476 log.u_bbr.flex4 = ipoptlen; 15477 log.u_bbr.flex5 = tp->rcv_numsacks; 15478 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15479 log.u_bbr.flex7 = optlen; 15480 log.u_bbr.flex8 = rack->r_fsb_inited; 15481 log.u_bbr.applimited = rack->r_fast_output; 15482 log.u_bbr.bw_inuse = rack_get_bw(rack); 15483 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15484 log.u_bbr.cwnd_gain = mode; 15485 log.u_bbr.pkts_out = orig_len; 15486 log.u_bbr.lt_epoch = len; 15487 log.u_bbr.delivered = line; 15488 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15489 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15490 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15491 len, &log, false, NULL, NULL, 0, &tv); 15492 } 15493 } 15494 15495 15496 static struct mbuf * 15497 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15498 struct rack_fast_send_blk *fsb, 15499 int32_t seglimit, int32_t segsize, int hw_tls) 15500 { 15501 #ifdef KERN_TLS 15502 struct ktls_session *tls, *ntls; 15503 #ifdef INVARIANTS 15504 struct mbuf *start; 15505 #endif 15506 #endif 15507 struct mbuf *m, *n, **np, *smb; 15508 struct mbuf *top; 15509 int32_t off, soff; 15510 int32_t len = *plen; 15511 int32_t fragsize; 15512 int32_t len_cp = 0; 15513 uint32_t mlen, frags; 15514 15515 soff = off = the_off; 15516 smb = m = the_m; 15517 np = ⊤ 15518 top = NULL; 15519 #ifdef KERN_TLS 15520 if (hw_tls && (m->m_flags & M_EXTPG)) 15521 tls = m->m_epg_tls; 
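/*
 * Condensed sketch of rack_what_can_we_send() above (illustrative only,
 * not part of the stack; assumes <stdint.h>): the send length is the
 * limiting window minus what is already in flight, further clipped so we
 * never pass the peer's receive window or the data actually sitting in
 * the socket buffer.
 */
static uint32_t
what_can_we_send_example(uint32_t snd_wnd, uint32_t cwnd, uint32_t flight,
    uint32_t outstanding, uint32_t avail, uint32_t sb_offset)
{
	uint32_t sendwin, len;

	sendwin = (snd_wnd > cwnd) ? cwnd : snd_wnd;
	if (outstanding >= snd_wnd || flight >= sendwin)
		return (0);			/* rwnd or cwnd blocked */
	len = sendwin - flight;
	if (len + outstanding > snd_wnd)
		len = snd_wnd - outstanding;	/* never pass the peer's rwnd */
	if (len + sb_offset > avail)
		len = avail - sb_offset;	/* only what the sb actually holds */
	return (len);
}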
15522 else 15523 tls = NULL; 15524 #ifdef INVARIANTS 15525 start = m; 15526 #endif 15527 #endif 15528 while (len > 0) { 15529 if (m == NULL) { 15530 *plen = len_cp; 15531 break; 15532 } 15533 #ifdef KERN_TLS 15534 if (hw_tls) { 15535 if (m->m_flags & M_EXTPG) 15536 ntls = m->m_epg_tls; 15537 else 15538 ntls = NULL; 15539 15540 /* 15541 * Avoid mixing TLS records with handshake 15542 * data or TLS records from different 15543 * sessions. 15544 */ 15545 if (tls != ntls) { 15546 MPASS(m != start); 15547 *plen = len_cp; 15548 break; 15549 } 15550 } 15551 #endif 15552 mlen = min(len, m->m_len - off); 15553 if (seglimit) { 15554 /* 15555 * For M_EXTPG mbufs, add 3 segments 15556 * + 1 in case we are crossing page boundaries 15557 * + 2 in case the TLS hdr/trailer are used 15558 * It is cheaper to just add the segments 15559 * than it is to take the cache miss to look 15560 * at the mbuf ext_pgs state in detail. 15561 */ 15562 if (m->m_flags & M_EXTPG) { 15563 fragsize = min(segsize, PAGE_SIZE); 15564 frags = 3; 15565 } else { 15566 fragsize = segsize; 15567 frags = 0; 15568 } 15569 15570 /* Break if we really can't fit anymore. */ 15571 if ((frags + 1) >= seglimit) { 15572 *plen = len_cp; 15573 break; 15574 } 15575 15576 /* 15577 * Reduce size if you can't copy the whole 15578 * mbuf. If we can't copy the whole mbuf, also 15579 * adjust len so the loop will end after this 15580 * mbuf. 15581 */ 15582 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15583 mlen = (seglimit - frags - 1) * fragsize; 15584 len = mlen; 15585 *plen = len_cp + len; 15586 } 15587 frags += howmany(mlen, fragsize); 15588 if (frags == 0) 15589 frags++; 15590 seglimit -= frags; 15591 KASSERT(seglimit > 0, 15592 ("%s: seglimit went too low", __func__)); 15593 } 15594 n = m_get(M_NOWAIT, m->m_type); 15595 *np = n; 15596 if (n == NULL) 15597 goto nospace; 15598 n->m_len = mlen; 15599 soff += mlen; 15600 len_cp += n->m_len; 15601 if (m->m_flags & (M_EXT|M_EXTPG)) { 15602 n->m_data = m->m_data + off; 15603 mb_dupcl(n, m); 15604 } else { 15605 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15606 (u_int)n->m_len); 15607 } 15608 len -= n->m_len; 15609 off = 0; 15610 m = m->m_next; 15611 np = &n->m_next; 15612 if (len || (soff == smb->m_len)) { 15613 /* 15614 * We have more so we move forward or 15615 * we have consumed the entire mbuf and 15616 * len has fell to 0. 15617 */ 15618 soff = 0; 15619 smb = m; 15620 } 15621 15622 } 15623 if (fsb != NULL) { 15624 fsb->m = smb; 15625 fsb->off = soff; 15626 if (smb) { 15627 /* 15628 * Save off the size of the mbuf. We do 15629 * this so that we can recognize when it 15630 * has been trimmed by sbcut() as acks 15631 * come in. 15632 */ 15633 fsb->o_m_len = smb->m_len; 15634 } else { 15635 /* 15636 * This is the case where the next mbuf went to NULL. This 15637 * means with this copy we have sent everything in the sb. 15638 * In theory we could clear the fast_output flag, but lets 15639 * not since its possible that we could get more added 15640 * and acks that call the extend function which would let 15641 * us send more. 15642 */ 15643 fsb->o_m_len = 0; 15644 } 15645 } 15646 return (top); 15647 nospace: 15648 if (top) 15649 m_freem(top); 15650 return (NULL); 15651 15652 } 15653 15654 /* 15655 * This is a copy of m_copym(), taking the TSO segment size/limit 15656 * constraints into account, and advancing the sndptr as it goes. 
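/*
 * Illustrative sketch of the seglimit bookkeeping above (not part of the
 * stack; howmany() is open-coded): an unmapped (M_EXTPG) mbuf is charged
 * three extra segments up front, page-sized fragments are assumed, and
 * the rest of the charge is simply how many fragments the copy spans.
 * Paying a slightly pessimistic charge is cheaper than taking the cache
 * miss needed to inspect the ext_pgs state exactly.
 */
static int
seglimit_charge_example(int is_extpg, uint32_t mlen, int32_t segsize,
    int32_t page_size)
{
	int32_t fragsize;
	int frags;

	if (is_extpg) {
		fragsize = (segsize < page_size) ? segsize : page_size;
		frags = 3;	/* page crossing + TLS hdr/trailer worst case */
	} else {
		fragsize = segsize;
		frags = 0;
	}
	frags += (mlen + fragsize - 1) / fragsize;	/* howmany(mlen, fragsize) */
	if (frags == 0)
		frags = 1;
	return (frags);
}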
15657 */ 15658 static struct mbuf * 15659 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15660 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15661 { 15662 struct mbuf *m, *n; 15663 int32_t soff; 15664 15665 soff = rack->r_ctl.fsb.off; 15666 m = rack->r_ctl.fsb.m; 15667 if (rack->r_ctl.fsb.o_m_len > m->m_len) { 15668 /* 15669 * The mbuf had the front of it chopped off by an ack; 15670 * we need to adjust the soff/off by that difference. 15671 */ 15672 uint32_t delta; 15673 15674 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15675 soff -= delta; 15676 } else if (rack->r_ctl.fsb.o_m_len < m->m_len) { 15677 /* 15678 * The mbuf was expanded, probably by 15679 * m_compress(). Just update o_m_len. 15680 */ 15681 rack->r_ctl.fsb.o_m_len = m->m_len; 15682 } 15683 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15684 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15685 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15686 __FUNCTION__, 15687 rack, *plen, m, m->m_len)); 15688 /* Save off the right location before we copy and advance */ 15689 *s_soff = soff; 15690 *s_mb = rack->r_ctl.fsb.m; 15691 n = rack_fo_base_copym(m, soff, plen, 15692 &rack->r_ctl.fsb, 15693 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15694 return (n); 15695 } 15696 15697 static int 15698 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15699 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp) 15700 { 15701 /* 15702 * Enter the fast retransmit path. We are given that a sched_pin is 15703 * in place (if accounting is compiled in) and the cycle count taken 15704 * at the entry is in the ts_val. The concept here is that the rsm 15705 * now holds the mbuf offsets and such so we can directly transmit 15706 * without a lot of overhead; the len field is already set for 15707 * us to prohibit us from sending too much (usually it's 1 MSS).
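/*
 * Illustrative sketch of the offset fix-up above (not part of the stack;
 * the name is made up): if acks have let sbcut() trim the front of the
 * cached mbuf since o_m_len was recorded, the saved offset must shrink
 * by the same amount so it still points at the same byte of data.
 */
static int32_t
fsb_soff_fixup_example(uint32_t o_m_len, uint32_t cur_m_len, int32_t soff)
{
	if (o_m_len > cur_m_len)
		soff -= (int32_t)(o_m_len - cur_m_len);	/* front was trimmed away */
	return (soff);
}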
15708 */ 15709 struct ip *ip = NULL; 15710 struct udphdr *udp = NULL; 15711 struct tcphdr *th = NULL; 15712 struct mbuf *m = NULL; 15713 struct inpcb *inp; 15714 uint8_t *cpto; 15715 struct tcp_log_buffer *lgb; 15716 #ifdef TCP_ACCOUNTING 15717 uint64_t crtsc; 15718 int cnt_thru = 1; 15719 #endif 15720 struct tcpopt to; 15721 u_char opt[TCP_MAXOLEN]; 15722 uint32_t hdrlen, optlen; 15723 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15724 uint16_t flags; 15725 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15726 uint32_t if_hw_tsomaxsegsize; 15727 15728 #ifdef INET6 15729 struct ip6_hdr *ip6 = NULL; 15730 15731 if (rack->r_is_v6) { 15732 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15733 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15734 } else 15735 #endif /* INET6 */ 15736 { 15737 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15738 hdrlen = sizeof(struct tcpiphdr); 15739 } 15740 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15741 goto failed; 15742 } 15743 if (doing_tlp) { 15744 /* Its a TLP add the flag, it may already be there but be sure */ 15745 rsm->r_flags |= RACK_TLP; 15746 } else { 15747 /* If it was a TLP it is not not on this retransmit */ 15748 rsm->r_flags &= ~RACK_TLP; 15749 } 15750 startseq = rsm->r_start; 15751 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15752 inp = rack->rc_inp; 15753 to.to_flags = 0; 15754 flags = tcp_outflags[tp->t_state]; 15755 if (flags & (TH_SYN|TH_RST)) { 15756 goto failed; 15757 } 15758 if (rsm->r_flags & RACK_HAS_FIN) { 15759 /* We can't send a FIN here */ 15760 goto failed; 15761 } 15762 if (flags & TH_FIN) { 15763 /* We never send a FIN */ 15764 flags &= ~TH_FIN; 15765 } 15766 if (tp->t_flags & TF_RCVD_TSTMP) { 15767 to.to_tsval = ms_cts + tp->ts_offset; 15768 to.to_tsecr = tp->ts_recent; 15769 to.to_flags = TOF_TS; 15770 } 15771 optlen = tcp_addoptions(&to, opt); 15772 hdrlen += optlen; 15773 udp = rack->r_ctl.fsb.udp; 15774 if (udp) 15775 hdrlen += sizeof(struct udphdr); 15776 if (rack->r_ctl.rc_pace_max_segs) 15777 max_val = rack->r_ctl.rc_pace_max_segs; 15778 else if (rack->rc_user_set_max_segs) 15779 max_val = rack->rc_user_set_max_segs * segsiz; 15780 else 15781 max_val = len; 15782 if ((tp->t_flags & TF_TSO) && 15783 V_tcp_do_tso && 15784 (len > segsiz) && 15785 (tp->t_port == 0)) 15786 tso = 1; 15787 #ifdef INET6 15788 if (MHLEN < hdrlen + max_linkhdr) 15789 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15790 else 15791 #endif 15792 m = m_gethdr(M_NOWAIT, MT_DATA); 15793 if (m == NULL) 15794 goto failed; 15795 m->m_data += max_linkhdr; 15796 m->m_len = hdrlen; 15797 th = rack->r_ctl.fsb.th; 15798 /* Establish the len to send */ 15799 if (len > max_val) 15800 len = max_val; 15801 if ((tso) && (len + optlen > tp->t_maxseg)) { 15802 uint32_t if_hw_tsomax; 15803 int32_t max_len; 15804 15805 /* extract TSO information */ 15806 if_hw_tsomax = tp->t_tsomax; 15807 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15808 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15809 /* 15810 * Check if we should limit by maximum payload 15811 * length: 15812 */ 15813 if (if_hw_tsomax != 0) { 15814 /* compute maximum TSO length */ 15815 max_len = (if_hw_tsomax - hdrlen - 15816 max_linkhdr); 15817 if (max_len <= 0) { 15818 goto failed; 15819 } else if (len > max_len) { 15820 len = max_len; 15821 } 15822 } 15823 if (len <= segsiz) { 15824 /* 15825 * In case there are too many small fragments don't 15826 * use TSO: 15827 */ 15828 tso = 0; 15829 } 15830 } else { 15831 tso = 0; 15832 } 15833 if ((tso == 0) && (len > 
segsiz)) 15834 len = segsiz; 15835 if ((len == 0) || 15836 (len <= MHLEN - hdrlen - max_linkhdr)) { 15837 goto failed; 15838 } 15839 th->th_seq = htonl(rsm->r_start); 15840 th->th_ack = htonl(tp->rcv_nxt); 15841 /* 15842 * The PUSH bit should only be applied 15843 * if the full retransmission is made. If 15844 * we are sending less than this is the 15845 * left hand edge and should not have 15846 * the PUSH bit. 15847 */ 15848 if ((rsm->r_flags & RACK_HAD_PUSH) && 15849 (len == (rsm->r_end - rsm->r_start))) 15850 flags |= TH_PUSH; 15851 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15852 if (th->th_win == 0) { 15853 tp->t_sndzerowin++; 15854 tp->t_flags |= TF_RXWIN0SENT; 15855 } else 15856 tp->t_flags &= ~TF_RXWIN0SENT; 15857 if (rsm->r_flags & RACK_TLP) { 15858 /* 15859 * TLP should not count in retran count, but 15860 * in its own bin 15861 */ 15862 counter_u64_add(rack_tlp_retran, 1); 15863 counter_u64_add(rack_tlp_retran_bytes, len); 15864 } else { 15865 tp->t_sndrexmitpack++; 15866 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15867 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15868 } 15869 #ifdef STATS 15870 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15871 len); 15872 #endif 15873 if (rsm->m == NULL) 15874 goto failed; 15875 if (rsm->orig_m_len != rsm->m->m_len) { 15876 /* Fix up the orig_m_len and possibly the mbuf offset */ 15877 rack_adjust_orig_mlen(rsm); 15878 } 15879 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15880 if (len <= segsiz) { 15881 /* 15882 * Must have ran out of mbufs for the copy 15883 * shorten it to no longer need tso. Lets 15884 * not put on sendalot since we are low on 15885 * mbufs. 15886 */ 15887 tso = 0; 15888 } 15889 if ((m->m_next == NULL) || (len <= 0)){ 15890 goto failed; 15891 } 15892 if (udp) { 15893 if (rack->r_is_v6) 15894 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15895 else 15896 ulen = hdrlen + len - sizeof(struct ip); 15897 udp->uh_ulen = htons(ulen); 15898 } 15899 m->m_pkthdr.rcvif = (struct ifnet *)0; 15900 if (TCPS_HAVERCVDSYN(tp->t_state) && 15901 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 15902 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15903 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15904 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15905 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15906 #ifdef INET6 15907 if (rack->r_is_v6) { 15908 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15909 ip6->ip6_flow |= htonl(ect << 20); 15910 } 15911 else 15912 #endif 15913 { 15914 ip->ip_tos &= ~IPTOS_ECN_MASK; 15915 ip->ip_tos |= ect; 15916 } 15917 } 15918 tcp_set_flags(th, flags); 15919 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15920 #ifdef INET6 15921 if (rack->r_is_v6) { 15922 if (tp->t_port) { 15923 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15924 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15925 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15926 th->th_sum = htons(0); 15927 UDPSTAT_INC(udps_opackets); 15928 } else { 15929 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15930 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15931 th->th_sum = in6_cksum_pseudo(ip6, 15932 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15933 0); 15934 } 15935 } 15936 #endif 15937 #if defined(INET6) && defined(INET) 15938 else 15939 #endif 15940 #ifdef INET 15941 { 15942 if (tp->t_port) { 15943 m->m_pkthdr.csum_flags = CSUM_UDP; 15944 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15945 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15946 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15947 th->th_sum = htons(0); 15948 UDPSTAT_INC(udps_opackets); 15949 } else { 15950 m->m_pkthdr.csum_flags = CSUM_TCP; 15951 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15952 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15953 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15954 IPPROTO_TCP + len + optlen)); 15955 } 15956 /* IP version must be set here for ipv4/ipv6 checking later */ 15957 KASSERT(ip->ip_v == IPVERSION, 15958 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15959 } 15960 #endif 15961 if (tso) { 15962 KASSERT(len > tp->t_maxseg - optlen, 15963 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15964 m->m_pkthdr.csum_flags |= CSUM_TSO; 15965 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15966 } 15967 #ifdef INET6 15968 if (rack->r_is_v6) { 15969 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15970 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15971 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15972 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15973 else 15974 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15975 } 15976 #endif 15977 #if defined(INET) && defined(INET6) 15978 else 15979 #endif 15980 #ifdef INET 15981 { 15982 ip->ip_len = htons(m->m_pkthdr.len); 15983 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15984 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15985 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15986 if (tp->t_port == 0 || len < V_tcp_minmss) { 15987 ip->ip_off |= htons(IP_DF); 15988 } 15989 } else { 15990 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15991 } 15992 } 15993 #endif 15994 /* Time to copy in our header */ 15995 cpto = mtod(m, uint8_t *); 15996 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15997 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15998 if (optlen) { 15999 bcopy(opt, th + 1, optlen); 16000 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16001 } else { 16002 th->th_off = sizeof(struct tcphdr) >> 2; 16003 } 16004 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16005 union tcp_log_stackspecific log; 16006 16007 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 16008 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 16009 counter_u64_add(rack_collapsed_win_rxt, 1); 16010 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 16011 } 16012 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16013 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16014 if (rack->rack_no_prr) 16015 log.u_bbr.flex1 = 0; 16016 else 16017 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16018 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16019 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16020 log.u_bbr.flex4 = max_val; 16021 log.u_bbr.flex5 = 0; 16022 /* Save off the early/late values */ 16023 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16024 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16025 log.u_bbr.bw_inuse = rack_get_bw(rack); 16026 if (doing_tlp == 0) 16027 log.u_bbr.flex8 = 1; 16028 else 16029 log.u_bbr.flex8 = 2; 16030 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16031 log.u_bbr.flex7 = 55; 16032 log.u_bbr.pkts_out = tp->t_maxseg; 16033 log.u_bbr.timeStamp = cts; 16034 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16035 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16036 log.u_bbr.delivered = 0; 16037 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16038 len, &log, false, NULL, NULL, 0, tv); 16039 } else 16040 
lgb = NULL; 16041 #ifdef INET6 16042 if (rack->r_is_v6) { 16043 error = ip6_output(m, NULL, 16044 &inp->inp_route6, 16045 0, NULL, NULL, inp); 16046 } 16047 #endif 16048 #if defined(INET) && defined(INET6) 16049 else 16050 #endif 16051 #ifdef INET 16052 { 16053 error = ip_output(m, NULL, 16054 &inp->inp_route, 16055 0, 0, inp); 16056 } 16057 #endif 16058 m = NULL; 16059 if (lgb) { 16060 lgb->tlb_errno = error; 16061 lgb = NULL; 16062 } 16063 if (error) { 16064 goto failed; 16065 } 16066 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 16067 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 16068 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 16069 rack->rc_tlp_in_progress = 1; 16070 rack->r_ctl.rc_tlp_cnt_out++; 16071 } 16072 if (error == 0) { 16073 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 16074 if (doing_tlp) { 16075 rack->rc_last_sent_tlp_past_cumack = 0; 16076 rack->rc_last_sent_tlp_seq_valid = 1; 16077 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 16078 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 16079 } 16080 } 16081 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16082 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16083 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 16084 rack->r_ctl.retran_during_recovery += len; 16085 { 16086 int idx; 16087 16088 idx = (len / segsiz) + 3; 16089 if (idx >= TCP_MSS_ACCT_ATIMER) 16090 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16091 else 16092 counter_u64_add(rack_out_size[idx], 1); 16093 } 16094 if (tp->t_rtttime == 0) { 16095 tp->t_rtttime = ticks; 16096 tp->t_rtseq = startseq; 16097 KMOD_TCPSTAT_INC(tcps_segstimed); 16098 } 16099 counter_u64_add(rack_fto_rsm_send, 1); 16100 if (error && (error == ENOBUFS)) { 16101 if (rack->r_ctl.crte != NULL) { 16102 rack_trace_point(rack, RACK_TP_HWENOBUF); 16103 } else 16104 rack_trace_point(rack, RACK_TP_ENOBUF); 16105 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16106 if (rack->rc_enobuf < 0x7f) 16107 rack->rc_enobuf++; 16108 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16109 slot = 10 * HPTS_USEC_IN_MSEC; 16110 } else 16111 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16112 if ((slot == 0) || 16113 (rack->rc_always_pace == 0) || 16114 (rack->r_rr_config == 1)) { 16115 /* 16116 * We have no pacing set or we 16117 * are using old-style rack or 16118 * we are overridden to use the old 1ms pacing. 16119 */ 16120 slot = rack->r_ctl.rc_min_to; 16121 } 16122 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16123 #ifdef TCP_ACCOUNTING 16124 crtsc = get_cyclecount(); 16125 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16126 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16127 } 16128 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16129 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16130 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16131 } 16132 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16133 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16134 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16135 } 16136 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16137 sched_unpin(); 16138 #endif 16139 return (0); 16140 failed: 16141 if (m) 16142 m_free(m); 16143 return (-1); 16144 } 16145 16146 static void 16147 rack_sndbuf_autoscale(struct tcp_rack *rack) 16148 { 16149 /* 16150 * Automatic sizing of send socket buffer. 
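/*
 * Illustrative sketch of the ENOBUFS backoff above (not part of the
 * stack; the name is made up): every consecutive ENOBUFS stretches the
 * pacing slot by another millisecond, with a 10 ms floor, so a
 * struggling interface or hardware rate queue gets progressively more
 * slack before we try again.
 */
static int32_t
enobuf_backoff_example(uint8_t rc_enobuf)
{
	int32_t slot;

	slot = (1 + rc_enobuf) * 1000;	/* HPTS_USEC_IN_MSEC per failure */
	if (slot < 10 * 1000)
		slot = 10 * 1000;	/* never less than 10 ms */
	return (slot);
}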
Often the send buffer 16151 * size is not optimally adjusted to the actual network conditions 16152 * at hand (delay bandwidth product). Setting the buffer size too 16153 * small limits throughput on links with high bandwidth and high 16154 * delay (e.g. trans-continental/oceanic links). Setting the 16155 * buffer size too big consumes too much real kernel memory, 16156 * especially with many connections on busy servers. 16157 * 16158 * The criteria to step up the send buffer one notch are: 16159 * 1. receive window of remote host is larger than send buffer 16160 * (with a fudge factor of 5/4th); 16161 * 2. send buffer is filled to 7/8th with data (so we actually 16162 * have data to make use of it); 16163 * 3. send buffer fill has not hit maximal automatic size; 16164 * 4. our send window (slow start and congestion controlled) is 16165 * larger than sent but unacknowledged data in send buffer. 16166 * 16167 * Note that the rack version moves things much faster since 16168 * we want to avoid hitting cache lines in the rack_fast_output() 16169 * path so this is called much less often and thus moves 16170 * the SB forward by a percentage. 16171 */ 16172 struct socket *so; 16173 struct tcpcb *tp; 16174 uint32_t sendwin, scaleup; 16175 16176 tp = rack->rc_tp; 16177 so = rack->rc_inp->inp_socket; 16178 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16179 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16180 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16181 sbused(&so->so_snd) >= 16182 (so->so_snd.sb_hiwat / 8 * 7) && 16183 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16184 sendwin >= (sbused(&so->so_snd) - 16185 (tp->snd_nxt - tp->snd_una))) { 16186 if (rack_autosndbuf_inc) 16187 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16188 else 16189 scaleup = V_tcp_autosndbuf_inc; 16190 if (scaleup < V_tcp_autosndbuf_inc) 16191 scaleup = V_tcp_autosndbuf_inc; 16192 scaleup += so->so_snd.sb_hiwat; 16193 if (scaleup > V_tcp_autosndbuf_max) 16194 scaleup = V_tcp_autosndbuf_max; 16195 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 16196 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16197 } 16198 } 16199 } 16200 16201 static int 16202 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16203 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16204 { 16205 /* 16206 * Enter to do fast output. We are given that the sched_pin is 16207 * in place (if accounting is compiled in) and the cycle count taken 16208 * at entry is in place in ts_val. The idea here is that 16209 * we know how many more bytes need to be sent (presumably either 16210 * during pacing or to fill the cwnd and that was greater than 16211 * the max-burst). We have how much to send and all the info we 16212 * need to just send.
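/*
 * Illustrative sketch of the autoscale step above (not part of the
 * stack; the name is made up): instead of growing the send buffer by the
 * stock fixed increment, rack grows it by a percentage of its current
 * size (rack_autosndbuf_inc), clamped between the global autosndbuf
 * increment and maximum, which is why it can afford to be called far
 * less often.
 */
static uint32_t
sndbuf_scaleup_example(uint32_t sb_hiwat, uint32_t pct_inc,
    uint32_t min_inc, uint32_t max_size)
{
	uint32_t scaleup;

	scaleup = (pct_inc * sb_hiwat) / 100;	/* percentage growth */
	if (scaleup < min_inc)
		scaleup = min_inc;		/* at least the stock increment */
	scaleup += sb_hiwat;
	if (scaleup > max_size)
		scaleup = max_size;		/* never past the global cap */
	return (scaleup);
}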
16213 */ 16214 struct ip *ip = NULL; 16215 struct udphdr *udp = NULL; 16216 struct tcphdr *th = NULL; 16217 struct mbuf *m, *s_mb; 16218 struct inpcb *inp; 16219 uint8_t *cpto; 16220 struct tcp_log_buffer *lgb; 16221 #ifdef TCP_ACCOUNTING 16222 uint64_t crtsc; 16223 #endif 16224 struct tcpopt to; 16225 u_char opt[TCP_MAXOLEN]; 16226 uint32_t hdrlen, optlen; 16227 #ifdef TCP_ACCOUNTING 16228 int cnt_thru = 1; 16229 #endif 16230 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16231 uint16_t flags; 16232 uint32_t s_soff; 16233 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16234 uint32_t if_hw_tsomaxsegsize; 16235 uint16_t add_flag = RACK_SENT_FP; 16236 #ifdef INET6 16237 struct ip6_hdr *ip6 = NULL; 16238 16239 if (rack->r_is_v6) { 16240 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16241 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16242 } else 16243 #endif /* INET6 */ 16244 { 16245 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16246 hdrlen = sizeof(struct tcpiphdr); 16247 } 16248 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16249 m = NULL; 16250 goto failed; 16251 } 16252 startseq = tp->snd_max; 16253 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16254 inp = rack->rc_inp; 16255 len = rack->r_ctl.fsb.left_to_send; 16256 to.to_flags = 0; 16257 flags = rack->r_ctl.fsb.tcp_flags; 16258 if (tp->t_flags & TF_RCVD_TSTMP) { 16259 to.to_tsval = ms_cts + tp->ts_offset; 16260 to.to_tsecr = tp->ts_recent; 16261 to.to_flags = TOF_TS; 16262 } 16263 optlen = tcp_addoptions(&to, opt); 16264 hdrlen += optlen; 16265 udp = rack->r_ctl.fsb.udp; 16266 if (udp) 16267 hdrlen += sizeof(struct udphdr); 16268 if (rack->r_ctl.rc_pace_max_segs) 16269 max_val = rack->r_ctl.rc_pace_max_segs; 16270 else if (rack->rc_user_set_max_segs) 16271 max_val = rack->rc_user_set_max_segs * segsiz; 16272 else 16273 max_val = len; 16274 if ((tp->t_flags & TF_TSO) && 16275 V_tcp_do_tso && 16276 (len > segsiz) && 16277 (tp->t_port == 0)) 16278 tso = 1; 16279 again: 16280 #ifdef INET6 16281 if (MHLEN < hdrlen + max_linkhdr) 16282 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16283 else 16284 #endif 16285 m = m_gethdr(M_NOWAIT, MT_DATA); 16286 if (m == NULL) 16287 goto failed; 16288 m->m_data += max_linkhdr; 16289 m->m_len = hdrlen; 16290 th = rack->r_ctl.fsb.th; 16291 /* Establish the len to send */ 16292 if (len > max_val) 16293 len = max_val; 16294 if ((tso) && (len + optlen > tp->t_maxseg)) { 16295 uint32_t if_hw_tsomax; 16296 int32_t max_len; 16297 16298 /* extract TSO information */ 16299 if_hw_tsomax = tp->t_tsomax; 16300 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16301 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16302 /* 16303 * Check if we should limit by maximum payload 16304 * length: 16305 */ 16306 if (if_hw_tsomax != 0) { 16307 /* compute maximum TSO length */ 16308 max_len = (if_hw_tsomax - hdrlen - 16309 max_linkhdr); 16310 if (max_len <= 0) { 16311 goto failed; 16312 } else if (len > max_len) { 16313 len = max_len; 16314 } 16315 } 16316 if (len <= segsiz) { 16317 /* 16318 * In case there are too many small fragments don't 16319 * use TSO: 16320 */ 16321 tso = 0; 16322 } 16323 } else { 16324 tso = 0; 16325 } 16326 if ((tso == 0) && (len > segsiz)) 16327 len = segsiz; 16328 if ((len == 0) || 16329 (len <= MHLEN - hdrlen - max_linkhdr)) { 16330 goto failed; 16331 } 16332 sb_offset = tp->snd_max - tp->snd_una; 16333 th->th_seq = htonl(tp->snd_max); 16334 th->th_ack = htonl(tp->rcv_nxt); 16335 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> 
tp->rcv_scale)); 16336 if (th->th_win == 0) { 16337 tp->t_sndzerowin++; 16338 tp->t_flags |= TF_RXWIN0SENT; 16339 } else 16340 tp->t_flags &= ~TF_RXWIN0SENT; 16341 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16342 KMOD_TCPSTAT_INC(tcps_sndpack); 16343 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16344 #ifdef STATS 16345 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16346 len); 16347 #endif 16348 if (rack->r_ctl.fsb.m == NULL) 16349 goto failed; 16350 16351 /* s_mb and s_soff are saved for rack_log_output */ 16352 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16353 &s_mb, &s_soff); 16354 if (len <= segsiz) { 16355 /* 16356 * Must have ran out of mbufs for the copy 16357 * shorten it to no longer need tso. Lets 16358 * not put on sendalot since we are low on 16359 * mbufs. 16360 */ 16361 tso = 0; 16362 } 16363 if (rack->r_ctl.fsb.rfo_apply_push && 16364 (len == rack->r_ctl.fsb.left_to_send)) { 16365 flags |= TH_PUSH; 16366 add_flag |= RACK_HAD_PUSH; 16367 } 16368 if ((m->m_next == NULL) || (len <= 0)){ 16369 goto failed; 16370 } 16371 if (udp) { 16372 if (rack->r_is_v6) 16373 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16374 else 16375 ulen = hdrlen + len - sizeof(struct ip); 16376 udp->uh_ulen = htons(ulen); 16377 } 16378 m->m_pkthdr.rcvif = (struct ifnet *)0; 16379 if (TCPS_HAVERCVDSYN(tp->t_state) && 16380 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 16381 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16382 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16383 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16384 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16385 #ifdef INET6 16386 if (rack->r_is_v6) { 16387 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16388 ip6->ip6_flow |= htonl(ect << 20); 16389 } 16390 else 16391 #endif 16392 { 16393 ip->ip_tos &= ~IPTOS_ECN_MASK; 16394 ip->ip_tos |= ect; 16395 } 16396 } 16397 tcp_set_flags(th, flags); 16398 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16399 #ifdef INET6 16400 if (rack->r_is_v6) { 16401 if (tp->t_port) { 16402 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16403 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16404 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16405 th->th_sum = htons(0); 16406 UDPSTAT_INC(udps_opackets); 16407 } else { 16408 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16409 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16410 th->th_sum = in6_cksum_pseudo(ip6, 16411 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16412 0); 16413 } 16414 } 16415 #endif 16416 #if defined(INET6) && defined(INET) 16417 else 16418 #endif 16419 #ifdef INET 16420 { 16421 if (tp->t_port) { 16422 m->m_pkthdr.csum_flags = CSUM_UDP; 16423 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16424 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16425 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16426 th->th_sum = htons(0); 16427 UDPSTAT_INC(udps_opackets); 16428 } else { 16429 m->m_pkthdr.csum_flags = CSUM_TCP; 16430 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16431 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16432 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16433 IPPROTO_TCP + len + optlen)); 16434 } 16435 /* IP version must be set here for ipv4/ipv6 checking later */ 16436 KASSERT(ip->ip_v == IPVERSION, 16437 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16438 } 16439 #endif 16440 if (tso) { 16441 KASSERT(len > tp->t_maxseg - optlen, 16442 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16443 m->m_pkthdr.csum_flags |= CSUM_TSO; 
16444 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16445 } 16446 #ifdef INET6 16447 if (rack->r_is_v6) { 16448 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16449 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16450 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16451 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16452 else 16453 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16454 } 16455 #endif 16456 #if defined(INET) && defined(INET6) 16457 else 16458 #endif 16459 #ifdef INET 16460 { 16461 ip->ip_len = htons(m->m_pkthdr.len); 16462 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16463 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16464 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16465 if (tp->t_port == 0 || len < V_tcp_minmss) { 16466 ip->ip_off |= htons(IP_DF); 16467 } 16468 } else { 16469 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16470 } 16471 } 16472 #endif 16473 /* Time to copy in our header */ 16474 cpto = mtod(m, uint8_t *); 16475 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16476 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16477 if (optlen) { 16478 bcopy(opt, th + 1, optlen); 16479 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16480 } else { 16481 th->th_off = sizeof(struct tcphdr) >> 2; 16482 } 16483 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16484 union tcp_log_stackspecific log; 16485 16486 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16487 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16488 if (rack->rack_no_prr) 16489 log.u_bbr.flex1 = 0; 16490 else 16491 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16492 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16493 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16494 log.u_bbr.flex4 = max_val; 16495 log.u_bbr.flex5 = 0; 16496 /* Save off the early/late values */ 16497 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16498 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16499 log.u_bbr.bw_inuse = rack_get_bw(rack); 16500 log.u_bbr.flex8 = 0; 16501 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16502 log.u_bbr.flex7 = 44; 16503 log.u_bbr.pkts_out = tp->t_maxseg; 16504 log.u_bbr.timeStamp = cts; 16505 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16506 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16507 log.u_bbr.delivered = 0; 16508 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16509 len, &log, false, NULL, NULL, 0, tv); 16510 } else 16511 lgb = NULL; 16512 #ifdef INET6 16513 if (rack->r_is_v6) { 16514 error = ip6_output(m, NULL, 16515 &inp->inp_route6, 16516 0, NULL, NULL, inp); 16517 } 16518 #endif 16519 #if defined(INET) && defined(INET6) 16520 else 16521 #endif 16522 #ifdef INET 16523 { 16524 error = ip_output(m, NULL, 16525 &inp->inp_route, 16526 0, 0, inp); 16527 } 16528 #endif 16529 if (lgb) { 16530 lgb->tlb_errno = error; 16531 lgb = NULL; 16532 } 16533 if (error) { 16534 *send_err = error; 16535 m = NULL; 16536 goto failed; 16537 } 16538 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16539 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16540 m = NULL; 16541 if (tp->snd_una == tp->snd_max) { 16542 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16543 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16544 tp->t_acktime = ticks; 16545 } 16546 if (error == 0) 16547 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16548 16549 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16550 tot_len += len; 16551 if ((tp->t_flags & TF_GPUTINPROG) == 
0) 16552 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16553 tp->snd_max += len; 16554 tp->snd_nxt = tp->snd_max; 16555 { 16556 int idx; 16557 16558 idx = (len / segsiz) + 3; 16559 if (idx >= TCP_MSS_ACCT_ATIMER) 16560 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16561 else 16562 counter_u64_add(rack_out_size[idx], 1); 16563 } 16564 if (len <= rack->r_ctl.fsb.left_to_send) 16565 rack->r_ctl.fsb.left_to_send -= len; 16566 else 16567 rack->r_ctl.fsb.left_to_send = 0; 16568 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16569 rack->r_fast_output = 0; 16570 rack->r_ctl.fsb.left_to_send = 0; 16571 /* At the end of fast_output scale up the sb */ 16572 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16573 rack_sndbuf_autoscale(rack); 16574 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16575 } 16576 if (tp->t_rtttime == 0) { 16577 tp->t_rtttime = ticks; 16578 tp->t_rtseq = startseq; 16579 KMOD_TCPSTAT_INC(tcps_segstimed); 16580 } 16581 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16582 (max_val > len) && 16583 (tso == 0)) { 16584 max_val -= len; 16585 len = segsiz; 16586 th = rack->r_ctl.fsb.th; 16587 #ifdef TCP_ACCOUNTING 16588 cnt_thru++; 16589 #endif 16590 goto again; 16591 } 16592 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16593 counter_u64_add(rack_fto_send, 1); 16594 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16595 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16596 #ifdef TCP_ACCOUNTING 16597 crtsc = get_cyclecount(); 16598 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16599 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16600 } 16601 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16602 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16603 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16604 } 16605 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16606 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16607 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16608 } 16609 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16610 sched_unpin(); 16611 #endif 16612 return (0); 16613 failed: 16614 if (m) 16615 m_free(m); 16616 rack->r_fast_output = 0; 16617 return (-1); 16618 } 16619 16620 static struct rack_sendmap * 16621 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 16622 { 16623 struct rack_sendmap *rsm = NULL; 16624 struct rack_sendmap fe; 16625 int thresh; 16626 16627 restart: 16628 fe.r_start = rack->r_ctl.last_collapse_point; 16629 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 16630 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 16631 /* Nothing, strange turn off validity */ 16632 rack->r_collapse_point_valid = 0; 16633 return (NULL); 16634 } 16635 /* Can we send it yet? */ 16636 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 16637 /* 16638 * Receiver window has not grown enough for 16639 * the segment to be put on the wire. 16640 */ 16641 return (NULL); 16642 } 16643 if (rsm->r_flags & RACK_ACKED) { 16644 /* 16645 * It has been sacked, lets move to the 16646 * next one if possible. 16647 */ 16648 rack->r_ctl.last_collapse_point = rsm->r_end; 16649 /* Are we done? */ 16650 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16651 rack->r_ctl.high_collapse_point)) { 16652 rack->r_collapse_point_valid = 0; 16653 return (NULL); 16654 } 16655 goto restart; 16656 } 16657 /* Now has it been long enough ? 
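/*
 * Illustrative sketch of the gating in rack_check_collapsed() (not part
 * of the stack; the name is made up and the plain compares ignore
 * sequence wrap, purely for illustration): a segment cut off by a
 * shrinking receive window is only retransmitted once the window has
 * grown back past its end and a RACK reorder-threshold worth of time has
 * elapsed since it was last sent.
 */
static int
collapsed_can_send_example(uint32_t r_end, uint32_t snd_una,
    uint32_t snd_wnd, uint32_t cts, uint32_t last_sent, uint32_t thresh)
{
	if (r_end > snd_una + snd_wnd)
		return (0);	/* receiver still cannot take it */
	if ((cts - last_sent) <= thresh)
		return (0);	/* not enough time has passed yet */
	return (1);		/* ok to retransmit */
}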
*/ 16658 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 16659 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 16660 rack_log_collapse(rack, rsm->r_start, 16661 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16662 thresh, __LINE__, 6, rsm->r_flags, rsm); 16663 return (rsm); 16664 } 16665 /* Not enough time */ 16666 rack_log_collapse(rack, rsm->r_start, 16667 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16668 thresh, __LINE__, 7, rsm->r_flags, rsm); 16669 return (NULL); 16670 } 16671 16672 static int 16673 rack_output(struct tcpcb *tp) 16674 { 16675 struct socket *so; 16676 uint32_t recwin; 16677 uint32_t sb_offset, s_moff = 0; 16678 int32_t len, error = 0; 16679 uint16_t flags; 16680 struct mbuf *m, *s_mb = NULL; 16681 struct mbuf *mb; 16682 uint32_t if_hw_tsomaxsegcount = 0; 16683 uint32_t if_hw_tsomaxsegsize; 16684 int32_t segsiz, minseg; 16685 long tot_len_this_send = 0; 16686 #ifdef INET 16687 struct ip *ip = NULL; 16688 #endif 16689 struct udphdr *udp = NULL; 16690 struct tcp_rack *rack; 16691 struct tcphdr *th; 16692 uint8_t pass = 0; 16693 uint8_t mark = 0; 16694 uint8_t wanted_cookie = 0; 16695 u_char opt[TCP_MAXOLEN]; 16696 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16697 uint32_t rack_seq; 16698 16699 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16700 unsigned ipsec_optlen = 0; 16701 16702 #endif 16703 int32_t idle, sendalot; 16704 int32_t sub_from_prr = 0; 16705 volatile int32_t sack_rxmit; 16706 struct rack_sendmap *rsm = NULL; 16707 int32_t tso, mtu; 16708 struct tcpopt to; 16709 int32_t slot = 0; 16710 int32_t sup_rack = 0; 16711 uint32_t cts, ms_cts, delayed, early; 16712 uint16_t add_flag = RACK_SENT_SP; 16713 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16714 uint8_t hpts_calling, doing_tlp = 0; 16715 uint32_t cwnd_to_use, pace_max_seg; 16716 int32_t do_a_prefetch = 0; 16717 int32_t prefetch_rsm = 0; 16718 int32_t orig_len = 0; 16719 struct timeval tv; 16720 int32_t prefetch_so_done = 0; 16721 struct tcp_log_buffer *lgb; 16722 struct inpcb *inp = tptoinpcb(tp); 16723 struct sockbuf *sb; 16724 uint64_t ts_val = 0; 16725 #ifdef TCP_ACCOUNTING 16726 uint64_t crtsc; 16727 #endif 16728 #ifdef INET6 16729 struct ip6_hdr *ip6 = NULL; 16730 int32_t isipv6; 16731 #endif 16732 bool hw_tls = false; 16733 16734 NET_EPOCH_ASSERT(); 16735 INP_WLOCK_ASSERT(inp); 16736 16737 /* setup and take the cache hits here */ 16738 rack = (struct tcp_rack *)tp->t_fb_ptr; 16739 #ifdef TCP_ACCOUNTING 16740 sched_pin(); 16741 ts_val = get_cyclecount(); 16742 #endif 16743 hpts_calling = inp->inp_hpts_calls; 16744 #ifdef TCP_OFFLOAD 16745 if (tp->t_flags & TF_TOE) { 16746 #ifdef TCP_ACCOUNTING 16747 sched_unpin(); 16748 #endif 16749 return (tcp_offload_output(tp)); 16750 } 16751 #endif 16752 /* 16753 * For TFO connections in SYN_RECEIVED, only allow the initial 16754 * SYN|ACK and those sent by the retransmit timer. 
16755 */ 16756 if (IS_FASTOPEN(tp->t_flags) && 16757 (tp->t_state == TCPS_SYN_RECEIVED) && 16758 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16759 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16760 #ifdef TCP_ACCOUNTING 16761 sched_unpin(); 16762 #endif 16763 return (0); 16764 } 16765 #ifdef INET6 16766 if (rack->r_state) { 16767 /* Use the cache line loaded if possible */ 16768 isipv6 = rack->r_is_v6; 16769 } else { 16770 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16771 } 16772 #endif 16773 early = 0; 16774 cts = tcp_get_usecs(&tv); 16775 ms_cts = tcp_tv_to_mssectick(&tv); 16776 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16777 tcp_in_hpts(rack->rc_inp)) { 16778 /* 16779 * We are on the hpts for some timer but not hptsi output. 16780 * Remove from the hpts unconditionally. 16781 */ 16782 rack_timer_cancel(tp, rack, cts, __LINE__); 16783 } 16784 /* Are we pacing and late? */ 16785 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16786 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16787 /* We are delayed */ 16788 delayed = cts - rack->r_ctl.rc_last_output_to; 16789 } else { 16790 delayed = 0; 16791 } 16792 /* Do the timers, which may override the pacer */ 16793 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16794 int retval; 16795 16796 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16797 &doing_tlp); 16798 if (retval != 0) { 16799 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16800 #ifdef TCP_ACCOUNTING 16801 sched_unpin(); 16802 #endif 16803 /* 16804 * If timers want tcp_drop(), then pass error out, 16805 * otherwise suppress it. 16806 */ 16807 return (retval < 0 ? retval : 0); 16808 } 16809 } 16810 if (rack->rc_in_persist) { 16811 if (tcp_in_hpts(rack->rc_inp) == 0) { 16812 /* Timer is not running */ 16813 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16814 } 16815 #ifdef TCP_ACCOUNTING 16816 sched_unpin(); 16817 #endif 16818 return (0); 16819 } 16820 if ((rack->rc_ack_required == 1) && 16821 (rack->r_timer_override == 0)){ 16822 /* A timeout occurred and no ack has arrived */ 16823 if (tcp_in_hpts(rack->rc_inp) == 0) { 16824 /* Timer is not running */ 16825 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16826 } 16827 #ifdef TCP_ACCOUNTING 16828 sched_unpin(); 16829 #endif 16830 return (0); 16831 } 16832 if ((rack->r_timer_override) || 16833 (rack->rc_ack_can_sendout_data) || 16834 (delayed) || 16835 (tp->t_state < TCPS_ESTABLISHED)) { 16836 rack->rc_ack_can_sendout_data = 0; 16837 if (tcp_in_hpts(rack->rc_inp)) 16838 tcp_hpts_remove(rack->rc_inp); 16839 } else if (tcp_in_hpts(rack->rc_inp)) { 16840 /* 16841 * On the hpts you can't pass even if ACKNOW is on, we will 16842 * when the hpts fires. 
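 * While the pacing slot is still pending we only account the blocked
 * cycles (when TCP_ACCOUNTING is compiled in) and bump the in-pace
 * counter below; the deferred transmit happens once the hpts fires and
 * drives the output path again.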
16843 */ 16844 #ifdef TCP_ACCOUNTING 16845 crtsc = get_cyclecount(); 16846 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16847 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16848 } 16849 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16850 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16851 tp->tcp_cnt_counters[SND_BLOCKED]++; 16852 } 16853 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16854 sched_unpin(); 16855 #endif 16856 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16857 return (0); 16858 } 16859 rack->rc_inp->inp_hpts_calls = 0; 16860 /* Finish out both pacing early and late accounting */ 16861 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16862 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16863 early = rack->r_ctl.rc_last_output_to - cts; 16864 } else 16865 early = 0; 16866 if (delayed) { 16867 rack->r_ctl.rc_agg_delayed += delayed; 16868 rack->r_late = 1; 16869 } else if (early) { 16870 rack->r_ctl.rc_agg_early += early; 16871 rack->r_early = 1; 16872 } 16873 /* Now that early/late accounting is done turn off the flag */ 16874 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16875 rack->r_wanted_output = 0; 16876 rack->r_timer_override = 0; 16877 if ((tp->t_state != rack->r_state) && 16878 TCPS_HAVEESTABLISHED(tp->t_state)) { 16879 rack_set_state(tp, rack); 16880 } 16881 if ((rack->r_fast_output) && 16882 (doing_tlp == 0) && 16883 (tp->rcv_numsacks == 0)) { 16884 int ret; 16885 16886 error = 0; 16887 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16888 if (ret >= 0) 16889 return(ret); 16890 else if (error) { 16891 inp = rack->rc_inp; 16892 so = inp->inp_socket; 16893 sb = &so->so_snd; 16894 goto nomore; 16895 } 16896 } 16897 inp = rack->rc_inp; 16898 /* 16899 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16900 * only allow the initial SYN or SYN|ACK and those sent 16901 * by the retransmit timer. 16902 */ 16903 if (IS_FASTOPEN(tp->t_flags) && 16904 ((tp->t_state == TCPS_SYN_RECEIVED) || 16905 (tp->t_state == TCPS_SYN_SENT)) && 16906 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16907 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16908 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16909 so = inp->inp_socket; 16910 sb = &so->so_snd; 16911 goto just_return_nolock; 16912 } 16913 /* 16914 * Determine length of data that should be transmitted, and flags 16915 * that will be used. If there is some data or critical controls 16916 * (SYN, RST) to send, then transmit; otherwise, investigate 16917 * further. 
16918 */ 16919 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16920 if (tp->t_idle_reduce) { 16921 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16922 rack_cc_after_idle(rack, tp); 16923 } 16924 tp->t_flags &= ~TF_LASTIDLE; 16925 if (idle) { 16926 if (tp->t_flags & TF_MORETOCOME) { 16927 tp->t_flags |= TF_LASTIDLE; 16928 idle = 0; 16929 } 16930 } 16931 if ((tp->snd_una == tp->snd_max) && 16932 rack->r_ctl.rc_went_idle_time && 16933 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16934 idle = cts - rack->r_ctl.rc_went_idle_time; 16935 if (idle > rack_min_probertt_hold) { 16936 /* Count as a probe rtt */ 16937 if (rack->in_probe_rtt == 0) { 16938 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16939 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16940 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16941 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16942 } else { 16943 rack_exit_probertt(rack, cts); 16944 } 16945 } 16946 idle = 0; 16947 } 16948 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16949 rack_init_fsb_block(tp, rack); 16950 again: 16951 /* 16952 * If we've recently taken a timeout, snd_max will be greater than 16953 * snd_nxt. There may be SACK information that allows us to avoid 16954 * resending already delivered data. Adjust snd_nxt accordingly. 16955 */ 16956 sendalot = 0; 16957 cts = tcp_get_usecs(&tv); 16958 ms_cts = tcp_tv_to_mssectick(&tv); 16959 tso = 0; 16960 mtu = 0; 16961 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16962 minseg = segsiz; 16963 if (rack->r_ctl.rc_pace_max_segs == 0) 16964 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16965 else 16966 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16967 sb_offset = tp->snd_max - tp->snd_una; 16968 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16969 flags = tcp_outflags[tp->t_state]; 16970 while (rack->rc_free_cnt < rack_free_cache) { 16971 rsm = rack_alloc(rack); 16972 if (rsm == NULL) { 16973 if (inp->inp_hpts_calls) 16974 /* Retry in a ms */ 16975 slot = (1 * HPTS_USEC_IN_MSEC); 16976 so = inp->inp_socket; 16977 sb = &so->so_snd; 16978 goto just_return_nolock; 16979 } 16980 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16981 rack->rc_free_cnt++; 16982 rsm = NULL; 16983 } 16984 if (inp->inp_hpts_calls) 16985 inp->inp_hpts_calls = 0; 16986 sack_rxmit = 0; 16987 len = 0; 16988 rsm = NULL; 16989 if (flags & TH_RST) { 16990 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16991 so = inp->inp_socket; 16992 sb = &so->so_snd; 16993 goto send; 16994 } 16995 if (rack->r_ctl.rc_resend) { 16996 /* Retransmit timer */ 16997 rsm = rack->r_ctl.rc_resend; 16998 rack->r_ctl.rc_resend = NULL; 16999 len = rsm->r_end - rsm->r_start; 17000 sack_rxmit = 1; 17001 sendalot = 0; 17002 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17003 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17004 __func__, __LINE__, 17005 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17006 sb_offset = rsm->r_start - tp->snd_una; 17007 if (len >= segsiz) 17008 len = segsiz; 17009 } else if (rack->r_collapse_point_valid && 17010 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 17011 /* 17012 * If an RSM is returned then enough time has passed 17013 * for us to retransmit it. Move up the collapse point, 17014 * since this rsm has its chance to retransmit now. 
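 * Advancing last_collapse_point to rsm->r_end means a later pass resumes
 * with the next collapsed segment; once it reaches high_collapse_point
 * the collapsed range has had its chance and the state is cleared.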
17015 */ 17016 rack_trace_point(rack, RACK_TP_COLLAPSED_RXT); 17017 rack->r_ctl.last_collapse_point = rsm->r_end; 17018 /* Are we done? */ 17019 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 17020 rack->r_ctl.high_collapse_point)) 17021 rack->r_collapse_point_valid = 0; 17022 sack_rxmit = 1; 17023 /* We are not doing a TLP */ 17024 doing_tlp = 0; 17025 len = rsm->r_end - rsm->r_start; 17026 sb_offset = rsm->r_start - tp->snd_una; 17027 sendalot = 0; 17028 if ((rack->full_size_rxt == 0) && 17029 (rack->shape_rxt_to_pacing_min == 0) && 17030 (len >= segsiz)) 17031 len = segsiz; 17032 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 17033 /* We have a retransmit that takes precedence */ 17034 if ((!IN_FASTRECOVERY(tp->t_flags)) && 17035 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 17036 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 17037 /* Enter recovery if not induced by a time-out */ 17038 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 17039 } 17040 #ifdef INVARIANTS 17041 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 17042 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 17043 tp, rack, rsm, rsm->r_start, tp->snd_una); 17044 } 17045 #endif 17046 len = rsm->r_end - rsm->r_start; 17047 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17048 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17049 __func__, __LINE__, 17050 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17051 sb_offset = rsm->r_start - tp->snd_una; 17052 sendalot = 0; 17053 if (len >= segsiz) 17054 len = segsiz; 17055 if (len > 0) { 17056 sack_rxmit = 1; 17057 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 17058 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 17059 min(len, segsiz)); 17060 } 17061 } else if (rack->r_ctl.rc_tlpsend) { 17062 /* Tail loss probe */ 17063 long cwin; 17064 long tlen; 17065 17066 /* 17067 * Check if we can do a TLP with a RACK'd packet 17068 * this can happen if we are not doing the rack 17069 * cheat and we skipped to a TLP and it 17070 * went off. 17071 */ 17072 rsm = rack->r_ctl.rc_tlpsend; 17073 /* We are doing a TLP make sure the flag is preent */ 17074 rsm->r_flags |= RACK_TLP; 17075 rack->r_ctl.rc_tlpsend = NULL; 17076 sack_rxmit = 1; 17077 tlen = rsm->r_end - rsm->r_start; 17078 if (tlen > segsiz) 17079 tlen = segsiz; 17080 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17081 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17082 __func__, __LINE__, 17083 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17084 sb_offset = rsm->r_start - tp->snd_una; 17085 cwin = min(tp->snd_wnd, tlen); 17086 len = cwin; 17087 } 17088 if (rack->r_must_retran && 17089 (doing_tlp == 0) && 17090 (SEQ_GT(tp->snd_max, tp->snd_una)) && 17091 (rsm == NULL)) { 17092 /* 17093 * There are two different ways that we 17094 * can get into this block: 17095 * a) This is a non-sack connection, we had a time-out 17096 * and thus r_must_retran was set and everything 17097 * left outstanding as been marked for retransmit. 17098 * b) The MTU of the path shrank, so that everything 17099 * was marked to be retransmitted with the smaller 17100 * mtu and r_must_retran was set. 17101 * 17102 * This means that we expect the sendmap (outstanding) 17103 * to all be marked must. We can use the tmap to 17104 * look at them. 17105 * 17106 */ 17107 int sendwin, flight; 17108 17109 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 17110 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 17111 if (flight >= sendwin) { 17112 /* 17113 * We can't send yet. 
17114 */ 17115 so = inp->inp_socket; 17116 sb = &so->so_snd; 17117 goto just_return_nolock; 17118 } 17119 /* 17120 * This is the case a/b mentioned above. All 17121 * outstanding/not-acked should be marked. 17122 * We can use the tmap to find them. 17123 */ 17124 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17125 if (rsm == NULL) { 17126 /* TSNH */ 17127 rack->r_must_retran = 0; 17128 rack->r_ctl.rc_out_at_rto = 0; 17129 so = inp->inp_socket; 17130 sb = &so->so_snd; 17131 goto just_return_nolock; 17132 } 17133 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 17134 /* 17135 * The first one does not have the flag, did we collapse 17136 * further up in our list? 17137 */ 17138 rack->r_must_retran = 0; 17139 rack->r_ctl.rc_out_at_rto = 0; 17140 rsm = NULL; 17141 sack_rxmit = 0; 17142 } else { 17143 sack_rxmit = 1; 17144 len = rsm->r_end - rsm->r_start; 17145 sb_offset = rsm->r_start - tp->snd_una; 17146 sendalot = 0; 17147 if ((rack->full_size_rxt == 0) && 17148 (rack->shape_rxt_to_pacing_min == 0) && 17149 (len >= segsiz)) 17150 len = segsiz; 17151 /* 17152 * Delay removing the flag RACK_MUST_RXT so 17153 * that the fastpath for retransmit will 17154 * work with this rsm. 17155 */ 17156 } 17157 } 17158 /* 17159 * Enforce a connection sendmap count limit if set 17160 * as long as we are not retransmiting. 17161 */ 17162 if ((rsm == NULL) && 17163 (rack->do_detection == 0) && 17164 (V_tcp_map_entries_limit > 0) && 17165 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17166 counter_u64_add(rack_to_alloc_limited, 1); 17167 if (!rack->alloc_limit_reported) { 17168 rack->alloc_limit_reported = 1; 17169 counter_u64_add(rack_alloc_limited_conns, 1); 17170 } 17171 so = inp->inp_socket; 17172 sb = &so->so_snd; 17173 goto just_return_nolock; 17174 } 17175 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17176 /* we are retransmitting the fin */ 17177 len--; 17178 if (len) { 17179 /* 17180 * When retransmitting data do *not* include the 17181 * FIN. This could happen from a TLP probe. 
17182 */ 17183 flags &= ~TH_FIN; 17184 } 17185 } 17186 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17187 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17188 int ret; 17189 17190 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17191 if (ret == 0) 17192 return (0); 17193 } 17194 so = inp->inp_socket; 17195 sb = &so->so_snd; 17196 if (do_a_prefetch == 0) { 17197 kern_prefetch(sb, &do_a_prefetch); 17198 do_a_prefetch = 1; 17199 } 17200 #ifdef NETFLIX_SHARED_CWND 17201 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17202 rack->rack_enable_scwnd) { 17203 /* We are doing cwnd sharing */ 17204 if (rack->gp_ready && 17205 (rack->rack_attempted_scwnd == 0) && 17206 (rack->r_ctl.rc_scw == NULL) && 17207 tp->t_lib) { 17208 /* The pcbid is in, lets make an attempt */ 17209 counter_u64_add(rack_try_scwnd, 1); 17210 rack->rack_attempted_scwnd = 1; 17211 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17212 &rack->r_ctl.rc_scw_index, 17213 segsiz); 17214 } 17215 if (rack->r_ctl.rc_scw && 17216 (rack->rack_scwnd_is_idle == 1) && 17217 sbavail(&so->so_snd)) { 17218 /* we are no longer out of data */ 17219 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17220 rack->rack_scwnd_is_idle = 0; 17221 } 17222 if (rack->r_ctl.rc_scw) { 17223 /* First lets update and get the cwnd */ 17224 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17225 rack->r_ctl.rc_scw_index, 17226 tp->snd_cwnd, tp->snd_wnd, segsiz); 17227 } 17228 } 17229 #endif 17230 /* 17231 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17232 * state flags. 17233 */ 17234 if (tp->t_flags & TF_NEEDFIN) 17235 flags |= TH_FIN; 17236 if (tp->t_flags & TF_NEEDSYN) 17237 flags |= TH_SYN; 17238 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17239 void *end_rsm; 17240 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17241 if (end_rsm) 17242 kern_prefetch(end_rsm, &prefetch_rsm); 17243 prefetch_rsm = 1; 17244 } 17245 SOCKBUF_LOCK(sb); 17246 /* 17247 * If snd_nxt == snd_max and we have transmitted a FIN, the 17248 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17249 * negative length. This can also occur when TCP opens up its 17250 * congestion window while receiving additional duplicate acks after 17251 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17252 * the fast-retransmit. 17253 * 17254 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17255 * set to snd_una, the sb_offset will be 0, and the length may wind 17256 * up 0. 17257 * 17258 * If sack_rxmit is true we are retransmitting from the scoreboard 17259 * in which case len is already set. 
17260 */ 17261 if ((sack_rxmit == 0) && 17262 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17263 uint32_t avail; 17264 17265 avail = sbavail(sb); 17266 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17267 sb_offset = tp->snd_nxt - tp->snd_una; 17268 else 17269 sb_offset = 0; 17270 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17271 if (rack->r_ctl.rc_tlp_new_data) { 17272 /* TLP is forcing out new data */ 17273 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17274 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17275 } 17276 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17277 if (tp->snd_wnd > sb_offset) 17278 len = tp->snd_wnd - sb_offset; 17279 else 17280 len = 0; 17281 } else { 17282 len = rack->r_ctl.rc_tlp_new_data; 17283 } 17284 rack->r_ctl.rc_tlp_new_data = 0; 17285 } else { 17286 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17287 } 17288 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17289 /* 17290 * For prr=off, we need to send only 1 MSS 17291 * at a time. We do this because another sack could 17292 * be arriving that causes us to send retransmits and 17293 * we don't want to be on a long pace due to a larger send 17294 * that keeps us from sending out the retransmit. 17295 */ 17296 len = segsiz; 17297 } 17298 } else { 17299 uint32_t outstanding; 17300 /* 17301 * We are inside of a Fast recovery episode, this 17302 * is caused by a SACK or 3 dup acks. At this point 17303 * we have sent all the retransmissions and we rely 17304 * on PRR to dictate what we will send in the form of 17305 * new data. 17306 */ 17307 17308 outstanding = tp->snd_max - tp->snd_una; 17309 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17310 if (tp->snd_wnd > outstanding) { 17311 len = tp->snd_wnd - outstanding; 17312 /* Check to see if we have the data */ 17313 if ((sb_offset + len) > avail) { 17314 /* It does not all fit */ 17315 if (avail > sb_offset) 17316 len = avail - sb_offset; 17317 else 17318 len = 0; 17319 } 17320 } else { 17321 len = 0; 17322 } 17323 } else if (avail > sb_offset) { 17324 len = avail - sb_offset; 17325 } else { 17326 len = 0; 17327 } 17328 if (len > 0) { 17329 if (len > rack->r_ctl.rc_prr_sndcnt) { 17330 len = rack->r_ctl.rc_prr_sndcnt; 17331 } 17332 if (len > 0) { 17333 sub_from_prr = 1; 17334 } 17335 } 17336 if (len > segsiz) { 17337 /* 17338 * We should never send more than a MSS when 17339 * retransmitting or sending new data in prr 17340 * mode unless the override flag is on. Most 17341 * likely the PRR algorithm is not going to 17342 * let us send a lot as well :-) 17343 */ 17344 if (rack->r_ctl.rc_prr_sendalot == 0) { 17345 len = segsiz; 17346 } 17347 } else if (len < segsiz) { 17348 /* 17349 * Do we send any? The idea here is if the 17350 * send empty's the socket buffer we want to 17351 * do it. However if not then lets just wait 17352 * for our prr_sndcnt to get bigger. 17353 */ 17354 long leftinsb; 17355 17356 leftinsb = sbavail(sb) - sb_offset; 17357 if (leftinsb > len) { 17358 /* This send does not empty the sb */ 17359 len = 0; 17360 } 17361 } 17362 } 17363 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17364 /* 17365 * If you have not established 17366 * and are not doing FAST OPEN 17367 * no data please. 
17368 */ 17369 if ((sack_rxmit == 0) && 17370 (!IS_FASTOPEN(tp->t_flags))){ 17371 len = 0; 17372 sb_offset = 0; 17373 } 17374 } 17375 if (prefetch_so_done == 0) { 17376 kern_prefetch(so, &prefetch_so_done); 17377 prefetch_so_done = 1; 17378 } 17379 /* 17380 * Lop off SYN bit if it has already been sent. However, if this is 17381 * SYN-SENT state and if segment contains data and if we don't know 17382 * that foreign host supports TAO, suppress sending segment. 17383 */ 17384 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17385 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17386 /* 17387 * When sending additional segments following a TFO SYN|ACK, 17388 * do not include the SYN bit. 17389 */ 17390 if (IS_FASTOPEN(tp->t_flags) && 17391 (tp->t_state == TCPS_SYN_RECEIVED)) 17392 flags &= ~TH_SYN; 17393 } 17394 /* 17395 * Be careful not to send data and/or FIN on SYN segments. This 17396 * measure is needed to prevent interoperability problems with not 17397 * fully conformant TCP implementations. 17398 */ 17399 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17400 len = 0; 17401 flags &= ~TH_FIN; 17402 } 17403 /* 17404 * On TFO sockets, ensure no data is sent in the following cases: 17405 * 17406 * - When retransmitting SYN|ACK on a passively-created socket 17407 * 17408 * - When retransmitting SYN on an actively created socket 17409 * 17410 * - When sending a zero-length cookie (cookie request) on an 17411 * actively created socket 17412 * 17413 * - When the socket is in the CLOSED state (RST is being sent) 17414 */ 17415 if (IS_FASTOPEN(tp->t_flags) && 17416 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17417 ((tp->t_state == TCPS_SYN_SENT) && 17418 (tp->t_tfo_client_cookie_len == 0)) || 17419 (flags & TH_RST))) { 17420 sack_rxmit = 0; 17421 len = 0; 17422 } 17423 /* Without fast-open there should never be data sent on a SYN */ 17424 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17425 tp->snd_nxt = tp->iss; 17426 len = 0; 17427 } 17428 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17429 /* We only send 1 MSS if we have a DSACK block */ 17430 add_flag |= RACK_SENT_W_DSACK; 17431 len = segsiz; 17432 } 17433 orig_len = len; 17434 if (len <= 0) { 17435 /* 17436 * If FIN has been sent but not acked, but we haven't been 17437 * called to retransmit, len will be < 0. Otherwise, window 17438 * shrank after we sent into it. If window shrank to 0, 17439 * cancel pending retransmit, pull snd_nxt back to (closed) 17440 * window, and set the persist timer if it isn't already 17441 * going. If the window didn't close completely, just wait 17442 * for an ACK. 17443 * 17444 * We also do a general check here to ensure that we will 17445 * set the persist timer when we have data to send, but a 17446 * 0-byte window. This makes sure the persist timer is set 17447 * even if the packet hits one of the "goto send" lines 17448 * below. 17449 */ 17450 len = 0; 17451 if ((tp->snd_wnd == 0) && 17452 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17453 (tp->snd_una == tp->snd_max) && 17454 (sb_offset < (int)sbavail(sb))) { 17455 rack_enter_persist(tp, rack, cts); 17456 } 17457 } else if ((rsm == NULL) && 17458 (doing_tlp == 0) && 17459 (len < pace_max_seg)) { 17460 /* 17461 * We are not sending a maximum sized segment for 17462 * some reason. Should we not send anything (think 17463 * sws or persists)? 
17464 */ 17465 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17466 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17467 (len < minseg) && 17468 (len < (int)(sbavail(sb) - sb_offset))) { 17469 /* 17470 * Here the rwnd is less than 17471 * the minimum pacing size, this is not a retransmit, 17472 * we are established and 17473 * the send is not the last in the socket buffer 17474 * we send nothing, and we may enter persists 17475 * if nothing is outstanding. 17476 */ 17477 len = 0; 17478 if (tp->snd_max == tp->snd_una) { 17479 /* 17480 * Nothing out we can 17481 * go into persists. 17482 */ 17483 rack_enter_persist(tp, rack, cts); 17484 } 17485 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17486 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17487 (len < (int)(sbavail(sb) - sb_offset)) && 17488 (len < minseg)) { 17489 /* 17490 * Here we are not retransmitting, and 17491 * the cwnd is not so small that we could 17492 * not send at least a min size (rxt timer 17493 * not having gone off), We have 2 segments or 17494 * more already in flight, its not the tail end 17495 * of the socket buffer and the cwnd is blocking 17496 * us from sending out a minimum pacing segment size. 17497 * Lets not send anything. 17498 */ 17499 len = 0; 17500 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17501 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17502 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17503 (len < (int)(sbavail(sb) - sb_offset)) && 17504 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17505 /* 17506 * Here we have a send window but we have 17507 * filled it up and we can't send another pacing segment. 17508 * We also have in flight more than 2 segments 17509 * and we are not completing the sb i.e. we allow 17510 * the last bytes of the sb to go out even if 17511 * its not a full pacing segment. 17512 */ 17513 len = 0; 17514 } else if ((rack->r_ctl.crte != NULL) && 17515 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17516 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17517 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17518 (len < (int)(sbavail(sb) - sb_offset))) { 17519 /* 17520 * Here we are doing hardware pacing, this is not a TLP, 17521 * we are not sending a pace max segment size, there is rwnd 17522 * room to send at least N pace_max_seg, the cwnd is greater 17523 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17524 * more segments in flight and its not the tail of the socket buffer. 17525 * 17526 * We don't want to send instead we need to get more ack's in to 17527 * allow us to send a full pacing segment. Normally, if we are pacing 17528 * about the right speed, we should have finished our pacing 17529 * send as most of the acks have come back if we are at the 17530 * right rate. This is a bit fuzzy since return path delay 17531 * can delay the acks, which is why we want to make sure we 17532 * have cwnd space to have a bit more than a max pace segments in flight. 17533 * 17534 * If we have not gotten our acks back we are pacing at too high a 17535 * rate delaying will not hurt and will bring our GP estimate down by 17536 * injecting the delay. If we don't do this we will send 17537 * 2 MSS out in response to the acks being clocked in which 17538 * defeats the point of hw-pacing (i.e. to help us get 17539 * larger TSO's out). 17540 */ 17541 len = 0; 17542 17543 } 17544 17545 } 17546 /* len will be >= 0 after this point. 
*/ 17547 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17548 rack_sndbuf_autoscale(rack); 17549 /* 17550 * Decide if we can use TCP Segmentation Offloading (if supported by 17551 * hardware). 17552 * 17553 * TSO may only be used if we are in a pure bulk sending state. The 17554 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17555 * options prevent using TSO. With TSO the TCP header is the same 17556 * (except for the sequence number) for all generated packets. This 17557 * makes it impossible to transmit any options which vary per 17558 * generated segment or packet. 17559 * 17560 * IPv4 handling has a clear separation of ip options and ip header 17561 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17562 * the right thing below to provide length of just ip options and thus 17563 * checking for ipoptlen is enough to decide if ip options are present. 17564 */ 17565 ipoptlen = 0; 17566 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17567 /* 17568 * Pre-calculate here as we save another lookup into the darknesses 17569 * of IPsec that way and can actually decide if TSO is ok. 17570 */ 17571 #ifdef INET6 17572 if (isipv6 && IPSEC_ENABLED(ipv6)) 17573 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 17574 #ifdef INET 17575 else 17576 #endif 17577 #endif /* INET6 */ 17578 #ifdef INET 17579 if (IPSEC_ENABLED(ipv4)) 17580 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 17581 #endif /* INET */ 17582 #endif 17583 17584 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17585 ipoptlen += ipsec_optlen; 17586 #endif 17587 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17588 (tp->t_port == 0) && 17589 ((tp->t_flags & TF_SIGNATURE) == 0) && 17590 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17591 ipoptlen == 0) 17592 tso = 1; 17593 { 17594 uint32_t outstanding __unused; 17595 17596 outstanding = tp->snd_max - tp->snd_una; 17597 if (tp->t_flags & TF_SENTFIN) { 17598 /* 17599 * If we sent a fin, snd_max is 1 higher than 17600 * snd_una 17601 */ 17602 outstanding--; 17603 } 17604 if (sack_rxmit) { 17605 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17606 flags &= ~TH_FIN; 17607 } else { 17608 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17609 sbused(sb))) 17610 flags &= ~TH_FIN; 17611 } 17612 } 17613 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17614 (long)TCP_MAXWIN << tp->rcv_scale); 17615 17616 /* 17617 * Sender silly window avoidance. We transmit under the following 17618 * conditions when len is non-zero: 17619 * 17620 * - We have a full segment (or more with TSO) - This is the last 17621 * buffer in a write()/send() and we are either idle or running 17622 * NODELAY - we've timed out (e.g. persist timer) - we have more 17623 * then 1/2 the maximum send window's worth of data (receiver may be 17624 * limited the window size) - we need to retransmit 17625 */ 17626 if (len) { 17627 if (len >= segsiz) { 17628 goto send; 17629 } 17630 /* 17631 * NOTE! on localhost connections an 'ack' from the remote 17632 * end may occur synchronously with the output and cause us 17633 * to flush a buffer queued with moretocome. 
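 * A sketch of the decisions below (a segsiz of 1460 is only
 * illustrative): with 800 bytes queued we still send if that empties the
 * buffer and Nagle permits, if nothing is outstanding, if it is at least
 * half of max_sndwnd, if it is a retransmit, or if it keeps two packets
 * in flight to dodge delayed ack.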
XXX 17634 * 17635 */ 17636 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17637 (idle || (tp->t_flags & TF_NODELAY)) && 17638 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17639 (tp->t_flags & TF_NOPUSH) == 0) { 17640 pass = 2; 17641 goto send; 17642 } 17643 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17644 pass = 22; 17645 goto send; 17646 } 17647 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17648 pass = 4; 17649 goto send; 17650 } 17651 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17652 pass = 5; 17653 goto send; 17654 } 17655 if (sack_rxmit) { 17656 pass = 6; 17657 goto send; 17658 } 17659 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17660 (ctf_outstanding(tp) < (segsiz * 2))) { 17661 /* 17662 * We have less than two MSS outstanding (delayed ack) 17663 * and our rwnd will not let us send a full sized 17664 * MSS. Lets go ahead and let this small segment 17665 * out because we want to try to have at least two 17666 * packets inflight to not be caught by delayed ack. 17667 */ 17668 pass = 12; 17669 goto send; 17670 } 17671 } 17672 /* 17673 * Sending of standalone window updates. 17674 * 17675 * Window updates are important when we close our window due to a 17676 * full socket buffer and are opening it again after the application 17677 * reads data from it. Once the window has opened again and the 17678 * remote end starts to send again the ACK clock takes over and 17679 * provides the most current window information. 17680 * 17681 * We must avoid the silly window syndrome whereas every read from 17682 * the receive buffer, no matter how small, causes a window update 17683 * to be sent. We also should avoid sending a flurry of window 17684 * updates when the socket buffer had queued a lot of data and the 17685 * application is doing small reads. 17686 * 17687 * Prevent a flurry of pointless window updates by only sending an 17688 * update when we can increase the advertized window by more than 17689 * 1/4th of the socket buffer capacity. When the buffer is getting 17690 * full or is very small be more aggressive and send an update 17691 * whenever we can increase by two mss sized segments. In all other 17692 * situations the ACK's to new incoming data will carry further 17693 * window increases. 17694 * 17695 * Don't send an independent window update if a delayed ACK is 17696 * pending (it will get piggy-backed on it) or the remote side 17697 * already has done a half-close and won't send more data. Skip 17698 * this if the connection is in T/TCP half-open state. 17699 */ 17700 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17701 !(tp->t_flags & TF_DELACK) && 17702 !TCPS_HAVERCVDFIN(tp->t_state)) { 17703 /* 17704 * "adv" is the amount we could increase the window, taking 17705 * into account that we are limited by TCP_MAXWIN << 17706 * tp->rcv_scale. 17707 */ 17708 int32_t adv; 17709 int oldwin; 17710 17711 adv = recwin; 17712 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17713 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17714 if (adv > oldwin) 17715 adv -= oldwin; 17716 else { 17717 /* We can't increase the window */ 17718 adv = 0; 17719 } 17720 } else 17721 oldwin = 0; 17722 17723 /* 17724 * If the new window size ends up being the same as or less 17725 * than the old size when it is scaled, then don't force 17726 * a window update. 
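 * Worked example (values hypothetical): with rcv_scale of 7, oldwin of
 * 10000 and adv of 100, both 10000 >> 7 and 10100 >> 7 come out to 78,
 * so the peer would see no change and we skip the update.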
17727 */ 17728 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17729 goto dontupdate; 17730 17731 if (adv >= (int32_t)(2 * segsiz) && 17732 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17733 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17734 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17735 pass = 7; 17736 goto send; 17737 } 17738 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17739 pass = 23; 17740 goto send; 17741 } 17742 } 17743 dontupdate: 17744 17745 /* 17746 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17747 * is also a catch-all for the retransmit timer timeout case. 17748 */ 17749 if (tp->t_flags & TF_ACKNOW) { 17750 pass = 8; 17751 goto send; 17752 } 17753 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17754 pass = 9; 17755 goto send; 17756 } 17757 /* 17758 * If our state indicates that FIN should be sent and we have not 17759 * yet done so, then we need to send. 17760 */ 17761 if ((flags & TH_FIN) && 17762 (tp->snd_nxt == tp->snd_una)) { 17763 pass = 11; 17764 goto send; 17765 } 17766 /* 17767 * No reason to send a segment, just return. 17768 */ 17769 just_return: 17770 SOCKBUF_UNLOCK(sb); 17771 just_return_nolock: 17772 { 17773 int app_limited = CTF_JR_SENT_DATA; 17774 17775 if (tot_len_this_send > 0) { 17776 /* Make sure snd_nxt is up to max */ 17777 rack->r_ctl.fsb.recwin = recwin; 17778 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17779 if ((error == 0) && 17780 rack_use_rfo && 17781 ((flags & (TH_SYN|TH_FIN)) == 0) && 17782 (ipoptlen == 0) && 17783 (tp->snd_nxt == tp->snd_max) && 17784 (tp->rcv_numsacks == 0) && 17785 rack->r_fsb_inited && 17786 TCPS_HAVEESTABLISHED(tp->t_state) && 17787 (rack->r_must_retran == 0) && 17788 ((tp->t_flags & TF_NEEDFIN) == 0) && 17789 (len > 0) && (orig_len > 0) && 17790 (orig_len > len) && 17791 ((orig_len - len) >= segsiz) && 17792 ((optlen == 0) || 17793 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17794 /* We can send at least one more MSS using our fsb */ 17795 17796 rack->r_fast_output = 1; 17797 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17798 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17799 rack->r_ctl.fsb.tcp_flags = flags; 17800 rack->r_ctl.fsb.left_to_send = orig_len - len; 17801 if (hw_tls) 17802 rack->r_ctl.fsb.hw_tls = 1; 17803 else 17804 rack->r_ctl.fsb.hw_tls = 0; 17805 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17806 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17807 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17808 (tp->snd_max - tp->snd_una))); 17809 if (rack->r_ctl.fsb.left_to_send < segsiz) 17810 rack->r_fast_output = 0; 17811 else { 17812 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17813 rack->r_ctl.fsb.rfo_apply_push = 1; 17814 else 17815 rack->r_ctl.fsb.rfo_apply_push = 0; 17816 } 17817 } else 17818 rack->r_fast_output = 0; 17819 17820 17821 rack_log_fsb(rack, tp, so, flags, 17822 ipoptlen, orig_len, len, 0, 17823 1, optlen, __LINE__, 1); 17824 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17825 tp->snd_nxt = tp->snd_max; 17826 } else { 17827 int end_window = 0; 17828 uint32_t seq = tp->gput_ack; 17829 17830 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17831 if (rsm) { 17832 /* 17833 * Mark the last sent that we just-returned (hinting 17834 * that delayed ack may play a role in any rtt measurement). 
17835 */ 17836 rsm->r_just_ret = 1; 17837 } 17838 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17839 rack->r_ctl.rc_agg_delayed = 0; 17840 rack->r_early = 0; 17841 rack->r_late = 0; 17842 rack->r_ctl.rc_agg_early = 0; 17843 if ((ctf_outstanding(tp) + 17844 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17845 minseg)) >= tp->snd_wnd) { 17846 /* We are limited by the rwnd */ 17847 app_limited = CTF_JR_RWND_LIMITED; 17848 if (IN_FASTRECOVERY(tp->t_flags)) 17849 rack->r_ctl.rc_prr_sndcnt = 0; 17850 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17851 /* We are limited by whats available -- app limited */ 17852 app_limited = CTF_JR_APP_LIMITED; 17853 if (IN_FASTRECOVERY(tp->t_flags)) 17854 rack->r_ctl.rc_prr_sndcnt = 0; 17855 } else if ((idle == 0) && 17856 ((tp->t_flags & TF_NODELAY) == 0) && 17857 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17858 (len < segsiz)) { 17859 /* 17860 * No delay is not on and the 17861 * user is sending less than 1MSS. This 17862 * brings out SWS avoidance so we 17863 * don't send. Another app-limited case. 17864 */ 17865 app_limited = CTF_JR_APP_LIMITED; 17866 } else if (tp->t_flags & TF_NOPUSH) { 17867 /* 17868 * The user has requested no push of 17869 * the last segment and we are 17870 * at the last segment. Another app 17871 * limited case. 17872 */ 17873 app_limited = CTF_JR_APP_LIMITED; 17874 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17875 /* Its the cwnd */ 17876 app_limited = CTF_JR_CWND_LIMITED; 17877 } else if (IN_FASTRECOVERY(tp->t_flags) && 17878 (rack->rack_no_prr == 0) && 17879 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17880 app_limited = CTF_JR_PRR; 17881 } else { 17882 /* Now why here are we not sending? */ 17883 #ifdef NOW 17884 #ifdef INVARIANTS 17885 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17886 #endif 17887 #endif 17888 app_limited = CTF_JR_ASSESSING; 17889 } 17890 /* 17891 * App limited in some fashion, for our pacing GP 17892 * measurements we don't want any gap (even cwnd). 17893 * Close down the measurement window. 17894 */ 17895 if (rack_cwnd_block_ends_measure && 17896 ((app_limited == CTF_JR_CWND_LIMITED) || 17897 (app_limited == CTF_JR_PRR))) { 17898 /* 17899 * The reason we are not sending is 17900 * the cwnd (or prr). We have been configured 17901 * to end the measurement window in 17902 * this case. 17903 */ 17904 end_window = 1; 17905 } else if (rack_rwnd_block_ends_measure && 17906 (app_limited == CTF_JR_RWND_LIMITED)) { 17907 /* 17908 * We are rwnd limited and have been 17909 * configured to end the measurement 17910 * window in this case. 17911 */ 17912 end_window = 1; 17913 } else if (app_limited == CTF_JR_APP_LIMITED) { 17914 /* 17915 * A true application limited period, we have 17916 * ran out of data. 17917 */ 17918 end_window = 1; 17919 } else if (app_limited == CTF_JR_ASSESSING) { 17920 /* 17921 * In the assessing case we hit the end of 17922 * the if/else and had no known reason 17923 * This will panic us under invariants.. 17924 * 17925 * If we get this out in logs we need to 17926 * investagate which reason we missed. 17927 */ 17928 end_window = 1; 17929 } 17930 if (end_window) { 17931 uint8_t log = 0; 17932 17933 /* Adjust the Gput measurement */ 17934 if ((tp->t_flags & TF_GPUTINPROG) && 17935 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17936 tp->gput_ack = tp->snd_max; 17937 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17938 /* 17939 * There is not enough to measure. 
17940 */ 17941 tp->t_flags &= ~TF_GPUTINPROG; 17942 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17943 rack->r_ctl.rc_gp_srtt /*flex1*/, 17944 tp->gput_seq, 17945 0, 0, 18, __LINE__, NULL, 0); 17946 } else 17947 log = 1; 17948 } 17949 /* Mark the last packet has app limited */ 17950 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17951 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17952 if (rack->r_ctl.rc_app_limited_cnt == 0) 17953 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17954 else { 17955 /* 17956 * Go out to the end app limited and mark 17957 * this new one as next and move the end_appl up 17958 * to this guy. 17959 */ 17960 if (rack->r_ctl.rc_end_appl) 17961 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17962 rack->r_ctl.rc_end_appl = rsm; 17963 } 17964 rsm->r_flags |= RACK_APP_LIMITED; 17965 rack->r_ctl.rc_app_limited_cnt++; 17966 } 17967 if (log) 17968 rack_log_pacing_delay_calc(rack, 17969 rack->r_ctl.rc_app_limited_cnt, seq, 17970 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17971 } 17972 } 17973 /* Check if we need to go into persists or not */ 17974 if ((tp->snd_max == tp->snd_una) && 17975 TCPS_HAVEESTABLISHED(tp->t_state) && 17976 sbavail(sb) && 17977 (sbavail(sb) > tp->snd_wnd) && 17978 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17979 /* Yes lets make sure to move to persist before timer-start */ 17980 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17981 } 17982 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17983 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17984 } 17985 #ifdef NETFLIX_SHARED_CWND 17986 if ((sbavail(sb) == 0) && 17987 rack->r_ctl.rc_scw) { 17988 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17989 rack->rack_scwnd_is_idle = 1; 17990 } 17991 #endif 17992 #ifdef TCP_ACCOUNTING 17993 if (tot_len_this_send > 0) { 17994 crtsc = get_cyclecount(); 17995 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17996 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17997 } 17998 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17999 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18000 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18001 } 18002 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 18003 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18004 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 18005 } 18006 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 18007 } else { 18008 crtsc = get_cyclecount(); 18009 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18010 tp->tcp_cnt_counters[SND_LIMITED]++; 18011 } 18012 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 18013 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18014 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 18015 } 18016 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 18017 } 18018 sched_unpin(); 18019 #endif 18020 return (0); 18021 18022 send: 18023 if (rsm || sack_rxmit) 18024 counter_u64_add(rack_nfto_resend, 1); 18025 else 18026 counter_u64_add(rack_non_fto_send, 1); 18027 if ((flags & TH_FIN) && 18028 sbavail(sb)) { 18029 /* 18030 * We do not transmit a FIN 18031 * with data outstanding. We 18032 * need to make it so all data 18033 * is acked first. 
18034 */ 18035 flags &= ~TH_FIN; 18036 } 18037 /* Enforce stack imposed max seg size if we have one */ 18038 if (rack->r_ctl.rc_pace_max_segs && 18039 (len > rack->r_ctl.rc_pace_max_segs)) { 18040 mark = 1; 18041 len = rack->r_ctl.rc_pace_max_segs; 18042 } 18043 SOCKBUF_LOCK_ASSERT(sb); 18044 if (len > 0) { 18045 if (len >= segsiz) 18046 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 18047 else 18048 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 18049 } 18050 /* 18051 * Before ESTABLISHED, force sending of initial options unless TCP 18052 * set not to do any options. NOTE: we assume that the IP/TCP header 18053 * plus TCP options always fit in a single mbuf, leaving room for a 18054 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 18055 * + optlen <= MCLBYTES 18056 */ 18057 optlen = 0; 18058 #ifdef INET6 18059 if (isipv6) 18060 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18061 else 18062 #endif 18063 hdrlen = sizeof(struct tcpiphdr); 18064 18065 /* 18066 * Compute options for segment. We only have to care about SYN and 18067 * established connection segments. Options for SYN-ACK segments 18068 * are handled in TCP syncache. 18069 */ 18070 to.to_flags = 0; 18071 if ((tp->t_flags & TF_NOOPT) == 0) { 18072 /* Maximum segment size. */ 18073 if (flags & TH_SYN) { 18074 tp->snd_nxt = tp->iss; 18075 to.to_mss = tcp_mssopt(&inp->inp_inc); 18076 if (tp->t_port) 18077 to.to_mss -= V_tcp_udp_tunneling_overhead; 18078 to.to_flags |= TOF_MSS; 18079 18080 /* 18081 * On SYN or SYN|ACK transmits on TFO connections, 18082 * only include the TFO option if it is not a 18083 * retransmit, as the presence of the TFO option may 18084 * have caused the original SYN or SYN|ACK to have 18085 * been dropped by a middlebox. 18086 */ 18087 if (IS_FASTOPEN(tp->t_flags) && 18088 (tp->t_rxtshift == 0)) { 18089 if (tp->t_state == TCPS_SYN_RECEIVED) { 18090 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 18091 to.to_tfo_cookie = 18092 (u_int8_t *)&tp->t_tfo_cookie.server; 18093 to.to_flags |= TOF_FASTOPEN; 18094 wanted_cookie = 1; 18095 } else if (tp->t_state == TCPS_SYN_SENT) { 18096 to.to_tfo_len = 18097 tp->t_tfo_client_cookie_len; 18098 to.to_tfo_cookie = 18099 tp->t_tfo_cookie.client; 18100 to.to_flags |= TOF_FASTOPEN; 18101 wanted_cookie = 1; 18102 /* 18103 * If we wind up having more data to 18104 * send with the SYN than can fit in 18105 * one segment, don't send any more 18106 * until the SYN|ACK comes back from 18107 * the other end. 18108 */ 18109 sendalot = 0; 18110 } 18111 } 18112 } 18113 /* Window scaling. */ 18114 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18115 to.to_wscale = tp->request_r_scale; 18116 to.to_flags |= TOF_SCALE; 18117 } 18118 /* Timestamps. */ 18119 if ((tp->t_flags & TF_RCVD_TSTMP) || 18120 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18121 to.to_tsval = ms_cts + tp->ts_offset; 18122 to.to_tsecr = tp->ts_recent; 18123 to.to_flags |= TOF_TS; 18124 } 18125 /* Set receive buffer autosizing timestamp. */ 18126 if (tp->rfbuf_ts == 0 && 18127 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18128 tp->rfbuf_ts = tcp_ts_getticks(); 18129 /* Selective ACK's. */ 18130 if (tp->t_flags & TF_SACK_PERMIT) { 18131 if (flags & TH_SYN) 18132 to.to_flags |= TOF_SACKPERM; 18133 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18134 tp->rcv_numsacks > 0) { 18135 to.to_flags |= TOF_SACK; 18136 to.to_nsacks = tp->rcv_numsacks; 18137 to.to_sacks = (u_char *)tp->sackblks; 18138 } 18139 } 18140 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18141 /* TCP-MD5 (RFC2385). 
*/ 18142 if (tp->t_flags & TF_SIGNATURE) 18143 to.to_flags |= TOF_SIGNATURE; 18144 #endif /* TCP_SIGNATURE */ 18145 18146 /* Processing the options. */ 18147 hdrlen += optlen = tcp_addoptions(&to, opt); 18148 /* 18149 * If we wanted a TFO option to be added, but it was unable 18150 * to fit, ensure no data is sent. 18151 */ 18152 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18153 !(to.to_flags & TOF_FASTOPEN)) 18154 len = 0; 18155 } 18156 if (tp->t_port) { 18157 if (V_tcp_udp_tunneling_port == 0) { 18158 /* The port was removed?? */ 18159 SOCKBUF_UNLOCK(&so->so_snd); 18160 #ifdef TCP_ACCOUNTING 18161 crtsc = get_cyclecount(); 18162 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18163 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18164 } 18165 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18166 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18167 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18168 } 18169 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18170 sched_unpin(); 18171 #endif 18172 return (EHOSTUNREACH); 18173 } 18174 hdrlen += sizeof(struct udphdr); 18175 } 18176 #ifdef INET6 18177 if (isipv6) 18178 ipoptlen = ip6_optlen(inp); 18179 else 18180 #endif 18181 if (inp->inp_options) 18182 ipoptlen = inp->inp_options->m_len - 18183 offsetof(struct ipoption, ipopt_list); 18184 else 18185 ipoptlen = 0; 18186 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18187 ipoptlen += ipsec_optlen; 18188 #endif 18189 18190 /* 18191 * Adjust data length if insertion of options will bump the packet 18192 * length beyond the t_maxseg length. Clear the FIN bit because we 18193 * cut off the tail of the segment. 18194 */ 18195 if (len + optlen + ipoptlen > tp->t_maxseg) { 18196 if (tso) { 18197 uint32_t if_hw_tsomax; 18198 uint32_t moff; 18199 int32_t max_len; 18200 18201 /* extract TSO information */ 18202 if_hw_tsomax = tp->t_tsomax; 18203 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18204 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18205 KASSERT(ipoptlen == 0, 18206 ("%s: TSO can't do IP options", __func__)); 18207 18208 /* 18209 * Check if we should limit by maximum payload 18210 * length: 18211 */ 18212 if (if_hw_tsomax != 0) { 18213 /* compute maximum TSO length */ 18214 max_len = (if_hw_tsomax - hdrlen - 18215 max_linkhdr); 18216 if (max_len <= 0) { 18217 len = 0; 18218 } else if (len > max_len) { 18219 sendalot = 1; 18220 len = max_len; 18221 mark = 2; 18222 } 18223 } 18224 /* 18225 * Prevent the last segment from being fractional 18226 * unless the send sockbuf can be emptied: 18227 */ 18228 max_len = (tp->t_maxseg - optlen); 18229 if ((sb_offset + len) < sbavail(sb)) { 18230 moff = len % (u_int)max_len; 18231 if (moff != 0) { 18232 mark = 3; 18233 len -= moff; 18234 } 18235 } 18236 /* 18237 * In case there are too many small fragments don't 18238 * use TSO: 18239 */ 18240 if (len <= segsiz) { 18241 mark = 4; 18242 tso = 0; 18243 } 18244 /* 18245 * Send the FIN in a separate segment after the bulk 18246 * sending is done. We don't trust the TSO 18247 * implementations to clear the FIN flag on all but 18248 * the last segment. 18249 */ 18250 if (tp->t_flags & TF_NEEDFIN) { 18251 sendalot = 4; 18252 } 18253 } else { 18254 mark = 5; 18255 if (optlen + ipoptlen >= tp->t_maxseg) { 18256 /* 18257 * Since we don't have enough space to put 18258 * the IP header chain and the TCP header in 18259 * one packet as required by RFC 7112, don't 18260 * send it. Also ensure that at least one 18261 * byte of the payload can be put into the 18262 * TCP segment. 
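 * (Illustrative: with a t_maxseg of 1460, an optlen of 24 and an
 * ipoptlen of 1440, optlen + ipoptlen >= t_maxseg, so we bail out with
 * EMSGSIZE rather than emit a segment with no room for payload.)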
18263 */ 18264 SOCKBUF_UNLOCK(&so->so_snd); 18265 error = EMSGSIZE; 18266 sack_rxmit = 0; 18267 goto out; 18268 } 18269 len = tp->t_maxseg - optlen - ipoptlen; 18270 sendalot = 5; 18271 } 18272 } else { 18273 tso = 0; 18274 mark = 6; 18275 } 18276 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18277 ("%s: len > IP_MAXPACKET", __func__)); 18278 #ifdef DIAGNOSTIC 18279 #ifdef INET6 18280 if (max_linkhdr + hdrlen > MCLBYTES) 18281 #else 18282 if (max_linkhdr + hdrlen > MHLEN) 18283 #endif 18284 panic("tcphdr too big"); 18285 #endif 18286 18287 /* 18288 * This KASSERT is here to catch edge cases at a well defined place. 18289 * Before, those had triggered (random) panic conditions further 18290 * down. 18291 */ 18292 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18293 if ((len == 0) && 18294 (flags & TH_FIN) && 18295 (sbused(sb))) { 18296 /* 18297 * We have outstanding data, don't send a fin by itself!. 18298 */ 18299 goto just_return; 18300 } 18301 /* 18302 * Grab a header mbuf, attaching a copy of data to be transmitted, 18303 * and initialize the header from the template for sends on this 18304 * connection. 18305 */ 18306 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18307 if (len) { 18308 uint32_t max_val; 18309 uint32_t moff; 18310 18311 if (rack->r_ctl.rc_pace_max_segs) 18312 max_val = rack->r_ctl.rc_pace_max_segs; 18313 else if (rack->rc_user_set_max_segs) 18314 max_val = rack->rc_user_set_max_segs * segsiz; 18315 else 18316 max_val = len; 18317 /* 18318 * We allow a limit on sending with hptsi. 18319 */ 18320 if (len > max_val) { 18321 mark = 7; 18322 len = max_val; 18323 } 18324 #ifdef INET6 18325 if (MHLEN < hdrlen + max_linkhdr) 18326 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18327 else 18328 #endif 18329 m = m_gethdr(M_NOWAIT, MT_DATA); 18330 18331 if (m == NULL) { 18332 SOCKBUF_UNLOCK(sb); 18333 error = ENOBUFS; 18334 sack_rxmit = 0; 18335 goto out; 18336 } 18337 m->m_data += max_linkhdr; 18338 m->m_len = hdrlen; 18339 18340 /* 18341 * Start the m_copy functions from the closest mbuf to the 18342 * sb_offset in the socket buffer chain. 18343 */ 18344 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18345 s_mb = mb; 18346 s_moff = moff; 18347 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18348 m_copydata(mb, moff, (int)len, 18349 mtod(m, caddr_t)+hdrlen); 18350 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18351 sbsndptr_adv(sb, mb, len); 18352 m->m_len += len; 18353 } else { 18354 struct sockbuf *msb; 18355 18356 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18357 msb = NULL; 18358 else 18359 msb = sb; 18360 m->m_next = tcp_m_copym( 18361 mb, moff, &len, 18362 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18363 ((rsm == NULL) ? hw_tls : 0) 18364 #ifdef NETFLIX_COPY_ARGS 18365 , &s_mb, &s_moff 18366 #endif 18367 ); 18368 if (len <= (tp->t_maxseg - optlen)) { 18369 /* 18370 * Must have ran out of mbufs for the copy 18371 * shorten it to no longer need tso. Lets 18372 * not put on sendalot since we are low on 18373 * mbufs. 
18374 */ 18375 tso = 0; 18376 } 18377 if (m->m_next == NULL) { 18378 SOCKBUF_UNLOCK(sb); 18379 (void)m_free(m); 18380 error = ENOBUFS; 18381 sack_rxmit = 0; 18382 goto out; 18383 } 18384 } 18385 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18386 if (rsm && (rsm->r_flags & RACK_TLP)) { 18387 /* 18388 * TLP should not count in retran count, but 18389 * in its own bin 18390 */ 18391 counter_u64_add(rack_tlp_retran, 1); 18392 counter_u64_add(rack_tlp_retran_bytes, len); 18393 } else { 18394 tp->t_sndrexmitpack++; 18395 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18396 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18397 } 18398 #ifdef STATS 18399 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18400 len); 18401 #endif 18402 } else { 18403 KMOD_TCPSTAT_INC(tcps_sndpack); 18404 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18405 #ifdef STATS 18406 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18407 len); 18408 #endif 18409 } 18410 /* 18411 * If we're sending everything we've got, set PUSH. (This 18412 * will keep happy those implementations which only give 18413 * data to the user when a buffer fills or a PUSH comes in.) 18414 */ 18415 if (sb_offset + len == sbused(sb) && 18416 sbused(sb) && 18417 !(flags & TH_SYN)) { 18418 flags |= TH_PUSH; 18419 add_flag |= RACK_HAD_PUSH; 18420 } 18421 18422 SOCKBUF_UNLOCK(sb); 18423 } else { 18424 SOCKBUF_UNLOCK(sb); 18425 if (tp->t_flags & TF_ACKNOW) 18426 KMOD_TCPSTAT_INC(tcps_sndacks); 18427 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18428 KMOD_TCPSTAT_INC(tcps_sndctrl); 18429 else 18430 KMOD_TCPSTAT_INC(tcps_sndwinup); 18431 18432 m = m_gethdr(M_NOWAIT, MT_DATA); 18433 if (m == NULL) { 18434 error = ENOBUFS; 18435 sack_rxmit = 0; 18436 goto out; 18437 } 18438 #ifdef INET6 18439 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18440 MHLEN >= hdrlen) { 18441 M_ALIGN(m, hdrlen); 18442 } else 18443 #endif 18444 m->m_data += max_linkhdr; 18445 m->m_len = hdrlen; 18446 } 18447 SOCKBUF_UNLOCK_ASSERT(sb); 18448 m->m_pkthdr.rcvif = (struct ifnet *)0; 18449 #ifdef MAC 18450 mac_inpcb_create_mbuf(inp, m); 18451 #endif 18452 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18453 #ifdef INET6 18454 if (isipv6) 18455 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18456 else 18457 #endif /* INET6 */ 18458 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18459 th = rack->r_ctl.fsb.th; 18460 udp = rack->r_ctl.fsb.udp; 18461 if (udp) { 18462 #ifdef INET6 18463 if (isipv6) 18464 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18465 else 18466 #endif /* INET6 */ 18467 ulen = hdrlen + len - sizeof(struct ip); 18468 udp->uh_ulen = htons(ulen); 18469 } 18470 } else { 18471 #ifdef INET6 18472 if (isipv6) { 18473 ip6 = mtod(m, struct ip6_hdr *); 18474 if (tp->t_port) { 18475 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18476 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18477 udp->uh_dport = tp->t_port; 18478 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18479 udp->uh_ulen = htons(ulen); 18480 th = (struct tcphdr *)(udp + 1); 18481 } else 18482 th = (struct tcphdr *)(ip6 + 1); 18483 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18484 } else 18485 #endif /* INET6 */ 18486 { 18487 ip = mtod(m, struct ip *); 18488 if (tp->t_port) { 18489 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18490 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18491 udp->uh_dport = tp->t_port; 18492 ulen = hdrlen + len - sizeof(struct ip); 18493 udp->uh_ulen = htons(ulen); 18494 th = (struct tcphdr *)(udp + 1); 18495 } else 18496 th = (struct 
tcphdr *)(ip + 1); 18497 tcpip_fillheaders(inp, tp->t_port, ip, th); 18498 } 18499 } 18500 /* 18501 * Fill in fields, remembering maximum advertised window for use in 18502 * delaying messages about window sizes. If resending a FIN, be sure 18503 * not to use a new sequence number. 18504 */ 18505 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18506 tp->snd_nxt == tp->snd_max) 18507 tp->snd_nxt--; 18508 /* 18509 * If we are starting a connection, send ECN setup SYN packet. If we 18510 * are on a retransmit, we may resend those bits a number of times 18511 * as per RFC 3168. 18512 */ 18513 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18514 flags |= tcp_ecn_output_syn_sent(tp); 18515 } 18516 /* Also handle parallel SYN for ECN */ 18517 if (TCPS_HAVERCVDSYN(tp->t_state) && 18518 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18519 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18520 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18521 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18522 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18523 #ifdef INET6 18524 if (isipv6) { 18525 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18526 ip6->ip6_flow |= htonl(ect << 20); 18527 } 18528 else 18529 #endif 18530 { 18531 ip->ip_tos &= ~IPTOS_ECN_MASK; 18532 ip->ip_tos |= ect; 18533 } 18534 } 18535 /* 18536 * If we are doing retransmissions, then snd_nxt will not reflect 18537 * the first unsent octet. For ACK only packets, we do not want the 18538 * sequence number of the retransmitted packet, we want the sequence 18539 * number of the next unsent octet. So, if there is no data (and no 18540 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18541 * ti_seq. But if we are in persist state, snd_max might reflect 18542 * one byte beyond the right edge of the window, so use snd_nxt in 18543 * that case, since we know we aren't doing a retransmission. 18544 * (retransmit and persist are mutually exclusive...) 18545 */ 18546 if (sack_rxmit == 0) { 18547 if (len || (flags & (TH_SYN | TH_FIN))) { 18548 th->th_seq = htonl(tp->snd_nxt); 18549 rack_seq = tp->snd_nxt; 18550 } else { 18551 th->th_seq = htonl(tp->snd_max); 18552 rack_seq = tp->snd_max; 18553 } 18554 } else { 18555 th->th_seq = htonl(rsm->r_start); 18556 rack_seq = rsm->r_start; 18557 } 18558 th->th_ack = htonl(tp->rcv_nxt); 18559 tcp_set_flags(th, flags); 18560 /* 18561 * Calculate receive window. Don't shrink window, but avoid silly 18562 * window syndrome. 18563 * If a RST segment is sent, advertise a window of zero. 18564 */ 18565 if (flags & TH_RST) { 18566 recwin = 0; 18567 } else { 18568 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18569 recwin < (long)segsiz) { 18570 recwin = 0; 18571 } 18572 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18573 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18574 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18575 } 18576 18577 /* 18578 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18579 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18580 * handled in syncache. 18581 */ 18582 if (flags & TH_SYN) 18583 th->th_win = htons((u_short) 18584 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18585 else { 18586 /* Avoid shrinking window with window scaling. */ 18587 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18588 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18589 } 18590 /* 18591 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18592 * window. This may cause the remote transmitter to stall. 
This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer. This can occur if the receiver is
	 * attempting to read more data than can be buffered prior to
	 * transmitting on the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
	/* Are we using the fsb? If so, copy the template data to the mbuf. */
	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
		uint8_t *cpto;

		cpto = mtod(m, uint8_t *);
		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
		/*
		 * We have just copied in:
		 * IP/IP6
		 * <optional udphdr>
		 * tcphdr (no options)
		 *
		 * We need to grab the correct pointers into the mbuf
		 * for both the tcp header, and possibly the udp header (if tunneling).
		 * We do this by using the offset in the copy buffer and adding it
		 * to the mbuf base pointer (cpto).
		 */
#ifdef INET6
		if (isipv6)
			ip6 = mtod(m, struct ip6_hdr *);
		else
#endif /* INET6 */
			ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
		/* If we have a udp header let's set it into the mbuf as well. */
		if (udp)
			udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (to.to_flags & TOF_SIGNATURE) {
		/*
		 * Calculate MD5 signature and put it into the place
		 * determined before.
		 * NOTE: since TCP options buffer doesn't point into
		 * mbuf's data, calculate offset and use it.
		 */
		if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
		    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
			/*
			 * Do not send segment if the calculation of MD5
			 * digest has failed.
			 */
			goto out;
		}
	}
#endif
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	}
	/*
	 * Put TCP length in extended header, and then checksum extended
	 * header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled now, and will be filled
		 * in ip6_output.
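		 *
		 * A note on the checksum handling just below: the stack only
		 * seeds th_sum/uh_sum with the pseudo-header sum and sets the
		 * CSUM_* offload flags; the NIC (or the software fallback)
		 * finishes the one's-complement sum over the TCP/UDP header
		 * and payload.  A minimal userland-style sketch of that
		 * pseudo-header seed for IPv4, assuming <stdint.h> and
		 * addresses/length already in network byte order (illustrative
		 * only -- the kernel uses in_pseudo() and in6_cksum_pseudo()):
		 *
		 *	static uint16_t
		 *	pseudo_hdr_sum(uint32_t src, uint32_t dst, uint8_t proto,
		 *	    uint16_t tcp_len)
		 *	{
		 *		uint64_t sum;
		 *
		 *		sum = (src & 0xffff) + (src >> 16) +
		 *		    (dst & 0xffff) + (dst >> 16) +
		 *		    proto + tcp_len;
		 *		while (sum >> 16)	// fold carries back in
		 *			sum = (sum & 0xffff) + (sum >> 16);
		 *		return ((uint16_t)sum);
		 *	}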
		 */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP,
			    0);
		}
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
			UDPSTAT_INC(udps_opackets);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    IPPROTO_TCP + len + optlen));
		}
		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif
	/*
	 * Enable TSO and specify the size of the segments. The TCP pseudo
	 * header checksum is always provided. XXX: Fixme: This is currently
	 * not the case for IPv6.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxseg - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	}
	KASSERT(len + hdrlen == m_length(m, NULL),
	    ("%s: mbuf chain different than expected: %d + %u != %u",
	    __func__, len, hdrlen, m_length(m, NULL)));

#ifdef TCP_HHOOK
	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);
#endif
	/* We're getting ready to send; log now.
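	 *
	 * What gets logged (when TCP black-box logging is enabled) is a
	 * TCP_LOG_OUT record carrying the pacing state at the time of the
	 * send.  The flex8 field encodes what kind of send this is, derived
	 * directly from the checks below:
	 *
	 *	rsm != NULL && doing_tlp	-> 2 (TLP retransmission)
	 *	rsm != NULL && !doing_tlp	-> 1 (plain retransmission)
	 *	rsm == NULL && doing_tlp	-> 3 (TLP of new data)
	 *	rsm == NULL && !doing_tlp	-> 0 (new data)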
*/ 18724 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18725 union tcp_log_stackspecific log; 18726 18727 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18728 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18729 if (rack->rack_no_prr) 18730 log.u_bbr.flex1 = 0; 18731 else 18732 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18733 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18734 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18735 log.u_bbr.flex4 = orig_len; 18736 /* Save off the early/late values */ 18737 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18738 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18739 log.u_bbr.bw_inuse = rack_get_bw(rack); 18740 log.u_bbr.flex8 = 0; 18741 if (rsm) { 18742 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18743 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18744 counter_u64_add(rack_collapsed_win_rxt, 1); 18745 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18746 } 18747 if (doing_tlp) 18748 log.u_bbr.flex8 = 2; 18749 else 18750 log.u_bbr.flex8 = 1; 18751 } else { 18752 if (doing_tlp) 18753 log.u_bbr.flex8 = 3; 18754 else 18755 log.u_bbr.flex8 = 0; 18756 } 18757 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18758 log.u_bbr.flex7 = mark; 18759 log.u_bbr.flex7 <<= 8; 18760 log.u_bbr.flex7 |= pass; 18761 log.u_bbr.pkts_out = tp->t_maxseg; 18762 log.u_bbr.timeStamp = cts; 18763 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18764 log.u_bbr.lt_epoch = cwnd_to_use; 18765 log.u_bbr.delivered = sendalot; 18766 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18767 len, &log, false, NULL, NULL, 0, &tv); 18768 } else 18769 lgb = NULL; 18770 18771 /* 18772 * Fill in IP length and desired time to live and send to IP level. 18773 * There should be a better way to handle ttl and tos; we could keep 18774 * them in the template, but need a way to checksum without them. 18775 */ 18776 /* 18777 * m->m_pkthdr.len should have been set before cksum calcuration, 18778 * because in6_cksum() need it. 18779 */ 18780 #ifdef INET6 18781 if (isipv6) { 18782 /* 18783 * we separately set hoplimit for every segment, since the 18784 * user might want to change the value via setsockopt. Also, 18785 * desired default hop limit might be changed via Neighbor 18786 * Discovery. 18787 */ 18788 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18789 18790 /* 18791 * Set the packet size here for the benefit of DTrace 18792 * probes. ip6_output() will set it properly; it's supposed 18793 * to include the option header lengths as well. 18794 */ 18795 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18796 18797 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18798 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18799 else 18800 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18801 18802 if (tp->t_state == TCPS_SYN_SENT) 18803 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18804 18805 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18806 /* TODO: IPv6 IP6TOS_ECT bit on */ 18807 error = ip6_output(m, 18808 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18809 inp->in6p_outputopts, 18810 #else 18811 NULL, 18812 #endif 18813 &inp->inp_route6, 18814 ((rsm || sack_rxmit) ? 
		    IP_NO_SND_TAG_RL : 0),
		    NULL, NULL, inp);

		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
		/*
		 * If we do path MTU discovery, then we set DF on every
		 * packet. This might not be the best thing to do according
		 * to RFC3390 Section 2. However the tcp hostcache mitigates
		 * the problem so it affects only the first tcp connection
		 * with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe
		 * fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m,
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
		    inp->inp_options,
#else
		    NULL,
#endif
		    &inp->inp_route,
		    ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
		    inp);
		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	}
#endif /* INET */

out:
	if (lgb) {
		lgb->tlb_errno = error;
		lgb = NULL;
	}
	/*
	 * In transmit state, time the transmission and arrange for the
	 * retransmit. In persist state, just set snd_max.
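	 *
	 * On a successful send the code below also classifies what was sent
	 * (TLP vs. plain retransmission vs. new data in slow start or
	 * congestion avoidance) and, when PRR is active, charges the bytes
	 * just sent against the PRR send credit.  A minimal sketch of that
	 * clamped decrement (illustrative only, not the kernel code):
	 *
	 *	// never let the PRR credit wrap below zero
	 *	if (prr_sndcnt >= len)
	 *		prr_sndcnt -= len;
	 *	else
	 *		prr_sndcnt = 0;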
18878 */ 18879 if (error == 0) { 18880 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18881 if (rsm && doing_tlp) { 18882 rack->rc_last_sent_tlp_past_cumack = 0; 18883 rack->rc_last_sent_tlp_seq_valid = 1; 18884 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18885 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18886 } 18887 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18888 if (rsm && (doing_tlp == 0)) { 18889 /* Set we retransmitted */ 18890 rack->rc_gp_saw_rec = 1; 18891 } else { 18892 if (cwnd_to_use > tp->snd_ssthresh) { 18893 /* Set we sent in CA */ 18894 rack->rc_gp_saw_ca = 1; 18895 } else { 18896 /* Set we sent in SS */ 18897 rack->rc_gp_saw_ss = 1; 18898 } 18899 } 18900 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18901 (tp->t_flags & TF_SACK_PERMIT) && 18902 tp->rcv_numsacks > 0) 18903 tcp_clean_dsack_blocks(tp); 18904 tot_len_this_send += len; 18905 if (len == 0) 18906 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18907 else if (len == 1) { 18908 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18909 } else if (len > 1) { 18910 int idx; 18911 18912 idx = (len / segsiz) + 3; 18913 if (idx >= TCP_MSS_ACCT_ATIMER) 18914 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18915 else 18916 counter_u64_add(rack_out_size[idx], 1); 18917 } 18918 } 18919 if ((rack->rack_no_prr == 0) && 18920 sub_from_prr && 18921 (error == 0)) { 18922 if (rack->r_ctl.rc_prr_sndcnt >= len) 18923 rack->r_ctl.rc_prr_sndcnt -= len; 18924 else 18925 rack->r_ctl.rc_prr_sndcnt = 0; 18926 } 18927 sub_from_prr = 0; 18928 if (doing_tlp) { 18929 /* Make sure the TLP is added */ 18930 add_flag |= RACK_TLP; 18931 } else if (rsm) { 18932 /* If its a resend without TLP then it must not have the flag */ 18933 rsm->r_flags &= ~RACK_TLP; 18934 } 18935 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18936 rack_to_usec_ts(&tv), 18937 rsm, add_flag, s_mb, s_moff, hw_tls); 18938 18939 18940 if ((error == 0) && 18941 (len > 0) && 18942 (tp->snd_una == tp->snd_max)) 18943 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18944 { 18945 tcp_seq startseq = tp->snd_nxt; 18946 18947 /* Track our lost count */ 18948 if (rsm && (doing_tlp == 0)) 18949 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18950 /* 18951 * Advance snd_nxt over sequence space of this segment. 18952 */ 18953 if (error) 18954 /* We don't log or do anything with errors */ 18955 goto nomore; 18956 if (doing_tlp == 0) { 18957 if (rsm == NULL) { 18958 /* 18959 * Not a retransmission of some 18960 * sort, new data is going out so 18961 * clear our TLP count and flag. 18962 */ 18963 rack->rc_tlp_in_progress = 0; 18964 rack->r_ctl.rc_tlp_cnt_out = 0; 18965 } 18966 } else { 18967 /* 18968 * We have just sent a TLP, mark that it is true 18969 * and make sure our in progress is set so we 18970 * continue to check the count. 18971 */ 18972 rack->rc_tlp_in_progress = 1; 18973 rack->r_ctl.rc_tlp_cnt_out++; 18974 } 18975 if (flags & (TH_SYN | TH_FIN)) { 18976 if (flags & TH_SYN) 18977 tp->snd_nxt++; 18978 if (flags & TH_FIN) { 18979 tp->snd_nxt++; 18980 tp->t_flags |= TF_SENTFIN; 18981 } 18982 } 18983 /* In the ENOBUFS case we do *not* update snd_max */ 18984 if (sack_rxmit) 18985 goto nomore; 18986 18987 tp->snd_nxt += len; 18988 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18989 if (tp->snd_una == tp->snd_max) { 18990 /* 18991 * Update the time we just added data since 18992 * none was outstanding. 
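				 *
				 * Right below this, the classic "time one
				 * segment per RTT" rule also applies: a new
				 * RTT sample is only started when nothing is
				 * currently being timed.  Roughly (sketch,
				 * not the kernel code):
				 *
				 *	if (t_rtttime == 0) {
				 *		t_rtttime = ticks;   // sample start
				 *		t_rtseq = startseq;  // seq being timed
				 *	}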
18993 */ 18994 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18995 tp->t_acktime = ticks; 18996 } 18997 tp->snd_max = tp->snd_nxt; 18998 /* 18999 * Time this transmission if not a retransmission and 19000 * not currently timing anything. 19001 * This is only relevant in case of switching back to 19002 * the base stack. 19003 */ 19004 if (tp->t_rtttime == 0) { 19005 tp->t_rtttime = ticks; 19006 tp->t_rtseq = startseq; 19007 KMOD_TCPSTAT_INC(tcps_segstimed); 19008 } 19009 if (len && 19010 ((tp->t_flags & TF_GPUTINPROG) == 0)) 19011 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 19012 } 19013 /* 19014 * If we are doing FO we need to update the mbuf position and subtract 19015 * this happens when the peer sends us duplicate information and 19016 * we thus want to send a DSACK. 19017 * 19018 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 19019 * turned off? If not then we are going to echo multiple DSACK blocks 19020 * out (with the TSO), which we should not be doing. 19021 */ 19022 if (rack->r_fast_output && len) { 19023 if (rack->r_ctl.fsb.left_to_send > len) 19024 rack->r_ctl.fsb.left_to_send -= len; 19025 else 19026 rack->r_ctl.fsb.left_to_send = 0; 19027 if (rack->r_ctl.fsb.left_to_send < segsiz) 19028 rack->r_fast_output = 0; 19029 if (rack->r_fast_output) { 19030 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19031 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19032 } 19033 } 19034 } 19035 nomore: 19036 if (error) { 19037 rack->r_ctl.rc_agg_delayed = 0; 19038 rack->r_early = 0; 19039 rack->r_late = 0; 19040 rack->r_ctl.rc_agg_early = 0; 19041 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 19042 /* 19043 * Failures do not advance the seq counter above. For the 19044 * case of ENOBUFS we will fall out and retry in 1ms with 19045 * the hpts. Everything else will just have to retransmit 19046 * with the timer. 19047 * 19048 * In any case, we do not want to loop around for another 19049 * send without a good reason. 19050 */ 19051 sendalot = 0; 19052 switch (error) { 19053 case EPERM: 19054 tp->t_softerror = error; 19055 #ifdef TCP_ACCOUNTING 19056 crtsc = get_cyclecount(); 19057 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19058 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19059 } 19060 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19061 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19062 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19063 } 19064 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19065 sched_unpin(); 19066 #endif 19067 return (error); 19068 case ENOBUFS: 19069 /* 19070 * Pace us right away to retry in a some 19071 * time 19072 */ 19073 if (rack->r_ctl.crte != NULL) { 19074 rack_trace_point(rack, RACK_TP_HWENOBUF); 19075 } else 19076 rack_trace_point(rack, RACK_TP_ENOBUF); 19077 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19078 if (rack->rc_enobuf < 0x7f) 19079 rack->rc_enobuf++; 19080 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19081 slot = 10 * HPTS_USEC_IN_MSEC; 19082 if (rack->r_ctl.crte != NULL) { 19083 counter_u64_add(rack_saw_enobuf_hw, 1); 19084 tcp_rl_log_enobuf(rack->r_ctl.crte); 19085 } 19086 counter_u64_add(rack_saw_enobuf, 1); 19087 goto enobufs; 19088 case EMSGSIZE: 19089 /* 19090 * For some reason the interface we used initially 19091 * to send segments changed to another or lowered 19092 * its MTU. If TSO was active we either got an 19093 * interface without TSO capabilits or TSO was 19094 * turned off. 
If we obtained mtu from ip_output() 19095 * then update it and try again. 19096 */ 19097 if (tso) 19098 tp->t_flags &= ~TF_TSO; 19099 if (mtu != 0) { 19100 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19101 goto again; 19102 } 19103 slot = 10 * HPTS_USEC_IN_MSEC; 19104 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19105 #ifdef TCP_ACCOUNTING 19106 crtsc = get_cyclecount(); 19107 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19108 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19109 } 19110 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19111 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19112 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19113 } 19114 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19115 sched_unpin(); 19116 #endif 19117 return (error); 19118 case ENETUNREACH: 19119 counter_u64_add(rack_saw_enetunreach, 1); 19120 case EHOSTDOWN: 19121 case EHOSTUNREACH: 19122 case ENETDOWN: 19123 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19124 tp->t_softerror = error; 19125 } 19126 /* FALLTHROUGH */ 19127 default: 19128 slot = 10 * HPTS_USEC_IN_MSEC; 19129 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19130 #ifdef TCP_ACCOUNTING 19131 crtsc = get_cyclecount(); 19132 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19133 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19134 } 19135 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19136 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19137 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19138 } 19139 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19140 sched_unpin(); 19141 #endif 19142 return (error); 19143 } 19144 } else { 19145 rack->rc_enobuf = 0; 19146 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19147 rack->r_ctl.retran_during_recovery += len; 19148 } 19149 KMOD_TCPSTAT_INC(tcps_sndtotal); 19150 19151 /* 19152 * Data sent (as far as we can tell). If this advertises a larger 19153 * window than any other segment, then remember the size of the 19154 * advertised window. Any pending ACK has now been sent. 19155 */ 19156 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19157 tp->rcv_adv = tp->rcv_nxt + recwin; 19158 19159 tp->last_ack_sent = tp->rcv_nxt; 19160 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19161 enobufs: 19162 if (sendalot) { 19163 /* Do we need to turn off sendalot? */ 19164 if (rack->r_ctl.rc_pace_max_segs && 19165 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19166 /* We hit our max. */ 19167 sendalot = 0; 19168 } else if ((rack->rc_user_set_max_segs) && 19169 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19170 /* We hit the user defined max */ 19171 sendalot = 0; 19172 } 19173 } 19174 if ((error == 0) && (flags & TH_FIN)) 19175 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19176 if (flags & TH_RST) { 19177 /* 19178 * We don't send again after sending a RST. 19179 */ 19180 slot = 0; 19181 sendalot = 0; 19182 if (error == 0) 19183 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19184 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19185 /* 19186 * Get our pacing rate, if an error 19187 * occurred in sending (ENOBUF) we would 19188 * hit the else if with slot preset. Other 19189 * errors return. 19190 */ 19191 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19192 } 19193 if (rsm && 19194 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19195 rack->use_rack_rr) { 19196 /* Its a retransmit and we use the rack cheat? 
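		 *
		 * The "rack cheat" (rack_rr) means retransmissions are not
		 * paced from a computed rate at all; they are clocked out one
		 * segment at a time using the fixed minimum rack timeout
		 * (rc_min_to), i.e. the old 1ms-style behaviour.  The checks
		 * below fall back to that minimum whenever no pacing slot was
		 * computed, pacing is disabled, or r_rr_config forces it.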
*/ 19197 if ((slot == 0) || 19198 (rack->rc_always_pace == 0) || 19199 (rack->r_rr_config == 1)) { 19200 /* 19201 * We have no pacing set or we 19202 * are using old-style rack or 19203 * we are overridden to use the old 1ms pacing. 19204 */ 19205 slot = rack->r_ctl.rc_min_to; 19206 } 19207 } 19208 /* We have sent clear the flag */ 19209 rack->r_ent_rec_ns = 0; 19210 if (rack->r_must_retran) { 19211 if (rsm) { 19212 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19213 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19214 /* 19215 * We have retransmitted all. 19216 */ 19217 rack->r_must_retran = 0; 19218 rack->r_ctl.rc_out_at_rto = 0; 19219 } 19220 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19221 /* 19222 * Sending new data will also kill 19223 * the loop. 19224 */ 19225 rack->r_must_retran = 0; 19226 rack->r_ctl.rc_out_at_rto = 0; 19227 } 19228 } 19229 rack->r_ctl.fsb.recwin = recwin; 19230 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19231 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19232 /* 19233 * We hit an RTO and now have past snd_max at the RTO 19234 * clear all the WAS flags. 19235 */ 19236 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19237 } 19238 if (slot) { 19239 /* set the rack tcb into the slot N */ 19240 if ((error == 0) && 19241 rack_use_rfo && 19242 ((flags & (TH_SYN|TH_FIN)) == 0) && 19243 (rsm == NULL) && 19244 (tp->snd_nxt == tp->snd_max) && 19245 (ipoptlen == 0) && 19246 (tp->rcv_numsacks == 0) && 19247 rack->r_fsb_inited && 19248 TCPS_HAVEESTABLISHED(tp->t_state) && 19249 (rack->r_must_retran == 0) && 19250 ((tp->t_flags & TF_NEEDFIN) == 0) && 19251 (len > 0) && (orig_len > 0) && 19252 (orig_len > len) && 19253 ((orig_len - len) >= segsiz) && 19254 ((optlen == 0) || 19255 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19256 /* We can send at least one more MSS using our fsb */ 19257 19258 rack->r_fast_output = 1; 19259 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19260 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19261 rack->r_ctl.fsb.tcp_flags = flags; 19262 rack->r_ctl.fsb.left_to_send = orig_len - len; 19263 if (hw_tls) 19264 rack->r_ctl.fsb.hw_tls = 1; 19265 else 19266 rack->r_ctl.fsb.hw_tls = 0; 19267 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19268 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19269 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19270 (tp->snd_max - tp->snd_una))); 19271 if (rack->r_ctl.fsb.left_to_send < segsiz) 19272 rack->r_fast_output = 0; 19273 else { 19274 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19275 rack->r_ctl.fsb.rfo_apply_push = 1; 19276 else 19277 rack->r_ctl.fsb.rfo_apply_push = 0; 19278 } 19279 } else 19280 rack->r_fast_output = 0; 19281 rack_log_fsb(rack, tp, so, flags, 19282 ipoptlen, orig_len, len, error, 19283 (rsm == NULL), optlen, __LINE__, 2); 19284 } else if (sendalot) { 19285 int ret; 19286 19287 sack_rxmit = 0; 19288 if ((error == 0) && 19289 rack_use_rfo && 19290 ((flags & (TH_SYN|TH_FIN)) == 0) && 19291 (rsm == NULL) && 19292 (ipoptlen == 0) && 19293 (tp->rcv_numsacks == 0) && 19294 (tp->snd_nxt == tp->snd_max) && 19295 (rack->r_must_retran == 0) && 19296 rack->r_fsb_inited && 19297 TCPS_HAVEESTABLISHED(tp->t_state) && 19298 ((tp->t_flags & TF_NEEDFIN) == 0) && 19299 (len > 0) && (orig_len > 0) && 19300 (orig_len > len) && 19301 ((orig_len - len) >= segsiz) && 19302 ((optlen == 0) || 19303 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19304 /* we can use fast_output for more */ 19305 19306 rack->r_fast_output = 1; 19307 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19308 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19309 rack->r_ctl.fsb.tcp_flags = flags; 19310 rack->r_ctl.fsb.left_to_send = orig_len - len; 19311 if (hw_tls) 19312 rack->r_ctl.fsb.hw_tls = 1; 19313 else 19314 rack->r_ctl.fsb.hw_tls = 0; 19315 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19316 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19317 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19318 (tp->snd_max - tp->snd_una))); 19319 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19320 rack->r_fast_output = 0; 19321 } 19322 if (rack->r_fast_output) { 19323 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19324 rack->r_ctl.fsb.rfo_apply_push = 1; 19325 else 19326 rack->r_ctl.fsb.rfo_apply_push = 0; 19327 rack_log_fsb(rack, tp, so, flags, 19328 ipoptlen, orig_len, len, error, 19329 (rsm == NULL), optlen, __LINE__, 3); 19330 error = 0; 19331 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19332 if (ret >= 0) 19333 return (ret); 19334 else if (error) 19335 goto nomore; 19336 19337 } 19338 } 19339 goto again; 19340 } 19341 /* Assure when we leave that snd_nxt will point to top */ 19342 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19343 tp->snd_nxt = tp->snd_max; 19344 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19345 #ifdef TCP_ACCOUNTING 19346 crtsc = get_cyclecount() - ts_val; 19347 if (tot_len_this_send) { 19348 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19349 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19350 } 19351 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19352 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19353 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19354 } 19355 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19356 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19357 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19358 } 19359 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19360 } else { 19361 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19362 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19363 } 19364 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19365 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19366 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19367 } 19368 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19369 } 19370 sched_unpin(); 19371 #endif 19372 if (error == ENOBUFS) 19373 error = 0; 19374 return (error); 19375 } 19376 19377 static void 19378 rack_update_seg(struct tcp_rack *rack) 19379 { 19380 uint32_t orig_val; 19381 19382 orig_val = rack->r_ctl.rc_pace_max_segs; 19383 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19384 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19385 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19386 } 19387 19388 static void 19389 rack_mtu_change(struct tcpcb *tp) 19390 { 19391 /* 19392 * The MSS may have changed 19393 */ 19394 struct tcp_rack *rack; 19395 struct rack_sendmap *rsm; 19396 19397 rack = (struct tcp_rack *)tp->t_fb_ptr; 19398 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19399 /* 19400 * The MTU has changed we need to resend everything 19401 * since all we have sent is lost. We first fix 19402 * up the mtu though. 
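		 *
		 * "Resend everything" here means: treat this like a
		 * retransmission timeout, but without the cwnd/ssthresh
		 * reduction.  The code below snapshots how much is in flight
		 * (roughly snd_max - snd_una minus SACKed bytes), remembers
		 * snd_max at this point, and tags every outstanding
		 * rack_sendmap with RACK_MUST_RXT so it gets sent again at
		 * the (possibly smaller) new segment size.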
19403 */ 19404 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19405 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19406 rack_remxt_tmr(tp); 19407 rack->r_fast_output = 0; 19408 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19409 rack->r_ctl.rc_sacked); 19410 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19411 rack->r_must_retran = 1; 19412 /* Mark all inflight to needing to be rxt'd */ 19413 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19414 rsm->r_flags |= RACK_MUST_RXT; 19415 } 19416 } 19417 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19418 /* We don't use snd_nxt to retransmit */ 19419 tp->snd_nxt = tp->snd_max; 19420 } 19421 19422 static int 19423 rack_set_profile(struct tcp_rack *rack, int prof) 19424 { 19425 int err = EINVAL; 19426 if (prof == 1) { 19427 /* pace_always=1 */ 19428 if (rack->rc_always_pace == 0) { 19429 if (tcp_can_enable_pacing() == 0) 19430 return (EBUSY); 19431 } 19432 rack->rc_always_pace = 1; 19433 if (rack->use_fixed_rate || rack->gp_ready) 19434 rack_set_cc_pacing(rack); 19435 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19436 rack->rack_attempt_hdwr_pace = 0; 19437 /* cmpack=1 */ 19438 if (rack_use_cmp_acks) 19439 rack->r_use_cmp_ack = 1; 19440 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19441 rack->r_use_cmp_ack) 19442 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19443 /* scwnd=1 */ 19444 rack->rack_enable_scwnd = 1; 19445 /* dynamic=100 */ 19446 rack->rc_gp_dyn_mul = 1; 19447 /* gp_inc_ca */ 19448 rack->r_ctl.rack_per_of_gp_ca = 100; 19449 /* rrr_conf=3 */ 19450 rack->r_rr_config = 3; 19451 /* npush=2 */ 19452 rack->r_ctl.rc_no_push_at_mrtt = 2; 19453 /* fillcw=1 */ 19454 rack->rc_pace_to_cwnd = 1; 19455 rack->rc_pace_fill_if_rttin_range = 0; 19456 rack->rtt_limit_mul = 0; 19457 /* noprr=1 */ 19458 rack->rack_no_prr = 1; 19459 /* lscwnd=1 */ 19460 rack->r_limit_scw = 1; 19461 /* gp_inc_rec */ 19462 rack->r_ctl.rack_per_of_gp_rec = 90; 19463 err = 0; 19464 19465 } else if (prof == 3) { 19466 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19467 /* pace_always=1 */ 19468 if (rack->rc_always_pace == 0) { 19469 if (tcp_can_enable_pacing() == 0) 19470 return (EBUSY); 19471 } 19472 rack->rc_always_pace = 1; 19473 if (rack->use_fixed_rate || rack->gp_ready) 19474 rack_set_cc_pacing(rack); 19475 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19476 rack->rack_attempt_hdwr_pace = 0; 19477 /* cmpack=1 */ 19478 if (rack_use_cmp_acks) 19479 rack->r_use_cmp_ack = 1; 19480 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19481 rack->r_use_cmp_ack) 19482 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19483 /* scwnd=1 */ 19484 rack->rack_enable_scwnd = 1; 19485 /* dynamic=100 */ 19486 rack->rc_gp_dyn_mul = 1; 19487 /* gp_inc_ca */ 19488 rack->r_ctl.rack_per_of_gp_ca = 100; 19489 /* rrr_conf=3 */ 19490 rack->r_rr_config = 3; 19491 /* npush=2 */ 19492 rack->r_ctl.rc_no_push_at_mrtt = 2; 19493 /* fillcw=2 */ 19494 rack->rc_pace_to_cwnd = 1; 19495 rack->r_fill_less_agg = 1; 19496 rack->rc_pace_fill_if_rttin_range = 0; 19497 rack->rtt_limit_mul = 0; 19498 /* noprr=1 */ 19499 rack->rack_no_prr = 1; 19500 /* lscwnd=1 */ 19501 rack->r_limit_scw = 1; 19502 /* gp_inc_rec */ 19503 rack->r_ctl.rack_per_of_gp_rec = 90; 19504 err = 0; 19505 19506 19507 } else if (prof == 2) { 19508 /* cmpack=1 */ 19509 if (rack->rc_always_pace == 0) { 19510 if (tcp_can_enable_pacing() == 0) 19511 return (EBUSY); 19512 } 19513 rack->rc_always_pace = 1; 19514 if (rack->use_fixed_rate || rack->gp_ready) 19515 
rack_set_cc_pacing(rack); 19516 rack->r_use_cmp_ack = 1; 19517 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19518 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19519 /* pace_always=1 */ 19520 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19521 /* scwnd=1 */ 19522 rack->rack_enable_scwnd = 1; 19523 /* dynamic=100 */ 19524 rack->rc_gp_dyn_mul = 1; 19525 rack->r_ctl.rack_per_of_gp_ca = 100; 19526 /* rrr_conf=3 */ 19527 rack->r_rr_config = 3; 19528 /* npush=2 */ 19529 rack->r_ctl.rc_no_push_at_mrtt = 2; 19530 /* fillcw=1 */ 19531 rack->rc_pace_to_cwnd = 1; 19532 rack->rc_pace_fill_if_rttin_range = 0; 19533 rack->rtt_limit_mul = 0; 19534 /* noprr=1 */ 19535 rack->rack_no_prr = 1; 19536 /* lscwnd=0 */ 19537 rack->r_limit_scw = 0; 19538 err = 0; 19539 } else if (prof == 0) { 19540 /* This changes things back to the default settings */ 19541 err = 0; 19542 if (rack->rc_always_pace) { 19543 tcp_decrement_paced_conn(); 19544 rack_undo_cc_pacing(rack); 19545 rack->rc_always_pace = 0; 19546 } 19547 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19548 rack->rc_always_pace = 1; 19549 if (rack->use_fixed_rate || rack->gp_ready) 19550 rack_set_cc_pacing(rack); 19551 } else 19552 rack->rc_always_pace = 0; 19553 if (rack_dsack_std_based & 0x1) { 19554 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19555 rack->rc_rack_tmr_std_based = 1; 19556 } 19557 if (rack_dsack_std_based & 0x2) { 19558 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19559 rack->rc_rack_use_dsack = 1; 19560 } 19561 if (rack_use_cmp_acks) 19562 rack->r_use_cmp_ack = 1; 19563 else 19564 rack->r_use_cmp_ack = 0; 19565 if (rack_disable_prr) 19566 rack->rack_no_prr = 1; 19567 else 19568 rack->rack_no_prr = 0; 19569 if (rack_gp_no_rec_chg) 19570 rack->rc_gp_no_rec_chg = 1; 19571 else 19572 rack->rc_gp_no_rec_chg = 0; 19573 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19574 rack->r_mbuf_queue = 1; 19575 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19576 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19577 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19578 } else { 19579 rack->r_mbuf_queue = 0; 19580 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19581 } 19582 if (rack_enable_shared_cwnd) 19583 rack->rack_enable_scwnd = 1; 19584 else 19585 rack->rack_enable_scwnd = 0; 19586 if (rack_do_dyn_mul) { 19587 /* When dynamic adjustment is on CA needs to start at 100% */ 19588 rack->rc_gp_dyn_mul = 1; 19589 if (rack_do_dyn_mul >= 100) 19590 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19591 } else { 19592 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19593 rack->rc_gp_dyn_mul = 0; 19594 } 19595 rack->r_rr_config = 0; 19596 rack->r_ctl.rc_no_push_at_mrtt = 0; 19597 rack->rc_pace_to_cwnd = 0; 19598 rack->rc_pace_fill_if_rttin_range = 0; 19599 rack->rtt_limit_mul = 0; 19600 19601 if (rack_enable_hw_pacing) 19602 rack->rack_hdw_pace_ena = 1; 19603 else 19604 rack->rack_hdw_pace_ena = 0; 19605 if (rack_disable_prr) 19606 rack->rack_no_prr = 1; 19607 else 19608 rack->rack_no_prr = 0; 19609 if (rack_limits_scwnd) 19610 rack->r_limit_scw = 1; 19611 else 19612 rack->r_limit_scw = 0; 19613 err = 0; 19614 } 19615 return (err); 19616 } 19617 19618 static int 19619 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19620 { 19621 struct deferred_opt_list *dol; 19622 19623 dol = malloc(sizeof(struct deferred_opt_list), 19624 M_TCPFSB, M_NOWAIT|M_ZERO); 19625 if (dol == NULL) { 19626 /* 19627 * No space yikes -- fail out.. 
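		 *
		 * Returning 0 tells the caller the option could not be
		 * queued.  On success the (name, value) pair is appended to
		 * r_ctl.opt_list and replayed later by
		 * rack_apply_deferred_options(); the price of deferring is
		 * that any error from the real handler is lost.  The pattern,
		 * in miniature (sketch only, declarations elided):
		 *
		 *	dol->optname = sopt_name;
		 *	dol->optval = loptval;
		 *	TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
		 *	...
		 *	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		 *		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		 *		(void)rack_process_option(tp, rack, dol->optname,
		 *		    (uint32_t)dol->optval, dol->optval);
		 *		free(dol, M_TCPDO);
		 *	}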
19628 */ 19629 return (0); 19630 } 19631 dol->optname = sopt_name; 19632 dol->optval = loptval; 19633 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19634 return (1); 19635 } 19636 19637 static int 19638 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19639 uint32_t optval, uint64_t loptval) 19640 { 19641 struct epoch_tracker et; 19642 struct sockopt sopt; 19643 struct cc_newreno_opts opt; 19644 struct inpcb *inp = tptoinpcb(tp); 19645 uint64_t val; 19646 int error = 0; 19647 uint16_t ca, ss; 19648 19649 switch (sopt_name) { 19650 19651 case TCP_RACK_DSACK_OPT: 19652 RACK_OPTS_INC(tcp_rack_dsack_opt); 19653 if (optval & 0x1) { 19654 rack->rc_rack_tmr_std_based = 1; 19655 } else { 19656 rack->rc_rack_tmr_std_based = 0; 19657 } 19658 if (optval & 0x2) { 19659 rack->rc_rack_use_dsack = 1; 19660 } else { 19661 rack->rc_rack_use_dsack = 0; 19662 } 19663 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19664 break; 19665 case TCP_RACK_PACING_BETA: 19666 RACK_OPTS_INC(tcp_rack_beta); 19667 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19668 /* This only works for newreno. */ 19669 error = EINVAL; 19670 break; 19671 } 19672 if (rack->rc_pacing_cc_set) { 19673 /* 19674 * Set them into the real CC module 19675 * whats in the rack pcb is the old values 19676 * to be used on restoral/ 19677 */ 19678 sopt.sopt_dir = SOPT_SET; 19679 opt.name = CC_NEWRENO_BETA; 19680 opt.val = optval; 19681 if (CC_ALGO(tp)->ctl_output != NULL) 19682 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19683 else { 19684 error = ENOENT; 19685 break; 19686 } 19687 } else { 19688 /* 19689 * Not pacing yet so set it into our local 19690 * rack pcb storage. 19691 */ 19692 rack->r_ctl.rc_saved_beta.beta = optval; 19693 } 19694 break; 19695 case TCP_RACK_TIMER_SLOP: 19696 RACK_OPTS_INC(tcp_rack_timer_slop); 19697 rack->r_ctl.timer_slop = optval; 19698 if (rack->rc_tp->t_srtt) { 19699 /* 19700 * If we have an SRTT lets update t_rxtcur 19701 * to have the new slop. 19702 */ 19703 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19704 rack_rto_min, rack_rto_max, 19705 rack->r_ctl.timer_slop); 19706 } 19707 break; 19708 case TCP_RACK_PACING_BETA_ECN: 19709 RACK_OPTS_INC(tcp_rack_beta_ecn); 19710 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19711 /* This only works for newreno. */ 19712 error = EINVAL; 19713 break; 19714 } 19715 if (rack->rc_pacing_cc_set) { 19716 /* 19717 * Set them into the real CC module 19718 * whats in the rack pcb is the old values 19719 * to be used on restoral/ 19720 */ 19721 sopt.sopt_dir = SOPT_SET; 19722 opt.name = CC_NEWRENO_BETA_ECN; 19723 opt.val = optval; 19724 if (CC_ALGO(tp)->ctl_output != NULL) 19725 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19726 else 19727 error = ENOENT; 19728 } else { 19729 /* 19730 * Not pacing yet so set it into our local 19731 * rack pcb storage. 
19732 */ 19733 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19734 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19735 } 19736 break; 19737 case TCP_DEFER_OPTIONS: 19738 RACK_OPTS_INC(tcp_defer_opt); 19739 if (optval) { 19740 if (rack->gp_ready) { 19741 /* Too late */ 19742 error = EINVAL; 19743 break; 19744 } 19745 rack->defer_options = 1; 19746 } else 19747 rack->defer_options = 0; 19748 break; 19749 case TCP_RACK_MEASURE_CNT: 19750 RACK_OPTS_INC(tcp_rack_measure_cnt); 19751 if (optval && (optval <= 0xff)) { 19752 rack->r_ctl.req_measurements = optval; 19753 } else 19754 error = EINVAL; 19755 break; 19756 case TCP_REC_ABC_VAL: 19757 RACK_OPTS_INC(tcp_rec_abc_val); 19758 if (optval > 0) 19759 rack->r_use_labc_for_rec = 1; 19760 else 19761 rack->r_use_labc_for_rec = 0; 19762 break; 19763 case TCP_RACK_ABC_VAL: 19764 RACK_OPTS_INC(tcp_rack_abc_val); 19765 if ((optval > 0) && (optval < 255)) 19766 rack->rc_labc = optval; 19767 else 19768 error = EINVAL; 19769 break; 19770 case TCP_HDWR_UP_ONLY: 19771 RACK_OPTS_INC(tcp_pacing_up_only); 19772 if (optval) 19773 rack->r_up_only = 1; 19774 else 19775 rack->r_up_only = 0; 19776 break; 19777 case TCP_PACING_RATE_CAP: 19778 RACK_OPTS_INC(tcp_pacing_rate_cap); 19779 rack->r_ctl.bw_rate_cap = loptval; 19780 break; 19781 case TCP_RACK_PROFILE: 19782 RACK_OPTS_INC(tcp_profile); 19783 error = rack_set_profile(rack, optval); 19784 break; 19785 case TCP_USE_CMP_ACKS: 19786 RACK_OPTS_INC(tcp_use_cmp_acks); 19787 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19788 /* You can't turn it off once its on! */ 19789 error = EINVAL; 19790 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19791 rack->r_use_cmp_ack = 1; 19792 rack->r_mbuf_queue = 1; 19793 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19794 } 19795 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19796 inp->inp_flags2 |= INP_MBUF_ACKCMP; 19797 break; 19798 case TCP_SHARED_CWND_TIME_LIMIT: 19799 RACK_OPTS_INC(tcp_lscwnd); 19800 if (optval) 19801 rack->r_limit_scw = 1; 19802 else 19803 rack->r_limit_scw = 0; 19804 break; 19805 case TCP_RACK_PACE_TO_FILL: 19806 RACK_OPTS_INC(tcp_fillcw); 19807 if (optval == 0) 19808 rack->rc_pace_to_cwnd = 0; 19809 else { 19810 rack->rc_pace_to_cwnd = 1; 19811 if (optval > 1) 19812 rack->r_fill_less_agg = 1; 19813 } 19814 if ((optval >= rack_gp_rtt_maxmul) && 19815 rack_gp_rtt_maxmul && 19816 (optval < 0xf)) { 19817 rack->rc_pace_fill_if_rttin_range = 1; 19818 rack->rtt_limit_mul = optval; 19819 } else { 19820 rack->rc_pace_fill_if_rttin_range = 0; 19821 rack->rtt_limit_mul = 0; 19822 } 19823 break; 19824 case TCP_RACK_NO_PUSH_AT_MAX: 19825 RACK_OPTS_INC(tcp_npush); 19826 if (optval == 0) 19827 rack->r_ctl.rc_no_push_at_mrtt = 0; 19828 else if (optval < 0xff) 19829 rack->r_ctl.rc_no_push_at_mrtt = optval; 19830 else 19831 error = EINVAL; 19832 break; 19833 case TCP_SHARED_CWND_ENABLE: 19834 RACK_OPTS_INC(tcp_rack_scwnd); 19835 if (optval == 0) 19836 rack->rack_enable_scwnd = 0; 19837 else 19838 rack->rack_enable_scwnd = 1; 19839 break; 19840 case TCP_RACK_MBUF_QUEUE: 19841 /* Now do we use the LRO mbuf-queue feature */ 19842 RACK_OPTS_INC(tcp_rack_mbufq); 19843 if (optval || rack->r_use_cmp_ack) 19844 rack->r_mbuf_queue = 1; 19845 else 19846 rack->r_mbuf_queue = 0; 19847 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19848 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19849 else 19850 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19851 break; 19852 case TCP_RACK_NONRXT_CFG_RATE: 19853 
RACK_OPTS_INC(tcp_rack_cfg_rate); 19854 if (optval == 0) 19855 rack->rack_rec_nonrxt_use_cr = 0; 19856 else 19857 rack->rack_rec_nonrxt_use_cr = 1; 19858 break; 19859 case TCP_NO_PRR: 19860 RACK_OPTS_INC(tcp_rack_noprr); 19861 if (optval == 0) 19862 rack->rack_no_prr = 0; 19863 else if (optval == 1) 19864 rack->rack_no_prr = 1; 19865 else if (optval == 2) 19866 rack->no_prr_addback = 1; 19867 else 19868 error = EINVAL; 19869 break; 19870 case TCP_TIMELY_DYN_ADJ: 19871 RACK_OPTS_INC(tcp_timely_dyn); 19872 if (optval == 0) 19873 rack->rc_gp_dyn_mul = 0; 19874 else { 19875 rack->rc_gp_dyn_mul = 1; 19876 if (optval >= 100) { 19877 /* 19878 * If the user sets something 100 or more 19879 * its the gp_ca value. 19880 */ 19881 rack->r_ctl.rack_per_of_gp_ca = optval; 19882 } 19883 } 19884 break; 19885 case TCP_RACK_DO_DETECTION: 19886 RACK_OPTS_INC(tcp_rack_do_detection); 19887 if (optval == 0) 19888 rack->do_detection = 0; 19889 else 19890 rack->do_detection = 1; 19891 break; 19892 case TCP_RACK_TLP_USE: 19893 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19894 error = EINVAL; 19895 break; 19896 } 19897 RACK_OPTS_INC(tcp_tlp_use); 19898 rack->rack_tlp_threshold_use = optval; 19899 break; 19900 case TCP_RACK_TLP_REDUCE: 19901 /* RACK TLP cwnd reduction (bool) */ 19902 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19903 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19904 break; 19905 /* Pacing related ones */ 19906 case TCP_RACK_PACE_ALWAYS: 19907 /* 19908 * zero is old rack method, 1 is new 19909 * method using a pacing rate. 19910 */ 19911 RACK_OPTS_INC(tcp_rack_pace_always); 19912 if (optval > 0) { 19913 if (rack->rc_always_pace) { 19914 error = EALREADY; 19915 break; 19916 } else if (tcp_can_enable_pacing()) { 19917 rack->rc_always_pace = 1; 19918 if (rack->use_fixed_rate || rack->gp_ready) 19919 rack_set_cc_pacing(rack); 19920 } 19921 else { 19922 error = ENOSPC; 19923 break; 19924 } 19925 } else { 19926 if (rack->rc_always_pace) { 19927 tcp_decrement_paced_conn(); 19928 rack->rc_always_pace = 0; 19929 rack_undo_cc_pacing(rack); 19930 } 19931 } 19932 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19933 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19934 else 19935 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19936 /* A rate may be set irate or other, if so set seg size */ 19937 rack_update_seg(rack); 19938 break; 19939 case TCP_BBR_RACK_INIT_RATE: 19940 RACK_OPTS_INC(tcp_initial_rate); 19941 val = optval; 19942 /* Change from kbits per second to bytes per second */ 19943 val *= 1000; 19944 val /= 8; 19945 rack->r_ctl.init_rate = val; 19946 if (rack->rc_init_win != rack_default_init_window) { 19947 uint32_t win, snt; 19948 19949 /* 19950 * Options don't always get applied 19951 * in the order you think. So in order 19952 * to assure we update a cwnd we need 19953 * to check and see if we are still 19954 * where we should raise the cwnd. 
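			 *
			 * The option value arrives in kilobits per second and
			 * was converted to bytes per second above
			 * (val = optval * 1000 / 8).  The cwnd is only bumped
			 * while the connection is still inside its initial
			 * window, i.e. roughly:
			 *
			 *	snt = (snd_max > iss) ? snd_max - iss : 0;
			 *	if (snt < win && snd_cwnd < win)
			 *		snd_cwnd = win;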
19955 */ 19956 win = rc_init_window(rack); 19957 if (SEQ_GT(tp->snd_max, tp->iss)) 19958 snt = tp->snd_max - tp->iss; 19959 else 19960 snt = 0; 19961 if ((snt < win) && 19962 (tp->snd_cwnd < win)) 19963 tp->snd_cwnd = win; 19964 } 19965 if (rack->rc_always_pace) 19966 rack_update_seg(rack); 19967 break; 19968 case TCP_BBR_IWINTSO: 19969 RACK_OPTS_INC(tcp_initial_win); 19970 if (optval && (optval <= 0xff)) { 19971 uint32_t win, snt; 19972 19973 rack->rc_init_win = optval; 19974 win = rc_init_window(rack); 19975 if (SEQ_GT(tp->snd_max, tp->iss)) 19976 snt = tp->snd_max - tp->iss; 19977 else 19978 snt = 0; 19979 if ((snt < win) && 19980 (tp->t_srtt | 19981 #ifdef NETFLIX_PEAKRATE 19982 tp->t_maxpeakrate | 19983 #endif 19984 rack->r_ctl.init_rate)) { 19985 /* 19986 * We are not past the initial window 19987 * and we have some bases for pacing, 19988 * so we need to possibly adjust up 19989 * the cwnd. Note even if we don't set 19990 * the cwnd, its still ok to raise the rc_init_win 19991 * which can be used coming out of idle when we 19992 * would have a rate. 19993 */ 19994 if (tp->snd_cwnd < win) 19995 tp->snd_cwnd = win; 19996 } 19997 if (rack->rc_always_pace) 19998 rack_update_seg(rack); 19999 } else 20000 error = EINVAL; 20001 break; 20002 case TCP_RACK_FORCE_MSEG: 20003 RACK_OPTS_INC(tcp_rack_force_max_seg); 20004 if (optval) 20005 rack->rc_force_max_seg = 1; 20006 else 20007 rack->rc_force_max_seg = 0; 20008 break; 20009 case TCP_RACK_PACE_MAX_SEG: 20010 /* Max segments size in a pace in bytes */ 20011 RACK_OPTS_INC(tcp_rack_max_seg); 20012 rack->rc_user_set_max_segs = optval; 20013 rack_set_pace_segments(tp, rack, __LINE__, NULL); 20014 break; 20015 case TCP_RACK_PACE_RATE_REC: 20016 /* Set the fixed pacing rate in Bytes per second ca */ 20017 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 20018 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20019 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20020 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20021 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20022 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20023 rack->use_fixed_rate = 1; 20024 if (rack->rc_always_pace) 20025 rack_set_cc_pacing(rack); 20026 rack_log_pacing_delay_calc(rack, 20027 rack->r_ctl.rc_fixed_pacing_rate_ss, 20028 rack->r_ctl.rc_fixed_pacing_rate_ca, 20029 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20030 __LINE__, NULL,0); 20031 break; 20032 20033 case TCP_RACK_PACE_RATE_SS: 20034 /* Set the fixed pacing rate in Bytes per second ca */ 20035 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 20036 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20037 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20038 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20039 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20040 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20041 rack->use_fixed_rate = 1; 20042 if (rack->rc_always_pace) 20043 rack_set_cc_pacing(rack); 20044 rack_log_pacing_delay_calc(rack, 20045 rack->r_ctl.rc_fixed_pacing_rate_ss, 20046 rack->r_ctl.rc_fixed_pacing_rate_ca, 20047 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20048 __LINE__, NULL, 0); 20049 break; 20050 20051 case TCP_RACK_PACE_RATE_CA: 20052 /* Set the fixed pacing rate in Bytes per second ca */ 20053 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 20054 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20055 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20056 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20057 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20058 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20059 rack->use_fixed_rate = 1; 20060 
if (rack->rc_always_pace) 20061 rack_set_cc_pacing(rack); 20062 rack_log_pacing_delay_calc(rack, 20063 rack->r_ctl.rc_fixed_pacing_rate_ss, 20064 rack->r_ctl.rc_fixed_pacing_rate_ca, 20065 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20066 __LINE__, NULL, 0); 20067 break; 20068 case TCP_RACK_GP_INCREASE_REC: 20069 RACK_OPTS_INC(tcp_gp_inc_rec); 20070 rack->r_ctl.rack_per_of_gp_rec = optval; 20071 rack_log_pacing_delay_calc(rack, 20072 rack->r_ctl.rack_per_of_gp_ss, 20073 rack->r_ctl.rack_per_of_gp_ca, 20074 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20075 __LINE__, NULL, 0); 20076 break; 20077 case TCP_RACK_GP_INCREASE_CA: 20078 RACK_OPTS_INC(tcp_gp_inc_ca); 20079 ca = optval; 20080 if (ca < 100) { 20081 /* 20082 * We don't allow any reduction 20083 * over the GP b/w. 20084 */ 20085 error = EINVAL; 20086 break; 20087 } 20088 rack->r_ctl.rack_per_of_gp_ca = ca; 20089 rack_log_pacing_delay_calc(rack, 20090 rack->r_ctl.rack_per_of_gp_ss, 20091 rack->r_ctl.rack_per_of_gp_ca, 20092 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20093 __LINE__, NULL, 0); 20094 break; 20095 case TCP_RACK_GP_INCREASE_SS: 20096 RACK_OPTS_INC(tcp_gp_inc_ss); 20097 ss = optval; 20098 if (ss < 100) { 20099 /* 20100 * We don't allow any reduction 20101 * over the GP b/w. 20102 */ 20103 error = EINVAL; 20104 break; 20105 } 20106 rack->r_ctl.rack_per_of_gp_ss = ss; 20107 rack_log_pacing_delay_calc(rack, 20108 rack->r_ctl.rack_per_of_gp_ss, 20109 rack->r_ctl.rack_per_of_gp_ca, 20110 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20111 __LINE__, NULL, 0); 20112 break; 20113 case TCP_RACK_RR_CONF: 20114 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20115 if (optval && optval <= 3) 20116 rack->r_rr_config = optval; 20117 else 20118 rack->r_rr_config = 0; 20119 break; 20120 case TCP_HDWR_RATE_CAP: 20121 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20122 if (optval) { 20123 if (rack->r_rack_hw_rate_caps == 0) 20124 rack->r_rack_hw_rate_caps = 1; 20125 else 20126 error = EALREADY; 20127 } else { 20128 rack->r_rack_hw_rate_caps = 0; 20129 } 20130 break; 20131 case TCP_BBR_HDWR_PACE: 20132 RACK_OPTS_INC(tcp_hdwr_pacing); 20133 if (optval){ 20134 if (rack->rack_hdrw_pacing == 0) { 20135 rack->rack_hdw_pace_ena = 1; 20136 rack->rack_attempt_hdwr_pace = 0; 20137 } else 20138 error = EALREADY; 20139 } else { 20140 rack->rack_hdw_pace_ena = 0; 20141 #ifdef RATELIMIT 20142 if (rack->r_ctl.crte != NULL) { 20143 rack->rack_hdrw_pacing = 0; 20144 rack->rack_attempt_hdwr_pace = 0; 20145 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20146 rack->r_ctl.crte = NULL; 20147 } 20148 #endif 20149 } 20150 break; 20151 /* End Pacing related ones */ 20152 case TCP_RACK_PRR_SENDALOT: 20153 /* Allow PRR to send more than one seg */ 20154 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20155 rack->r_ctl.rc_prr_sendalot = optval; 20156 break; 20157 case TCP_RACK_MIN_TO: 20158 /* Minimum time between rack t-o's in ms */ 20159 RACK_OPTS_INC(tcp_rack_min_to); 20160 rack->r_ctl.rc_min_to = optval; 20161 break; 20162 case TCP_RACK_EARLY_SEG: 20163 /* If early recovery max segments */ 20164 RACK_OPTS_INC(tcp_rack_early_seg); 20165 rack->r_ctl.rc_early_recovery_segs = optval; 20166 break; 20167 case TCP_RACK_ENABLE_HYSTART: 20168 { 20169 if (optval) { 20170 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 20171 if (rack_do_hystart > RACK_HYSTART_ON) 20172 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 20173 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 20174 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 20175 } else { 20176 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 20177 } 
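		/*
		 * Net effect of the block above: enabling the option always
		 * sets CCF_HYSTART_ALLOWED; the global rack_do_hystart level
		 * then layers CCF_HYSTART_CAN_SH_CWND and, at the highest
		 * setting, CCF_HYSTART_CONS_SSTH on top of it.  Disabling
		 * clears all three flags.
		 */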
20178 } 20179 break; 20180 case TCP_RACK_REORD_THRESH: 20181 /* RACK reorder threshold (shift amount) */ 20182 RACK_OPTS_INC(tcp_rack_reord_thresh); 20183 if ((optval > 0) && (optval < 31)) 20184 rack->r_ctl.rc_reorder_shift = optval; 20185 else 20186 error = EINVAL; 20187 break; 20188 case TCP_RACK_REORD_FADE: 20189 /* Does reordering fade after ms time */ 20190 RACK_OPTS_INC(tcp_rack_reord_fade); 20191 rack->r_ctl.rc_reorder_fade = optval; 20192 break; 20193 case TCP_RACK_TLP_THRESH: 20194 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20195 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20196 if (optval) 20197 rack->r_ctl.rc_tlp_threshold = optval; 20198 else 20199 error = EINVAL; 20200 break; 20201 case TCP_BBR_USE_RACK_RR: 20202 RACK_OPTS_INC(tcp_rack_rr); 20203 if (optval) 20204 rack->use_rack_rr = 1; 20205 else 20206 rack->use_rack_rr = 0; 20207 break; 20208 case TCP_FAST_RSM_HACK: 20209 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20210 if (optval) 20211 rack->fast_rsm_hack = 1; 20212 else 20213 rack->fast_rsm_hack = 0; 20214 break; 20215 case TCP_RACK_PKT_DELAY: 20216 /* RACK added ms i.e. rack-rtt + reord + N */ 20217 RACK_OPTS_INC(tcp_rack_pkt_delay); 20218 rack->r_ctl.rc_pkt_delay = optval; 20219 break; 20220 case TCP_DELACK: 20221 RACK_OPTS_INC(tcp_rack_delayed_ack); 20222 if (optval == 0) 20223 tp->t_delayed_ack = 0; 20224 else 20225 tp->t_delayed_ack = 1; 20226 if (tp->t_flags & TF_DELACK) { 20227 tp->t_flags &= ~TF_DELACK; 20228 tp->t_flags |= TF_ACKNOW; 20229 NET_EPOCH_ENTER(et); 20230 rack_output(tp); 20231 NET_EPOCH_EXIT(et); 20232 } 20233 break; 20234 20235 case TCP_BBR_RACK_RTT_USE: 20236 RACK_OPTS_INC(tcp_rack_rtt_use); 20237 if ((optval != USE_RTT_HIGH) && 20238 (optval != USE_RTT_LOW) && 20239 (optval != USE_RTT_AVG)) 20240 error = EINVAL; 20241 else 20242 rack->r_ctl.rc_rate_sample_method = optval; 20243 break; 20244 case TCP_DATA_AFTER_CLOSE: 20245 RACK_OPTS_INC(tcp_data_after_close); 20246 if (optval) 20247 rack->rc_allow_data_af_clo = 1; 20248 else 20249 rack->rc_allow_data_af_clo = 0; 20250 break; 20251 default: 20252 break; 20253 } 20254 #ifdef NETFLIX_STATS 20255 tcp_log_socket_option(tp, sopt_name, optval, error); 20256 #endif 20257 return (error); 20258 } 20259 20260 20261 static void 20262 rack_apply_deferred_options(struct tcp_rack *rack) 20263 { 20264 struct deferred_opt_list *dol, *sdol; 20265 uint32_t s_optval; 20266 20267 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20268 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20269 /* Disadvantage of deferal is you loose the error return */ 20270 s_optval = (uint32_t)dol->optval; 20271 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20272 free(dol, M_TCPDO); 20273 } 20274 } 20275 20276 static void 20277 rack_hw_tls_change(struct tcpcb *tp, int chg) 20278 { 20279 /* 20280 * HW tls state has changed.. fix all 20281 * rsm's in flight. 
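	 *
	 * Each rack_sendmap carries r_hw_tls so that later (re)transmissions
	 * of that data are handled with the correct kernel-TLS state; the
	 * fast-send-block copy (fsb.hw_tls) is refreshed as well so the
	 * fast output path stays in sync with the socket's new state.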
	 */
	struct tcp_rack *rack;
	struct rack_sendmap *rsm;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
		if (chg)
			rsm->r_hw_tls = 1;
		else
			rsm->r_hw_tls = 0;
	}
	if (chg)
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
}

static int
rack_pru_options(struct tcpcb *tp, int flags)
{
	if (flags & PRUS_OOB)
		return (EOPNOTSUPP);
	return (0);
}

static struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_do_queued_segments = ctf_do_queued_segments,
	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_timer_activate = rack_timer_activate,
	.tfb_tcp_timer_active = rack_timer_active,
	.tfb_tcp_timer_stop = rack_timer_stop,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok,
	.tfb_tcp_mtu_chg = rack_mtu_change,
	.tfb_pru_options = rack_pru_options,
	.tfb_hwtls_change = rack_hw_tls_change,
	.tfb_compute_pipe = rack_compute_pipe,
	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
};

/*
 * rack_ctloutput() must drop the inpcb lock before performing copyin on
 * socket option arguments.  When it re-acquires the lock after the copy, it
 * has to revalidate that the connection is still valid for the socket
 * option.
 */
static int
rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcpcb *tp;
	struct tcp_rack *rack;
	uint64_t loptval;
	int32_t error = 0, optval;

	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
#ifdef INET6
	ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif
#ifdef INET
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
#endif

	switch (sopt->sopt_level) {
#ifdef INET6
	case IPPROTO_IPV6:
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		case IPV6_USE_MIN_MTU:
			tcp6_use_min_mtu(tp);
			break;
		case IPV6_TCLASS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
			    (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK);
			break;
		}
		INP_WUNLOCK(inp);
		return (0);
#endif
#ifdef INET
	case IPPROTO_IP:
		switch (sopt->sopt_name) {
		case IP_TOS:
			/*
			 * The DSCP codepoint has changed, update the fsb.
			 */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			break;
		case IP_TTL:
			/*
			 * The TTL has changed, update the fsb.
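			 * (The fsb holds a prebuilt IP/TCP header template,
			 * which is why header changes such as TOS above and
			 * TTL here have to be copied into it.)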
20393 */ 20394 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20395 break; 20396 } 20397 INP_WUNLOCK(inp); 20398 return (0); 20399 #endif 20400 } 20401 20402 switch (sopt->sopt_name) { 20403 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20404 /* Pacing related ones */ 20405 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20406 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20407 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20408 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20409 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20410 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20411 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20412 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20413 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20414 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20415 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20416 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20417 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20418 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20419 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20420 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20421 /* End pacing related */ 20422 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20423 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20424 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20425 case TCP_RACK_MIN_TO: /* URL:min_to */ 20426 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20427 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20428 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20429 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20430 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20431 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20432 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20433 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20434 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20435 case TCP_NO_PRR: /* URL:noprr */ 20436 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20437 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20438 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20439 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20440 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20441 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20442 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20443 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20444 case TCP_RACK_PROFILE: /* URL:profile */ 20445 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20446 case TCP_RACK_ABC_VAL: /* URL:labc */ 20447 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20448 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20449 case TCP_DEFER_OPTIONS: /* URL:defer */ 20450 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20451 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20452 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20453 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20454 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20455 break; 20456 default: 20457 /* Filter off all unknown options to the base stack */ 20458 return (tcp_default_ctloutput(inp, sopt)); 20459 break; 20460 } 20461 INP_WUNLOCK(inp); 20462 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20463 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20464 /* 20465 * We truncate it down to 32 bits for the socket-option trace this 20466 * means rates > 34Gbps won't show right, but thats probably ok. 
20467 */ 20468 optval = (uint32_t)loptval; 20469 } else { 20470 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20471 /* Save it in 64 bit form too */ 20472 loptval = optval; 20473 } 20474 if (error) 20475 return (error); 20476 INP_WLOCK(inp); 20477 if (inp->inp_flags & INP_DROPPED) { 20478 INP_WUNLOCK(inp); 20479 return (ECONNRESET); 20480 } 20481 if (tp->t_fb != &__tcp_rack) { 20482 INP_WUNLOCK(inp); 20483 return (ENOPROTOOPT); 20484 } 20485 if (rack->defer_options && (rack->gp_ready == 0) && 20486 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20487 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20488 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20489 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20490 /* Options are beind deferred */ 20491 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20492 INP_WUNLOCK(inp); 20493 return (0); 20494 } else { 20495 /* No memory to defer, fail */ 20496 INP_WUNLOCK(inp); 20497 return (ENOMEM); 20498 } 20499 } 20500 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20501 INP_WUNLOCK(inp); 20502 return (error); 20503 } 20504 20505 static void 20506 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20507 { 20508 20509 INP_WLOCK_ASSERT(tptoinpcb(tp)); 20510 bzero(ti, sizeof(*ti)); 20511 20512 ti->tcpi_state = tp->t_state; 20513 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20514 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20515 if (tp->t_flags & TF_SACK_PERMIT) 20516 ti->tcpi_options |= TCPI_OPT_SACK; 20517 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20518 ti->tcpi_options |= TCPI_OPT_WSCALE; 20519 ti->tcpi_snd_wscale = tp->snd_scale; 20520 ti->tcpi_rcv_wscale = tp->rcv_scale; 20521 } 20522 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 20523 ti->tcpi_options |= TCPI_OPT_ECN; 20524 if (tp->t_flags & TF_FASTOPEN) 20525 ti->tcpi_options |= TCPI_OPT_TFO; 20526 /* still kept in ticks is t_rcvtime */ 20527 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20528 /* Since we hold everything in precise useconds this is easy */ 20529 ti->tcpi_rtt = tp->t_srtt; 20530 ti->tcpi_rttvar = tp->t_rttvar; 20531 ti->tcpi_rto = tp->t_rxtcur; 20532 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20533 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20534 /* 20535 * FreeBSD-specific extension fields for tcp_info. 20536 */ 20537 ti->tcpi_rcv_space = tp->rcv_wnd; 20538 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20539 ti->tcpi_snd_wnd = tp->snd_wnd; 20540 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
#ifdef NETFLIX_STATS
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
#endif
#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		tcp_offload_tcp_info(tp, ti);
	}
#endif
}

static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery.
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->ccv->cc_data)
				optval = ((struct newreno *)tp->ccv->cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences how
	 * much of a backoff happens when an ECN mark is detected. It is normally set
	 * to 80 for 80%, i.e. the cwnd is reduced by 20% of its previous value when
	 * you exit recovery. Note that classic ECN has a beta of 50; it is only
	 * ABE ECN that uses this "less" value, but we do too with pacing :)
	 */
	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
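			 * As a worked example of the two beta values: with
			 * the default beta of 50, a 100 segment cwnd leaves
			 * loss recovery at 50 segments, while with beta_ecn
			 * of 80 an ECN-triggered reduction leaves the same
			 * cwnd at 80 segments.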
20630 */ 20631 if (tp->ccv->cc_data) 20632 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn; 20633 else 20634 error = EINVAL; 20635 } 20636 break; 20637 case TCP_RACK_DSACK_OPT: 20638 optval = 0; 20639 if (rack->rc_rack_tmr_std_based) { 20640 optval |= 1; 20641 } 20642 if (rack->rc_rack_use_dsack) { 20643 optval |= 2; 20644 } 20645 break; 20646 case TCP_RACK_ENABLE_HYSTART: 20647 { 20648 if (tp->ccv->flags & CCF_HYSTART_ALLOWED) { 20649 optval = RACK_HYSTART_ON; 20650 if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND) 20651 optval = RACK_HYSTART_ON_W_SC; 20652 if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH) 20653 optval = RACK_HYSTART_ON_W_SC_C; 20654 } else { 20655 optval = RACK_HYSTART_OFF; 20656 } 20657 } 20658 break; 20659 case TCP_FAST_RSM_HACK: 20660 optval = rack->fast_rsm_hack; 20661 break; 20662 case TCP_DEFER_OPTIONS: 20663 optval = rack->defer_options; 20664 break; 20665 case TCP_RACK_MEASURE_CNT: 20666 optval = rack->r_ctl.req_measurements; 20667 break; 20668 case TCP_REC_ABC_VAL: 20669 optval = rack->r_use_labc_for_rec; 20670 break; 20671 case TCP_RACK_ABC_VAL: 20672 optval = rack->rc_labc; 20673 break; 20674 case TCP_HDWR_UP_ONLY: 20675 optval= rack->r_up_only; 20676 break; 20677 case TCP_PACING_RATE_CAP: 20678 loptval = rack->r_ctl.bw_rate_cap; 20679 break; 20680 case TCP_RACK_PROFILE: 20681 /* You cannot retrieve a profile, its write only */ 20682 error = EINVAL; 20683 break; 20684 case TCP_USE_CMP_ACKS: 20685 optval = rack->r_use_cmp_ack; 20686 break; 20687 case TCP_RACK_PACE_TO_FILL: 20688 optval = rack->rc_pace_to_cwnd; 20689 if (optval && rack->r_fill_less_agg) 20690 optval++; 20691 break; 20692 case TCP_RACK_NO_PUSH_AT_MAX: 20693 optval = rack->r_ctl.rc_no_push_at_mrtt; 20694 break; 20695 case TCP_SHARED_CWND_ENABLE: 20696 optval = rack->rack_enable_scwnd; 20697 break; 20698 case TCP_RACK_NONRXT_CFG_RATE: 20699 optval = rack->rack_rec_nonrxt_use_cr; 20700 break; 20701 case TCP_NO_PRR: 20702 if (rack->rack_no_prr == 1) 20703 optval = 1; 20704 else if (rack->no_prr_addback == 1) 20705 optval = 2; 20706 else 20707 optval = 0; 20708 break; 20709 case TCP_RACK_DO_DETECTION: 20710 optval = rack->do_detection; 20711 break; 20712 case TCP_RACK_MBUF_QUEUE: 20713 /* Now do we use the LRO mbuf-queue feature */ 20714 optval = rack->r_mbuf_queue; 20715 break; 20716 case TCP_TIMELY_DYN_ADJ: 20717 optval = rack->rc_gp_dyn_mul; 20718 break; 20719 case TCP_BBR_IWINTSO: 20720 optval = rack->rc_init_win; 20721 break; 20722 case TCP_RACK_TLP_REDUCE: 20723 /* RACK TLP cwnd reduction (bool) */ 20724 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 20725 break; 20726 case TCP_BBR_RACK_INIT_RATE: 20727 val = rack->r_ctl.init_rate; 20728 /* convert to kbits per sec */ 20729 val *= 8; 20730 val /= 1000; 20731 optval = (uint32_t)val; 20732 break; 20733 case TCP_RACK_FORCE_MSEG: 20734 optval = rack->rc_force_max_seg; 20735 break; 20736 case TCP_RACK_PACE_MAX_SEG: 20737 /* Max segments in a pace */ 20738 optval = rack->rc_user_set_max_segs; 20739 break; 20740 case TCP_RACK_PACE_ALWAYS: 20741 /* Use the always pace method */ 20742 optval = rack->rc_always_pace; 20743 break; 20744 case TCP_RACK_PRR_SENDALOT: 20745 /* Allow PRR to send more than one seg */ 20746 optval = rack->r_ctl.rc_prr_sendalot; 20747 break; 20748 case TCP_RACK_MIN_TO: 20749 /* Minimum time between rack t-o's in ms */ 20750 optval = rack->r_ctl.rc_min_to; 20751 break; 20752 case TCP_RACK_EARLY_SEG: 20753 /* If early recovery max segments */ 20754 optval = rack->r_ctl.rc_early_recovery_segs; 20755 break; 20756 case TCP_RACK_REORD_THRESH: 
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

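		/*
		 * The two zones above back the per-segment rack_sendmap
		 * entries and the per-connection tcp_rack control block;
		 * the code below wires up the stack's sysctl tree and then
		 * registers the stack name(s) with the TCP function block
		 * framework.
		 */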
		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
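/*
 * Illustrative note, not used by the code above: once this module is loaded
 * (for example with "kldload tcp_rack") the stack shows up in the
 * net.inet.tcp.functions_available sysctl and can be made the system default
 * through net.inet.tcp.functions_default, or selected on an individual
 * connected socket s with the TCP_FUNCTION_BLK socket option, using the name
 * the stack registers under (normally "rack"):
 *
 *	struct tcp_function_set fs;
 *
 *	memset(&fs, 0, sizeof(fs));
 *	strlcpy(fs.function_set_name, "rack", sizeof(fs.function_set_name));
 *	setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &fs, sizeof(fs));
 */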