/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define	TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) which
 *   stops us from using the number of dup acks and instead
 *   uses time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports
 * SACK initially and then to assure that the RACK state matches the
 * connection state before calling that state's do_segment function.
 * Each state is simplified due to the fact that the original
 * do_segment has been decomposed and we *know* what state we are
 * in (no switches on the state) and all tests for SACK are gone.
 * This greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
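/*
 * Illustrative sketch (not the actual implementation): the heart of the
 * time-based idea above is that a sent block is judged lost by time,
 * not by a count of duplicate acks.  Conceptually, with names simplified
 * for illustration, the per-sendmap-entry check looks something like:
 *
 *	if (TSTMP_GT(cts, sent_time + rack_rtt + reorder_window))
 *		the entry is a candidate for retransmission;
 *
 * where reorder_window starts out as a fraction of the RTT and is widened
 * when reordering has actually been observed (see rack_reorder_thresh and
 * rack_reorder_fade below).
 */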
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs without new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time_betweens */
static int32_t rack_hw_rate_caps = 1;		/* 1; */
static int32_t rack_hw_rate_min = 0;		/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Minimum rack timeout in microseconds */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250ms in usecs */
static int32_t rack_persist_max = 2000000;	/* 2 seconds in usecs */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */
/*
 * Currently regular tcp has a rto_min of 30ms;
 * the backoff goes 12 times, so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
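/*
 * Worked out for reference (the arithmetic behind the comment above):
 * with a 30 ms rto_min and the backoff doubling 12 times, the total
 * wait is the geometric series
 *
 *	30 ms * (2^0 + 2^1 + ... + 2^11) = 30 ms * (2^12 - 1)
 *	                                 = 30 ms * 4095 = 122.85 seconds
 *
 * which is where the 122.850 second figure comes from.
 */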
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms in usecs */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usecs */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250% slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200% congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200% of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 130% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in usecs) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to b/w */
/* ie the up/down provide the upper and lower bound; a worked example follows below */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;
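/*
 * Rough worked example of the timely knobs above (illustrative, not a
 * precise statement of the algorithm): with rack_gp_per_bw_mul_up = 2 and
 * rack_gp_per_bw_mul_down = 4, a new goodput measurement that lands within
 * -4% .. +2% of the previous one is treated as "no change" and the pacing
 * multipliers are left alone.  A measurement outside that band nudges the
 * multiplier up by about rack_gp_increase_per percent or cuts it by about
 * rack_gp_decrease_per percent, never letting it fall below
 * rack_per_lower_bound.
 */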

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_badfr;
counter_u64_t rack_badfr_bytes;
counter_u64_t rack_rtm_prr_retran;
counter_u64_t rack_rtm_prr_newdata;
counter_u64_t rack_timestamp_mismatch;
counter_u64_t rack_reorder_seen;
counter_u64_t rack_paced_segments;
counter_u64_t rack_unpaced_segments;
counter_u64_t rack_calc_zero;
counter_u64_t rack_calc_nonzero;
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_per_timer_hole;
counter_u64_t rack_large_ackcmp;
counter_u64_t rack_small_ackcmp;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_tlp_retran_fail;
counter_u64_t rack_to_tot;
counter_u64_t rack_to_arm_rack;
counter_u64_t rack_to_arm_tlp;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

#define MAX_NUM_OF_CNTS 13
counter_u64_t rack_proc_comp_ack[MAX_NUM_OF_CNTS];
counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
380 counter_u64_t rack_sack_used_next_merge; 381 counter_u64_t rack_sack_splits; 382 counter_u64_t rack_sack_used_prev_merge; 383 counter_u64_t rack_sack_skipped_acked; 384 counter_u64_t rack_ack_total; 385 counter_u64_t rack_express_sack; 386 counter_u64_t rack_sack_total; 387 counter_u64_t rack_move_none; 388 counter_u64_t rack_move_some; 389 390 counter_u64_t rack_used_tlpmethod; 391 counter_u64_t rack_used_tlpmethod2; 392 counter_u64_t rack_enter_tlp_calc; 393 counter_u64_t rack_input_idle_reduces; 394 counter_u64_t rack_collapsed_win; 395 counter_u64_t rack_tlp_does_nada; 396 counter_u64_t rack_try_scwnd; 397 counter_u64_t rack_hw_pace_init_fail; 398 counter_u64_t rack_hw_pace_lost; 399 counter_u64_t rack_sbsndptr_right; 400 counter_u64_t rack_sbsndptr_wrong; 401 402 /* Temp CPU counters */ 403 counter_u64_t rack_find_high; 404 405 counter_u64_t rack_progress_drops; 406 counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE]; 407 counter_u64_t rack_opts_arry[RACK_OPTS_SIZE]; 408 409 410 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2))) 411 412 #define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do { \ 413 (tv) = (value) + slop; \ 414 if ((u_long)(tv) < (u_long)(tvmin)) \ 415 (tv) = (tvmin); \ 416 if ((u_long)(tv) > (u_long)(tvmax)) \ 417 (tv) = (tvmax); \ 418 } while (0) 419 420 static void 421 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line); 422 423 static int 424 rack_process_ack(struct mbuf *m, struct tcphdr *th, 425 struct socket *so, struct tcpcb *tp, struct tcpopt *to, 426 uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val); 427 static int 428 rack_process_data(struct mbuf *m, struct tcphdr *th, 429 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 430 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt); 431 static void 432 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, 433 uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery); 434 static struct rack_sendmap *rack_alloc(struct tcp_rack *rack); 435 static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack, 436 uint8_t limit_type); 437 static struct rack_sendmap * 438 rack_check_recovery_mode(struct tcpcb *tp, 439 uint32_t tsused); 440 static void 441 rack_cong_signal(struct tcpcb *tp, 442 uint32_t type, uint32_t ack); 443 static void rack_counter_destroy(void); 444 static int 445 rack_ctloutput(struct socket *so, struct sockopt *sopt, 446 struct inpcb *inp, struct tcpcb *tp); 447 static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how); 448 static void 449 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override); 450 static void 451 rack_do_segment(struct mbuf *m, struct tcphdr *th, 452 struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 453 uint8_t iptos); 454 static void rack_dtor(void *mem, int32_t size, void *arg); 455 static void 456 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 457 uint32_t flex1, uint32_t flex2, 458 uint32_t flex3, uint32_t flex4, 459 uint32_t flex5, uint32_t flex6, 460 uint16_t flex7, uint8_t mod); 461 static void 462 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 463 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line, struct rack_sendmap *rsm); 464 static struct rack_sendmap * 465 rack_find_high_nonack(struct tcp_rack *rack, 466 struct rack_sendmap *rsm); 467 static struct rack_sendmap 
*rack_find_lowest_rsm(struct tcp_rack *rack); 468 static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm); 469 static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged); 470 static int 471 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 472 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 473 static void 474 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 475 tcp_seq th_ack, int line); 476 static uint32_t 477 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss); 478 static int32_t rack_handoff_ok(struct tcpcb *tp); 479 static int32_t rack_init(struct tcpcb *tp); 480 static void rack_init_sysctls(void); 481 static void 482 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, 483 struct tcphdr *th, int entered_rec, int dup_ack_struck); 484 static void 485 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 486 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t ts, 487 struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls); 488 489 static void 490 rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack, 491 struct rack_sendmap *rsm); 492 static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm); 493 static int32_t rack_output(struct tcpcb *tp); 494 495 static uint32_t 496 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, 497 struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm, 498 uint32_t cts, int *moved_two); 499 static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq); 500 static void rack_remxt_tmr(struct tcpcb *tp); 501 static int 502 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 503 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack); 504 static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack); 505 static int32_t rack_stopall(struct tcpcb *tp); 506 static void 507 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, 508 uint32_t delta); 509 static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type); 510 static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line); 511 static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type); 512 static uint32_t 513 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 514 struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag); 515 static void 516 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 517 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag); 518 static int 519 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 520 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack); 521 static int32_t tcp_addrack(module_t mod, int32_t type, void *data); 522 static int 523 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, 524 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 525 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 526 static int 527 rack_do_closing(struct mbuf *m, struct tcphdr *th, 528 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 529 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 530 static int 531 rack_do_established(struct mbuf *m, struct tcphdr *th, 532 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 533 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 534 static int 535 
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, 536 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 537 int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos); 538 static int 539 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, 540 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 541 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 542 static int 543 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, 544 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 545 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 546 static int 547 rack_do_lastack(struct mbuf *m, struct tcphdr *th, 548 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 549 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 550 static int 551 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, 552 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 553 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 554 static int 555 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, 556 struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, 557 int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos); 558 struct rack_sendmap * 559 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, 560 uint32_t tsused); 561 static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, 562 uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt); 563 static void 564 tcp_rack_partialack(struct tcpcb *tp); 565 static int 566 rack_set_profile(struct tcp_rack *rack, int prof); 567 static void 568 rack_apply_deferred_options(struct tcp_rack *rack); 569 570 int32_t rack_clear_counter=0; 571 572 static void 573 rack_set_cc_pacing(struct tcp_rack *rack) 574 { 575 struct sockopt sopt; 576 struct cc_newreno_opts opt; 577 struct newreno old, *ptr; 578 struct tcpcb *tp; 579 int error; 580 581 if (rack->rc_pacing_cc_set) 582 return; 583 584 tp = rack->rc_tp; 585 if (tp->cc_algo == NULL) { 586 /* Tcb is leaving */ 587 printf("No cc algorithm?\n"); 588 return; 589 } 590 rack->rc_pacing_cc_set = 1; 591 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 592 /* Not new-reno we can't play games with beta! */ 593 goto out; 594 } 595 ptr = ((struct newreno *)tp->ccv->cc_data); 596 if (CC_ALGO(tp)->ctl_output == NULL) { 597 /* Huh, why does new_reno no longer have a set function? */ 598 printf("no ctl_output for algo:%s\n", tp->cc_algo->name); 599 goto out; 600 } 601 if (ptr == NULL) { 602 /* Just the default values */ 603 old.beta = V_newreno_beta_ecn; 604 old.beta_ecn = V_newreno_beta_ecn; 605 old.newreno_flags = 0; 606 } else { 607 old.beta = ptr->beta; 608 old.beta_ecn = ptr->beta_ecn; 609 old.newreno_flags = ptr->newreno_flags; 610 } 611 sopt.sopt_valsize = sizeof(struct cc_newreno_opts); 612 sopt.sopt_dir = SOPT_SET; 613 opt.name = CC_NEWRENO_BETA; 614 opt.val = rack->r_ctl.rc_saved_beta.beta; 615 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 616 if (error) { 617 printf("Error returned by ctl_output %d\n", error); 618 goto out; 619 } 620 /* 621 * Hack alert we need to set in our newreno_flags 622 * so that Abe behavior is also applied. 
623 */ 624 ((struct newreno *)tp->ccv->cc_data)->newreno_flags = CC_NEWRENO_BETA_ECN; 625 opt.name = CC_NEWRENO_BETA_ECN; 626 opt.val = rack->r_ctl.rc_saved_beta.beta_ecn; 627 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 628 if (error) { 629 printf("Error returned by ctl_output %d\n", error); 630 goto out; 631 } 632 /* Save off the original values for restoral */ 633 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 634 out: 635 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 636 union tcp_log_stackspecific log; 637 struct timeval tv; 638 639 ptr = ((struct newreno *)tp->ccv->cc_data); 640 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 641 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 642 if (ptr) { 643 log.u_bbr.flex1 = ptr->beta; 644 log.u_bbr.flex2 = ptr->beta_ecn; 645 log.u_bbr.flex3 = ptr->newreno_flags; 646 } 647 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 648 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 649 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 650 log.u_bbr.flex7 = rack->gp_ready; 651 log.u_bbr.flex7 <<= 1; 652 log.u_bbr.flex7 |= rack->use_fixed_rate; 653 log.u_bbr.flex7 <<= 1; 654 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 655 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 656 log.u_bbr.flex8 = 3; 657 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error, 658 0, &log, false, NULL, NULL, 0, &tv); 659 } 660 } 661 662 static void 663 rack_undo_cc_pacing(struct tcp_rack *rack) 664 { 665 struct newreno old, *ptr; 666 struct tcpcb *tp; 667 668 if (rack->rc_pacing_cc_set == 0) 669 return; 670 tp = rack->rc_tp; 671 rack->rc_pacing_cc_set = 0; 672 if (tp->cc_algo == NULL) 673 /* Tcb is leaving */ 674 return; 675 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 676 /* Not new-reno nothing to do! */ 677 return; 678 } 679 ptr = ((struct newreno *)tp->ccv->cc_data); 680 if (ptr == NULL) { 681 /* 682 * This happens at rack_fini() if the 683 * cc module gets freed on us. In that 684 * case we loose our "new" settings but 685 * thats ok, since the tcb is going away anyway. 
686 */ 687 return; 688 } 689 /* Grab out our set values */ 690 memcpy(&old, ptr, sizeof(struct newreno)); 691 /* Copy back in the original values */ 692 memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno)); 693 /* Now save back the values we had set in (for when pacing is restored) */ 694 memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno)); 695 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 696 union tcp_log_stackspecific log; 697 struct timeval tv; 698 699 ptr = ((struct newreno *)tp->ccv->cc_data); 700 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 701 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 702 log.u_bbr.flex1 = ptr->beta; 703 log.u_bbr.flex2 = ptr->beta_ecn; 704 log.u_bbr.flex3 = ptr->newreno_flags; 705 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta; 706 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn; 707 log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags; 708 log.u_bbr.flex7 = rack->gp_ready; 709 log.u_bbr.flex7 <<= 1; 710 log.u_bbr.flex7 |= rack->use_fixed_rate; 711 log.u_bbr.flex7 <<= 1; 712 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; 713 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 714 log.u_bbr.flex8 = 4; 715 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 716 0, &log, false, NULL, NULL, 0, &tv); 717 } 718 } 719 720 #ifdef NETFLIX_PEAKRATE 721 static inline void 722 rack_update_peakrate_thr(struct tcpcb *tp) 723 { 724 /* Keep in mind that t_maxpeakrate is in B/s. */ 725 uint64_t peak; 726 peak = uqmax((tp->t_maxseg * 2), 727 (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC)); 728 tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX); 729 } 730 #endif 731 732 static int 733 sysctl_rack_clear(SYSCTL_HANDLER_ARGS) 734 { 735 uint32_t stat; 736 int32_t error; 737 int i; 738 739 error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t)); 740 if (error || req->newptr == NULL) 741 return error; 742 743 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 744 if (error) 745 return (error); 746 if (stat == 1) { 747 #ifdef INVARIANTS 748 printf("Clearing RACK counters\n"); 749 #endif 750 counter_u64_zero(rack_badfr); 751 counter_u64_zero(rack_badfr_bytes); 752 counter_u64_zero(rack_rtm_prr_retran); 753 counter_u64_zero(rack_rtm_prr_newdata); 754 counter_u64_zero(rack_timestamp_mismatch); 755 counter_u64_zero(rack_reorder_seen); 756 counter_u64_zero(rack_tlp_tot); 757 counter_u64_zero(rack_tlp_newdata); 758 counter_u64_zero(rack_tlp_retran); 759 counter_u64_zero(rack_tlp_retran_bytes); 760 counter_u64_zero(rack_tlp_retran_fail); 761 counter_u64_zero(rack_to_tot); 762 counter_u64_zero(rack_to_arm_rack); 763 counter_u64_zero(rack_to_arm_tlp); 764 counter_u64_zero(rack_paced_segments); 765 counter_u64_zero(rack_calc_zero); 766 counter_u64_zero(rack_calc_nonzero); 767 counter_u64_zero(rack_unpaced_segments); 768 counter_u64_zero(rack_saw_enobuf); 769 counter_u64_zero(rack_saw_enobuf_hw); 770 counter_u64_zero(rack_saw_enetunreach); 771 counter_u64_zero(rack_per_timer_hole); 772 counter_u64_zero(rack_large_ackcmp); 773 counter_u64_zero(rack_small_ackcmp); 774 #ifdef INVARIANTS 775 counter_u64_zero(rack_adjust_map_bw); 776 #endif 777 counter_u64_zero(rack_to_alloc_hard); 778 counter_u64_zero(rack_to_alloc_emerg); 779 counter_u64_zero(rack_sack_proc_all); 780 counter_u64_zero(rack_fto_send); 781 counter_u64_zero(rack_fto_rsm_send); 782 counter_u64_zero(rack_extended_rfo); 783 counter_u64_zero(rack_hw_pace_init_fail); 784 counter_u64_zero(rack_hw_pace_lost); 785 
counter_u64_zero(rack_sbsndptr_wrong); 786 counter_u64_zero(rack_sbsndptr_right); 787 counter_u64_zero(rack_non_fto_send); 788 counter_u64_zero(rack_nfto_resend); 789 counter_u64_zero(rack_sack_proc_short); 790 counter_u64_zero(rack_sack_proc_restart); 791 counter_u64_zero(rack_to_alloc); 792 counter_u64_zero(rack_to_alloc_limited); 793 counter_u64_zero(rack_alloc_limited_conns); 794 counter_u64_zero(rack_split_limited); 795 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 796 counter_u64_zero(rack_proc_comp_ack[i]); 797 } 798 counter_u64_zero(rack_multi_single_eq); 799 counter_u64_zero(rack_proc_non_comp_ack); 800 counter_u64_zero(rack_find_high); 801 counter_u64_zero(rack_sack_attacks_detected); 802 counter_u64_zero(rack_sack_attacks_reversed); 803 counter_u64_zero(rack_sack_used_next_merge); 804 counter_u64_zero(rack_sack_used_prev_merge); 805 counter_u64_zero(rack_sack_splits); 806 counter_u64_zero(rack_sack_skipped_acked); 807 counter_u64_zero(rack_ack_total); 808 counter_u64_zero(rack_express_sack); 809 counter_u64_zero(rack_sack_total); 810 counter_u64_zero(rack_move_none); 811 counter_u64_zero(rack_move_some); 812 counter_u64_zero(rack_used_tlpmethod); 813 counter_u64_zero(rack_used_tlpmethod2); 814 counter_u64_zero(rack_enter_tlp_calc); 815 counter_u64_zero(rack_progress_drops); 816 counter_u64_zero(rack_tlp_does_nada); 817 counter_u64_zero(rack_try_scwnd); 818 counter_u64_zero(rack_collapsed_win); 819 } 820 rack_clear_counter = 0; 821 return (0); 822 } 823 824 static void 825 rack_init_sysctls(void) 826 { 827 int i; 828 struct sysctl_oid *rack_counters; 829 struct sysctl_oid *rack_attack; 830 struct sysctl_oid *rack_pacing; 831 struct sysctl_oid *rack_timely; 832 struct sysctl_oid *rack_timers; 833 struct sysctl_oid *rack_tlp; 834 struct sysctl_oid *rack_misc; 835 struct sysctl_oid *rack_measure; 836 struct sysctl_oid *rack_probertt; 837 struct sysctl_oid *rack_hw_pacing; 838 839 rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 840 SYSCTL_CHILDREN(rack_sysctl_root), 841 OID_AUTO, 842 "sack_attack", 843 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 844 "Rack Sack Attack Counters and Controls"); 845 rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 846 SYSCTL_CHILDREN(rack_sysctl_root), 847 OID_AUTO, 848 "stats", 849 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 850 "Rack Counters"); 851 SYSCTL_ADD_S32(&rack_sysctl_ctx, 852 SYSCTL_CHILDREN(rack_sysctl_root), 853 OID_AUTO, "rate_sample_method", CTLFLAG_RW, 854 &rack_rate_sample_method , USE_RTT_LOW, 855 "What method should we use for rate sampling 0=high, 1=low "); 856 /* Probe rtt related controls */ 857 rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 858 SYSCTL_CHILDREN(rack_sysctl_root), 859 OID_AUTO, 860 "probertt", 861 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 862 "ProbeRTT related Controls"); 863 SYSCTL_ADD_U16(&rack_sysctl_ctx, 864 SYSCTL_CHILDREN(rack_probertt), 865 OID_AUTO, "exit_per_hpb", CTLFLAG_RW, 866 &rack_atexit_prtt_hbp, 130, 867 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); 868 SYSCTL_ADD_U16(&rack_sysctl_ctx, 869 SYSCTL_CHILDREN(rack_probertt), 870 OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW, 871 &rack_atexit_prtt, 130, 872 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); 873 SYSCTL_ADD_U16(&rack_sysctl_ctx, 874 SYSCTL_CHILDREN(rack_probertt), 875 OID_AUTO, "gp_per_mul", CTLFLAG_RW, 876 &rack_per_of_gp_probertt, 60, 877 "What percentage of goodput do we pace at in probertt"); 878 SYSCTL_ADD_U16(&rack_sysctl_ctx, 879 SYSCTL_CHILDREN(rack_probertt), 880 OID_AUTO, "gp_per_reduce", 
CTLFLAG_RW, 881 &rack_per_of_gp_probertt_reduce, 10, 882 "What percentage of goodput do we reduce every gp_srtt"); 883 SYSCTL_ADD_U16(&rack_sysctl_ctx, 884 SYSCTL_CHILDREN(rack_probertt), 885 OID_AUTO, "gp_per_low", CTLFLAG_RW, 886 &rack_per_of_gp_lowthresh, 40, 887 "What percentage of goodput do we allow the multiplier to fall to"); 888 SYSCTL_ADD_U32(&rack_sysctl_ctx, 889 SYSCTL_CHILDREN(rack_probertt), 890 OID_AUTO, "time_between", CTLFLAG_RW, 891 & rack_time_between_probertt, 96000000, 892 "How many useconds between the lowest rtt falling must past before we enter probertt"); 893 SYSCTL_ADD_U32(&rack_sysctl_ctx, 894 SYSCTL_CHILDREN(rack_probertt), 895 OID_AUTO, "safety", CTLFLAG_RW, 896 &rack_probe_rtt_safety_val, 2000000, 897 "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)"); 898 SYSCTL_ADD_U32(&rack_sysctl_ctx, 899 SYSCTL_CHILDREN(rack_probertt), 900 OID_AUTO, "sets_cwnd", CTLFLAG_RW, 901 &rack_probe_rtt_sets_cwnd, 0, 902 "Do we set the cwnd too (if always_lower is on)"); 903 SYSCTL_ADD_U32(&rack_sysctl_ctx, 904 SYSCTL_CHILDREN(rack_probertt), 905 OID_AUTO, "maxdrainsrtts", CTLFLAG_RW, 906 &rack_max_drain_wait, 2, 907 "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal"); 908 SYSCTL_ADD_U32(&rack_sysctl_ctx, 909 SYSCTL_CHILDREN(rack_probertt), 910 OID_AUTO, "mustdrainsrtts", CTLFLAG_RW, 911 &rack_must_drain, 1, 912 "We must drain this many gp_srtt's waiting for flight to reach goal"); 913 SYSCTL_ADD_U32(&rack_sysctl_ctx, 914 SYSCTL_CHILDREN(rack_probertt), 915 OID_AUTO, "goal_use_min_entry", CTLFLAG_RW, 916 &rack_probertt_use_min_rtt_entry, 1, 917 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); 918 SYSCTL_ADD_U32(&rack_sysctl_ctx, 919 SYSCTL_CHILDREN(rack_probertt), 920 OID_AUTO, "goal_use_min_exit", CTLFLAG_RW, 921 &rack_probertt_use_min_rtt_exit, 0, 922 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); 923 SYSCTL_ADD_U32(&rack_sysctl_ctx, 924 SYSCTL_CHILDREN(rack_probertt), 925 OID_AUTO, "length_div", CTLFLAG_RW, 926 &rack_probertt_gpsrtt_cnt_div, 0, 927 "How many recent goodput srtt periods plus hold tim does probertt last (bottom of fraction)"); 928 SYSCTL_ADD_U32(&rack_sysctl_ctx, 929 SYSCTL_CHILDREN(rack_probertt), 930 OID_AUTO, "length_mul", CTLFLAG_RW, 931 &rack_probertt_gpsrtt_cnt_mul, 0, 932 "How many recent goodput srtt periods plus hold tim does probertt last (top of fraction)"); 933 SYSCTL_ADD_U32(&rack_sysctl_ctx, 934 SYSCTL_CHILDREN(rack_probertt), 935 OID_AUTO, "holdtim_at_target", CTLFLAG_RW, 936 &rack_min_probertt_hold, 200000, 937 "What is the minimum time we hold probertt at target"); 938 SYSCTL_ADD_U32(&rack_sysctl_ctx, 939 SYSCTL_CHILDREN(rack_probertt), 940 OID_AUTO, "filter_life", CTLFLAG_RW, 941 &rack_probertt_filter_life, 10000000, 942 "What is the time for the filters life in useconds"); 943 SYSCTL_ADD_U32(&rack_sysctl_ctx, 944 SYSCTL_CHILDREN(rack_probertt), 945 OID_AUTO, "lower_within", CTLFLAG_RW, 946 &rack_probertt_lower_within, 10, 947 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); 948 SYSCTL_ADD_U32(&rack_sysctl_ctx, 949 SYSCTL_CHILDREN(rack_probertt), 950 OID_AUTO, "must_move", CTLFLAG_RW, 951 &rack_min_rtt_movement, 250, 952 "How much is the minimum movement in rtt to count as a drop for probertt purposes"); 953 SYSCTL_ADD_U32(&rack_sysctl_ctx, 954 SYSCTL_CHILDREN(rack_probertt), 955 OID_AUTO, "clear_is_cnts", CTLFLAG_RW, 956 &rack_probertt_clear_is, 1, 957 "Do we clear I/S 
counts on exiting probe-rtt"); 958 SYSCTL_ADD_S32(&rack_sysctl_ctx, 959 SYSCTL_CHILDREN(rack_probertt), 960 OID_AUTO, "hbp_extra_drain", CTLFLAG_RW, 961 &rack_max_drain_hbp, 1, 962 "How many extra drain gpsrtt's do we get in highly buffered paths"); 963 SYSCTL_ADD_S32(&rack_sysctl_ctx, 964 SYSCTL_CHILDREN(rack_probertt), 965 OID_AUTO, "hbp_threshold", CTLFLAG_RW, 966 &rack_hbp_thresh, 3, 967 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); 968 /* Pacing related sysctls */ 969 rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 970 SYSCTL_CHILDREN(rack_sysctl_root), 971 OID_AUTO, 972 "pacing", 973 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 974 "Pacing related Controls"); 975 SYSCTL_ADD_S32(&rack_sysctl_ctx, 976 SYSCTL_CHILDREN(rack_pacing), 977 OID_AUTO, "max_pace_over", CTLFLAG_RW, 978 &rack_max_per_above, 30, 979 "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)"); 980 SYSCTL_ADD_S32(&rack_sysctl_ctx, 981 SYSCTL_CHILDREN(rack_pacing), 982 OID_AUTO, "pace_to_one", CTLFLAG_RW, 983 &rack_pace_one_seg, 0, 984 "Do we allow low b/w pacing of 1MSS instead of two"); 985 SYSCTL_ADD_S32(&rack_sysctl_ctx, 986 SYSCTL_CHILDREN(rack_pacing), 987 OID_AUTO, "limit_wsrtt", CTLFLAG_RW, 988 &rack_limit_time_with_srtt, 0, 989 "Do we limit pacing time based on srtt"); 990 SYSCTL_ADD_S32(&rack_sysctl_ctx, 991 SYSCTL_CHILDREN(rack_pacing), 992 OID_AUTO, "init_win", CTLFLAG_RW, 993 &rack_default_init_window, 0, 994 "Do we have a rack initial window 0 = system default"); 995 SYSCTL_ADD_U16(&rack_sysctl_ctx, 996 SYSCTL_CHILDREN(rack_pacing), 997 OID_AUTO, "gp_per_ss", CTLFLAG_RW, 998 &rack_per_of_gp_ss, 250, 999 "If non zero, what percentage of goodput to pace at in slow start"); 1000 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1001 SYSCTL_CHILDREN(rack_pacing), 1002 OID_AUTO, "gp_per_ca", CTLFLAG_RW, 1003 &rack_per_of_gp_ca, 150, 1004 "If non zero, what percentage of goodput to pace at in congestion avoidance"); 1005 SYSCTL_ADD_U16(&rack_sysctl_ctx, 1006 SYSCTL_CHILDREN(rack_pacing), 1007 OID_AUTO, "gp_per_rec", CTLFLAG_RW, 1008 &rack_per_of_gp_rec, 200, 1009 "If non zero, what percentage of goodput to pace at in recovery"); 1010 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1011 SYSCTL_CHILDREN(rack_pacing), 1012 OID_AUTO, "pace_max_seg", CTLFLAG_RW, 1013 &rack_hptsi_segments, 40, 1014 "What size is the max for TSO segments in pacing and burst mitigation"); 1015 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1016 SYSCTL_CHILDREN(rack_pacing), 1017 OID_AUTO, "burst_reduces", CTLFLAG_RW, 1018 &rack_slot_reduction, 4, 1019 "When doing only burst mitigation what is the reduce divisor"); 1020 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1021 SYSCTL_CHILDREN(rack_sysctl_root), 1022 OID_AUTO, "use_pacing", CTLFLAG_RW, 1023 &rack_pace_every_seg, 0, 1024 "If set we use pacing, if clear we use only the original burst mitigation"); 1025 SYSCTL_ADD_U64(&rack_sysctl_ctx, 1026 SYSCTL_CHILDREN(rack_pacing), 1027 OID_AUTO, "rate_cap", CTLFLAG_RW, 1028 &rack_bw_rate_cap, 0, 1029 "If set we apply this value to the absolute rate cap used by pacing"); 1030 SYSCTL_ADD_U8(&rack_sysctl_ctx, 1031 SYSCTL_CHILDREN(rack_sysctl_root), 1032 OID_AUTO, "req_measure_cnt", CTLFLAG_RW, 1033 &rack_req_measurements, 1, 1034 "If doing dynamic pacing, how many measurements must be in before we start pacing?"); 1035 /* Hardware pacing */ 1036 rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1037 SYSCTL_CHILDREN(rack_sysctl_root), 1038 OID_AUTO, 1039 "hdwr_pacing", 1040 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1041 "Pacing related Controls"); 1042 
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rwnd_factor", CTLFLAG_RW,
	    &rack_hw_rwnd_factor, 2,
	    "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
	    &rack_enobuf_hw_boost_mult, 2,
	    "By how many time_betweens should we boost the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
	    &rack_enobuf_hw_max, 2,
	    "What is the max boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
	    &rack_enobuf_hw_min, 2,
	    "What is the min boost of the pacing time if we see an ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "enable", CTLFLAG_RW,
	    &rack_enable_hw_pacing, 0,
	    "Should RACK attempt to use hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_hw_rate_caps, 1,
	    "Does the highest hardware pacing rate cap the rate we will send at?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 1,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 2,
	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
check for lower rtt"); 1124 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1125 SYSCTL_CHILDREN(rack_timely), 1126 OID_AUTO, "decrease", CTLFLAG_RW, 1127 &rack_gp_decrease_per, 20, 1128 "Rack timely decrease percentage of our GP multiplication factor"); 1129 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1130 SYSCTL_CHILDREN(rack_timely), 1131 OID_AUTO, "increase", CTLFLAG_RW, 1132 &rack_gp_increase_per, 2, 1133 "Rack timely increase perentage of our GP multiplication factor"); 1134 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1135 SYSCTL_CHILDREN(rack_timely), 1136 OID_AUTO, "lowerbound", CTLFLAG_RW, 1137 &rack_per_lower_bound, 50, 1138 "Rack timely lowest percentage we allow GP multiplier to fall to"); 1139 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1140 SYSCTL_CHILDREN(rack_timely), 1141 OID_AUTO, "upperboundss", CTLFLAG_RW, 1142 &rack_per_upper_bound_ss, 0, 1143 "Rack timely higest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)"); 1144 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1145 SYSCTL_CHILDREN(rack_timely), 1146 OID_AUTO, "upperboundca", CTLFLAG_RW, 1147 &rack_per_upper_bound_ca, 0, 1148 "Rack timely higest percentage we allow GP multiplier to CA raise to (0 is no upperbound)"); 1149 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1150 SYSCTL_CHILDREN(rack_timely), 1151 OID_AUTO, "dynamicgp", CTLFLAG_RW, 1152 &rack_do_dyn_mul, 0, 1153 "Rack timely do we enable dynmaic timely goodput by default"); 1154 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1155 SYSCTL_CHILDREN(rack_timely), 1156 OID_AUTO, "no_rec_red", CTLFLAG_RW, 1157 &rack_gp_no_rec_chg, 1, 1158 "Rack timely do we prohibit the recovery multiplier from being lowered"); 1159 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1160 SYSCTL_CHILDREN(rack_timely), 1161 OID_AUTO, "red_clear_cnt", CTLFLAG_RW, 1162 &rack_timely_dec_clear, 6, 1163 "Rack timely what threshold do we count to before another boost during b/w decent"); 1164 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1165 SYSCTL_CHILDREN(rack_timely), 1166 OID_AUTO, "max_push_rise", CTLFLAG_RW, 1167 &rack_timely_max_push_rise, 3, 1168 "Rack timely how many times do we push up with b/w increase"); 1169 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1170 SYSCTL_CHILDREN(rack_timely), 1171 OID_AUTO, "max_push_drop", CTLFLAG_RW, 1172 &rack_timely_max_push_drop, 3, 1173 "Rack timely how many times do we push back on b/w decent"); 1174 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1175 SYSCTL_CHILDREN(rack_timely), 1176 OID_AUTO, "min_segs", CTLFLAG_RW, 1177 &rack_timely_min_segs, 4, 1178 "Rack timely when setting the cwnd what is the min num segments"); 1179 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1180 SYSCTL_CHILDREN(rack_timely), 1181 OID_AUTO, "noback_max", CTLFLAG_RW, 1182 &rack_use_max_for_nobackoff, 0, 1183 "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min"); 1184 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1185 SYSCTL_CHILDREN(rack_timely), 1186 OID_AUTO, "interim_timely_only", CTLFLAG_RW, 1187 &rack_timely_int_timely_only, 0, 1188 "Rack timely when doing interim timely's do we only do timely (no b/w consideration)"); 1189 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1190 SYSCTL_CHILDREN(rack_timely), 1191 OID_AUTO, "nonstop", CTLFLAG_RW, 1192 &rack_timely_no_stopping, 0, 1193 "Rack timely don't stop increase"); 1194 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1195 SYSCTL_CHILDREN(rack_timely), 1196 OID_AUTO, "dec_raise_thresh", CTLFLAG_RW, 1197 &rack_down_raise_thresh, 100, 1198 "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)"); 1199 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1200 SYSCTL_CHILDREN(rack_timely), 1201 OID_AUTO, "bottom_drag_segs", CTLFLAG_RW, 1202 
&rack_req_segs, 1, 1203 "Bottom dragging if not these many segments outstanding and room"); 1204 1205 /* TLP and Rack related parameters */ 1206 rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1207 SYSCTL_CHILDREN(rack_sysctl_root), 1208 OID_AUTO, 1209 "tlp", 1210 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1211 "TLP and Rack related Controls"); 1212 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1213 SYSCTL_CHILDREN(rack_tlp), 1214 OID_AUTO, "use_rrr", CTLFLAG_RW, 1215 &use_rack_rr, 1, 1216 "Do we use Rack Rapid Recovery"); 1217 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1218 SYSCTL_CHILDREN(rack_tlp), 1219 OID_AUTO, "post_rec_labc", CTLFLAG_RW, 1220 &rack_max_abc_post_recovery, 2, 1221 "Since we do early recovery, do we override the l_abc to a value, if so what?"); 1222 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1223 SYSCTL_CHILDREN(rack_tlp), 1224 OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW, 1225 &rack_non_rxt_use_cr, 0, 1226 "Do we use ss/ca rate if in recovery we are transmitting a new data chunk"); 1227 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1228 SYSCTL_CHILDREN(rack_tlp), 1229 OID_AUTO, "tlpmethod", CTLFLAG_RW, 1230 &rack_tlp_threshold_use, TLP_USE_TWO_ONE, 1231 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); 1232 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1233 SYSCTL_CHILDREN(rack_tlp), 1234 OID_AUTO, "limit", CTLFLAG_RW, 1235 &rack_tlp_limit, 2, 1236 "How many TLP's can be sent without sending new data"); 1237 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1238 SYSCTL_CHILDREN(rack_tlp), 1239 OID_AUTO, "use_greater", CTLFLAG_RW, 1240 &rack_tlp_use_greater, 1, 1241 "Should we use the rack_rtt time if its greater than srtt"); 1242 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1243 SYSCTL_CHILDREN(rack_tlp), 1244 OID_AUTO, "tlpminto", CTLFLAG_RW, 1245 &rack_tlp_min, 10000, 1246 "TLP minimum timeout per the specification (in microseconds)"); 1247 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1248 SYSCTL_CHILDREN(rack_tlp), 1249 OID_AUTO, "send_oldest", CTLFLAG_RW, 1250 &rack_always_send_oldest, 0, 1251 "Should we always send the oldest TLP and RACK-TLP"); 1252 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1253 SYSCTL_CHILDREN(rack_tlp), 1254 OID_AUTO, "rack_tlimit", CTLFLAG_RW, 1255 &rack_limited_retran, 0, 1256 "How many times can a rack timeout drive out sends"); 1257 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1258 SYSCTL_CHILDREN(rack_tlp), 1259 OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW, 1260 &rack_lower_cwnd_at_tlp, 0, 1261 "When a TLP completes a retran should we enter recovery"); 1262 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1263 SYSCTL_CHILDREN(rack_tlp), 1264 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1265 &rack_reorder_thresh, 2, 1266 "What factor for rack will be added when seeing reordering (shift right)"); 1267 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1268 SYSCTL_CHILDREN(rack_tlp), 1269 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1270 &rack_tlp_thresh, 1, 1271 "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1272 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1273 SYSCTL_CHILDREN(rack_tlp), 1274 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1275 &rack_reorder_fade, 60000000, 1276 "Does reorder detection fade, if so how many microseconds (0 means never)"); 1277 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1278 SYSCTL_CHILDREN(rack_tlp), 1279 OID_AUTO, "pktdelay", CTLFLAG_RW, 1280 &rack_pkt_delay, 1000, 1281 "Extra RACK time (in microseconds) besides reordering thresh"); 1282 1283 /* Timer related controls */ 1284 rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 1285 SYSCTL_CHILDREN(rack_sysctl_root), 1286 OID_AUTO, 1287 "timers", 1288 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1289 "Timer related controls"); 1290 
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmin", CTLFLAG_RW,
	    &rack_persist_min, 250000,
	    "What is the minimum time in microseconds between persists");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmax", CTLFLAG_RW,
	    &rack_persist_max, 2000000,
	    "What is the largest delay in microseconds between persists");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 40000,
	    "Delayed ack time (40ms in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 30000,
	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 4000000,
	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1000,
	    "Minimum rack timeout in microseconds");
	/* Measure controls */
	rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "measure",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Measure related controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "wma_divisor", CTLFLAG_RW,
	    &rack_wma_divisor, 8,
	    "When doing b/w calculation what is the divisor for the WMA");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_cwnd", CTLFLAG_RW,
	    &rack_cwnd_block_ends_measure, 0,
	    "Does a cwnd just-return end the measurement window (app limited)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_rwnd", CTLFLAG_RW,
	    &rack_rwnd_block_ends_measure, 0,
	    "Does an rwnd just-return end the measurement window (app limited -- not persists)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_target", CTLFLAG_RW,
	    &rack_def_data_window, 20,
	    "What is the minimum target window (in mss) for GP measurements");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "goal_bdp", CTLFLAG_RW,
	    &rack_goal_bdp, 2,
	    "What is the goal BDP to measure");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_srtts", CTLFLAG_RW,
	    &rack_min_srtts, 1,
	    "What is the minimum number of srtt's required for a GP measurement");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
	    &rack_min_measure_usec, 0,
	    "What is the minimum time for a measurement, if 0 this is off");
	/* Misc rack controls */
	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "misc",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Misc related controls");
#ifdef TCP_ACCOUNTING
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "tcp_acct", CTLFLAG_RW,
	    &rack_tcp_accounting, 0,
	    "Should we turn on TCP accounting for all rack sessions?");
#endif
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "prr_addback_max", CTLFLAG_RW,
	    &rack_prr_addbackmax, 2,
"What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1381 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1382 SYSCTL_CHILDREN(rack_misc), 1383 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1384 &rack_stats_gets_ms_rtt, 1, 1385 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1386 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1387 SYSCTL_CHILDREN(rack_misc), 1388 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1389 &rack_client_low_buf, 0, 1390 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1391 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1392 SYSCTL_CHILDREN(rack_misc), 1393 OID_AUTO, "defprofile", CTLFLAG_RW, 1394 &rack_def_profile, 0, 1395 "Should RACK use a default profile (0=no, num == profile num)?"); 1396 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1397 SYSCTL_CHILDREN(rack_misc), 1398 OID_AUTO, "cmpack", CTLFLAG_RW, 1399 &rack_use_cmp_acks, 1, 1400 "Should RACK have LRO send compressed acks"); 1401 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1402 SYSCTL_CHILDREN(rack_misc), 1403 OID_AUTO, "fsb", CTLFLAG_RW, 1404 &rack_use_fsb, 1, 1405 "Should RACK use the fast send block?"); 1406 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1407 SYSCTL_CHILDREN(rack_misc), 1408 OID_AUTO, "rfo", CTLFLAG_RW, 1409 &rack_use_rfo, 1, 1410 "Should RACK use rack_fast_output()?"); 1411 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1412 SYSCTL_CHILDREN(rack_misc), 1413 OID_AUTO, "rsmrfo", CTLFLAG_RW, 1414 &rack_use_rsm_rfo, 1, 1415 "Should RACK use rack_fast_rsm_output()?"); 1416 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1417 SYSCTL_CHILDREN(rack_misc), 1418 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1419 &rack_enable_shared_cwnd, 1, 1420 "Should RACK try to use the shared cwnd on connections where allowed"); 1421 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1422 SYSCTL_CHILDREN(rack_misc), 1423 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1424 &rack_limits_scwnd, 1, 1425 "Should RACK place low end time limits on the shared cwnd feature"); 1426 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1427 SYSCTL_CHILDREN(rack_misc), 1428 OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW, 1429 &rack_enable_mqueue_for_nonpaced, 0, 1430 "Should RACK use mbuf queuing for non-paced connections"); 1431 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_misc), 1433 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1434 &rack_use_imac_dack, 0, 1435 "Should RACK try to emulate iMac delayed ack"); 1436 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1437 SYSCTL_CHILDREN(rack_misc), 1438 OID_AUTO, "no_prr", CTLFLAG_RW, 1439 &rack_disable_prr, 0, 1440 "Should RACK not use prr and only pace (must have pacing on)"); 1441 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1442 SYSCTL_CHILDREN(rack_misc), 1443 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1444 &rack_verbose_logging, 0, 1445 "Should RACK black box logging be verbose"); 1446 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1447 SYSCTL_CHILDREN(rack_misc), 1448 OID_AUTO, "data_after_close", CTLFLAG_RW, 1449 &rack_ignore_data_after_close, 1, 1450 "Do we hold off sending a RST until all pending data is ack'd"); 1451 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_misc), 1453 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1454 &rack_sack_not_required, 1, 1455 "Do we allow rack to run on connections not supporting SACK"); 1456 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1457 SYSCTL_CHILDREN(rack_misc), 1458 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1459 &rack_send_a_lot_in_prr, 1, 1460 "Send a lot in prr"); 1461 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1462 SYSCTL_CHILDREN(rack_misc), 1463 OID_AUTO, "autoscale", CTLFLAG_RW, 1464 &rack_autosndbuf_inc, 
20, 1465 "What percentage should rack scale up its snd buffer by?"); 1466 /* Sack Attacker detection stuff */ 1467 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1468 SYSCTL_CHILDREN(rack_attack), 1469 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1470 &rack_highest_sack_thresh_seen, 0, 1471 "Highest sack to ack ratio seen"); 1472 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1473 SYSCTL_CHILDREN(rack_attack), 1474 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1475 &rack_highest_move_thresh_seen, 0, 1476 "Highest move to non-move ratio seen"); 1477 rack_ack_total = counter_u64_alloc(M_WAITOK); 1478 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1479 SYSCTL_CHILDREN(rack_attack), 1480 OID_AUTO, "acktotal", CTLFLAG_RD, 1481 &rack_ack_total, 1482 "Total number of Ack's"); 1483 rack_express_sack = counter_u64_alloc(M_WAITOK); 1484 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1485 SYSCTL_CHILDREN(rack_attack), 1486 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1487 &rack_express_sack, 1488 "Total expresss number of Sack's"); 1489 rack_sack_total = counter_u64_alloc(M_WAITOK); 1490 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1491 SYSCTL_CHILDREN(rack_attack), 1492 OID_AUTO, "sacktotal", CTLFLAG_RD, 1493 &rack_sack_total, 1494 "Total number of SACKs"); 1495 rack_move_none = counter_u64_alloc(M_WAITOK); 1496 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1497 SYSCTL_CHILDREN(rack_attack), 1498 OID_AUTO, "move_none", CTLFLAG_RD, 1499 &rack_move_none, 1500 "Total number of SACK index reuse of postions under threshold"); 1501 rack_move_some = counter_u64_alloc(M_WAITOK); 1502 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_attack), 1504 OID_AUTO, "move_some", CTLFLAG_RD, 1505 &rack_move_some, 1506 "Total number of SACK index reuse of postions over threshold"); 1507 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1509 SYSCTL_CHILDREN(rack_attack), 1510 OID_AUTO, "attacks", CTLFLAG_RD, 1511 &rack_sack_attacks_detected, 1512 "Total number of SACK attackers that had sack disabled"); 1513 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1514 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1515 SYSCTL_CHILDREN(rack_attack), 1516 OID_AUTO, "reversed", CTLFLAG_RD, 1517 &rack_sack_attacks_reversed, 1518 "Total number of SACK attackers that were later determined false positive"); 1519 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1520 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1521 SYSCTL_CHILDREN(rack_attack), 1522 OID_AUTO, "nextmerge", CTLFLAG_RD, 1523 &rack_sack_used_next_merge, 1524 "Total number of times we used the next merge"); 1525 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1526 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1527 SYSCTL_CHILDREN(rack_attack), 1528 OID_AUTO, "prevmerge", CTLFLAG_RD, 1529 &rack_sack_used_prev_merge, 1530 "Total number of times we used the prev merge"); 1531 /* Counters */ 1532 rack_fto_send = counter_u64_alloc(M_WAITOK); 1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_counters), 1535 OID_AUTO, "fto_send", CTLFLAG_RD, 1536 &rack_fto_send, "Total number of rack_fast_output sends"); 1537 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1538 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1539 SYSCTL_CHILDREN(rack_counters), 1540 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1541 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1542 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1543 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1544 SYSCTL_CHILDREN(rack_counters), 1545 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1546 
&rack_nfto_resend, "Total number of rack_output retransmissions"); 1547 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1548 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1549 SYSCTL_CHILDREN(rack_counters), 1550 OID_AUTO, "nfto_send", CTLFLAG_RD, 1551 &rack_non_fto_send, "Total number of rack_output first sends"); 1552 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1553 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1554 SYSCTL_CHILDREN(rack_counters), 1555 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1556 &rack_extended_rfo, "Total number of times we extended rfo"); 1557 1558 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1559 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1560 SYSCTL_CHILDREN(rack_counters), 1561 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1562 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1563 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1564 1565 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1566 SYSCTL_CHILDREN(rack_counters), 1567 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1568 &rack_hw_pace_lost, "Total number of times we failed to initialize hw pacing"); 1569 1570 1571 1572 rack_badfr = counter_u64_alloc(M_WAITOK); 1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_counters), 1575 OID_AUTO, "badfr", CTLFLAG_RD, 1576 &rack_badfr, "Total number of bad FRs"); 1577 rack_badfr_bytes = counter_u64_alloc(M_WAITOK); 1578 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "badfr_bytes", CTLFLAG_RD, 1581 &rack_badfr_bytes, "Total number of bad FRs"); 1582 rack_rtm_prr_retran = counter_u64_alloc(M_WAITOK); 1583 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1584 SYSCTL_CHILDREN(rack_counters), 1585 OID_AUTO, "prrsndret", CTLFLAG_RD, 1586 &rack_rtm_prr_retran, 1587 "Total number of prr based retransmits"); 1588 rack_rtm_prr_newdata = counter_u64_alloc(M_WAITOK); 1589 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1590 SYSCTL_CHILDREN(rack_counters), 1591 OID_AUTO, "prrsndnew", CTLFLAG_RD, 1592 &rack_rtm_prr_newdata, 1593 "Total number of prr based new transmits"); 1594 rack_timestamp_mismatch = counter_u64_alloc(M_WAITOK); 1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_counters), 1597 OID_AUTO, "tsnf", CTLFLAG_RD, 1598 &rack_timestamp_mismatch, 1599 "Total number of timestamps that we could not find the reported ts"); 1600 rack_find_high = counter_u64_alloc(M_WAITOK); 1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_counters), 1603 OID_AUTO, "findhigh", CTLFLAG_RD, 1604 &rack_find_high, 1605 "Total number of FIN causing find-high"); 1606 rack_reorder_seen = counter_u64_alloc(M_WAITOK); 1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1608 SYSCTL_CHILDREN(rack_counters), 1609 OID_AUTO, "reordering", CTLFLAG_RD, 1610 &rack_reorder_seen, 1611 "Total number of times we added delay due to reordering"); 1612 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_counters), 1615 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1616 &rack_tlp_tot, 1617 "Total number of tail loss probe expirations"); 1618 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1620 SYSCTL_CHILDREN(rack_counters), 1621 OID_AUTO, "tlp_new", CTLFLAG_RD, 1622 &rack_tlp_newdata, 1623 "Total number of tail loss probe sending new data"); 1624 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_counters), 1627 OID_AUTO, "tlp_retran", CTLFLAG_RD, 
1628 &rack_tlp_retran, 1629 "Total number of tail loss probe sending retransmitted data"); 1630 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1632 SYSCTL_CHILDREN(rack_counters), 1633 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1634 &rack_tlp_retran_bytes, 1635 "Total bytes of tail loss probe sending retransmitted data"); 1636 rack_tlp_retran_fail = counter_u64_alloc(M_WAITOK); 1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1638 SYSCTL_CHILDREN(rack_counters), 1639 OID_AUTO, "tlp_retran_fail", CTLFLAG_RD, 1640 &rack_tlp_retran_fail, 1641 "Total number of tail loss probe sending retransmitted data that failed (wait for t3)"); 1642 rack_to_tot = counter_u64_alloc(M_WAITOK); 1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1644 SYSCTL_CHILDREN(rack_counters), 1645 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1646 &rack_to_tot, 1647 "Total number of times the rack to expired"); 1648 rack_to_arm_rack = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_counters), 1651 OID_AUTO, "arm_rack", CTLFLAG_RD, 1652 &rack_to_arm_rack, 1653 "Total number of times the rack timer armed"); 1654 rack_to_arm_tlp = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_counters), 1657 OID_AUTO, "arm_tlp", CTLFLAG_RD, 1658 &rack_to_arm_tlp, 1659 "Total number of times the tlp timer armed"); 1660 rack_calc_zero = counter_u64_alloc(M_WAITOK); 1661 rack_calc_nonzero = counter_u64_alloc(M_WAITOK); 1662 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1663 SYSCTL_CHILDREN(rack_counters), 1664 OID_AUTO, "calc_zero", CTLFLAG_RD, 1665 &rack_calc_zero, 1666 "Total number of times pacing time worked out to zero"); 1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1668 SYSCTL_CHILDREN(rack_counters), 1669 OID_AUTO, "calc_nonzero", CTLFLAG_RD, 1670 &rack_calc_nonzero, 1671 "Total number of times pacing time worked out to non-zero"); 1672 rack_paced_segments = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_counters), 1675 OID_AUTO, "paced", CTLFLAG_RD, 1676 &rack_paced_segments, 1677 "Total number of times a segment send caused hptsi"); 1678 rack_unpaced_segments = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "unpaced", CTLFLAG_RD, 1682 &rack_unpaced_segments, 1683 "Total number of times a segment did not cause hptsi"); 1684 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1688 &rack_saw_enobuf, 1689 "Total number of times a sends returned enobuf for non-hdwr paced connections"); 1690 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1692 SYSCTL_CHILDREN(rack_counters), 1693 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1694 &rack_saw_enobuf_hw, 1695 "Total number of times a send returned enobuf for hdwr paced connections"); 1696 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_counters), 1699 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1700 &rack_saw_enetunreach, 1701 "Total number of times a send received a enetunreachable"); 1702 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_counters), 1705 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1706 &rack_hot_alloc, 1707 "Total 
allocations from the top of our list"); 1708 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1709 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1710 SYSCTL_CHILDREN(rack_counters), 1711 OID_AUTO, "allocs", CTLFLAG_RD, 1712 &rack_to_alloc, 1713 "Total allocations of tracking structures"); 1714 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1715 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1716 SYSCTL_CHILDREN(rack_counters), 1717 OID_AUTO, "allochard", CTLFLAG_RD, 1718 &rack_to_alloc_hard, 1719 "Total allocations done with sleeping the hard way"); 1720 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1721 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1722 SYSCTL_CHILDREN(rack_counters), 1723 OID_AUTO, "allocemerg", CTLFLAG_RD, 1724 &rack_to_alloc_emerg, 1725 "Total allocations done from emergency cache"); 1726 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1727 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1728 SYSCTL_CHILDREN(rack_counters), 1729 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1730 &rack_to_alloc_limited, 1731 "Total allocations dropped due to limit"); 1732 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1733 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1734 SYSCTL_CHILDREN(rack_counters), 1735 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1736 &rack_alloc_limited_conns, 1737 "Connections with allocations dropped due to limit"); 1738 rack_split_limited = counter_u64_alloc(M_WAITOK); 1739 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1740 SYSCTL_CHILDREN(rack_counters), 1741 OID_AUTO, "split_limited", CTLFLAG_RD, 1742 &rack_split_limited, 1743 "Split allocations dropped due to limit"); 1744 1745 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 1746 char name[32]; 1747 sprintf(name, "cmp_ack_cnt_%d", i); 1748 rack_proc_comp_ack[i] = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, name, CTLFLAG_RD, 1752 &rack_proc_comp_ack[i], 1753 "Number of compressed acks we processed"); 1754 } 1755 rack_large_ackcmp = counter_u64_alloc(M_WAITOK); 1756 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1757 SYSCTL_CHILDREN(rack_counters), 1758 OID_AUTO, "cmp_large_mbufs", CTLFLAG_RD, 1759 &rack_large_ackcmp, 1760 "Number of TCP connections with large mbuf's for compressed acks"); 1761 rack_small_ackcmp = counter_u64_alloc(M_WAITOK); 1762 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1763 SYSCTL_CHILDREN(rack_counters), 1764 OID_AUTO, "cmp_small_mbufs", CTLFLAG_RD, 1765 &rack_small_ackcmp, 1766 "Number of TCP connections with small mbuf's for compressed acks"); 1767 #ifdef INVARIANTS 1768 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1769 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1770 SYSCTL_CHILDREN(rack_counters), 1771 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1772 &rack_adjust_map_bw, 1773 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1774 #endif 1775 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1776 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1777 SYSCTL_CHILDREN(rack_counters), 1778 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1779 &rack_multi_single_eq, 1780 "Number of compressed acks total represented"); 1781 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1782 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1783 SYSCTL_CHILDREN(rack_counters), 1784 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1785 &rack_proc_non_comp_ack, 1786 "Number of non compresseds acks that we processed"); 1787 1788 1789 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1790 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1791 
SYSCTL_CHILDREN(rack_counters), 1792 OID_AUTO, "sack_long", CTLFLAG_RD, 1793 &rack_sack_proc_all, 1794 "Total times we had to walk whole list for sack processing"); 1795 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1796 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1797 SYSCTL_CHILDREN(rack_counters), 1798 OID_AUTO, "sack_restart", CTLFLAG_RD, 1799 &rack_sack_proc_restart, 1800 "Total times we had to walk whole list due to a restart"); 1801 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1802 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1803 SYSCTL_CHILDREN(rack_counters), 1804 OID_AUTO, "sack_short", CTLFLAG_RD, 1805 &rack_sack_proc_short, 1806 "Total times we took shortcut for sack processing"); 1807 rack_enter_tlp_calc = counter_u64_alloc(M_WAITOK); 1808 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1809 SYSCTL_CHILDREN(rack_counters), 1810 OID_AUTO, "tlp_calc_entered", CTLFLAG_RD, 1811 &rack_enter_tlp_calc, 1812 "Total times we called calc-tlp"); 1813 rack_used_tlpmethod = counter_u64_alloc(M_WAITOK); 1814 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1815 SYSCTL_CHILDREN(rack_counters), 1816 OID_AUTO, "hit_tlp_method", CTLFLAG_RD, 1817 &rack_used_tlpmethod, 1818 "Total number of runt sacks"); 1819 rack_used_tlpmethod2 = counter_u64_alloc(M_WAITOK); 1820 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1821 SYSCTL_CHILDREN(rack_counters), 1822 OID_AUTO, "hit_tlp_method2", CTLFLAG_RD, 1823 &rack_used_tlpmethod2, 1824 "Total number of times we hit TLP method 2"); 1825 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1826 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1827 SYSCTL_CHILDREN(rack_attack), 1828 OID_AUTO, "skipacked", CTLFLAG_RD, 1829 &rack_sack_skipped_acked, 1830 "Total number of times we skipped previously sacked"); 1831 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1832 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1833 SYSCTL_CHILDREN(rack_attack), 1834 OID_AUTO, "ofsplit", CTLFLAG_RD, 1835 &rack_sack_splits, 1836 "Total number of times we did the old fashion tree split"); 1837 rack_progress_drops = counter_u64_alloc(M_WAITOK); 1838 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1839 SYSCTL_CHILDREN(rack_counters), 1840 OID_AUTO, "prog_drops", CTLFLAG_RD, 1841 &rack_progress_drops, 1842 "Total number of progress drops"); 1843 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1844 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1845 SYSCTL_CHILDREN(rack_counters), 1846 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1847 &rack_input_idle_reduces, 1848 "Total number of idle reductions on input"); 1849 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1850 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1851 SYSCTL_CHILDREN(rack_counters), 1852 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1853 &rack_collapsed_win, 1854 "Total number of collapsed windows"); 1855 rack_tlp_does_nada = counter_u64_alloc(M_WAITOK); 1856 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1857 SYSCTL_CHILDREN(rack_counters), 1858 OID_AUTO, "tlp_nada", CTLFLAG_RD, 1859 &rack_tlp_does_nada, 1860 "Total number of nada tlp calls"); 1861 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1862 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1863 SYSCTL_CHILDREN(rack_counters), 1864 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1865 &rack_try_scwnd, 1866 "Total number of scwnd attempts"); 1867 1868 rack_per_timer_hole = counter_u64_alloc(M_WAITOK); 1869 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1870 SYSCTL_CHILDREN(rack_counters), 1871 OID_AUTO, "timer_hole", CTLFLAG_RD, 1872 &rack_per_timer_hole, 1873 "Total persists start in timer hole"); 1874 1875 rack_sbsndptr_wrong = 
counter_u64_alloc(M_WAITOK); 1876 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1877 SYSCTL_CHILDREN(rack_counters), 1878 OID_AUTO, "sndptr_wrong", CTLFLAG_RD, 1879 &rack_sbsndptr_wrong, "Total number of times the saved sbsndptr was incorrect"); 1880 rack_sbsndptr_right = counter_u64_alloc(M_WAITOK); 1881 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1882 SYSCTL_CHILDREN(rack_counters), 1883 OID_AUTO, "sndptr_right", CTLFLAG_RD, 1884 &rack_sbsndptr_right, "Total number of times the saved sbsndptr was correct"); 1885 1886 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1887 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1888 OID_AUTO, "outsize", CTLFLAG_RD, 1889 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1890 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1891 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1892 OID_AUTO, "opts", CTLFLAG_RD, 1893 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1894 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1895 SYSCTL_CHILDREN(rack_sysctl_root), 1896 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1897 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1898 } 1899 1900 static __inline int 1901 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1902 { 1903 if (SEQ_GEQ(b->r_start, a->r_start) && 1904 SEQ_LT(b->r_start, a->r_end)) { 1905 /* 1906 * The entry b is within the 1907 * block a. i.e.: 1908 * a -- |-------------| 1909 * b -- |----| 1910 * <or> 1911 * b -- |------| 1912 * <or> 1913 * b -- |-----------| 1914 */ 1915 return (0); 1916 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1917 /* 1918 * b falls either as the next 1919 * sequence block after a or beyond it, so a 1920 * is said to be smaller than b. 1921 * i.e: 1922 * a -- |------| 1923 * b -- |--------| 1924 * or 1925 * b -- |-----| 1926 */ 1927 return (1); 1928 } 1929 /* 1930 * What's left is where a is 1931 * larger than b. i.e: 1932 * a -- |-------| 1933 * b -- |---| 1934 * or even possibly 1935 * b -- |--------------| 1936 */ 1937 return (-1); 1938 } 1939 1940 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1941 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1942 1943 static uint32_t 1944 rc_init_window(struct tcp_rack *rack) 1945 { 1946 uint32_t win; 1947 1948 if (rack->rc_init_win == 0) { 1949 /* 1950 * Nothing set by the user, use the system stack 1951 * default. 1952 */ 1953 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1954 } 1955 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1956 return (win); 1957 } 1958 1959 static uint64_t 1960 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1961 { 1962 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1963 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1964 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1965 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1966 else 1967 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1968 } 1969 1970 static uint64_t 1971 rack_get_bw(struct tcp_rack *rack) 1972 { 1973 if (rack->use_fixed_rate) { 1974 /* Return the fixed pacing rate */ 1975 return (rack_get_fixed_pacing_bw(rack)); 1976 } 1977 if (rack->r_ctl.gp_bw == 0) { 1978 /* 1979 * We have yet no b/w measurement, 1980 * if we have a user set initial bw 1981 * return it. If we don't have that and 1982 * we have an srtt, use the tcp IW (10) to 1983 * calculate a fictional b/w over the SRTT 1984 * which is more or less a guess.
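 * A rough worked example of this guess (the numbers here are only
 * illustrative, nothing in the code assumes them): with the usual
 * 10-segment initial window of 1460-byte segments (14600 bytes) and a
 * 50 ms (50000 usec) SRTT, the result is 14600 * 1000000 / 50000 =
 * 292000 bytes/sec, roughly 2.3 Mbit/s, before any bw_rate_cap clamp.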
Note 1985 * we don't use our IW from rack on purpose 1986 * so if we have like IW=30, we are not 1987 * calculating a "huge" b/w. 1988 */ 1989 uint64_t bw, srtt; 1990 if (rack->r_ctl.init_rate) 1991 return (rack->r_ctl.init_rate); 1992 1993 /* Has the user set a max peak rate? */ 1994 #ifdef NETFLIX_PEAKRATE 1995 if (rack->rc_tp->t_maxpeakrate) 1996 return (rack->rc_tp->t_maxpeakrate); 1997 #endif 1998 /* Ok lets come up with the IW guess, if we have a srtt */ 1999 if (rack->rc_tp->t_srtt == 0) { 2000 /* 2001 * Go with old pacing method 2002 * i.e. burst mitigation only. 2003 */ 2004 return (0); 2005 } 2006 /* Ok lets get the initial TCP win (not racks) */ 2007 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 2008 srtt = (uint64_t)rack->rc_tp->t_srtt; 2009 bw *= (uint64_t)USECS_IN_SECOND; 2010 bw /= srtt; 2011 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2012 bw = rack->r_ctl.bw_rate_cap; 2013 return (bw); 2014 } else { 2015 uint64_t bw; 2016 2017 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 2018 /* Averaging is done, we can return the value */ 2019 bw = rack->r_ctl.gp_bw; 2020 } else { 2021 /* Still doing initial average must calculate */ 2022 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 2023 } 2024 #ifdef NETFLIX_PEAKRATE 2025 if ((rack->rc_tp->t_maxpeakrate) && 2026 (bw > rack->rc_tp->t_maxpeakrate)) { 2027 /* The user has set a peak rate to pace at 2028 * don't allow us to pace faster than that. 2029 */ 2030 return (rack->rc_tp->t_maxpeakrate); 2031 } 2032 #endif 2033 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 2034 bw = rack->r_ctl.bw_rate_cap; 2035 return (bw); 2036 } 2037 } 2038 2039 static uint16_t 2040 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 2041 { 2042 if (rack->use_fixed_rate) { 2043 return (100); 2044 } else if (rack->in_probe_rtt && (rsm == NULL)) 2045 return (rack->r_ctl.rack_per_of_gp_probertt); 2046 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 2047 rack->r_ctl.rack_per_of_gp_rec)) { 2048 if (rsm) { 2049 /* a retransmission always use the recovery rate */ 2050 return (rack->r_ctl.rack_per_of_gp_rec); 2051 } else if (rack->rack_rec_nonrxt_use_cr) { 2052 /* Directed to use the configured rate */ 2053 goto configured_rate; 2054 } else if (rack->rack_no_prr && 2055 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 2056 /* No PRR, lets just use the b/w estimate only */ 2057 return (100); 2058 } else { 2059 /* 2060 * Here we may have a non-retransmit but we 2061 * have no overrides, so just use the recovery 2062 * rate (prr is in effect). 
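 * (For the reader: every rack_per_of_gp_* value returned here is a
 * percentage that rack_get_output_bw() below applies as
 * bw_est = bw * gain / 100, so a recovery gain of 150, for example,
 * paces at 1.5x the goodput estimate while 100 means "pace at the
 * measured rate".)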
2063 */ 2064 return (rack->r_ctl.rack_per_of_gp_rec); 2065 } 2066 } 2067 configured_rate: 2068 /* For the configured rate we look at our cwnd vs the ssthresh */ 2069 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 2070 return (rack->r_ctl.rack_per_of_gp_ss); 2071 else 2072 return (rack->r_ctl.rack_per_of_gp_ca); 2073 } 2074 2075 static void 2076 rack_log_hdwr_pacing(struct tcp_rack *rack, 2077 uint64_t rate, uint64_t hw_rate, int line, 2078 int error, uint16_t mod) 2079 { 2080 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2081 union tcp_log_stackspecific log; 2082 struct timeval tv; 2083 const struct ifnet *ifp; 2084 2085 memset(&log, 0, sizeof(log)); 2086 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2087 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2088 if (rack->r_ctl.crte) { 2089 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2090 } else if (rack->rc_inp->inp_route.ro_nh && 2091 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2092 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2093 } else 2094 ifp = NULL; 2095 if (ifp) { 2096 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2097 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2098 } 2099 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2100 log.u_bbr.bw_inuse = rate; 2101 log.u_bbr.flex5 = line; 2102 log.u_bbr.flex6 = error; 2103 log.u_bbr.flex7 = mod; 2104 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2105 log.u_bbr.flex8 = rack->use_fixed_rate; 2106 log.u_bbr.flex8 <<= 1; 2107 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2108 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2109 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2110 if (rack->r_ctl.crte) 2111 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2112 else 2113 log.u_bbr.cur_del_rate = 0; 2114 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2115 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2116 &rack->rc_inp->inp_socket->so_rcv, 2117 &rack->rc_inp->inp_socket->so_snd, 2118 BBR_LOG_HDWR_PACE, 0, 2119 0, &log, false, &tv); 2120 } 2121 } 2122 2123 static uint64_t 2124 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2125 { 2126 /* 2127 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 2128 */ 2129 uint64_t bw_est, high_rate; 2130 uint64_t gain; 2131 2132 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2133 bw_est = bw * gain; 2134 bw_est /= (uint64_t)100; 2135 /* Never fall below the minimum (def 64kbps) */ 2136 if (bw_est < RACK_MIN_BW) 2137 bw_est = RACK_MIN_BW; 2138 if (rack->r_rack_hw_rate_caps) { 2139 /* Rate caps are in place */ 2140 if (rack->r_ctl.crte != NULL) { 2141 /* We have a hdwr rate already */ 2142 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2143 if (bw_est >= high_rate) { 2144 /* We are capping bw at the highest rate table entry */ 2145 rack_log_hdwr_pacing(rack, 2146 bw_est, high_rate, __LINE__, 2147 0, 3); 2148 bw_est = high_rate; 2149 if (capped) 2150 *capped = 1; 2151 } 2152 } else if ((rack->rack_hdrw_pacing == 0) && 2153 (rack->rack_hdw_pace_ena) && 2154 (rack->rack_attempt_hdwr_pace == 0) && 2155 (rack->rc_inp->inp_route.ro_nh != NULL) && 2156 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2157 /* 2158 * Special case, we have not yet attempted hardware 2159 * pacing, and yet we may, when we do, find out if we are 2160 * above the highest rate. We need to know the maxbw for the interface 2161 * in question (if it supports ratelimiting). We get back 2162 * a 0, if the interface is not found in the RL lists. 
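 * Put differently: if a non-zero table rate comes back and our
 * gained-up estimate exceeds it, bw_est is clamped to that rate and
 * *capped is set, so the caller can tell the limit came from the
 * interface's rate table rather than from the pacing math itself.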
2163 */ 2164 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2165 if (high_rate) { 2166 /* Yep, we have a rate is it above this rate? */ 2167 if (bw_est > high_rate) { 2168 bw_est = high_rate; 2169 if (capped) 2170 *capped = 1; 2171 } 2172 } 2173 } 2174 } 2175 return (bw_est); 2176 } 2177 2178 static void 2179 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2180 { 2181 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2182 union tcp_log_stackspecific log; 2183 struct timeval tv; 2184 2185 if ((mod != 1) && (rack_verbose_logging == 0)) { 2186 /* 2187 * We get 3 values currently for mod 2188 * 1 - We are retransmitting and this tells the reason. 2189 * 2 - We are clearing a dup-ack count. 2190 * 3 - We are incrementing a dup-ack count. 2191 * 2192 * The clear/increment are only logged 2193 * if you have BBverbose on. 2194 */ 2195 return; 2196 } 2197 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2198 log.u_bbr.flex1 = tsused; 2199 log.u_bbr.flex2 = thresh; 2200 log.u_bbr.flex3 = rsm->r_flags; 2201 log.u_bbr.flex4 = rsm->r_dupack; 2202 log.u_bbr.flex5 = rsm->r_start; 2203 log.u_bbr.flex6 = rsm->r_end; 2204 log.u_bbr.flex8 = mod; 2205 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2206 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2207 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2208 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2209 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2210 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2211 log.u_bbr.pacing_gain = rack->r_must_retran; 2212 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2213 &rack->rc_inp->inp_socket->so_rcv, 2214 &rack->rc_inp->inp_socket->so_snd, 2215 BBR_LOG_SETTINGS_CHG, 0, 2216 0, &log, false, &tv); 2217 } 2218 } 2219 2220 static void 2221 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2222 { 2223 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2224 union tcp_log_stackspecific log; 2225 struct timeval tv; 2226 2227 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2228 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2229 log.u_bbr.flex2 = to; 2230 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2231 log.u_bbr.flex4 = slot; 2232 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2233 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2234 log.u_bbr.flex7 = rack->rc_in_persist; 2235 log.u_bbr.flex8 = which; 2236 if (rack->rack_no_prr) 2237 log.u_bbr.pkts_out = 0; 2238 else 2239 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2240 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2241 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2242 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2243 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2244 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2245 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2246 log.u_bbr.pacing_gain = rack->r_must_retran; 2247 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2248 log.u_bbr.lost = rack_rto_min; 2249 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2250 &rack->rc_inp->inp_socket->so_rcv, 2251 &rack->rc_inp->inp_socket->so_snd, 2252 BBR_LOG_TIMERSTAR, 0, 2253 0, &log, false, &tv); 2254 } 2255 } 2256 2257 static void 2258 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2259 { 2260 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2261 union tcp_log_stackspecific log; 2262 struct timeval tv; 2263 2264 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2265 log.u_bbr.inhpts = 
rack->rc_inp->inp_in_hpts; 2266 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2267 log.u_bbr.flex8 = to_num; 2268 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2269 log.u_bbr.flex2 = rack->rc_rack_rtt; 2270 if (rsm == NULL) 2271 log.u_bbr.flex3 = 0; 2272 else 2273 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2274 if (rack->rack_no_prr) 2275 log.u_bbr.flex5 = 0; 2276 else 2277 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2278 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2279 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2280 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2281 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2282 log.u_bbr.pacing_gain = rack->r_must_retran; 2283 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2284 &rack->rc_inp->inp_socket->so_rcv, 2285 &rack->rc_inp->inp_socket->so_snd, 2286 BBR_LOG_RTO, 0, 2287 0, &log, false, &tv); 2288 } 2289 } 2290 2291 static void 2292 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2293 struct rack_sendmap *prev, 2294 struct rack_sendmap *rsm, 2295 struct rack_sendmap *next, 2296 int flag, uint32_t th_ack, int line) 2297 { 2298 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2299 union tcp_log_stackspecific log; 2300 struct timeval tv; 2301 2302 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2303 log.u_bbr.flex8 = flag; 2304 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2305 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2306 log.u_bbr.cur_del_rate = (uint64_t)prev; 2307 log.u_bbr.delRate = (uint64_t)rsm; 2308 log.u_bbr.rttProp = (uint64_t)next; 2309 log.u_bbr.flex7 = 0; 2310 if (prev) { 2311 log.u_bbr.flex1 = prev->r_start; 2312 log.u_bbr.flex2 = prev->r_end; 2313 log.u_bbr.flex7 |= 0x4; 2314 } 2315 if (rsm) { 2316 log.u_bbr.flex3 = rsm->r_start; 2317 log.u_bbr.flex4 = rsm->r_end; 2318 log.u_bbr.flex7 |= 0x2; 2319 } 2320 if (next) { 2321 log.u_bbr.flex5 = next->r_start; 2322 log.u_bbr.flex6 = next->r_end; 2323 log.u_bbr.flex7 |= 0x1; 2324 } 2325 log.u_bbr.applimited = line; 2326 log.u_bbr.pkts_out = th_ack; 2327 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2328 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2329 if (rack->rack_no_prr) 2330 log.u_bbr.lost = 0; 2331 else 2332 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2333 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2334 &rack->rc_inp->inp_socket->so_rcv, 2335 &rack->rc_inp->inp_socket->so_snd, 2336 TCP_LOG_MAPCHG, 0, 2337 0, &log, false, &tv); 2338 } 2339 } 2340 2341 static void 2342 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2343 struct rack_sendmap *rsm, int conf) 2344 { 2345 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2346 union tcp_log_stackspecific log; 2347 struct timeval tv; 2348 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2349 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2350 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2351 log.u_bbr.flex1 = t; 2352 log.u_bbr.flex2 = len; 2353 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2354 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2355 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2356 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2357 log.u_bbr.flex7 = conf; 2358 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2359 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2360 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2361 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2362 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2363 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, 
rack->r_ctl.rc_sacked); 2364 if (rsm) { 2365 log.u_bbr.pkt_epoch = rsm->r_start; 2366 log.u_bbr.lost = rsm->r_end; 2367 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2368 log.u_bbr.pacing_gain = rsm->r_flags; 2369 } else { 2370 /* Its a SYN */ 2371 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2372 log.u_bbr.lost = 0; 2373 log.u_bbr.cwnd_gain = 0; 2374 log.u_bbr.pacing_gain = 0; 2375 } 2376 /* Write out general bits of interest rrs here */ 2377 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2378 log.u_bbr.use_lt_bw <<= 1; 2379 log.u_bbr.use_lt_bw |= rack->forced_ack; 2380 log.u_bbr.use_lt_bw <<= 1; 2381 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2382 log.u_bbr.use_lt_bw <<= 1; 2383 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2384 log.u_bbr.use_lt_bw <<= 1; 2385 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2386 log.u_bbr.use_lt_bw <<= 1; 2387 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2388 log.u_bbr.use_lt_bw <<= 1; 2389 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2390 log.u_bbr.use_lt_bw <<= 1; 2391 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2392 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; 2393 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2394 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2395 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2396 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2397 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2398 log.u_bbr.bw_inuse <<= 32; 2399 if (rsm) 2400 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2401 TCP_LOG_EVENTP(tp, NULL, 2402 &rack->rc_inp->inp_socket->so_rcv, 2403 &rack->rc_inp->inp_socket->so_snd, 2404 BBR_LOG_BBRRTT, 0, 2405 0, &log, false, &tv); 2406 2407 2408 } 2409 } 2410 2411 static void 2412 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2413 { 2414 /* 2415 * Log the rtt sample we are 2416 * applying to the srtt algorithm in 2417 * useconds. 2418 */ 2419 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2420 union tcp_log_stackspecific log; 2421 struct timeval tv; 2422 2423 /* Convert our ms to a microsecond */ 2424 memset(&log, 0, sizeof(log)); 2425 log.u_bbr.flex1 = rtt; 2426 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2427 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2428 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2429 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2430 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2431 log.u_bbr.flex7 = 1; 2432 log.u_bbr.flex8 = rack->sack_attack_disable; 2433 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2434 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2435 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2436 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2437 log.u_bbr.pacing_gain = rack->r_must_retran; 2438 /* 2439 * We capture in delRate the upper 32 bits as 2440 * the confidence level we had declared, and the 2441 * lower 32 bits as the actual RTT using the arrival 2442 * timestamp. 
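 * A decoding sketch for anyone reading these records offline (this
 * is not code the kernel runs, just the inverse of the packing done
 * below):
 *   confidence = (u_bbr.delRate >> 32) & 0xffffffff;
 *   rtt_usecs  =  u_bbr.delRate        & 0xffffffff;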
2443 */ 2444 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2445 log.u_bbr.delRate <<= 32; 2446 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2447 /* Lets capture all the things that make up t_rtxcur */ 2448 log.u_bbr.applimited = rack_rto_min; 2449 log.u_bbr.epoch = rack_rto_max; 2450 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2451 log.u_bbr.lost = rack_rto_min; 2452 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2453 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2454 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2455 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2456 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2457 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2458 &rack->rc_inp->inp_socket->so_rcv, 2459 &rack->rc_inp->inp_socket->so_snd, 2460 TCP_LOG_RTT, 0, 2461 0, &log, false, &tv); 2462 } 2463 } 2464 2465 static void 2466 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2467 { 2468 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2469 union tcp_log_stackspecific log; 2470 struct timeval tv; 2471 2472 /* Convert our ms to a microsecond */ 2473 memset(&log, 0, sizeof(log)); 2474 log.u_bbr.flex1 = rtt; 2475 log.u_bbr.flex2 = send_time; 2476 log.u_bbr.flex3 = ack_time; 2477 log.u_bbr.flex4 = where; 2478 log.u_bbr.flex7 = 2; 2479 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2480 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2481 &rack->rc_inp->inp_socket->so_rcv, 2482 &rack->rc_inp->inp_socket->so_snd, 2483 TCP_LOG_RTT, 0, 2484 0, &log, false, &tv); 2485 } 2486 } 2487 2488 2489 2490 static inline void 2491 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2492 { 2493 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2494 union tcp_log_stackspecific log; 2495 struct timeval tv; 2496 2497 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2498 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2499 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2500 log.u_bbr.flex1 = line; 2501 log.u_bbr.flex2 = tick; 2502 log.u_bbr.flex3 = tp->t_maxunacktime; 2503 log.u_bbr.flex4 = tp->t_acktime; 2504 log.u_bbr.flex8 = event; 2505 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2506 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2507 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2508 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2509 log.u_bbr.pacing_gain = rack->r_must_retran; 2510 TCP_LOG_EVENTP(tp, NULL, 2511 &rack->rc_inp->inp_socket->so_rcv, 2512 &rack->rc_inp->inp_socket->so_snd, 2513 BBR_LOG_PROGRESS, 0, 2514 0, &log, false, &tv); 2515 } 2516 } 2517 2518 static void 2519 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2520 { 2521 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2522 union tcp_log_stackspecific log; 2523 2524 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2525 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2526 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2527 log.u_bbr.flex1 = slot; 2528 if (rack->rack_no_prr) 2529 log.u_bbr.flex2 = 0; 2530 else 2531 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2532 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2533 log.u_bbr.flex8 = rack->rc_in_persist; 2534 log.u_bbr.timeStamp = cts; 2535 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2536 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2537 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2538 
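		/*
		 * As in the other logging helpers in this file, pacing_gain
		 * is reused as a spare slot here; it carries r_must_retran,
		 * not an actual pacing gain.
		 */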
log.u_bbr.pacing_gain = rack->r_must_retran; 2539 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2540 &rack->rc_inp->inp_socket->so_rcv, 2541 &rack->rc_inp->inp_socket->so_snd, 2542 BBR_LOG_BBRSND, 0, 2543 0, &log, false, tv); 2544 } 2545 } 2546 2547 static void 2548 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2549 { 2550 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2551 union tcp_log_stackspecific log; 2552 struct timeval tv; 2553 2554 memset(&log, 0, sizeof(log)); 2555 log.u_bbr.flex1 = did_out; 2556 log.u_bbr.flex2 = nxt_pkt; 2557 log.u_bbr.flex3 = way_out; 2558 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2559 if (rack->rack_no_prr) 2560 log.u_bbr.flex5 = 0; 2561 else 2562 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2563 log.u_bbr.flex6 = nsegs; 2564 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2565 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2566 log.u_bbr.flex7 <<= 1; 2567 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2568 log.u_bbr.flex7 <<= 1; 2569 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2570 log.u_bbr.flex8 = rack->rc_in_persist; 2571 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2572 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2573 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2574 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2575 log.u_bbr.use_lt_bw <<= 1; 2576 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2577 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2578 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2579 log.u_bbr.pacing_gain = rack->r_must_retran; 2580 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2581 &rack->rc_inp->inp_socket->so_rcv, 2582 &rack->rc_inp->inp_socket->so_snd, 2583 BBR_LOG_DOSEG_DONE, 0, 2584 0, &log, false, &tv); 2585 } 2586 } 2587 2588 static void 2589 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2590 { 2591 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2592 union tcp_log_stackspecific log; 2593 struct timeval tv; 2594 uint32_t cts; 2595 2596 memset(&log, 0, sizeof(log)); 2597 cts = tcp_get_usecs(&tv); 2598 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2599 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2600 log.u_bbr.flex4 = arg1; 2601 log.u_bbr.flex5 = arg2; 2602 log.u_bbr.flex6 = arg3; 2603 log.u_bbr.flex8 = frm; 2604 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2605 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2606 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2607 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2608 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2609 log.u_bbr.pacing_gain = rack->r_must_retran; 2610 TCP_LOG_EVENTP(tp, NULL, 2611 &tp->t_inpcb->inp_socket->so_rcv, 2612 &tp->t_inpcb->inp_socket->so_snd, 2613 TCP_HDWR_PACE_SIZE, 0, 2614 0, &log, false, &tv); 2615 } 2616 } 2617 2618 static void 2619 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2620 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2621 { 2622 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2623 union tcp_log_stackspecific log; 2624 struct timeval tv; 2625 2626 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2627 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2628 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2629 log.u_bbr.flex1 = slot; 2630 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2631 log.u_bbr.flex4 = reason; 2632 if 
(rack->rack_no_prr) 2633 log.u_bbr.flex5 = 0; 2634 else 2635 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2636 log.u_bbr.flex7 = hpts_calling; 2637 log.u_bbr.flex8 = rack->rc_in_persist; 2638 log.u_bbr.lt_epoch = cwnd_to_use; 2639 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2640 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2641 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2642 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2643 log.u_bbr.pacing_gain = rack->r_must_retran; 2644 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2645 &rack->rc_inp->inp_socket->so_rcv, 2646 &rack->rc_inp->inp_socket->so_snd, 2647 BBR_LOG_JUSTRET, 0, 2648 tlen, &log, false, &tv); 2649 } 2650 } 2651 2652 static void 2653 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2654 struct timeval *tv, uint32_t flags_on_entry) 2655 { 2656 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2657 union tcp_log_stackspecific log; 2658 2659 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2660 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 2661 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 2662 log.u_bbr.flex1 = line; 2663 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2664 log.u_bbr.flex3 = flags_on_entry; 2665 log.u_bbr.flex4 = us_cts; 2666 if (rack->rack_no_prr) 2667 log.u_bbr.flex5 = 0; 2668 else 2669 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2670 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2671 log.u_bbr.flex7 = hpts_removed; 2672 log.u_bbr.flex8 = 1; 2673 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2674 log.u_bbr.timeStamp = us_cts; 2675 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2676 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2677 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2678 log.u_bbr.pacing_gain = rack->r_must_retran; 2679 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2680 &rack->rc_inp->inp_socket->so_rcv, 2681 &rack->rc_inp->inp_socket->so_snd, 2682 BBR_LOG_TIMERCANC, 0, 2683 0, &log, false, tv); 2684 } 2685 } 2686 2687 static void 2688 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2689 uint32_t flex1, uint32_t flex2, 2690 uint32_t flex3, uint32_t flex4, 2691 uint32_t flex5, uint32_t flex6, 2692 uint16_t flex7, uint8_t mod) 2693 { 2694 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2695 union tcp_log_stackspecific log; 2696 struct timeval tv; 2697 2698 if (mod == 1) { 2699 /* No you can't use 1, its for the real to cancel */ 2700 return; 2701 } 2702 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2703 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2704 log.u_bbr.flex1 = flex1; 2705 log.u_bbr.flex2 = flex2; 2706 log.u_bbr.flex3 = flex3; 2707 log.u_bbr.flex4 = flex4; 2708 log.u_bbr.flex5 = flex5; 2709 log.u_bbr.flex6 = flex6; 2710 log.u_bbr.flex7 = flex7; 2711 log.u_bbr.flex8 = mod; 2712 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2713 &rack->rc_inp->inp_socket->so_rcv, 2714 &rack->rc_inp->inp_socket->so_snd, 2715 BBR_LOG_TIMERCANC, 0, 2716 0, &log, false, &tv); 2717 } 2718 } 2719 2720 static void 2721 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2722 { 2723 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2724 union tcp_log_stackspecific log; 2725 struct timeval tv; 2726 2727 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2728 log.u_bbr.flex1 = timers; 2729 log.u_bbr.flex2 = ret; 2730 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2731 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2732 log.u_bbr.flex5 = cts; 2733 if (rack->rack_no_prr) 2734 log.u_bbr.flex6 = 0; 2735 else 2736 log.u_bbr.flex6 
= rack->r_ctl.rc_prr_sndcnt; 2737 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2738 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2739 log.u_bbr.pacing_gain = rack->r_must_retran; 2740 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2741 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2742 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2743 &rack->rc_inp->inp_socket->so_rcv, 2744 &rack->rc_inp->inp_socket->so_snd, 2745 BBR_LOG_TO_PROCESS, 0, 2746 0, &log, false, &tv); 2747 } 2748 } 2749 2750 static void 2751 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd) 2752 { 2753 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2754 union tcp_log_stackspecific log; 2755 struct timeval tv; 2756 2757 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2758 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2759 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2760 if (rack->rack_no_prr) 2761 log.u_bbr.flex3 = 0; 2762 else 2763 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2764 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2765 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2766 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2767 log.u_bbr.flex8 = frm; 2768 log.u_bbr.pkts_out = orig_cwnd; 2769 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2770 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2771 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2772 log.u_bbr.use_lt_bw <<= 1; 2773 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2774 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2775 &rack->rc_inp->inp_socket->so_rcv, 2776 &rack->rc_inp->inp_socket->so_snd, 2777 BBR_LOG_BBRUPD, 0, 2778 0, &log, false, &tv); 2779 } 2780 } 2781 2782 #ifdef NETFLIX_EXP_DETECTION 2783 static void 2784 rack_log_sad(struct tcp_rack *rack, int event) 2785 { 2786 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2787 union tcp_log_stackspecific log; 2788 struct timeval tv; 2789 2790 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2791 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2792 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2793 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2794 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2795 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2796 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2797 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2798 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2799 log.u_bbr.lt_epoch |= rack->do_detection; 2800 log.u_bbr.applimited = tcp_map_minimum; 2801 log.u_bbr.flex7 = rack->sack_attack_disable; 2802 log.u_bbr.flex8 = event; 2803 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2804 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2805 log.u_bbr.delivered = tcp_sad_decay_val; 2806 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2807 &rack->rc_inp->inp_socket->so_rcv, 2808 &rack->rc_inp->inp_socket->so_snd, 2809 TCP_SAD_DETECTION, 0, 2810 0, &log, false, &tv); 2811 } 2812 } 2813 #endif 2814 2815 static void 2816 rack_counter_destroy(void) 2817 { 2818 int i; 2819 2820 counter_u64_free(rack_fto_send); 2821 counter_u64_free(rack_fto_rsm_send); 2822 counter_u64_free(rack_nfto_resend); 2823 counter_u64_free(rack_hw_pace_init_fail); 2824 counter_u64_free(rack_hw_pace_lost); 2825 counter_u64_free(rack_non_fto_send); 2826 counter_u64_free(rack_extended_rfo); 2827 counter_u64_free(rack_ack_total); 2828 counter_u64_free(rack_express_sack); 2829 counter_u64_free(rack_sack_total); 2830 counter_u64_free(rack_move_none); 2831 counter_u64_free(rack_move_some); 2832 counter_u64_free(rack_sack_attacks_detected); 2833 counter_u64_free(rack_sack_attacks_reversed); 2834 
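	/*
	 * Every counter_u64_alloc() and COUNTER_ARRAY_ALLOC() done during
	 * sysctl setup needs a matching free in this function, otherwise
	 * the counters leak when the module is unloaded.
	 */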
counter_u64_free(rack_sack_used_next_merge); 2835 counter_u64_free(rack_sack_used_prev_merge); 2836 counter_u64_free(rack_badfr); 2837 counter_u64_free(rack_badfr_bytes); 2838 counter_u64_free(rack_rtm_prr_retran); 2839 counter_u64_free(rack_rtm_prr_newdata); 2840 counter_u64_free(rack_timestamp_mismatch); 2841 counter_u64_free(rack_find_high); 2842 counter_u64_free(rack_reorder_seen); 2843 counter_u64_free(rack_tlp_tot); 2844 counter_u64_free(rack_tlp_newdata); 2845 counter_u64_free(rack_tlp_retran); 2846 counter_u64_free(rack_tlp_retran_bytes); 2847 counter_u64_free(rack_tlp_retran_fail); 2848 counter_u64_free(rack_to_tot); 2849 counter_u64_free(rack_to_arm_rack); 2850 counter_u64_free(rack_to_arm_tlp); 2851 counter_u64_free(rack_calc_zero); 2852 counter_u64_free(rack_calc_nonzero); 2853 counter_u64_free(rack_paced_segments); 2854 counter_u64_free(rack_unpaced_segments); 2855 counter_u64_free(rack_saw_enobuf); 2856 counter_u64_free(rack_saw_enobuf_hw); 2857 counter_u64_free(rack_saw_enetunreach); 2858 counter_u64_free(rack_hot_alloc); 2859 counter_u64_free(rack_to_alloc); 2860 counter_u64_free(rack_to_alloc_hard); 2861 counter_u64_free(rack_to_alloc_emerg); 2862 counter_u64_free(rack_to_alloc_limited); 2863 counter_u64_free(rack_alloc_limited_conns); 2864 counter_u64_free(rack_split_limited); 2865 for (i = 0; i < MAX_NUM_OF_CNTS; i++) { 2866 counter_u64_free(rack_proc_comp_ack[i]); 2867 } 2868 counter_u64_free(rack_multi_single_eq); 2869 counter_u64_free(rack_proc_non_comp_ack); 2870 counter_u64_free(rack_sack_proc_all); 2871 counter_u64_free(rack_sack_proc_restart); 2872 counter_u64_free(rack_sack_proc_short); 2873 counter_u64_free(rack_enter_tlp_calc); 2874 counter_u64_free(rack_used_tlpmethod); 2875 counter_u64_free(rack_used_tlpmethod2); 2876 counter_u64_free(rack_sack_skipped_acked); 2877 counter_u64_free(rack_sack_splits); 2878 counter_u64_free(rack_progress_drops); 2879 counter_u64_free(rack_input_idle_reduces); 2880 counter_u64_free(rack_collapsed_win); 2881 counter_u64_free(rack_tlp_does_nada); 2882 counter_u64_free(rack_try_scwnd); 2883 counter_u64_free(rack_per_timer_hole); 2884 counter_u64_free(rack_large_ackcmp); 2885 counter_u64_free(rack_small_ackcmp); 2886 #ifdef INVARIANTS 2887 counter_u64_free(rack_adjust_map_bw); 2888 #endif 2889 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2890 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2891 } 2892 2893 static struct rack_sendmap * 2894 rack_alloc(struct tcp_rack *rack) 2895 { 2896 struct rack_sendmap *rsm; 2897 2898 /* 2899 * First get the top of the list it in 2900 * theory is the "hottest" rsm we have, 2901 * possibly just freed by ack processing. 2902 */ 2903 if (rack->rc_free_cnt > rack_free_cache) { 2904 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2905 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2906 counter_u64_add(rack_hot_alloc, 1); 2907 rack->rc_free_cnt--; 2908 return (rsm); 2909 } 2910 /* 2911 * Once we get under our free cache we probably 2912 * no longer have a "hot" one available. Lets 2913 * get one from UMA. 2914 */ 2915 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2916 if (rsm) { 2917 rack->r_ctl.rc_num_maps_alloced++; 2918 counter_u64_add(rack_to_alloc, 1); 2919 return (rsm); 2920 } 2921 /* 2922 * Dig in to our aux rsm's (the last two) since 2923 * UMA failed to get us one. 
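 * (These "aux" entries are simply whatever the fast path above left
 * behind once rc_free_cnt dropped to rack_free_cache; they act as an
 * emergency reserve for when uma_zalloc(M_NOWAIT) fails.)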
2924 */ 2925 if (rack->rc_free_cnt) { 2926 counter_u64_add(rack_to_alloc_emerg, 1); 2927 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2928 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2929 rack->rc_free_cnt--; 2930 return (rsm); 2931 } 2932 return (NULL); 2933 } 2934 2935 static struct rack_sendmap * 2936 rack_alloc_full_limit(struct tcp_rack *rack) 2937 { 2938 if ((V_tcp_map_entries_limit > 0) && 2939 (rack->do_detection == 0) && 2940 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2941 counter_u64_add(rack_to_alloc_limited, 1); 2942 if (!rack->alloc_limit_reported) { 2943 rack->alloc_limit_reported = 1; 2944 counter_u64_add(rack_alloc_limited_conns, 1); 2945 } 2946 return (NULL); 2947 } 2948 return (rack_alloc(rack)); 2949 } 2950 2951 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2952 static struct rack_sendmap * 2953 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2954 { 2955 struct rack_sendmap *rsm; 2956 2957 if (limit_type) { 2958 /* currently there is only one limit type */ 2959 if (V_tcp_map_split_limit > 0 && 2960 (rack->do_detection == 0) && 2961 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2962 counter_u64_add(rack_split_limited, 1); 2963 if (!rack->alloc_limit_reported) { 2964 rack->alloc_limit_reported = 1; 2965 counter_u64_add(rack_alloc_limited_conns, 1); 2966 } 2967 return (NULL); 2968 } 2969 } 2970 2971 /* allocate and mark in the limit type, if set */ 2972 rsm = rack_alloc(rack); 2973 if (rsm != NULL && limit_type) { 2974 rsm->r_limit_type = limit_type; 2975 rack->r_ctl.rc_num_split_allocs++; 2976 } 2977 return (rsm); 2978 } 2979 2980 static void 2981 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2982 { 2983 if (rsm->r_flags & RACK_APP_LIMITED) { 2984 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2985 rack->r_ctl.rc_app_limited_cnt--; 2986 } 2987 } 2988 if (rsm->r_limit_type) { 2989 /* currently there is only one limit type */ 2990 rack->r_ctl.rc_num_split_allocs--; 2991 } 2992 if (rsm == rack->r_ctl.rc_first_appl) { 2993 if (rack->r_ctl.rc_app_limited_cnt == 0) 2994 rack->r_ctl.rc_first_appl = NULL; 2995 else { 2996 /* Follow the next one out */ 2997 struct rack_sendmap fe; 2998 2999 fe.r_start = rsm->r_nseq_appl; 3000 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 3001 } 3002 } 3003 if (rsm == rack->r_ctl.rc_resend) 3004 rack->r_ctl.rc_resend = NULL; 3005 if (rsm == rack->r_ctl.rc_rsm_at_retran) 3006 rack->r_ctl.rc_rsm_at_retran = NULL; 3007 if (rsm == rack->r_ctl.rc_end_appl) 3008 rack->r_ctl.rc_end_appl = NULL; 3009 if (rack->r_ctl.rc_tlpsend == rsm) 3010 rack->r_ctl.rc_tlpsend = NULL; 3011 if (rack->r_ctl.rc_sacklast == rsm) 3012 rack->r_ctl.rc_sacklast = NULL; 3013 memset(rsm, 0, sizeof(struct rack_sendmap)); 3014 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 3015 rack->rc_free_cnt++; 3016 } 3017 3018 static void 3019 rack_free_trim(struct tcp_rack *rack) 3020 { 3021 struct rack_sendmap *rsm; 3022 3023 /* 3024 * Free up all the tail entries until 3025 * we get our list down to the limit. 
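 * (entries beyond rack_free_cache are handed back to UMA; the ones we keep form the hot/emergency pool that rack_alloc() draws from.)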
3026 */ 3027 while (rack->rc_free_cnt > rack_free_cache) { 3028 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 3029 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 3030 rack->rc_free_cnt--; 3031 uma_zfree(rack_zone, rsm); 3032 } 3033 } 3034 3035 3036 static uint32_t 3037 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 3038 { 3039 uint64_t srtt, bw, len, tim; 3040 uint32_t segsiz, def_len, minl; 3041 3042 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3043 def_len = rack_def_data_window * segsiz; 3044 if (rack->rc_gp_filled == 0) { 3045 /* 3046 * We have no measurement (IW is in flight?) so 3047 * we can only guess using our data_window sysctl 3048 * value (usually 100MSS). 3049 */ 3050 return (def_len); 3051 } 3052 /* 3053 * Now we have a number of factors to consider. 3054 * 3055 * 1) We have a desired BDP which is usually 3056 * at least 2. 3057 * 2) We have a minimum number of rtt's usually 1 SRTT 3058 * but we allow it too to be more. 3059 * 3) We want to make sure a measurement last N useconds (if 3060 * we have set rack_min_measure_usec. 3061 * 3062 * We handle the first concern here by trying to create a data 3063 * window of max(rack_def_data_window, DesiredBDP). The 3064 * second concern we handle in not letting the measurement 3065 * window end normally until at least the required SRTT's 3066 * have gone by which is done further below in 3067 * rack_enough_for_measurement(). Finally the third concern 3068 * we also handle here by calculating how long that time 3069 * would take at the current BW and then return the 3070 * max of our first calculation and that length. Note 3071 * that if rack_min_measure_usec is 0, we don't deal 3072 * with concern 3. Also for both Concern 1 and 3 an 3073 * application limited period could end the measurement 3074 * earlier. 3075 * 3076 * So lets calculate the BDP with the "known" b/w using 3077 * the SRTT has our rtt and then multiply it by the 3078 * goal. 3079 */ 3080 bw = rack_get_bw(rack); 3081 srtt = (uint64_t)tp->t_srtt; 3082 len = bw * srtt; 3083 len /= (uint64_t)HPTS_USEC_IN_SEC; 3084 len *= max(1, rack_goal_bdp); 3085 /* Now we need to round up to the nearest MSS */ 3086 len = roundup(len, segsiz); 3087 if (rack_min_measure_usec) { 3088 /* Now calculate our min length for this b/w */ 3089 tim = rack_min_measure_usec; 3090 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3091 if (minl == 0) 3092 minl = 1; 3093 minl = roundup(minl, segsiz); 3094 if (len < minl) 3095 len = minl; 3096 } 3097 /* 3098 * Now if we have a very small window we want 3099 * to attempt to get the window that is 3100 * as small as possible. This happens on 3101 * low b/w connections and we don't want to 3102 * span huge numbers of rtt's between measurements. 3103 * 3104 * We basically include 2 over our "MIN window" so 3105 * that the measurement can be shortened (possibly) by 3106 * an ack'ed packet. 3107 */ 3108 if (len < def_len) 3109 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3110 else 3111 return (max((uint32_t)len, def_len)); 3112 3113 } 3114 3115 static int 3116 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack) 3117 { 3118 uint32_t tim, srtts, segsiz; 3119 3120 /* 3121 * Has enough time passed for the GP measurement to be valid? 
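 * We say yes immediately when everything is acked or the ack lands on the app-limited point; otherwise we require roughly an init-window's worth of data beyond gput_seq and at least rack_min_srtts gp_srtt's of elapsed time.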
3122 */ 3123 if ((tp->snd_max == tp->snd_una) || 3124 (th_ack == tp->snd_max)){ 3125 /* All is acked */ 3126 return (1); 3127 } 3128 if (SEQ_LT(th_ack, tp->gput_seq)) { 3129 /* Not enough bytes yet */ 3130 return (0); 3131 } 3132 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3133 if (SEQ_LT(th_ack, tp->gput_ack) && 3134 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3135 /* Not enough bytes yet */ 3136 return (0); 3137 } 3138 if (rack->r_ctl.rc_first_appl && 3139 (rack->r_ctl.rc_first_appl->r_start == th_ack)) { 3140 /* 3141 * We are up to the app limited point 3142 * we have to measure irrespective of the time.. 3143 */ 3144 return (1); 3145 } 3146 /* Now what about time? */ 3147 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3148 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3149 if (tim >= srtts) { 3150 return (1); 3151 } 3152 /* Nope not even a full SRTT has passed */ 3153 return (0); 3154 } 3155 3156 static void 3157 rack_log_timely(struct tcp_rack *rack, 3158 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3159 uint64_t up_bnd, int line, uint8_t method) 3160 { 3161 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3162 union tcp_log_stackspecific log; 3163 struct timeval tv; 3164 3165 memset(&log, 0, sizeof(log)); 3166 log.u_bbr.flex1 = logged; 3167 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3168 log.u_bbr.flex2 <<= 4; 3169 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3170 log.u_bbr.flex2 <<= 4; 3171 log.u_bbr.flex2 |= rack->rc_gp_incr; 3172 log.u_bbr.flex2 <<= 4; 3173 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3174 log.u_bbr.flex3 = rack->rc_gp_incr; 3175 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3176 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3177 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3178 log.u_bbr.flex7 = rack->rc_gp_bwred; 3179 log.u_bbr.flex8 = method; 3180 log.u_bbr.cur_del_rate = cur_bw; 3181 log.u_bbr.delRate = low_bnd; 3182 log.u_bbr.bw_inuse = up_bnd; 3183 log.u_bbr.rttProp = rack_get_bw(rack); 3184 log.u_bbr.pkt_epoch = line; 3185 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3186 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3187 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3188 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3189 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3190 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3191 log.u_bbr.cwnd_gain <<= 1; 3192 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3193 log.u_bbr.cwnd_gain <<= 1; 3194 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3195 log.u_bbr.cwnd_gain <<= 1; 3196 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3197 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3198 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3199 &rack->rc_inp->inp_socket->so_rcv, 3200 &rack->rc_inp->inp_socket->so_snd, 3201 TCP_TIMELY_WORK, 0, 3202 0, &log, false, &tv); 3203 } 3204 } 3205 3206 static int 3207 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3208 { 3209 /* 3210 * Before we increase we need to know if 3211 * the estimate just made was less than 3212 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3213 * 3214 * If we already are pacing at a fast enough 3215 * rate to push us faster there is no sense of 3216 * increasing. 3217 * 3218 * We first caculate our actual pacing rate (ss or ca multipler 3219 * times our cur_bw). 3220 * 3221 * Then we take the last measured rate and multipy by our 3222 * maximum pacing overage to give us a max allowable rate. 
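 * (for example, with a 10% rack_max_per_above a last measurement of 10Mbps allows pacing up to 11Mbps; if the current multiplier already has us pacing at 12Mbps we hold steady.)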
3223 * 3224 * If our act_rate is smaller than our max_allowable rate 3225 * then we should increase. Else we should hold steady. 3226 * 3227 */ 3228 uint64_t act_rate, max_allow_rate; 3229 3230 if (rack_timely_no_stopping) 3231 return (1); 3232 3233 if ((cur_bw == 0) || (last_bw_est == 0)) { 3234 /* 3235 * Initial startup case or 3236 * everything is acked case. 3237 */ 3238 rack_log_timely(rack, mult, cur_bw, 0, 0, 3239 __LINE__, 9); 3240 return (1); 3241 } 3242 if (mult <= 100) { 3243 /* 3244 * We can always pace at or slightly above our rate. 3245 */ 3246 rack_log_timely(rack, mult, cur_bw, 0, 0, 3247 __LINE__, 9); 3248 return (1); 3249 } 3250 act_rate = cur_bw * (uint64_t)mult; 3251 act_rate /= 100; 3252 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3253 max_allow_rate /= 100; 3254 if (act_rate < max_allow_rate) { 3255 /* 3256 * Here the rate we are actually pacing at 3257 * is smaller than 10% above our last measurement. 3258 * This means we are pacing below what we would 3259 * like to try to achieve (plus some wiggle room). 3260 */ 3261 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3262 __LINE__, 9); 3263 return (1); 3264 } else { 3265 /* 3266 * Here we are already pacing at least rack_max_per_above(10%) 3267 * what we are getting back. This indicates most likely 3268 * that we are being limited (cwnd/rwnd/app) and can't 3269 * get any more b/w. There is no sense of trying to 3270 * raise up the pacing rate its not speeding us up 3271 * and we already are pacing faster than we are getting. 3272 */ 3273 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3274 __LINE__, 8); 3275 return (0); 3276 } 3277 } 3278 3279 static void 3280 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3281 { 3282 /* 3283 * When we drag bottom, we want to assure 3284 * that no multiplier is below 1.0, if so 3285 * we want to restore it to at least that. 3286 */ 3287 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3288 /* This is unlikely we usually do not touch recovery */ 3289 rack->r_ctl.rack_per_of_gp_rec = 100; 3290 } 3291 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3292 rack->r_ctl.rack_per_of_gp_ca = 100; 3293 } 3294 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3295 rack->r_ctl.rack_per_of_gp_ss = 100; 3296 } 3297 } 3298 3299 static void 3300 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3301 { 3302 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3303 rack->r_ctl.rack_per_of_gp_ca = 100; 3304 } 3305 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3306 rack->r_ctl.rack_per_of_gp_ss = 100; 3307 } 3308 } 3309 3310 static void 3311 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3312 { 3313 int32_t calc, logged, plus; 3314 3315 logged = 0; 3316 3317 if (override) { 3318 /* 3319 * override is passed when we are 3320 * loosing b/w and making one last 3321 * gasp at trying to not loose out 3322 * to a new-reno flow. 3323 */ 3324 goto extra_boost; 3325 } 3326 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3327 if (rack->rc_gp_incr && 3328 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3329 /* 3330 * Reset and get 5 strokes more before the boost. Note 3331 * that the count is 0 based so we have to add one. 
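 * The boost itself is rack_gp_increase_per * RACK_TIMELY_CNT_BOOST percentage points applied in one shot, after which rc_gp_timely_inc_cnt starts counting again from zero.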
3332 */ 3333 extra_boost: 3334 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3335 rack->rc_gp_timely_inc_cnt = 0; 3336 } else 3337 plus = (uint32_t)rack_gp_increase_per; 3338 /* Must be at least 1% increase for true timely increases */ 3339 if ((plus < 1) && 3340 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3341 plus = 1; 3342 if (rack->rc_gp_saw_rec && 3343 (rack->rc_gp_no_rec_chg == 0) && 3344 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3345 rack->r_ctl.rack_per_of_gp_rec)) { 3346 /* We have been in recovery ding it too */ 3347 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3348 if (calc > 0xffff) 3349 calc = 0xffff; 3350 logged |= 1; 3351 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3352 if (rack_per_upper_bound_ss && 3353 (rack->rc_dragged_bottom == 0) && 3354 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3355 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3356 } 3357 if (rack->rc_gp_saw_ca && 3358 (rack->rc_gp_saw_ss == 0) && 3359 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3360 rack->r_ctl.rack_per_of_gp_ca)) { 3361 /* In CA */ 3362 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3363 if (calc > 0xffff) 3364 calc = 0xffff; 3365 logged |= 2; 3366 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3367 if (rack_per_upper_bound_ca && 3368 (rack->rc_dragged_bottom == 0) && 3369 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3370 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3371 } 3372 if (rack->rc_gp_saw_ss && 3373 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3374 rack->r_ctl.rack_per_of_gp_ss)) { 3375 /* In SS */ 3376 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3377 if (calc > 0xffff) 3378 calc = 0xffff; 3379 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3380 if (rack_per_upper_bound_ss && 3381 (rack->rc_dragged_bottom == 0) && 3382 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3383 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3384 logged |= 4; 3385 } 3386 if (logged && 3387 (rack->rc_gp_incr == 0)){ 3388 /* Go into increment mode */ 3389 rack->rc_gp_incr = 1; 3390 rack->rc_gp_timely_inc_cnt = 0; 3391 } 3392 if (rack->rc_gp_incr && 3393 logged && 3394 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3395 rack->rc_gp_timely_inc_cnt++; 3396 } 3397 rack_log_timely(rack, logged, plus, 0, 0, 3398 __LINE__, 1); 3399 } 3400 3401 static uint32_t 3402 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3403 { 3404 /* 3405 * norm_grad = rtt_diff / minrtt; 3406 * new_per = curper * (1 - B * norm_grad) 3407 * 3408 * B = rack_gp_decrease_per (default 10%) 3409 * rtt_dif = input var current rtt-diff 3410 * curper = input var current percentage 3411 * minrtt = from rack filter 3412 * 3413 */ 3414 uint64_t perf; 3415 3416 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3417 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3418 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3419 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3420 (uint64_t)1000000)) / 3421 (uint64_t)1000000); 3422 if (perf > curper) { 3423 /* TSNH */ 3424 perf = curper - 1; 3425 } 3426 return ((uint32_t)perf); 3427 } 3428 3429 static uint32_t 3430 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3431 { 3432 /* 3433 * highrttthresh 3434 * result = curper * (1 - (B * ( 1 - ------ )) 3435 * gp_srtt 3436 * 3437 * B = rack_gp_decrease_per (default 10%) 3438 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3439 */ 3440 uint64_t perf; 3441 uint32_t 
highrttthresh; 3442 3443 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3444 3445 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3446 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3447 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3448 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3449 return (perf); 3450 } 3451 3452 static void 3453 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3454 { 3455 uint64_t logvar, logvar2, logvar3; 3456 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3457 3458 if (rack->rc_gp_incr) { 3459 /* Turn off increment counting */ 3460 rack->rc_gp_incr = 0; 3461 rack->rc_gp_timely_inc_cnt = 0; 3462 } 3463 ss_red = ca_red = rec_red = 0; 3464 logged = 0; 3465 /* Calculate the reduction value */ 3466 if (rtt_diff < 0) { 3467 rtt_diff *= -1; 3468 } 3469 /* Must be at least 1% reduction */ 3470 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3471 /* We have been in recovery ding it too */ 3472 if (timely_says == 2) { 3473 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3474 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3475 if (alt < new_per) 3476 val = alt; 3477 else 3478 val = new_per; 3479 } else 3480 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3481 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3482 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3483 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3484 } else { 3485 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3486 rec_red = 0; 3487 } 3488 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3489 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3490 logged |= 1; 3491 } 3492 if (rack->rc_gp_saw_ss) { 3493 /* Sent in SS */ 3494 if (timely_says == 2) { 3495 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3496 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3497 if (alt < new_per) 3498 val = alt; 3499 else 3500 val = new_per; 3501 } else 3502 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3503 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3504 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3505 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3506 } else { 3507 ss_red = new_per; 3508 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3509 logvar = new_per; 3510 logvar <<= 32; 3511 logvar |= alt; 3512 logvar2 = (uint32_t)rtt; 3513 logvar2 <<= 32; 3514 logvar2 |= (uint32_t)rtt_diff; 3515 logvar3 = rack_gp_rtt_maxmul; 3516 logvar3 <<= 32; 3517 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3518 rack_log_timely(rack, timely_says, 3519 logvar2, logvar3, 3520 logvar, __LINE__, 10); 3521 } 3522 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3523 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3524 logged |= 4; 3525 } else if (rack->rc_gp_saw_ca) { 3526 /* Sent in CA */ 3527 if (timely_says == 2) { 3528 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3529 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3530 if (alt < new_per) 3531 val = alt; 3532 else 3533 val = new_per; 3534 } else 3535 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3536 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3537 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3538 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3539 } else { 3540 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3541 ca_red = 0; 3542 logvar = new_per; 3543 logvar <<= 32; 3544 logvar |= alt; 3545 logvar2 = (uint32_t)rtt; 3546 logvar2 <<= 32; 3547 logvar2 |= (uint32_t)rtt_diff; 3548 logvar3 = rack_gp_rtt_maxmul; 3549 logvar3 <<= 32; 3550 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3551 rack_log_timely(rack, timely_says, 3552 logvar2, logvar3, 3553 logvar, __LINE__, 10); 3554 } 3555 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3556 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3557 logged |= 2; 3558 } 3559 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3560 rack->rc_gp_timely_dec_cnt++; 3561 if (rack_timely_dec_clear && 3562 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3563 rack->rc_gp_timely_dec_cnt = 0; 3564 } 3565 logvar = ss_red; 3566 logvar <<= 32; 3567 logvar |= ca_red; 3568 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3569 __LINE__, 2); 3570 } 3571 3572 static void 3573 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3574 uint32_t rtt, uint32_t line, uint8_t reas) 3575 { 3576 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3577 union tcp_log_stackspecific log; 3578 struct timeval tv; 3579 3580 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3581 log.u_bbr.flex1 = line; 3582 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3583 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3584 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3585 log.u_bbr.flex5 = rtt; 3586 log.u_bbr.flex6 = rack->rc_highly_buffered; 3587 log.u_bbr.flex6 <<= 1; 3588 log.u_bbr.flex6 |= rack->forced_ack; 3589 log.u_bbr.flex6 <<= 1; 3590 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3591 log.u_bbr.flex6 <<= 1; 3592 log.u_bbr.flex6 |= rack->in_probe_rtt; 3593 log.u_bbr.flex6 <<= 1; 3594 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3595 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3596 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3597 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3598 log.u_bbr.flex8 = reas; 3599 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3600 log.u_bbr.delRate = rack_get_bw(rack); 3601 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3602 log.u_bbr.cur_del_rate <<= 32; 3603 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3604 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3605 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3606 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3607 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3608 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3609 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3610 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3611 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3612 log.u_bbr.rttProp = us_cts; 3613 log.u_bbr.rttProp <<= 32; 3614 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3615 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3616 &rack->rc_inp->inp_socket->so_rcv, 3617 &rack->rc_inp->inp_socket->so_snd, 3618 BBR_LOG_RTT_SHRINKS, 0, 3619 0, &log, false, &rack->r_ctl.act_rcv_time); 3620 } 3621 } 3622 3623 static void 3624 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3625 { 3626 uint64_t bwdp; 3627 3628 bwdp = rack_get_bw(rack); 3629 bwdp *= (uint64_t)rtt; 3630 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3631 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3632 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3633 /* 3634 * A window protocol must be able to have 4 packets 3635 * outstanding as the floor in order to function 3636 * (especially considering delayed ack :D). 3637 */ 3638 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3639 } 3640 } 3641 3642 static void 3643 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3644 { 3645 /** 3646 * ProbeRTT is a bit different in rack_pacing than in 3647 * BBR. It is like BBR in that it uses the lowering of 3648 * the RTT as a signal that we saw something new and 3649 * counts from there for how long between. But it is 3650 * different in that its quite simple. It does not 3651 * play with the cwnd and wait until we get down 3652 * to N segments outstanding and hold that for 3653 * 200ms. Instead it just sets the pacing reduction 3654 * rate to a set percentage (70 by default) and hold 3655 * that for a number of recent GP Srtt's. 3656 */ 3657 uint32_t segsiz; 3658 3659 if (rack->rc_gp_dyn_mul == 0) 3660 return; 3661 3662 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3663 /* We are idle */ 3664 return; 3665 } 3666 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3667 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3668 /* 3669 * Stop the goodput now, the idea here is 3670 * that future measurements with in_probe_rtt 3671 * won't register if they are not greater so 3672 * we want to get what info (if any) is available 3673 * now. 3674 */ 3675 rack_do_goodput_measurement(rack->rc_tp, rack, 3676 rack->rc_tp->snd_una, __LINE__); 3677 } 3678 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3679 rack->r_ctl.rc_time_probertt_entered = us_cts; 3680 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3681 rack->r_ctl.rc_pace_min_segs); 3682 rack->in_probe_rtt = 1; 3683 rack->measure_saw_probe_rtt = 1; 3684 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3685 rack->r_ctl.rc_time_probertt_starts = 0; 3686 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3687 if (rack_probertt_use_min_rtt_entry) 3688 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3689 else 3690 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3691 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3692 __LINE__, RACK_RTTS_ENTERPROBE); 3693 } 3694 3695 static void 3696 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3697 { 3698 struct rack_sendmap *rsm; 3699 uint32_t segsiz; 3700 3701 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3702 rack->r_ctl.rc_pace_min_segs); 3703 rack->in_probe_rtt = 0; 3704 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3705 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3706 /* 3707 * Stop the goodput now, the idea here is 3708 * that future measurements with in_probe_rtt 3709 * won't register if they are not greater so 3710 * we want to get what info (if any) is available 3711 * now. 3712 */ 3713 rack_do_goodput_measurement(rack->rc_tp, rack, 3714 rack->rc_tp->snd_una, __LINE__); 3715 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3716 /* 3717 * We don't have enough data to make a measurement. 3718 * So lets just stop and start here after exiting 3719 * probe-rtt. We probably are not interested in 3720 * the results anyway. 3721 */ 3722 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3723 } 3724 /* 3725 * Measurements through the current snd_max are going 3726 * to be limited by the slower pacing rate. 3727 * 3728 * We need to mark these as app-limited so we 3729 * don't collapse the b/w. 
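 * The tail rsm below gets RACK_APP_LIMITED and is chained onto the rc_first_appl/rc_end_appl list via r_nseq_appl, the same chain rack_free() advances through as entries are reclaimed.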
3730 */ 3731 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3732 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3733 if (rack->r_ctl.rc_app_limited_cnt == 0) 3734 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3735 else { 3736 /* 3737 * Go out to the end app limited and mark 3738 * this new one as next and move the end_appl up 3739 * to this guy. 3740 */ 3741 if (rack->r_ctl.rc_end_appl) 3742 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3743 rack->r_ctl.rc_end_appl = rsm; 3744 } 3745 rsm->r_flags |= RACK_APP_LIMITED; 3746 rack->r_ctl.rc_app_limited_cnt++; 3747 } 3748 /* 3749 * Now, we need to examine our pacing rate multipliers. 3750 * If its under 100%, we need to kick it back up to 3751 * 100%. We also don't let it be over our "max" above 3752 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3753 * Note setting clamp_atexit_prtt to 0 has the effect 3754 * of setting CA/SS to 100% always at exit (which is 3755 * the default behavior). 3756 */ 3757 if (rack_probertt_clear_is) { 3758 rack->rc_gp_incr = 0; 3759 rack->rc_gp_bwred = 0; 3760 rack->rc_gp_timely_inc_cnt = 0; 3761 rack->rc_gp_timely_dec_cnt = 0; 3762 } 3763 /* Do we do any clamping at exit? */ 3764 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3765 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3766 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3767 } 3768 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3769 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3770 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3771 } 3772 /* 3773 * Lets set rtt_diff to 0, so that we will get a "boost" 3774 * after exiting. 3775 */ 3776 rack->r_ctl.rc_rtt_diff = 0; 3777 3778 /* Clear all flags so we start fresh */ 3779 rack->rc_tp->t_bytes_acked = 0; 3780 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3781 /* 3782 * If configured to, set the cwnd and ssthresh to 3783 * our targets. 3784 */ 3785 if (rack_probe_rtt_sets_cwnd) { 3786 uint64_t ebdp; 3787 uint32_t setto; 3788 3789 /* Set ssthresh so we get into CA once we hit our target */ 3790 if (rack_probertt_use_min_rtt_exit == 1) { 3791 /* Set to min rtt */ 3792 rack_set_prtt_target(rack, segsiz, 3793 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3794 } else if (rack_probertt_use_min_rtt_exit == 2) { 3795 /* Set to current gp rtt */ 3796 rack_set_prtt_target(rack, segsiz, 3797 rack->r_ctl.rc_gp_srtt); 3798 } else if (rack_probertt_use_min_rtt_exit == 3) { 3799 /* Set to entry gp rtt */ 3800 rack_set_prtt_target(rack, segsiz, 3801 rack->r_ctl.rc_entry_gp_rtt); 3802 } else { 3803 uint64_t sum; 3804 uint32_t setval; 3805 3806 sum = rack->r_ctl.rc_entry_gp_rtt; 3807 sum *= 10; 3808 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3809 if (sum >= 20) { 3810 /* 3811 * A highly buffered path needs 3812 * cwnd space for timely to work. 3813 * Lets set things up as if 3814 * we are heading back here again. 3815 */ 3816 setval = rack->r_ctl.rc_entry_gp_rtt; 3817 } else if (sum >= 15) { 3818 /* 3819 * Lets take the smaller of the 3820 * two since we are just somewhat 3821 * buffered. 3822 */ 3823 setval = rack->r_ctl.rc_gp_srtt; 3824 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3825 setval = rack->r_ctl.rc_entry_gp_rtt; 3826 } else { 3827 /* 3828 * Here we are not highly buffered 3829 * and should pick the min we can to 3830 * keep from causing loss. 
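 * (landing here means rc_entry_gp_rtt was under 1.5 times the current gp_srtt, so the queues have largely drained and the filtered min rtt is a safe target.)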
3831 */ 3832 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3833 } 3834 rack_set_prtt_target(rack, segsiz, 3835 setval); 3836 } 3837 if (rack_probe_rtt_sets_cwnd > 1) { 3838 /* There is a percentage here to boost */ 3839 ebdp = rack->r_ctl.rc_target_probertt_flight; 3840 ebdp *= rack_probe_rtt_sets_cwnd; 3841 ebdp /= 100; 3842 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3843 } else 3844 setto = rack->r_ctl.rc_target_probertt_flight; 3845 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3846 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3847 /* Enforce a min */ 3848 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3849 } 3850 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3851 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3852 } 3853 rack_log_rtt_shrinks(rack, us_cts, 3854 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3855 __LINE__, RACK_RTTS_EXITPROBE); 3856 /* Clear times last so log has all the info */ 3857 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3858 rack->r_ctl.rc_time_probertt_entered = us_cts; 3859 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3860 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3861 } 3862 3863 static void 3864 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3865 { 3866 /* Check in on probe-rtt */ 3867 if (rack->rc_gp_filled == 0) { 3868 /* We do not do p-rtt unless we have gp measurements */ 3869 return; 3870 } 3871 if (rack->in_probe_rtt) { 3872 uint64_t no_overflow; 3873 uint32_t endtime, must_stay; 3874 3875 if (rack->r_ctl.rc_went_idle_time && 3876 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3877 /* 3878 * We went idle during prtt, just exit now. 3879 */ 3880 rack_exit_probertt(rack, us_cts); 3881 } else if (rack_probe_rtt_safety_val && 3882 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3883 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3884 /* 3885 * Probe RTT safety value triggered! 3886 */ 3887 rack_log_rtt_shrinks(rack, us_cts, 3888 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3889 __LINE__, RACK_RTTS_SAFETY); 3890 rack_exit_probertt(rack, us_cts); 3891 } 3892 /* Calculate the max we will wait */ 3893 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3894 if (rack->rc_highly_buffered) 3895 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3896 /* Calculate the min we must wait */ 3897 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3898 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3899 TSTMP_LT(us_cts, endtime)) { 3900 uint32_t calc; 3901 /* Do we lower more? 
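 * Each full gp_srtt spent in probe-rtt shaves another rack_per_of_gp_probertt_reduce percentage points off the probertt pacing rate, floored at rack_per_of_gp_lowthresh.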
*/ 3902 no_exit: 3903 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3904 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3905 else 3906 calc = 0; 3907 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3908 if (calc) { 3909 /* Maybe */ 3910 calc *= rack_per_of_gp_probertt_reduce; 3911 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3912 /* Limit it too */ 3913 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3914 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3915 } 3916 /* We must reach target or the time set */ 3917 return; 3918 } 3919 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3920 if ((TSTMP_LT(us_cts, must_stay) && 3921 rack->rc_highly_buffered) || 3922 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3923 rack->r_ctl.rc_target_probertt_flight)) { 3924 /* We are not past the must_stay time */ 3925 goto no_exit; 3926 } 3927 rack_log_rtt_shrinks(rack, us_cts, 3928 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3929 __LINE__, RACK_RTTS_REACHTARGET); 3930 rack->r_ctl.rc_time_probertt_starts = us_cts; 3931 if (rack->r_ctl.rc_time_probertt_starts == 0) 3932 rack->r_ctl.rc_time_probertt_starts = 1; 3933 /* Restore back to our rate we want to pace at in prtt */ 3934 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3935 } 3936 /* 3937 * Setup our end time, some number of gp_srtts plus 200ms. 3938 */ 3939 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3940 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3941 if (rack_probertt_gpsrtt_cnt_div) 3942 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3943 else 3944 endtime = 0; 3945 endtime += rack_min_probertt_hold; 3946 endtime += rack->r_ctl.rc_time_probertt_starts; 3947 if (TSTMP_GEQ(us_cts, endtime)) { 3948 /* yes, exit probertt */ 3949 rack_exit_probertt(rack, us_cts); 3950 } 3951 3952 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3953 /* Go into probertt, its been too long since we went lower */ 3954 rack_enter_probertt(rack, us_cts); 3955 } 3956 } 3957 3958 static void 3959 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3960 uint32_t rtt, int32_t rtt_diff) 3961 { 3962 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3963 uint32_t losses; 3964 3965 if ((rack->rc_gp_dyn_mul == 0) || 3966 (rack->use_fixed_rate) || 3967 (rack->in_probe_rtt) || 3968 (rack->rc_always_pace == 0)) { 3969 /* No dynamic GP multipler in play */ 3970 return; 3971 } 3972 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3973 cur_bw = rack_get_bw(rack); 3974 /* Calculate our up and down range */ 3975 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3976 up_bnd /= 100; 3977 up_bnd += rack->r_ctl.last_gp_comp_bw; 3978 3979 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3980 subfr /= 100; 3981 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3982 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3983 /* 3984 * This is the case where our RTT is above 3985 * the max target and we have been configured 3986 * to just do timely no bonus up stuff in that case. 3987 * 3988 * There are two configurations, set to 1, and we 3989 * just do timely if we are over our max. If its 3990 * set above 1 then we slam the multipliers down 3991 * to 100 and then decrement per timely. 
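 * (the above-1 case is the rack_validate_multipliers_at_or_below_100() call just below, which pre-clamps ss/ca before the timely decrease runs.)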
3992 */ 3993 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3994 __LINE__, 3); 3995 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3996 rack_validate_multipliers_at_or_below_100(rack); 3997 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3998 } else if ((last_bw_est < low_bnd) && !losses) { 3999 /* 4000 * We are decreasing this is a bit complicated this 4001 * means we are loosing ground. This could be 4002 * because another flow entered and we are competing 4003 * for b/w with it. This will push the RTT up which 4004 * makes timely unusable unless we want to get shoved 4005 * into a corner and just be backed off (the age 4006 * old problem with delay based CC). 4007 * 4008 * On the other hand if it was a route change we 4009 * would like to stay somewhat contained and not 4010 * blow out the buffers. 4011 */ 4012 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4013 __LINE__, 3); 4014 rack->r_ctl.last_gp_comp_bw = cur_bw; 4015 if (rack->rc_gp_bwred == 0) { 4016 /* Go into reduction counting */ 4017 rack->rc_gp_bwred = 1; 4018 rack->rc_gp_timely_dec_cnt = 0; 4019 } 4020 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 4021 (timely_says == 0)) { 4022 /* 4023 * Push another time with a faster pacing 4024 * to try to gain back (we include override to 4025 * get a full raise factor). 4026 */ 4027 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 4028 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 4029 (timely_says == 0) || 4030 (rack_down_raise_thresh == 0)) { 4031 /* 4032 * Do an override up in b/w if we were 4033 * below the threshold or if the threshold 4034 * is zero we always do the raise. 4035 */ 4036 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 4037 } else { 4038 /* Log it stays the same */ 4039 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 4040 __LINE__, 11); 4041 } 4042 rack->rc_gp_timely_dec_cnt++; 4043 /* We are not incrementing really no-count */ 4044 rack->rc_gp_incr = 0; 4045 rack->rc_gp_timely_inc_cnt = 0; 4046 } else { 4047 /* 4048 * Lets just use the RTT 4049 * information and give up 4050 * pushing. 4051 */ 4052 goto use_timely; 4053 } 4054 } else if ((timely_says != 2) && 4055 !losses && 4056 (last_bw_est > up_bnd)) { 4057 /* 4058 * We are increasing b/w lets keep going, updating 4059 * our b/w and ignoring any timely input, unless 4060 * of course we are at our max raise (if there is one). 4061 */ 4062 4063 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4064 __LINE__, 3); 4065 rack->r_ctl.last_gp_comp_bw = cur_bw; 4066 if (rack->rc_gp_saw_ss && 4067 rack_per_upper_bound_ss && 4068 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 4069 /* 4070 * In cases where we can't go higher 4071 * we should just use timely. 4072 */ 4073 goto use_timely; 4074 } 4075 if (rack->rc_gp_saw_ca && 4076 rack_per_upper_bound_ca && 4077 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4078 /* 4079 * In cases where we can't go higher 4080 * we should just use timely. 
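 * (the ca multiplier is pinned at rack_per_upper_bound_ca, so a raise would be a no-op; let the rtt gradient decide instead.)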
4081 */ 4082 goto use_timely; 4083 } 4084 rack->rc_gp_bwred = 0; 4085 rack->rc_gp_timely_dec_cnt = 0; 4086 /* You get a set number of pushes if timely is trying to reduce */ 4087 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4088 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4089 } else { 4090 /* Log it stays the same */ 4091 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4092 __LINE__, 12); 4093 } 4094 return; 4095 } else { 4096 /* 4097 * We are staying between the lower and upper range bounds 4098 * so use timely to decide. 4099 */ 4100 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4101 __LINE__, 3); 4102 use_timely: 4103 if (timely_says) { 4104 rack->rc_gp_incr = 0; 4105 rack->rc_gp_timely_inc_cnt = 0; 4106 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4107 !losses && 4108 (last_bw_est < low_bnd)) { 4109 /* We are loosing ground */ 4110 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4111 rack->rc_gp_timely_dec_cnt++; 4112 /* We are not incrementing really no-count */ 4113 rack->rc_gp_incr = 0; 4114 rack->rc_gp_timely_inc_cnt = 0; 4115 } else 4116 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4117 } else { 4118 rack->rc_gp_bwred = 0; 4119 rack->rc_gp_timely_dec_cnt = 0; 4120 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4121 } 4122 } 4123 } 4124 4125 static int32_t 4126 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4127 { 4128 int32_t timely_says; 4129 uint64_t log_mult, log_rtt_a_diff; 4130 4131 log_rtt_a_diff = rtt; 4132 log_rtt_a_diff <<= 32; 4133 log_rtt_a_diff |= (uint32_t)rtt_diff; 4134 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4135 rack_gp_rtt_maxmul)) { 4136 /* Reduce the b/w multipler */ 4137 timely_says = 2; 4138 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4139 log_mult <<= 32; 4140 log_mult |= prev_rtt; 4141 rack_log_timely(rack, timely_says, log_mult, 4142 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4143 log_rtt_a_diff, __LINE__, 4); 4144 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4145 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4146 max(rack_gp_rtt_mindiv , 1)))) { 4147 /* Increase the b/w multipler */ 4148 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4149 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4150 max(rack_gp_rtt_mindiv , 1)); 4151 log_mult <<= 32; 4152 log_mult |= prev_rtt; 4153 timely_says = 0; 4154 rack_log_timely(rack, timely_says, log_mult , 4155 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4156 log_rtt_a_diff, __LINE__, 5); 4157 } else { 4158 /* 4159 * Use a gradient to find it the timely gradient 4160 * is: 4161 * grad = rc_rtt_diff / min_rtt; 4162 * 4163 * anything below or equal to 0 will be 4164 * a increase indication. Anything above 4165 * zero is a decrease. Note we take care 4166 * of the actual gradient calculation 4167 * in the reduction (its not needed for 4168 * increase). 
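 * For example an rtt_diff of +2ms against a 20ms min_rtt is a gradient of 0.1 and signals a decrease, while any rtt_diff at or below zero signals an increase.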
4169 */ 4170 log_mult = prev_rtt; 4171 if (rtt_diff <= 0) { 4172 /* 4173 * Rttdiff is less than zero, increase the 4174 * b/w multipler (its 0 or negative) 4175 */ 4176 timely_says = 0; 4177 rack_log_timely(rack, timely_says, log_mult, 4178 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4179 } else { 4180 /* Reduce the b/w multipler */ 4181 timely_says = 1; 4182 rack_log_timely(rack, timely_says, log_mult, 4183 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4184 } 4185 } 4186 return (timely_says); 4187 } 4188 4189 static void 4190 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4191 tcp_seq th_ack, int line) 4192 { 4193 uint64_t tim, bytes_ps, ltim, stim, utim; 4194 uint32_t segsiz, bytes, reqbytes, us_cts; 4195 int32_t gput, new_rtt_diff, timely_says; 4196 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4197 int did_add = 0; 4198 4199 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4200 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4201 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4202 tim = us_cts - tp->gput_ts; 4203 else 4204 tim = 0; 4205 4206 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4207 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4208 else 4209 stim = 0; 4210 /* 4211 * Use the larger of the send time or ack time. This prevents us 4212 * from being influenced by ack artifacts to come up with too 4213 * high of measurement. Note that since we are spanning over many more 4214 * bytes in most of our measurements hopefully that is less likely to 4215 * occur. 4216 */ 4217 if (tim > stim) 4218 utim = max(tim, 1); 4219 else 4220 utim = max(stim, 1); 4221 /* Lets get a msec time ltim too for the old stuff */ 4222 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4223 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4224 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4225 if ((tim == 0) && (stim == 0)) { 4226 /* 4227 * Invalid measurement time, maybe 4228 * all on one ack/one send? 4229 */ 4230 bytes = 0; 4231 bytes_ps = 0; 4232 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4233 0, 0, 0, 10, __LINE__, NULL); 4234 goto skip_measurement; 4235 } 4236 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4237 /* We never made a us_rtt measurement? */ 4238 bytes = 0; 4239 bytes_ps = 0; 4240 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4241 0, 0, 0, 10, __LINE__, NULL); 4242 goto skip_measurement; 4243 } 4244 /* 4245 * Calculate the maximum possible b/w this connection 4246 * could have. We base our calculation on the lowest 4247 * rtt we have seen during the measurement and the 4248 * largest rwnd the client has given us in that time. This 4249 * forms a BDP that is the maximum that we could ever 4250 * get to the client. Anything larger is not valid. 4251 * 4252 * I originally had code here that rejected measurements 4253 * where the time was less than 1/2 the latest us_rtt. 4254 * But after thinking on that I realized its wrong since 4255 * say you had a 150Mbps or even 1Gbps link, and you 4256 * were a long way away.. example I am in Europe (100ms rtt) 4257 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4258 * bytes my time would be 1.2ms, and yet my rtt would say 4259 * the measurement was invalid the time was < 50ms. The 4260 * same thing is true for 150Mb (8ms of time). 4261 * 4262 * A better way I realized is to look at what the maximum 4263 * the connection could possibly do. 
This is gated on 4264 * the lowest RTT we have seen and the highest rwnd. 4265 * We should in theory never exceed that, if we are 4266 * then something on the path is storing up packets 4267 * and then feeding them all at once to our endpoint 4268 * messing up our measurement. 4269 */ 4270 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4271 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4272 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4273 if (SEQ_LT(th_ack, tp->gput_seq)) { 4274 /* No measurement can be made */ 4275 bytes = 0; 4276 bytes_ps = 0; 4277 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4278 0, 0, 0, 10, __LINE__, NULL); 4279 goto skip_measurement; 4280 } else 4281 bytes = (th_ack - tp->gput_seq); 4282 bytes_ps = (uint64_t)bytes; 4283 /* 4284 * Don't measure a b/w for pacing unless we have gotten at least 4285 * an initial windows worth of data in this measurement interval. 4286 * 4287 * Small numbers of bytes get badly influenced by delayed ack and 4288 * other artifacts. Note we take the initial window or our 4289 * defined minimum GP (defaulting to 10 which hopefully is the 4290 * IW). 4291 */ 4292 if (rack->rc_gp_filled == 0) { 4293 /* 4294 * The initial estimate is special. We 4295 * have blasted out an IW worth of packets 4296 * without a real valid ack ts results. We 4297 * then setup the app_limited_needs_set flag, 4298 * this should get the first ack in (probably 2 4299 * MSS worth) to be recorded as the timestamp. 4300 * We thus allow a smaller number of bytes i.e. 4301 * IW - 2MSS. 4302 */ 4303 reqbytes -= (2 * segsiz); 4304 /* Also lets fill previous for our first measurement to be neutral */ 4305 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4306 } 4307 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4308 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4309 rack->r_ctl.rc_app_limited_cnt, 4310 0, 0, 10, __LINE__, NULL); 4311 goto skip_measurement; 4312 } 4313 /* 4314 * We now need to calculate the Timely like status so 4315 * we can update (possibly) the b/w multipliers. 4316 */ 4317 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4318 if (rack->rc_gp_filled == 0) { 4319 /* No previous reading */ 4320 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4321 } else { 4322 if (rack->measure_saw_probe_rtt == 0) { 4323 /* 4324 * We don't want a probertt to be counted 4325 * since it will be negative incorrectly. We 4326 * expect to be reducing the RTT when we 4327 * pace at a slower rate. 4328 */ 4329 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4330 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4331 } 4332 } 4333 timely_says = rack_make_timely_judgement(rack, 4334 rack->r_ctl.rc_gp_srtt, 4335 rack->r_ctl.rc_rtt_diff, 4336 rack->r_ctl.rc_prev_gp_srtt 4337 ); 4338 bytes_ps *= HPTS_USEC_IN_SEC; 4339 bytes_ps /= utim; 4340 if (bytes_ps > rack->r_ctl.last_max_bw) { 4341 /* 4342 * Something is on path playing 4343 * since this b/w is not possible based 4344 * on our BDP (highest rwnd and lowest rtt 4345 * we saw in the measurement window). 4346 * 4347 * Another option here would be to 4348 * instead skip the measurement. 
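 * Instead we clamp bytes_ps to last_max_bw below, so one burst of compressed acks cannot inflate the WMA.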
4349 */ 4350 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4351 bytes_ps, rack->r_ctl.last_max_bw, 0, 4352 11, __LINE__, NULL); 4353 bytes_ps = rack->r_ctl.last_max_bw; 4354 } 4355 /* We store gp for b/w in bytes per second */ 4356 if (rack->rc_gp_filled == 0) { 4357 /* Initial measurment */ 4358 if (bytes_ps) { 4359 rack->r_ctl.gp_bw = bytes_ps; 4360 rack->rc_gp_filled = 1; 4361 rack->r_ctl.num_measurements = 1; 4362 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4363 } else { 4364 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4365 rack->r_ctl.rc_app_limited_cnt, 4366 0, 0, 10, __LINE__, NULL); 4367 } 4368 if (rack->rc_inp->inp_in_hpts && 4369 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4370 /* 4371 * Ok we can't trust the pacer in this case 4372 * where we transition from un-paced to paced. 4373 * Or for that matter when the burst mitigation 4374 * was making a wild guess and got it wrong. 4375 * Stop the pacer and clear up all the aggregate 4376 * delays etc. 4377 */ 4378 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 4379 rack->r_ctl.rc_hpts_flags = 0; 4380 rack->r_ctl.rc_last_output_to = 0; 4381 } 4382 did_add = 2; 4383 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4384 /* Still a small number run an average */ 4385 rack->r_ctl.gp_bw += bytes_ps; 4386 addpart = rack->r_ctl.num_measurements; 4387 rack->r_ctl.num_measurements++; 4388 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4389 /* We have collected enought to move forward */ 4390 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4391 } 4392 did_add = 3; 4393 } else { 4394 /* 4395 * We want to take 1/wma of the goodput and add in to 7/8th 4396 * of the old value weighted by the srtt. So if your measurement 4397 * period is say 2 SRTT's long you would get 1/4 as the 4398 * value, if it was like 1/2 SRTT then you would get 1/16th. 4399 * 4400 * But we must be careful not to take too much i.e. if the 4401 * srtt is say 20ms and the measurement is taken over 4402 * 400ms our weight would be 400/20 i.e. 20. On the 4403 * other hand if we get a measurement over 1ms with a 4404 * 10ms rtt we only want to take a much smaller portion. 4405 */ 4406 if (rack->r_ctl.num_measurements < 0xff) { 4407 rack->r_ctl.num_measurements++; 4408 } 4409 srtt = (uint64_t)tp->t_srtt; 4410 if (srtt == 0) { 4411 /* 4412 * Strange why did t_srtt go back to zero? 4413 */ 4414 if (rack->r_ctl.rc_rack_min_rtt) 4415 srtt = rack->r_ctl.rc_rack_min_rtt; 4416 else 4417 srtt = HPTS_USEC_IN_MSEC; 4418 } 4419 /* 4420 * XXXrrs: Note for reviewers, in playing with 4421 * dynamic pacing I discovered this GP calculation 4422 * as done originally leads to some undesired results. 4423 * Basically you can get longer measurements contributing 4424 * too much to the WMA. Thus I changed it if you are doing 4425 * dynamic adjustments to only do the aportioned adjustment 4426 * if we have a very small (time wise) measurement. Longer 4427 * measurements just get there weight (defaulting to 1/8) 4428 * add to the WMA. We may want to think about changing 4429 * this to always do that for both sides i.e. dynamic 4430 * and non-dynamic... but considering lots of folks 4431 * were playing with this I did not want to change the 4432 * calculation per.se. without your thoughts.. Lawerence? 4433 * Peter?? 
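 * For reference, the effective sample weights below work out to utim/(8*srtt) capped at 1/2 on the non-dynamic path, utim/(rack_wma_divisor*srtt) on the dynamic path when (utim/srtt) <= 1, and a flat 1/rack_wma_divisor otherwise.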
4434 */ 4435 if (rack->rc_gp_dyn_mul == 0) { 4436 subpart = rack->r_ctl.gp_bw * utim; 4437 subpart /= (srtt * 8); 4438 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4439 /* 4440 * The b/w update takes no more 4441 * away then 1/2 our running total 4442 * so factor it in. 4443 */ 4444 addpart = bytes_ps * utim; 4445 addpart /= (srtt * 8); 4446 } else { 4447 /* 4448 * Don't allow a single measurement 4449 * to account for more than 1/2 of the 4450 * WMA. This could happen on a retransmission 4451 * where utim becomes huge compared to 4452 * srtt (multiple retransmissions when using 4453 * the sending rate which factors in all the 4454 * transmissions from the first one). 4455 */ 4456 subpart = rack->r_ctl.gp_bw / 2; 4457 addpart = bytes_ps / 2; 4458 } 4459 resid_bw = rack->r_ctl.gp_bw - subpart; 4460 rack->r_ctl.gp_bw = resid_bw + addpart; 4461 did_add = 1; 4462 } else { 4463 if ((utim / srtt) <= 1) { 4464 /* 4465 * The b/w update was over a small period 4466 * of time. The idea here is to prevent a small 4467 * measurement time period from counting 4468 * too much. So we scale it based on the 4469 * time so it attributes less than 1/rack_wma_divisor 4470 * of its measurement. 4471 */ 4472 subpart = rack->r_ctl.gp_bw * utim; 4473 subpart /= (srtt * rack_wma_divisor); 4474 addpart = bytes_ps * utim; 4475 addpart /= (srtt * rack_wma_divisor); 4476 } else { 4477 /* 4478 * The scaled measurement was long 4479 * enough so lets just add in the 4480 * portion of the measurment i.e. 1/rack_wma_divisor 4481 */ 4482 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4483 addpart = bytes_ps / rack_wma_divisor; 4484 } 4485 if ((rack->measure_saw_probe_rtt == 0) || 4486 (bytes_ps > rack->r_ctl.gp_bw)) { 4487 /* 4488 * For probe-rtt we only add it in 4489 * if its larger, all others we just 4490 * add in. 4491 */ 4492 did_add = 1; 4493 resid_bw = rack->r_ctl.gp_bw - subpart; 4494 rack->r_ctl.gp_bw = resid_bw + addpart; 4495 } 4496 } 4497 } 4498 if ((rack->gp_ready == 0) && 4499 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4500 /* We have enough measurements now */ 4501 rack->gp_ready = 1; 4502 rack_set_cc_pacing(rack); 4503 if (rack->defer_options) 4504 rack_apply_deferred_options(rack); 4505 } 4506 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4507 rack_get_bw(rack), 22, did_add, NULL); 4508 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4509 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4510 rack_update_multiplier(rack, timely_says, bytes_ps, 4511 rack->r_ctl.rc_gp_srtt, 4512 rack->r_ctl.rc_rtt_diff); 4513 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4514 rack_get_bw(rack), 3, line, NULL); 4515 /* reset the gp srtt and setup the new prev */ 4516 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4517 /* Record the lost count for the next measurement */ 4518 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4519 /* 4520 * We restart our diffs based on the gpsrtt in the 4521 * measurement window. 4522 */ 4523 rack->rc_gp_rtt_set = 0; 4524 rack->rc_gp_saw_rec = 0; 4525 rack->rc_gp_saw_ca = 0; 4526 rack->rc_gp_saw_ss = 0; 4527 rack->rc_dragged_bottom = 0; 4528 skip_measurement: 4529 4530 #ifdef STATS 4531 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4532 gput); 4533 /* 4534 * XXXLAS: This is a temporary hack, and should be 4535 * chained off VOI_TCP_GPUT when stats(9) grows an 4536 * API to deal with chained VOIs. 
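 * The value recorded below is the percent change in goodput versus the previous sample (t_stats_gput_prev).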
4537 */ 4538 if (tp->t_stats_gput_prev > 0) 4539 stats_voi_update_abs_s32(tp->t_stats, 4540 VOI_TCP_GPUT_ND, 4541 ((gput - tp->t_stats_gput_prev) * 100) / 4542 tp->t_stats_gput_prev); 4543 #endif 4544 tp->t_flags &= ~TF_GPUTINPROG; 4545 tp->t_stats_gput_prev = gput; 4546 /* 4547 * Now are we app limited now and there is space from where we 4548 * were to where we want to go? 4549 * 4550 * We don't do the other case i.e. non-applimited here since 4551 * the next send will trigger us picking up the missing data. 4552 */ 4553 if (rack->r_ctl.rc_first_appl && 4554 TCPS_HAVEESTABLISHED(tp->t_state) && 4555 rack->r_ctl.rc_app_limited_cnt && 4556 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4557 ((rack->r_ctl.rc_first_appl->r_start - th_ack) > 4558 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4559 /* 4560 * Yep there is enough outstanding to make a measurement here. 4561 */ 4562 struct rack_sendmap *rsm, fe; 4563 4564 tp->t_flags |= TF_GPUTINPROG; 4565 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4566 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4567 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4568 rack->app_limited_needs_set = 0; 4569 tp->gput_seq = th_ack; 4570 if (rack->in_probe_rtt) 4571 rack->measure_saw_probe_rtt = 1; 4572 else if ((rack->measure_saw_probe_rtt) && 4573 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4574 rack->measure_saw_probe_rtt = 0; 4575 if ((rack->r_ctl.rc_first_appl->r_start - th_ack) >= rack_get_measure_window(tp, rack)) { 4576 /* There is a full window to gain info from */ 4577 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4578 } else { 4579 /* We can only measure up to the applimited point */ 4580 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_start - th_ack); 4581 } 4582 /* 4583 * Now we need to find the timestamp of the send at tp->gput_seq 4584 * for the send based measurement. 4585 */ 4586 fe.r_start = tp->gput_seq; 4587 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4588 if (rsm) { 4589 /* Ok send-based limit is set */ 4590 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4591 /* 4592 * Move back to include the earlier part 4593 * so our ack time lines up right (this may 4594 * make an overlapping measurement but thats 4595 * ok). 4596 */ 4597 tp->gput_seq = rsm->r_start; 4598 } 4599 if (rsm->r_flags & RACK_ACKED) 4600 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4601 else 4602 rack->app_limited_needs_set = 1; 4603 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4604 } else { 4605 /* 4606 * If we don't find the rsm due to some 4607 * send-limit set the current time, which 4608 * basically disables the send-limit. 
4609 */ 4610 struct timeval tv; 4611 4612 microuptime(&tv); 4613 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4614 } 4615 rack_log_pacing_delay_calc(rack, 4616 tp->gput_seq, 4617 tp->gput_ack, 4618 (uint64_t)rsm, 4619 tp->gput_ts, 4620 rack->r_ctl.rc_app_limited_cnt, 4621 9, 4622 __LINE__, NULL); 4623 } 4624 } 4625 4626 /* 4627 * CC wrapper hook functions 4628 */ 4629 static void 4630 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4631 uint16_t type, int32_t recovery) 4632 { 4633 uint32_t prior_cwnd, acked; 4634 struct tcp_log_buffer *lgb = NULL; 4635 uint8_t labc_to_use; 4636 4637 INP_WLOCK_ASSERT(tp->t_inpcb); 4638 tp->ccv->nsegs = nsegs; 4639 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4640 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4641 uint32_t max; 4642 4643 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4644 if (tp->ccv->bytes_this_ack > max) { 4645 tp->ccv->bytes_this_ack = max; 4646 } 4647 } 4648 #ifdef STATS 4649 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4650 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4651 #endif 4652 if ((tp->t_flags & TF_GPUTINPROG) && 4653 rack_enough_for_measurement(tp, rack, th_ack)) { 4654 /* Measure the Goodput */ 4655 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__); 4656 #ifdef NETFLIX_PEAKRATE 4657 if ((type == CC_ACK) && 4658 (tp->t_maxpeakrate)) { 4659 /* 4660 * We update t_peakrate_thr. This gives us roughly 4661 * one update per round trip time. Note 4662 * it will only be used if pace_always is off i.e 4663 * we don't do this for paced flows. 4664 */ 4665 rack_update_peakrate_thr(tp); 4666 } 4667 #endif 4668 } 4669 /* Which way our we limited, if not cwnd limited no advance in CA */ 4670 if (tp->snd_cwnd <= tp->snd_wnd) 4671 tp->ccv->flags |= CCF_CWND_LIMITED; 4672 else 4673 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4674 if (tp->snd_cwnd > tp->snd_ssthresh) { 4675 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4676 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4677 /* For the setting of a window past use the actual scwnd we are using */ 4678 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4679 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4680 tp->ccv->flags |= CCF_ABC_SENTAWND; 4681 } 4682 } else { 4683 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4684 tp->t_bytes_acked = 0; 4685 } 4686 prior_cwnd = tp->snd_cwnd; 4687 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4688 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4689 labc_to_use = rack->rc_labc; 4690 else 4691 labc_to_use = rack_max_abc_post_recovery; 4692 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4693 union tcp_log_stackspecific log; 4694 struct timeval tv; 4695 4696 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4697 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4698 log.u_bbr.flex1 = th_ack; 4699 log.u_bbr.flex2 = tp->ccv->flags; 4700 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4701 log.u_bbr.flex4 = tp->ccv->nsegs; 4702 log.u_bbr.flex5 = labc_to_use; 4703 log.u_bbr.flex6 = prior_cwnd; 4704 log.u_bbr.flex7 = V_tcp_do_newsack; 4705 log.u_bbr.flex8 = 1; 4706 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4707 0, &log, false, NULL, NULL, 0, &tv); 4708 } 4709 if (CC_ALGO(tp)->ack_received != NULL) { 4710 /* XXXLAS: Find a way to live without this */ 4711 tp->ccv->curack = th_ack; 4712 tp->ccv->labc = labc_to_use; 4713 tp->ccv->flags |= CCF_USE_LOCAL_ABC; 4714 
CC_ALGO(tp)->ack_received(tp->ccv, type); 4715 } 4716 if (lgb) { 4717 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4718 } 4719 if (rack->r_must_retran) { 4720 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4721 /* 4722 * We now are beyond the rxt point so lets disable 4723 * the flag. 4724 */ 4725 rack->r_ctl.rc_out_at_rto = 0; 4726 rack->r_must_retran = 0; 4727 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4728 /* 4729 * Only decrement the rc_out_at_rto if the cwnd advances 4730 * at least a whole segment. Otherwise next time the peer 4731 * acks, we won't be able to send this generaly happens 4732 * when we are in Congestion Avoidance. 4733 */ 4734 if (acked <= rack->r_ctl.rc_out_at_rto){ 4735 rack->r_ctl.rc_out_at_rto -= acked; 4736 } else { 4737 rack->r_ctl.rc_out_at_rto = 0; 4738 } 4739 } 4740 } 4741 #ifdef STATS 4742 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4743 #endif 4744 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4745 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4746 } 4747 #ifdef NETFLIX_PEAKRATE 4748 /* we enforce max peak rate if it is set and we are not pacing */ 4749 if ((rack->rc_always_pace == 0) && 4750 tp->t_peakrate_thr && 4751 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4752 tp->snd_cwnd = tp->t_peakrate_thr; 4753 } 4754 #endif 4755 } 4756 4757 static void 4758 tcp_rack_partialack(struct tcpcb *tp) 4759 { 4760 struct tcp_rack *rack; 4761 4762 rack = (struct tcp_rack *)tp->t_fb_ptr; 4763 INP_WLOCK_ASSERT(tp->t_inpcb); 4764 /* 4765 * If we are doing PRR and have enough 4766 * room to send <or> we are pacing and prr 4767 * is disabled we will want to see if we 4768 * can send data (by setting r_wanted_output to 4769 * true). 4770 */ 4771 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4772 rack->rack_no_prr) 4773 rack->r_wanted_output = 1; 4774 } 4775 4776 static void 4777 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4778 { 4779 struct tcp_rack *rack; 4780 uint32_t orig_cwnd; 4781 4782 orig_cwnd = tp->snd_cwnd; 4783 INP_WLOCK_ASSERT(tp->t_inpcb); 4784 rack = (struct tcp_rack *)tp->t_fb_ptr; 4785 /* only alert CC if we alerted when we entered */ 4786 if (CC_ALGO(tp)->post_recovery != NULL) { 4787 tp->ccv->curack = th_ack; 4788 CC_ALGO(tp)->post_recovery(tp->ccv); 4789 if (tp->snd_cwnd < tp->snd_ssthresh) { 4790 /* 4791 * Rack has burst control and pacing 4792 * so lets not set this any lower than 4793 * snd_ssthresh per RFC-6582 (option 2). 4794 */ 4795 tp->snd_cwnd = tp->snd_ssthresh; 4796 } 4797 } 4798 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4799 union tcp_log_stackspecific log; 4800 struct timeval tv; 4801 4802 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4803 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4804 log.u_bbr.flex1 = th_ack; 4805 log.u_bbr.flex2 = tp->ccv->flags; 4806 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4807 log.u_bbr.flex4 = tp->ccv->nsegs; 4808 log.u_bbr.flex5 = V_tcp_abc_l_var; 4809 log.u_bbr.flex6 = orig_cwnd; 4810 log.u_bbr.flex7 = V_tcp_do_newsack; 4811 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4812 log.u_bbr.flex8 = 2; 4813 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4814 0, &log, false, NULL, NULL, 0, &tv); 4815 } 4816 if ((rack->rack_no_prr == 0) && 4817 (rack->no_prr_addback == 0) && 4818 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4819 /* 4820 * Suck the next prr cnt back into cwnd, but 4821 * only do that if we are not application limited. 
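/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * add-back in the block just below returns un-sent PRR credit to the
 * congestion window, bounded by a configurable number of segments (a
 * limit of 0 disables the add-back entirely).  prr_addback_amount() is a
 * hypothetical helper.
 */
static uint32_t
prr_addback_amount(uint32_t maxseg, uint32_t addback_max_segs,
    uint32_t prr_sndcnt_left)
{
	uint32_t cap = maxseg * addback_max_segs;

	return ((prr_sndcnt_left < cap) ? prr_sndcnt_left : cap);
}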
4822 */ 4823 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4824 /* 4825 * We are allowed to add back to the cwnd the amount we did 4826 * not get out if: 4827 * a) no_prr_addback is off. 4828 * b) we are not app limited 4829 * c) we are doing prr 4830 * <and> 4831 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4832 */ 4833 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4834 rack->r_ctl.rc_prr_sndcnt); 4835 } 4836 rack->r_ctl.rc_prr_sndcnt = 0; 4837 rack_log_to_prr(rack, 1, 0); 4838 } 4839 rack_log_to_prr(rack, 14, orig_cwnd); 4840 tp->snd_recover = tp->snd_una; 4841 EXIT_RECOVERY(tp->t_flags); 4842 } 4843 4844 static void 4845 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack) 4846 { 4847 struct tcp_rack *rack; 4848 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4849 4850 INP_WLOCK_ASSERT(tp->t_inpcb); 4851 #ifdef STATS 4852 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4853 #endif 4854 if (IN_RECOVERY(tp->t_flags) == 0) { 4855 in_rec_at_entry = 0; 4856 ssthresh_enter = tp->snd_ssthresh; 4857 cwnd_enter = tp->snd_cwnd; 4858 } else 4859 in_rec_at_entry = 1; 4860 rack = (struct tcp_rack *)tp->t_fb_ptr; 4861 switch (type) { 4862 case CC_NDUPACK: 4863 tp->t_flags &= ~TF_WASFRECOVERY; 4864 tp->t_flags &= ~TF_WASCRECOVERY; 4865 if (!IN_FASTRECOVERY(tp->t_flags)) { 4866 rack->r_ctl.rc_prr_delivered = 0; 4867 rack->r_ctl.rc_prr_out = 0; 4868 if (rack->rack_no_prr == 0) { 4869 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4870 rack_log_to_prr(rack, 2, in_rec_at_entry); 4871 } 4872 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4873 tp->snd_recover = tp->snd_max; 4874 if (tp->t_flags2 & TF2_ECN_PERMIT) 4875 tp->t_flags2 |= TF2_ECN_SND_CWR; 4876 } 4877 break; 4878 case CC_ECN: 4879 if (!IN_CONGRECOVERY(tp->t_flags) || 4880 /* 4881 * Allow ECN reaction on ACK to CWR, if 4882 * that data segment was also CE marked. 4883 */ 4884 SEQ_GEQ(ack, tp->snd_recover)) { 4885 EXIT_CONGRECOVERY(tp->t_flags); 4886 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4887 tp->snd_recover = tp->snd_max + 1; 4888 if (tp->t_flags2 & TF2_ECN_PERMIT) 4889 tp->t_flags2 |= TF2_ECN_SND_CWR; 4890 } 4891 break; 4892 case CC_RTO: 4893 tp->t_dupacks = 0; 4894 tp->t_bytes_acked = 0; 4895 EXIT_RECOVERY(tp->t_flags); 4896 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4897 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4898 orig_cwnd = tp->snd_cwnd; 4899 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4900 rack_log_to_prr(rack, 16, orig_cwnd); 4901 if (tp->t_flags2 & TF2_ECN_PERMIT) 4902 tp->t_flags2 |= TF2_ECN_SND_CWR; 4903 break; 4904 case CC_RTO_ERR: 4905 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4906 /* RTO was unnecessary, so reset everything. 
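/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * CC_RTO case above sets ssthresh to half of the effective window,
 * floored at two segments and rounded down to a segment boundary, and
 * restarts cwnd from a single segment.  rto_collapse() is a hypothetical
 * helper and assumes maxseg > 0.
 */
static void
rto_collapse(uint32_t snd_wnd, uint32_t cwnd_in_use, uint32_t maxseg,
    uint32_t *ssthresh_out, uint32_t *cwnd_out)
{
	uint32_t flight = (snd_wnd < cwnd_in_use) ? snd_wnd : cwnd_in_use;
	uint32_t segs = flight / 2 / maxseg;

	if (segs < 2)
		segs = 2;
	*ssthresh_out = segs * maxseg;
	*cwnd_out = maxseg;
}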
*/ 4907 tp->snd_cwnd = tp->snd_cwnd_prev; 4908 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4909 tp->snd_recover = tp->snd_recover_prev; 4910 if (tp->t_flags & TF_WASFRECOVERY) { 4911 ENTER_FASTRECOVERY(tp->t_flags); 4912 tp->t_flags &= ~TF_WASFRECOVERY; 4913 } 4914 if (tp->t_flags & TF_WASCRECOVERY) { 4915 ENTER_CONGRECOVERY(tp->t_flags); 4916 tp->t_flags &= ~TF_WASCRECOVERY; 4917 } 4918 tp->snd_nxt = tp->snd_max; 4919 tp->t_badrxtwin = 0; 4920 break; 4921 } 4922 if ((CC_ALGO(tp)->cong_signal != NULL) && 4923 (type != CC_RTO)){ 4924 tp->ccv->curack = ack; 4925 CC_ALGO(tp)->cong_signal(tp->ccv, type); 4926 } 4927 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4928 rack_log_to_prr(rack, 15, cwnd_enter); 4929 rack->r_ctl.dsack_byte_cnt = 0; 4930 rack->r_ctl.retran_during_recovery = 0; 4931 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4932 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4933 rack->r_ent_rec_ns = 1; 4934 } 4935 } 4936 4937 static inline void 4938 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4939 { 4940 uint32_t i_cwnd; 4941 4942 INP_WLOCK_ASSERT(tp->t_inpcb); 4943 4944 #ifdef NETFLIX_STATS 4945 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4946 if (tp->t_state == TCPS_ESTABLISHED) 4947 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4948 #endif 4949 if (CC_ALGO(tp)->after_idle != NULL) 4950 CC_ALGO(tp)->after_idle(tp->ccv); 4951 4952 if (tp->snd_cwnd == 1) 4953 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4954 else 4955 i_cwnd = rc_init_window(rack); 4956 4957 /* 4958 * Being idle is no differnt than the initial window. If the cc 4959 * clamps it down below the initial window raise it to the initial 4960 * window. 4961 */ 4962 if (tp->snd_cwnd < i_cwnd) { 4963 tp->snd_cwnd = i_cwnd; 4964 } 4965 } 4966 4967 /* 4968 * Indicate whether this ack should be delayed. We can delay the ack if 4969 * following conditions are met: 4970 * - There is no delayed ack timer in progress. 4971 * - Our last ack wasn't a 0-sized window. We never want to delay 4972 * the ack that opens up a 0-sized window. 4973 * - LRO wasn't used for this segment. We make sure by checking that the 4974 * segment size is not larger than the MSS. 4975 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4976 * connection. 4977 */ 4978 #define DELAY_ACK(tp, tlen) \ 4979 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4980 ((tp->t_flags & TF_DELACK) == 0) && \ 4981 (tlen <= tp->t_maxseg) && \ 4982 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4983 4984 static struct rack_sendmap * 4985 rack_find_lowest_rsm(struct tcp_rack *rack) 4986 { 4987 struct rack_sendmap *rsm; 4988 4989 /* 4990 * Walk the time-order transmitted list looking for an rsm that is 4991 * not acked. This will be the one that was sent the longest time 4992 * ago that is still outstanding. 4993 */ 4994 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4995 if (rsm->r_flags & RACK_ACKED) { 4996 continue; 4997 } 4998 goto finish; 4999 } 5000 finish: 5001 return (rsm); 5002 } 5003 5004 static struct rack_sendmap * 5005 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 5006 { 5007 struct rack_sendmap *prsm; 5008 5009 /* 5010 * Walk the sequence order list backward until we hit and arrive at 5011 * the highest seq not acked. In theory when this is called it 5012 * should be the last segment (which it was not). 
 */
	counter_u64_add(rack_find_high, 1);
	prsm = rsm;
	RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
		if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
			continue;
		}
		return (prsm);
	}
	return (NULL);
}

static uint32_t
rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
{
	int32_t lro;
	uint32_t thresh;

	/*
	 * lro is the flag we use to determine if we have seen reordering.
	 * If it gets set we have seen reordering. The reorder logic either
	 * works in one of two ways:
	 *
	 * If reorder-fade is configured, then we track the last time we saw
	 * re-ordering occur. If we reach the point where enough time has
	 * passed we no longer consider reordering to be occurring.
	 *
	 * Or if reorder-fade is 0, then once we see reordering we consider
	 * the connection to always be subject to reordering and just set lro
	 * to 1.
	 *
	 * In the end if lro is non-zero we add the extra time for
	 * reordering in.
	 */
	if (srtt == 0)
		srtt = 1;
	if (rack->r_ctl.rc_reorder_ts) {
		if (rack->r_ctl.rc_reorder_fade) {
			if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
				lro = cts - rack->r_ctl.rc_reorder_ts;
				if (lro == 0) {
					/*
					 * No time has passed since the last
					 * reorder, mark it as reordering.
					 */
					lro = 1;
				}
			} else {
				/* Negative time? */
				lro = 0;
			}
			if (lro > rack->r_ctl.rc_reorder_fade) {
				/* Turn off reordering seen too */
				rack->r_ctl.rc_reorder_ts = 0;
				lro = 0;
			}
		} else {
			/* Reordering does not fade */
			lro = 1;
		}
	} else {
		lro = 0;
	}
	thresh = srtt + rack->r_ctl.rc_pkt_delay;
	if (lro) {
		/* If the reorder shift is not set you get 1/4 rtt */
		if (rack->r_ctl.rc_reorder_shift)
			thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
		else
			thresh += (srtt >> 2);
	} else {
		thresh += 1;
	}
	/* We don't let the rack timeout be above a RTO */
	if (thresh > rack->rc_tp->t_rxtcur) {
		thresh = rack->rc_tp->t_rxtcur;
	}
	/* And we don't want it above the RTO max either */
	if (thresh > rack_rto_max) {
		thresh = rack_rto_max;
	}
	return (thresh);
}

static uint32_t
rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint32_t srtt)
{
	struct rack_sendmap *prsm;
	uint32_t thresh, len;
	int segsiz;

	if (srtt == 0)
		srtt = 1;
	if (rack->r_ctl.rc_tlp_threshold)
		thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
	else
		thresh = (srtt * 2);

	/* Get the previous sent packet, if any */
	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
	counter_u64_add(rack_enter_tlp_calc, 1);
	len = rsm->r_end - rsm->r_start;
	if (rack->rack_tlp_threshold_use == TLP_USE_ID) {
		/* Exactly like the ID */
		if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) {
			uint32_t alt_thresh;
			/*
			 * Compensate for delayed-ack with the d-ack time.
5122 */ 5123 counter_u64_add(rack_used_tlpmethod, 1); 5124 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5125 if (alt_thresh > thresh) 5126 thresh = alt_thresh; 5127 } 5128 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5129 /* 2.1 behavior */ 5130 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5131 if (prsm && (len <= segsiz)) { 5132 /* 5133 * Two packets outstanding, thresh should be (2*srtt) + 5134 * possible inter-packet delay (if any). 5135 */ 5136 uint32_t inter_gap = 0; 5137 int idx, nidx; 5138 5139 counter_u64_add(rack_used_tlpmethod, 1); 5140 idx = rsm->r_rtr_cnt - 1; 5141 nidx = prsm->r_rtr_cnt - 1; 5142 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5143 /* Yes it was sent later (or at the same time) */ 5144 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5145 } 5146 thresh += inter_gap; 5147 } else if (len <= segsiz) { 5148 /* 5149 * Possibly compensate for delayed-ack. 5150 */ 5151 uint32_t alt_thresh; 5152 5153 counter_u64_add(rack_used_tlpmethod2, 1); 5154 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5155 if (alt_thresh > thresh) 5156 thresh = alt_thresh; 5157 } 5158 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5159 /* 2.2 behavior */ 5160 if (len <= segsiz) { 5161 uint32_t alt_thresh; 5162 /* 5163 * Compensate for delayed-ack with the d-ack time. 5164 */ 5165 counter_u64_add(rack_used_tlpmethod, 1); 5166 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5167 if (alt_thresh > thresh) 5168 thresh = alt_thresh; 5169 } 5170 } 5171 /* Not above an RTO */ 5172 if (thresh > tp->t_rxtcur) { 5173 thresh = tp->t_rxtcur; 5174 } 5175 /* Not above a RTO max */ 5176 if (thresh > rack_rto_max) { 5177 thresh = rack_rto_max; 5178 } 5179 /* Apply user supplied min TLP */ 5180 if (thresh < rack_tlp_min) { 5181 thresh = rack_tlp_min; 5182 } 5183 return (thresh); 5184 } 5185 5186 static uint32_t 5187 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5188 { 5189 /* 5190 * We want the rack_rtt which is the 5191 * last rtt we measured. However if that 5192 * does not exist we fallback to the srtt (which 5193 * we probably will never do) and then as a last 5194 * resort we use RACK_INITIAL_RTO if no srtt is 5195 * yet set. 5196 */ 5197 if (rack->rc_rack_rtt) 5198 return (rack->rc_rack_rtt); 5199 else if (tp->t_srtt == 0) 5200 return (RACK_INITIAL_RTO); 5201 return (tp->t_srtt); 5202 } 5203 5204 static struct rack_sendmap * 5205 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5206 { 5207 /* 5208 * Check to see that we don't need to fall into recovery. We will 5209 * need to do so if our oldest transmit is past the time we should 5210 * have had an ack. 
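/*
 * Illustrative sketches (editor's addition, not part of this stack):
 * two hypothetical helpers restating the timer thresholds above.  The
 * first is the RACK reordering threshold: sRTT plus the per-packet
 * delay, plus an sRTT>>shift term (default sRTT/4) only while
 * reordering has been observed, clamped by the current RTO and the RTO
 * ceiling.  The second is the TLP threshold: sRTT + sRTT/div (or
 * 2*sRTT), raised to sRTT + sRTT/2 + the delayed-ACK time when roughly
 * one segment is left in flight, clamped into
 * [rack_tlp_min, min(RTO, rack_rto_max)]; the 2.1 variant's
 * inter-packet-gap term is omitted here.
 */
static uint32_t
rack_thresh_model(uint32_t srtt, uint32_t pkt_delay, int reordering_seen,
    uint32_t reorder_shift, uint32_t rxt_cur, uint32_t rto_max)
{
	uint32_t thresh = srtt + pkt_delay;

	if (reordering_seen)
		thresh += srtt >> (reorder_shift ? reorder_shift : 2);
	else
		thresh += 1;
	if (thresh > rxt_cur)
		thresh = rxt_cur;
	if (thresh > rto_max)
		thresh = rto_max;
	return (thresh);
}

static uint32_t
tlp_thresh_model(uint32_t srtt, uint32_t tlp_div, int one_seg_left,
    uint32_t dack_time, uint32_t rxt_cur, uint32_t rto_max,
    uint32_t tlp_min)
{
	uint32_t thresh, alt;

	thresh = tlp_div ? (srtt + srtt / tlp_div) : (srtt * 2);
	if (one_seg_left) {
		/* Out-wait a possible delayed ACK from the peer. */
		alt = srtt + (srtt / 2) + dack_time;
		if (alt > thresh)
			thresh = alt;
	}
	if (thresh > rxt_cur)
		thresh = rxt_cur;
	if (thresh > rto_max)
		thresh = rto_max;
	if (thresh < tlp_min)
		thresh = tlp_min;
	return (thresh);
}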
5211 */ 5212 struct tcp_rack *rack; 5213 struct rack_sendmap *rsm; 5214 int32_t idx; 5215 uint32_t srtt, thresh; 5216 5217 rack = (struct tcp_rack *)tp->t_fb_ptr; 5218 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5219 return (NULL); 5220 } 5221 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5222 if (rsm == NULL) 5223 return (NULL); 5224 5225 if (rsm->r_flags & RACK_ACKED) { 5226 rsm = rack_find_lowest_rsm(rack); 5227 if (rsm == NULL) 5228 return (NULL); 5229 } 5230 idx = rsm->r_rtr_cnt - 1; 5231 srtt = rack_grab_rtt(tp, rack); 5232 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5233 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5234 return (NULL); 5235 } 5236 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5237 return (NULL); 5238 } 5239 /* Ok if we reach here we are over-due and this guy can be sent */ 5240 if (IN_RECOVERY(tp->t_flags) == 0) { 5241 /* 5242 * For the one that enters us into recovery record undo 5243 * info. 5244 */ 5245 rack->r_ctl.rc_rsm_start = rsm->r_start; 5246 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 5247 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 5248 } 5249 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 5250 return (rsm); 5251 } 5252 5253 static uint32_t 5254 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5255 { 5256 int32_t t; 5257 int32_t tt; 5258 uint32_t ret_val; 5259 5260 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5261 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5262 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5263 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 5264 tp->t_rxtshift++; 5265 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5266 ret_val = (uint32_t)tt; 5267 return (ret_val); 5268 } 5269 5270 static uint32_t 5271 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5272 { 5273 /* 5274 * Start the FR timer, we do this based on getting the first one in 5275 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5276 * events we need to stop the running timer (if its running) before 5277 * starting the new one. 5278 */ 5279 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5280 uint32_t srtt_cur; 5281 int32_t idx; 5282 int32_t is_tlp_timer = 0; 5283 struct rack_sendmap *rsm; 5284 5285 if (rack->t_timers_stopped) { 5286 /* All timers have been stopped none are to run */ 5287 return (0); 5288 } 5289 if (rack->rc_in_persist) { 5290 /* We can't start any timer in persists */ 5291 return (rack_get_persists_timer_val(tp, rack)); 5292 } 5293 rack->rc_on_min_to = 0; 5294 if ((tp->t_state < TCPS_ESTABLISHED) || 5295 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5296 goto activate_rxt; 5297 } 5298 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5299 if ((rsm == NULL) || sup_rack) { 5300 /* Nothing on the send map or no rack */ 5301 activate_rxt: 5302 time_since_sent = 0; 5303 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5304 if (rsm) { 5305 /* 5306 * Should we discount the RTX timer any? 5307 * 5308 * We want to discount it the smallest amount. 5309 * If a timer (Rack/TLP or RXT) has gone off more 5310 * recently thats the discount we want to use (now - timer time). 5311 * If the retransmit of the oldest packet was more recent then 5312 * we want to use that (now - oldest-packet-last_transmit_time). 
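/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * persist timeout above is the classic srtt + 4*rttvar retransmit value,
 * backed off by the shift count and clamped into the configured
 * [min, max] range (the timer-slop term of RACK_TCPT_RANGESET is omitted
 * here).  persist_timeout_model() is a hypothetical helper; all inputs
 * are assumed to be in the same time units.
 */
static uint32_t
persist_timeout_model(uint32_t srtt, uint32_t rttvar, uint32_t backoff,
    uint32_t tmin, uint32_t tmax)
{
	uint64_t t = ((uint64_t)srtt + ((uint64_t)rttvar << 2)) * backoff;

	if (t < tmin)
		return (tmin);
	if (t > tmax)
		return (tmax);
	return ((uint32_t)t);
}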
5313 * 5314 */ 5315 idx = rsm->r_rtr_cnt - 1; 5316 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5317 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5318 else 5319 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5320 if (TSTMP_GT(cts, tstmp_touse)) 5321 time_since_sent = cts - tstmp_touse; 5322 } 5323 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5324 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5325 to = tp->t_rxtcur; 5326 if (to > time_since_sent) 5327 to -= time_since_sent; 5328 else 5329 to = rack->r_ctl.rc_min_to; 5330 if (to == 0) 5331 to = 1; 5332 /* Special case for KEEPINIT */ 5333 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5334 (TP_KEEPINIT(tp) != 0) && 5335 rsm) { 5336 /* 5337 * We have to put a ceiling on the rxt timer 5338 * of the keep-init timeout. 5339 */ 5340 uint32_t max_time, red; 5341 5342 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5343 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5344 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5345 if (red < max_time) 5346 max_time -= red; 5347 else 5348 max_time = 1; 5349 } 5350 /* Reduce timeout to the keep value if needed */ 5351 if (max_time < to) 5352 to = max_time; 5353 } 5354 return (to); 5355 } 5356 return (0); 5357 } 5358 if (rsm->r_flags & RACK_ACKED) { 5359 rsm = rack_find_lowest_rsm(rack); 5360 if (rsm == NULL) { 5361 /* No lowest? */ 5362 goto activate_rxt; 5363 } 5364 } 5365 if (rack->sack_attack_disable) { 5366 /* 5367 * We don't want to do 5368 * any TLP's if you are an attacker. 5369 * Though if you are doing what 5370 * is expected you may still have 5371 * SACK-PASSED marks. 5372 */ 5373 goto activate_rxt; 5374 } 5375 /* Convert from ms to usecs */ 5376 if ((rsm->r_flags & RACK_SACK_PASSED) || (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5377 if ((tp->t_flags & TF_SENTFIN) && 5378 ((tp->snd_max - tp->snd_una) == 1) && 5379 (rsm->r_flags & RACK_HAS_FIN)) { 5380 /* 5381 * We don't start a rack timer if all we have is a 5382 * FIN outstanding. 5383 */ 5384 goto activate_rxt; 5385 } 5386 if ((rack->use_rack_rr == 0) && 5387 (IN_FASTRECOVERY(tp->t_flags)) && 5388 (rack->rack_no_prr == 0) && 5389 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5390 /* 5391 * We are not cheating, in recovery and 5392 * not enough ack's to yet get our next 5393 * retransmission out. 5394 * 5395 * Note that classified attackers do not 5396 * get to use the rack-cheat. 5397 */ 5398 goto activate_tlp; 5399 } 5400 srtt = rack_grab_rtt(tp, rack); 5401 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5402 idx = rsm->r_rtr_cnt - 1; 5403 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5404 if (SEQ_GEQ(exp, cts)) { 5405 to = exp - cts; 5406 if (to < rack->r_ctl.rc_min_to) { 5407 to = rack->r_ctl.rc_min_to; 5408 if (rack->r_rr_config == 3) 5409 rack->rc_on_min_to = 1; 5410 } 5411 } else { 5412 to = rack->r_ctl.rc_min_to; 5413 if (rack->r_rr_config == 3) 5414 rack->rc_on_min_to = 1; 5415 } 5416 } else { 5417 /* Ok we need to do a TLP not RACK */ 5418 activate_tlp: 5419 if ((rack->rc_tlp_in_progress != 0) && 5420 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5421 /* 5422 * The previous send was a TLP and we have sent 5423 * N TLP's without sending new data. 5424 */ 5425 goto activate_rxt; 5426 } 5427 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5428 if (rsm == NULL) { 5429 /* We found no rsm to TLP with. 
*/ 5430 goto activate_rxt; 5431 } 5432 if (rsm->r_flags & RACK_HAS_FIN) { 5433 /* If its a FIN we dont do TLP */ 5434 rsm = NULL; 5435 goto activate_rxt; 5436 } 5437 idx = rsm->r_rtr_cnt - 1; 5438 time_since_sent = 0; 5439 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5440 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5441 else 5442 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5443 if (TSTMP_GT(cts, tstmp_touse)) 5444 time_since_sent = cts - tstmp_touse; 5445 is_tlp_timer = 1; 5446 if (tp->t_srtt) { 5447 if ((rack->rc_srtt_measure_made == 0) && 5448 (tp->t_srtt == 1)) { 5449 /* 5450 * If another stack as run and set srtt to 1, 5451 * then the srtt was 0, so lets use the initial. 5452 */ 5453 srtt = RACK_INITIAL_RTO; 5454 } else { 5455 srtt_cur = tp->t_srtt; 5456 srtt = srtt_cur; 5457 } 5458 } else 5459 srtt = RACK_INITIAL_RTO; 5460 /* 5461 * If the SRTT is not keeping up and the 5462 * rack RTT has spiked we want to use 5463 * the last RTT not the smoothed one. 5464 */ 5465 if (rack_tlp_use_greater && 5466 tp->t_srtt && 5467 (srtt < rack_grab_rtt(tp, rack))) { 5468 srtt = rack_grab_rtt(tp, rack); 5469 } 5470 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5471 if (thresh > time_since_sent) { 5472 to = thresh - time_since_sent; 5473 } else { 5474 to = rack->r_ctl.rc_min_to; 5475 rack_log_alt_to_to_cancel(rack, 5476 thresh, /* flex1 */ 5477 time_since_sent, /* flex2 */ 5478 tstmp_touse, /* flex3 */ 5479 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5480 (uint32_t)rsm->r_tim_lastsent[idx], 5481 srtt, 5482 idx, 99); 5483 } 5484 if (to < rack_tlp_min) { 5485 to = rack_tlp_min; 5486 } 5487 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5488 /* 5489 * If the TLP time works out to larger than the max 5490 * RTO lets not do TLP.. just RTO. 5491 */ 5492 goto activate_rxt; 5493 } 5494 } 5495 if (is_tlp_timer == 0) { 5496 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5497 } else { 5498 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5499 } 5500 if (to == 0) 5501 to = 1; 5502 return (to); 5503 } 5504 5505 static void 5506 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5507 { 5508 if (rack->rc_in_persist == 0) { 5509 if (tp->t_flags & TF_GPUTINPROG) { 5510 /* 5511 * Stop the goodput now, the calling of the 5512 * measurement function clears the flag. 
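/*
 * Illustrative sketch (editor's addition, not part of this stack):
 * arming the TLP timer above converts the threshold into a timeout by
 * subtracting how long the candidate segment has already waited,
 * enforces the minimum TLP time, and punts to a plain RTO when the
 * result would exceed the maximum retransmit timeout.
 * tlp_timer_model() is a hypothetical helper; a return of 0 stands for
 * "use the RXT timer instead".
 */
static uint32_t
tlp_timer_model(uint32_t thresh, uint32_t time_since_sent,
    uint32_t min_to, uint32_t tlp_min, uint32_t rexmt_max)
{
	uint32_t to;

	to = (thresh > time_since_sent) ? (thresh - time_since_sent) : min_to;
	if (to < tlp_min)
		to = tlp_min;
	if (to > rexmt_max)
		return (0);	/* too long for a TLP: fall back to RTO */
	return (to);
}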
5513 */ 5514 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__); 5515 } 5516 #ifdef NETFLIX_SHARED_CWND 5517 if (rack->r_ctl.rc_scw) { 5518 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5519 rack->rack_scwnd_is_idle = 1; 5520 } 5521 #endif 5522 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5523 if (rack->r_ctl.rc_went_idle_time == 0) 5524 rack->r_ctl.rc_went_idle_time = 1; 5525 rack_timer_cancel(tp, rack, cts, __LINE__); 5526 tp->t_rxtshift = 0; 5527 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5528 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5529 rack->rc_in_persist = 1; 5530 } 5531 } 5532 5533 static void 5534 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5535 { 5536 if (rack->rc_inp->inp_in_hpts) { 5537 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 5538 rack->r_ctl.rc_hpts_flags = 0; 5539 } 5540 #ifdef NETFLIX_SHARED_CWND 5541 if (rack->r_ctl.rc_scw) { 5542 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5543 rack->rack_scwnd_is_idle = 0; 5544 } 5545 #endif 5546 if (rack->rc_gp_dyn_mul && 5547 (rack->use_fixed_rate == 0) && 5548 (rack->rc_always_pace)) { 5549 /* 5550 * Do we count this as if a probe-rtt just 5551 * finished? 5552 */ 5553 uint32_t time_idle, idle_min; 5554 5555 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5556 idle_min = rack_min_probertt_hold; 5557 if (rack_probertt_gpsrtt_cnt_div) { 5558 uint64_t extra; 5559 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5560 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5561 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5562 idle_min += (uint32_t)extra; 5563 } 5564 if (time_idle >= idle_min) { 5565 /* Yes, we count it as a probe-rtt. */ 5566 uint32_t us_cts; 5567 5568 us_cts = tcp_get_usecs(NULL); 5569 if (rack->in_probe_rtt == 0) { 5570 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5571 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5572 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5573 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5574 } else { 5575 rack_exit_probertt(rack, us_cts); 5576 } 5577 } 5578 } 5579 rack->rc_in_persist = 0; 5580 rack->r_ctl.rc_went_idle_time = 0; 5581 tp->t_rxtshift = 0; 5582 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5583 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5584 rack->r_ctl.rc_agg_delayed = 0; 5585 rack->r_early = 0; 5586 rack->r_late = 0; 5587 rack->r_ctl.rc_agg_early = 0; 5588 } 5589 5590 static void 5591 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5592 struct hpts_diag *diag, struct timeval *tv) 5593 { 5594 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5595 union tcp_log_stackspecific log; 5596 5597 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5598 log.u_bbr.flex1 = diag->p_nxt_slot; 5599 log.u_bbr.flex2 = diag->p_cur_slot; 5600 log.u_bbr.flex3 = diag->slot_req; 5601 log.u_bbr.flex4 = diag->inp_hptsslot; 5602 log.u_bbr.flex5 = diag->slot_remaining; 5603 log.u_bbr.flex6 = diag->need_new_to; 5604 log.u_bbr.flex7 = diag->p_hpts_active; 5605 log.u_bbr.flex8 = diag->p_on_min_sleep; 5606 /* Hijack other fields as needed */ 5607 log.u_bbr.epoch = diag->have_slept; 5608 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5609 log.u_bbr.pkts_out = diag->co_ret; 5610 log.u_bbr.applimited = diag->hpts_sleep_time; 5611 log.u_bbr.delivered = diag->p_prev_slot; 5612 log.u_bbr.inflight = diag->p_runningtick; 5613 log.u_bbr.bw_inuse = diag->wheel_tick; 5614 log.u_bbr.rttProp 
= diag->wheel_cts; 5615 log.u_bbr.timeStamp = cts; 5616 log.u_bbr.delRate = diag->maxticks; 5617 log.u_bbr.cur_del_rate = diag->p_curtick; 5618 log.u_bbr.cur_del_rate <<= 32; 5619 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5620 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5621 &rack->rc_inp->inp_socket->so_rcv, 5622 &rack->rc_inp->inp_socket->so_snd, 5623 BBR_LOG_HPTSDIAG, 0, 5624 0, &log, false, tv); 5625 } 5626 5627 } 5628 5629 static void 5630 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5631 { 5632 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5633 union tcp_log_stackspecific log; 5634 struct timeval tv; 5635 5636 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5637 log.u_bbr.flex1 = sb->sb_flags; 5638 log.u_bbr.flex2 = len; 5639 log.u_bbr.flex3 = sb->sb_state; 5640 log.u_bbr.flex8 = type; 5641 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5642 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5643 &rack->rc_inp->inp_socket->so_rcv, 5644 &rack->rc_inp->inp_socket->so_snd, 5645 TCP_LOG_SB_WAKE, 0, 5646 len, &log, false, &tv); 5647 } 5648 } 5649 5650 static void 5651 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5652 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5653 { 5654 struct hpts_diag diag; 5655 struct inpcb *inp; 5656 struct timeval tv; 5657 uint32_t delayed_ack = 0; 5658 uint32_t hpts_timeout; 5659 uint32_t entry_slot = slot; 5660 uint8_t stopped; 5661 uint32_t left = 0; 5662 uint32_t us_cts; 5663 5664 inp = tp->t_inpcb; 5665 if ((tp->t_state == TCPS_CLOSED) || 5666 (tp->t_state == TCPS_LISTEN)) { 5667 return; 5668 } 5669 if (inp->inp_in_hpts) { 5670 /* Already on the pacer */ 5671 return; 5672 } 5673 stopped = rack->rc_tmr_stopped; 5674 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5675 left = rack->r_ctl.rc_timer_exp - cts; 5676 } 5677 rack->r_ctl.rc_timer_exp = 0; 5678 rack->r_ctl.rc_hpts_flags = 0; 5679 us_cts = tcp_get_usecs(&tv); 5680 /* Now early/late accounting */ 5681 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL); 5682 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5683 /* 5684 * We have a early carry over set, 5685 * we can always add more time so we 5686 * can always make this compensation. 5687 * 5688 * Note if ack's are allowed to wake us do not 5689 * penalize the next timer for being awoke 5690 * by an ack aka the rc_agg_early (non-paced mode). 5691 */ 5692 slot += rack->r_ctl.rc_agg_early; 5693 rack->r_early = 0; 5694 rack->r_ctl.rc_agg_early = 0; 5695 } 5696 if (rack->r_late) { 5697 /* 5698 * This is harder, we can 5699 * compensate some but it 5700 * really depends on what 5701 * the current pacing time is. 5702 */ 5703 if (rack->r_ctl.rc_agg_delayed >= slot) { 5704 /* 5705 * We can't compensate for it all. 5706 * And we have to have some time 5707 * on the clock. We always have a min 5708 * 10 slots (10 x 10 i.e. 100 usecs). 
5709 */ 5710 if (slot <= HPTS_TICKS_PER_USEC) { 5711 /* We gain delay */ 5712 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_USEC - slot); 5713 slot = HPTS_TICKS_PER_USEC; 5714 } else { 5715 /* We take off some */ 5716 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_USEC); 5717 slot = HPTS_TICKS_PER_USEC; 5718 } 5719 } else { 5720 slot -= rack->r_ctl.rc_agg_delayed; 5721 rack->r_ctl.rc_agg_delayed = 0; 5722 /* Make sure we have 100 useconds at minimum */ 5723 if (slot < HPTS_TICKS_PER_USEC) { 5724 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_USEC - slot; 5725 slot = HPTS_TICKS_PER_USEC; 5726 } 5727 if (rack->r_ctl.rc_agg_delayed == 0) 5728 rack->r_late = 0; 5729 } 5730 } 5731 if (slot) { 5732 /* We are pacing too */ 5733 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5734 } 5735 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5736 #ifdef NETFLIX_EXP_DETECTION 5737 if (rack->sack_attack_disable && 5738 (slot < tcp_sad_pacing_interval)) { 5739 /* 5740 * We have a potential attacker on 5741 * the line. We have possibly some 5742 * (or now) pacing time set. We want to 5743 * slow down the processing of sacks by some 5744 * amount (if it is an attacker). Set the default 5745 * slot for attackers in place (unless the orginal 5746 * interval is longer). Its stored in 5747 * micro-seconds, so lets convert to msecs. 5748 */ 5749 slot = tcp_sad_pacing_interval; 5750 } 5751 #endif 5752 if (tp->t_flags & TF_DELACK) { 5753 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5754 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5755 } 5756 if (delayed_ack && ((hpts_timeout == 0) || 5757 (delayed_ack < hpts_timeout))) 5758 hpts_timeout = delayed_ack; 5759 else 5760 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5761 /* 5762 * If no timers are going to run and we will fall off the hptsi 5763 * wheel, we resort to a keep-alive timer if its configured. 5764 */ 5765 if ((hpts_timeout == 0) && 5766 (slot == 0)) { 5767 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5768 (tp->t_state <= TCPS_CLOSING)) { 5769 /* 5770 * Ok we have no timer (persists, rack, tlp, rxt or 5771 * del-ack), we don't have segments being paced. So 5772 * all that is left is the keepalive timer. 5773 */ 5774 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5775 /* Get the established keep-alive time */ 5776 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5777 } else { 5778 /* 5779 * Get the initial setup keep-alive time, 5780 * note that this is probably not going to 5781 * happen, since rack will be running a rxt timer 5782 * if a SYN of some sort is outstanding. It is 5783 * actually handled in rack_timeout_rxt(). 5784 */ 5785 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5786 } 5787 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5788 if (rack->in_probe_rtt) { 5789 /* 5790 * We want to instead not wake up a long time from 5791 * now but to wake up about the time we would 5792 * exit probe-rtt and initiate a keep-alive ack. 5793 * This will get us out of probe-rtt and update 5794 * our min-rtt. 5795 */ 5796 hpts_timeout = rack_min_probertt_hold; 5797 } 5798 } 5799 } 5800 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5801 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5802 /* 5803 * RACK, TLP, persists and RXT timers all are restartable 5804 * based on actions input .. i.e we received a packet (ack 5805 * or sack) and that changes things (rw, or snd_una etc). 5806 * Thus we can restart them with a new value. 
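/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * early/late accounting above credits time we previously ran early back
 * into the pacing slot and absorbs accumulated lateness out of it,
 * while never letting the slot fall below a small floor.
 * pacing_slot_adjust() is a hypothetical helper; the r_early/r_late flag
 * handling and the ack-can-send-data exception are omitted.
 */
static uint32_t
pacing_slot_adjust(uint32_t slot, uint32_t *agg_early,
    uint32_t *agg_delayed, uint32_t slot_floor)
{
	slot += *agg_early;
	*agg_early = 0;
	if (*agg_delayed >= slot) {
		/* Can't absorb all the lateness now; pin the slot at
		 * the floor and adjust the outstanding debt. */
		if (slot <= slot_floor)
			*agg_delayed += (slot_floor - slot);
		else
			*agg_delayed -= (slot - slot_floor);
		slot = slot_floor;
	} else {
		slot -= *agg_delayed;
		*agg_delayed = 0;
		if (slot < slot_floor) {
			*agg_delayed = slot_floor - slot;
			slot = slot_floor;
		}
	}
	return (slot);
}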
For 5807 * keep-alive, delayed_ack we keep track of what was left 5808 * and restart the timer with a smaller value. 5809 */ 5810 if (left < hpts_timeout) 5811 hpts_timeout = left; 5812 } 5813 if (hpts_timeout) { 5814 /* 5815 * Hack alert for now we can't time-out over 2,147,483 5816 * seconds (a bit more than 596 hours), which is probably ok 5817 * :). 5818 */ 5819 if (hpts_timeout > 0x7ffffffe) 5820 hpts_timeout = 0x7ffffffe; 5821 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5822 } 5823 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL); 5824 if ((rack->gp_ready == 0) && 5825 (rack->use_fixed_rate == 0) && 5826 (hpts_timeout < slot) && 5827 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5828 /* 5829 * We have no good estimate yet for the 5830 * old clunky burst mitigation or the 5831 * real pacing. And the tlp or rxt is smaller 5832 * than the pacing calculation. Lets not 5833 * pace that long since we know the calculation 5834 * so far is not accurate. 5835 */ 5836 slot = hpts_timeout; 5837 } 5838 rack->r_ctl.last_pacing_time = slot; 5839 /** 5840 * Turn off all the flags for queuing by default. The 5841 * flags have important meanings to what happens when 5842 * LRO interacts with the transport. Most likely (by default now) 5843 * mbuf_queueing and ack compression are on. So the transport 5844 * has a couple of flags that control what happens (if those 5845 * are not on then these flags won't have any effect since it 5846 * won't go through the queuing LRO path). 5847 * 5848 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5849 * pacing output, so don't disturb. But 5850 * it also means LRO can wake me if there 5851 * is a SACK arrival. 5852 * 5853 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5854 * with the above flag (QUEUE_READY) and 5855 * when present it says don't even wake me 5856 * if a SACK arrives. 5857 * 5858 * The idea behind these flags is that if we are pacing we 5859 * set the MBUF_QUEUE_READY and only get woken up if 5860 * a SACK arrives (which could change things) or if 5861 * our pacing timer expires. If, however, we have a rack 5862 * timer running, then we don't even want a sack to wake 5863 * us since the rack timer has to expire before we can send. 5864 * 5865 * Other cases should usually have none of the flags set 5866 * so LRO can call into us. 5867 */ 5868 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5869 if (slot) { 5870 rack->r_ctl.rc_last_output_to = us_cts + slot; 5871 /* 5872 * A pacing timer (slot) is being set, in 5873 * such a case we cannot send (we are blocked by 5874 * the timer). So lets tell LRO that it should not 5875 * wake us unless there is a SACK. Note this only 5876 * will be effective if mbuf queueing is on or 5877 * compressed acks are being processed. 5878 */ 5879 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5880 /* 5881 * But wait if we have a Rack timer running 5882 * even a SACK should not disturb us (with 5883 * the exception of r_rr_config 3). 5884 */ 5885 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5886 (rack->r_rr_config != 3)) 5887 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5888 if (rack->rc_ack_can_sendout_data) { 5889 /* 5890 * Ahh but wait, this is that special case 5891 * where the pacing timer can be disturbed 5892 * backout the changes (used for non-paced 5893 * burst limiting). 
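/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * queuing-flag policy described above and applied just below.  The two
 * macro names here are stand-ins for INP_MBUF_QUEUE_READY and
 * INP_DONT_SACK_QUEUE; lro_wake_policy() is a hypothetical helper.
 */
#define	SKETCH_QUEUE_READY	0x1	/* only a SACK or timer may wake us */
#define	SKETCH_NO_SACK_WAKE	0x2	/* not even a SACK may wake us */

static int
lro_wake_policy(int pacing_slot_set, int rack_timer_armed,
    int rr_config, int acks_may_send_data)
{
	int flags = 0;

	if (pacing_slot_set) {
		flags |= SKETCH_QUEUE_READY;
		if (rack_timer_armed && rr_config != 3)
			flags |= SKETCH_NO_SACK_WAKE;
		if (acks_may_send_data)
			flags = 0;	/* non-paced burst limiting */
	}
	return (flags);
}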
5894 */ 5895 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5896 } 5897 if ((rack->use_rack_rr) && 5898 (rack->r_rr_config < 2) && 5899 ((hpts_timeout) && (hpts_timeout < slot))) { 5900 /* 5901 * Arrange for the hpts to kick back in after the 5902 * t-o if the t-o does not cause a send. 5903 */ 5904 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5905 __LINE__, &diag); 5906 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5907 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5908 } else { 5909 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 5910 __LINE__, &diag); 5911 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5912 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5913 } 5914 } else if (hpts_timeout) { 5915 /* 5916 * With respect to inp_flags2 here, lets let any new acks wake 5917 * us up here. Since we are not pacing (no pacing timer), output 5918 * can happen so we should let it. If its a Rack timer, then any inbound 5919 * packet probably won't change the sending (we will be blocked) 5920 * but it may change the prr stats so letting it in (the set defaults 5921 * at the start of this block) are good enough. 5922 */ 5923 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5924 __LINE__, &diag); 5925 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5926 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5927 } else { 5928 /* No timer starting */ 5929 #ifdef INVARIANTS 5930 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5931 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5932 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5933 } 5934 #endif 5935 } 5936 rack->rc_tmr_stopped = 0; 5937 if (slot) 5938 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5939 } 5940 5941 /* 5942 * RACK Timer, here we simply do logging and house keeping. 5943 * the normal rack_output() function will call the 5944 * appropriate thing to check if we need to do a RACK retransmit. 5945 * We return 1, saying don't proceed with rack_output only 5946 * when all timers have been stopped (destroyed PCB?). 5947 */ 5948 static int 5949 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5950 { 5951 /* 5952 * This timer simply provides an internal trigger to send out data. 5953 * The check_recovery_mode call will see if there are needed 5954 * retransmissions, if so we will enter fast-recovery. The output 5955 * call may or may not do the same thing depending on sysctl 5956 * settings. 5957 */ 5958 struct rack_sendmap *rsm; 5959 5960 if (tp->t_timers->tt_flags & TT_STOPPED) { 5961 return (1); 5962 } 5963 counter_u64_add(rack_to_tot, 1); 5964 if (rack->r_state && (rack->r_state != tp->t_state)) 5965 rack_set_state(tp, rack); 5966 rack->rc_on_min_to = 0; 5967 rsm = rack_check_recovery_mode(tp, cts); 5968 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5969 if (rsm) { 5970 rack->r_ctl.rc_resend = rsm; 5971 rack->r_timer_override = 1; 5972 if (rack->use_rack_rr) { 5973 /* 5974 * Don't accumulate extra pacing delay 5975 * we are allowing the rack timer to 5976 * over-ride pacing i.e. rrr takes precedence 5977 * if the pacing interval is longer than the rrr 5978 * time (in other words we get the min pacing 5979 * time versus rrr pacing time). 
5980 */ 5981 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5982 } 5983 } 5984 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5985 if (rsm == NULL) { 5986 /* restart a timer and return 1 */ 5987 rack_start_hpts_timer(rack, tp, cts, 5988 0, 0, 0); 5989 return (1); 5990 } 5991 return (0); 5992 } 5993 5994 static void 5995 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5996 { 5997 if (rsm->m->m_len > rsm->orig_m_len) { 5998 /* 5999 * Mbuf grew, caused by sbcompress, our offset does 6000 * not change. 6001 */ 6002 rsm->orig_m_len = rsm->m->m_len; 6003 } else if (rsm->m->m_len < rsm->orig_m_len) { 6004 /* 6005 * Mbuf shrank, trimmed off the top by an ack, our 6006 * offset changes. 6007 */ 6008 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 6009 rsm->orig_m_len = rsm->m->m_len; 6010 } 6011 } 6012 6013 static void 6014 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 6015 { 6016 struct mbuf *m; 6017 uint32_t soff; 6018 6019 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 6020 /* Fix up the orig_m_len and possibly the mbuf offset */ 6021 rack_adjust_orig_mlen(src_rsm); 6022 } 6023 m = src_rsm->m; 6024 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 6025 while (soff >= m->m_len) { 6026 /* Move out past this mbuf */ 6027 soff -= m->m_len; 6028 m = m->m_next; 6029 KASSERT((m != NULL), 6030 ("rsm:%p nrsm:%p hit at soff:%u null m", 6031 src_rsm, rsm, soff)); 6032 } 6033 rsm->m = m; 6034 rsm->soff = soff; 6035 rsm->orig_m_len = m->m_len; 6036 } 6037 6038 static __inline void 6039 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 6040 struct rack_sendmap *rsm, uint32_t start) 6041 { 6042 int idx; 6043 6044 nrsm->r_start = start; 6045 nrsm->r_end = rsm->r_end; 6046 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6047 nrsm->r_flags = rsm->r_flags; 6048 nrsm->r_dupack = rsm->r_dupack; 6049 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6050 nrsm->r_rtr_bytes = 0; 6051 rsm->r_end = nrsm->r_start; 6052 nrsm->r_just_ret = rsm->r_just_ret; 6053 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6054 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6055 } 6056 /* Now if we have SYN flag we keep it on the left edge */ 6057 if (nrsm->r_flags & RACK_HAS_SYN) 6058 nrsm->r_flags &= ~RACK_HAS_SYN; 6059 /* Now if we have a FIN flag we keep it on the right edge */ 6060 if (rsm->r_flags & RACK_HAS_FIN) 6061 rsm->r_flags &= ~RACK_HAS_FIN; 6062 /* Push bit must go to the right edge as well */ 6063 if (rsm->r_flags & RACK_HAD_PUSH) 6064 rsm->r_flags &= ~RACK_HAD_PUSH; 6065 /* Clone over the state of the hw_tls flag */ 6066 nrsm->r_hw_tls = rsm->r_hw_tls; 6067 /* 6068 * Now we need to find nrsm's new location in the mbuf chain 6069 * we basically calculate a new offset, which is soff + 6070 * how much is left in original rsm. Then we walk out the mbuf 6071 * chain to find the righ postion, it may be the same mbuf 6072 * or maybe not. 6073 */ 6074 KASSERT(((rsm->m != NULL) || 6075 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6076 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6077 if (rsm->m) 6078 rack_setup_offset_for_rsm(rsm, nrsm); 6079 } 6080 6081 static struct rack_sendmap * 6082 rack_merge_rsm(struct tcp_rack *rack, 6083 struct rack_sendmap *l_rsm, 6084 struct rack_sendmap *r_rsm) 6085 { 6086 /* 6087 * We are merging two ack'd RSM's, 6088 * the l_rsm is on the left (lower seq 6089 * values) and the r_rsm is on the right 6090 * (higher seq value). The simplest way 6091 * to merge these is to move the right 6092 * one into the left. 
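/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * offset fix-up in rack_setup_offset_for_rsm() above walks forward
 * through the mbuf chain until the byte offset lands inside a buffer.
 * The toy buf_seg type and chain_seek() below are hypothetical;
 * <stdint.h> and <stddef.h> are assumed.
 */
struct buf_seg {
	struct buf_seg	*next;
	uint32_t	 len;
};

static struct buf_seg *
chain_seek(struct buf_seg *m, uint32_t *offp)
{
	uint32_t off = *offp;

	while (m != NULL && off >= m->len) {
		off -= m->len;	/* consume this buffer entirely */
		m = m->next;
	}
	*offp = off;		/* offset within the returned buffer */
	return (m);
}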
I don't think there 6093 * is any reason we need to try to find 6094 * the oldest (or last oldest retransmitted). 6095 */ 6096 struct rack_sendmap *rm; 6097 6098 rack_log_map_chg(rack->rc_tp, rack, NULL, 6099 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6100 l_rsm->r_end = r_rsm->r_end; 6101 if (l_rsm->r_dupack < r_rsm->r_dupack) 6102 l_rsm->r_dupack = r_rsm->r_dupack; 6103 if (r_rsm->r_rtr_bytes) 6104 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6105 if (r_rsm->r_in_tmap) { 6106 /* This really should not happen */ 6107 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6108 r_rsm->r_in_tmap = 0; 6109 } 6110 6111 /* Now the flags */ 6112 if (r_rsm->r_flags & RACK_HAS_FIN) 6113 l_rsm->r_flags |= RACK_HAS_FIN; 6114 if (r_rsm->r_flags & RACK_TLP) 6115 l_rsm->r_flags |= RACK_TLP; 6116 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6117 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6118 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6119 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6120 /* 6121 * If both are app-limited then let the 6122 * free lower the count. If right is app 6123 * limited and left is not, transfer. 6124 */ 6125 l_rsm->r_flags |= RACK_APP_LIMITED; 6126 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6127 if (r_rsm == rack->r_ctl.rc_first_appl) 6128 rack->r_ctl.rc_first_appl = l_rsm; 6129 } 6130 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6131 #ifdef INVARIANTS 6132 if (rm != r_rsm) { 6133 panic("removing head in rack:%p rsm:%p rm:%p", 6134 rack, r_rsm, rm); 6135 } 6136 #endif 6137 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6138 /* Transfer the split limit to the map we free */ 6139 r_rsm->r_limit_type = l_rsm->r_limit_type; 6140 l_rsm->r_limit_type = 0; 6141 } 6142 rack_free(rack, r_rsm); 6143 return (l_rsm); 6144 } 6145 6146 /* 6147 * TLP Timer, here we simply setup what segment we want to 6148 * have the TLP expire on, the normal rack_output() will then 6149 * send it out. 6150 * 6151 * We return 1, saying don't proceed with rack_output only 6152 * when all timers have been stopped (destroyed PCB?). 6153 */ 6154 static int 6155 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6156 { 6157 /* 6158 * Tail Loss Probe. 6159 */ 6160 struct rack_sendmap *rsm = NULL; 6161 struct rack_sendmap *insret; 6162 struct socket *so; 6163 uint32_t amm; 6164 uint32_t out, avail; 6165 int collapsed_win = 0; 6166 6167 if (tp->t_timers->tt_flags & TT_STOPPED) { 6168 return (1); 6169 } 6170 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6171 /* Its not time yet */ 6172 return (0); 6173 } 6174 if (ctf_progress_timeout_check(tp, true)) { 6175 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6176 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 6177 return (1); 6178 } 6179 /* 6180 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6181 * need to figure out how to force a full MSS segment out. 6182 */ 6183 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6184 rack->r_ctl.retran_during_recovery = 0; 6185 rack->r_ctl.dsack_byte_cnt = 0; 6186 counter_u64_add(rack_tlp_tot, 1); 6187 if (rack->r_state && (rack->r_state != tp->t_state)) 6188 rack_set_state(tp, rack); 6189 so = tp->t_inpcb->inp_socket; 6190 avail = sbavail(&so->so_snd); 6191 out = tp->snd_max - tp->snd_una; 6192 if (out > tp->snd_wnd) { 6193 /* special case, we need a retransmission */ 6194 collapsed_win = 1; 6195 goto need_retran; 6196 } 6197 /* 6198 * Check our send oldest always settings, and if 6199 * there is an oldest to send jump to the need_retran. 
6200 */ 6201 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6202 goto need_retran; 6203 6204 if (avail > out) { 6205 /* New data is available */ 6206 amm = avail - out; 6207 if (amm > ctf_fixed_maxseg(tp)) { 6208 amm = ctf_fixed_maxseg(tp); 6209 if ((amm + out) > tp->snd_wnd) { 6210 /* We are rwnd limited */ 6211 goto need_retran; 6212 } 6213 } else if (amm < ctf_fixed_maxseg(tp)) { 6214 /* not enough to fill a MTU */ 6215 goto need_retran; 6216 } 6217 if (IN_FASTRECOVERY(tp->t_flags)) { 6218 /* Unlikely */ 6219 if (rack->rack_no_prr == 0) { 6220 if (out + amm <= tp->snd_wnd) { 6221 rack->r_ctl.rc_prr_sndcnt = amm; 6222 rack_log_to_prr(rack, 4, 0); 6223 } 6224 } else 6225 goto need_retran; 6226 } else { 6227 /* Set the send-new override */ 6228 if (out + amm <= tp->snd_wnd) 6229 rack->r_ctl.rc_tlp_new_data = amm; 6230 else 6231 goto need_retran; 6232 } 6233 rack->r_ctl.rc_tlpsend = NULL; 6234 counter_u64_add(rack_tlp_newdata, 1); 6235 goto send; 6236 } 6237 need_retran: 6238 /* 6239 * Ok we need to arrange the last un-acked segment to be re-sent, or 6240 * optionally the first un-acked segment. 6241 */ 6242 if (collapsed_win == 0) { 6243 if (rack_always_send_oldest) 6244 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6245 else { 6246 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6247 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6248 rsm = rack_find_high_nonack(rack, rsm); 6249 } 6250 } 6251 if (rsm == NULL) { 6252 counter_u64_add(rack_tlp_does_nada, 1); 6253 #ifdef TCP_BLACKBOX 6254 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6255 #endif 6256 goto out; 6257 } 6258 } else { 6259 /* 6260 * We must find the last segment 6261 * that was acceptable by the client. 6262 */ 6263 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6264 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6265 /* Found one */ 6266 break; 6267 } 6268 } 6269 if (rsm == NULL) { 6270 /* None? if so send the first */ 6271 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6272 if (rsm == NULL) { 6273 counter_u64_add(rack_tlp_does_nada, 1); 6274 #ifdef TCP_BLACKBOX 6275 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6276 #endif 6277 goto out; 6278 } 6279 } 6280 } 6281 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6282 /* 6283 * We need to split this the last segment in two. 6284 */ 6285 struct rack_sendmap *nrsm; 6286 6287 nrsm = rack_alloc_full_limit(rack); 6288 if (nrsm == NULL) { 6289 /* 6290 * No memory to split, we will just exit and punt 6291 * off to the RXT timer. 
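/*
 * Illustrative sketch (editor's addition, not part of this stack): the
 * new-data decision above sends at most one MSS of previously unsent
 * data as the probe and falls back to a retransmission when less than a
 * full MSS is available or the receive window would be exceeded (the
 * in-recovery PRR branch is omitted).  tlp_can_send_new_data() is a
 * hypothetical helper.
 */
static int
tlp_can_send_new_data(uint32_t avail, uint32_t out, uint32_t snd_wnd,
    uint32_t maxseg, uint32_t *new_len)
{
	uint32_t amm;

	if (avail <= out)
		return (0);		/* nothing new to send */
	amm = avail - out;
	if (amm > maxseg)
		amm = maxseg;
	else if (amm < maxseg)
		return (0);		/* can't fill an MSS: retransmit */
	if (out + amm > snd_wnd)
		return (0);		/* rwnd limited: retransmit */
	*new_len = amm;
	return (1);
}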
6292 */ 6293 counter_u64_add(rack_tlp_does_nada, 1); 6294 goto out; 6295 } 6296 rack_clone_rsm(rack, nrsm, rsm, 6297 (rsm->r_end - ctf_fixed_maxseg(tp))); 6298 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6299 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6300 #ifdef INVARIANTS 6301 if (insret != NULL) { 6302 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6303 nrsm, insret, rack, rsm); 6304 } 6305 #endif 6306 if (rsm->r_in_tmap) { 6307 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6308 nrsm->r_in_tmap = 1; 6309 } 6310 rsm->r_flags &= (~RACK_HAS_FIN); 6311 rsm = nrsm; 6312 } 6313 rack->r_ctl.rc_tlpsend = rsm; 6314 send: 6315 rack->r_timer_override = 1; 6316 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6317 return (0); 6318 out: 6319 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6320 return (0); 6321 } 6322 6323 /* 6324 * Delayed ack Timer, here we simply need to setup the 6325 * ACK_NOW flag and remove the DELACK flag. From there 6326 * the output routine will send the ack out. 6327 * 6328 * We only return 1, saying don't proceed, if all timers 6329 * are stopped (destroyed PCB?). 6330 */ 6331 static int 6332 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6333 { 6334 if (tp->t_timers->tt_flags & TT_STOPPED) { 6335 return (1); 6336 } 6337 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6338 tp->t_flags &= ~TF_DELACK; 6339 tp->t_flags |= TF_ACKNOW; 6340 KMOD_TCPSTAT_INC(tcps_delack); 6341 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6342 return (0); 6343 } 6344 6345 /* 6346 * Persists timer, here we simply send the 6347 * same thing as a keepalive will. 6348 * the one byte send. 6349 * 6350 * We only return 1, saying don't proceed, if all timers 6351 * are stopped (destroyed PCB?). 6352 */ 6353 static int 6354 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6355 { 6356 struct tcptemp *t_template; 6357 struct inpcb *inp; 6358 int32_t retval = 1; 6359 6360 inp = tp->t_inpcb; 6361 6362 if (tp->t_timers->tt_flags & TT_STOPPED) { 6363 return (1); 6364 } 6365 if (rack->rc_in_persist == 0) 6366 return (0); 6367 if (ctf_progress_timeout_check(tp, false)) { 6368 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6369 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6370 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6371 return (1); 6372 } 6373 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6374 /* 6375 * Persistence timer into zero window. Force a byte to be output, if 6376 * possible. 6377 */ 6378 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6379 /* 6380 * Hack: if the peer is dead/unreachable, we do not time out if the 6381 * window is closed. After a full backoff, drop the connection if 6382 * the idle time (no responses to probes) reaches the maximum 6383 * backoff that we would use if retransmitting. 
6384 */ 6385 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6386 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6387 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6388 KMOD_TCPSTAT_INC(tcps_persistdrop); 6389 retval = 1; 6390 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6391 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6392 goto out; 6393 } 6394 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6395 tp->snd_una == tp->snd_max) 6396 rack_exit_persist(tp, rack, cts); 6397 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6398 /* 6399 * If the user has closed the socket then drop a persisting 6400 * connection after a much reduced timeout. 6401 */ 6402 if (tp->t_state > TCPS_CLOSE_WAIT && 6403 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6404 retval = 1; 6405 KMOD_TCPSTAT_INC(tcps_persistdrop); 6406 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6407 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6408 goto out; 6409 } 6410 t_template = tcpip_maketemplate(rack->rc_inp); 6411 if (t_template) { 6412 /* only set it if we were answered */ 6413 if (rack->forced_ack == 0) { 6414 rack->forced_ack = 1; 6415 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6416 } 6417 tcp_respond(tp, t_template->tt_ipgen, 6418 &t_template->tt_t, (struct mbuf *)NULL, 6419 tp->rcv_nxt, tp->snd_una - 1, 0); 6420 /* This sends an ack */ 6421 if (tp->t_flags & TF_DELACK) 6422 tp->t_flags &= ~TF_DELACK; 6423 free(t_template, M_TEMP); 6424 } 6425 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6426 tp->t_rxtshift++; 6427 out: 6428 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6429 rack_start_hpts_timer(rack, tp, cts, 6430 0, 0, 0); 6431 return (retval); 6432 } 6433 6434 /* 6435 * If a keepalive goes off, we had no other timers 6436 * happening. We always return 1 here since this 6437 * routine either drops the connection or sends 6438 * out a segment with respond. 6439 */ 6440 static int 6441 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6442 { 6443 struct tcptemp *t_template; 6444 struct inpcb *inp; 6445 6446 if (tp->t_timers->tt_flags & TT_STOPPED) { 6447 return (1); 6448 } 6449 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6450 inp = tp->t_inpcb; 6451 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6452 /* 6453 * Keep-alive timer went off; send something or drop connection if 6454 * idle for too long. 6455 */ 6456 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6457 if (tp->t_state < TCPS_ESTABLISHED) 6458 goto dropit; 6459 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6460 tp->t_state <= TCPS_CLOSING) { 6461 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6462 goto dropit; 6463 /* 6464 * Send a packet designed to force a response if the peer is 6465 * up and reachable: either an ACK if the connection is 6466 * still alive, or an RST if the peer has closed the 6467 * connection due to timeout or reboot. Using sequence 6468 * number tp->snd_una-1 causes the transmitted zero-length 6469 * segment to lie outside the receive window; by the 6470 * protocol spec, this requires the correspondent TCP to 6471 * respond. 
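 *
 * (Worked example, sequence numbers invented: if snd_una is 1000
 * and the peer has acknowledged everything below 1000, the probe
 * below carries sequence 999 and no data.  That falls just left
 * of the peer's receive window, so a live peer must answer with
 * an ACK restating its current state, while a peer that lost the
 * connection answers with an RST.)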
6472 */ 6473 KMOD_TCPSTAT_INC(tcps_keepprobe); 6474 t_template = tcpip_maketemplate(inp); 6475 if (t_template) { 6476 if (rack->forced_ack == 0) { 6477 rack->forced_ack = 1; 6478 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6479 } 6480 tcp_respond(tp, t_template->tt_ipgen, 6481 &t_template->tt_t, (struct mbuf *)NULL, 6482 tp->rcv_nxt, tp->snd_una - 1, 0); 6483 free(t_template, M_TEMP); 6484 } 6485 } 6486 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6487 return (1); 6488 dropit: 6489 KMOD_TCPSTAT_INC(tcps_keepdrops); 6490 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6491 tcp_set_inp_to_drop(rack->rc_inp, ETIMEDOUT); 6492 return (1); 6493 } 6494 6495 /* 6496 * Retransmit helper function, clear up all the ack 6497 * flags and take care of important book keeping. 6498 */ 6499 static void 6500 rack_remxt_tmr(struct tcpcb *tp) 6501 { 6502 /* 6503 * The retransmit timer went off, all sack'd blocks must be 6504 * un-acked. 6505 */ 6506 struct rack_sendmap *rsm, *trsm = NULL; 6507 struct tcp_rack *rack; 6508 6509 rack = (struct tcp_rack *)tp->t_fb_ptr; 6510 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6511 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6512 if (rack->r_state && (rack->r_state != tp->t_state)) 6513 rack_set_state(tp, rack); 6514 /* 6515 * Ideally we would like to be able to 6516 * mark SACK-PASS on anything not acked here. 6517 * 6518 * However, if we do that we would burst out 6519 * all that data 1ms apart. This would be unwise, 6520 * so for now we will just let the normal rxt timer 6521 * and tlp timer take care of it. 6522 * 6523 * Also we really need to stick them back in sequence 6524 * order. This way we send in the proper order and any 6525 * sacks that come floating in will "re-ack" the data. 6526 * To do this we zap the tmap with an INIT and then 6527 * walk through and place every rsm in the RB tree 6528 * back in its seq ordered place. 6529 */ 6530 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6531 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6532 rsm->r_dupack = 0; 6533 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6534 /* We must re-add it back to the tlist */ 6535 if (trsm == NULL) { 6536 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6537 } else { 6538 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6539 } 6540 rsm->r_in_tmap = 1; 6541 trsm = rsm; 6542 if (rsm->r_flags & RACK_ACKED) 6543 rsm->r_flags |= RACK_WAS_ACKED; 6544 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS); 6545 } 6546 /* Clear the count (we just un-acked them) */ 6547 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6548 rack->r_ctl.rc_sacked = 0; 6549 rack->r_ctl.rc_sacklast = NULL; 6550 rack->r_ctl.rc_agg_delayed = 0; 6551 rack->r_early = 0; 6552 rack->r_ctl.rc_agg_early = 0; 6553 rack->r_late = 0; 6554 /* Clear the tlp rtx mark */ 6555 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6556 if (rack->r_ctl.rc_resend != NULL) 6557 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6558 rack->r_ctl.rc_prr_sndcnt = 0; 6559 rack_log_to_prr(rack, 6, 0); 6560 rack->r_timer_override = 1; 6561 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6562 #ifdef NETFLIX_EXP_DETECTION 6563 || (rack->sack_attack_disable != 0) 6564 #endif 6565 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6566 /* 6567 * For non-sack customers new data 6568 * needs to go out as retransmits until 6569 * we retransmit up to snd_max. 
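 *
 * (Illustrative note: r_must_retran and rc_out_at_rto below make
 * the output path walk the data that was in flight at RTO time
 * again before genuinely new data is sent; e.g. with snd_una at
 * 1000 and snd_max at 9000 when the timer fired, roughly 8000
 * bytes are re-sent first, matching the comment above.)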
6570 */ 6571 rack->r_must_retran = 1; 6572 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6573 rack->r_ctl.rc_sacked); 6574 } 6575 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6576 } 6577 6578 static void 6579 rack_convert_rtts(struct tcpcb *tp) 6580 { 6581 if (tp->t_srtt > 1) { 6582 uint32_t val, frac; 6583 6584 val = tp->t_srtt >> TCP_RTT_SHIFT; 6585 frac = tp->t_srtt & 0x1f; 6586 tp->t_srtt = TICKS_2_USEC(val); 6587 /* 6588 * frac is the fractional part of the srtt (if any) 6589 * but its in ticks and every bit represents 6590 * 1/32nd of a hz. 6591 */ 6592 if (frac) { 6593 if (hz == 1000) { 6594 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6595 } else { 6596 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6597 } 6598 tp->t_srtt += frac; 6599 } 6600 } 6601 if (tp->t_rttvar) { 6602 uint32_t val, frac; 6603 6604 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6605 frac = tp->t_rttvar & 0x1f; 6606 tp->t_rttvar = TICKS_2_USEC(val); 6607 /* 6608 * frac is the fractional part of the srtt (if any) 6609 * but its in ticks and every bit represents 6610 * 1/32nd of a hz. 6611 */ 6612 if (frac) { 6613 if (hz == 1000) { 6614 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6615 } else { 6616 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6617 } 6618 tp->t_rttvar += frac; 6619 } 6620 } 6621 tp->t_rxtcur = RACK_REXMTVAL(tp); 6622 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6623 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6624 } 6625 if (tp->t_rxtcur > rack_rto_max) { 6626 tp->t_rxtcur = rack_rto_max; 6627 } 6628 } 6629 6630 static void 6631 rack_cc_conn_init(struct tcpcb *tp) 6632 { 6633 struct tcp_rack *rack; 6634 uint32_t srtt; 6635 6636 rack = (struct tcp_rack *)tp->t_fb_ptr; 6637 srtt = tp->t_srtt; 6638 cc_conn_init(tp); 6639 /* 6640 * Now convert to rack's internal format, 6641 * if required. 6642 */ 6643 if ((srtt == 0) && (tp->t_srtt != 0)) 6644 rack_convert_rtts(tp); 6645 /* 6646 * We want a chance to stay in slowstart as 6647 * we create a connection. TCP spec says that 6648 * initially ssthresh is infinite. For our 6649 * purposes that is the snd_wnd. 6650 */ 6651 if (tp->snd_ssthresh < tp->snd_wnd) { 6652 tp->snd_ssthresh = tp->snd_wnd; 6653 } 6654 /* 6655 * We also want to assure a IW worth of 6656 * data can get inflight. 6657 */ 6658 if (rc_init_window(rack) < tp->snd_cwnd) 6659 tp->snd_cwnd = rc_init_window(rack); 6660 } 6661 6662 /* 6663 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6664 * we will setup to retransmit the lowest seq number outstanding. 
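 *
 * (Sketch of the backoff applied below, numbers are examples
 * only: for an established connection the new interval is
 * roughly
 *   rexmt = max(rack_rto_min, srtt + 4 * rttvar) * tcp_backoff[t_rxtshift]
 * so with an srtt of 100ms and an rttvar of 10ms a second
 * consecutive timeout scales a 140ms base by tcp_backoff[2],
 * and RACK_TCPT_RANGESET() then keeps the result from exceeding
 * rack_rto_max.)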
6665 */ 6666 static int 6667 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6668 { 6669 int32_t rexmt; 6670 struct inpcb *inp; 6671 int32_t retval = 0; 6672 bool isipv6; 6673 6674 inp = tp->t_inpcb; 6675 if (tp->t_timers->tt_flags & TT_STOPPED) { 6676 return (1); 6677 } 6678 if (ctf_progress_timeout_check(tp, false)) { 6679 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6680 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6681 tcp_set_inp_to_drop(inp, ETIMEDOUT); 6682 return (1); 6683 } 6684 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6685 rack->r_ctl.retran_during_recovery = 0; 6686 rack->r_ctl.dsack_byte_cnt = 0; 6687 if (IN_FASTRECOVERY(tp->t_flags)) 6688 tp->t_flags |= TF_WASFRECOVERY; 6689 else 6690 tp->t_flags &= ~TF_WASFRECOVERY; 6691 if (IN_CONGRECOVERY(tp->t_flags)) 6692 tp->t_flags |= TF_WASCRECOVERY; 6693 else 6694 tp->t_flags &= ~TF_WASCRECOVERY; 6695 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6696 (tp->snd_una == tp->snd_max)) { 6697 /* Nothing outstanding .. nothing to do */ 6698 return (0); 6699 } 6700 /* 6701 * Rack can only run one timer at a time, so we cannot 6702 * run a KEEPINIT (gating SYN sending) and a retransmit 6703 * timer for the SYN. So if we are in a front state and 6704 * have a KEEPINIT timer we need to check the first transmit 6705 * against now to see if we have exceeded the KEEPINIT time 6706 * (if one is set). 6707 */ 6708 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6709 (TP_KEEPINIT(tp) != 0)) { 6710 struct rack_sendmap *rsm; 6711 6712 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6713 if (rsm) { 6714 /* Ok we have something outstanding to test keepinit with */ 6715 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6716 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6717 /* We have exceeded the KEEPINIT time */ 6718 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6719 goto drop_it; 6720 } 6721 } 6722 } 6723 /* 6724 * Retransmission timer went off. Message has not been acked within 6725 * retransmit interval. Back off to a longer retransmit interval 6726 * and retransmit one segment. 6727 */ 6728 rack_remxt_tmr(tp); 6729 if ((rack->r_ctl.rc_resend == NULL) || 6730 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6731 /* 6732 * If the rwnd collapsed on 6733 * the one we are retransmitting 6734 * it does not count against the 6735 * rxt count. 6736 */ 6737 tp->t_rxtshift++; 6738 } 6739 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6740 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6741 drop_it: 6742 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6743 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6744 retval = 1; 6745 tcp_set_inp_to_drop(rack->rc_inp, 6746 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 6747 goto out; 6748 } 6749 if (tp->t_state == TCPS_SYN_SENT) { 6750 /* 6751 * If the SYN was retransmitted, indicate CWND to be limited 6752 * to 1 segment in cc_conn_init(). 6753 */ 6754 tp->snd_cwnd = 1; 6755 } else if (tp->t_rxtshift == 1) { 6756 /* 6757 * first retransmit; record ssthresh and cwnd so they can be 6758 * recovered if this turns out to be a "bad" retransmit. A 6759 * retransmit is considered "bad" if an ACK for this segment 6760 * is received within RTT/2 interval; the assumption here is 6761 * that the ACK was already in flight. See "On Estimating 6762 * End-to-End Network Path Properties" by Allman and Paxson 6763 * for more details. 
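 *
 * (Illustrative numbers: with an srtt of 40ms, t_badrxtwin below
 * is set roughly 20ms into the future.  An ACK that covers this
 * data and arrives inside that window most likely acknowledges
 * the original transmission, so the snd_cwnd_prev,
 * snd_ssthresh_prev and snd_recover_prev values saved here can
 * be used to undo the reaction to a spurious timeout.)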
6764 */ 6765 tp->snd_cwnd_prev = tp->snd_cwnd; 6766 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6767 tp->snd_recover_prev = tp->snd_recover; 6768 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6769 tp->t_flags |= TF_PREVVALID; 6770 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6771 tp->t_flags &= ~TF_PREVVALID; 6772 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6773 if ((tp->t_state == TCPS_SYN_SENT) || 6774 (tp->t_state == TCPS_SYN_RECEIVED)) 6775 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6776 else 6777 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6778 6779 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6780 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6781 /* 6782 * We enter the path for PLMTUD if connection is established or, if 6783 * connection is FIN_WAIT_1 status, reason for the last is that if 6784 * amount of data we send is very small, we could send it in couple 6785 * of packets and process straight to FIN. In that case we won't 6786 * catch ESTABLISHED state. 6787 */ 6788 #ifdef INET6 6789 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; 6790 #else 6791 isipv6 = false; 6792 #endif 6793 if (((V_tcp_pmtud_blackhole_detect == 1) || 6794 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6795 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6796 ((tp->t_state == TCPS_ESTABLISHED) || 6797 (tp->t_state == TCPS_FIN_WAIT_1))) { 6798 /* 6799 * Idea here is that at each stage of mtu probe (usually, 6800 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6801 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6802 * should take care of that. 6803 */ 6804 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6805 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6806 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6807 tp->t_rxtshift % 2 == 0)) { 6808 /* 6809 * Enter Path MTU Black-hole Detection mechanism: - 6810 * Disable Path MTU Discovery (IP "DF" bit). - 6811 * Reduce MTU to lower value than what we negotiated 6812 * with peer. 6813 */ 6814 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6815 /* Record that we may have found a black hole. */ 6816 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6817 /* Keep track of previous MSS. */ 6818 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6819 } 6820 6821 /* 6822 * Reduce the MSS to blackhole value or to the 6823 * default in an attempt to retransmit. 6824 */ 6825 #ifdef INET6 6826 if (isipv6 && 6827 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6828 /* Use the sysctl tuneable blackhole MSS. */ 6829 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6830 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6831 } else if (isipv6) { 6832 /* Use the default MSS. */ 6833 tp->t_maxseg = V_tcp_v6mssdflt; 6834 /* 6835 * Disable Path MTU Discovery when we switch 6836 * to minmss. 6837 */ 6838 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6839 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6840 } 6841 #endif 6842 #if defined(INET6) && defined(INET) 6843 else 6844 #endif 6845 #ifdef INET 6846 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6847 /* Use the sysctl tuneable blackhole MSS. */ 6848 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6849 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6850 } else { 6851 /* Use the default MSS. */ 6852 tp->t_maxseg = V_tcp_mssdflt; 6853 /* 6854 * Disable Path MTU Discovery when we switch 6855 * to minmss. 
6856 */ 6857 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6858 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6859 } 6860 #endif 6861 } else { 6862 /* 6863 * If further retransmissions are still unsuccessful 6864 * with a lowered MTU, maybe this isn't a blackhole 6865 * and we restore the previous MSS and blackhole 6866 * detection flags. The limit '6' is determined by 6867 * giving each probe stage (1448, 1188, 524) 2 6868 * chances to recover. 6869 */ 6870 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6871 (tp->t_rxtshift >= 6)) { 6872 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6873 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6874 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6875 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6876 } 6877 } 6878 } 6879 /* 6880 * Disable RFC1323 and SACK if we haven't got any response to 6881 * our third SYN to work-around some broken terminal servers 6882 * (most of which have hopefully been retired) that have bad VJ 6883 * header compression code which trashes TCP segments containing 6884 * unknown-to-them TCP options. 6885 */ 6886 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6887 (tp->t_rxtshift == 3)) 6888 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6889 /* 6890 * If we backed off this far, our srtt estimate is probably bogus. 6891 * Clobber it so we'll take the next rtt measurement as our srtt; 6892 * move the current srtt into rttvar to keep the current retransmit 6893 * times until then. 6894 */ 6895 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6896 #ifdef INET6 6897 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 6898 in6_losing(tp->t_inpcb); 6899 else 6900 #endif 6901 in_losing(tp->t_inpcb); 6902 tp->t_rttvar += tp->t_srtt; 6903 tp->t_srtt = 0; 6904 } 6905 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6906 tp->snd_recover = tp->snd_max; 6907 tp->t_flags |= TF_ACKNOW; 6908 tp->t_rtttime = 0; 6909 rack_cong_signal(tp, CC_RTO, tp->snd_una); 6910 out: 6911 return (retval); 6912 } 6913 6914 static int 6915 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling) 6916 { 6917 int32_t ret = 0; 6918 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6919 6920 if (timers == 0) { 6921 return (0); 6922 } 6923 if (tp->t_state == TCPS_LISTEN) { 6924 /* no timers on listen sockets */ 6925 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6926 return (0); 6927 return (1); 6928 } 6929 if ((timers & PACE_TMR_RACK) && 6930 rack->rc_on_min_to) { 6931 /* 6932 * For the rack timer when we 6933 * are on a min-timeout (which means rrr_conf = 3) 6934 * we don't want to check the timer. It may 6935 * be going off for a pace and thats ok we 6936 * want to send the retransmit (if its ready). 6937 * 6938 * If its on a normal rack timer (non-min) then 6939 * we will check if its expired. 6940 */ 6941 goto skip_time_check; 6942 } 6943 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6944 uint32_t left; 6945 6946 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6947 ret = -1; 6948 rack_log_to_processing(rack, cts, ret, 0); 6949 return (0); 6950 } 6951 if (hpts_calling == 0) { 6952 /* 6953 * A user send or queued mbuf (sack) has called us? We 6954 * return 0 and let the pacing guards 6955 * deal with it if they should or 6956 * should not cause a send. 6957 */ 6958 ret = -2; 6959 rack_log_to_processing(rack, cts, ret, 0); 6960 return (0); 6961 } 6962 /* 6963 * Ok our timer went off early and we are not paced false 6964 * alarm, go back to sleep. 
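 *
 * (Illustrative numbers: if rc_timer_exp is 105000us and cts is
 * 101200us, "left" below works out to 3800us and we simply go
 * back onto the hpts wheel for that remaining time instead of
 * running any timer handler now.)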
6965 */ 6966 ret = -3; 6967 left = rack->r_ctl.rc_timer_exp - cts; 6968 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 6969 rack_log_to_processing(rack, cts, ret, left); 6970 return (1); 6971 } 6972 skip_time_check: 6973 rack->rc_tmr_stopped = 0; 6974 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6975 if (timers & PACE_TMR_DELACK) { 6976 ret = rack_timeout_delack(tp, rack, cts); 6977 } else if (timers & PACE_TMR_RACK) { 6978 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6979 rack->r_fast_output = 0; 6980 ret = rack_timeout_rack(tp, rack, cts); 6981 } else if (timers & PACE_TMR_TLP) { 6982 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6983 ret = rack_timeout_tlp(tp, rack, cts); 6984 } else if (timers & PACE_TMR_RXT) { 6985 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6986 rack->r_fast_output = 0; 6987 ret = rack_timeout_rxt(tp, rack, cts); 6988 } else if (timers & PACE_TMR_PERSIT) { 6989 ret = rack_timeout_persist(tp, rack, cts); 6990 } else if (timers & PACE_TMR_KEEP) { 6991 ret = rack_timeout_keepalive(tp, rack, cts); 6992 } 6993 rack_log_to_processing(rack, cts, ret, timers); 6994 return (ret); 6995 } 6996 6997 static void 6998 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6999 { 7000 struct timeval tv; 7001 uint32_t us_cts, flags_on_entry; 7002 uint8_t hpts_removed = 0; 7003 7004 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7005 us_cts = tcp_get_usecs(&tv); 7006 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7007 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7008 ((tp->snd_max - tp->snd_una) == 0))) { 7009 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7010 hpts_removed = 1; 7011 /* If we were not delayed cancel out the flag. */ 7012 if ((tp->snd_max - tp->snd_una) == 0) 7013 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7014 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7015 } 7016 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7017 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7018 if (rack->rc_inp->inp_in_hpts && 7019 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7020 /* 7021 * Canceling timer's when we have no output being 7022 * paced. We also must remove ourselves from the 7023 * hpts. 7024 */ 7025 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 7026 hpts_removed = 1; 7027 } 7028 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7029 } 7030 if (hpts_removed == 0) 7031 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7032 } 7033 7034 static void 7035 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7036 { 7037 return; 7038 } 7039 7040 static int 7041 rack_stopall(struct tcpcb *tp) 7042 { 7043 struct tcp_rack *rack; 7044 rack = (struct tcp_rack *)tp->t_fb_ptr; 7045 rack->t_timers_stopped = 1; 7046 return (0); 7047 } 7048 7049 static void 7050 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7051 { 7052 return; 7053 } 7054 7055 static int 7056 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7057 { 7058 return (0); 7059 } 7060 7061 static void 7062 rack_stop_all_timers(struct tcpcb *tp) 7063 { 7064 struct tcp_rack *rack; 7065 7066 /* 7067 * Assure no timers are running. 
7068 */ 7069 if (tcp_timer_active(tp, TT_PERSIST)) { 7070 /* We enter in persists, set the flag appropriately */ 7071 rack = (struct tcp_rack *)tp->t_fb_ptr; 7072 rack->rc_in_persist = 1; 7073 } 7074 tcp_timer_suspend(tp, TT_PERSIST); 7075 tcp_timer_suspend(tp, TT_REXMT); 7076 tcp_timer_suspend(tp, TT_KEEP); 7077 tcp_timer_suspend(tp, TT_DELACK); 7078 } 7079 7080 static void 7081 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7082 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7083 { 7084 int32_t idx; 7085 uint16_t stripped_flags; 7086 7087 rsm->r_rtr_cnt++; 7088 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7089 rsm->r_dupack = 0; 7090 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7091 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7092 rsm->r_flags |= RACK_OVERMAX; 7093 } 7094 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7095 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7096 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7097 } 7098 idx = rsm->r_rtr_cnt - 1; 7099 rsm->r_tim_lastsent[idx] = ts; 7100 stripped_flags = rsm->r_flags & ~(RACK_SENT_SP|RACK_SENT_FP); 7101 if (rsm->r_flags & RACK_ACKED) { 7102 /* Problably MTU discovery messing with us */ 7103 rsm->r_flags &= ~RACK_ACKED; 7104 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7105 } 7106 if (rsm->r_in_tmap) { 7107 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7108 rsm->r_in_tmap = 0; 7109 } 7110 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7111 rsm->r_in_tmap = 1; 7112 if (rsm->r_flags & RACK_SACK_PASSED) { 7113 /* We have retransmitted due to the SACK pass */ 7114 rsm->r_flags &= ~RACK_SACK_PASSED; 7115 rsm->r_flags |= RACK_WAS_SACKPASS; 7116 } 7117 } 7118 7119 static uint32_t 7120 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7121 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7122 { 7123 /* 7124 * We (re-)transmitted starting at rsm->r_start for some length 7125 * (possibly less than r_end. 7126 */ 7127 struct rack_sendmap *nrsm, *insret; 7128 uint32_t c_end; 7129 int32_t len; 7130 7131 len = *lenp; 7132 c_end = rsm->r_start + len; 7133 if (SEQ_GEQ(c_end, rsm->r_end)) { 7134 /* 7135 * We retransmitted the whole piece or more than the whole 7136 * slopping into the next rsm. 7137 */ 7138 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7139 if (c_end == rsm->r_end) { 7140 *lenp = 0; 7141 return (0); 7142 } else { 7143 int32_t act_len; 7144 7145 /* Hangs over the end return whats left */ 7146 act_len = rsm->r_end - rsm->r_start; 7147 *lenp = (len - act_len); 7148 return (rsm->r_end); 7149 } 7150 /* We don't get out of this block. */ 7151 } 7152 /* 7153 * Here we retransmitted less than the whole thing which means we 7154 * have to split this into what was transmitted and what was not. 7155 */ 7156 nrsm = rack_alloc_full_limit(rack); 7157 if (nrsm == NULL) { 7158 /* 7159 * We can't get memory, so lets not proceed. 7160 */ 7161 *lenp = 0; 7162 return (0); 7163 } 7164 /* 7165 * So here we are going to take the original rsm and make it what we 7166 * retransmitted. nrsm will be the tail portion we did not 7167 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7168 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7169 * 1, 6 and the new piece will be 6, 11. 
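 *
 * (Continuing that example: c_end computed above would be 6, so
 * rack_clone_rsm() below leaves rsm covering [1, 6) and nrsm
 * covering [6, 11), with nrsm taking the slot right after rsm in
 * the tmap.)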
7170 */ 7171 rack_clone_rsm(rack, nrsm, rsm, c_end); 7172 nrsm->r_dupack = 0; 7173 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7174 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7175 #ifdef INVARIANTS 7176 if (insret != NULL) { 7177 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7178 nrsm, insret, rack, rsm); 7179 } 7180 #endif 7181 if (rsm->r_in_tmap) { 7182 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7183 nrsm->r_in_tmap = 1; 7184 } 7185 rsm->r_flags &= (~RACK_HAS_FIN); 7186 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7187 /* Log a split of rsm into rsm and nrsm */ 7188 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7189 *lenp = 0; 7190 return (0); 7191 } 7192 7193 static void 7194 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7195 uint32_t seq_out, uint8_t th_flags, int32_t err, uint64_t cts, 7196 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7197 { 7198 struct tcp_rack *rack; 7199 struct rack_sendmap *rsm, *nrsm, *insret, fe; 7200 register uint32_t snd_max, snd_una; 7201 7202 /* 7203 * Add to the RACK log of packets in flight or retransmitted. If 7204 * there is a TS option we will use the TS echoed, if not we will 7205 * grab a TS. 7206 * 7207 * Retransmissions will increment the count and move the ts to its 7208 * proper place. Note that if options do not include TS's then we 7209 * won't be able to effectively use the ACK for an RTT on a retran. 7210 * 7211 * Notes about r_start and r_end. Lets consider a send starting at 7212 * sequence 1 for 10 bytes. In such an example the r_start would be 7213 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7214 * This means that r_end is actually the first sequence for the next 7215 * slot (11). 7216 * 7217 */ 7218 /* 7219 * If err is set what do we do XXXrrs? should we not add the thing? 7220 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7221 * i.e. proceed with add ** do this for now. 7222 */ 7223 INP_WLOCK_ASSERT(tp->t_inpcb); 7224 if (err) 7225 /* 7226 * We don't log errors -- we could but snd_max does not 7227 * advance in this case either. 7228 */ 7229 return; 7230 7231 if (th_flags & TH_RST) { 7232 /* 7233 * We don't log resets and we return immediately from 7234 * sending 7235 */ 7236 return; 7237 } 7238 rack = (struct tcp_rack *)tp->t_fb_ptr; 7239 snd_una = tp->snd_una; 7240 snd_max = tp->snd_max; 7241 if (th_flags & (TH_SYN | TH_FIN)) { 7242 /* 7243 * The call to rack_log_output is made before bumping 7244 * snd_max. This means we can record one extra byte on a SYN 7245 * or FIN if seq_out is adding more on and a FIN is present 7246 * (and we are not resending). 7247 */ 7248 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7249 len++; 7250 if (th_flags & TH_FIN) 7251 len++; 7252 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7253 /* 7254 * The add/update as not been done for the FIN/SYN 7255 * yet. 7256 */ 7257 snd_max = tp->snd_nxt; 7258 } 7259 } 7260 if (SEQ_LEQ((seq_out + len), snd_una)) { 7261 /* Are sending an old segment to induce an ack (keep-alive)? */ 7262 return; 7263 } 7264 if (SEQ_LT(seq_out, snd_una)) { 7265 /* huh? should we panic? 
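 * (Illustrative numbers: if seq_out is 900, len is 300 and
 * snd_una is 1000, the block below trims the entry so that it
 * starts at 1000 with a length of 200, i.e. only the portion
 * that is still unacknowledged is logged.)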
*/ 7266 uint32_t end; 7267 7268 end = seq_out + len; 7269 seq_out = snd_una; 7270 if (SEQ_GEQ(end, seq_out)) 7271 len = end - seq_out; 7272 else 7273 len = 0; 7274 } 7275 if (len == 0) { 7276 /* We don't log zero window probes */ 7277 return; 7278 } 7279 rack->r_ctl.rc_time_last_sent = cts; 7280 if (IN_FASTRECOVERY(tp->t_flags)) { 7281 rack->r_ctl.rc_prr_out += len; 7282 } 7283 /* First question is it a retransmission or new? */ 7284 if (seq_out == snd_max) { 7285 /* Its new */ 7286 again: 7287 rsm = rack_alloc(rack); 7288 if (rsm == NULL) { 7289 /* 7290 * Hmm out of memory and the tcb got destroyed while 7291 * we tried to wait. 7292 */ 7293 return; 7294 } 7295 if (th_flags & TH_FIN) { 7296 rsm->r_flags = RACK_HAS_FIN|add_flag; 7297 } else { 7298 rsm->r_flags = add_flag; 7299 } 7300 if (hw_tls) 7301 rsm->r_hw_tls = 1; 7302 rsm->r_tim_lastsent[0] = cts; 7303 rsm->r_rtr_cnt = 1; 7304 rsm->r_rtr_bytes = 0; 7305 if (th_flags & TH_SYN) { 7306 /* The data space is one beyond snd_una */ 7307 rsm->r_flags |= RACK_HAS_SYN; 7308 } 7309 rsm->r_start = seq_out; 7310 rsm->r_end = rsm->r_start + len; 7311 rsm->r_dupack = 0; 7312 /* 7313 * save off the mbuf location that 7314 * sndmbuf_noadv returned (which is 7315 * where we started copying from).. 7316 */ 7317 rsm->m = s_mb; 7318 rsm->soff = s_moff; 7319 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7320 if (rsm->m) { 7321 if (rsm->m->m_len <= rsm->soff) { 7322 /* 7323 * XXXrrs Question, will this happen? 7324 * 7325 * If sbsndptr is set at the correct place 7326 * then s_moff should always be somewhere 7327 * within rsm->m. But if the sbsndptr was 7328 * off then that won't be true. If it occurs 7329 * we need to walkout to the correct location. 7330 */ 7331 struct mbuf *lm; 7332 7333 lm = rsm->m; 7334 while (lm->m_len <= rsm->soff) { 7335 rsm->soff -= lm->m_len; 7336 lm = lm->m_next; 7337 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7338 __func__, rack, s_moff, s_mb, rsm->soff)); 7339 } 7340 rsm->m = lm; 7341 counter_u64_add(rack_sbsndptr_wrong, 1); 7342 } else 7343 counter_u64_add(rack_sbsndptr_right, 1); 7344 rsm->orig_m_len = rsm->m->m_len; 7345 } else 7346 rsm->orig_m_len = 0; 7347 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7348 /* Log a new rsm */ 7349 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7350 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7351 #ifdef INVARIANTS 7352 if (insret != NULL) { 7353 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7354 nrsm, insret, rack, rsm); 7355 } 7356 #endif 7357 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7358 rsm->r_in_tmap = 1; 7359 /* 7360 * Special case detection, is there just a single 7361 * packet outstanding when we are not in recovery? 7362 * 7363 * If this is true mark it so. 7364 */ 7365 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7366 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7367 struct rack_sendmap *prsm; 7368 7369 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7370 if (prsm) 7371 prsm->r_one_out_nr = 1; 7372 } 7373 return; 7374 } 7375 /* 7376 * If we reach here its a retransmission and we need to find it. 
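 *
 * (Sketch of the lookup below, sequence numbers invented: a
 * retransmit starting at 5000 is first checked against the
 * caller supplied hintrsm; failing that, fe.r_start is set to
 * 5000 and RB_FIND() is used to locate the map entry for that
 * sequence.  If 5000 lands in the middle of an entry, the entry
 * is split so the update applies to exactly the bytes re-sent.)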
7377 */ 7378 memset(&fe, 0, sizeof(fe)); 7379 more: 7380 if (hintrsm && (hintrsm->r_start == seq_out)) { 7381 rsm = hintrsm; 7382 hintrsm = NULL; 7383 } else { 7384 /* No hints sorry */ 7385 rsm = NULL; 7386 } 7387 if ((rsm) && (rsm->r_start == seq_out)) { 7388 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7389 if (len == 0) { 7390 return; 7391 } else { 7392 goto more; 7393 } 7394 } 7395 /* Ok it was not the last pointer go through it the hard way. */ 7396 refind: 7397 fe.r_start = seq_out; 7398 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7399 if (rsm) { 7400 if (rsm->r_start == seq_out) { 7401 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7402 if (len == 0) { 7403 return; 7404 } else { 7405 goto refind; 7406 } 7407 } 7408 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7409 /* Transmitted within this piece */ 7410 /* 7411 * Ok we must split off the front and then let the 7412 * update do the rest 7413 */ 7414 nrsm = rack_alloc_full_limit(rack); 7415 if (nrsm == NULL) { 7416 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7417 return; 7418 } 7419 /* 7420 * copy rsm to nrsm and then trim the front of rsm 7421 * to not include this part. 7422 */ 7423 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7424 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7425 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7426 #ifdef INVARIANTS 7427 if (insret != NULL) { 7428 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7429 nrsm, insret, rack, rsm); 7430 } 7431 #endif 7432 if (rsm->r_in_tmap) { 7433 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7434 nrsm->r_in_tmap = 1; 7435 } 7436 rsm->r_flags &= (~RACK_HAS_FIN); 7437 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7438 if (len == 0) { 7439 return; 7440 } else if (len > 0) 7441 goto refind; 7442 } 7443 } 7444 /* 7445 * Hmm not found in map did they retransmit both old and on into the 7446 * new? 7447 */ 7448 if (seq_out == tp->snd_max) { 7449 goto again; 7450 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7451 #ifdef INVARIANTS 7452 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7453 seq_out, len, tp->snd_una, tp->snd_max); 7454 printf("Starting Dump of all rack entries\n"); 7455 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7456 printf("rsm:%p start:%u end:%u\n", 7457 rsm, rsm->r_start, rsm->r_end); 7458 } 7459 printf("Dump complete\n"); 7460 panic("seq_out not found rack:%p tp:%p", 7461 rack, tp); 7462 #endif 7463 } else { 7464 #ifdef INVARIANTS 7465 /* 7466 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7467 * flag) 7468 */ 7469 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7470 seq_out, len, tp->snd_max, tp); 7471 #endif 7472 } 7473 } 7474 7475 /* 7476 * Record one of the RTT updates from an ack into 7477 * our sample structure. 
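 *
 * (Illustrative note: one ACK can cover several sendmap entries
 * and therefore call this routine several times; rs_rtt_lowest,
 * rs_rtt_highest, rs_rtt_tot and rs_rtt_cnt accumulate that set
 * so the commit routine can later pick the lowest, highest or
 * average sample, depending on rc_rate_sample_method.)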
7478 */ 7479 7480 static void 7481 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7482 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7483 { 7484 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7485 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7486 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7487 } 7488 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7489 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7490 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7491 } 7492 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7493 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7494 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7495 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7496 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7497 } 7498 if ((confidence == 1) && 7499 ((rsm == NULL) || 7500 (rsm->r_just_ret) || 7501 (rsm->r_one_out_nr && 7502 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7503 /* 7504 * If the rsm had a just return 7505 * hit it then we can't trust the 7506 * rtt measurement for buffer deterimination 7507 * Note that a confidence of 2, indicates 7508 * SACK'd which overrides the r_just_ret or 7509 * the r_one_out_nr. If it was a CUM-ACK and 7510 * we had only two outstanding, but get an 7511 * ack for only 1. Then that also lowers our 7512 * confidence. 7513 */ 7514 confidence = 0; 7515 } 7516 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7517 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7518 if (rack->r_ctl.rack_rs.confidence == 0) { 7519 /* 7520 * We take anything with no current confidence 7521 * saved. 7522 */ 7523 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7524 rack->r_ctl.rack_rs.confidence = confidence; 7525 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7526 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7527 /* 7528 * Once we have a confident number, 7529 * we can update it with a smaller 7530 * value since this confident number 7531 * may include the DSACK time until 7532 * the next segment (the second one) arrived. 7533 */ 7534 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7535 rack->r_ctl.rack_rs.confidence = confidence; 7536 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7537 } 7538 } 7539 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7540 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7541 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7542 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7543 } 7544 7545 /* 7546 * Collect new round-trip time estimate 7547 * and update averages and current timeout. 
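 *
 * (Illustrative numbers: if one ACK produced samples of 40ms,
 * 55ms and 70ms, USE_RTT_LOW feeds 40ms into the smoothing
 * below, USE_RTT_HIGH feeds 70ms and USE_RTT_AVG feeds their
 * 55ms average.)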
7548 */ 7549 static void 7550 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7551 { 7552 int32_t delta; 7553 uint32_t o_srtt, o_var; 7554 int32_t hrtt_up = 0; 7555 int32_t rtt; 7556 7557 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7558 /* No valid sample */ 7559 return; 7560 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7561 /* We are to use the lowest RTT seen in a single ack */ 7562 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7563 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7564 /* We are to use the highest RTT seen in a single ack */ 7565 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7566 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7567 /* We are to use the average RTT seen in a single ack */ 7568 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7569 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7570 } else { 7571 #ifdef INVARIANTS 7572 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7573 #endif 7574 return; 7575 } 7576 if (rtt == 0) 7577 rtt = 1; 7578 if (rack->rc_gp_rtt_set == 0) { 7579 /* 7580 * With no RTT we have to accept 7581 * even one we are not confident of. 7582 */ 7583 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7584 rack->rc_gp_rtt_set = 1; 7585 } else if (rack->r_ctl.rack_rs.confidence) { 7586 /* update the running gp srtt */ 7587 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7588 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7589 } 7590 if (rack->r_ctl.rack_rs.confidence) { 7591 /* 7592 * record the low and high for highly buffered path computation, 7593 * we only do this if we are confident (not a retransmission). 7594 */ 7595 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7596 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7597 hrtt_up = 1; 7598 } 7599 if (rack->rc_highly_buffered == 0) { 7600 /* 7601 * Currently once we declare a path has 7602 * highly buffered there is no going 7603 * back, which may be a problem... 7604 */ 7605 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7606 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7607 rack->r_ctl.rc_highest_us_rtt, 7608 rack->r_ctl.rc_lowest_us_rtt, 7609 RACK_RTTS_SEEHBP); 7610 rack->rc_highly_buffered = 1; 7611 } 7612 } 7613 } 7614 if ((rack->r_ctl.rack_rs.confidence) || 7615 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7616 /* 7617 * If we are highly confident of it <or> it was 7618 * never retransmitted we accept it as the last us_rtt. 7619 */ 7620 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7621 /* The lowest rtt can be set if its was not retransmited */ 7622 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7623 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7624 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7625 rack->r_ctl.rc_lowest_us_rtt = 1; 7626 } 7627 } 7628 o_srtt = tp->t_srtt; 7629 o_var = tp->t_rttvar; 7630 rack = (struct tcp_rack *)tp->t_fb_ptr; 7631 if (tp->t_srtt != 0) { 7632 /* 7633 * We keep a simple srtt in microseconds, like our rtt 7634 * measurement. We don't need to do any tricks with shifting 7635 * etc. Instead we just add in 1/8th of the new measurement 7636 * and subtract out 1/8 of the old srtt. We do the same with 7637 * the variance after finding the absolute value of the 7638 * difference between this sample and the current srtt. 
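 *
 * (Worked example, values invented: with t_srtt at 100000us,
 * t_rttvar at 20000us and a new sample of 60000us, delta is
 * 40000; srtt becomes 100000 - 12500 + 7500 = 95000us and
 * rttvar becomes 20000 - 2500 + 5000 = 22500us, which is exactly
 * the pair of shift-by-3 updates performed below.)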
7639 */ 7640 delta = tp->t_srtt - rtt; 7641 /* Take off 1/8th of the current sRTT */ 7642 tp->t_srtt -= (tp->t_srtt >> 3); 7643 /* Add in 1/8th of the new RTT just measured */ 7644 tp->t_srtt += (rtt >> 3); 7645 if (tp->t_srtt <= 0) 7646 tp->t_srtt = 1; 7647 /* Now lets make the absolute value of the variance */ 7648 if (delta < 0) 7649 delta = -delta; 7650 /* Subtract out 1/8th */ 7651 tp->t_rttvar -= (tp->t_rttvar >> 3); 7652 /* Add in 1/8th of the new variance we just saw */ 7653 tp->t_rttvar += (delta >> 3); 7654 if (tp->t_rttvar <= 0) 7655 tp->t_rttvar = 1; 7656 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7657 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7658 } else { 7659 /* 7660 * No rtt measurement yet - use the unsmoothed rtt. Set the 7661 * variance to half the rtt (so our first retransmit happens 7662 * at 3*rtt). 7663 */ 7664 tp->t_srtt = rtt; 7665 tp->t_rttvar = rtt >> 1; 7666 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7667 } 7668 rack->rc_srtt_measure_made = 1; 7669 KMOD_TCPSTAT_INC(tcps_rttupdated); 7670 tp->t_rttupdated++; 7671 #ifdef STATS 7672 if (rack_stats_gets_ms_rtt == 0) { 7673 /* Send in the microsecond rtt used for rxt timeout purposes */ 7674 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7675 } else if (rack_stats_gets_ms_rtt == 1) { 7676 /* Send in the millisecond rtt used for rxt timeout purposes */ 7677 int32_t ms_rtt; 7678 7679 /* Round up */ 7680 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7681 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7682 } else if (rack_stats_gets_ms_rtt == 2) { 7683 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7684 int32_t ms_rtt; 7685 7686 /* Round up */ 7687 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7688 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7689 } else { 7690 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7691 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7692 } 7693 7694 #endif 7695 /* 7696 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7697 * way we do the smoothing, srtt and rttvar will each average +1/2 7698 * tick of bias. When we compute the retransmit timer, we want 1/2 7699 * tick of rounding and 1 extra tick because of +-1/2 tick 7700 * uncertainty in the firing of the timer. The bias will give us 7701 * exactly the 1.5 tick we need. But, because the bias is 7702 * statistical, we have to test that we don't drop below the minimum 7703 * feasible timer (which is 2 ticks). 7704 */ 7705 tp->t_rxtshift = 0; 7706 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7707 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7708 rack_log_rtt_sample(rack, rtt); 7709 tp->t_softerror = 0; 7710 } 7711 7712 7713 static void 7714 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7715 { 7716 /* 7717 * Apply to filter the inbound us-rtt at us_cts. 
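 *
 * (Illustrative note: rc_gp_min_rtt keeps a filtered minimum of
 * recent us-rtt samples, so old_rtt below is the smallest value
 * currently remembered.  If the new sample undercuts it by more
 * than rack_min_rtt_movement and we are close enough to the end
 * of the normal probe-rtt interval, we may enter probe-rtt early
 * on the theory that a competing flow has just done the same.)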
7718 */ 7719 uint32_t old_rtt; 7720 7721 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7722 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7723 us_rtt, us_cts); 7724 if (rack->r_ctl.last_pacing_time && 7725 rack->rc_gp_dyn_mul && 7726 (rack->r_ctl.last_pacing_time > us_rtt)) 7727 rack->pacing_longer_than_rtt = 1; 7728 else 7729 rack->pacing_longer_than_rtt = 0; 7730 if (old_rtt > us_rtt) { 7731 /* We just hit a new lower rtt time */ 7732 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7733 __LINE__, RACK_RTTS_NEWRTT); 7734 /* 7735 * Only count it if its lower than what we saw within our 7736 * calculated range. 7737 */ 7738 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7739 if (rack_probertt_lower_within && 7740 rack->rc_gp_dyn_mul && 7741 (rack->use_fixed_rate == 0) && 7742 (rack->rc_always_pace)) { 7743 /* 7744 * We are seeing a new lower rtt very close 7745 * to the time that we would have entered probe-rtt. 7746 * This is probably due to the fact that a peer flow 7747 * has entered probe-rtt. Lets go in now too. 7748 */ 7749 uint32_t val; 7750 7751 val = rack_probertt_lower_within * rack_time_between_probertt; 7752 val /= 100; 7753 if ((rack->in_probe_rtt == 0) && 7754 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7755 rack_enter_probertt(rack, us_cts); 7756 } 7757 } 7758 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7759 } 7760 } 7761 } 7762 7763 static int 7764 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7765 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7766 { 7767 int32_t i, all; 7768 uint32_t t, len_acked; 7769 7770 if ((rsm->r_flags & RACK_ACKED) || 7771 (rsm->r_flags & RACK_WAS_ACKED)) 7772 /* Already done */ 7773 return (0); 7774 if (rsm->r_no_rtt_allowed) { 7775 /* Not allowed */ 7776 return (0); 7777 } 7778 if (ack_type == CUM_ACKED) { 7779 if (SEQ_GT(th_ack, rsm->r_end)) { 7780 len_acked = rsm->r_end - rsm->r_start; 7781 all = 1; 7782 } else { 7783 len_acked = th_ack - rsm->r_start; 7784 all = 0; 7785 } 7786 } else { 7787 len_acked = rsm->r_end - rsm->r_start; 7788 all = 0; 7789 } 7790 if (rsm->r_rtr_cnt == 1) { 7791 uint32_t us_rtt; 7792 7793 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7794 if ((int)t <= 0) 7795 t = 1; 7796 if (!tp->t_rttlow || tp->t_rttlow > t) 7797 tp->t_rttlow = t; 7798 if (!rack->r_ctl.rc_rack_min_rtt || 7799 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7800 rack->r_ctl.rc_rack_min_rtt = t; 7801 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7802 rack->r_ctl.rc_rack_min_rtt = 1; 7803 } 7804 } 7805 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7806 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7807 else 7808 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7809 if (us_rtt == 0) 7810 us_rtt = 1; 7811 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7812 if (ack_type == SACKED) { 7813 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7814 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7815 } else { 7816 /* 7817 * We need to setup what our confidence 7818 * is in this ack. 7819 * 7820 * If the rsm was app limited and it is 7821 * less than a mss in length (the end 7822 * of the send) then we have a gap. 
If we 7823 * were app limited but say we were sending 7824 * multiple MSS's then we are more confident 7825 * int it. 7826 * 7827 * When we are not app-limited then we see if 7828 * the rsm is being included in the current 7829 * measurement, we tell this by the app_limited_needs_set 7830 * flag. 7831 * 7832 * Note that being cwnd blocked is not applimited 7833 * as well as the pacing delay between packets which 7834 * are sending only 1 or 2 MSS's also will show up 7835 * in the RTT. We probably need to examine this algorithm 7836 * a bit more and enhance it to account for the delay 7837 * between rsm's. We could do that by saving off the 7838 * pacing delay of each rsm (in an rsm) and then 7839 * factoring that in somehow though for now I am 7840 * not sure how :) 7841 */ 7842 int calc_conf = 0; 7843 7844 if (rsm->r_flags & RACK_APP_LIMITED) { 7845 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7846 calc_conf = 0; 7847 else 7848 calc_conf = 1; 7849 } else if (rack->app_limited_needs_set == 0) { 7850 calc_conf = 1; 7851 } else { 7852 calc_conf = 0; 7853 } 7854 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7855 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7856 calc_conf, rsm, rsm->r_rtr_cnt); 7857 } 7858 if ((rsm->r_flags & RACK_TLP) && 7859 (!IN_FASTRECOVERY(tp->t_flags))) { 7860 /* Segment was a TLP and our retrans matched */ 7861 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7862 rack->r_ctl.rc_rsm_start = tp->snd_max; 7863 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 7864 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 7865 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 7866 } 7867 } 7868 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7869 /* New more recent rack_tmit_time */ 7870 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7871 rack->rc_rack_rtt = t; 7872 } 7873 return (1); 7874 } 7875 /* 7876 * We clear the soft/rxtshift since we got an ack. 7877 * There is no assurance we will call the commit() function 7878 * so we need to clear these to avoid incorrect handling. 7879 */ 7880 tp->t_rxtshift = 0; 7881 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7882 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7883 tp->t_softerror = 0; 7884 if (to && (to->to_flags & TOF_TS) && 7885 (ack_type == CUM_ACKED) && 7886 (to->to_tsecr) && 7887 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7888 /* 7889 * Now which timestamp does it match? In this block the ACK 7890 * must be coming from a previous transmission. 7891 */ 7892 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7893 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7894 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7895 if ((int)t <= 0) 7896 t = 1; 7897 if ((i + 1) < rsm->r_rtr_cnt) { 7898 /* 7899 * The peer ack'd from our previous 7900 * transmission. We have a spurious 7901 * retransmission and thus we dont 7902 * want to update our rack_rtt. 
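 *
 * (Illustrative example: if the rsm was sent twice and the
 * echoed timestamp matches r_tim_lastsent[0] rather than the
 * most recent r_tim_lastsent[1], the peer is acknowledging the
 * first transmission; the retransmit was spurious and the
 * sample is discarded instead of being fed into rack_rtt.)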
7903 */ 7904 return (0); 7905 } 7906 if (!tp->t_rttlow || tp->t_rttlow > t) 7907 tp->t_rttlow = t; 7908 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7909 rack->r_ctl.rc_rack_min_rtt = t; 7910 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7911 rack->r_ctl.rc_rack_min_rtt = 1; 7912 } 7913 } 7914 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7915 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7916 /* New more recent rack_tmit_time */ 7917 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7918 rack->rc_rack_rtt = t; 7919 } 7920 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7921 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7922 rsm->r_rtr_cnt); 7923 return (1); 7924 } 7925 } 7926 goto ts_not_found; 7927 } else { 7928 /* 7929 * Ok its a SACK block that we retransmitted. or a windows 7930 * machine without timestamps. We can tell nothing from the 7931 * time-stamp since its not there or the time the peer last 7932 * recieved a segment that moved forward its cum-ack point. 7933 */ 7934 ts_not_found: 7935 i = rsm->r_rtr_cnt - 1; 7936 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7937 if ((int)t <= 0) 7938 t = 1; 7939 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7940 /* 7941 * We retransmitted and the ack came back in less 7942 * than the smallest rtt we have observed. We most 7943 * likely did an improper retransmit as outlined in 7944 * 6.2 Step 2 point 2 in the rack-draft so we 7945 * don't want to update our rack_rtt. We in 7946 * theory (in future) might want to think about reverting our 7947 * cwnd state but we won't for now. 7948 */ 7949 return (0); 7950 } else if (rack->r_ctl.rc_rack_min_rtt) { 7951 /* 7952 * We retransmitted it and the retransmit did the 7953 * job. 7954 */ 7955 if (!rack->r_ctl.rc_rack_min_rtt || 7956 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7957 rack->r_ctl.rc_rack_min_rtt = t; 7958 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7959 rack->r_ctl.rc_rack_min_rtt = 1; 7960 } 7961 } 7962 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 7963 /* New more recent rack_tmit_time */ 7964 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 7965 rack->rc_rack_rtt = t; 7966 } 7967 return (1); 7968 } 7969 } 7970 return (0); 7971 } 7972 7973 /* 7974 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 7975 */ 7976 static void 7977 rack_log_sack_passed(struct tcpcb *tp, 7978 struct tcp_rack *rack, struct rack_sendmap *rsm) 7979 { 7980 struct rack_sendmap *nrsm; 7981 7982 nrsm = rsm; 7983 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 7984 rack_head, r_tnext) { 7985 if (nrsm == rsm) { 7986 /* Skip orginal segment he is acked */ 7987 continue; 7988 } 7989 if (nrsm->r_flags & RACK_ACKED) { 7990 /* 7991 * Skip ack'd segments, though we 7992 * should not see these, since tmap 7993 * should not have ack'd segments. 7994 */ 7995 continue; 7996 } 7997 if (nrsm->r_flags & RACK_SACK_PASSED) { 7998 /* 7999 * We found one that is already marked 8000 * passed, we have been here before and 8001 * so all others below this are marked. 
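 *
 * (Illustrative walk: if the tmap holds A, B, C, D in send order
 * and D was just sacked, the loop marks C, then B, then A with
 * RACK_SACK_PASSED; a later walk started by another sack can
 * stop at the first entry already carrying the flag, because
 * everything sent before it was marked on the earlier pass.)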
8002 */ 8003 break; 8004 } 8005 nrsm->r_flags |= RACK_SACK_PASSED; 8006 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8007 } 8008 } 8009 8010 static void 8011 rack_need_set_test(struct tcpcb *tp, 8012 struct tcp_rack *rack, 8013 struct rack_sendmap *rsm, 8014 tcp_seq th_ack, 8015 int line, 8016 int use_which) 8017 { 8018 8019 if ((tp->t_flags & TF_GPUTINPROG) && 8020 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8021 /* 8022 * We were app limited, and this ack 8023 * butts up or goes beyond the point where we want 8024 * to start our next measurement. We need 8025 * to record the new gput_ts as here and 8026 * possibly update the start sequence. 8027 */ 8028 uint32_t seq, ts; 8029 8030 if (rsm->r_rtr_cnt > 1) { 8031 /* 8032 * This is a retransmit, can we 8033 * really make any assessment at this 8034 * point? We are not really sure of 8035 * the timestamp, is it this or the 8036 * previous transmission? 8037 * 8038 * Lets wait for something better that 8039 * is not retransmitted. 8040 */ 8041 return; 8042 } 8043 seq = tp->gput_seq; 8044 ts = tp->gput_ts; 8045 rack->app_limited_needs_set = 0; 8046 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8047 /* Do we start at a new end? */ 8048 if ((use_which == RACK_USE_BEG) && 8049 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8050 /* 8051 * When we get an ACK that just eats 8052 * up some of the rsm, we set RACK_USE_BEG 8053 * since whats at r_start (i.e. th_ack) 8054 * is left unacked and thats where the 8055 * measurement not starts. 8056 */ 8057 tp->gput_seq = rsm->r_start; 8058 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8059 } 8060 if ((use_which == RACK_USE_END) && 8061 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8062 /* 8063 * We use the end when the cumack 8064 * is moving forward and completely 8065 * deleting the rsm passed so basically 8066 * r_end holds th_ack. 8067 * 8068 * For SACK's we also want to use the end 8069 * since this piece just got sacked and 8070 * we want to target anything after that 8071 * in our measurement. 8072 */ 8073 tp->gput_seq = rsm->r_end; 8074 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8075 } 8076 if (use_which == RACK_USE_END_OR_THACK) { 8077 /* 8078 * special case for ack moving forward, 8079 * not a sack, we need to move all the 8080 * way up to where this ack cum-ack moves 8081 * to. 8082 */ 8083 if (SEQ_GT(th_ack, rsm->r_end)) 8084 tp->gput_seq = th_ack; 8085 else 8086 tp->gput_seq = rsm->r_end; 8087 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8088 } 8089 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8090 /* 8091 * We moved beyond this guy's range, re-calculate 8092 * the new end point. 8093 */ 8094 if (rack->rc_gp_filled == 0) { 8095 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8096 } else { 8097 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8098 } 8099 } 8100 /* 8101 * We are moving the goal post, we may be able to clear the 8102 * measure_saw_probe_rtt flag. 
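 *
 * (Illustrative reading: the flag is only cleared once gput_seq
 * has moved past rc_probertt_sndmax_atexit, i.e. once the range
 * being measured was sent entirely after probe-rtt ended, so the
 * goodput sample is presumably no longer skewed by the reduced
 * probe-rtt sending rate.)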
8103 */ 8104 if ((rack->in_probe_rtt == 0) && 8105 (rack->measure_saw_probe_rtt) && 8106 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8107 rack->measure_saw_probe_rtt = 0; 8108 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8109 seq, tp->gput_seq, 0, 5, line, NULL); 8110 if (rack->rc_gp_filled && 8111 ((tp->gput_ack - tp->gput_seq) < 8112 max(rc_init_window(rack), (MIN_GP_WIN * 8113 ctf_fixed_maxseg(tp))))) { 8114 uint32_t ideal_amount; 8115 8116 ideal_amount = rack_get_measure_window(tp, rack); 8117 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8118 /* 8119 * There is no sense of continuing this measurement 8120 * because its too small to gain us anything we 8121 * trust. Skip it and that way we can start a new 8122 * measurement quicker. 8123 */ 8124 tp->t_flags &= ~TF_GPUTINPROG; 8125 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8126 0, 0, 0, 6, __LINE__, NULL); 8127 } else { 8128 /* 8129 * Reset the window further out. 8130 */ 8131 tp->gput_ack = tp->gput_seq + ideal_amount; 8132 } 8133 } 8134 } 8135 } 8136 8137 static uint32_t 8138 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8139 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8140 { 8141 uint32_t start, end, changed = 0; 8142 struct rack_sendmap stack_map; 8143 struct rack_sendmap *rsm, *nrsm, fe, *insret, *prev, *next; 8144 int32_t used_ref = 1; 8145 int moved = 0; 8146 8147 start = sack->start; 8148 end = sack->end; 8149 rsm = *prsm; 8150 memset(&fe, 0, sizeof(fe)); 8151 do_rest_ofb: 8152 if ((rsm == NULL) || 8153 (SEQ_LT(end, rsm->r_start)) || 8154 (SEQ_GEQ(start, rsm->r_end)) || 8155 (SEQ_LT(start, rsm->r_start))) { 8156 /* 8157 * We are not in the right spot, 8158 * find the correct spot in the tree. 8159 */ 8160 used_ref = 0; 8161 fe.r_start = start; 8162 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8163 moved++; 8164 } 8165 if (rsm == NULL) { 8166 /* TSNH */ 8167 goto out; 8168 } 8169 /* Ok we have an ACK for some piece of this rsm */ 8170 if (rsm->r_start != start) { 8171 if ((rsm->r_flags & RACK_ACKED) == 0) { 8172 /** 8173 * Need to split this in two pieces the before and after, 8174 * the before remains in the map, the after must be 8175 * added. In other words we have: 8176 * rsm |--------------| 8177 * sackblk |-------> 8178 * rsm will become 8179 * rsm |---| 8180 * and nrsm will be the sacked piece 8181 * nrsm |----------| 8182 * 8183 * But before we start down that path lets 8184 * see if the sack spans over on top of 8185 * the next guy and it is already sacked. 8186 */ 8187 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8188 if (next && (next->r_flags & RACK_ACKED) && 8189 SEQ_GEQ(end, next->r_start)) { 8190 /** 8191 * So the next one is already acked, and 8192 * we can thus by hookery use our stack_map 8193 * to reflect the piece being sacked and 8194 * then adjust the two tree entries moving 8195 * the start and ends around. So we start like: 8196 * rsm |------------| (not-acked) 8197 * next |-----------| (acked) 8198 * sackblk |--------> 8199 * We want to end like so: 8200 * rsm |------| (not-acked) 8201 * next |-----------------| (acked) 8202 * nrsm |-----| 8203 * Where nrsm is a temporary stack piece we 8204 * use to update all the gizmos. 
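				 *
				 * A concrete (made up) instance of the adjustment
				 * described above: rsm covers [1000, 2000) and is
				 * not acked, next covers [2000, 3000) and is already
				 * acked, and the sack block starts at 1500.  The code
				 * below then sets
				 *	rsm->r_end    = 1500   (rsm is now [1000, 1500))
				 *	next->r_start = 1500   (next is now [1500, 3000))
				 *	nrsm (stack)  = [1500, 2000)
				 * and the stack copy is only used to feed
				 * rack_update_rtt() and the sacked-byte accounting for
				 * the 500 newly sacked bytes; it is never inserted
				 * into the tree.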
 */
				/* Copy up our fudge block */
				nrsm = &stack_map;
				memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
				/* Now adjust our tree blocks */
				rsm->r_end = start;
				next->r_start = start;
				/* Now we must adjust back where next->m is */
				rack_setup_offset_for_rsm(rsm, next);

				/* We don't need to adjust rsm, it did not change */
				/* Clear out the dup ack count of the remainder */
				rsm->r_dupack = 0;
				rsm->r_just_ret = 0;
				rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
				/* Now let's make sure our fudge block is right */
				nrsm->r_start = start;
				/* Now let's update all the stats and such */
				rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0);
				if (rack->app_limited_needs_set)
					rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END);
				changed += (nrsm->r_end - nrsm->r_start);
				rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start);
				if (nrsm->r_flags & RACK_SACK_PASSED) {
					counter_u64_add(rack_reorder_seen, 1);
					rack->r_ctl.rc_reorder_ts = cts;
				}
				/*
				 * Now we want to go up from rsm (the
				 * one left un-acked) to the next one
				 * in the tmap. We do this so when
				 * we walk backwards we include marking
				 * sack-passed on rsm (the one passed in
				 * is skipped since it is generally called
				 * on something sacked before removing it
				 * from the tmap).
				 */
				if (rsm->r_in_tmap) {
					nrsm = TAILQ_NEXT(rsm, r_tnext);
					/*
					 * Now that we have the next
					 * one walk backwards from there.
					 */
					if (nrsm && nrsm->r_in_tmap)
						rack_log_sack_passed(tp, rack, nrsm);
				}
				/* Now are we done? */
				if (SEQ_LT(end, next->r_end) ||
				    (end == next->r_end)) {
					/* Done with block */
					goto out;
				}
				rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__);
				counter_u64_add(rack_sack_used_next_merge, 1);
				/* Position for the next block */
				start = next->r_end;
				rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next);
				if (rsm == NULL)
					goto out;
			} else {
				/**
				 * We can't use any hookery here, so we
				 * need to split the map. We enter like
				 * so:
				 *  rsm      |--------|
				 *  sackblk        |----->
				 * We will add the new block nrsm and
				 * that will be the new portion, and then
				 * fall through after resetting rsm. So we
				 * split and look like this:
				 *  rsm      |----|
				 *  sackblk        |----->
				 *  nrsm          |---|
				 * We then fall through resetting
				 * rsm to nrsm, so the next block
				 * picks it up.
				 */
				nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT);
				if (nrsm == NULL) {
					/*
					 * failed XXXrrs what can we do but lose the sack
					 * info?
8287 */ 8288 goto out; 8289 } 8290 counter_u64_add(rack_sack_splits, 1); 8291 rack_clone_rsm(rack, nrsm, rsm, start); 8292 rsm->r_just_ret = 0; 8293 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8294 #ifdef INVARIANTS 8295 if (insret != NULL) { 8296 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8297 nrsm, insret, rack, rsm); 8298 } 8299 #endif 8300 if (rsm->r_in_tmap) { 8301 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8302 nrsm->r_in_tmap = 1; 8303 } 8304 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8305 rsm->r_flags &= (~RACK_HAS_FIN); 8306 /* Position us to point to the new nrsm that starts the sack blk */ 8307 rsm = nrsm; 8308 } 8309 } else { 8310 /* Already sacked this piece */ 8311 counter_u64_add(rack_sack_skipped_acked, 1); 8312 moved++; 8313 if (end == rsm->r_end) { 8314 /* Done with block */ 8315 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8316 goto out; 8317 } else if (SEQ_LT(end, rsm->r_end)) { 8318 /* A partial sack to a already sacked block */ 8319 moved++; 8320 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8321 goto out; 8322 } else { 8323 /* 8324 * The end goes beyond this guy 8325 * repostion the start to the 8326 * next block. 8327 */ 8328 start = rsm->r_end; 8329 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8330 if (rsm == NULL) 8331 goto out; 8332 } 8333 } 8334 } 8335 if (SEQ_GEQ(end, rsm->r_end)) { 8336 /** 8337 * The end of this block is either beyond this guy or right 8338 * at this guy. I.e.: 8339 * rsm --- |-----| 8340 * end |-----| 8341 * <or> 8342 * end |---------| 8343 */ 8344 if ((rsm->r_flags & RACK_ACKED) == 0) { 8345 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8346 changed += (rsm->r_end - rsm->r_start); 8347 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8348 if (rsm->r_in_tmap) /* should be true */ 8349 rack_log_sack_passed(tp, rack, rsm); 8350 /* Is Reordering occuring? */ 8351 if (rsm->r_flags & RACK_SACK_PASSED) { 8352 rsm->r_flags &= ~RACK_SACK_PASSED; 8353 counter_u64_add(rack_reorder_seen, 1); 8354 rack->r_ctl.rc_reorder_ts = cts; 8355 } 8356 if (rack->app_limited_needs_set) 8357 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8358 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8359 rsm->r_flags |= RACK_ACKED; 8360 rsm->r_flags &= ~RACK_TLP; 8361 if (rsm->r_in_tmap) { 8362 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8363 rsm->r_in_tmap = 0; 8364 } 8365 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8366 } else { 8367 counter_u64_add(rack_sack_skipped_acked, 1); 8368 moved++; 8369 } 8370 if (end == rsm->r_end) { 8371 /* This block only - done, setup for next */ 8372 goto out; 8373 } 8374 /* 8375 * There is more not coverend by this rsm move on 8376 * to the next block in the RB tree. 8377 */ 8378 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8379 start = rsm->r_end; 8380 rsm = nrsm; 8381 if (rsm == NULL) 8382 goto out; 8383 goto do_rest_ofb; 8384 } 8385 /** 8386 * The end of this sack block is smaller than 8387 * our rsm i.e.: 8388 * rsm --- |-----| 8389 * end |--| 8390 */ 8391 if ((rsm->r_flags & RACK_ACKED) == 0) { 8392 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8393 if (prev && (prev->r_flags & RACK_ACKED)) { 8394 /** 8395 * Goal, we want the right remainder of rsm to shrink 8396 * in place and span from (rsm->r_start = end) to rsm->r_end. 
8397 * We want to expand prev to go all the way 8398 * to prev->r_end <- end. 8399 * so in the tree we have before: 8400 * prev |--------| (acked) 8401 * rsm |-------| (non-acked) 8402 * sackblk |-| 8403 * We churn it so we end up with 8404 * prev |----------| (acked) 8405 * rsm |-----| (non-acked) 8406 * nrsm |-| (temporary) 8407 */ 8408 nrsm = &stack_map; 8409 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8410 prev->r_end = end; 8411 rsm->r_start = end; 8412 /* Now adjust nrsm (stack copy) to be 8413 * the one that is the small 8414 * piece that was "sacked". 8415 */ 8416 nrsm->r_end = end; 8417 rsm->r_dupack = 0; 8418 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8419 /* 8420 * Now that the rsm has had its start moved forward 8421 * lets go ahead and get its new place in the world. 8422 */ 8423 rack_setup_offset_for_rsm(prev, rsm); 8424 /* 8425 * Now nrsm is our new little piece 8426 * that is acked (which was merged 8427 * to prev). Update the rtt and changed 8428 * based on that. Also check for reordering. 8429 */ 8430 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8431 if (rack->app_limited_needs_set) 8432 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8433 changed += (nrsm->r_end - nrsm->r_start); 8434 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8435 if (nrsm->r_flags & RACK_SACK_PASSED) { 8436 counter_u64_add(rack_reorder_seen, 1); 8437 rack->r_ctl.rc_reorder_ts = cts; 8438 } 8439 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8440 rsm = prev; 8441 counter_u64_add(rack_sack_used_prev_merge, 1); 8442 } else { 8443 /** 8444 * This is the case where our previous 8445 * block is not acked either, so we must 8446 * split the block in two. 8447 */ 8448 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8449 if (nrsm == NULL) { 8450 /* failed rrs what can we do but loose the sack info? */ 8451 goto out; 8452 } 8453 /** 8454 * In this case nrsm becomes 8455 * nrsm->r_start = end; 8456 * nrsm->r_end = rsm->r_end; 8457 * which is un-acked. 8458 * <and> 8459 * rsm->r_end = nrsm->r_start; 8460 * i.e. the remaining un-acked 8461 * piece is left on the left 8462 * hand side. 8463 * 8464 * So we start like this 8465 * rsm |----------| (not acked) 8466 * sackblk |---| 8467 * build it so we have 8468 * rsm |---| (acked) 8469 * nrsm |------| (not acked) 8470 */ 8471 counter_u64_add(rack_sack_splits, 1); 8472 rack_clone_rsm(rack, nrsm, rsm, end); 8473 rsm->r_flags &= (~RACK_HAS_FIN); 8474 rsm->r_just_ret = 0; 8475 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8476 #ifdef INVARIANTS 8477 if (insret != NULL) { 8478 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8479 nrsm, insret, rack, rsm); 8480 } 8481 #endif 8482 if (rsm->r_in_tmap) { 8483 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8484 nrsm->r_in_tmap = 1; 8485 } 8486 nrsm->r_dupack = 0; 8487 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8488 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8489 changed += (rsm->r_end - rsm->r_start); 8490 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8491 if (rsm->r_in_tmap) /* should be true */ 8492 rack_log_sack_passed(tp, rack, rsm); 8493 /* Is Reordering occuring? 
*/ 8494 if (rsm->r_flags & RACK_SACK_PASSED) { 8495 rsm->r_flags &= ~RACK_SACK_PASSED; 8496 counter_u64_add(rack_reorder_seen, 1); 8497 rack->r_ctl.rc_reorder_ts = cts; 8498 } 8499 if (rack->app_limited_needs_set) 8500 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8501 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8502 rsm->r_flags |= RACK_ACKED; 8503 rsm->r_flags &= ~RACK_TLP; 8504 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8505 if (rsm->r_in_tmap) { 8506 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8507 rsm->r_in_tmap = 0; 8508 } 8509 } 8510 } else if (start != end){ 8511 /* 8512 * The block was already acked. 8513 */ 8514 counter_u64_add(rack_sack_skipped_acked, 1); 8515 moved++; 8516 } 8517 out: 8518 if (rsm && (rsm->r_flags & RACK_ACKED)) { 8519 /* 8520 * Now can we merge where we worked 8521 * with either the previous or 8522 * next block? 8523 */ 8524 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8525 while (next) { 8526 if (next->r_flags & RACK_ACKED) { 8527 /* yep this and next can be merged */ 8528 rsm = rack_merge_rsm(rack, rsm, next); 8529 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8530 } else 8531 break; 8532 } 8533 /* Now what about the previous? */ 8534 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8535 while (prev) { 8536 if (prev->r_flags & RACK_ACKED) { 8537 /* yep the previous and this can be merged */ 8538 rsm = rack_merge_rsm(rack, prev, rsm); 8539 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8540 } else 8541 break; 8542 } 8543 } 8544 if (used_ref == 0) { 8545 counter_u64_add(rack_sack_proc_all, 1); 8546 } else { 8547 counter_u64_add(rack_sack_proc_short, 1); 8548 } 8549 /* Save off the next one for quick reference. */ 8550 if (rsm) 8551 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8552 else 8553 nrsm = NULL; 8554 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8555 /* Pass back the moved. */ 8556 *moved_two = moved; 8557 return (changed); 8558 } 8559 8560 static void inline 8561 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8562 { 8563 struct rack_sendmap *tmap; 8564 8565 tmap = NULL; 8566 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8567 /* Its no longer sacked, mark it so */ 8568 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8569 #ifdef INVARIANTS 8570 if (rsm->r_in_tmap) { 8571 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8572 rack, rsm, rsm->r_flags); 8573 } 8574 #endif 8575 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8576 /* Rebuild it into our tmap */ 8577 if (tmap == NULL) { 8578 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8579 tmap = rsm; 8580 } else { 8581 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8582 tmap = rsm; 8583 } 8584 tmap->r_in_tmap = 1; 8585 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8586 } 8587 /* 8588 * Now lets possibly clear the sack filter so we start 8589 * recognizing sacks that cover this area. 
8590 */ 8591 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8592 8593 } 8594 8595 static void 8596 rack_do_decay(struct tcp_rack *rack) 8597 { 8598 struct timeval res; 8599 8600 #define timersub(tvp, uvp, vvp) \ 8601 do { \ 8602 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8603 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8604 if ((vvp)->tv_usec < 0) { \ 8605 (vvp)->tv_sec--; \ 8606 (vvp)->tv_usec += 1000000; \ 8607 } \ 8608 } while (0) 8609 8610 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8611 #undef timersub 8612 8613 rack->r_ctl.input_pkt++; 8614 if ((rack->rc_in_persist) || 8615 (res.tv_sec >= 1) || 8616 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8617 /* 8618 * Check for decay of non-SAD, 8619 * we want all SAD detection metrics to 8620 * decay 1/4 per second (or more) passed. 8621 */ 8622 uint32_t pkt_delta; 8623 8624 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8625 /* Update our saved tracking values */ 8626 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8627 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8628 /* Now do we escape without decay? */ 8629 #ifdef NETFLIX_EXP_DETECTION 8630 if (rack->rc_in_persist || 8631 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8632 (pkt_delta < tcp_sad_low_pps)){ 8633 /* 8634 * We don't decay idle connections 8635 * or ones that have a low input pps. 8636 */ 8637 return; 8638 } 8639 /* Decay the counters */ 8640 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8641 tcp_sad_decay_val); 8642 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8643 tcp_sad_decay_val); 8644 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8645 tcp_sad_decay_val); 8646 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8647 tcp_sad_decay_val); 8648 #endif 8649 } 8650 } 8651 8652 static void 8653 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8654 { 8655 struct rack_sendmap *rsm, *rm; 8656 8657 /* 8658 * The ACK point is advancing to th_ack, we must drop off 8659 * the packets in the rack log and calculate any eligble 8660 * RTT's. 8661 */ 8662 rack->r_wanted_output = 1; 8663 more: 8664 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8665 if (rsm == NULL) { 8666 if ((th_ack - 1) == tp->iss) { 8667 /* 8668 * For the SYN incoming case we will not 8669 * have called tcp_output for the sending of 8670 * the SYN, so there will be no map. All 8671 * other cases should probably be a panic. 8672 */ 8673 return; 8674 } 8675 if (tp->t_flags & TF_SENTFIN) { 8676 /* if we sent a FIN we often will not have map */ 8677 return; 8678 } 8679 #ifdef INVARIANTS 8680 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8681 tp, 8682 tp->t_state, th_ack, rack, 8683 tp->snd_una, tp->snd_max, tp->snd_nxt); 8684 #endif 8685 return; 8686 } 8687 if (SEQ_LT(th_ack, rsm->r_start)) { 8688 /* Huh map is missing this */ 8689 #ifdef INVARIANTS 8690 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 8691 rsm->r_start, 8692 th_ack, tp->t_state, rack->r_state); 8693 #endif 8694 return; 8695 } 8696 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 8697 /* Now do we consume the whole thing? */ 8698 if (SEQ_GEQ(th_ack, rsm->r_end)) { 8699 /* Its all consumed. 
*/ 8700 uint32_t left; 8701 uint8_t newly_acked; 8702 8703 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8704 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8705 rsm->r_rtr_bytes = 0; 8706 /* Record the time of highest cumack sent */ 8707 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8708 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8709 #ifdef INVARIANTS 8710 if (rm != rsm) { 8711 panic("removing head in rack:%p rsm:%p rm:%p", 8712 rack, rsm, rm); 8713 } 8714 #endif 8715 if (rsm->r_in_tmap) { 8716 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8717 rsm->r_in_tmap = 0; 8718 } 8719 newly_acked = 1; 8720 if (rsm->r_flags & RACK_ACKED) { 8721 /* 8722 * It was acked on the scoreboard -- remove 8723 * it from total 8724 */ 8725 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8726 newly_acked = 0; 8727 } else if (rsm->r_flags & RACK_SACK_PASSED) { 8728 /* 8729 * There are segments ACKED on the 8730 * scoreboard further up. We are seeing 8731 * reordering. 8732 */ 8733 rsm->r_flags &= ~RACK_SACK_PASSED; 8734 counter_u64_add(rack_reorder_seen, 1); 8735 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8736 rsm->r_flags |= RACK_ACKED; 8737 rack->r_ctl.rc_reorder_ts = cts; 8738 if (rack->r_ent_rec_ns) { 8739 /* 8740 * We have sent no more, and we saw an sack 8741 * then ack arrive. 8742 */ 8743 rack->r_might_revert = 1; 8744 } 8745 } 8746 if ((rsm->r_flags & RACK_TO_REXT) && 8747 (tp->t_flags & TF_RCVD_TSTMP) && 8748 (to->to_flags & TOF_TS) && 8749 (tp->t_flags & TF_PREVVALID)) { 8750 /* 8751 * We can use the timestamp to see 8752 * if this retransmission was from the 8753 * first transmit. If so we made a mistake. 8754 */ 8755 tp->t_flags &= ~TF_PREVVALID; 8756 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 8757 /* The first transmit is what this ack is for */ 8758 rack_cong_signal(tp, CC_RTO_ERR, th_ack); 8759 } 8760 } 8761 left = th_ack - rsm->r_end; 8762 if (rack->app_limited_needs_set && newly_acked) 8763 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 8764 /* Free back to zone */ 8765 rack_free(rack, rsm); 8766 if (left) { 8767 goto more; 8768 } 8769 /* Check for reneging */ 8770 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8771 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 8772 /* 8773 * The peer has moved snd_una up to 8774 * the edge of this send, i.e. one 8775 * that it had previously acked. The only 8776 * way that can be true if the peer threw 8777 * away data (space issues) that it had 8778 * previously sacked (else it would have 8779 * given us snd_una up to (rsm->r_end). 8780 * We need to undo the acked markings here. 8781 * 8782 * Note we have to look to make sure th_ack is 8783 * our rsm->r_start in case we get an old ack 8784 * where th_ack is behind snd_una. 8785 */ 8786 rack_peer_reneges(rack, rsm, th_ack); 8787 } 8788 return; 8789 } 8790 if (rsm->r_flags & RACK_ACKED) { 8791 /* 8792 * It was acked on the scoreboard -- remove it from 8793 * total for the part being cum-acked. 8794 */ 8795 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 8796 } 8797 /* 8798 * Clear the dup ack count for 8799 * the piece that remains. 8800 */ 8801 rsm->r_dupack = 0; 8802 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8803 if (rsm->r_rtr_bytes) { 8804 /* 8805 * It was retransmitted adjust the 8806 * sack holes for what was acked. 
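		 *
		 * A sketch of the trim that follows (hypothetical values): the rsm
		 * spans [5000, 10000) and th_ack is 8000, so 3000 of its bytes are
		 * now cum-acked.  The rsm is trimmed in place to start at 8000 and
		 * its socket-buffer offset (soff) is advanced by those same 3000
		 * bytes; if that pushes soff past the end of the current mbuf, the
		 * walk further down steps forward, roughly
		 *	while (soff >= m->m_len) {
		 *		soff -= m->m_len;
		 *		m = m->m_next;
		 *	}
		 * until soff again lands inside an mbuf.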
8807 */ 8808 int ack_am; 8809 8810 ack_am = (th_ack - rsm->r_start); 8811 if (ack_am >= rsm->r_rtr_bytes) { 8812 rack->r_ctl.rc_holes_rxt -= ack_am; 8813 rsm->r_rtr_bytes -= ack_am; 8814 } 8815 } 8816 /* 8817 * Update where the piece starts and record 8818 * the time of send of highest cumack sent. 8819 */ 8820 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8821 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 8822 /* Now we need to move our offset forward too */ 8823 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 8824 /* Fix up the orig_m_len and possibly the mbuf offset */ 8825 rack_adjust_orig_mlen(rsm); 8826 } 8827 rsm->soff += (th_ack - rsm->r_start); 8828 rsm->r_start = th_ack; 8829 /* Now do we need to move the mbuf fwd too? */ 8830 if (rsm->m) { 8831 while (rsm->soff >= rsm->m->m_len) { 8832 rsm->soff -= rsm->m->m_len; 8833 rsm->m = rsm->m->m_next; 8834 KASSERT((rsm->m != NULL), 8835 (" nrsm:%p hit at soff:%u null m", 8836 rsm, rsm->soff)); 8837 } 8838 rsm->orig_m_len = rsm->m->m_len; 8839 } 8840 if (rack->app_limited_needs_set) 8841 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 8842 } 8843 8844 static void 8845 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 8846 { 8847 struct rack_sendmap *rsm; 8848 int sack_pass_fnd = 0; 8849 8850 if (rack->r_might_revert) { 8851 /* 8852 * Ok we have reordering, have not sent anything, we 8853 * might want to revert the congestion state if nothing 8854 * further has SACK_PASSED on it. Lets check. 8855 * 8856 * We also get here when we have DSACKs come in for 8857 * all the data that we FR'd. Note that a rxt or tlp 8858 * timer clears this from happening. 8859 */ 8860 8861 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 8862 if (rsm->r_flags & RACK_SACK_PASSED) { 8863 sack_pass_fnd = 1; 8864 break; 8865 } 8866 } 8867 if (sack_pass_fnd == 0) { 8868 /* 8869 * We went into recovery 8870 * incorrectly due to reordering! 8871 */ 8872 int orig_cwnd; 8873 8874 rack->r_ent_rec_ns = 0; 8875 orig_cwnd = tp->snd_cwnd; 8876 tp->snd_cwnd = rack->r_ctl.rc_cwnd_at_erec; 8877 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 8878 tp->snd_recover = tp->snd_una; 8879 rack_log_to_prr(rack, 14, orig_cwnd); 8880 EXIT_RECOVERY(tp->t_flags); 8881 } 8882 rack->r_might_revert = 0; 8883 } 8884 } 8885 8886 #ifdef NETFLIX_EXP_DETECTION 8887 static void 8888 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 8889 { 8890 if ((rack->do_detection || tcp_force_detection) && 8891 tcp_sack_to_ack_thresh && 8892 tcp_sack_to_move_thresh && 8893 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 8894 /* 8895 * We have thresholds set to find 8896 * possible attackers and disable sack. 8897 * Check them. 
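		 *
		 * A worked example with made-up counters: if sack_count is 12000
		 * and ack_count is 3000 then
		 *	ackratio  = (12000 * 1000) / 3000 = 4000
		 * and if sack_moved_extra is 900 with sack_noextra_move at 100 then
		 *	movetotal = 900 + 100 = 1000
		 *	moveratio = (900 * 1000) / 1000 = 900
		 * Both ratios are compared against tcp_sack_to_ack_thresh and
		 * tcp_sack_to_move_thresh below; only when both exceed their
		 * threshold is sack processing disabled and the cwnd clamped to
		 * the current flight size.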
8898 */ 8899 uint64_t ackratio, moveratio, movetotal; 8900 8901 /* Log detecting */ 8902 rack_log_sad(rack, 1); 8903 ackratio = (uint64_t)(rack->r_ctl.sack_count); 8904 ackratio *= (uint64_t)(1000); 8905 if (rack->r_ctl.ack_count) 8906 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 8907 else { 8908 /* We really should not hit here */ 8909 ackratio = 1000; 8910 } 8911 if ((rack->sack_attack_disable == 0) && 8912 (ackratio > rack_highest_sack_thresh_seen)) 8913 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 8914 movetotal = rack->r_ctl.sack_moved_extra; 8915 movetotal += rack->r_ctl.sack_noextra_move; 8916 moveratio = rack->r_ctl.sack_moved_extra; 8917 moveratio *= (uint64_t)1000; 8918 if (movetotal) 8919 moveratio /= movetotal; 8920 else { 8921 /* No moves, thats pretty good */ 8922 moveratio = 0; 8923 } 8924 if ((rack->sack_attack_disable == 0) && 8925 (moveratio > rack_highest_move_thresh_seen)) 8926 rack_highest_move_thresh_seen = (uint32_t)moveratio; 8927 if (rack->sack_attack_disable == 0) { 8928 if ((ackratio > tcp_sack_to_ack_thresh) && 8929 (moveratio > tcp_sack_to_move_thresh)) { 8930 /* Disable sack processing */ 8931 rack->sack_attack_disable = 1; 8932 if (rack->r_rep_attack == 0) { 8933 rack->r_rep_attack = 1; 8934 counter_u64_add(rack_sack_attacks_detected, 1); 8935 } 8936 if (tcp_attack_on_turns_on_logging) { 8937 /* 8938 * Turn on logging, used for debugging 8939 * false positives. 8940 */ 8941 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 8942 } 8943 /* Clamp the cwnd at flight size */ 8944 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 8945 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 8946 rack_log_sad(rack, 2); 8947 } 8948 } else { 8949 /* We are sack-disabled check for false positives */ 8950 if ((ackratio <= tcp_restoral_thresh) || 8951 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 8952 rack->sack_attack_disable = 0; 8953 rack_log_sad(rack, 3); 8954 /* Restart counting */ 8955 rack->r_ctl.sack_count = 0; 8956 rack->r_ctl.sack_moved_extra = 0; 8957 rack->r_ctl.sack_noextra_move = 1; 8958 rack->r_ctl.ack_count = max(1, 8959 (bytes_this_ack / segsiz)); 8960 8961 if (rack->r_rep_reverse == 0) { 8962 rack->r_rep_reverse = 1; 8963 counter_u64_add(rack_sack_attacks_reversed, 1); 8964 } 8965 /* Restore the cwnd */ 8966 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 8967 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 8968 } 8969 } 8970 } 8971 } 8972 #endif 8973 8974 static void 8975 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 8976 { 8977 8978 uint32_t am; 8979 8980 if (SEQ_GT(end, start)) 8981 am = end - start; 8982 else 8983 am = 0; 8984 /* 8985 * We keep track of how many DSACK blocks we get 8986 * after a recovery incident. 8987 */ 8988 rack->r_ctl.dsack_byte_cnt += am; 8989 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 8990 rack->r_ctl.retran_during_recovery && 8991 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 8992 /* 8993 * False recovery most likely culprit is reordering. If 8994 * nothing else is missing we need to revert. 
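		 *
		 * For example (hypothetical byte counts): if the recovery episode
		 * retransmitted 4380 bytes (retran_during_recovery) and DSACK
		 * blocks have now reported 4380 or more duplicate bytes
		 * (dsack_byte_cnt), then everything we retransmitted was data the
		 * peer already had, the recovery was almost certainly spurious,
		 * and the congestion state is handed to rack_handle_might_revert()
		 * to be undone.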
8995 */ 8996 rack->r_might_revert = 1; 8997 rack_handle_might_revert(rack->rc_tp, rack); 8998 rack->r_might_revert = 0; 8999 rack->r_ctl.retran_during_recovery = 0; 9000 rack->r_ctl.dsack_byte_cnt = 0; 9001 } 9002 } 9003 9004 static void 9005 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9006 { 9007 /* Deal with changed and PRR here (in recovery only) */ 9008 uint32_t pipe, snd_una; 9009 9010 rack->r_ctl.rc_prr_delivered += changed; 9011 9012 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9013 /* 9014 * It is all outstanding, we are application limited 9015 * and thus we don't need more room to send anything. 9016 * Note we use tp->snd_una here and not th_ack because 9017 * the data as yet not been cut from the sb. 9018 */ 9019 rack->r_ctl.rc_prr_sndcnt = 0; 9020 return; 9021 } 9022 /* Compute prr_sndcnt */ 9023 if (SEQ_GT(tp->snd_una, th_ack)) { 9024 snd_una = tp->snd_una; 9025 } else { 9026 snd_una = th_ack; 9027 } 9028 pipe = ((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt; 9029 if (pipe > tp->snd_ssthresh) { 9030 long sndcnt; 9031 9032 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9033 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9034 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9035 else { 9036 rack->r_ctl.rc_prr_sndcnt = 0; 9037 rack_log_to_prr(rack, 9, 0); 9038 sndcnt = 0; 9039 } 9040 sndcnt++; 9041 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9042 sndcnt -= rack->r_ctl.rc_prr_out; 9043 else 9044 sndcnt = 0; 9045 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9046 rack_log_to_prr(rack, 10, 0); 9047 } else { 9048 uint32_t limit; 9049 9050 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9051 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9052 else 9053 limit = 0; 9054 if (changed > limit) 9055 limit = changed; 9056 limit += ctf_fixed_maxseg(tp); 9057 if (tp->snd_ssthresh > pipe) { 9058 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9059 rack_log_to_prr(rack, 11, 0); 9060 } else { 9061 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9062 rack_log_to_prr(rack, 12, 0); 9063 } 9064 } 9065 } 9066 9067 static void 9068 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9069 { 9070 uint32_t changed; 9071 struct tcp_rack *rack; 9072 struct rack_sendmap *rsm; 9073 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9074 register uint32_t th_ack; 9075 int32_t i, j, k, num_sack_blks = 0; 9076 uint32_t cts, acked, ack_point, sack_changed = 0; 9077 int loop_start = 0, moved_two = 0; 9078 uint32_t tsused; 9079 9080 9081 INP_WLOCK_ASSERT(tp->t_inpcb); 9082 if (th->th_flags & TH_RST) { 9083 /* We don't log resets */ 9084 return; 9085 } 9086 rack = (struct tcp_rack *)tp->t_fb_ptr; 9087 cts = tcp_get_usecs(NULL); 9088 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9089 changed = 0; 9090 th_ack = th->th_ack; 9091 if (rack->sack_attack_disable == 0) 9092 rack_do_decay(rack); 9093 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9094 /* 9095 * You only get credit for 9096 * MSS and greater (and you get extra 9097 * credit for larger cum-ack moves). 9098 */ 9099 int ac; 9100 9101 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9102 rack->r_ctl.ack_count += ac; 9103 counter_u64_add(rack_ack_total, ac); 9104 } 9105 if (rack->r_ctl.ack_count > 0xfff00000) { 9106 /* 9107 * reduce the number to keep us under 9108 * a uint32_t. 
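		 *
		 * (As an aside, referring back to rack_update_prr() above, a worked
		 * example with made-up values: ssthresh = 29200,
		 * prr_delivered = 14600, prr_recovery_fs = 58400, prr_out = 2920
		 * and pipe = 43800, which is above ssthresh.  Then
		 *	sndcnt = (14600 * 29200) / 58400 + 1 - 2920 = 4381
		 * bytes may be sent, the proportional-rate-reduction behaviour
		 * described in RFC 6937; when pipe is at or below ssthresh the
		 * other branch instead grows toward ssthresh, limited by what has
		 * been delivered but not yet sent plus one extra MSS.)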
9109 */ 9110 rack->r_ctl.ack_count /= 2; 9111 rack->r_ctl.sack_count /= 2; 9112 } 9113 if (SEQ_GT(th_ack, tp->snd_una)) { 9114 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9115 tp->t_acktime = ticks; 9116 } 9117 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9118 changed = th_ack - rsm->r_start; 9119 if (changed) { 9120 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9121 } 9122 if ((to->to_flags & TOF_SACK) == 0) { 9123 /* We are done nothing left and no sack. */ 9124 rack_handle_might_revert(tp, rack); 9125 /* 9126 * For cases where we struck a dup-ack 9127 * with no SACK, add to the changes so 9128 * PRR will work right. 9129 */ 9130 if (dup_ack_struck && (changed == 0)) { 9131 changed += ctf_fixed_maxseg(rack->rc_tp); 9132 } 9133 goto out; 9134 } 9135 /* Sack block processing */ 9136 if (SEQ_GT(th_ack, tp->snd_una)) 9137 ack_point = th_ack; 9138 else 9139 ack_point = tp->snd_una; 9140 for (i = 0; i < to->to_nsacks; i++) { 9141 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9142 &sack, sizeof(sack)); 9143 sack.start = ntohl(sack.start); 9144 sack.end = ntohl(sack.end); 9145 if (SEQ_GT(sack.end, sack.start) && 9146 SEQ_GT(sack.start, ack_point) && 9147 SEQ_LT(sack.start, tp->snd_max) && 9148 SEQ_GT(sack.end, ack_point) && 9149 SEQ_LEQ(sack.end, tp->snd_max)) { 9150 sack_blocks[num_sack_blks] = sack; 9151 num_sack_blks++; 9152 #ifdef NETFLIX_STATS 9153 } else if (SEQ_LEQ(sack.start, th_ack) && 9154 SEQ_LEQ(sack.end, th_ack)) { 9155 /* 9156 * Its a D-SACK block. 9157 */ 9158 tcp_record_dsack(sack.start, sack.end); 9159 #endif 9160 rack_note_dsack(rack, sack.start, sack.end); 9161 } 9162 } 9163 /* 9164 * Sort the SACK blocks so we can update the rack scoreboard with 9165 * just one pass. 9166 */ 9167 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9168 num_sack_blks, th->th_ack); 9169 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9170 if (num_sack_blks == 0) { 9171 /* Nothing to sack (DSACKs?) */ 9172 goto out_with_totals; 9173 } 9174 if (num_sack_blks < 2) { 9175 /* Only one, we don't need to sort */ 9176 goto do_sack_work; 9177 } 9178 /* Sort the sacks */ 9179 for (i = 0; i < num_sack_blks; i++) { 9180 for (j = i + 1; j < num_sack_blks; j++) { 9181 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9182 sack = sack_blocks[i]; 9183 sack_blocks[i] = sack_blocks[j]; 9184 sack_blocks[j] = sack; 9185 } 9186 } 9187 } 9188 /* 9189 * Now are any of the sack block ends the same (yes some 9190 * implementations send these)? 9191 */ 9192 again: 9193 if (num_sack_blks == 0) 9194 goto out_with_totals; 9195 if (num_sack_blks > 1) { 9196 for (i = 0; i < num_sack_blks; i++) { 9197 for (j = i + 1; j < num_sack_blks; j++) { 9198 if (sack_blocks[i].end == sack_blocks[j].end) { 9199 /* 9200 * Ok these two have the same end we 9201 * want the smallest end and then 9202 * throw away the larger and start 9203 * again. 9204 */ 9205 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9206 /* 9207 * The second block covers 9208 * more area use that 9209 */ 9210 sack_blocks[i].start = sack_blocks[j].start; 9211 } 9212 /* 9213 * Now collapse out the dup-sack and 9214 * lower the count 9215 */ 9216 for (k = (j + 1); k < num_sack_blks; k++) { 9217 sack_blocks[j].start = sack_blocks[k].start; 9218 sack_blocks[j].end = sack_blocks[k].end; 9219 j++; 9220 } 9221 num_sack_blks--; 9222 goto again; 9223 } 9224 } 9225 } 9226 } 9227 do_sack_work: 9228 /* 9229 * First lets look to see if 9230 * we have retransmitted and 9231 * can use the transmit next? 
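	 *
	 * (A quick example of the duplicate-end collapse above, with made-up
	 * blocks: {1000, 3000} and {2000, 3000} share the same end, so the
	 * entry covering more area, the one starting at 1000, is kept, the
	 * other is squeezed out, and num_sack_blks drops from 2 to 1 before
	 * the blocks are walked.)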
9232 */ 9233 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9234 if (rsm && 9235 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9236 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9237 /* 9238 * We probably did the FR and the next 9239 * SACK in continues as we would expect. 9240 */ 9241 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9242 if (acked) { 9243 rack->r_wanted_output = 1; 9244 changed += acked; 9245 sack_changed += acked; 9246 } 9247 if (num_sack_blks == 1) { 9248 /* 9249 * This is what we would expect from 9250 * a normal implementation to happen 9251 * after we have retransmitted the FR, 9252 * i.e the sack-filter pushes down 9253 * to 1 block and the next to be retransmitted 9254 * is the sequence in the sack block (has more 9255 * are acked). Count this as ACK'd data to boost 9256 * up the chances of recovering any false positives. 9257 */ 9258 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9259 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9260 counter_u64_add(rack_express_sack, 1); 9261 if (rack->r_ctl.ack_count > 0xfff00000) { 9262 /* 9263 * reduce the number to keep us under 9264 * a uint32_t. 9265 */ 9266 rack->r_ctl.ack_count /= 2; 9267 rack->r_ctl.sack_count /= 2; 9268 } 9269 goto out_with_totals; 9270 } else { 9271 /* 9272 * Start the loop through the 9273 * rest of blocks, past the first block. 9274 */ 9275 moved_two = 0; 9276 loop_start = 1; 9277 } 9278 } 9279 /* Its a sack of some sort */ 9280 rack->r_ctl.sack_count++; 9281 if (rack->r_ctl.sack_count > 0xfff00000) { 9282 /* 9283 * reduce the number to keep us under 9284 * a uint32_t. 9285 */ 9286 rack->r_ctl.ack_count /= 2; 9287 rack->r_ctl.sack_count /= 2; 9288 } 9289 counter_u64_add(rack_sack_total, 1); 9290 if (rack->sack_attack_disable) { 9291 /* An attacker disablement is in place */ 9292 if (num_sack_blks > 1) { 9293 rack->r_ctl.sack_count += (num_sack_blks - 1); 9294 rack->r_ctl.sack_moved_extra++; 9295 counter_u64_add(rack_move_some, 1); 9296 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9297 rack->r_ctl.sack_moved_extra /= 2; 9298 rack->r_ctl.sack_noextra_move /= 2; 9299 } 9300 } 9301 goto out; 9302 } 9303 rsm = rack->r_ctl.rc_sacklast; 9304 for (i = loop_start; i < num_sack_blks; i++) { 9305 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9306 if (acked) { 9307 rack->r_wanted_output = 1; 9308 changed += acked; 9309 sack_changed += acked; 9310 } 9311 if (moved_two) { 9312 /* 9313 * If we did not get a SACK for at least a MSS and 9314 * had to move at all, or if we moved more than our 9315 * threshold, it counts against the "extra" move. 9316 */ 9317 rack->r_ctl.sack_moved_extra += moved_two; 9318 counter_u64_add(rack_move_some, 1); 9319 } else { 9320 /* 9321 * else we did not have to move 9322 * any more than we would expect. 9323 */ 9324 rack->r_ctl.sack_noextra_move++; 9325 counter_u64_add(rack_move_none, 1); 9326 } 9327 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9328 /* 9329 * If the SACK was not a full MSS then 9330 * we add to sack_count the number of 9331 * MSS's (or possibly more than 9332 * a MSS if its a TSO send) we had to skip by. 9333 */ 9334 rack->r_ctl.sack_count += moved_two; 9335 counter_u64_add(rack_sack_total, moved_two); 9336 } 9337 /* 9338 * Now we need to setup for the next 9339 * round. First we make sure we won't 9340 * exceed the size of our uint32_t on 9341 * the various counts, and then clear out 9342 * moved_two. 
9343 */ 9344 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9345 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9346 rack->r_ctl.sack_moved_extra /= 2; 9347 rack->r_ctl.sack_noextra_move /= 2; 9348 } 9349 if (rack->r_ctl.sack_count > 0xfff00000) { 9350 rack->r_ctl.ack_count /= 2; 9351 rack->r_ctl.sack_count /= 2; 9352 } 9353 moved_two = 0; 9354 } 9355 out_with_totals: 9356 if (num_sack_blks > 1) { 9357 /* 9358 * You get an extra stroke if 9359 * you have more than one sack-blk, this 9360 * could be where we are skipping forward 9361 * and the sack-filter is still working, or 9362 * it could be an attacker constantly 9363 * moving us. 9364 */ 9365 rack->r_ctl.sack_moved_extra++; 9366 counter_u64_add(rack_move_some, 1); 9367 } 9368 out: 9369 #ifdef NETFLIX_EXP_DETECTION 9370 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9371 #endif 9372 if (changed) { 9373 /* Something changed cancel the rack timer */ 9374 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9375 } 9376 tsused = tcp_get_usecs(NULL); 9377 rsm = tcp_rack_output(tp, rack, tsused); 9378 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9379 rsm) { 9380 /* Enter recovery */ 9381 rack->r_ctl.rc_rsm_start = rsm->r_start; 9382 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 9383 rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 9384 entered_recovery = 1; 9385 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 9386 /* 9387 * When we enter recovery we need to assure we send 9388 * one packet. 9389 */ 9390 if (rack->rack_no_prr == 0) { 9391 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9392 rack_log_to_prr(rack, 8, 0); 9393 } 9394 rack->r_timer_override = 1; 9395 rack->r_early = 0; 9396 rack->r_ctl.rc_agg_early = 0; 9397 } else if (IN_FASTRECOVERY(tp->t_flags) && 9398 rsm && 9399 (rack->r_rr_config == 3)) { 9400 /* 9401 * Assure we can output and we get no 9402 * remembered pace time except the retransmit. 9403 */ 9404 rack->r_timer_override = 1; 9405 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9406 rack->r_ctl.rc_resend = rsm; 9407 } 9408 if (IN_FASTRECOVERY(tp->t_flags) && 9409 (rack->rack_no_prr == 0) && 9410 (entered_recovery == 0)) { 9411 rack_update_prr(tp, rack, changed, th_ack); 9412 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9413 ((rack->rc_inp->inp_in_hpts == 0) && 9414 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9415 /* 9416 * If you are pacing output you don't want 9417 * to override. 9418 */ 9419 rack->r_early = 0; 9420 rack->r_ctl.rc_agg_early = 0; 9421 rack->r_timer_override = 1; 9422 } 9423 } 9424 } 9425 9426 static void 9427 rack_strike_dupack(struct tcp_rack *rack) 9428 { 9429 struct rack_sendmap *rsm; 9430 9431 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9432 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9433 rsm = TAILQ_NEXT(rsm, r_tnext); 9434 } 9435 if (rsm && (rsm->r_dupack < 0xff)) { 9436 rsm->r_dupack++; 9437 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9438 struct timeval tv; 9439 uint32_t cts; 9440 /* 9441 * Here we see if we need to retransmit. For 9442 * a SACK type connection if enough time has passed 9443 * we will get a return of the rsm. For a non-sack 9444 * connection we will get the rsm returned if the 9445 * dupack value is 3 or more. 
9446 */ 9447 cts = tcp_get_usecs(&tv); 9448 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9449 if (rack->r_ctl.rc_resend != NULL) { 9450 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9451 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9452 rack->rc_tp->snd_una); 9453 } 9454 rack->r_wanted_output = 1; 9455 rack->r_timer_override = 1; 9456 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9457 } 9458 } else { 9459 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9460 } 9461 } 9462 } 9463 9464 static void 9465 rack_check_bottom_drag(struct tcpcb *tp, 9466 struct tcp_rack *rack, 9467 struct socket *so, int32_t acked) 9468 { 9469 uint32_t segsiz, minseg; 9470 9471 segsiz = ctf_fixed_maxseg(tp); 9472 minseg = segsiz; 9473 9474 if (tp->snd_max == tp->snd_una) { 9475 /* 9476 * We are doing dynamic pacing and we are way 9477 * under. Basically everything got acked while 9478 * we were still waiting on the pacer to expire. 9479 * 9480 * This means we need to boost the b/w in 9481 * addition to any earlier boosting of 9482 * the multipler. 9483 */ 9484 rack->rc_dragged_bottom = 1; 9485 rack_validate_multipliers_at_or_above100(rack); 9486 /* 9487 * Lets use the segment bytes acked plus 9488 * the lowest RTT seen as the basis to 9489 * form a b/w estimate. This will be off 9490 * due to the fact that the true estimate 9491 * should be around 1/2 the time of the RTT 9492 * but we can settle for that. 9493 */ 9494 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9495 acked) { 9496 uint64_t bw, calc_bw, rtt; 9497 9498 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9499 if (rtt == 0) { 9500 /* no us sample is there a ms one? */ 9501 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9502 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9503 } else { 9504 goto no_measurement; 9505 } 9506 } 9507 bw = acked; 9508 calc_bw = bw * 1000000; 9509 calc_bw /= rtt; 9510 if (rack->r_ctl.last_max_bw && 9511 (rack->r_ctl.last_max_bw < calc_bw)) { 9512 /* 9513 * If we have a last calculated max bw 9514 * enforce it. 9515 */ 9516 calc_bw = rack->r_ctl.last_max_bw; 9517 } 9518 /* now plop it in */ 9519 if (rack->rc_gp_filled == 0) { 9520 if (calc_bw > ONE_POINT_TWO_MEG) { 9521 /* 9522 * If we have no measurement 9523 * don't let us set in more than 9524 * 1.2Mbps. If we are still too 9525 * low after pacing with this we 9526 * will hopefully have a max b/w 9527 * available to sanity check things. 9528 */ 9529 calc_bw = ONE_POINT_TWO_MEG; 9530 } 9531 rack->r_ctl.rc_rtt_diff = 0; 9532 rack->r_ctl.gp_bw = calc_bw; 9533 rack->rc_gp_filled = 1; 9534 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9535 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9536 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9537 } else if (calc_bw > rack->r_ctl.gp_bw) { 9538 rack->r_ctl.rc_rtt_diff = 0; 9539 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9540 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9541 rack->r_ctl.gp_bw = calc_bw; 9542 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9543 } else 9544 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9545 if ((rack->gp_ready == 0) && 9546 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9547 /* We have enough measurements now */ 9548 rack->gp_ready = 1; 9549 rack_set_cc_pacing(rack); 9550 if (rack->defer_options) 9551 rack_apply_deferred_options(rack); 9552 } 9553 /* 9554 * For acks over 1mss we do a extra boost to simulate 9555 * where we would get 2 acks (we want 110 for the mul). 
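			 *
			 * A worked example of the estimate formed above (numbers are
			 * hypothetical): with acked = 14600 bytes and a lowest usec
			 * rtt of 25000,
			 *	calc_bw = 14600 * 1000000 / 25000 = 584000 bytes/sec
			 * or roughly 4.7 Mbps.  That value is then capped by
			 * last_max_bw if we have one, and by ONE_POINT_TWO_MEG when
			 * no goodput measurement exists at all, before being
			 * installed as gp_bw.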
9556 */ 9557 if (acked > segsiz) 9558 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9559 } else { 9560 /* 9561 * zero rtt possibly?, settle for just an old increase. 9562 */ 9563 no_measurement: 9564 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9565 } 9566 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9567 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9568 minseg)) && 9569 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9570 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9571 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9572 (segsiz * rack_req_segs))) { 9573 /* 9574 * We are doing dynamic GP pacing and 9575 * we have everything except 1MSS or less 9576 * bytes left out. We are still pacing away. 9577 * And there is data that could be sent, This 9578 * means we are inserting delayed ack time in 9579 * our measurements because we are pacing too slow. 9580 */ 9581 rack_validate_multipliers_at_or_above100(rack); 9582 rack->rc_dragged_bottom = 1; 9583 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9584 } 9585 } 9586 9587 9588 9589 static void 9590 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9591 { 9592 /* 9593 * The fast output path is enabled and we 9594 * have moved the cumack forward. Lets see if 9595 * we can expand forward the fast path length by 9596 * that amount. What we would ideally like to 9597 * do is increase the number of bytes in the 9598 * fast path block (left_to_send) by the 9599 * acked amount. However we have to gate that 9600 * by two factors: 9601 * 1) The amount outstanding and the rwnd of the peer 9602 * (i.e. we don't want to exceed the rwnd of the peer). 9603 * <and> 9604 * 2) The amount of data left in the socket buffer (i.e. 9605 * we can't send beyond what is in the buffer). 9606 * 9607 * Note that this does not take into account any increase 9608 * in the cwnd. We will only extend the fast path by 9609 * what was acked. 9610 */ 9611 uint32_t new_total, gating_val; 9612 9613 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; 9614 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), 9615 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); 9616 if (new_total <= gating_val) { 9617 /* We can increase left_to_send by the acked amount */ 9618 counter_u64_add(rack_extended_rfo, 1); 9619 rack->r_ctl.fsb.left_to_send = new_total; 9620 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))), 9621 ("rack:%p left_to_send:%u sbavail:%u out:%u", 9622 rack, rack->r_ctl.fsb.left_to_send, 9623 sbavail(&rack->rc_inp->inp_socket->so_snd), 9624 (tp->snd_max - tp->snd_una))); 9625 9626 } 9627 } 9628 9629 static void 9630 rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una) 9631 { 9632 /* 9633 * Here any sendmap entry that points to the 9634 * beginning mbuf must be adjusted to the correct 9635 * offset. This must be called with: 9636 * 1) The socket buffer locked 9637 * 2) snd_una adjusted to its new postion. 9638 * 9639 * Note that (2) implies rack_ack_received has also 9640 * been called. 9641 * 9642 * We grab the first mbuf in the socket buffer and 9643 * then go through the front of the sendmap, recalculating 9644 * the stored offset for any sendmap entry that has 9645 * that mbuf. We must use the sb functions to do this 9646 * since its possible an add was done has well as 9647 * the subtraction we may have just completed. 
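	 *
	 * As an illustration (hypothetical state): suppose snd_una just moved
	 * from 1000 to 2200 and sbcut_locked() trimmed 1200 bytes off the
	 * front of the head mbuf.  A sendmap entry spanning [2200, 3660) that
	 * cached an offset into the old layout of that mbuf is stale, so the
	 * loop below re-derives both fields with
	 *	rsm->m = sbsndmbuf(sb, rsm->r_start - snd_una, &rsm->soff);
	 * which for this entry is sbsndmbuf(sb, 0, ...), i.e. the new head of
	 * the socket buffer at offset 0, and orig_m_len is refreshed to match.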
This should 9648 * not be a penalty though, since we just referenced the sb 9649 * to go in and trim off the mbufs that we freed (of course 9650 * there will be a penalty for the sendmap references though). 9651 */ 9652 struct mbuf *m; 9653 struct rack_sendmap *rsm; 9654 9655 SOCKBUF_LOCK_ASSERT(sb); 9656 m = sb->sb_mb; 9657 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9658 if ((rsm == NULL) || (m == NULL)) { 9659 /* Nothing outstanding */ 9660 return; 9661 } 9662 while (rsm->m && (rsm->m == m)) { 9663 /* one to adjust */ 9664 #ifdef INVARIANTS 9665 struct mbuf *tm; 9666 uint32_t soff; 9667 9668 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); 9669 if (rsm->orig_m_len != m->m_len) { 9670 rack_adjust_orig_mlen(rsm); 9671 } 9672 if (rsm->soff != soff) { 9673 /* 9674 * This is not a fatal error, we anticipate it 9675 * might happen (the else code), so we count it here 9676 * so that under invariant we can see that it really 9677 * does happen. 9678 */ 9679 counter_u64_add(rack_adjust_map_bw, 1); 9680 } 9681 rsm->m = tm; 9682 rsm->soff = soff; 9683 if (tm) 9684 rsm->orig_m_len = rsm->m->m_len; 9685 else 9686 rsm->orig_m_len = 0; 9687 #else 9688 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); 9689 if (rsm->m) 9690 rsm->orig_m_len = rsm->m->m_len; 9691 else 9692 rsm->orig_m_len = 0; 9693 #endif 9694 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 9695 rsm); 9696 if (rsm == NULL) 9697 break; 9698 } 9699 } 9700 9701 /* 9702 * Return value of 1, we do not need to call rack_process_data(). 9703 * return value of 0, rack_process_data can be called. 9704 * For ret_val if its 0 the TCP is locked, if its non-zero 9705 * its unlocked and probably unsafe to touch the TCB. 9706 */ 9707 static int 9708 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 9709 struct tcpcb *tp, struct tcpopt *to, 9710 uint32_t tiwin, int32_t tlen, 9711 int32_t * ofia, int32_t thflags, int32_t *ret_val) 9712 { 9713 int32_t ourfinisacked = 0; 9714 int32_t nsegs, acked_amount; 9715 int32_t acked; 9716 struct mbuf *mfree; 9717 struct tcp_rack *rack; 9718 int32_t under_pacing = 0; 9719 int32_t recovery = 0; 9720 9721 rack = (struct tcp_rack *)tp->t_fb_ptr; 9722 if (SEQ_GT(th->th_ack, tp->snd_max)) { 9723 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 9724 &rack->r_ctl.challenge_ack_ts, 9725 &rack->r_ctl.challenge_ack_cnt); 9726 rack->r_wanted_output = 1; 9727 return (1); 9728 } 9729 if (rack->gp_ready && 9730 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 9731 under_pacing = 1; 9732 } 9733 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 9734 int in_rec, dup_ack_struck = 0; 9735 9736 in_rec = IN_FASTRECOVERY(tp->t_flags); 9737 if (rack->rc_in_persist) { 9738 tp->t_rxtshift = 0; 9739 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9740 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9741 } 9742 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) { 9743 rack_strike_dupack(rack); 9744 dup_ack_struck = 1; 9745 } 9746 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 9747 } 9748 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 9749 /* 9750 * Old ack, behind (or duplicate to) the last one rcv'd 9751 * Note: We mark reordering is occuring if its 9752 * less than and we have not closed our window. 
9753 */ 9754 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 9755 counter_u64_add(rack_reorder_seen, 1); 9756 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 9757 } 9758 return (0); 9759 } 9760 /* 9761 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 9762 * something we sent. 9763 */ 9764 if (tp->t_flags & TF_NEEDSYN) { 9765 /* 9766 * T/TCP: Connection was half-synchronized, and our SYN has 9767 * been ACK'd (so connection is now fully synchronized). Go 9768 * to non-starred state, increment snd_una for ACK of SYN, 9769 * and check if we can do window scaling. 9770 */ 9771 tp->t_flags &= ~TF_NEEDSYN; 9772 tp->snd_una++; 9773 /* Do window scaling? */ 9774 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 9775 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 9776 tp->rcv_scale = tp->request_r_scale; 9777 /* Send window already scaled. */ 9778 } 9779 } 9780 nsegs = max(1, m->m_pkthdr.lro_nsegs); 9781 INP_WLOCK_ASSERT(tp->t_inpcb); 9782 9783 acked = BYTES_THIS_ACK(tp, th); 9784 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 9785 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 9786 /* 9787 * If we just performed our first retransmit, and the ACK arrives 9788 * within our recovery window, then it was a mistake to do the 9789 * retransmit in the first place. Recover our original cwnd and 9790 * ssthresh, and proceed to transmit where we left off. 9791 */ 9792 if ((tp->t_flags & TF_PREVVALID) && 9793 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 9794 tp->t_flags &= ~TF_PREVVALID; 9795 if (tp->t_rxtshift == 1 && 9796 (int)(ticks - tp->t_badrxtwin) < 0) 9797 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 9798 } 9799 if (acked) { 9800 /* assure we are not backed off */ 9801 tp->t_rxtshift = 0; 9802 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 9803 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 9804 rack->rc_tlp_in_progress = 0; 9805 rack->r_ctl.rc_tlp_cnt_out = 0; 9806 /* 9807 * If it is the RXT timer we want to 9808 * stop it, so we can restart a TLP. 9809 */ 9810 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 9811 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9812 #ifdef NETFLIX_HTTP_LOGGING 9813 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 9814 #endif 9815 } 9816 /* 9817 * If we have a timestamp reply, update smoothed round trip time. If 9818 * no timestamp is present but transmit timer is running and timed 9819 * sequence number was acked, update smoothed round trip time. Since 9820 * we now have an rtt measurement, cancel the timer backoff (cf., 9821 * Phil Karn's retransmit alg.). Recompute the initial retransmit 9822 * timer. 9823 * 9824 * Some boxes send broken timestamp replies during the SYN+ACK 9825 * phase, ignore timestamps of 0 or we could calculate a huge RTT 9826 * and blow up the retransmit timer. 9827 */ 9828 /* 9829 * If all outstanding data is acked, stop retransmit timer and 9830 * remember to restart (more output or persist). If there is more 9831 * data to be acked, restart retransmit timer, using current 9832 * (possibly backed-off) value. 9833 */ 9834 if (acked == 0) { 9835 if (ofia) 9836 *ofia = ourfinisacked; 9837 return (0); 9838 } 9839 if (IN_RECOVERY(tp->t_flags)) { 9840 if (SEQ_LT(th->th_ack, tp->snd_recover) && 9841 (SEQ_LT(th->th_ack, tp->snd_max))) { 9842 tcp_rack_partialack(tp); 9843 } else { 9844 rack_post_recovery(tp, th->th_ack); 9845 recovery = 1; 9846 } 9847 } 9848 /* 9849 * Let the congestion control algorithm update congestion control 9850 * related information. 
This typically means increasing the
	 * congestion window.
	 */
	rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery);
	SOCKBUF_LOCK(&so->so_snd);
	acked_amount = min(acked, (int)sbavail(&so->so_snd));
	tp->snd_wnd -= acked_amount;
	mfree = sbcut_locked(&so->so_snd, acked_amount);
	if ((sbused(&so->so_snd) == 0) &&
	    (acked > acked_amount) &&
	    (tp->t_state >= TCPS_FIN_WAIT_1) &&
	    (tp->t_flags & TF_SENTFIN)) {
		/*
		 * We must be sure our fin
		 * was sent and acked (we can be
		 * in FIN_WAIT_1 without having
		 * sent the fin).
		 */
		ourfinisacked = 1;
	}
	tp->snd_una = th->th_ack;
	if (acked_amount && sbavail(&so->so_snd))
		rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una);
	rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
	/* NB: sowwakeup_locked() does an implicit unlock. */
	sowwakeup_locked(so);
	m_freem(mfree);
	if (SEQ_GT(tp->snd_una, tp->snd_recover))
		tp->snd_recover = tp->snd_una;

	if (SEQ_LT(tp->snd_nxt, tp->snd_una)) {
		tp->snd_nxt = tp->snd_una;
	}
	if (under_pacing &&
	    (rack->use_fixed_rate == 0) &&
	    (rack->in_probe_rtt == 0) &&
	    rack->rc_gp_dyn_mul &&
	    rack->rc_always_pace) {
		/* Check if we are dragging bottom */
		rack_check_bottom_drag(tp, rack, so, acked);
	}
	if (tp->snd_una == tp->snd_max) {
		/* Nothing left outstanding */
		tp->t_flags &= ~TF_PREVVALID;
		rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
		rack->r_ctl.retran_during_recovery = 0;
		rack->r_ctl.dsack_byte_cnt = 0;
		if (rack->r_ctl.rc_went_idle_time == 0)
			rack->r_ctl.rc_went_idle_time = 1;
		rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__);
		if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0)
			tp->t_acktime = 0;
		rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
		/* Set need output so persist might get set */
		rack->r_wanted_output = 1;
		sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
		if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
		    (sbavail(&so->so_snd) == 0) &&
		    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
			/*
			 * The socket was gone and the
			 * peer sent data (now or in the past), time to
			 * reset him.
			 */
			*ret_val = 1;
			/* tcp_close will kill the inp pre-log the Reset */
			tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
			tp = tcp_close(tp);
			ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen);
			return (1);
		}
	}
	if (ofia)
		*ofia = ourfinisacked;
	return (0);
}

static void
rack_collapsed_window(struct tcp_rack *rack)
{
	/*
	 * Now we must walk the send map and divide up the ones
	 * left stranded. These guys can't cause us to abort the
	 * connection and are really "unsent". However, if a buggy
	 * client actually did keep some of the data (i.e. it
	 * collapsed the window, refused to ack, and later opened
	 * the window and acked that data), we would end up in an
	 * ack war, so the simpler method of just pretending we
	 * never sent those segments at all won't work.
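	 *
	 * An example with made-up sequence numbers: snd_una = 1000 and the
	 * peer shrank its window so that snd_wnd = 5000, giving
	 * max_seq = 6000.  If an rsm spans [5500, 7000) it is split at 6000;
	 * the new entry covering [6000, 7000) and every entry after it get
	 * RACK_RWND_COLLAPSED set, while [5500, 6000) is left untouched since
	 * it still fits inside the offered window.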
9945 */ 9946 struct rack_sendmap *rsm, *nrsm, fe, *insret; 9947 tcp_seq max_seq; 9948 9949 max_seq = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 9950 memset(&fe, 0, sizeof(fe)); 9951 fe.r_start = max_seq; 9952 /* Find the first seq past or at maxseq */ 9953 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 9954 if (rsm == NULL) { 9955 /* Nothing to do strange */ 9956 rack->rc_has_collapsed = 0; 9957 return; 9958 } 9959 /* 9960 * Now do we need to split at 9961 * the collapse point? 9962 */ 9963 if (SEQ_GT(max_seq, rsm->r_start)) { 9964 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 9965 if (nrsm == NULL) { 9966 /* We can't get a rsm, mark all? */ 9967 nrsm = rsm; 9968 goto no_split; 9969 } 9970 /* Clone it */ 9971 rack_clone_rsm(rack, nrsm, rsm, max_seq); 9972 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 9973 #ifdef INVARIANTS 9974 if (insret != NULL) { 9975 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 9976 nrsm, insret, rack, rsm); 9977 } 9978 #endif 9979 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, max_seq, __LINE__); 9980 if (rsm->r_in_tmap) { 9981 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 9982 nrsm->r_in_tmap = 1; 9983 } 9984 /* 9985 * Set in the new RSM as the 9986 * collapsed starting point 9987 */ 9988 rsm = nrsm; 9989 } 9990 no_split: 9991 counter_u64_add(rack_collapsed_win, 1); 9992 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 9993 nrsm->r_flags |= RACK_RWND_COLLAPSED; 9994 } 9995 rack->rc_has_collapsed = 1; 9996 } 9997 9998 static void 9999 rack_un_collapse_window(struct tcp_rack *rack) 10000 { 10001 struct rack_sendmap *rsm; 10002 10003 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 10004 if (rsm->r_flags & RACK_RWND_COLLAPSED) 10005 rsm->r_flags &= ~RACK_RWND_COLLAPSED; 10006 else 10007 break; 10008 } 10009 rack->rc_has_collapsed = 0; 10010 } 10011 10012 static void 10013 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10014 int32_t tlen, int32_t tfo_syn) 10015 { 10016 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10017 if (rack->rc_dack_mode && 10018 (tlen > 500) && 10019 (rack->rc_dack_toggle == 1)) { 10020 goto no_delayed_ack; 10021 } 10022 rack_timer_cancel(tp, rack, 10023 rack->r_ctl.rc_rcvtime, __LINE__); 10024 tp->t_flags |= TF_DELACK; 10025 } else { 10026 no_delayed_ack: 10027 rack->r_wanted_output = 1; 10028 tp->t_flags |= TF_ACKNOW; 10029 if (rack->rc_dack_mode) { 10030 if (tp->t_flags & TF_DELACK) 10031 rack->rc_dack_toggle = 1; 10032 else 10033 rack->rc_dack_toggle = 0; 10034 } 10035 } 10036 } 10037 10038 static void 10039 rack_validate_fo_sendwin_up(struct tcpcb *tp, struct tcp_rack *rack) 10040 { 10041 /* 10042 * If fast output is in progress, lets validate that 10043 * the new window did not shrink on us and make it 10044 * so fast output should end. 10045 */ 10046 if (rack->r_fast_output) { 10047 uint32_t out; 10048 10049 /* 10050 * Calculate what we will send if left as is 10051 * and compare that to our send window. 
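 * As a purely illustrative example (numbers are not from the
 * code): with snd_wnd = 10000, 8000 bytes already outstanding and
 * left_to_send = 4000, the block below trims left_to_send to 2000;
 * if the window were already consumed, or the trimmed amount were
 * smaller than one full segment, fast output would instead be
 * turned off.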
10052 */ 10053 out = ctf_outstanding(tp); 10054 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10055 /* ok we have an issue */ 10056 if (out >= tp->snd_wnd) { 10057 /* Turn off fast output the window is met or collapsed */ 10058 rack->r_fast_output = 0; 10059 } else { 10060 /* we have some room left */ 10061 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10062 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10063 /* If not at least 1 full segment never mind */ 10064 rack->r_fast_output = 0; 10065 } 10066 } 10067 } 10068 } 10069 } 10070 10071 10072 /* 10073 * Return value of 1, the TCB is unlocked and most 10074 * likely gone, return value of 0, the TCP is still 10075 * locked. 10076 */ 10077 static int 10078 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10079 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10080 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10081 { 10082 /* 10083 * Update window information. Don't look at window if no ACK: TAC's 10084 * send garbage on first SYN. 10085 */ 10086 int32_t nsegs; 10087 int32_t tfo_syn; 10088 struct tcp_rack *rack; 10089 10090 rack = (struct tcp_rack *)tp->t_fb_ptr; 10091 INP_WLOCK_ASSERT(tp->t_inpcb); 10092 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10093 if ((thflags & TH_ACK) && 10094 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10095 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10096 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10097 /* keep track of pure window updates */ 10098 if (tlen == 0 && 10099 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10100 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10101 tp->snd_wnd = tiwin; 10102 rack_validate_fo_sendwin_up(tp, rack); 10103 tp->snd_wl1 = th->th_seq; 10104 tp->snd_wl2 = th->th_ack; 10105 if (tp->snd_wnd > tp->max_sndwnd) 10106 tp->max_sndwnd = tp->snd_wnd; 10107 rack->r_wanted_output = 1; 10108 } else if (thflags & TH_ACK) { 10109 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10110 tp->snd_wnd = tiwin; 10111 rack_validate_fo_sendwin_up(tp, rack); 10112 tp->snd_wl1 = th->th_seq; 10113 tp->snd_wl2 = th->th_ack; 10114 } 10115 } 10116 if (tp->snd_wnd < ctf_outstanding(tp)) 10117 /* The peer collapsed the window */ 10118 rack_collapsed_window(rack); 10119 else if (rack->rc_has_collapsed) 10120 rack_un_collapse_window(rack); 10121 /* Was persist timer active and now we have window space? */ 10122 if ((rack->rc_in_persist != 0) && 10123 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10124 rack->r_ctl.rc_pace_min_segs))) { 10125 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10126 tp->snd_nxt = tp->snd_max; 10127 /* Make sure we output to start the timer */ 10128 rack->r_wanted_output = 1; 10129 } 10130 /* Do we enter persists? */ 10131 if ((rack->rc_in_persist == 0) && 10132 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10133 TCPS_HAVEESTABLISHED(tp->t_state) && 10134 (tp->snd_max == tp->snd_una) && 10135 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10136 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10137 /* 10138 * Here the rwnd is less than 10139 * the pacing size, we are established, 10140 * nothing is outstanding, and there is 10141 * data to send. Enter persists. 10142 */ 10143 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10144 } 10145 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10146 m_freem(m); 10147 return (0); 10148 } 10149 /* 10150 * don't process the URG bit, ignore them drag 10151 * along the up. 
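 * (In practice rcv_up is simply pulled along with rcv_nxt just
 * below; no urgent-pointer state is acted on here.)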
10152 */ 10153 tp->rcv_up = tp->rcv_nxt; 10154 INP_WLOCK_ASSERT(tp->t_inpcb); 10155 10156 /* 10157 * Process the segment text, merging it into the TCP sequencing 10158 * queue, and arranging for acknowledgment of receipt if necessary. 10159 * This process logically involves adjusting tp->rcv_wnd as data is 10160 * presented to the user (this happens in tcp_usrreq.c, case 10161 * PRU_RCVD). If a FIN has already been received on this connection 10162 * then we just ignore the text. 10163 */ 10164 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10165 IS_FASTOPEN(tp->t_flags)); 10166 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10167 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10168 tcp_seq save_start = th->th_seq; 10169 tcp_seq save_rnxt = tp->rcv_nxt; 10170 int save_tlen = tlen; 10171 10172 m_adj(m, drop_hdrlen); /* delayed header drop */ 10173 /* 10174 * Insert segment which includes th into TCP reassembly 10175 * queue with control block tp. Set thflags to whether 10176 * reassembly now includes a segment with FIN. This handles 10177 * the common case inline (segment is the next to be 10178 * received on an established connection, and the queue is 10179 * empty), avoiding linkage into and removal from the queue 10180 * and repetition of various conversions. Set DELACK for 10181 * segments received in order, but ack immediately when 10182 * segments are out of order (so fast retransmit can work). 10183 */ 10184 if (th->th_seq == tp->rcv_nxt && 10185 SEGQ_EMPTY(tp) && 10186 (TCPS_HAVEESTABLISHED(tp->t_state) || 10187 tfo_syn)) { 10188 #ifdef NETFLIX_SB_LIMITS 10189 u_int mcnt, appended; 10190 10191 if (so->so_rcv.sb_shlim) { 10192 mcnt = m_memcnt(m); 10193 appended = 0; 10194 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10195 CFO_NOSLEEP, NULL) == false) { 10196 counter_u64_add(tcp_sb_shlim_fails, 1); 10197 m_freem(m); 10198 return (0); 10199 } 10200 } 10201 #endif 10202 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10203 tp->rcv_nxt += tlen; 10204 if (tlen && 10205 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10206 (tp->t_fbyte_in == 0)) { 10207 tp->t_fbyte_in = ticks; 10208 if (tp->t_fbyte_in == 0) 10209 tp->t_fbyte_in = 1; 10210 if (tp->t_fbyte_out && tp->t_fbyte_in) 10211 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10212 } 10213 thflags = th->th_flags & TH_FIN; 10214 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10215 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10216 SOCKBUF_LOCK(&so->so_rcv); 10217 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10218 m_freem(m); 10219 } else 10220 #ifdef NETFLIX_SB_LIMITS 10221 appended = 10222 #endif 10223 sbappendstream_locked(&so->so_rcv, m, 0); 10224 10225 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10226 /* NB: sorwakeup_locked() does an implicit unlock. */ 10227 sorwakeup_locked(so); 10228 #ifdef NETFLIX_SB_LIMITS 10229 if (so->so_rcv.sb_shlim && appended != mcnt) 10230 counter_fo_release(so->so_rcv.sb_shlim, 10231 mcnt - appended); 10232 #endif 10233 } else { 10234 /* 10235 * XXX: Due to the header drop above "th" is 10236 * theoretically invalid by now. Fortunately 10237 * m_adj() doesn't actually frees any mbufs when 10238 * trimming from the head. 10239 */ 10240 tcp_seq temp = save_start; 10241 10242 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10243 tp->t_flags |= TF_ACKNOW; 10244 if (tp->t_flags & TF_WAKESOR) { 10245 tp->t_flags &= ~TF_WAKESOR; 10246 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10247 sorwakeup_locked(so); 10248 } 10249 } 10250 if ((tp->t_flags & TF_SACK_PERMIT) && 10251 (save_tlen > 0) && 10252 TCPS_HAVEESTABLISHED(tp->t_state)) { 10253 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10254 /* 10255 * DSACK actually handled in the fastpath 10256 * above. 10257 */ 10258 RACK_OPTS_INC(tcp_sack_path_1); 10259 tcp_update_sack_list(tp, save_start, 10260 save_start + save_tlen); 10261 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10262 if ((tp->rcv_numsacks >= 1) && 10263 (tp->sackblks[0].end == save_start)) { 10264 /* 10265 * Partial overlap, recorded at todrop 10266 * above. 10267 */ 10268 RACK_OPTS_INC(tcp_sack_path_2a); 10269 tcp_update_sack_list(tp, 10270 tp->sackblks[0].start, 10271 tp->sackblks[0].end); 10272 } else { 10273 RACK_OPTS_INC(tcp_sack_path_2b); 10274 tcp_update_dsack_list(tp, save_start, 10275 save_start + save_tlen); 10276 } 10277 } else if (tlen >= save_tlen) { 10278 /* Update of sackblks. */ 10279 RACK_OPTS_INC(tcp_sack_path_3); 10280 tcp_update_dsack_list(tp, save_start, 10281 save_start + save_tlen); 10282 } else if (tlen > 0) { 10283 RACK_OPTS_INC(tcp_sack_path_4); 10284 tcp_update_dsack_list(tp, save_start, 10285 save_start + tlen); 10286 } 10287 } 10288 } else { 10289 m_freem(m); 10290 thflags &= ~TH_FIN; 10291 } 10292 10293 /* 10294 * If FIN is received ACK the FIN and let the user know that the 10295 * connection is closing. 10296 */ 10297 if (thflags & TH_FIN) { 10298 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10299 /* The socket upcall is handled by socantrcvmore. */ 10300 socantrcvmore(so); 10301 /* 10302 * If connection is half-synchronized (ie NEEDSYN 10303 * flag on) then delay ACK, so it may be piggybacked 10304 * when SYN is sent. Otherwise, since we received a 10305 * FIN then no more input can be expected, send ACK 10306 * now. 10307 */ 10308 if (tp->t_flags & TF_NEEDSYN) { 10309 rack_timer_cancel(tp, rack, 10310 rack->r_ctl.rc_rcvtime, __LINE__); 10311 tp->t_flags |= TF_DELACK; 10312 } else { 10313 tp->t_flags |= TF_ACKNOW; 10314 } 10315 tp->rcv_nxt++; 10316 } 10317 switch (tp->t_state) { 10318 /* 10319 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10320 * CLOSE_WAIT state. 10321 */ 10322 case TCPS_SYN_RECEIVED: 10323 tp->t_starttime = ticks; 10324 /* FALLTHROUGH */ 10325 case TCPS_ESTABLISHED: 10326 rack_timer_cancel(tp, rack, 10327 rack->r_ctl.rc_rcvtime, __LINE__); 10328 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10329 break; 10330 10331 /* 10332 * If still in FIN_WAIT_1 STATE FIN has not been 10333 * acked so enter the CLOSING state. 10334 */ 10335 case TCPS_FIN_WAIT_1: 10336 rack_timer_cancel(tp, rack, 10337 rack->r_ctl.rc_rcvtime, __LINE__); 10338 tcp_state_change(tp, TCPS_CLOSING); 10339 break; 10340 10341 /* 10342 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10343 * starting the time-wait timer, turning off the 10344 * other standard timers. 10345 */ 10346 case TCPS_FIN_WAIT_2: 10347 rack_timer_cancel(tp, rack, 10348 rack->r_ctl.rc_rcvtime, __LINE__); 10349 tcp_twstart(tp); 10350 return (1); 10351 } 10352 } 10353 /* 10354 * Return any desired output. 10355 */ 10356 if ((tp->t_flags & TF_ACKNOW) || 10357 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10358 rack->r_wanted_output = 1; 10359 } 10360 INP_WLOCK_ASSERT(tp->t_inpcb); 10361 return (0); 10362 } 10363 10364 /* 10365 * Here nothing is really faster, its just that we 10366 * have broken out the fast-data path also just like 10367 * the fast-ack. 
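 * Roughly, the checks below accept a segment only when it is the
 * next expected one (th_seq == rcv_nxt), we are not retransmitting
 * (snd_nxt == snd_max), the advertised window is unchanged, no
 * SYN/FIN handshake state is pending, the timestamp is not stale,
 * the ACK exactly matches snd_una, and the data fits in the
 * receive socket buffer; anything else returns 0 and is handled on
 * the slow path.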
10368 */ 10369 static int 10370 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10371 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10372 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10373 { 10374 int32_t nsegs; 10375 int32_t newsize = 0; /* automatic sockbuf scaling */ 10376 struct tcp_rack *rack; 10377 #ifdef NETFLIX_SB_LIMITS 10378 u_int mcnt, appended; 10379 #endif 10380 #ifdef TCPDEBUG 10381 /* 10382 * The size of tcp_saveipgen must be the size of the max ip header, 10383 * now IPv6. 10384 */ 10385 u_char tcp_saveipgen[IP6_HDR_LEN]; 10386 struct tcphdr tcp_savetcp; 10387 short ostate = 0; 10388 10389 #endif 10390 /* 10391 * If last ACK falls within this segment's sequence numbers, record 10392 * the timestamp. NOTE that the test is modified according to the 10393 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10394 */ 10395 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10396 return (0); 10397 } 10398 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10399 return (0); 10400 } 10401 if (tiwin && tiwin != tp->snd_wnd) { 10402 return (0); 10403 } 10404 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10405 return (0); 10406 } 10407 if (__predict_false((to->to_flags & TOF_TS) && 10408 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10409 return (0); 10410 } 10411 if (__predict_false((th->th_ack != tp->snd_una))) { 10412 return (0); 10413 } 10414 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10415 return (0); 10416 } 10417 if ((to->to_flags & TOF_TS) != 0 && 10418 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10419 tp->ts_recent_age = tcp_ts_getticks(); 10420 tp->ts_recent = to->to_tsval; 10421 } 10422 rack = (struct tcp_rack *)tp->t_fb_ptr; 10423 /* 10424 * This is a pure, in-sequence data packet with nothing on the 10425 * reassembly queue and we have enough buffer space to take it. 10426 */ 10427 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10428 10429 #ifdef NETFLIX_SB_LIMITS 10430 if (so->so_rcv.sb_shlim) { 10431 mcnt = m_memcnt(m); 10432 appended = 0; 10433 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10434 CFO_NOSLEEP, NULL) == false) { 10435 counter_u64_add(tcp_sb_shlim_fails, 1); 10436 m_freem(m); 10437 return (1); 10438 } 10439 } 10440 #endif 10441 /* Clean receiver SACK report if present */ 10442 if (tp->rcv_numsacks) 10443 tcp_clean_sackreport(tp); 10444 KMOD_TCPSTAT_INC(tcps_preddat); 10445 tp->rcv_nxt += tlen; 10446 if (tlen && 10447 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10448 (tp->t_fbyte_in == 0)) { 10449 tp->t_fbyte_in = ticks; 10450 if (tp->t_fbyte_in == 0) 10451 tp->t_fbyte_in = 1; 10452 if (tp->t_fbyte_out && tp->t_fbyte_in) 10453 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10454 } 10455 /* 10456 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10457 */ 10458 tp->snd_wl1 = th->th_seq; 10459 /* 10460 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10461 */ 10462 tp->rcv_up = tp->rcv_nxt; 10463 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10464 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10465 #ifdef TCPDEBUG 10466 if (so->so_options & SO_DEBUG) 10467 tcp_trace(TA_INPUT, ostate, tp, 10468 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10469 #endif 10470 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10471 10472 /* Add data to socket buffer. */ 10473 SOCKBUF_LOCK(&so->so_rcv); 10474 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10475 m_freem(m); 10476 } else { 10477 /* 10478 * Set new socket buffer size. Give up when limit is 10479 * reached. 
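 * (That is, if sbreserve_locked() cannot grow the receive buffer
 * to the size tcp_autorcvbuf() suggested, SB_AUTOSIZE is cleared
 * so no further automatic growth is attempted.)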
10480 */ 10481 if (newsize) 10482 if (!sbreserve_locked(&so->so_rcv, 10483 newsize, so, NULL)) 10484 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10485 m_adj(m, drop_hdrlen); /* delayed header drop */ 10486 #ifdef NETFLIX_SB_LIMITS 10487 appended = 10488 #endif 10489 sbappendstream_locked(&so->so_rcv, m, 0); 10490 ctf_calc_rwin(so, tp); 10491 } 10492 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10493 /* NB: sorwakeup_locked() does an implicit unlock. */ 10494 sorwakeup_locked(so); 10495 #ifdef NETFLIX_SB_LIMITS 10496 if (so->so_rcv.sb_shlim && mcnt != appended) 10497 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10498 #endif 10499 rack_handle_delayed_ack(tp, rack, tlen, 0); 10500 if (tp->snd_una == tp->snd_max) 10501 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10502 return (1); 10503 } 10504 10505 /* 10506 * This subfunction is used to try to highly optimize the 10507 * fast path. We again allow window updates that are 10508 * in sequence to remain in the fast-path. We also add 10509 * in the __predict's to attempt to help the compiler. 10510 * Note that if we return a 0, then we can *not* process 10511 * it and the caller should push the packet into the 10512 * slow-path. 10513 */ 10514 static int 10515 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10516 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10517 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10518 { 10519 int32_t acked; 10520 int32_t nsegs; 10521 #ifdef TCPDEBUG 10522 /* 10523 * The size of tcp_saveipgen must be the size of the max ip header, 10524 * now IPv6. 10525 */ 10526 u_char tcp_saveipgen[IP6_HDR_LEN]; 10527 struct tcphdr tcp_savetcp; 10528 short ostate = 0; 10529 #endif 10530 int32_t under_pacing = 0; 10531 struct tcp_rack *rack; 10532 10533 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10534 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10535 return (0); 10536 } 10537 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10538 /* Above what we have sent? */ 10539 return (0); 10540 } 10541 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10542 /* We are retransmitting */ 10543 return (0); 10544 } 10545 if (__predict_false(tiwin == 0)) { 10546 /* zero window */ 10547 return (0); 10548 } 10549 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10550 /* We need a SYN or a FIN, unlikely.. */ 10551 return (0); 10552 } 10553 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10554 /* Timestamp is behind .. old ack with seq wrap? */ 10555 return (0); 10556 } 10557 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10558 /* Still recovering */ 10559 return (0); 10560 } 10561 rack = (struct tcp_rack *)tp->t_fb_ptr; 10562 if (rack->r_ctl.rc_sacked) { 10563 /* We have sack holes on our scoreboard */ 10564 return (0); 10565 } 10566 /* Ok if we reach here, we can process a fast-ack */ 10567 if (rack->gp_ready && 10568 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10569 under_pacing = 1; 10570 } 10571 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10572 rack_log_ack(tp, to, th, 0, 0); 10573 /* Did the window get updated? */ 10574 if (tiwin != tp->snd_wnd) { 10575 tp->snd_wnd = tiwin; 10576 rack_validate_fo_sendwin_up(tp, rack); 10577 tp->snd_wl1 = th->th_seq; 10578 if (tp->snd_wnd > tp->max_sndwnd) 10579 tp->max_sndwnd = tp->snd_wnd; 10580 } 10581 /* Do we exit persists? 
*/ 10582 if ((rack->rc_in_persist != 0) && 10583 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10584 rack->r_ctl.rc_pace_min_segs))) { 10585 rack_exit_persist(tp, rack, cts); 10586 } 10587 /* Do we enter persists? */ 10588 if ((rack->rc_in_persist == 0) && 10589 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10590 TCPS_HAVEESTABLISHED(tp->t_state) && 10591 (tp->snd_max == tp->snd_una) && 10592 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10593 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10594 /* 10595 * Here the rwnd is less than 10596 * the pacing size, we are established, 10597 * nothing is outstanding, and there is 10598 * data to send. Enter persists. 10599 */ 10600 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10601 } 10602 /* 10603 * If last ACK falls within this segment's sequence numbers, record 10604 * the timestamp. NOTE that the test is modified according to the 10605 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10606 */ 10607 if ((to->to_flags & TOF_TS) != 0 && 10608 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10609 tp->ts_recent_age = tcp_ts_getticks(); 10610 tp->ts_recent = to->to_tsval; 10611 } 10612 /* 10613 * This is a pure ack for outstanding data. 10614 */ 10615 KMOD_TCPSTAT_INC(tcps_predack); 10616 10617 /* 10618 * "bad retransmit" recovery. 10619 */ 10620 if ((tp->t_flags & TF_PREVVALID) && 10621 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10622 tp->t_flags &= ~TF_PREVVALID; 10623 if (tp->t_rxtshift == 1 && 10624 (int)(ticks - tp->t_badrxtwin) < 0) 10625 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack); 10626 } 10627 /* 10628 * Recalculate the transmit timer / rtt. 10629 * 10630 * Some boxes send broken timestamp replies during the SYN+ACK 10631 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10632 * and blow up the retransmit timer. 10633 */ 10634 acked = BYTES_THIS_ACK(tp, th); 10635 10636 #ifdef TCP_HHOOK 10637 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 10638 hhook_run_tcp_est_in(tp, th, to); 10639 #endif 10640 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10641 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10642 if (acked) { 10643 struct mbuf *mfree; 10644 10645 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 10646 SOCKBUF_LOCK(&so->so_snd); 10647 mfree = sbcut_locked(&so->so_snd, acked); 10648 tp->snd_una = th->th_ack; 10649 /* Note we want to hold the sb lock through the sendmap adjust */ 10650 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10651 /* Wake up the socket if we have room to write more */ 10652 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10653 sowwakeup_locked(so); 10654 m_freem(mfree); 10655 tp->t_rxtshift = 0; 10656 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10657 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10658 rack->rc_tlp_in_progress = 0; 10659 rack->r_ctl.rc_tlp_cnt_out = 0; 10660 /* 10661 * If it is the RXT timer we want to 10662 * stop it, so we can restart a TLP. 10663 */ 10664 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10665 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10666 #ifdef NETFLIX_HTTP_LOGGING 10667 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10668 #endif 10669 } 10670 /* 10671 * Let the congestion control algorithm update congestion control 10672 * related information. This typically means increasing the 10673 * congestion window. 
10674 */ 10675 if (tp->snd_wnd < ctf_outstanding(tp)) { 10676 /* The peer collapsed the window */ 10677 rack_collapsed_window(rack); 10678 } else if (rack->rc_has_collapsed) 10679 rack_un_collapse_window(rack); 10680 10681 /* 10682 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 10683 */ 10684 tp->snd_wl2 = th->th_ack; 10685 tp->t_dupacks = 0; 10686 m_freem(m); 10687 /* ND6_HINT(tp); *//* Some progress has been made. */ 10688 10689 /* 10690 * If all outstanding data are acked, stop retransmit timer, 10691 * otherwise restart timer using current (possibly backed-off) 10692 * value. If process is waiting for space, wakeup/selwakeup/signal. 10693 * If data are ready to send, let tcp_output decide between more 10694 * output or persist. 10695 */ 10696 #ifdef TCPDEBUG 10697 if (so->so_options & SO_DEBUG) 10698 tcp_trace(TA_INPUT, ostate, tp, 10699 (void *)tcp_saveipgen, 10700 &tcp_savetcp, 0); 10701 #endif 10702 if (under_pacing && 10703 (rack->use_fixed_rate == 0) && 10704 (rack->in_probe_rtt == 0) && 10705 rack->rc_gp_dyn_mul && 10706 rack->rc_always_pace) { 10707 /* Check if we are dragging bottom */ 10708 rack_check_bottom_drag(tp, rack, so, acked); 10709 } 10710 if (tp->snd_una == tp->snd_max) { 10711 tp->t_flags &= ~TF_PREVVALID; 10712 rack->r_ctl.retran_during_recovery = 0; 10713 rack->r_ctl.dsack_byte_cnt = 0; 10714 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10715 if (rack->r_ctl.rc_went_idle_time == 0) 10716 rack->r_ctl.rc_went_idle_time = 1; 10717 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10718 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10719 tp->t_acktime = 0; 10720 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10721 } 10722 if (acked && rack->r_fast_output) 10723 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 10724 if (sbavail(&so->so_snd)) { 10725 rack->r_wanted_output = 1; 10726 } 10727 return (1); 10728 } 10729 10730 /* 10731 * Return value of 1, the TCB is unlocked and most 10732 * likely gone, return value of 0, the TCP is still 10733 * locked. 10734 */ 10735 static int 10736 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 10737 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10738 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 10739 { 10740 int32_t ret_val = 0; 10741 int32_t todrop; 10742 int32_t ourfinisacked = 0; 10743 struct tcp_rack *rack; 10744 10745 ctf_calc_rwin(so, tp); 10746 /* 10747 * If the state is SYN_SENT: if seg contains an ACK, but not for our 10748 * SYN, drop the input. if seg contains a RST, then drop the 10749 * connection. if seg does not contain SYN, then drop it. Otherwise 10750 * this is an acceptable SYN segment initialize tp->rcv_nxt and 10751 * tp->irs if seg contains ack then advance tp->snd_una if seg 10752 * contains an ECE and ECN support is enabled, the stream is ECN 10753 * capable. if SYN has been acked change to ESTABLISHED else 10754 * SYN_RCVD state arrange for segment to be acked (eventually) 10755 * continue processing rest of data/controls. 
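 * In code terms: an ACK at or below our iss, or beyond snd_max, is
 * answered with a reset; ACK+RST means the connection was refused;
 * a bare RST, or a segment without SYN, is simply dropped.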
10756 */ 10757 if ((thflags & TH_ACK) && 10758 (SEQ_LEQ(th->th_ack, tp->iss) || 10759 SEQ_GT(th->th_ack, tp->snd_max))) { 10760 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 10761 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 10762 return (1); 10763 } 10764 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 10765 TCP_PROBE5(connect__refused, NULL, tp, 10766 mtod(m, const char *), tp, th); 10767 tp = tcp_drop(tp, ECONNREFUSED); 10768 ctf_do_drop(m, tp); 10769 return (1); 10770 } 10771 if (thflags & TH_RST) { 10772 ctf_do_drop(m, tp); 10773 return (1); 10774 } 10775 if (!(thflags & TH_SYN)) { 10776 ctf_do_drop(m, tp); 10777 return (1); 10778 } 10779 tp->irs = th->th_seq; 10780 tcp_rcvseqinit(tp); 10781 rack = (struct tcp_rack *)tp->t_fb_ptr; 10782 if (thflags & TH_ACK) { 10783 int tfo_partial = 0; 10784 10785 KMOD_TCPSTAT_INC(tcps_connects); 10786 soisconnected(so); 10787 #ifdef MAC 10788 mac_socketpeer_set_from_mbuf(m, so); 10789 #endif 10790 /* Do window scaling on this connection? */ 10791 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10792 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10793 tp->rcv_scale = tp->request_r_scale; 10794 } 10795 tp->rcv_adv += min(tp->rcv_wnd, 10796 TCP_MAXWIN << tp->rcv_scale); 10797 /* 10798 * If not all the data that was sent in the TFO SYN 10799 * has been acked, resend the remainder right away. 10800 */ 10801 if (IS_FASTOPEN(tp->t_flags) && 10802 (tp->snd_una != tp->snd_max)) { 10803 tp->snd_nxt = th->th_ack; 10804 tfo_partial = 1; 10805 } 10806 /* 10807 * If there's data, delay ACK; if there's also a FIN ACKNOW 10808 * will be turned on later. 10809 */ 10810 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 10811 rack_timer_cancel(tp, rack, 10812 rack->r_ctl.rc_rcvtime, __LINE__); 10813 tp->t_flags |= TF_DELACK; 10814 } else { 10815 rack->r_wanted_output = 1; 10816 tp->t_flags |= TF_ACKNOW; 10817 rack->rc_dack_toggle = 0; 10818 } 10819 if (((thflags & (TH_CWR | TH_ECE)) == TH_ECE) && 10820 (V_tcp_do_ecn == 1)) { 10821 tp->t_flags2 |= TF2_ECN_PERMIT; 10822 KMOD_TCPSTAT_INC(tcps_ecn_shs); 10823 } 10824 if (SEQ_GT(th->th_ack, tp->snd_una)) { 10825 /* 10826 * We advance snd_una for the 10827 * fast open case. If th_ack is 10828 * acknowledging data beyond 10829 * snd_una we can't just call 10830 * ack-processing since the 10831 * data stream in our send-map 10832 * will start at snd_una + 1 (one 10833 * beyond the SYN). If its just 10834 * equal we don't need to do that 10835 * and there is no send_map. 10836 */ 10837 tp->snd_una++; 10838 } 10839 /* 10840 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 10841 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 10842 */ 10843 tp->t_starttime = ticks; 10844 if (tp->t_flags & TF_NEEDFIN) { 10845 tcp_state_change(tp, TCPS_FIN_WAIT_1); 10846 tp->t_flags &= ~TF_NEEDFIN; 10847 thflags &= ~TH_SYN; 10848 } else { 10849 tcp_state_change(tp, TCPS_ESTABLISHED); 10850 TCP_PROBE5(connect__established, NULL, tp, 10851 mtod(m, const char *), tp, th); 10852 rack_cc_conn_init(tp); 10853 } 10854 } else { 10855 /* 10856 * Received initial SYN in SYN-SENT[*] state => simultaneous 10857 * open. If segment contains CC option and there is a 10858 * cached CC, apply TAO test. If it succeeds, connection is * 10859 * half-synchronized. Otherwise, do 3-way handshake: 10860 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 10861 * there was no CC option, clear cached CC value. 
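 * (The CC/TAO wording above is historical T/TCP; what the code
 * below actually does is mark the connection half-synchronized
 * with TF_ACKNOW | TF_NEEDSYN and move it to SYN_RECEIVED.)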
10862 */ 10863 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 10864 tcp_state_change(tp, TCPS_SYN_RECEIVED); 10865 } 10866 INP_WLOCK_ASSERT(tp->t_inpcb); 10867 /* 10868 * Advance th->th_seq to correspond to first data byte. If data, 10869 * trim to stay within window, dropping FIN if necessary. 10870 */ 10871 th->th_seq++; 10872 if (tlen > tp->rcv_wnd) { 10873 todrop = tlen - tp->rcv_wnd; 10874 m_adj(m, -todrop); 10875 tlen = tp->rcv_wnd; 10876 thflags &= ~TH_FIN; 10877 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 10878 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 10879 } 10880 tp->snd_wl1 = th->th_seq - 1; 10881 tp->rcv_up = th->th_seq; 10882 /* 10883 * Client side of transaction: already sent SYN and data. If the 10884 * remote host used T/TCP to validate the SYN, our data will be 10885 * ACK'd; if so, enter normal data segment processing in the middle 10886 * of step 5, ack processing. Otherwise, goto step 6. 10887 */ 10888 if (thflags & TH_ACK) { 10889 /* For syn-sent we need to possibly update the rtt */ 10890 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 10891 uint32_t t, mcts; 10892 10893 mcts = tcp_ts_getticks(); 10894 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 10895 if (!tp->t_rttlow || tp->t_rttlow > t) 10896 tp->t_rttlow = t; 10897 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 10898 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 10899 tcp_rack_xmit_timer_commit(rack, tp); 10900 } 10901 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 10902 return (ret_val); 10903 /* We may have changed to FIN_WAIT_1 above */ 10904 if (tp->t_state == TCPS_FIN_WAIT_1) { 10905 /* 10906 * In FIN_WAIT_1 STATE in addition to the processing 10907 * for the ESTABLISHED state if our FIN is now 10908 * acknowledged then enter FIN_WAIT_2. 10909 */ 10910 if (ourfinisacked) { 10911 /* 10912 * If we can't receive any more data, then 10913 * closing user can proceed. Starting the 10914 * timer is contrary to the specification, 10915 * but if we don't get a FIN we'll hang 10916 * forever. 10917 * 10918 * XXXjl: we should release the tp also, and 10919 * use a compressed state. 10920 */ 10921 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10922 soisdisconnected(so); 10923 tcp_timer_activate(tp, TT_2MSL, 10924 (tcp_fast_finwait2_recycle ? 10925 tcp_finwait2_timeout : 10926 TP_MAXIDLE(tp))); 10927 } 10928 tcp_state_change(tp, TCPS_FIN_WAIT_2); 10929 } 10930 } 10931 } 10932 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 10933 tiwin, thflags, nxt_pkt)); 10934 } 10935 10936 /* 10937 * Return value of 1, the TCB is unlocked and most 10938 * likely gone, return value of 0, the TCP is still 10939 * locked. 
10940 */ 10941 static int 10942 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 10943 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10944 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 10945 { 10946 struct tcp_rack *rack; 10947 int32_t ret_val = 0; 10948 int32_t ourfinisacked = 0; 10949 10950 ctf_calc_rwin(so, tp); 10951 if ((thflags & TH_ACK) && 10952 (SEQ_LEQ(th->th_ack, tp->snd_una) || 10953 SEQ_GT(th->th_ack, tp->snd_max))) { 10954 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 10955 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 10956 return (1); 10957 } 10958 rack = (struct tcp_rack *)tp->t_fb_ptr; 10959 if (IS_FASTOPEN(tp->t_flags)) { 10960 /* 10961 * When a TFO connection is in SYN_RECEIVED, the 10962 * only valid packets are the initial SYN, a 10963 * retransmit/copy of the initial SYN (possibly with 10964 * a subset of the original data), a valid ACK, a 10965 * FIN, or a RST. 10966 */ 10967 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 10968 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 10969 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 10970 return (1); 10971 } else if (thflags & TH_SYN) { 10972 /* non-initial SYN is ignored */ 10973 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 10974 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 10975 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 10976 ctf_do_drop(m, NULL); 10977 return (0); 10978 } 10979 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 10980 ctf_do_drop(m, NULL); 10981 return (0); 10982 } 10983 } 10984 if ((thflags & TH_RST) || 10985 (tp->t_fin_is_rst && (thflags & TH_FIN))) 10986 return (ctf_process_rst(m, th, so, tp)); 10987 /* 10988 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 10989 * it's less than ts_recent, drop it. 10990 */ 10991 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 10992 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 10993 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 10994 return (ret_val); 10995 } 10996 /* 10997 * In the SYN-RECEIVED state, validate that the packet belongs to 10998 * this connection before trimming the data to fit the receive 10999 * window. Check the sequence number versus IRS since we know the 11000 * sequence numbers haven't wrapped. This is a partial fix for the 11001 * "LAND" DoS attack. 11002 */ 11003 if (SEQ_LT(th->th_seq, tp->irs)) { 11004 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11005 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11006 return (1); 11007 } 11008 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11009 &rack->r_ctl.challenge_ack_ts, 11010 &rack->r_ctl.challenge_ack_cnt)) { 11011 return (ret_val); 11012 } 11013 /* 11014 * If last ACK falls within this segment's sequence numbers, record 11015 * its timestamp. NOTE: 1) That the test incorporates suggestions 11016 * from the latest proposal of the tcplw@cray.com list (Braden 11017 * 1993/04/26). 2) That updating only on newer timestamps interferes 11018 * with our earlier PAWS tests, so this check should be solely 11019 * predicated on the sequence space of this segment. 3) That we 11020 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11021 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11022 * SEG.Len, This modified check allows us to overcome RFC1323's 11023 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11024 * p.869. 
In such cases, we can still calculate the RTT correctly 11025 * when RCV.NXT == Last.ACK.Sent. 11026 */ 11027 if ((to->to_flags & TOF_TS) != 0 && 11028 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11029 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11030 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11031 tp->ts_recent_age = tcp_ts_getticks(); 11032 tp->ts_recent = to->to_tsval; 11033 } 11034 tp->snd_wnd = tiwin; 11035 rack_validate_fo_sendwin_up(tp, rack); 11036 /* 11037 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11038 * is on (half-synchronized state), then queue data for later 11039 * processing; else drop segment and return. 11040 */ 11041 if ((thflags & TH_ACK) == 0) { 11042 if (IS_FASTOPEN(tp->t_flags)) { 11043 rack_cc_conn_init(tp); 11044 } 11045 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11046 tiwin, thflags, nxt_pkt)); 11047 } 11048 KMOD_TCPSTAT_INC(tcps_connects); 11049 soisconnected(so); 11050 /* Do window scaling? */ 11051 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11052 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11053 tp->rcv_scale = tp->request_r_scale; 11054 } 11055 /* 11056 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11057 * FIN-WAIT-1 11058 */ 11059 tp->t_starttime = ticks; 11060 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11061 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11062 tp->t_tfo_pending = NULL; 11063 } 11064 if (tp->t_flags & TF_NEEDFIN) { 11065 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11066 tp->t_flags &= ~TF_NEEDFIN; 11067 } else { 11068 tcp_state_change(tp, TCPS_ESTABLISHED); 11069 TCP_PROBE5(accept__established, NULL, tp, 11070 mtod(m, const char *), tp, th); 11071 /* 11072 * TFO connections call cc_conn_init() during SYN 11073 * processing. Calling it again here for such connections 11074 * is not harmless as it would undo the snd_cwnd reduction 11075 * that occurs when a TFO SYN|ACK is retransmitted. 11076 */ 11077 if (!IS_FASTOPEN(tp->t_flags)) 11078 rack_cc_conn_init(tp); 11079 } 11080 /* 11081 * Account for the ACK of our SYN prior to 11082 * regular ACK processing below, except for 11083 * simultaneous SYN, which is handled later. 11084 */ 11085 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11086 tp->snd_una++; 11087 /* 11088 * If segment contains data or ACK, will call tcp_reass() later; if 11089 * not, do so now to pass queued data to user. 11090 */ 11091 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11092 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11093 (struct mbuf *)0); 11094 if (tp->t_flags & TF_WAKESOR) { 11095 tp->t_flags &= ~TF_WAKESOR; 11096 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 11097 sorwakeup_locked(so); 11098 } 11099 } 11100 tp->snd_wl1 = th->th_seq - 1; 11101 /* For syn-recv we need to possibly update the rtt */ 11102 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11103 uint32_t t, mcts; 11104 11105 mcts = tcp_ts_getticks(); 11106 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11107 if (!tp->t_rttlow || tp->t_rttlow > t) 11108 tp->t_rttlow = t; 11109 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11110 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11111 tcp_rack_xmit_timer_commit(rack, tp); 11112 } 11113 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11114 return (ret_val); 11115 } 11116 if (tp->t_state == TCPS_FIN_WAIT_1) { 11117 /* We could have went to FIN_WAIT_1 (or EST) above */ 11118 /* 11119 * In FIN_WAIT_1 STATE in addition to the processing for the 11120 * ESTABLISHED state if our FIN is now acknowledged then 11121 * enter FIN_WAIT_2. 11122 */ 11123 if (ourfinisacked) { 11124 /* 11125 * If we can't receive any more data, then closing 11126 * user can proceed. Starting the timer is contrary 11127 * to the specification, but if we don't get a FIN 11128 * we'll hang forever. 11129 * 11130 * XXXjl: we should release the tp also, and use a 11131 * compressed state. 11132 */ 11133 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11134 soisdisconnected(so); 11135 tcp_timer_activate(tp, TT_2MSL, 11136 (tcp_fast_finwait2_recycle ? 11137 tcp_finwait2_timeout : 11138 TP_MAXIDLE(tp))); 11139 } 11140 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11141 } 11142 } 11143 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11144 tiwin, thflags, nxt_pkt)); 11145 } 11146 11147 /* 11148 * Return value of 1, the TCB is unlocked and most 11149 * likely gone, return value of 0, the TCP is still 11150 * locked. 11151 */ 11152 static int 11153 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11154 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11155 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11156 { 11157 int32_t ret_val = 0; 11158 struct tcp_rack *rack; 11159 11160 /* 11161 * Header prediction: check for the two common cases of a 11162 * uni-directional data xfer. If the packet has no control flags, 11163 * is in-sequence, the window didn't change and we're not 11164 * retransmitting, it's a candidate. If the length is zero and the 11165 * ack moved forward, we're the sender side of the xfer. Just free 11166 * the data acked & wake any higher level process that was blocked 11167 * waiting for space. If the length is non-zero and the ack didn't 11168 * move, we're the receiver side. If we're getting packets in-order 11169 * (the reassembly queue is empty), add the data toc The socket 11170 * buffer and note that we need a delayed ack. Make sure that the 11171 * hidden state-flags are also off. Since we check for 11172 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 
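 * In this stack the two cases are handed to rack_fastack() (pure
 * ACK, tlen == 0) and rack_do_fastnewdata() (in-order data); a
 * return of 0 from either means the segment could not be taken on
 * the fast path and it falls through to the slow path below.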
11173 */ 11174 rack = (struct tcp_rack *)tp->t_fb_ptr; 11175 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11176 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11177 __predict_true(SEGQ_EMPTY(tp)) && 11178 __predict_true(th->th_seq == tp->rcv_nxt)) { 11179 if (tlen == 0) { 11180 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11181 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11182 return (0); 11183 } 11184 } else { 11185 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11186 tiwin, nxt_pkt, iptos)) { 11187 return (0); 11188 } 11189 } 11190 } 11191 ctf_calc_rwin(so, tp); 11192 11193 if ((thflags & TH_RST) || 11194 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11195 return (ctf_process_rst(m, th, so, tp)); 11196 11197 /* 11198 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11199 * synchronized state. 11200 */ 11201 if (thflags & TH_SYN) { 11202 ctf_challenge_ack(m, th, tp, &ret_val); 11203 return (ret_val); 11204 } 11205 /* 11206 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11207 * it's less than ts_recent, drop it. 11208 */ 11209 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11210 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11211 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11212 return (ret_val); 11213 } 11214 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11215 &rack->r_ctl.challenge_ack_ts, 11216 &rack->r_ctl.challenge_ack_cnt)) { 11217 return (ret_val); 11218 } 11219 /* 11220 * If last ACK falls within this segment's sequence numbers, record 11221 * its timestamp. NOTE: 1) That the test incorporates suggestions 11222 * from the latest proposal of the tcplw@cray.com list (Braden 11223 * 1993/04/26). 2) That updating only on newer timestamps interferes 11224 * with our earlier PAWS tests, so this check should be solely 11225 * predicated on the sequence space of this segment. 3) That we 11226 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11227 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11228 * SEG.Len, This modified check allows us to overcome RFC1323's 11229 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11230 * p.869. In such cases, we can still calculate the RTT correctly 11231 * when RCV.NXT == Last.ACK.Sent. 11232 */ 11233 if ((to->to_flags & TOF_TS) != 0 && 11234 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11235 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11236 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11237 tp->ts_recent_age = tcp_ts_getticks(); 11238 tp->ts_recent = to->to_tsval; 11239 } 11240 /* 11241 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11242 * is on (half-synchronized state), then queue data for later 11243 * processing; else drop segment and return. 11244 */ 11245 if ((thflags & TH_ACK) == 0) { 11246 if (tp->t_flags & TF_NEEDSYN) { 11247 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11248 tiwin, thflags, nxt_pkt)); 11249 11250 } else if (tp->t_flags & TF_ACKNOW) { 11251 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11252 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11253 return (ret_val); 11254 } else { 11255 ctf_do_drop(m, NULL); 11256 return (0); 11257 } 11258 } 11259 /* 11260 * Ack processing. 
11261 */ 11262 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11263 return (ret_val); 11264 } 11265 if (sbavail(&so->so_snd)) { 11266 if (ctf_progress_timeout_check(tp, true)) { 11267 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11268 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11269 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11270 return (1); 11271 } 11272 } 11273 /* State changes only happen in rack_process_data() */ 11274 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11275 tiwin, thflags, nxt_pkt)); 11276 } 11277 11278 /* 11279 * Return value of 1, the TCB is unlocked and most 11280 * likely gone, return value of 0, the TCP is still 11281 * locked. 11282 */ 11283 static int 11284 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11285 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11286 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11287 { 11288 int32_t ret_val = 0; 11289 struct tcp_rack *rack; 11290 11291 rack = (struct tcp_rack *)tp->t_fb_ptr; 11292 ctf_calc_rwin(so, tp); 11293 if ((thflags & TH_RST) || 11294 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11295 return (ctf_process_rst(m, th, so, tp)); 11296 /* 11297 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11298 * synchronized state. 11299 */ 11300 if (thflags & TH_SYN) { 11301 ctf_challenge_ack(m, th, tp, &ret_val); 11302 return (ret_val); 11303 } 11304 /* 11305 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11306 * it's less than ts_recent, drop it. 11307 */ 11308 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11309 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11310 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11311 return (ret_val); 11312 } 11313 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11314 &rack->r_ctl.challenge_ack_ts, 11315 &rack->r_ctl.challenge_ack_cnt)) { 11316 return (ret_val); 11317 } 11318 /* 11319 * If last ACK falls within this segment's sequence numbers, record 11320 * its timestamp. NOTE: 1) That the test incorporates suggestions 11321 * from the latest proposal of the tcplw@cray.com list (Braden 11322 * 1993/04/26). 2) That updating only on newer timestamps interferes 11323 * with our earlier PAWS tests, so this check should be solely 11324 * predicated on the sequence space of this segment. 3) That we 11325 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11326 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11327 * SEG.Len, This modified check allows us to overcome RFC1323's 11328 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11329 * p.869. In such cases, we can still calculate the RTT correctly 11330 * when RCV.NXT == Last.ACK.Sent. 11331 */ 11332 if ((to->to_flags & TOF_TS) != 0 && 11333 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11334 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11335 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11336 tp->ts_recent_age = tcp_ts_getticks(); 11337 tp->ts_recent = to->to_tsval; 11338 } 11339 /* 11340 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11341 * is on (half-synchronized state), then queue data for later 11342 * processing; else drop segment and return. 
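 * Concretely, in the block below: with TF_NEEDSYN set the segment
 * is still fed to rack_process_data(); with TF_ACKNOW set it is
 * dropped after arranging for an ACK to go out; otherwise it is
 * dropped silently.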
11343 */ 11344 if ((thflags & TH_ACK) == 0) { 11345 if (tp->t_flags & TF_NEEDSYN) { 11346 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11347 tiwin, thflags, nxt_pkt)); 11348 11349 } else if (tp->t_flags & TF_ACKNOW) { 11350 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11351 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11352 return (ret_val); 11353 } else { 11354 ctf_do_drop(m, NULL); 11355 return (0); 11356 } 11357 } 11358 /* 11359 * Ack processing. 11360 */ 11361 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11362 return (ret_val); 11363 } 11364 if (sbavail(&so->so_snd)) { 11365 if (ctf_progress_timeout_check(tp, true)) { 11366 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11367 tp, tick, PROGRESS_DROP, __LINE__); 11368 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11369 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11370 return (1); 11371 } 11372 } 11373 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11374 tiwin, thflags, nxt_pkt)); 11375 } 11376 11377 static int 11378 rack_check_data_after_close(struct mbuf *m, 11379 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11380 { 11381 struct tcp_rack *rack; 11382 11383 rack = (struct tcp_rack *)tp->t_fb_ptr; 11384 if (rack->rc_allow_data_af_clo == 0) { 11385 close_now: 11386 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11387 /* tcp_close will kill the inp pre-log the Reset */ 11388 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11389 tp = tcp_close(tp); 11390 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11391 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11392 return (1); 11393 } 11394 if (sbavail(&so->so_snd) == 0) 11395 goto close_now; 11396 /* Ok we allow data that is ignored and a followup reset */ 11397 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11398 tp->rcv_nxt = th->th_seq + *tlen; 11399 tp->t_flags2 |= TF2_DROP_AF_DATA; 11400 rack->r_wanted_output = 1; 11401 *tlen = 0; 11402 return (0); 11403 } 11404 11405 /* 11406 * Return value of 1, the TCB is unlocked and most 11407 * likely gone, return value of 0, the TCP is still 11408 * locked. 11409 */ 11410 static int 11411 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11412 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11413 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11414 { 11415 int32_t ret_val = 0; 11416 int32_t ourfinisacked = 0; 11417 struct tcp_rack *rack; 11418 11419 rack = (struct tcp_rack *)tp->t_fb_ptr; 11420 ctf_calc_rwin(so, tp); 11421 11422 if ((thflags & TH_RST) || 11423 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11424 return (ctf_process_rst(m, th, so, tp)); 11425 /* 11426 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11427 * synchronized state. 11428 */ 11429 if (thflags & TH_SYN) { 11430 ctf_challenge_ack(m, th, tp, &ret_val); 11431 return (ret_val); 11432 } 11433 /* 11434 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11435 * it's less than ts_recent, drop it. 
11436 */ 11437 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11438 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11439 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11440 return (ret_val); 11441 } 11442 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11443 &rack->r_ctl.challenge_ack_ts, 11444 &rack->r_ctl.challenge_ack_cnt)) { 11445 return (ret_val); 11446 } 11447 /* 11448 * If new data are received on a connection after the user processes 11449 * are gone, then RST the other end. 11450 */ 11451 if ((so->so_state & SS_NOFDREF) && tlen) { 11452 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11453 return (1); 11454 } 11455 /* 11456 * If last ACK falls within this segment's sequence numbers, record 11457 * its timestamp. NOTE: 1) That the test incorporates suggestions 11458 * from the latest proposal of the tcplw@cray.com list (Braden 11459 * 1993/04/26). 2) That updating only on newer timestamps interferes 11460 * with our earlier PAWS tests, so this check should be solely 11461 * predicated on the sequence space of this segment. 3) That we 11462 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11463 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11464 * SEG.Len, This modified check allows us to overcome RFC1323's 11465 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11466 * p.869. In such cases, we can still calculate the RTT correctly 11467 * when RCV.NXT == Last.ACK.Sent. 11468 */ 11469 if ((to->to_flags & TOF_TS) != 0 && 11470 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11471 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11472 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11473 tp->ts_recent_age = tcp_ts_getticks(); 11474 tp->ts_recent = to->to_tsval; 11475 } 11476 /* 11477 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11478 * is on (half-synchronized state), then queue data for later 11479 * processing; else drop segment and return. 11480 */ 11481 if ((thflags & TH_ACK) == 0) { 11482 if (tp->t_flags & TF_NEEDSYN) { 11483 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11484 tiwin, thflags, nxt_pkt)); 11485 } else if (tp->t_flags & TF_ACKNOW) { 11486 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11487 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11488 return (ret_val); 11489 } else { 11490 ctf_do_drop(m, NULL); 11491 return (0); 11492 } 11493 } 11494 /* 11495 * Ack processing. 11496 */ 11497 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11498 return (ret_val); 11499 } 11500 if (ourfinisacked) { 11501 /* 11502 * If we can't receive any more data, then closing user can 11503 * proceed. Starting the timer is contrary to the 11504 * specification, but if we don't get a FIN we'll hang 11505 * forever. 11506 * 11507 * XXXjl: we should release the tp also, and use a 11508 * compressed state. 11509 */ 11510 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11511 soisdisconnected(so); 11512 tcp_timer_activate(tp, TT_2MSL, 11513 (tcp_fast_finwait2_recycle ? 
11514 tcp_finwait2_timeout : 11515 TP_MAXIDLE(tp))); 11516 } 11517 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11518 } 11519 if (sbavail(&so->so_snd)) { 11520 if (ctf_progress_timeout_check(tp, true)) { 11521 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11522 tp, tick, PROGRESS_DROP, __LINE__); 11523 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11524 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11525 return (1); 11526 } 11527 } 11528 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11529 tiwin, thflags, nxt_pkt)); 11530 } 11531 11532 /* 11533 * Return value of 1, the TCB is unlocked and most 11534 * likely gone, return value of 0, the TCP is still 11535 * locked. 11536 */ 11537 static int 11538 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11539 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11540 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11541 { 11542 int32_t ret_val = 0; 11543 int32_t ourfinisacked = 0; 11544 struct tcp_rack *rack; 11545 11546 rack = (struct tcp_rack *)tp->t_fb_ptr; 11547 ctf_calc_rwin(so, tp); 11548 11549 if ((thflags & TH_RST) || 11550 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11551 return (ctf_process_rst(m, th, so, tp)); 11552 /* 11553 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11554 * synchronized state. 11555 */ 11556 if (thflags & TH_SYN) { 11557 ctf_challenge_ack(m, th, tp, &ret_val); 11558 return (ret_val); 11559 } 11560 /* 11561 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11562 * it's less than ts_recent, drop it. 11563 */ 11564 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11565 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11566 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11567 return (ret_val); 11568 } 11569 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11570 &rack->r_ctl.challenge_ack_ts, 11571 &rack->r_ctl.challenge_ack_cnt)) { 11572 return (ret_val); 11573 } 11574 /* 11575 * If new data are received on a connection after the user processes 11576 * are gone, then RST the other end. 11577 */ 11578 if ((so->so_state & SS_NOFDREF) && tlen) { 11579 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11580 return (1); 11581 } 11582 /* 11583 * If last ACK falls within this segment's sequence numbers, record 11584 * its timestamp. NOTE: 1) That the test incorporates suggestions 11585 * from the latest proposal of the tcplw@cray.com list (Braden 11586 * 1993/04/26). 2) That updating only on newer timestamps interferes 11587 * with our earlier PAWS tests, so this check should be solely 11588 * predicated on the sequence space of this segment. 3) That we 11589 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11590 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11591 * SEG.Len, This modified check allows us to overcome RFC1323's 11592 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11593 * p.869. In such cases, we can still calculate the RTT correctly 11594 * when RCV.NXT == Last.ACK.Sent. 
11595 */ 11596 if ((to->to_flags & TOF_TS) != 0 && 11597 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11598 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11599 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11600 tp->ts_recent_age = tcp_ts_getticks(); 11601 tp->ts_recent = to->to_tsval; 11602 } 11603 /* 11604 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11605 * is on (half-synchronized state), then queue data for later 11606 * processing; else drop segment and return. 11607 */ 11608 if ((thflags & TH_ACK) == 0) { 11609 if (tp->t_flags & TF_NEEDSYN) { 11610 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11611 tiwin, thflags, nxt_pkt)); 11612 } else if (tp->t_flags & TF_ACKNOW) { 11613 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11614 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11615 return (ret_val); 11616 } else { 11617 ctf_do_drop(m, NULL); 11618 return (0); 11619 } 11620 } 11621 /* 11622 * Ack processing. 11623 */ 11624 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11625 return (ret_val); 11626 } 11627 if (ourfinisacked) { 11628 tcp_twstart(tp); 11629 m_freem(m); 11630 return (1); 11631 } 11632 if (sbavail(&so->so_snd)) { 11633 if (ctf_progress_timeout_check(tp, true)) { 11634 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11635 tp, tick, PROGRESS_DROP, __LINE__); 11636 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11637 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11638 return (1); 11639 } 11640 } 11641 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11642 tiwin, thflags, nxt_pkt)); 11643 } 11644 11645 /* 11646 * Return value of 1, the TCB is unlocked and most 11647 * likely gone, return value of 0, the TCP is still 11648 * locked. 11649 */ 11650 static int 11651 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 11652 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11653 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11654 { 11655 int32_t ret_val = 0; 11656 int32_t ourfinisacked = 0; 11657 struct tcp_rack *rack; 11658 11659 rack = (struct tcp_rack *)tp->t_fb_ptr; 11660 ctf_calc_rwin(so, tp); 11661 11662 if ((thflags & TH_RST) || 11663 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11664 return (ctf_process_rst(m, th, so, tp)); 11665 /* 11666 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11667 * synchronized state. 11668 */ 11669 if (thflags & TH_SYN) { 11670 ctf_challenge_ack(m, th, tp, &ret_val); 11671 return (ret_val); 11672 } 11673 /* 11674 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11675 * it's less than ts_recent, drop it. 11676 */ 11677 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11678 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11679 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11680 return (ret_val); 11681 } 11682 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11683 &rack->r_ctl.challenge_ack_ts, 11684 &rack->r_ctl.challenge_ack_cnt)) { 11685 return (ret_val); 11686 } 11687 /* 11688 * If new data are received on a connection after the user processes 11689 * are gone, then RST the other end. 11690 */ 11691 if ((so->so_state & SS_NOFDREF) && tlen) { 11692 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11693 return (1); 11694 } 11695 /* 11696 * If last ACK falls within this segment's sequence numbers, record 11697 * its timestamp. 
NOTE: 1) That the test incorporates suggestions 11698 * from the latest proposal of the tcplw@cray.com list (Braden 11699 * 1993/04/26). 2) That updating only on newer timestamps interferes 11700 * with our earlier PAWS tests, so this check should be solely 11701 * predicated on the sequence space of this segment. 3) That we 11702 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11703 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11704 * SEG.Len, This modified check allows us to overcome RFC1323's 11705 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11706 * p.869. In such cases, we can still calculate the RTT correctly 11707 * when RCV.NXT == Last.ACK.Sent. 11708 */ 11709 if ((to->to_flags & TOF_TS) != 0 && 11710 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11711 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11712 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11713 tp->ts_recent_age = tcp_ts_getticks(); 11714 tp->ts_recent = to->to_tsval; 11715 } 11716 /* 11717 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11718 * is on (half-synchronized state), then queue data for later 11719 * processing; else drop segment and return. 11720 */ 11721 if ((thflags & TH_ACK) == 0) { 11722 if (tp->t_flags & TF_NEEDSYN) { 11723 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11724 tiwin, thflags, nxt_pkt)); 11725 } else if (tp->t_flags & TF_ACKNOW) { 11726 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11727 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11728 return (ret_val); 11729 } else { 11730 ctf_do_drop(m, NULL); 11731 return (0); 11732 } 11733 } 11734 /* 11735 * case TCPS_LAST_ACK: Ack processing. 11736 */ 11737 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11738 return (ret_val); 11739 } 11740 if (ourfinisacked) { 11741 tp = tcp_close(tp); 11742 ctf_do_drop(m, tp); 11743 return (1); 11744 } 11745 if (sbavail(&so->so_snd)) { 11746 if (ctf_progress_timeout_check(tp, true)) { 11747 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11748 tp, tick, PROGRESS_DROP, __LINE__); 11749 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11750 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11751 return (1); 11752 } 11753 } 11754 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11755 tiwin, thflags, nxt_pkt)); 11756 } 11757 11758 /* 11759 * Return value of 1, the TCB is unlocked and most 11760 * likely gone, return value of 0, the TCP is still 11761 * locked. 11762 */ 11763 static int 11764 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 11765 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11766 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11767 { 11768 int32_t ret_val = 0; 11769 int32_t ourfinisacked = 0; 11770 struct tcp_rack *rack; 11771 11772 rack = (struct tcp_rack *)tp->t_fb_ptr; 11773 ctf_calc_rwin(so, tp); 11774 11775 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 11776 if ((thflags & TH_RST) || 11777 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11778 return (ctf_process_rst(m, th, so, tp)); 11779 /* 11780 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11781 * synchronized state. 11782 */ 11783 if (thflags & TH_SYN) { 11784 ctf_challenge_ack(m, th, tp, &ret_val); 11785 return (ret_val); 11786 } 11787 /* 11788 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11789 * it's less than ts_recent, drop it. 
11790 */ 11791 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11792 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11793 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11794 return (ret_val); 11795 } 11796 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11797 &rack->r_ctl.challenge_ack_ts, 11798 &rack->r_ctl.challenge_ack_cnt)) { 11799 return (ret_val); 11800 } 11801 /* 11802 * If new data are received on a connection after the user processes 11803 * are gone, then RST the other end. 11804 */ 11805 if ((so->so_state & SS_NOFDREF) && 11806 tlen) { 11807 if (rack_check_data_after_close(m, tp, &tlen, th, so)) 11808 return (1); 11809 } 11810 /* 11811 * If last ACK falls within this segment's sequence numbers, record 11812 * its timestamp. NOTE: 1) That the test incorporates suggestions 11813 * from the latest proposal of the tcplw@cray.com list (Braden 11814 * 1993/04/26). 2) That updating only on newer timestamps interferes 11815 * with our earlier PAWS tests, so this check should be solely 11816 * predicated on the sequence space of this segment. 3) That we 11817 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11818 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11819 * SEG.Len, This modified check allows us to overcome RFC1323's 11820 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11821 * p.869. In such cases, we can still calculate the RTT correctly 11822 * when RCV.NXT == Last.ACK.Sent. 11823 */ 11824 if ((to->to_flags & TOF_TS) != 0 && 11825 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11826 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11827 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11828 tp->ts_recent_age = tcp_ts_getticks(); 11829 tp->ts_recent = to->to_tsval; 11830 } 11831 /* 11832 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11833 * is on (half-synchronized state), then queue data for later 11834 * processing; else drop segment and return. 11835 */ 11836 if ((thflags & TH_ACK) == 0) { 11837 if (tp->t_flags & TF_NEEDSYN) { 11838 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11839 tiwin, thflags, nxt_pkt)); 11840 } else if (tp->t_flags & TF_ACKNOW) { 11841 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11842 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11843 return (ret_val); 11844 } else { 11845 ctf_do_drop(m, NULL); 11846 return (0); 11847 } 11848 } 11849 /* 11850 * Ack processing. 
11851 */ 11852 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11853 return (ret_val); 11854 } 11855 if (sbavail(&so->so_snd)) { 11856 if (ctf_progress_timeout_check(tp, true)) { 11857 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11858 tp, tick, PROGRESS_DROP, __LINE__); 11859 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 11860 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11861 return (1); 11862 } 11863 } 11864 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11865 tiwin, thflags, nxt_pkt)); 11866 } 11867 11868 static void inline 11869 rack_clear_rate_sample(struct tcp_rack *rack) 11870 { 11871 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 11872 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 11873 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 11874 } 11875 11876 static void 11877 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 11878 { 11879 uint64_t bw_est, rate_wanted; 11880 int chged = 0; 11881 uint32_t user_max, orig_min, orig_max; 11882 11883 orig_min = rack->r_ctl.rc_pace_min_segs; 11884 orig_max = rack->r_ctl.rc_pace_max_segs; 11885 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 11886 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 11887 chged = 1; 11888 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 11889 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 11890 if (user_max != rack->r_ctl.rc_pace_max_segs) 11891 chged = 1; 11892 } 11893 if (rack->rc_force_max_seg) { 11894 rack->r_ctl.rc_pace_max_segs = user_max; 11895 } else if (rack->use_fixed_rate) { 11896 bw_est = rack_get_bw(rack); 11897 if ((rack->r_ctl.crte == NULL) || 11898 (bw_est != rack->r_ctl.crte->rate)) { 11899 rack->r_ctl.rc_pace_max_segs = user_max; 11900 } else { 11901 /* We are pacing right at the hardware rate */ 11902 uint32_t segsiz; 11903 11904 segsiz = min(ctf_fixed_maxseg(tp), 11905 rack->r_ctl.rc_pace_min_segs); 11906 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 11907 tp, bw_est, segsiz, 0, 11908 rack->r_ctl.crte, NULL); 11909 } 11910 } else if (rack->rc_always_pace) { 11911 if (rack->r_ctl.gp_bw || 11912 #ifdef NETFLIX_PEAKRATE 11913 rack->rc_tp->t_maxpeakrate || 11914 #endif 11915 rack->r_ctl.init_rate) { 11916 /* We have a rate of some sort set */ 11917 uint32_t orig; 11918 11919 bw_est = rack_get_bw(rack); 11920 orig = rack->r_ctl.rc_pace_max_segs; 11921 if (fill_override) 11922 rate_wanted = *fill_override; 11923 else 11924 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 11925 if (rate_wanted) { 11926 /* We have something */ 11927 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 11928 rate_wanted, 11929 ctf_fixed_maxseg(rack->rc_tp)); 11930 } else 11931 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 11932 if (orig != rack->r_ctl.rc_pace_max_segs) 11933 chged = 1; 11934 } else if ((rack->r_ctl.gp_bw == 0) && 11935 (rack->r_ctl.rc_pace_max_segs == 0)) { 11936 /* 11937 * If we have nothing limit us to bursting 11938 * out IW sized pieces. 
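 * For example, with a 1448 byte MSS and the stack's default initial
 * window of roughly ten segments, rc_init_window() caps an unmeasured
 * connection at about 14k bytes per pacing burst until a goodput
 * estimate (gp_bw) or an explicit init_rate becomes available.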
11939 */ 11940 chged = 1; 11941 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 11942 } 11943 } 11944 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 11945 chged = 1; 11946 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 11947 } 11948 if (chged) 11949 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 11950 } 11951 11952 11953 static void 11954 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 11955 { 11956 #ifdef INET6 11957 struct ip6_hdr *ip6 = NULL; 11958 #endif 11959 #ifdef INET 11960 struct ip *ip = NULL; 11961 #endif 11962 struct udphdr *udp = NULL; 11963 11964 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 11965 #ifdef INET6 11966 if (rack->r_is_v6) { 11967 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 11968 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 11969 if (tp->t_port) { 11970 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 11971 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 11972 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 11973 udp->uh_dport = tp->t_port; 11974 rack->r_ctl.fsb.udp = udp; 11975 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 11976 } else 11977 { 11978 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 11979 rack->r_ctl.fsb.udp = NULL; 11980 } 11981 tcpip_fillheaders(rack->rc_inp, 11982 tp->t_port, 11983 ip6, rack->r_ctl.fsb.th); 11984 } else 11985 #endif /* INET6 */ 11986 { 11987 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 11988 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 11989 if (tp->t_port) { 11990 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 11991 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 11992 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 11993 udp->uh_dport = tp->t_port; 11994 rack->r_ctl.fsb.udp = udp; 11995 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 11996 } else 11997 { 11998 rack->r_ctl.fsb.udp = NULL; 11999 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12000 } 12001 tcpip_fillheaders(rack->rc_inp, 12002 tp->t_port, 12003 ip, rack->r_ctl.fsb.th); 12004 } 12005 rack->r_fsb_inited = 1; 12006 } 12007 12008 static int 12009 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12010 { 12011 /* 12012 * Allocate the larger of spaces V6 if available else just 12013 * V4 and include udphdr (overbook) 12014 */ 12015 #ifdef INET6 12016 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12017 #else 12018 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12019 #endif 12020 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12021 M_TCPFSB, M_NOWAIT|M_ZERO); 12022 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12023 return (ENOMEM); 12024 } 12025 rack->r_fsb_inited = 0; 12026 return (0); 12027 } 12028 12029 static int 12030 rack_init(struct tcpcb *tp) 12031 { 12032 struct tcp_rack *rack = NULL; 12033 struct rack_sendmap *insret; 12034 uint32_t iwin, snt, us_cts; 12035 int err; 12036 12037 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12038 if (tp->t_fb_ptr == NULL) { 12039 /* 12040 * We need to allocate memory but cant. The INP and INP_INFO 12041 * locks and they are recusive (happens during setup. 
So a 12042 * scheme to drop the locks fails :( 12043 * 12044 */ 12045 return (ENOMEM); 12046 } 12047 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12048 12049 rack = (struct tcp_rack *)tp->t_fb_ptr; 12050 RB_INIT(&rack->r_ctl.rc_mtree); 12051 TAILQ_INIT(&rack->r_ctl.rc_free); 12052 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12053 rack->rc_tp = tp; 12054 rack->rc_inp = tp->t_inpcb; 12055 /* Set the flag */ 12056 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12057 /* Probably not needed but lets be sure */ 12058 rack_clear_rate_sample(rack); 12059 /* 12060 * Save off the default values, socket options will poke 12061 * at these if pacing is not on or we have not yet 12062 * reached where pacing is on (gp_ready/fixed enabled). 12063 * When they get set into the CC module (when gp_ready 12064 * is enabled or we enable fixed) then we will set these 12065 * values into the CC and place in here the old values 12066 * so we have a restoral. Then we will set the flag 12067 * rc_pacing_cc_set. That way whenever we turn off pacing 12068 * or switch off this stack, we will know to go restore 12069 * the saved values. 12070 */ 12071 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12072 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12073 /* We want abe like behavior as well */ 12074 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN; 12075 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12076 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12077 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12078 if (use_rack_rr) 12079 rack->use_rack_rr = 1; 12080 if (V_tcp_delack_enabled) 12081 tp->t_delayed_ack = 1; 12082 else 12083 tp->t_delayed_ack = 0; 12084 #ifdef TCP_ACCOUNTING 12085 if (rack_tcp_accounting) { 12086 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12087 } 12088 #endif 12089 if (rack_enable_shared_cwnd) 12090 rack->rack_enable_scwnd = 1; 12091 rack->rc_user_set_max_segs = rack_hptsi_segments; 12092 rack->rc_force_max_seg = 0; 12093 if (rack_use_imac_dack) 12094 rack->rc_dack_mode = 1; 12095 TAILQ_INIT(&rack->r_ctl.opt_list); 12096 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12097 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12098 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12099 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12100 rack->r_ctl.rc_highest_us_rtt = 0; 12101 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12102 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12103 if (rack_use_cmp_acks) 12104 rack->r_use_cmp_ack = 1; 12105 if (rack_disable_prr) 12106 rack->rack_no_prr = 1; 12107 if (rack_gp_no_rec_chg) 12108 rack->rc_gp_no_rec_chg = 1; 12109 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12110 rack->rc_always_pace = 1; 12111 if (rack->use_fixed_rate || rack->gp_ready) 12112 rack_set_cc_pacing(rack); 12113 } else 12114 rack->rc_always_pace = 0; 12115 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12116 rack->r_mbuf_queue = 1; 12117 else 12118 rack->r_mbuf_queue = 0; 12119 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12120 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12121 else 12122 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12123 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12124 if (rack_limits_scwnd) 12125 rack->r_limit_scw = 1; 12126 else 12127 rack->r_limit_scw = 0; 12128 rack->rc_labc = V_tcp_abc_l_var; 12129 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12130 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12131 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12132 
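	/*
	 * Note that the fields set above and below are per-connection
	 * snapshots of the module wide rack_* defaults (exported as
	 * sysctls); changing a sysctl later only affects connections
	 * that are initialized after the change.
	 */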
rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12133 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12134 rack->r_ctl.rc_min_to = rack_min_to; 12135 microuptime(&rack->r_ctl.act_rcv_time); 12136 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12137 rack->r_running_late = 0; 12138 rack->r_running_early = 0; 12139 rack->rc_init_win = rack_default_init_window; 12140 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12141 if (rack_hw_up_only) 12142 rack->r_up_only = 1; 12143 if (rack_do_dyn_mul) { 12144 /* When dynamic adjustment is on CA needs to start at 100% */ 12145 rack->rc_gp_dyn_mul = 1; 12146 if (rack_do_dyn_mul >= 100) 12147 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12148 } else 12149 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12150 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12151 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12152 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12153 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12154 rack_probertt_filter_life); 12155 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12156 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12157 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12158 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12159 rack->r_ctl.rc_time_probertt_starts = 0; 12160 /* We require at least one measurement, even if the sysctl is 0 */ 12161 if (rack_req_measurements) 12162 rack->r_ctl.req_measurements = rack_req_measurements; 12163 else 12164 rack->r_ctl.req_measurements = 1; 12165 if (rack_enable_hw_pacing) 12166 rack->rack_hdw_pace_ena = 1; 12167 if (rack_hw_rate_caps) 12168 rack->r_rack_hw_rate_caps = 1; 12169 /* Do we force on detection? */ 12170 #ifdef NETFLIX_EXP_DETECTION 12171 if (tcp_force_detection) 12172 rack->do_detection = 1; 12173 else 12174 #endif 12175 rack->do_detection = 0; 12176 if (rack_non_rxt_use_cr) 12177 rack->rack_rec_nonrxt_use_cr = 1; 12178 err = rack_init_fsb(tp, rack); 12179 if (err) { 12180 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12181 tp->t_fb_ptr = NULL; 12182 return (err); 12183 } 12184 if (tp->snd_una != tp->snd_max) { 12185 /* Create a send map for the current outstanding data */ 12186 struct rack_sendmap *rsm; 12187 12188 rsm = rack_alloc(rack); 12189 if (rsm == NULL) { 12190 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12191 tp->t_fb_ptr = NULL; 12192 return (ENOMEM); 12193 } 12194 rsm->r_no_rtt_allowed = 1; 12195 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12196 rsm->r_rtr_cnt = 1; 12197 rsm->r_rtr_bytes = 0; 12198 if (tp->t_flags & TF_SENTFIN) { 12199 rsm->r_end = tp->snd_max - 1; 12200 rsm->r_flags |= RACK_HAS_FIN; 12201 } else { 12202 rsm->r_end = tp->snd_max; 12203 } 12204 if (tp->snd_una == tp->iss) { 12205 /* The data space is one beyond snd_una */ 12206 rsm->r_flags |= RACK_HAS_SYN; 12207 rsm->r_start = tp->iss; 12208 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); 12209 } else 12210 rsm->r_start = tp->snd_una; 12211 rsm->r_dupack = 0; 12212 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12213 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12214 if (rsm->m) 12215 rsm->orig_m_len = rsm->m->m_len; 12216 else 12217 rsm->orig_m_len = 0; 12218 } else { 12219 /* 12220 * This can happen if we have a stand-alone FIN or 12221 * SYN. 
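 * A stand-alone FIN or SYN occupies sequence space but carries no
 * bytes from the socket buffer, so there is no mbuf to anchor the
 * map entry to; the entry then tracks only the sequence range.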
12222 */ 12223 rsm->m = NULL; 12224 rsm->orig_m_len = 0; 12225 rsm->soff = 0; 12226 } 12227 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12228 #ifdef INVARIANTS 12229 if (insret != NULL) { 12230 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12231 insret, rack, rsm); 12232 } 12233 #endif 12234 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12235 rsm->r_in_tmap = 1; 12236 } 12237 /* 12238 * Timers in Rack are kept in microseconds so lets 12239 * convert any initial incoming variables 12240 * from ticks into usecs. Note that we 12241 * also change the values of t_srtt and t_rttvar, if 12242 * they are non-zero. They are kept with a 5 12243 * bit decimal so we have to carefully convert 12244 * these to get the full precision. 12245 */ 12246 rack_convert_rtts(tp); 12247 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12248 if (rack_def_profile) 12249 rack_set_profile(rack, rack_def_profile); 12250 /* Cancel the GP measurement in progress */ 12251 tp->t_flags &= ~TF_GPUTINPROG; 12252 if (SEQ_GT(tp->snd_max, tp->iss)) 12253 snt = tp->snd_max - tp->iss; 12254 else 12255 snt = 0; 12256 iwin = rc_init_window(rack); 12257 if (snt < iwin) { 12258 /* We are not past the initial window 12259 * so we need to make sure cwnd is 12260 * correct. 12261 */ 12262 if (tp->snd_cwnd < iwin) 12263 tp->snd_cwnd = iwin; 12264 /* 12265 * If we are within the initial window 12266 * we want ssthresh to be unlimited. Setting 12267 * it to the rwnd (which the default stack does 12268 * and older racks) is not really a good idea 12269 * since we want to be in SS and grow both the 12270 * cwnd and the rwnd (via dynamic rwnd growth). If 12271 * we set it to the rwnd then as the peer grows its 12272 * rwnd we will be stuck in CA and never hit SS. 12273 * 12274 * Its far better to raise it up high (this takes the 12275 * risk that there as been a loss already, probably 12276 * we should have an indicator in all stacks of loss 12277 * but we don't), but considering the normal use this 12278 * is a risk worth taking. The consequences of not 12279 * hitting SS are far worse than going one more time 12280 * into it early on (before we have sent even a IW). 12281 * It is highly unlikely that we will have had a loss 12282 * before getting the IW out. 12283 */ 12284 tp->snd_ssthresh = 0xffffffff; 12285 } 12286 rack_stop_all_timers(tp); 12287 /* Lets setup the fsb block */ 12288 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12289 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12290 __LINE__, RACK_RTTS_INIT); 12291 return (0); 12292 } 12293 12294 static int 12295 rack_handoff_ok(struct tcpcb *tp) 12296 { 12297 if ((tp->t_state == TCPS_CLOSED) || 12298 (tp->t_state == TCPS_LISTEN)) { 12299 /* Sure no problem though it may not stick */ 12300 return (0); 12301 } 12302 if ((tp->t_state == TCPS_SYN_SENT) || 12303 (tp->t_state == TCPS_SYN_RECEIVED)) { 12304 /* 12305 * We really don't know if you support sack, 12306 * you have to get to ESTAB or beyond to tell. 12307 */ 12308 return (EAGAIN); 12309 } 12310 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12311 /* 12312 * Rack will only send a FIN after all data is acknowledged. 12313 * So in this case we have more data outstanding. 
We can't 12314 * switch stacks until either all data and only the FIN 12315 * is left (in which case rack_init() now knows how 12316 * to deal with that) <or> all is acknowledged and we 12317 * are only left with incoming data, though why you 12318 * would want to switch to rack after all data is acknowledged 12319 * I have no idea (rrs)! 12320 */ 12321 return (EAGAIN); 12322 } 12323 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12324 return (0); 12325 } 12326 /* 12327 * If we reach here we don't do SACK on this connection so we can 12328 * never do rack. 12329 */ 12330 return (EINVAL); 12331 } 12332 12333 12334 static void 12335 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12336 { 12337 int ack_cmp = 0; 12338 12339 if (tp->t_fb_ptr) { 12340 struct tcp_rack *rack; 12341 struct rack_sendmap *rsm, *nrsm, *rm; 12342 12343 rack = (struct tcp_rack *)tp->t_fb_ptr; 12344 if (tp->t_in_pkt) { 12345 /* 12346 * It is unsafe to process the packets since a 12347 * reset may be lurking in them (its rare but it 12348 * can occur). If we were to find a RST, then we 12349 * would end up dropping the connection and the 12350 * INP lock, so when we return the caller (tcp_usrreq) 12351 * will blow up when it trys to unlock the inp. 12352 */ 12353 struct mbuf *save, *m; 12354 12355 m = tp->t_in_pkt; 12356 tp->t_in_pkt = NULL; 12357 tp->t_tail_pkt = NULL; 12358 while (m) { 12359 save = m->m_nextpkt; 12360 m->m_nextpkt = NULL; 12361 m_freem(m); 12362 m = save; 12363 } 12364 if ((tp->t_inpcb) && 12365 (tp->t_inpcb->inp_flags2 & INP_MBUF_ACKCMP)) 12366 ack_cmp = 1; 12367 if (ack_cmp) { 12368 /* Total if we used large or small (if ack-cmp was used). */ 12369 if (rack->rc_inp->inp_flags2 & INP_MBUF_L_ACKS) 12370 counter_u64_add(rack_large_ackcmp, 1); 12371 else 12372 counter_u64_add(rack_small_ackcmp, 1); 12373 } 12374 } 12375 tp->t_flags &= ~TF_FORCEDATA; 12376 #ifdef NETFLIX_SHARED_CWND 12377 if (rack->r_ctl.rc_scw) { 12378 uint32_t limit; 12379 12380 if (rack->r_limit_scw) 12381 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12382 else 12383 limit = 0; 12384 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12385 rack->r_ctl.rc_scw_index, 12386 limit); 12387 rack->r_ctl.rc_scw = NULL; 12388 } 12389 #endif 12390 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12391 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12392 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12393 rack->r_ctl.fsb.th = NULL; 12394 } 12395 /* Convert back to ticks, with */ 12396 if (tp->t_srtt > 1) { 12397 uint32_t val, frac; 12398 12399 val = USEC_2_TICKS(tp->t_srtt); 12400 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12401 tp->t_srtt = val << TCP_RTT_SHIFT; 12402 /* 12403 * frac is the fractional part here is left 12404 * over from converting to hz and shifting. 12405 * We need to convert this to the 5 bit 12406 * remainder. 12407 */ 12408 if (frac) { 12409 if (hz == 1000) { 12410 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12411 } else { 12412 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12413 } 12414 tp->t_srtt += frac; 12415 } 12416 } 12417 if (tp->t_rttvar) { 12418 uint32_t val, frac; 12419 12420 val = USEC_2_TICKS(tp->t_rttvar); 12421 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12422 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12423 /* 12424 * frac is the fractional part here is left 12425 * over from converting to hz and shifting. 12426 * We need to convert this to the 5 bit 12427 * remainder. 
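 * In the t_srtt conversion above, for example, with hz = 1000 (one
 * tick per millisecond) a value of 20500 usecs splits into
 * val = 20 ticks and frac = 500 usecs; 500 * 32 / 1000 = 16, giving
 * (20 << TCP_RTT_SHIFT) + 16 = 656, i.e. 20.5 ticks with a 5 bit
 * fraction.
 * XXX: frac below is derived from t_srtt, which was already
 * converted above; t_rttvar appears to be what was intended.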
12428 */ 12429 if (frac) { 12430 if (hz == 1000) { 12431 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12432 } else { 12433 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12434 } 12435 tp->t_rttvar += frac; 12436 } 12437 } 12438 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12439 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12440 if (rack->rc_always_pace) { 12441 tcp_decrement_paced_conn(); 12442 rack_undo_cc_pacing(rack); 12443 rack->rc_always_pace = 0; 12444 } 12445 /* Clean up any options if they were not applied */ 12446 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12447 struct deferred_opt_list *dol; 12448 12449 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12450 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12451 free(dol, M_TCPDO); 12452 } 12453 /* rack does not use force data but other stacks may clear it */ 12454 if (rack->r_ctl.crte != NULL) { 12455 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12456 rack->rack_hdrw_pacing = 0; 12457 rack->r_ctl.crte = NULL; 12458 } 12459 #ifdef TCP_BLACKBOX 12460 tcp_log_flowend(tp); 12461 #endif 12462 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12463 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12464 #ifdef INVARIANTS 12465 if (rm != rsm) { 12466 panic("At fini, rack:%p rsm:%p rm:%p", 12467 rack, rsm, rm); 12468 } 12469 #endif 12470 uma_zfree(rack_zone, rsm); 12471 } 12472 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12473 while (rsm) { 12474 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12475 uma_zfree(rack_zone, rsm); 12476 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12477 } 12478 rack->rc_free_cnt = 0; 12479 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12480 tp->t_fb_ptr = NULL; 12481 } 12482 if (tp->t_inpcb) { 12483 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12484 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12485 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12486 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 12487 /* Cancel the GP measurement in progress */ 12488 tp->t_flags &= ~TF_GPUTINPROG; 12489 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 12490 } 12491 /* Make sure snd_nxt is correctly set */ 12492 tp->snd_nxt = tp->snd_max; 12493 } 12494 12495 static void 12496 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12497 { 12498 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12499 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12500 } 12501 switch (tp->t_state) { 12502 case TCPS_SYN_SENT: 12503 rack->r_state = TCPS_SYN_SENT; 12504 rack->r_substate = rack_do_syn_sent; 12505 break; 12506 case TCPS_SYN_RECEIVED: 12507 rack->r_state = TCPS_SYN_RECEIVED; 12508 rack->r_substate = rack_do_syn_recv; 12509 break; 12510 case TCPS_ESTABLISHED: 12511 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12512 rack->r_state = TCPS_ESTABLISHED; 12513 rack->r_substate = rack_do_established; 12514 break; 12515 case TCPS_CLOSE_WAIT: 12516 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12517 rack->r_state = TCPS_CLOSE_WAIT; 12518 rack->r_substate = rack_do_close_wait; 12519 break; 12520 case TCPS_FIN_WAIT_1: 12521 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12522 rack->r_state = TCPS_FIN_WAIT_1; 12523 rack->r_substate = rack_do_fin_wait_1; 12524 break; 12525 case TCPS_CLOSING: 12526 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12527 rack->r_state = TCPS_CLOSING; 12528 rack->r_substate = rack_do_closing; 12529 break; 12530 case TCPS_LAST_ACK: 12531 rack_set_pace_segments(tp, rack, 
__LINE__, NULL); 12532 rack->r_state = TCPS_LAST_ACK; 12533 rack->r_substate = rack_do_lastack; 12534 break; 12535 case TCPS_FIN_WAIT_2: 12536 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12537 rack->r_state = TCPS_FIN_WAIT_2; 12538 rack->r_substate = rack_do_fin_wait_2; 12539 break; 12540 case TCPS_LISTEN: 12541 case TCPS_CLOSED: 12542 case TCPS_TIME_WAIT: 12543 default: 12544 break; 12545 }; 12546 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12547 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12548 12549 } 12550 12551 static void 12552 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12553 { 12554 /* 12555 * We received an ack, and then did not 12556 * call send or were bounced out due to the 12557 * hpts was running. Now a timer is up as well, is 12558 * it the right timer? 12559 */ 12560 struct rack_sendmap *rsm; 12561 int tmr_up; 12562 12563 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12564 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12565 return; 12566 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12567 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12568 (tmr_up == PACE_TMR_RXT)) { 12569 /* Should be an RXT */ 12570 return; 12571 } 12572 if (rsm == NULL) { 12573 /* Nothing outstanding? */ 12574 if (tp->t_flags & TF_DELACK) { 12575 if (tmr_up == PACE_TMR_DELACK) 12576 /* We are supposed to have delayed ack up and we do */ 12577 return; 12578 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12579 /* 12580 * if we hit enobufs then we would expect the possiblity 12581 * of nothing outstanding and the RXT up (and the hptsi timer). 12582 */ 12583 return; 12584 } else if (((V_tcp_always_keepalive || 12585 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12586 (tp->t_state <= TCPS_CLOSING)) && 12587 (tmr_up == PACE_TMR_KEEP) && 12588 (tp->snd_max == tp->snd_una)) { 12589 /* We should have keep alive up and we do */ 12590 return; 12591 } 12592 } 12593 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12594 ((tmr_up == PACE_TMR_TLP) || 12595 (tmr_up == PACE_TMR_RACK) || 12596 (tmr_up == PACE_TMR_RXT))) { 12597 /* 12598 * Either a Rack, TLP or RXT is fine if we 12599 * have outstanding data. 12600 */ 12601 return; 12602 } else if (tmr_up == PACE_TMR_DELACK) { 12603 /* 12604 * If the delayed ack was going to go off 12605 * before the rtx/tlp/rack timer were going to 12606 * expire, then that would be the timer in control. 12607 * Note we don't check the time here trusting the 12608 * code is correct. 12609 */ 12610 return; 12611 } 12612 /* 12613 * Ok the timer originally started is not what we want now. 12614 * We will force the hpts to be stopped if any, and restart 12615 * with the slot set to what was in the saved slot. 
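 * If the pacer's scheduled output time still lies in the future, the
 * unused portion is credited to rc_agg_early (and r_early is set) so
 * the pacing code can account for leaving the wheel ahead of schedule.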
12616 */ 12617 if (rack->rc_inp->inp_in_hpts) { 12618 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12619 uint32_t us_cts; 12620 12621 us_cts = tcp_get_usecs(NULL); 12622 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12623 rack->r_early = 1; 12624 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12625 } 12626 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 12627 } 12628 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 12629 } 12630 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 12631 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12632 } 12633 12634 12635 static void 12636 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 12637 { 12638 tp->snd_wnd = tiwin; 12639 rack_validate_fo_sendwin_up(tp, rack); 12640 tp->snd_wl1 = seq; 12641 tp->snd_wl2 = ack; 12642 if (tp->snd_wnd > tp->max_sndwnd) 12643 tp->max_sndwnd = tp->snd_wnd; 12644 if (tp->snd_wnd < (tp->snd_max - high_seq)) { 12645 /* The peer collapsed the window */ 12646 rack_collapsed_window(rack); 12647 } else if (rack->rc_has_collapsed) 12648 rack_un_collapse_window(rack); 12649 /* Do we exit persists? */ 12650 if ((rack->rc_in_persist != 0) && 12651 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 12652 rack->r_ctl.rc_pace_min_segs))) { 12653 rack_exit_persist(tp, rack, cts); 12654 } 12655 /* Do we enter persists? */ 12656 if ((rack->rc_in_persist == 0) && 12657 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 12658 TCPS_HAVEESTABLISHED(tp->t_state) && 12659 (tp->snd_max == tp->snd_una) && 12660 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 12661 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 12662 /* 12663 * Here the rwnd is less than 12664 * the pacing size, we are established, 12665 * nothing is outstanding, and there is 12666 * data to send. Enter persists. 
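 * For example, with rc_high_rwnd = 65535 and rc_pace_min_segs = 1448
 * the threshold is min(32767, 1448) = 1448, so persist is only
 * entered once the peer's window drops below a single pacing segment.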
12667 */ 12668 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 12669 } 12670 } 12671 12672 static void 12673 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 12674 { 12675 12676 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 12677 union tcp_log_stackspecific log; 12678 struct timeval ltv; 12679 char tcp_hdr_buf[60]; 12680 struct tcphdr *th; 12681 struct timespec ts; 12682 uint32_t orig_snd_una; 12683 uint8_t xx = 0; 12684 12685 #ifdef NETFLIX_HTTP_LOGGING 12686 struct http_sendfile_track *http_req; 12687 12688 if (SEQ_GT(ae->ack, tp->snd_una)) { 12689 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 12690 } else { 12691 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 12692 } 12693 #endif 12694 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 12695 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 12696 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 12697 if (rack->rack_no_prr == 0) 12698 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 12699 else 12700 log.u_bbr.flex1 = 0; 12701 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 12702 log.u_bbr.use_lt_bw <<= 1; 12703 log.u_bbr.use_lt_bw |= rack->r_might_revert; 12704 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 12705 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 12706 log.u_bbr.pkts_out = tp->t_maxseg; 12707 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 12708 log.u_bbr.flex7 = 1; 12709 log.u_bbr.lost = ae->flags; 12710 log.u_bbr.cwnd_gain = ackval; 12711 log.u_bbr.pacing_gain = 0x2; 12712 if (ae->flags & TSTMP_HDWR) { 12713 /* Record the hardware timestamp if present */ 12714 log.u_bbr.flex3 = M_TSTMP; 12715 ts.tv_sec = ae->timestamp / 1000000000; 12716 ts.tv_nsec = ae->timestamp % 1000000000; 12717 ltv.tv_sec = ts.tv_sec; 12718 ltv.tv_usec = ts.tv_nsec / 1000; 12719 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 12720 } else if (ae->flags & TSTMP_LRO) { 12721 /* Record the LRO the arrival timestamp */ 12722 log.u_bbr.flex3 = M_TSTMP_LRO; 12723 ts.tv_sec = ae->timestamp / 1000000000; 12724 ts.tv_nsec = ae->timestamp % 1000000000; 12725 ltv.tv_sec = ts.tv_sec; 12726 ltv.tv_usec = ts.tv_nsec / 1000; 12727 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 12728 } 12729 log.u_bbr.timeStamp = tcp_get_usecs(<v); 12730 /* Log the rcv time */ 12731 log.u_bbr.delRate = ae->timestamp; 12732 #ifdef NETFLIX_HTTP_LOGGING 12733 log.u_bbr.applimited = tp->t_http_closed; 12734 log.u_bbr.applimited <<= 8; 12735 log.u_bbr.applimited |= tp->t_http_open; 12736 log.u_bbr.applimited <<= 8; 12737 log.u_bbr.applimited |= tp->t_http_req; 12738 if (http_req) { 12739 /* Copy out any client req info */ 12740 /* seconds */ 12741 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 12742 /* useconds */ 12743 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 12744 log.u_bbr.rttProp = http_req->timestamp; 12745 log.u_bbr.cur_del_rate = http_req->start; 12746 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 12747 log.u_bbr.flex8 |= 1; 12748 } else { 12749 log.u_bbr.flex8 |= 2; 12750 log.u_bbr.bw_inuse = http_req->end; 12751 } 12752 log.u_bbr.flex6 = http_req->start_seq; 12753 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 12754 log.u_bbr.flex8 |= 4; 12755 log.u_bbr.epoch = http_req->end_seq; 12756 } 12757 } 12758 #endif 12759 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 12760 th = (struct tcphdr *)tcp_hdr_buf; 12761 th->th_seq = ae->seq; 12762 th->th_ack = ae->ack; 12763 th->th_win = ae->win; 12764 /* Now fill in the ports */ 12765 th->th_sport = tp->t_inpcb->inp_fport; 12766 
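			/*
			 * The segment being logged is inbound, so the peer's
			 * (foreign) port is the source and our local port is
			 * the destination of the reconstructed header.
			 */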
th->th_dport = tp->t_inpcb->inp_lport; 12767 th->th_flags = ae->flags & 0xff; 12768 /* Now do we have a timestamp option? */ 12769 if (ae->flags & HAS_TSTMP) { 12770 u_char *cp; 12771 uint32_t val; 12772 12773 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 12774 cp = (u_char *)(th + 1); 12775 *cp = TCPOPT_NOP; 12776 cp++; 12777 *cp = TCPOPT_NOP; 12778 cp++; 12779 *cp = TCPOPT_TIMESTAMP; 12780 cp++; 12781 *cp = TCPOLEN_TIMESTAMP; 12782 cp++; 12783 val = htonl(ae->ts_value); 12784 bcopy((char *)&val, 12785 (char *)cp, sizeof(uint32_t)); 12786 val = htonl(ae->ts_echo); 12787 bcopy((char *)&val, 12788 (char *)(cp + 4), sizeof(uint32_t)); 12789 } else 12790 th->th_off = (sizeof(struct tcphdr) >> 2); 12791 12792 /* 12793 * For sane logging we need to play a little trick. 12794 * If the ack were fully processed we would have moved 12795 * snd_una to high_seq, but since compressed acks are 12796 * processed in two phases, at this point (logging) snd_una 12797 * won't be advanced. So we would see multiple acks showing 12798 * the advancement. We can prevent that by "pretending" that 12799 * snd_una was advanced and then un-advancing it so that the 12800 * logging code has the right value for tlb_snd_una. 12801 */ 12802 if (tp->snd_una != high_seq) { 12803 orig_snd_una = tp->snd_una; 12804 tp->snd_una = high_seq; 12805 xx = 1; 12806 } else 12807 xx = 0; 12808 TCP_LOG_EVENTP(tp, th, 12809 &tp->t_inpcb->inp_socket->so_rcv, 12810 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 12811 0, &log, true, <v); 12812 if (xx) { 12813 tp->snd_una = orig_snd_una; 12814 } 12815 } 12816 12817 } 12818 12819 static int 12820 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 12821 { 12822 /* 12823 * Handle a "special" compressed ack mbuf. Each incoming 12824 * ack has only four possible dispositions: 12825 * 12826 * A) It moves the cum-ack forward 12827 * B) It is behind the cum-ack. 12828 * C) It is a window-update ack. 12829 * D) It is a dup-ack. 12830 * 12831 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 12832 * in the incoming mbuf. We also need to still pay attention 12833 * to nxt_pkt since there may be another packet after this 12834 * one. 
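 *
 * For example, with snd_una = 1000: an entry with ae->ack = 900 is
 * case B (behind), ae->ack = 1500 is case A (the cum-ack advances),
 * and ae->ack = 1000 is case C if the advertised window changed or
 * case D (dup-ack) if it did not.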
12835 */ 12836 #ifdef TCP_ACCOUNTING 12837 uint64_t ts_val; 12838 uint64_t rdstc; 12839 #endif 12840 int segsiz; 12841 struct timespec ts; 12842 struct tcp_rack *rack; 12843 struct tcp_ackent *ae; 12844 uint32_t tiwin, us_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 12845 int cnt, i, did_out, ourfinisacked = 0; 12846 int win_up_req = 0; 12847 struct tcpopt to_holder, *to = NULL; 12848 int nsegs = 0; 12849 int under_pacing = 1; 12850 int recovery = 0; 12851 int idx; 12852 #ifdef TCP_ACCOUNTING 12853 sched_pin(); 12854 #endif 12855 rack = (struct tcp_rack *)tp->t_fb_ptr; 12856 if (rack->gp_ready && 12857 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 12858 under_pacing = 0; 12859 else 12860 under_pacing = 1; 12861 12862 if (rack->r_state != tp->t_state) 12863 rack_set_state(tp, rack); 12864 to = &to_holder; 12865 to->to_flags = 0; 12866 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 12867 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 12868 cnt = m->m_len / sizeof(struct tcp_ackent); 12869 idx = cnt / 5; 12870 if (idx >= MAX_NUM_OF_CNTS) 12871 idx = MAX_NUM_OF_CNTS - 1; 12872 counter_u64_add(rack_proc_comp_ack[idx], 1); 12873 counter_u64_add(rack_multi_single_eq, cnt); 12874 high_seq = tp->snd_una; 12875 the_win = tp->snd_wnd; 12876 win_seq = tp->snd_wl1; 12877 win_upd_ack = tp->snd_wl2; 12878 cts = us_cts = tcp_tv_to_usectick(tv); 12879 segsiz = ctf_fixed_maxseg(tp); 12880 if ((rack->rc_gp_dyn_mul) && 12881 (rack->use_fixed_rate == 0) && 12882 (rack->rc_always_pace)) { 12883 /* Check in on probertt */ 12884 rack_check_probe_rtt(rack, us_cts); 12885 } 12886 for (i = 0; i < cnt; i++) { 12887 #ifdef TCP_ACCOUNTING 12888 ts_val = get_cyclecount(); 12889 #endif 12890 rack_clear_rate_sample(rack); 12891 ae = ((mtod(m, struct tcp_ackent *)) + i); 12892 /* Setup the window */ 12893 tiwin = ae->win << tp->snd_scale; 12894 /* figure out the type of ack */ 12895 if (SEQ_LT(ae->ack, high_seq)) { 12896 /* Case B*/ 12897 ae->ack_val_set = ACK_BEHIND; 12898 } else if (SEQ_GT(ae->ack, high_seq)) { 12899 /* Case A */ 12900 ae->ack_val_set = ACK_CUMACK; 12901 } else if (tiwin == the_win) { 12902 /* Case D */ 12903 ae->ack_val_set = ACK_DUPACK; 12904 } else { 12905 /* Case C */ 12906 ae->ack_val_set = ACK_RWND; 12907 } 12908 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 12909 /* Validate timestamp */ 12910 if (ae->flags & HAS_TSTMP) { 12911 /* Setup for a timestamp */ 12912 to->to_flags = TOF_TS; 12913 ae->ts_echo -= tp->ts_offset; 12914 to->to_tsecr = ae->ts_echo; 12915 to->to_tsval = ae->ts_value; 12916 /* 12917 * If echoed timestamp is later than the current time, fall back to 12918 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 12919 * were used when this connection was established. 
12920 */ 12921 if (TSTMP_GT(ae->ts_echo, cts)) 12922 ae->ts_echo = 0; 12923 if (tp->ts_recent && 12924 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 12925 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 12926 #ifdef TCP_ACCOUNTING 12927 rdstc = get_cyclecount(); 12928 if (rdstc > ts_val) { 12929 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 12930 (rdstc - ts_val)); 12931 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 12932 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 12933 } 12934 } 12935 #endif 12936 continue; 12937 } 12938 } 12939 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 12940 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 12941 tp->ts_recent_age = tcp_ts_getticks(); 12942 tp->ts_recent = ae->ts_value; 12943 } 12944 } else { 12945 /* Setup for a no options */ 12946 to->to_flags = 0; 12947 } 12948 /* Update the rcv time and perform idle reduction possibly */ 12949 if (tp->t_idle_reduce && 12950 (tp->snd_max == tp->snd_una) && 12951 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 12952 counter_u64_add(rack_input_idle_reduces, 1); 12953 rack_cc_after_idle(rack, tp); 12954 } 12955 tp->t_rcvtime = ticks; 12956 /* Now what about ECN? */ 12957 if (tp->t_flags2 & TF2_ECN_PERMIT) { 12958 if (ae->flags & TH_CWR) { 12959 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 12960 tp->t_flags |= TF_ACKNOW; 12961 } 12962 switch (ae->codepoint & IPTOS_ECN_MASK) { 12963 case IPTOS_ECN_CE: 12964 tp->t_flags2 |= TF2_ECN_SND_ECE; 12965 KMOD_TCPSTAT_INC(tcps_ecn_ce); 12966 break; 12967 case IPTOS_ECN_ECT0: 12968 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 12969 break; 12970 case IPTOS_ECN_ECT1: 12971 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 12972 break; 12973 } 12974 12975 /* Process a packet differently from RFC3168. */ 12976 cc_ecnpkt_handler_flags(tp, ae->flags, ae->codepoint); 12977 /* Congestion experienced. */ 12978 if (ae->flags & TH_ECE) { 12979 rack_cong_signal(tp, CC_ECN, ae->ack); 12980 } 12981 } 12982 #ifdef TCP_ACCOUNTING 12983 /* Count for the specific type of ack in */ 12984 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 12985 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 12986 tp->tcp_cnt_counters[ae->ack_val_set]++; 12987 } 12988 #endif 12989 /* 12990 * Note how we could move up these in the determination 12991 * above, but we don't so that way the timestamp checks (and ECN) 12992 * is done first before we do any processing on the ACK. 12993 * The non-compressed path through the code has this 12994 * weakness (noted by @jtl) that it actually does some 12995 * processing before verifying the timestamp information. 12996 * We don't take that path here which is why we set 12997 * the ack_val_set first, do the timestamp and ecn 12998 * processing, and then look at what we have setup. 12999 */ 13000 if (ae->ack_val_set == ACK_BEHIND) { 13001 /* 13002 * Case B flag reordering, if window is not closed 13003 * or it could be a keep-alive or persists 13004 */ 13005 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13006 counter_u64_add(rack_reorder_seen, 1); 13007 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13008 } 13009 } else if (ae->ack_val_set == ACK_DUPACK) { 13010 /* Case D */ 13011 13012 rack_strike_dupack(rack); 13013 } else if (ae->ack_val_set == ACK_RWND) { 13014 /* Case C */ 13015 13016 win_up_req = 1; 13017 win_upd_ack = ae->ack; 13018 win_seq = ae->seq; 13019 the_win = tiwin; 13020 } else { 13021 /* Case A */ 13022 13023 if (SEQ_GT(ae->ack, tp->snd_max)) { 13024 /* 13025 * We just send an ack since the incoming 13026 * ack is beyond the largest seq we sent. 
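 * ctf_ack_war_checks() presumably rate limits these, using the
 * challenge_ack_ts/challenge_ack_cnt state, so a stream of bogus
 * ACKs cannot drag us into an ACK war.
 * XXX: the TF_ACKNOW test below uses a logical && where a bit-wise
 * & appears to be intended.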
13027 */ 13028 if ((tp->t_flags & TF_ACKNOW) == 0) { 13029 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13030 if (tp->t_flags && TF_ACKNOW) 13031 rack->r_wanted_output = 1; 13032 } 13033 } else { 13034 nsegs++; 13035 /* If the window changed setup to update */ 13036 if (tiwin != tp->snd_wnd) { 13037 win_up_req = 1; 13038 win_upd_ack = ae->ack; 13039 win_seq = ae->seq; 13040 the_win = tiwin; 13041 } 13042 #ifdef TCP_ACCOUNTING 13043 /* Account for the acks */ 13044 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13045 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13046 } 13047 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13048 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13049 #endif 13050 high_seq = ae->ack; 13051 /* Setup our act_rcv_time */ 13052 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13053 ts.tv_sec = ae->timestamp / 1000000000; 13054 ts.tv_nsec = ae->timestamp % 1000000000; 13055 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13056 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13057 } else { 13058 rack->r_ctl.act_rcv_time = *tv; 13059 } 13060 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13061 } 13062 } 13063 /* And lets be sure to commit the rtt measurements for this ack */ 13064 tcp_rack_xmit_timer_commit(rack, tp); 13065 #ifdef TCP_ACCOUNTING 13066 rdstc = get_cyclecount(); 13067 if (rdstc > ts_val) { 13068 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13069 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13070 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13071 if (ae->ack_val_set == ACK_CUMACK) 13072 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13073 } 13074 } 13075 #endif 13076 } 13077 #ifdef TCP_ACCOUNTING 13078 ts_val = get_cyclecount(); 13079 #endif 13080 acked_amount = acked = (high_seq - tp->snd_una); 13081 if (win_up_req) { 13082 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13083 } 13084 if (acked) { 13085 if (rack->sack_attack_disable == 0) 13086 rack_do_decay(rack); 13087 if (acked >= segsiz) { 13088 /* 13089 * You only get credit for 13090 * MSS and greater (and you get extra 13091 * credit for larger cum-ack moves). 13092 */ 13093 int ac; 13094 13095 ac = acked / segsiz; 13096 rack->r_ctl.ack_count += ac; 13097 counter_u64_add(rack_ack_total, ac); 13098 } 13099 if (rack->r_ctl.ack_count > 0xfff00000) { 13100 /* 13101 * reduce the number to keep us under 13102 * a uint32_t. 13103 */ 13104 rack->r_ctl.ack_count /= 2; 13105 rack->r_ctl.sack_count /= 2; 13106 } 13107 if (tp->t_flags & TF_NEEDSYN) { 13108 /* 13109 * T/TCP: Connection was half-synchronized, and our SYN has 13110 * been ACK'd (so connection is now fully synchronized). Go 13111 * to non-starred state, increment snd_una for ACK of SYN, 13112 * and check if we can do window scaling. 13113 */ 13114 tp->t_flags &= ~TF_NEEDSYN; 13115 tp->snd_una++; 13116 acked_amount = acked = (high_seq - tp->snd_una); 13117 } 13118 if (acked > sbavail(&so->so_snd)) 13119 acked_amount = sbavail(&so->so_snd); 13120 #ifdef NETFLIX_EXP_DETECTION 13121 /* 13122 * We only care on a cum-ack move if we are in a sack-disabled 13123 * state. We have already added in to the ack_count, and we never 13124 * would disable on a cum-ack move, so we only care to do the 13125 * detection if it may "undo" it, i.e. we were in disabled already. 
13126 */ 13127 if (rack->sack_attack_disable) 13128 rack_do_detection(tp, rack, acked_amount, segsiz); 13129 #endif 13130 if (IN_FASTRECOVERY(tp->t_flags) && 13131 (rack->rack_no_prr == 0)) 13132 rack_update_prr(tp, rack, acked_amount, high_seq); 13133 if (IN_RECOVERY(tp->t_flags)) { 13134 if (SEQ_LT(high_seq, tp->snd_recover) && 13135 (SEQ_LT(high_seq, tp->snd_max))) { 13136 tcp_rack_partialack(tp); 13137 } else { 13138 rack_post_recovery(tp, high_seq); 13139 recovery = 1; 13140 } 13141 } 13142 /* Handle the rack-log-ack part (sendmap) */ 13143 if ((sbused(&so->so_snd) == 0) && 13144 (acked > acked_amount) && 13145 (tp->t_state >= TCPS_FIN_WAIT_1) && 13146 (tp->t_flags & TF_SENTFIN)) { 13147 /* 13148 * We must be sure our fin 13149 * was sent and acked (we can be 13150 * in FIN_WAIT_1 without having 13151 * sent the fin). 13152 */ 13153 ourfinisacked = 1; 13154 /* 13155 * Lets make sure snd_una is updated 13156 * since most likely acked_amount = 0 (it 13157 * should be). 13158 */ 13159 tp->snd_una = high_seq; 13160 } 13161 /* Did we make a RTO error? */ 13162 if ((tp->t_flags & TF_PREVVALID) && 13163 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13164 tp->t_flags &= ~TF_PREVVALID; 13165 if (tp->t_rxtshift == 1 && 13166 (int)(ticks - tp->t_badrxtwin) < 0) 13167 rack_cong_signal(tp, CC_RTO_ERR, high_seq); 13168 } 13169 /* Handle the data in the socket buffer */ 13170 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13171 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13172 if (acked_amount > 0) { 13173 struct mbuf *mfree; 13174 13175 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13176 SOCKBUF_LOCK(&so->so_snd); 13177 mfree = sbcut_locked(&so->so_snd, acked); 13178 tp->snd_una = high_seq; 13179 /* Note we want to hold the sb lock through the sendmap adjust */ 13180 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13181 /* Wake up the socket if we have room to write more */ 13182 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13183 sowwakeup_locked(so); 13184 m_freem(mfree); 13185 } 13186 /* update progress */ 13187 tp->t_acktime = ticks; 13188 rack_log_progress_event(rack, tp, tp->t_acktime, 13189 PROGRESS_UPDATE, __LINE__); 13190 /* Clear out shifts and such */ 13191 tp->t_rxtshift = 0; 13192 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13193 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13194 rack->rc_tlp_in_progress = 0; 13195 rack->r_ctl.rc_tlp_cnt_out = 0; 13196 /* Send recover and snd_nxt must be dragged along */ 13197 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13198 tp->snd_recover = tp->snd_una; 13199 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13200 tp->snd_nxt = tp->snd_una; 13201 /* 13202 * If the RXT timer is running we want to 13203 * stop it, so we can restart a TLP (or new RXT). 
13204 */ 13205 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13206 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13207 #ifdef NETFLIX_HTTP_LOGGING 13208 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13209 #endif 13210 tp->snd_wl2 = high_seq; 13211 tp->t_dupacks = 0; 13212 if (under_pacing && 13213 (rack->use_fixed_rate == 0) && 13214 (rack->in_probe_rtt == 0) && 13215 rack->rc_gp_dyn_mul && 13216 rack->rc_always_pace) { 13217 /* Check if we are dragging bottom */ 13218 rack_check_bottom_drag(tp, rack, so, acked); 13219 } 13220 if (tp->snd_una == tp->snd_max) { 13221 tp->t_flags &= ~TF_PREVVALID; 13222 rack->r_ctl.retran_during_recovery = 0; 13223 rack->r_ctl.dsack_byte_cnt = 0; 13224 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13225 if (rack->r_ctl.rc_went_idle_time == 0) 13226 rack->r_ctl.rc_went_idle_time = 1; 13227 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13228 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13229 tp->t_acktime = 0; 13230 /* Set so we might enter persists... */ 13231 rack->r_wanted_output = 1; 13232 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13233 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13234 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13235 (sbavail(&so->so_snd) == 0) && 13236 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13237 /* 13238 * The socket was gone and the 13239 * peer sent data (not now in the past), time to 13240 * reset him. 13241 */ 13242 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13243 /* tcp_close will kill the inp pre-log the Reset */ 13244 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13245 #ifdef TCP_ACCOUNTING 13246 rdstc = get_cyclecount(); 13247 if (rdstc > ts_val) { 13248 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13249 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13250 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13251 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13252 } 13253 } 13254 #endif 13255 m_freem(m); 13256 tp = tcp_close(tp); 13257 if (tp == NULL) { 13258 #ifdef TCP_ACCOUNTING 13259 sched_unpin(); 13260 #endif 13261 return (1); 13262 } 13263 /* 13264 * We would normally do drop-with-reset which would 13265 * send back a reset. We can't since we don't have 13266 * all the needed bits. Instead lets arrange for 13267 * a call to tcp_output(). That way since we 13268 * are in the closed state we will generate a reset. 13269 * 13270 * Note if tcp_accounting is on we don't unpin since 13271 * we do that after the goto label. 13272 */ 13273 goto send_out_a_rst; 13274 } 13275 if ((sbused(&so->so_snd) == 0) && 13276 (tp->t_state >= TCPS_FIN_WAIT_1) && 13277 (tp->t_flags & TF_SENTFIN)) { 13278 /* 13279 * If we can't receive any more data, then closing user can 13280 * proceed. Starting the timer is contrary to the 13281 * specification, but if we don't get a FIN we'll hang 13282 * forever. 13283 * 13284 */ 13285 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13286 soisdisconnected(so); 13287 tcp_timer_activate(tp, TT_2MSL, 13288 (tcp_fast_finwait2_recycle ? 13289 tcp_finwait2_timeout : 13290 TP_MAXIDLE(tp))); 13291 } 13292 if (ourfinisacked == 0) { 13293 /* 13294 * We don't change to fin-wait-2 if we have our fin acked 13295 * which means we are probably in TCPS_CLOSING. 
13296 */ 13297 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13298 } 13299 } 13300 } 13301 /* Wake up the socket if we have room to write more */ 13302 if (sbavail(&so->so_snd)) { 13303 rack->r_wanted_output = 1; 13304 if (ctf_progress_timeout_check(tp, true)) { 13305 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13306 tp, tick, PROGRESS_DROP, __LINE__); 13307 tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT); 13308 /* 13309 * We cheat here and don't send a RST, we should send one 13310 * when the pacer drops the connection. 13311 */ 13312 #ifdef TCP_ACCOUNTING 13313 rdstc = get_cyclecount(); 13314 if (rdstc > ts_val) { 13315 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13316 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13317 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13318 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13319 } 13320 } 13321 sched_unpin(); 13322 #endif 13323 INP_WUNLOCK(rack->rc_inp); 13324 m_freem(m); 13325 return (1); 13326 } 13327 } 13328 if (ourfinisacked) { 13329 switch(tp->t_state) { 13330 case TCPS_CLOSING: 13331 #ifdef TCP_ACCOUNTING 13332 rdstc = get_cyclecount(); 13333 if (rdstc > ts_val) { 13334 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13335 (rdstc - ts_val)); 13336 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13337 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13338 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13339 } 13340 } 13341 sched_unpin(); 13342 #endif 13343 tcp_twstart(tp); 13344 m_freem(m); 13345 return (1); 13346 break; 13347 case TCPS_LAST_ACK: 13348 #ifdef TCP_ACCOUNTING 13349 rdstc = get_cyclecount(); 13350 if (rdstc > ts_val) { 13351 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13352 (rdstc - ts_val)); 13353 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13354 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13355 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13356 } 13357 } 13358 sched_unpin(); 13359 #endif 13360 tp = tcp_close(tp); 13361 ctf_do_drop(m, tp); 13362 return (1); 13363 break; 13364 case TCPS_FIN_WAIT_1: 13365 #ifdef TCP_ACCOUNTING 13366 rdstc = get_cyclecount(); 13367 if (rdstc > ts_val) { 13368 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13369 (rdstc - ts_val)); 13370 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13371 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13372 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13373 } 13374 } 13375 #endif 13376 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13377 soisdisconnected(so); 13378 tcp_timer_activate(tp, TT_2MSL, 13379 (tcp_fast_finwait2_recycle ? 13380 tcp_finwait2_timeout : 13381 TP_MAXIDLE(tp))); 13382 } 13383 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13384 break; 13385 default: 13386 break; 13387 } 13388 } 13389 if (rack->r_fast_output) { 13390 /* 13391 * We re doing fast output.. can we expand that? 
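 * (rack_gain_for_fastoutput() below is handed the newly acked byte
 * count so that, where possible, the fast-send block can be extended
 * into the space this cum-ack just opened up.)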
13392 */ 13393 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13394 } 13395 #ifdef TCP_ACCOUNTING 13396 rdstc = get_cyclecount(); 13397 if (rdstc > ts_val) { 13398 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13399 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13400 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13401 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13402 } 13403 } 13404 13405 } else if (win_up_req) { 13406 rdstc = get_cyclecount(); 13407 if (rdstc > ts_val) { 13408 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13409 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13410 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13411 } 13412 } 13413 #endif 13414 } 13415 /* Now is there a next packet, if so we are done */ 13416 m_freem(m); 13417 did_out = 0; 13418 if (nxt_pkt) { 13419 #ifdef TCP_ACCOUNTING 13420 sched_unpin(); 13421 #endif 13422 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13423 return (0); 13424 } 13425 rack_handle_might_revert(tp, rack); 13426 ctf_calc_rwin(so, tp); 13427 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13428 send_out_a_rst: 13429 (void)tp->t_fb->tfb_tcp_output(tp); 13430 did_out = 1; 13431 } 13432 rack_free_trim(rack); 13433 #ifdef TCP_ACCOUNTING 13434 sched_unpin(); 13435 #endif 13436 rack_timer_audit(tp, rack, &so->so_snd); 13437 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13438 return (0); 13439 } 13440 13441 13442 static int 13443 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13444 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13445 int32_t nxt_pkt, struct timeval *tv) 13446 { 13447 #ifdef TCP_ACCOUNTING 13448 uint64_t ts_val; 13449 #endif 13450 int32_t thflags, retval, did_out = 0; 13451 int32_t way_out = 0; 13452 uint32_t cts; 13453 uint32_t tiwin; 13454 struct timespec ts; 13455 struct tcpopt to; 13456 struct tcp_rack *rack; 13457 struct rack_sendmap *rsm; 13458 int32_t prev_state = 0; 13459 #ifdef TCP_ACCOUNTING 13460 int ack_val_set = 0xf; 13461 #endif 13462 int nsegs; 13463 uint32_t us_cts; 13464 /* 13465 * tv passed from common code is from either M_TSTMP_LRO or 13466 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13467 */ 13468 if (m->m_flags & M_ACKCMP) { 13469 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13470 } 13471 if (m->m_flags & M_ACKCMP) { 13472 panic("Impossible reach m has ackcmp? m:%p tp:%p", m, tp); 13473 } 13474 nsegs = m->m_pkthdr.lro_nsegs; 13475 counter_u64_add(rack_proc_non_comp_ack, 1); 13476 thflags = th->th_flags; 13477 #ifdef TCP_ACCOUNTING 13478 sched_pin(); 13479 if (thflags & TH_ACK) 13480 ts_val = get_cyclecount(); 13481 #endif 13482 cts = tcp_tv_to_usectick(tv); 13483 rack = (struct tcp_rack *)tp->t_fb_ptr; 13484 13485 if ((m->m_flags & M_TSTMP) || 13486 (m->m_flags & M_TSTMP_LRO)) { 13487 mbuf_tstmp2timespec(m, &ts); 13488 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13489 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13490 } else 13491 rack->r_ctl.act_rcv_time = *tv; 13492 kern_prefetch(rack, &prev_state); 13493 prev_state = 0; 13494 /* 13495 * Unscale the window into a 32-bit value. For the SYN_SENT state 13496 * the scale is zero. 13497 */ 13498 tiwin = th->th_win << tp->snd_scale; 13499 /* 13500 * Parse options on any incoming segment. 13501 */ 13502 memset(&to, 0, sizeof(to)); 13503 tcp_dooptions(&to, (u_char *)(th + 1), 13504 (th->th_off << 2) - sizeof(struct tcphdr), 13505 (thflags & TH_SYN) ? 
TO_SYN : 0); 13506 #ifdef TCP_ACCOUNTING 13507 if (thflags & TH_ACK) { 13508 /* 13509 * We have a tradeoff here. We can either do what we are 13510 * doing i.e. pinning to this CPU and then doing the accounting 13511 * <or> we could do a critical enter, setup the rdtsc and cpu 13512 * as in below, and then validate we are on the same CPU on 13513 * exit. I have choosen to not do the critical enter since 13514 * that often will gain you a context switch, and instead lock 13515 * us (line above this if) to the same CPU with sched_pin(). This 13516 * means we may be context switched out for a higher priority 13517 * interupt but we won't be moved to another CPU. 13518 * 13519 * If this occurs (which it won't very often since we most likely 13520 * are running this code in interupt context and only a higher 13521 * priority will bump us ... clock?) we will falsely add in 13522 * to the time the interupt processing time plus the ack processing 13523 * time. This is ok since its a rare event. 13524 */ 13525 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 13526 ctf_fixed_maxseg(tp)); 13527 } 13528 #endif 13529 NET_EPOCH_ASSERT(); 13530 INP_WLOCK_ASSERT(tp->t_inpcb); 13531 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 13532 __func__)); 13533 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 13534 __func__)); 13535 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13536 union tcp_log_stackspecific log; 13537 struct timeval ltv; 13538 #ifdef NETFLIX_HTTP_LOGGING 13539 struct http_sendfile_track *http_req; 13540 13541 if (SEQ_GT(th->th_ack, tp->snd_una)) { 13542 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 13543 } else { 13544 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 13545 } 13546 #endif 13547 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13548 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 13549 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 13550 if (rack->rack_no_prr == 0) 13551 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13552 else 13553 log.u_bbr.flex1 = 0; 13554 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13555 log.u_bbr.use_lt_bw <<= 1; 13556 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13557 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13558 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 13559 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 13560 log.u_bbr.flex3 = m->m_flags; 13561 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13562 log.u_bbr.lost = thflags; 13563 log.u_bbr.pacing_gain = 0x1; 13564 #ifdef TCP_ACCOUNTING 13565 log.u_bbr.cwnd_gain = ack_val_set; 13566 #endif 13567 log.u_bbr.flex7 = 2; 13568 if (m->m_flags & M_TSTMP) { 13569 /* Record the hardware timestamp if present */ 13570 mbuf_tstmp2timespec(m, &ts); 13571 ltv.tv_sec = ts.tv_sec; 13572 ltv.tv_usec = ts.tv_nsec / 1000; 13573 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13574 } else if (m->m_flags & M_TSTMP_LRO) { 13575 /* Record the LRO the arrival timestamp */ 13576 mbuf_tstmp2timespec(m, &ts); 13577 ltv.tv_sec = ts.tv_sec; 13578 ltv.tv_usec = ts.tv_nsec / 1000; 13579 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13580 } 13581 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13582 /* Log the rcv time */ 13583 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 13584 #ifdef NETFLIX_HTTP_LOGGING 13585 log.u_bbr.applimited = tp->t_http_closed; 13586 log.u_bbr.applimited <<= 8; 13587 log.u_bbr.applimited |= tp->t_http_open; 13588 log.u_bbr.applimited <<= 8; 13589 log.u_bbr.applimited |= tp->t_http_req; 13590 if (http_req) { 13591 /* Copy out any client req info */ 13592 /* 
seconds */ 13593 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13594 /* useconds */ 13595 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13596 log.u_bbr.rttProp = http_req->timestamp; 13597 log.u_bbr.cur_del_rate = http_req->start; 13598 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13599 log.u_bbr.flex8 |= 1; 13600 } else { 13601 log.u_bbr.flex8 |= 2; 13602 log.u_bbr.bw_inuse = http_req->end; 13603 } 13604 log.u_bbr.flex6 = http_req->start_seq; 13605 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13606 log.u_bbr.flex8 |= 4; 13607 log.u_bbr.epoch = http_req->end_seq; 13608 } 13609 } 13610 #endif 13611 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 13612 tlen, &log, true, <v); 13613 } 13614 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 13615 way_out = 4; 13616 retval = 0; 13617 m_freem(m); 13618 goto done_with_input; 13619 } 13620 /* 13621 * If a segment with the ACK-bit set arrives in the SYN-SENT state 13622 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 13623 */ 13624 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 13625 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 13626 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 13627 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 13628 #ifdef TCP_ACCOUNTING 13629 sched_unpin(); 13630 #endif 13631 return (1); 13632 } 13633 13634 /* 13635 * Parse options on any incoming segment. 13636 */ 13637 tcp_dooptions(&to, (u_char *)(th + 1), 13638 (th->th_off << 2) - sizeof(struct tcphdr), 13639 (thflags & TH_SYN) ? TO_SYN : 0); 13640 13641 /* 13642 * If timestamps were negotiated during SYN/ACK and a 13643 * segment without a timestamp is received, silently drop 13644 * the segment, unless it is a RST segment or missing timestamps are 13645 * tolerated. 13646 * See section 3.2 of RFC 7323. 13647 */ 13648 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 13649 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 13650 way_out = 5; 13651 retval = 0; 13652 m_freem(m); 13653 goto done_with_input; 13654 } 13655 13656 /* 13657 * Segment received on connection. Reset idle time and keep-alive 13658 * timer. XXX: This should be done after segment validation to 13659 * ignore broken/spoofed segs. 13660 */ 13661 if (tp->t_idle_reduce && 13662 (tp->snd_max == tp->snd_una) && 13663 ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13664 counter_u64_add(rack_input_idle_reduces, 1); 13665 rack_cc_after_idle(rack, tp); 13666 } 13667 tp->t_rcvtime = ticks; 13668 #ifdef STATS 13669 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 13670 #endif 13671 if (tiwin > rack->r_ctl.rc_high_rwnd) 13672 rack->r_ctl.rc_high_rwnd = tiwin; 13673 /* 13674 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 13675 * this to occur after we've validated the segment. 13676 */ 13677 if (tp->t_flags2 & TF2_ECN_PERMIT) { 13678 if (thflags & TH_CWR) { 13679 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 13680 tp->t_flags |= TF_ACKNOW; 13681 } 13682 switch (iptos & IPTOS_ECN_MASK) { 13683 case IPTOS_ECN_CE: 13684 tp->t_flags2 |= TF2_ECN_SND_ECE; 13685 KMOD_TCPSTAT_INC(tcps_ecn_ce); 13686 break; 13687 case IPTOS_ECN_ECT0: 13688 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 13689 break; 13690 case IPTOS_ECN_ECT1: 13691 KMOD_TCPSTAT_INC(tcps_ecn_ect1); 13692 break; 13693 } 13694 13695 /* Process a packet differently from RFC3168. */ 13696 cc_ecnpkt_handler(tp, th, iptos); 13697 13698 /* Congestion experienced. 
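 * (An ECE from the peer is additionally fed to rack_cong_signal()
 * with CC_ECN just below; the per-packet handling was already done
 * by cc_ecnpkt_handler() above.)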
*/ 13699 if (thflags & TH_ECE) { 13700 rack_cong_signal(tp, CC_ECN, th->th_ack); 13701 } 13702 } 13703 13704 /* 13705 * If echoed timestamp is later than the current time, fall back to 13706 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13707 * were used when this connection was established. 13708 */ 13709 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 13710 to.to_tsecr -= tp->ts_offset; 13711 if (TSTMP_GT(to.to_tsecr, cts)) 13712 to.to_tsecr = 0; 13713 } 13714 13715 /* 13716 * If its the first time in we need to take care of options and 13717 * verify we can do SACK for rack! 13718 */ 13719 if (rack->r_state == 0) { 13720 /* Should be init'd by rack_init() */ 13721 KASSERT(rack->rc_inp != NULL, 13722 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 13723 if (rack->rc_inp == NULL) { 13724 rack->rc_inp = tp->t_inpcb; 13725 } 13726 13727 /* 13728 * Process options only when we get SYN/ACK back. The SYN 13729 * case for incoming connections is handled in tcp_syncache. 13730 * According to RFC1323 the window field in a SYN (i.e., a 13731 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 13732 * this is traditional behavior, may need to be cleaned up. 13733 */ 13734 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 13735 /* Handle parallel SYN for ECN */ 13736 if (!(thflags & TH_ACK) && 13737 ((thflags & (TH_CWR | TH_ECE)) == (TH_CWR | TH_ECE)) && 13738 ((V_tcp_do_ecn == 1) || (V_tcp_do_ecn == 2))) { 13739 tp->t_flags2 |= TF2_ECN_PERMIT; 13740 tp->t_flags2 |= TF2_ECN_SND_ECE; 13741 TCPSTAT_INC(tcps_ecn_shs); 13742 } 13743 if ((to.to_flags & TOF_SCALE) && 13744 (tp->t_flags & TF_REQ_SCALE)) { 13745 tp->t_flags |= TF_RCVD_SCALE; 13746 tp->snd_scale = to.to_wscale; 13747 } else 13748 tp->t_flags &= ~TF_REQ_SCALE; 13749 /* 13750 * Initial send window. It will be updated with the 13751 * next incoming segment to the scaled value. 13752 */ 13753 tp->snd_wnd = th->th_win; 13754 rack_validate_fo_sendwin_up(tp, rack); 13755 if ((to.to_flags & TOF_TS) && 13756 (tp->t_flags & TF_REQ_TSTMP)) { 13757 tp->t_flags |= TF_RCVD_TSTMP; 13758 tp->ts_recent = to.to_tsval; 13759 tp->ts_recent_age = cts; 13760 } else 13761 tp->t_flags &= ~TF_REQ_TSTMP; 13762 if (to.to_flags & TOF_MSS) { 13763 tcp_mss(tp, to.to_mss); 13764 } 13765 if ((tp->t_flags & TF_SACK_PERMIT) && 13766 (to.to_flags & TOF_SACKPERM) == 0) 13767 tp->t_flags &= ~TF_SACK_PERMIT; 13768 if (IS_FASTOPEN(tp->t_flags)) { 13769 if (to.to_flags & TOF_FASTOPEN) { 13770 uint16_t mss; 13771 13772 if (to.to_flags & TOF_MSS) 13773 mss = to.to_mss; 13774 else 13775 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 13776 mss = TCP6_MSS; 13777 else 13778 mss = TCP_MSS; 13779 tcp_fastopen_update_cache(tp, mss, 13780 to.to_tfo_len, to.to_tfo_cookie); 13781 } else 13782 tcp_fastopen_disable_path(tp); 13783 } 13784 } 13785 /* 13786 * At this point we are at the initial call. Here we decide 13787 * if we are doing RACK or not. We do this by seeing if 13788 * TF_SACK_PERMIT is set and the sack-not-required is clear. 13789 * The code now does do dup-ack counting so if you don't 13790 * switch back you won't get rack & TLP, but you will still 13791 * get this stack. 
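 * Put differently: with rack_sack_not_required left at 0 and a peer
 * that did not negotiate SACK, the check below switches the
 * connection back to the default stack and re-runs do_segment there.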
13792 */ 13793 13794 if ((rack_sack_not_required == 0) && 13795 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 13796 tcp_switch_back_to_default(tp); 13797 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 13798 tlen, iptos); 13799 #ifdef TCP_ACCOUNTING 13800 sched_unpin(); 13801 #endif 13802 return (1); 13803 } 13804 tcp_set_hpts(tp->t_inpcb); 13805 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 13806 } 13807 if (thflags & TH_FIN) 13808 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 13809 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13810 if ((rack->rc_gp_dyn_mul) && 13811 (rack->use_fixed_rate == 0) && 13812 (rack->rc_always_pace)) { 13813 /* Check in on probertt */ 13814 rack_check_probe_rtt(rack, us_cts); 13815 } 13816 if (rack->forced_ack) { 13817 uint32_t us_rtt; 13818 13819 /* 13820 * A persist or keep-alive was forced out, update our 13821 * min rtt time. Note we do not worry about lost 13822 * retransmissions since KEEP-ALIVES and persists 13823 * are usually way long on times of sending (though 13824 * if we were really paranoid or worried we could 13825 * at least use timestamps if available to validate). 13826 */ 13827 rack->forced_ack = 0; 13828 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13829 if (us_rtt == 0) 13830 us_rtt = 1; 13831 rack_log_rtt_upd(tp, rack, us_rtt, 0, NULL, 3); 13832 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13833 } 13834 /* 13835 * This is the one exception case where we set the rack state 13836 * always. All other times (timers etc) we must have a rack-state 13837 * set (so we assure we have done the checks above for SACK). 13838 */ 13839 rack->r_ctl.rc_rcvtime = cts; 13840 if (rack->r_state != tp->t_state) 13841 rack_set_state(tp, rack); 13842 if (SEQ_GT(th->th_ack, tp->snd_una) && 13843 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 13844 kern_prefetch(rsm, &prev_state); 13845 prev_state = rack->r_state; 13846 rack_clear_rate_sample(rack); 13847 retval = (*rack->r_substate) (m, th, so, 13848 tp, &to, drop_hdrlen, 13849 tlen, tiwin, thflags, nxt_pkt, iptos); 13850 #ifdef INVARIANTS 13851 if ((retval == 0) && 13852 (tp->t_inpcb == NULL)) { 13853 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 13854 retval, tp, prev_state); 13855 } 13856 #endif 13857 if (retval == 0) { 13858 /* 13859 * If retval is 1 the tcb is unlocked and most likely the tp 13860 * is gone. 13861 */ 13862 INP_WLOCK_ASSERT(tp->t_inpcb); 13863 if ((rack->rc_gp_dyn_mul) && 13864 (rack->rc_always_pace) && 13865 (rack->use_fixed_rate == 0) && 13866 rack->in_probe_rtt && 13867 (rack->r_ctl.rc_time_probertt_starts == 0)) { 13868 /* 13869 * If we are going for target, lets recheck before 13870 * we output. 13871 */ 13872 rack_check_probe_rtt(rack, us_cts); 13873 } 13874 if (rack->set_pacing_done_a_iw == 0) { 13875 /* How much has been acked? */ 13876 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 13877 /* We have enough to set in the pacing segment size */ 13878 rack->set_pacing_done_a_iw = 1; 13879 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13880 } 13881 } 13882 tcp_rack_xmit_timer_commit(rack, tp); 13883 #ifdef TCP_ACCOUNTING 13884 /* 13885 * If we set the ack_val_se to what ack processing we are doing 13886 * we also want to track how many cycles we burned. Note 13887 * the bits after tcp_output we let be "free". This is because 13888 * we are also tracking the tcp_output times as well. 
Note the 13889 * use of 0xf here since we only have 11 counter (0 - 0xa) and 13890 * 0xf cannot be returned and is what we initialize it too to 13891 * indicate we are not doing the tabulations. 13892 */ 13893 if (ack_val_set != 0xf) { 13894 uint64_t crtsc; 13895 13896 crtsc = get_cyclecount(); 13897 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 13898 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13899 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 13900 } 13901 } 13902 #endif 13903 if (nxt_pkt == 0) { 13904 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13905 do_output_now: 13906 did_out = 1; 13907 (void)tp->t_fb->tfb_tcp_output(tp); 13908 } 13909 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 13910 rack_free_trim(rack); 13911 } 13912 if ((nxt_pkt == 0) && 13913 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 13914 (SEQ_GT(tp->snd_max, tp->snd_una) || 13915 (tp->t_flags & TF_DELACK) || 13916 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13917 (tp->t_state <= TCPS_CLOSING)))) { 13918 /* We could not send (probably in the hpts but stopped the timer earlier)? */ 13919 if ((tp->snd_max == tp->snd_una) && 13920 ((tp->t_flags & TF_DELACK) == 0) && 13921 (rack->rc_inp->inp_in_hpts) && 13922 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 13923 /* keep alive not needed if we are hptsi output yet */ 13924 ; 13925 } else { 13926 int late = 0; 13927 if (rack->rc_inp->inp_in_hpts) { 13928 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13929 us_cts = tcp_get_usecs(NULL); 13930 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13931 rack->r_early = 1; 13932 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13933 } else 13934 late = 1; 13935 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13936 } 13937 tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_OUTPUT); 13938 } 13939 if (late && (did_out == 0)) { 13940 /* 13941 * We are late in the sending 13942 * and we did not call the output 13943 * (this probably should not happen). 13944 */ 13945 goto do_output_now; 13946 } 13947 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13948 } 13949 way_out = 1; 13950 } else if (nxt_pkt == 0) { 13951 /* Do we have the correct timer running? */ 13952 rack_timer_audit(tp, rack, &so->so_snd); 13953 way_out = 2; 13954 } 13955 done_with_input: 13956 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 13957 if (did_out) 13958 rack->r_wanted_output = 0; 13959 #ifdef INVARIANTS 13960 if (tp->t_inpcb == NULL) { 13961 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 13962 did_out, 13963 retval, tp, prev_state); 13964 } 13965 #endif 13966 #ifdef TCP_ACCOUNTING 13967 } else { 13968 /* 13969 * Track the time (see above). 13970 */ 13971 if (ack_val_set != 0xf) { 13972 uint64_t crtsc; 13973 13974 crtsc = get_cyclecount(); 13975 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 13976 /* 13977 * Note we *DO NOT* increment the per-tcb counters since 13978 * in the else the TP may be gone!! 
13979 */ 13980 } 13981 #endif 13982 } 13983 #ifdef TCP_ACCOUNTING 13984 sched_unpin(); 13985 #endif 13986 return (retval); 13987 } 13988 13989 void 13990 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 13991 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 13992 { 13993 struct timeval tv; 13994 13995 /* First lets see if we have old packets */ 13996 if (tp->t_in_pkt) { 13997 if (ctf_do_queued_segments(so, tp, 1)) { 13998 m_freem(m); 13999 return; 14000 } 14001 } 14002 if (m->m_flags & M_TSTMP_LRO) { 14003 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14004 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14005 } else { 14006 /* Should not be should we kassert instead? */ 14007 tcp_get_usecs(&tv); 14008 } 14009 if (rack_do_segment_nounlock(m, th, so, tp, 14010 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14011 INP_WUNLOCK(tp->t_inpcb); 14012 } 14013 } 14014 14015 struct rack_sendmap * 14016 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14017 { 14018 struct rack_sendmap *rsm = NULL; 14019 int32_t idx; 14020 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14021 14022 /* Return the next guy to be re-transmitted */ 14023 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14024 return (NULL); 14025 } 14026 if (tp->t_flags & TF_SENTFIN) { 14027 /* retran the end FIN? */ 14028 return (NULL); 14029 } 14030 /* ok lets look at this one */ 14031 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14032 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14033 goto check_it; 14034 } 14035 rsm = rack_find_lowest_rsm(rack); 14036 if (rsm == NULL) { 14037 return (NULL); 14038 } 14039 check_it: 14040 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14041 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14042 /* 14043 * No sack so we automatically do the 3 strikes and 14044 * retransmit (no rack timer would be started). 14045 */ 14046 14047 return (rsm); 14048 } 14049 if (rsm->r_flags & RACK_ACKED) { 14050 return (NULL); 14051 } 14052 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14053 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14054 /* Its not yet ready */ 14055 return (NULL); 14056 } 14057 srtt = rack_grab_rtt(tp, rack); 14058 idx = rsm->r_rtr_cnt - 1; 14059 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14060 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14061 if ((tsused == ts_low) || 14062 (TSTMP_LT(tsused, ts_low))) { 14063 /* No time since sending */ 14064 return (NULL); 14065 } 14066 if ((tsused - ts_low) < thresh) { 14067 /* It has not been long enough yet */ 14068 return (NULL); 14069 } 14070 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14071 ((rsm->r_flags & RACK_SACK_PASSED) && 14072 (rack->sack_attack_disable == 0))) { 14073 /* 14074 * We have passed the dup-ack threshold <or> 14075 * a SACK has indicated this is missing. 14076 * Note that if you are a declared attacker 14077 * it is only the dup-ack threshold that 14078 * will cause retransmits. 
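 * Either way rack_log_retran_reason() records why, fast output is
 * switched off, and the rsm is handed back to the caller to be
 * resent.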
14079 */ 14080 /* log retransmit reason */ 14081 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14082 rack->r_fast_output = 0; 14083 return (rsm); 14084 } 14085 return (NULL); 14086 } 14087 14088 static void 14089 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14090 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14091 int line, struct rack_sendmap *rsm) 14092 { 14093 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14094 union tcp_log_stackspecific log; 14095 struct timeval tv; 14096 14097 memset(&log, 0, sizeof(log)); 14098 log.u_bbr.flex1 = slot; 14099 log.u_bbr.flex2 = len; 14100 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14101 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14102 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14103 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14104 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14105 log.u_bbr.use_lt_bw <<= 1; 14106 log.u_bbr.use_lt_bw |= rack->r_late; 14107 log.u_bbr.use_lt_bw <<= 1; 14108 log.u_bbr.use_lt_bw |= rack->r_early; 14109 log.u_bbr.use_lt_bw <<= 1; 14110 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14111 log.u_bbr.use_lt_bw <<= 1; 14112 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14113 log.u_bbr.use_lt_bw <<= 1; 14114 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14115 log.u_bbr.use_lt_bw <<= 1; 14116 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14117 log.u_bbr.use_lt_bw <<= 1; 14118 log.u_bbr.use_lt_bw |= rack->gp_ready; 14119 log.u_bbr.pkt_epoch = line; 14120 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14121 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14122 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14123 log.u_bbr.bw_inuse = bw_est; 14124 log.u_bbr.delRate = bw; 14125 if (rack->r_ctl.gp_bw == 0) 14126 log.u_bbr.cur_del_rate = 0; 14127 else 14128 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14129 log.u_bbr.rttProp = len_time; 14130 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14131 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14132 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14133 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14134 /* We are in slow start */ 14135 log.u_bbr.flex7 = 1; 14136 } else { 14137 /* we are on congestion avoidance */ 14138 log.u_bbr.flex7 = 0; 14139 } 14140 log.u_bbr.flex8 = method; 14141 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14142 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14143 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14144 log.u_bbr.cwnd_gain <<= 1; 14145 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14146 log.u_bbr.cwnd_gain <<= 1; 14147 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14148 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14149 &rack->rc_inp->inp_socket->so_rcv, 14150 &rack->rc_inp->inp_socket->so_snd, 14151 BBR_LOG_HPTSI_CALC, 0, 14152 0, &log, false, &tv); 14153 } 14154 } 14155 14156 static uint32_t 14157 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14158 { 14159 uint32_t new_tso, user_max; 14160 14161 user_max = rack->rc_user_set_max_segs * mss; 14162 if (rack->rc_force_max_seg) { 14163 return (user_max); 14164 } 14165 if (rack->use_fixed_rate && 14166 ((rack->r_ctl.crte == NULL) || 14167 (bw != rack->r_ctl.crte->rate))) { 14168 /* Use the user mss since we are not exactly matched */ 14169 return (user_max); 14170 } 14171 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14172 if (new_tso > user_max) 14173 new_tso = user_max; 14174 
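/* new_tso is now bounded by the user-configured maximum. */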
return (new_tso); 14175 } 14176 14177 static int32_t 14178 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14179 { 14180 uint64_t lentim, fill_bw; 14181 14182 /* Lets first see if we are full, if so continue with normal rate */ 14183 rack->r_via_fill_cw = 0; 14184 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14185 return (slot); 14186 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14187 return (slot); 14188 if (rack->r_ctl.rc_last_us_rtt == 0) 14189 return (slot); 14190 if (rack->rc_pace_fill_if_rttin_range && 14191 (rack->r_ctl.rc_last_us_rtt >= 14192 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14193 /* The rtt is huge, N * smallest, lets not fill */ 14194 return (slot); 14195 } 14196 /* 14197 * first lets calculate the b/w based on the last us-rtt 14198 * and the sndwnd. 14199 */ 14200 fill_bw = rack->r_ctl.cwnd_to_use; 14201 /* Take the rwnd if its smaller */ 14202 if (fill_bw > rack->rc_tp->snd_wnd) 14203 fill_bw = rack->rc_tp->snd_wnd; 14204 if (rack->r_fill_less_agg) { 14205 /* 14206 * Now take away the inflight (this will reduce our 14207 * aggressiveness and yeah, if we get that much out in 1RTT 14208 * we will have had acks come back and still be behind). 14209 */ 14210 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14211 } 14212 /* Now lets make it into a b/w */ 14213 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14214 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14215 /* We are below the min b/w */ 14216 if (non_paced) 14217 *rate_wanted = fill_bw; 14218 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14219 return (slot); 14220 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14221 fill_bw = rack->r_ctl.bw_rate_cap; 14222 rack->r_via_fill_cw = 1; 14223 if (rack->r_rack_hw_rate_caps && 14224 (rack->r_ctl.crte != NULL)) { 14225 uint64_t high_rate; 14226 14227 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14228 if (fill_bw > high_rate) { 14229 /* We are capping bw at the highest rate table entry */ 14230 if (*rate_wanted > high_rate) { 14231 /* The original rate was also capped */ 14232 rack->r_via_fill_cw = 0; 14233 } 14234 rack_log_hdwr_pacing(rack, 14235 fill_bw, high_rate, __LINE__, 14236 0, 3); 14237 fill_bw = high_rate; 14238 if (capped) 14239 *capped = 1; 14240 } 14241 } else if ((rack->r_ctl.crte == NULL) && 14242 (rack->rack_hdrw_pacing == 0) && 14243 (rack->rack_hdw_pace_ena) && 14244 rack->r_rack_hw_rate_caps && 14245 (rack->rack_attempt_hdwr_pace == 0) && 14246 (rack->rc_inp->inp_route.ro_nh != NULL) && 14247 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14248 /* 14249 * Ok we may have a first attempt that is greater than our top rate 14250 * lets check. 14251 */ 14252 uint64_t high_rate; 14253 14254 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14255 if (high_rate) { 14256 if (fill_bw > high_rate) { 14257 fill_bw = high_rate; 14258 if (capped) 14259 *capped = 1; 14260 } 14261 } 14262 } 14263 /* 14264 * Ok fill_bw holds our mythical b/w to fill the cwnd 14265 * in a rtt, what does that time wise equate too? 
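 * Illustrative numbers (not from a trace, and ignoring the rwnd and
 * inflight adjustments above): cwnd_to_use = 100000 bytes and
 * rc_last_us_rtt = 10000 usecs give fill_bw = 100000 * 1000000 /
 * 10000 = 10,000,000 bytes/sec; a len of 1448 bytes then costs
 * lentim = 1448 * 1000000 / 10000000 ~= 144 usecs, which replaces
 * the slot below only when it is the smaller of the two (or when we
 * are non-paced).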
14266 */ 14267 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14268 lentim /= fill_bw; 14269 *rate_wanted = fill_bw; 14270 if (non_paced || (lentim < slot)) { 14271 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14272 0, lentim, 12, __LINE__, NULL); 14273 return ((int32_t)lentim); 14274 } else 14275 return (slot); 14276 } 14277 14278 static int32_t 14279 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14280 { 14281 struct rack_sendmap *lrsm; 14282 int32_t slot = 0; 14283 int can_start_hw_pacing = 1; 14284 int err; 14285 14286 if (rack->rc_always_pace == 0) { 14287 /* 14288 * We use the most optimistic possible cwnd/srtt for 14289 * sending calculations. This will make our 14290 * calculation anticipate getting more through 14291 * quicker then possible. But thats ok we don't want 14292 * the peer to have a gap in data sending. 14293 */ 14294 uint32_t srtt, cwnd, tr_perms = 0; 14295 int32_t reduce = 0; 14296 14297 old_method: 14298 /* 14299 * We keep no precise pacing with the old method 14300 * instead we use the pacer to mitigate bursts. 14301 */ 14302 if (rack->r_ctl.rc_rack_min_rtt) 14303 srtt = rack->r_ctl.rc_rack_min_rtt; 14304 else 14305 srtt = max(tp->t_srtt, 1); 14306 if (rack->r_ctl.rc_rack_largest_cwnd) 14307 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14308 else 14309 cwnd = rack->r_ctl.cwnd_to_use; 14310 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14311 tr_perms = (cwnd * 1000) / srtt; 14312 if (tr_perms == 0) { 14313 tr_perms = ctf_fixed_maxseg(tp); 14314 } 14315 /* 14316 * Calculate how long this will take to drain, if 14317 * the calculation comes out to zero, thats ok we 14318 * will use send_a_lot to possibly spin around for 14319 * more increasing tot_len_this_send to the point 14320 * that its going to require a pace, or we hit the 14321 * cwnd. Which in that case we are just waiting for 14322 * a ACK. 14323 */ 14324 slot = len / tr_perms; 14325 /* Now do we reduce the time so we don't run dry? */ 14326 if (slot && rack_slot_reduction) { 14327 reduce = (slot / rack_slot_reduction); 14328 if (reduce < slot) { 14329 slot -= reduce; 14330 } else 14331 slot = 0; 14332 } 14333 slot *= HPTS_USEC_IN_MSEC; 14334 if (rsm == NULL) { 14335 /* 14336 * We always consider ourselves app limited with old style 14337 * that are not retransmits. This could be the initial 14338 * measurement, but thats ok its all setup and specially 14339 * handled. If another send leaks out, then that too will 14340 * be mark app-limited. 
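 * (The RB_MAX lookup below tags the newest sendmap entry, if it is
 * not already tagged, with RACK_APP_LIMITED, remembers it in
 * rc_first_appl and bumps rc_app_limited_cnt.)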
14341 */ 14342 lrsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 14343 if (lrsm && ((lrsm->r_flags & RACK_APP_LIMITED) == 0)) { 14344 rack->r_ctl.rc_first_appl = lrsm; 14345 lrsm->r_flags |= RACK_APP_LIMITED; 14346 rack->r_ctl.rc_app_limited_cnt++; 14347 } 14348 } 14349 if (rack->rc_pace_to_cwnd) { 14350 uint64_t rate_wanted = 0; 14351 14352 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14353 rack->rc_ack_can_sendout_data = 1; 14354 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL); 14355 } else 14356 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL); 14357 } else { 14358 uint64_t bw_est, res, lentim, rate_wanted; 14359 uint32_t orig_val, srtt, segs, oh; 14360 int capped = 0; 14361 int prev_fill; 14362 14363 if ((rack->r_rr_config == 1) && rsm) { 14364 return (rack->r_ctl.rc_min_to); 14365 } 14366 if (rack->use_fixed_rate) { 14367 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14368 } else if ((rack->r_ctl.init_rate == 0) && 14369 #ifdef NETFLIX_PEAKRATE 14370 (rack->rc_tp->t_maxpeakrate == 0) && 14371 #endif 14372 (rack->r_ctl.gp_bw == 0)) { 14373 /* no way to yet do an estimate */ 14374 bw_est = rate_wanted = 0; 14375 } else { 14376 bw_est = rack_get_bw(rack); 14377 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14378 } 14379 if ((bw_est == 0) || (rate_wanted == 0) || 14380 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14381 /* 14382 * No way yet to make a b/w estimate or 14383 * our raise is set incorrectly. 14384 */ 14385 goto old_method; 14386 } 14387 /* We need to account for all the overheads */ 14388 segs = (len + segsiz - 1) / segsiz; 14389 /* 14390 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14391 * and how much data we put in each packet. Yes this 14392 * means we may be off if we are larger than 1500 bytes 14393 * or smaller. But this just makes us more conservative. 14394 */ 14395 if (rack_hw_rate_min && 14396 (bw_est < rack_hw_rate_min)) 14397 can_start_hw_pacing = 0; 14398 if (ETHERNET_SEGMENT_SIZE > segsiz) 14399 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14400 else 14401 oh = 0; 14402 segs *= oh; 14403 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14404 res = lentim / rate_wanted; 14405 slot = (uint32_t)res; 14406 orig_val = rack->r_ctl.rc_pace_max_segs; 14407 if (rack->r_ctl.crte == NULL) { 14408 /* 14409 * Only do this if we are not hardware pacing 14410 * since if we are doing hw-pacing below we will 14411 * set make a call after setting up or changing 14412 * the rate. 14413 */ 14414 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14415 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14416 /* 14417 * We lost our rate somehow, this can happen 14418 * if the interface changed underneath us. 
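 * Release the stale rate entry, clear the hardware-pacing flags so a
 * later pass may attempt setup again, and log what happened.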
14419 */ 14420 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14421 rack->r_ctl.crte = NULL; 14422 /* Lets re-allow attempting to setup pacing */ 14423 rack->rack_hdrw_pacing = 0; 14424 rack->rack_attempt_hdwr_pace = 0; 14425 rack_log_hdwr_pacing(rack, 14426 rate_wanted, bw_est, __LINE__, 14427 0, 6); 14428 } 14429 /* Did we change the TSO size, if so log it */ 14430 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14431 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL); 14432 prev_fill = rack->r_via_fill_cw; 14433 if ((rack->rc_pace_to_cwnd) && 14434 (capped == 0) && 14435 (rack->use_fixed_rate == 0) && 14436 (rack->in_probe_rtt == 0) && 14437 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14438 /* 14439 * We want to pace at our rate *or* faster to 14440 * fill the cwnd to the max if its not full. 14441 */ 14442 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14443 } 14444 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14445 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14446 if ((rack->rack_hdw_pace_ena) && 14447 (can_start_hw_pacing > 0) && 14448 (rack->rack_hdrw_pacing == 0) && 14449 (rack->rack_attempt_hdwr_pace == 0)) { 14450 /* 14451 * Lets attempt to turn on hardware pacing 14452 * if we can. 14453 */ 14454 rack->rack_attempt_hdwr_pace = 1; 14455 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14456 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14457 rate_wanted, 14458 RS_PACING_GEQ, 14459 &err, &rack->r_ctl.crte_prev_rate); 14460 if (rack->r_ctl.crte) { 14461 rack->rack_hdrw_pacing = 1; 14462 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14463 0, rack->r_ctl.crte, 14464 NULL); 14465 rack_log_hdwr_pacing(rack, 14466 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14467 err, 0); 14468 rack->r_ctl.last_hw_bw_req = rate_wanted; 14469 } else { 14470 counter_u64_add(rack_hw_pace_init_fail, 1); 14471 } 14472 } else if (rack->rack_hdrw_pacing && 14473 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14474 /* Do we need to adjust our rate? */ 14475 const struct tcp_hwrate_limit_table *nrte; 14476 14477 if (rack->r_up_only && 14478 (rate_wanted < rack->r_ctl.crte->rate)) { 14479 /** 14480 * We have four possible states here 14481 * having to do with the previous time 14482 * and this time. 14483 * previous | this-time 14484 * A) 0 | 0 -- fill_cw not in the picture 14485 * B) 1 | 0 -- we were doing a fill-cw but now are not 14486 * C) 1 | 1 -- all rates from fill_cw 14487 * D) 0 | 1 -- we were doing non-fill and now we are filling 14488 * 14489 * For case A, C and D we don't allow a drop. But for 14490 * case B where we now our on our steady rate we do 14491 * allow a drop. 14492 * 14493 */ 14494 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14495 goto done_w_hdwr; 14496 } 14497 if ((rate_wanted > rack->r_ctl.crte->rate) || 14498 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 14499 if (rack_hw_rate_to_low && 14500 (bw_est < rack_hw_rate_to_low)) { 14501 /* 14502 * The pacing rate is too low for hardware, but 14503 * do allow hardware pacing to be restarted. 
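 * (We log the event, hand the rate back, clear the attempt/enabled
 * flags and jump to done_w_hdwr, effectively falling back to
 * software pacing with a recomputed segment size.)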
14504 */ 14505 rack_log_hdwr_pacing(rack, 14506 bw_est, rack->r_ctl.crte->rate, __LINE__, 14507 0, 5); 14508 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14509 rack->r_ctl.crte = NULL; 14510 rack->rack_attempt_hdwr_pace = 0; 14511 rack->rack_hdrw_pacing = 0; 14512 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14513 goto done_w_hdwr; 14514 } 14515 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 14516 rack->rc_tp, 14517 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14518 rate_wanted, 14519 RS_PACING_GEQ, 14520 &err, &rack->r_ctl.crte_prev_rate); 14521 if (nrte == NULL) { 14522 /* Lost the rate */ 14523 rack->rack_hdrw_pacing = 0; 14524 rack->r_ctl.crte = NULL; 14525 rack_log_hdwr_pacing(rack, 14526 rate_wanted, 0, __LINE__, 14527 err, 1); 14528 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14529 counter_u64_add(rack_hw_pace_lost, 1); 14530 } else if (nrte != rack->r_ctl.crte) { 14531 rack->r_ctl.crte = nrte; 14532 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 14533 segsiz, 0, 14534 rack->r_ctl.crte, 14535 NULL); 14536 rack_log_hdwr_pacing(rack, 14537 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14538 err, 2); 14539 rack->r_ctl.last_hw_bw_req = rate_wanted; 14540 } 14541 } else { 14542 /* We just need to adjust the segment size */ 14543 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 14544 rack_log_hdwr_pacing(rack, 14545 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14546 0, 4); 14547 rack->r_ctl.last_hw_bw_req = rate_wanted; 14548 } 14549 } 14550 } 14551 if ((rack->r_ctl.crte != NULL) && 14552 (rack->r_ctl.crte->rate == rate_wanted)) { 14553 /* 14554 * We need to add a extra if the rates 14555 * are exactly matched. The idea is 14556 * we want the software to make sure the 14557 * queue is empty before adding more, this 14558 * gives us N MSS extra pace times where 14559 * N is our sysctl 14560 */ 14561 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 14562 } 14563 done_w_hdwr: 14564 if (rack_limit_time_with_srtt && 14565 (rack->use_fixed_rate == 0) && 14566 #ifdef NETFLIX_PEAKRATE 14567 (rack->rc_tp->t_maxpeakrate == 0) && 14568 #endif 14569 (rack->rack_hdrw_pacing == 0)) { 14570 /* 14571 * Sanity check, we do not allow the pacing delay 14572 * to be longer than the SRTT of the path. If it is 14573 * a slow path, then adding a packet should increase 14574 * the RTT and compensate for this i.e. the srtt will 14575 * be greater so the allowed pacing time will be greater. 14576 * 14577 * Note this restriction is not for where a peak rate 14578 * is set, we are doing fixed pacing or hardware pacing. 14579 */ 14580 if (rack->rc_tp->t_srtt) 14581 srtt = rack->rc_tp->t_srtt; 14582 else 14583 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 14584 if (srtt < slot) { 14585 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL); 14586 slot = srtt; 14587 } 14588 } 14589 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm); 14590 } 14591 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 14592 /* 14593 * If this rate is seeing enobufs when it 14594 * goes to send then either the nic is out 14595 * of gas or we are mis-estimating the time 14596 * somehow and not letting the queue empty 14597 * completely. Lets add to the pacing time. 
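 * The boost below is the rate entry's time_between scaled by
 * rack_enobuf_hw_boost_mult and clamped to the range
 * [rack_enobuf_hw_min, rack_enobuf_hw_max] before being added to the
 * slot.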
14598 */ 14599 int hw_boost_delay; 14600 14601 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 14602 if (hw_boost_delay > rack_enobuf_hw_max) 14603 hw_boost_delay = rack_enobuf_hw_max; 14604 else if (hw_boost_delay < rack_enobuf_hw_min) 14605 hw_boost_delay = rack_enobuf_hw_min; 14606 slot += hw_boost_delay; 14607 } 14608 if (slot) 14609 counter_u64_add(rack_calc_nonzero, 1); 14610 else 14611 counter_u64_add(rack_calc_zero, 1); 14612 return (slot); 14613 } 14614 14615 static void 14616 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 14617 tcp_seq startseq, uint32_t sb_offset) 14618 { 14619 struct rack_sendmap *my_rsm = NULL; 14620 struct rack_sendmap fe; 14621 14622 if (tp->t_state < TCPS_ESTABLISHED) { 14623 /* 14624 * We don't start any measurements if we are 14625 * not at least established. 14626 */ 14627 return; 14628 } 14629 tp->t_flags |= TF_GPUTINPROG; 14630 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 14631 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 14632 tp->gput_seq = startseq; 14633 rack->app_limited_needs_set = 0; 14634 if (rack->in_probe_rtt) 14635 rack->measure_saw_probe_rtt = 1; 14636 else if ((rack->measure_saw_probe_rtt) && 14637 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 14638 rack->measure_saw_probe_rtt = 0; 14639 if (rack->rc_gp_filled) 14640 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14641 else { 14642 /* Special case initial measurement */ 14643 struct timeval tv; 14644 14645 tp->gput_ts = tcp_get_usecs(&tv); 14646 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 14647 } 14648 /* 14649 * We take a guess out into the future, 14650 * if we have no measurement and no 14651 * initial rate, we measure the first 14652 * initial-windows worth of data to 14653 * speed up getting some GP measurement and 14654 * thus start pacing. 14655 */ 14656 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 14657 rack->app_limited_needs_set = 1; 14658 tp->gput_ack = startseq + max(rc_init_window(rack), 14659 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 14660 rack_log_pacing_delay_calc(rack, 14661 tp->gput_seq, 14662 tp->gput_ack, 14663 0, 14664 tp->gput_ts, 14665 rack->r_ctl.rc_app_limited_cnt, 14666 9, 14667 __LINE__, NULL); 14668 return; 14669 } 14670 if (sb_offset) { 14671 /* 14672 * We are out somewhere in the sb 14673 * can we use the already outstanding data? 14674 */ 14675 14676 if (rack->r_ctl.rc_app_limited_cnt == 0) { 14677 /* 14678 * Yes first one is good and in this case 14679 * the tp->gput_ts is correctly set based on 14680 * the last ack that arrived (no need to 14681 * set things up when an ack comes in). 14682 */ 14683 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 14684 if ((my_rsm == NULL) || 14685 (my_rsm->r_rtr_cnt != 1)) { 14686 /* retransmission? */ 14687 goto use_latest; 14688 } 14689 } else { 14690 if (rack->r_ctl.rc_first_appl == NULL) { 14691 /* 14692 * If rc_first_appl is NULL 14693 * then the cnt should be 0. 14694 * This is probably an error, maybe 14695 * a KASSERT would be approprate. 14696 */ 14697 goto use_latest; 14698 } 14699 /* 14700 * If we have a marker pointer to the last one that is 14701 * app limited we can use that, but we need to set 14702 * things up so that when it gets ack'ed we record 14703 * the ack time (if its not already acked). 14704 */ 14705 rack->app_limited_needs_set = 1; 14706 /* 14707 * We want to get to the rsm that is either 14708 * next with space i.e. over 1 MSS or the one 14709 * after that (after the app-limited). 
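 * Concretely: if the entry after rc_first_appl spans more than one
 * MSS we start the measurement one MSS into it; if it is a
 * single-MSS entry we skip forward one more entry instead.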
14710 */ 14711 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 14712 rack->r_ctl.rc_first_appl); 14713 if (my_rsm) { 14714 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 14715 /* Have to use the next one */ 14716 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 14717 my_rsm); 14718 else { 14719 /* Use after the first MSS of it is acked */ 14720 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 14721 goto start_set; 14722 } 14723 } 14724 if ((my_rsm == NULL) || 14725 (my_rsm->r_rtr_cnt != 1)) { 14726 /* 14727 * Either its a retransmit or 14728 * the last is the app-limited one. 14729 */ 14730 goto use_latest; 14731 } 14732 } 14733 tp->gput_seq = my_rsm->r_start; 14734 start_set: 14735 if (my_rsm->r_flags & RACK_ACKED) { 14736 /* 14737 * This one has been acked use the arrival ack time 14738 */ 14739 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 14740 rack->app_limited_needs_set = 0; 14741 } 14742 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 14743 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 14744 rack_log_pacing_delay_calc(rack, 14745 tp->gput_seq, 14746 tp->gput_ack, 14747 (uint64_t)my_rsm, 14748 tp->gput_ts, 14749 rack->r_ctl.rc_app_limited_cnt, 14750 9, 14751 __LINE__, NULL); 14752 return; 14753 } 14754 14755 use_latest: 14756 /* 14757 * We don't know how long we may have been 14758 * idle or if this is the first-send. Lets 14759 * setup the flag so we will trim off 14760 * the first ack'd data so we get a true 14761 * measurement. 14762 */ 14763 rack->app_limited_needs_set = 1; 14764 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 14765 /* Find this guy so we can pull the send time */ 14766 fe.r_start = startseq; 14767 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 14768 if (my_rsm) { 14769 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 14770 if (my_rsm->r_flags & RACK_ACKED) { 14771 /* 14772 * Unlikely since its probably what was 14773 * just transmitted (but I am paranoid). 14774 */ 14775 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 14776 rack->app_limited_needs_set = 0; 14777 } 14778 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 14779 /* This also is unlikely */ 14780 tp->gput_seq = my_rsm->r_start; 14781 } 14782 } else { 14783 /* 14784 * TSNH unless we have some send-map limit, 14785 * and even at that it should not be hitting 14786 * that limit (we should have stopped sending). 14787 */ 14788 struct timeval tv; 14789 14790 microuptime(&tv); 14791 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 14792 } 14793 rack_log_pacing_delay_calc(rack, 14794 tp->gput_seq, 14795 tp->gput_ack, 14796 (uint64_t)my_rsm, 14797 tp->gput_ts, 14798 rack->r_ctl.rc_app_limited_cnt, 14799 9, __LINE__, NULL); 14800 } 14801 14802 static inline uint32_t 14803 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 14804 uint32_t avail, int32_t sb_offset) 14805 { 14806 uint32_t len; 14807 uint32_t sendwin; 14808 14809 if (tp->snd_wnd > cwnd_to_use) 14810 sendwin = cwnd_to_use; 14811 else 14812 sendwin = tp->snd_wnd; 14813 if (ctf_outstanding(tp) >= tp->snd_wnd) { 14814 /* We never want to go over our peers rcv-window */ 14815 len = 0; 14816 } else { 14817 uint32_t flight; 14818 14819 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 14820 if (flight >= sendwin) { 14821 /* 14822 * We have in flight what we are allowed by cwnd (if 14823 * it was rwnd blocking it would have hit above out 14824 * >= tp->snd_wnd). 
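 * (When flight is below sendwin we fall through below: len starts as
 * sendwin - flight and is then clamped so outstanding data never
 * exceeds the peer's rwnd and so we never ask for more than the sb
 * holds past sb_offset.)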
14825 */ 14826 return (0); 14827 } 14828 len = sendwin - flight; 14829 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 14830 /* We would send too much (beyond the rwnd) */ 14831 len = tp->snd_wnd - ctf_outstanding(tp); 14832 } 14833 if ((len + sb_offset) > avail) { 14834 /* 14835 * We don't have that much in the SB, how much is 14836 * there? 14837 */ 14838 len = avail - sb_offset; 14839 } 14840 } 14841 return (len); 14842 } 14843 14844 static void 14845 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 14846 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 14847 int rsm_is_null, int optlen, int line, uint16_t mode) 14848 { 14849 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14850 union tcp_log_stackspecific log; 14851 struct timeval tv; 14852 14853 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14854 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 14855 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 14856 log.u_bbr.flex1 = error; 14857 log.u_bbr.flex2 = flags; 14858 log.u_bbr.flex3 = rsm_is_null; 14859 log.u_bbr.flex4 = ipoptlen; 14860 log.u_bbr.flex5 = tp->rcv_numsacks; 14861 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 14862 log.u_bbr.flex7 = optlen; 14863 log.u_bbr.flex8 = rack->r_fsb_inited; 14864 log.u_bbr.applimited = rack->r_fast_output; 14865 log.u_bbr.bw_inuse = rack_get_bw(rack); 14866 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 14867 log.u_bbr.cwnd_gain = mode; 14868 log.u_bbr.pkts_out = orig_len; 14869 log.u_bbr.lt_epoch = len; 14870 log.u_bbr.delivered = line; 14871 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14872 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14873 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 14874 len, &log, false, NULL, NULL, 0, &tv); 14875 } 14876 } 14877 14878 14879 static struct mbuf * 14880 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 14881 struct rack_fast_send_blk *fsb, 14882 int32_t seglimit, int32_t segsize, int hw_tls) 14883 { 14884 #ifdef KERN_TLS 14885 struct ktls_session *tls, *ntls; 14886 struct mbuf *start; 14887 #endif 14888 struct mbuf *m, *n, **np, *smb; 14889 struct mbuf *top; 14890 int32_t off, soff; 14891 int32_t len = *plen; 14892 int32_t fragsize; 14893 int32_t len_cp = 0; 14894 uint32_t mlen, frags; 14895 14896 soff = off = the_off; 14897 smb = m = the_m; 14898 np = ⊤ 14899 top = NULL; 14900 #ifdef KERN_TLS 14901 if (hw_tls && (m->m_flags & M_EXTPG)) 14902 tls = m->m_epg_tls; 14903 else 14904 tls = NULL; 14905 start = m; 14906 #endif 14907 while (len > 0) { 14908 if (m == NULL) { 14909 *plen = len_cp; 14910 break; 14911 } 14912 #ifdef KERN_TLS 14913 if (hw_tls) { 14914 if (m->m_flags & M_EXTPG) 14915 ntls = m->m_epg_tls; 14916 else 14917 ntls = NULL; 14918 14919 /* 14920 * Avoid mixing TLS records with handshake 14921 * data or TLS records from different 14922 * sessions. 14923 */ 14924 if (tls != ntls) { 14925 MPASS(m != start); 14926 *plen = len_cp; 14927 break; 14928 } 14929 } 14930 #endif 14931 mlen = min(len, m->m_len - off); 14932 if (seglimit) { 14933 /* 14934 * For M_EXTPG mbufs, add 3 segments 14935 * + 1 in case we are crossing page boundaries 14936 * + 2 in case the TLS hdr/trailer are used 14937 * It is cheaper to just add the segments 14938 * than it is to take the cache miss to look 14939 * at the mbuf ext_pgs state in detail. 
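 * (Illustrative, assuming 4k pages and a seglimit that is not close
 * to exhaustion: a 9000 byte chunk of an M_EXTPG mbuf with a 4096
 * byte segsize is charged 3 + howmany(9000, 4096) = 6 segments
 * against seglimit.)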
14940 */ 14941 if (m->m_flags & M_EXTPG) { 14942 fragsize = min(segsize, PAGE_SIZE); 14943 frags = 3; 14944 } else { 14945 fragsize = segsize; 14946 frags = 0; 14947 } 14948 14949 /* Break if we really can't fit anymore. */ 14950 if ((frags + 1) >= seglimit) { 14951 *plen = len_cp; 14952 break; 14953 } 14954 14955 /* 14956 * Reduce size if you can't copy the whole 14957 * mbuf. If we can't copy the whole mbuf, also 14958 * adjust len so the loop will end after this 14959 * mbuf. 14960 */ 14961 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 14962 mlen = (seglimit - frags - 1) * fragsize; 14963 len = mlen; 14964 *plen = len_cp + len; 14965 } 14966 frags += howmany(mlen, fragsize); 14967 if (frags == 0) 14968 frags++; 14969 seglimit -= frags; 14970 KASSERT(seglimit > 0, 14971 ("%s: seglimit went too low", __func__)); 14972 } 14973 n = m_get(M_NOWAIT, m->m_type); 14974 *np = n; 14975 if (n == NULL) 14976 goto nospace; 14977 n->m_len = mlen; 14978 soff += mlen; 14979 len_cp += n->m_len; 14980 if (m->m_flags & (M_EXT|M_EXTPG)) { 14981 n->m_data = m->m_data + off; 14982 mb_dupcl(n, m); 14983 } else { 14984 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 14985 (u_int)n->m_len); 14986 } 14987 len -= n->m_len; 14988 off = 0; 14989 m = m->m_next; 14990 np = &n->m_next; 14991 if (len || (soff == smb->m_len)) { 14992 /* 14993 * We have more so we move forward or 14994 * we have consumed the entire mbuf and 14995 * len has fell to 0. 14996 */ 14997 soff = 0; 14998 smb = m; 14999 } 15000 15001 } 15002 if (fsb != NULL) { 15003 fsb->m = smb; 15004 fsb->off = soff; 15005 if (smb) { 15006 /* 15007 * Save off the size of the mbuf. We do 15008 * this so that we can recognize when it 15009 * has been trimmed by sbcut() as acks 15010 * come in. 15011 */ 15012 fsb->o_m_len = smb->m_len; 15013 } else { 15014 /* 15015 * This is the case where the next mbuf went to NULL. This 15016 * means with this copy we have sent everything in the sb. 15017 * In theory we could clear the fast_output flag, but lets 15018 * not since its possible that we could get more added 15019 * and acks that call the extend function which would let 15020 * us send more. 15021 */ 15022 fsb->o_m_len = 0; 15023 } 15024 } 15025 return (top); 15026 nospace: 15027 if (top) 15028 m_freem(top); 15029 return (NULL); 15030 15031 } 15032 15033 /* 15034 * This is a copy of m_copym(), taking the TSO segment size/limit 15035 * constraints into account, and advancing the sndptr as it goes. 15036 */ 15037 static struct mbuf * 15038 rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen, 15039 int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff) 15040 { 15041 struct mbuf *m, *n; 15042 int32_t soff; 15043 15044 soff = rack->r_ctl.fsb.off; 15045 m = rack->r_ctl.fsb.m; 15046 if (rack->r_ctl.fsb.o_m_len != m->m_len) { 15047 /* 15048 * The mbuf had the front of it chopped off by an ack 15049 * we need to adjust the soff/off by that difference. 
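 * Illustrative: if o_m_len was recorded as 4096 and acks let sbcut()
 * trim this mbuf down to 2048 bytes, delta is 2048 and soff is
 * pulled back by that amount so it still names the same payload
 * byte.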
15050 */ 15051 uint32_t delta; 15052 15053 delta = rack->r_ctl.fsb.o_m_len - m->m_len; 15054 soff -= delta; 15055 } 15056 KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff)); 15057 KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen)); 15058 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", 15059 __FUNCTION__, 15060 rack, *plen, m, m->m_len)); 15061 /* Save off the right location before we copy and advance */ 15062 *s_soff = soff; 15063 *s_mb = rack->r_ctl.fsb.m; 15064 n = rack_fo_base_copym(m, soff, plen, 15065 &rack->r_ctl.fsb, 15066 seglimit, segsize, rack->r_ctl.fsb.hw_tls); 15067 return (n); 15068 } 15069 15070 static int 15071 rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm, 15072 uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len) 15073 { 15074 /* 15075 * Enter the fast retransmit path. We are given that a sched_pin is 15076 * in place (if accounting is compliled in) and the cycle count taken 15077 * at the entry is in the ts_val. The concept her is that the rsm 15078 * now holds the mbuf offsets and such so we can directly transmit 15079 * without a lot of overhead, the len field is already set for 15080 * us to prohibit us from sending too much (usually its 1MSS). 15081 */ 15082 struct ip *ip = NULL; 15083 struct udphdr *udp = NULL; 15084 struct tcphdr *th = NULL; 15085 struct mbuf *m = NULL; 15086 struct inpcb *inp; 15087 uint8_t *cpto; 15088 struct tcp_log_buffer *lgb; 15089 #ifdef TCP_ACCOUNTING 15090 uint64_t crtsc; 15091 int cnt_thru = 1; 15092 #endif 15093 int doing_tlp = 0; 15094 struct tcpopt to; 15095 u_char opt[TCP_MAXOLEN]; 15096 uint32_t hdrlen, optlen; 15097 int32_t slot, segsiz, max_val, tso = 0, error, flags, ulen = 0; 15098 uint32_t us_cts; 15099 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15100 uint32_t if_hw_tsomaxsegsize; 15101 15102 #ifdef INET6 15103 struct ip6_hdr *ip6 = NULL; 15104 15105 if (rack->r_is_v6) { 15106 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15107 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15108 } else 15109 #endif /* INET6 */ 15110 { 15111 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15112 hdrlen = sizeof(struct tcpiphdr); 15113 } 15114 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15115 goto failed; 15116 } 15117 if (rsm->r_flags & RACK_TLP) 15118 doing_tlp = 1; 15119 startseq = rsm->r_start; 15120 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15121 inp = rack->rc_inp; 15122 to.to_flags = 0; 15123 flags = tcp_outflags[tp->t_state]; 15124 if (flags & (TH_SYN|TH_RST)) { 15125 goto failed; 15126 } 15127 if (rsm->r_flags & RACK_HAS_FIN) { 15128 /* We can't send a FIN here */ 15129 goto failed; 15130 } 15131 if (flags & TH_FIN) { 15132 /* We never send a FIN */ 15133 flags &= ~TH_FIN; 15134 } 15135 if (tp->t_flags & TF_RCVD_TSTMP) { 15136 to.to_tsval = ms_cts + tp->ts_offset; 15137 to.to_tsecr = tp->ts_recent; 15138 to.to_flags = TOF_TS; 15139 } 15140 optlen = tcp_addoptions(&to, opt); 15141 hdrlen += optlen; 15142 udp = rack->r_ctl.fsb.udp; 15143 if (udp) 15144 hdrlen += sizeof(struct udphdr); 15145 if (rack->r_ctl.rc_pace_max_segs) 15146 max_val = rack->r_ctl.rc_pace_max_segs; 15147 else if (rack->rc_user_set_max_segs) 15148 max_val = rack->rc_user_set_max_segs * segsiz; 15149 else 15150 max_val = len; 15151 if ((tp->t_flags & TF_TSO) && 15152 V_tcp_do_tso && 15153 (len > segsiz) && 15154 (tp->t_port == 0)) 15155 tso = 1; 15156 #ifdef INET6 15157 if (MHLEN < hdrlen + max_linkhdr) 15158 
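	/*
	 * The TCP/IP header for this send (including options) plus the
	 * worst-case link-level header will not fit in an ordinary mbuf,
	 * so take a cluster instead.
	 */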
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15159 else 15160 #endif 15161 m = m_gethdr(M_NOWAIT, MT_DATA); 15162 if (m == NULL) 15163 goto failed; 15164 m->m_data += max_linkhdr; 15165 m->m_len = hdrlen; 15166 th = rack->r_ctl.fsb.th; 15167 /* Establish the len to send */ 15168 if (len > max_val) 15169 len = max_val; 15170 if ((tso) && (len + optlen > tp->t_maxseg)) { 15171 uint32_t if_hw_tsomax; 15172 int32_t max_len; 15173 15174 /* extract TSO information */ 15175 if_hw_tsomax = tp->t_tsomax; 15176 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15177 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15178 /* 15179 * Check if we should limit by maximum payload 15180 * length: 15181 */ 15182 if (if_hw_tsomax != 0) { 15183 /* compute maximum TSO length */ 15184 max_len = (if_hw_tsomax - hdrlen - 15185 max_linkhdr); 15186 if (max_len <= 0) { 15187 goto failed; 15188 } else if (len > max_len) { 15189 len = max_len; 15190 } 15191 } 15192 if (len <= segsiz) { 15193 /* 15194 * In case there are too many small fragments don't 15195 * use TSO: 15196 */ 15197 tso = 0; 15198 } 15199 } else { 15200 tso = 0; 15201 } 15202 if ((tso == 0) && (len > segsiz)) 15203 len = segsiz; 15204 us_cts = tcp_get_usecs(tv); 15205 if ((len == 0) || 15206 (len <= MHLEN - hdrlen - max_linkhdr)) { 15207 goto failed; 15208 } 15209 th->th_seq = htonl(rsm->r_start); 15210 th->th_ack = htonl(tp->rcv_nxt); 15211 /* 15212 * The PUSH bit should only be applied 15213 * if the full retransmission is made. If 15214 * we are sending less than this is the 15215 * left hand edge and should not have 15216 * the PUSH bit. 15217 */ 15218 if ((rsm->r_flags & RACK_HAD_PUSH) && 15219 (len == (rsm->r_end - rsm->r_start))) 15220 flags |= TH_PUSH; 15221 th->th_flags = flags; 15222 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15223 if (th->th_win == 0) { 15224 tp->t_sndzerowin++; 15225 tp->t_flags |= TF_RXWIN0SENT; 15226 } else 15227 tp->t_flags &= ~TF_RXWIN0SENT; 15228 if (rsm->r_flags & RACK_TLP) { 15229 /* 15230 * TLP should not count in retran count, but 15231 * in its own bin 15232 */ 15233 counter_u64_add(rack_tlp_retran, 1); 15234 counter_u64_add(rack_tlp_retran_bytes, len); 15235 } else { 15236 tp->t_sndrexmitpack++; 15237 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15238 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15239 } 15240 #ifdef STATS 15241 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15242 len); 15243 #endif 15244 if (rsm->m == NULL) 15245 goto failed; 15246 if (rsm->orig_m_len != rsm->m->m_len) { 15247 /* Fix up the orig_m_len and possibly the mbuf offset */ 15248 rack_adjust_orig_mlen(rsm); 15249 } 15250 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15251 if (len <= segsiz) { 15252 /* 15253 * Must have ran out of mbufs for the copy 15254 * shorten it to no longer need tso. Lets 15255 * not put on sendalot since we are low on 15256 * mbufs. 
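 * (rack_fo_base_copym may also have trimmed len because the TSO
 * segment limit or a TLS boundary was hit; either way what remains
 * is at most one segment, so TSO buys us nothing here.)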
15257 */ 15258 tso = 0; 15259 } 15260 if ((m->m_next == NULL) || (len <= 0)){ 15261 goto failed; 15262 } 15263 if (udp) { 15264 if (rack->r_is_v6) 15265 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15266 else 15267 ulen = hdrlen + len - sizeof(struct ip); 15268 udp->uh_ulen = htons(ulen); 15269 } 15270 m->m_pkthdr.rcvif = (struct ifnet *)0; 15271 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15272 #ifdef INET6 15273 if (rack->r_is_v6) { 15274 if (tp->t_port) { 15275 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15276 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15277 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15278 th->th_sum = htons(0); 15279 UDPSTAT_INC(udps_opackets); 15280 } else { 15281 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15282 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15283 th->th_sum = in6_cksum_pseudo(ip6, 15284 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15285 0); 15286 } 15287 } 15288 #endif 15289 #if defined(INET6) && defined(INET) 15290 else 15291 #endif 15292 #ifdef INET 15293 { 15294 if (tp->t_port) { 15295 m->m_pkthdr.csum_flags = CSUM_UDP; 15296 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15297 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 15298 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15299 th->th_sum = htons(0); 15300 UDPSTAT_INC(udps_opackets); 15301 } else { 15302 m->m_pkthdr.csum_flags = CSUM_TCP; 15303 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15304 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15305 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15306 IPPROTO_TCP + len + optlen)); 15307 } 15308 /* IP version must be set here for ipv4/ipv6 checking later */ 15309 KASSERT(ip->ip_v == IPVERSION, 15310 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15311 } 15312 #endif 15313 if (tso) { 15314 KASSERT(len > tp->t_maxseg - optlen, 15315 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15316 m->m_pkthdr.csum_flags |= CSUM_TSO; 15317 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15318 } 15319 #ifdef INET6 15320 if (rack->r_is_v6) { 15321 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15322 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15323 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15324 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15325 else 15326 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15327 } 15328 #endif 15329 #if defined(INET) && defined(INET6) 15330 else 15331 #endif 15332 #ifdef INET 15333 { 15334 ip->ip_len = htons(m->m_pkthdr.len); 15335 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15336 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15337 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15338 if (tp->t_port == 0 || len < V_tcp_minmss) { 15339 ip->ip_off |= htons(IP_DF); 15340 } 15341 } else { 15342 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15343 } 15344 } 15345 #endif 15346 /* Time to copy in our header */ 15347 cpto = mtod(m, uint8_t *); 15348 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15349 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15350 if (optlen) { 15351 bcopy(opt, th + 1, optlen); 15352 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15353 } else { 15354 th->th_off = sizeof(struct tcphdr) >> 2; 15355 } 15356 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15357 union tcp_log_stackspecific log; 15358 15359 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15360 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15361 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15362 if (rack->rack_no_prr) 15363 
log.u_bbr.flex1 = 0; 15364 else 15365 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15366 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15367 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15368 log.u_bbr.flex4 = max_val; 15369 log.u_bbr.flex5 = 0; 15370 /* Save off the early/late values */ 15371 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15372 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15373 log.u_bbr.bw_inuse = rack_get_bw(rack); 15374 log.u_bbr.flex8 = 1; 15375 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15376 log.u_bbr.flex7 = 55; 15377 log.u_bbr.pkts_out = tp->t_maxseg; 15378 log.u_bbr.timeStamp = cts; 15379 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15380 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15381 log.u_bbr.delivered = 0; 15382 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15383 len, &log, false, NULL, NULL, 0, tv); 15384 } else 15385 lgb = NULL; 15386 #ifdef INET6 15387 if (rack->r_is_v6) { 15388 error = ip6_output(m, NULL, 15389 &inp->inp_route6, 15390 0, NULL, NULL, inp); 15391 } 15392 #endif 15393 #if defined(INET) && defined(INET6) 15394 else 15395 #endif 15396 #ifdef INET 15397 { 15398 error = ip_output(m, NULL, 15399 &inp->inp_route, 15400 0, 0, inp); 15401 } 15402 #endif 15403 m = NULL; 15404 if (lgb) { 15405 lgb->tlb_errno = error; 15406 lgb = NULL; 15407 } 15408 if (error) { 15409 goto failed; 15410 } 15411 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15412 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15413 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15414 rack->rc_tlp_in_progress = 1; 15415 rack->r_ctl.rc_tlp_cnt_out++; 15416 } 15417 if (error == 0) 15418 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15419 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15420 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15421 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15422 rack->r_ctl.retran_during_recovery += len; 15423 { 15424 int idx; 15425 15426 idx = (len / segsiz) + 3; 15427 if (idx >= TCP_MSS_ACCT_ATIMER) 15428 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15429 else 15430 counter_u64_add(rack_out_size[idx], 1); 15431 } 15432 if (tp->t_rtttime == 0) { 15433 tp->t_rtttime = ticks; 15434 tp->t_rtseq = startseq; 15435 KMOD_TCPSTAT_INC(tcps_segstimed); 15436 } 15437 counter_u64_add(rack_fto_rsm_send, 1); 15438 if (error && (error == ENOBUFS)) { 15439 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15440 if (rack->rc_enobuf < 0x7f) 15441 rack->rc_enobuf++; 15442 if (slot < (10 * HPTS_USEC_IN_MSEC)) 15443 slot = 10 * HPTS_USEC_IN_MSEC; 15444 } else 15445 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 15446 if ((slot == 0) || 15447 (rack->rc_always_pace == 0) || 15448 (rack->r_rr_config == 1)) { 15449 /* 15450 * We have no pacing set or we 15451 * are using old-style rack or 15452 * we are overriden to use the old 1ms pacing. 15453 */ 15454 slot = rack->r_ctl.rc_min_to; 15455 } 15456 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 15457 if (rack->r_must_retran) { 15458 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 15459 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 15460 /* 15461 * We have retransmitted all we need. 
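 * That is, this send reached rc_snd_max_at_rto, so the forced
 * retransmission phase that began at the RTO (or MTU change) is
 * complete and r_must_retran can be cleared.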
15462 */ 15463 rack->r_must_retran = 0; 15464 rack->r_ctl.rc_out_at_rto = 0; 15465 } 15466 } 15467 #ifdef TCP_ACCOUNTING 15468 crtsc = get_cyclecount(); 15469 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15470 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 15471 } 15472 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 15473 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15474 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 15475 } 15476 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 15477 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15478 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 15479 } 15480 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 15481 sched_unpin(); 15482 #endif 15483 return (0); 15484 failed: 15485 if (m) 15486 m_free(m); 15487 return (-1); 15488 } 15489 15490 static void 15491 rack_sndbuf_autoscale(struct tcp_rack *rack) 15492 { 15493 /* 15494 * Automatic sizing of send socket buffer. Often the send buffer 15495 * size is not optimally adjusted to the actual network conditions 15496 * at hand (delay bandwidth product). Setting the buffer size too 15497 * small limits throughput on links with high bandwidth and high 15498 * delay (eg. trans-continental/oceanic links). Setting the 15499 * buffer size too big consumes too much real kernel memory, 15500 * especially with many connections on busy servers. 15501 * 15502 * The criteria to step up the send buffer one notch are: 15503 * 1. receive window of remote host is larger than send buffer 15504 * (with a fudge factor of 5/4th); 15505 * 2. send buffer is filled to 7/8th with data (so we actually 15506 * have data to make use of it); 15507 * 3. send buffer fill has not hit maximal automatic size; 15508 * 4. our send window (slow start and cogestion controlled) is 15509 * larger than sent but unacknowledged data in send buffer. 15510 * 15511 * Note that the rack version moves things much faster since 15512 * we want to avoid hitting cache lines in the rack_fast_output() 15513 * path so this is called much less often and thus moves 15514 * the SB forward by a percentage. 15515 */ 15516 struct socket *so; 15517 struct tcpcb *tp; 15518 uint32_t sendwin, scaleup; 15519 15520 tp = rack->rc_tp; 15521 so = rack->rc_inp->inp_socket; 15522 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 15523 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 15524 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 15525 sbused(&so->so_snd) >= 15526 (so->so_snd.sb_hiwat / 8 * 7) && 15527 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 15528 sendwin >= (sbused(&so->so_snd) - 15529 (tp->snd_nxt - tp->snd_una))) { 15530 if (rack_autosndbuf_inc) 15531 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 15532 else 15533 scaleup = V_tcp_autosndbuf_inc; 15534 if (scaleup < V_tcp_autosndbuf_inc) 15535 scaleup = V_tcp_autosndbuf_inc; 15536 scaleup += so->so_snd.sb_hiwat; 15537 if (scaleup > V_tcp_autosndbuf_max) 15538 scaleup = V_tcp_autosndbuf_max; 15539 if (!sbreserve_locked(&so->so_snd, scaleup, so, curthread)) 15540 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 15541 } 15542 } 15543 } 15544 15545 static int 15546 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 15547 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 15548 { 15549 /* 15550 * Enter to do fast output. 
We are given that the sched_pin is 15551 * in place (if accounting is compiled in) and the cycle count taken 15552 * at entry is in place in ts_val. The idea here is that 15553 * we know how many more bytes needs to be sent (presumably either 15554 * during pacing or to fill the cwnd and that was greater than 15555 * the max-burst). We have how much to send and all the info we 15556 * need to just send. 15557 */ 15558 struct ip *ip = NULL; 15559 struct udphdr *udp = NULL; 15560 struct tcphdr *th = NULL; 15561 struct mbuf *m, *s_mb; 15562 struct inpcb *inp; 15563 uint8_t *cpto; 15564 struct tcp_log_buffer *lgb; 15565 #ifdef TCP_ACCOUNTING 15566 uint64_t crtsc; 15567 #endif 15568 struct tcpopt to; 15569 u_char opt[TCP_MAXOLEN]; 15570 uint32_t hdrlen, optlen; 15571 int cnt_thru = 1; 15572 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, flags, ulen = 0; 15573 uint32_t us_cts, s_soff; 15574 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15575 uint32_t if_hw_tsomaxsegsize; 15576 uint16_t add_flag = RACK_SENT_FP; 15577 #ifdef INET6 15578 struct ip6_hdr *ip6 = NULL; 15579 15580 if (rack->r_is_v6) { 15581 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15582 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15583 } else 15584 #endif /* INET6 */ 15585 { 15586 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15587 hdrlen = sizeof(struct tcpiphdr); 15588 } 15589 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15590 m = NULL; 15591 goto failed; 15592 } 15593 startseq = tp->snd_max; 15594 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15595 inp = rack->rc_inp; 15596 len = rack->r_ctl.fsb.left_to_send; 15597 to.to_flags = 0; 15598 flags = rack->r_ctl.fsb.tcp_flags; 15599 if (tp->t_flags & TF_RCVD_TSTMP) { 15600 to.to_tsval = ms_cts + tp->ts_offset; 15601 to.to_tsecr = tp->ts_recent; 15602 to.to_flags = TOF_TS; 15603 } 15604 optlen = tcp_addoptions(&to, opt); 15605 hdrlen += optlen; 15606 udp = rack->r_ctl.fsb.udp; 15607 if (udp) 15608 hdrlen += sizeof(struct udphdr); 15609 if (rack->r_ctl.rc_pace_max_segs) 15610 max_val = rack->r_ctl.rc_pace_max_segs; 15611 else if (rack->rc_user_set_max_segs) 15612 max_val = rack->rc_user_set_max_segs * segsiz; 15613 else 15614 max_val = len; 15615 if ((tp->t_flags & TF_TSO) && 15616 V_tcp_do_tso && 15617 (len > segsiz) && 15618 (tp->t_port == 0)) 15619 tso = 1; 15620 again: 15621 #ifdef INET6 15622 if (MHLEN < hdrlen + max_linkhdr) 15623 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15624 else 15625 #endif 15626 m = m_gethdr(M_NOWAIT, MT_DATA); 15627 if (m == NULL) 15628 goto failed; 15629 m->m_data += max_linkhdr; 15630 m->m_len = hdrlen; 15631 th = rack->r_ctl.fsb.th; 15632 /* Establish the len to send */ 15633 if (len > max_val) 15634 len = max_val; 15635 if ((tso) && (len + optlen > tp->t_maxseg)) { 15636 uint32_t if_hw_tsomax; 15637 int32_t max_len; 15638 15639 /* extract TSO information */ 15640 if_hw_tsomax = tp->t_tsomax; 15641 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15642 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15643 /* 15644 * Check if we should limit by maximum payload 15645 * length: 15646 */ 15647 if (if_hw_tsomax != 0) { 15648 /* compute maximum TSO length */ 15649 max_len = (if_hw_tsomax - hdrlen - 15650 max_linkhdr); 15651 if (max_len <= 0) { 15652 goto failed; 15653 } else if (len > max_len) { 15654 len = max_len; 15655 } 15656 } 15657 if (len <= segsiz) { 15658 /* 15659 * In case there are too many small fragments don't 15660 * use TSO: 15661 */ 15662 tso = 0; 15663 } 15664 } else { 15665 tso = 0; 15666 
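		/* Either TSO is not in use or the payload fits in a single segment anyway. */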
} 15667 if ((tso == 0) && (len > segsiz)) 15668 len = segsiz; 15669 us_cts = tcp_get_usecs(tv); 15670 if ((len == 0) || 15671 (len <= MHLEN - hdrlen - max_linkhdr)) { 15672 goto failed; 15673 } 15674 sb_offset = tp->snd_max - tp->snd_una; 15675 th->th_seq = htonl(tp->snd_max); 15676 th->th_ack = htonl(tp->rcv_nxt); 15677 th->th_flags = flags; 15678 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15679 if (th->th_win == 0) { 15680 tp->t_sndzerowin++; 15681 tp->t_flags |= TF_RXWIN0SENT; 15682 } else 15683 tp->t_flags &= ~TF_RXWIN0SENT; 15684 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 15685 KMOD_TCPSTAT_INC(tcps_sndpack); 15686 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 15687 #ifdef STATS 15688 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 15689 len); 15690 #endif 15691 if (rack->r_ctl.fsb.m == NULL) 15692 goto failed; 15693 15694 /* s_mb and s_soff are saved for rack_log_output */ 15695 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 15696 &s_mb, &s_soff); 15697 if (len <= segsiz) { 15698 /* 15699 * Must have ran out of mbufs for the copy 15700 * shorten it to no longer need tso. Lets 15701 * not put on sendalot since we are low on 15702 * mbufs. 15703 */ 15704 tso = 0; 15705 } 15706 if (rack->r_ctl.fsb.rfo_apply_push && 15707 (len == rack->r_ctl.fsb.left_to_send)) { 15708 th->th_flags |= TH_PUSH; 15709 add_flag |= RACK_HAD_PUSH; 15710 } 15711 if ((m->m_next == NULL) || (len <= 0)){ 15712 goto failed; 15713 } 15714 if (udp) { 15715 if (rack->r_is_v6) 15716 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15717 else 15718 ulen = hdrlen + len - sizeof(struct ip); 15719 udp->uh_ulen = htons(ulen); 15720 } 15721 m->m_pkthdr.rcvif = (struct ifnet *)0; 15722 if (tp->t_state == TCPS_ESTABLISHED && 15723 (tp->t_flags2 & TF2_ECN_PERMIT)) { 15724 /* 15725 * If the peer has ECN, mark data packets with ECN capable 15726 * transmission (ECT). Ignore pure ack packets, 15727 * retransmissions. 15728 */ 15729 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max)) { 15730 #ifdef INET6 15731 if (rack->r_is_v6) 15732 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 15733 else 15734 #endif 15735 ip->ip_tos |= IPTOS_ECN_ECT0; 15736 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 15737 /* 15738 * Reply with proper ECN notifications. 15739 * Only set CWR on new data segments. 
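 * TF2_ECN_SND_CWR is set when an ECE from the peer has been
 * processed; we echo CWR at most once per such signal and clear
 * the flag again right away.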
15740 */ 15741 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 15742 flags |= TH_CWR; 15743 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 15744 } 15745 } 15746 if (tp->t_flags2 & TF2_ECN_SND_ECE) 15747 flags |= TH_ECE; 15748 } 15749 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15750 #ifdef INET6 15751 if (rack->r_is_v6) { 15752 if (tp->t_port) { 15753 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15754 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15755 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15756 th->th_sum = htons(0); 15757 UDPSTAT_INC(udps_opackets); 15758 } else { 15759 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15760 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15761 th->th_sum = in6_cksum_pseudo(ip6, 15762 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15763 0); 15764 } 15765 } 15766 #endif 15767 #if defined(INET6) && defined(INET) 15768 else 15769 #endif 15770 #ifdef INET 15771 { 15772 if (tp->t_port) { 15773 m->m_pkthdr.csum_flags = CSUM_UDP; 15774 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15775 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 15776 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15777 th->th_sum = htons(0); 15778 UDPSTAT_INC(udps_opackets); 15779 } else { 15780 m->m_pkthdr.csum_flags = CSUM_TCP; 15781 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15782 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15783 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15784 IPPROTO_TCP + len + optlen)); 15785 } 15786 /* IP version must be set here for ipv4/ipv6 checking later */ 15787 KASSERT(ip->ip_v == IPVERSION, 15788 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15789 } 15790 #endif 15791 if (tso) { 15792 KASSERT(len > tp->t_maxseg - optlen, 15793 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15794 m->m_pkthdr.csum_flags |= CSUM_TSO; 15795 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15796 } 15797 #ifdef INET6 15798 if (rack->r_is_v6) { 15799 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15800 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15801 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15802 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15803 else 15804 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15805 } 15806 #endif 15807 #if defined(INET) && defined(INET6) 15808 else 15809 #endif 15810 #ifdef INET 15811 { 15812 ip->ip_len = htons(m->m_pkthdr.len); 15813 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15814 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15815 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15816 if (tp->t_port == 0 || len < V_tcp_minmss) { 15817 ip->ip_off |= htons(IP_DF); 15818 } 15819 } else { 15820 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15821 } 15822 } 15823 #endif 15824 /* Time to copy in our header */ 15825 cpto = mtod(m, uint8_t *); 15826 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15827 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15828 if (optlen) { 15829 bcopy(opt, th + 1, optlen); 15830 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15831 } else { 15832 th->th_off = sizeof(struct tcphdr) >> 2; 15833 } 15834 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15835 union tcp_log_stackspecific log; 15836 15837 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15838 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 15839 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 15840 if (rack->rack_no_prr) 15841 log.u_bbr.flex1 = 0; 15842 else 15843 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15844 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15845 
log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15846 log.u_bbr.flex4 = max_val; 15847 log.u_bbr.flex5 = 0; 15848 /* Save off the early/late values */ 15849 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15850 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15851 log.u_bbr.bw_inuse = rack_get_bw(rack); 15852 log.u_bbr.flex8 = 0; 15853 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15854 log.u_bbr.flex7 = 44; 15855 log.u_bbr.pkts_out = tp->t_maxseg; 15856 log.u_bbr.timeStamp = cts; 15857 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15858 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15859 log.u_bbr.delivered = 0; 15860 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15861 len, &log, false, NULL, NULL, 0, tv); 15862 } else 15863 lgb = NULL; 15864 #ifdef INET6 15865 if (rack->r_is_v6) { 15866 error = ip6_output(m, NULL, 15867 &inp->inp_route6, 15868 0, NULL, NULL, inp); 15869 } 15870 #endif 15871 #if defined(INET) && defined(INET6) 15872 else 15873 #endif 15874 #ifdef INET 15875 { 15876 error = ip_output(m, NULL, 15877 &inp->inp_route, 15878 0, 0, inp); 15879 } 15880 #endif 15881 if (lgb) { 15882 lgb->tlb_errno = error; 15883 lgb = NULL; 15884 } 15885 if (error) { 15886 *send_err = error; 15887 m = NULL; 15888 goto failed; 15889 } 15890 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 15891 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 15892 m = NULL; 15893 if (tp->snd_una == tp->snd_max) { 15894 rack->r_ctl.rc_tlp_rxt_last_time = cts; 15895 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 15896 tp->t_acktime = ticks; 15897 } 15898 if (error == 0) 15899 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 15900 15901 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15902 tot_len += len; 15903 if ((tp->t_flags & TF_GPUTINPROG) == 0) 15904 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 15905 tp->snd_max += len; 15906 tp->snd_nxt = tp->snd_max; 15907 { 15908 int idx; 15909 15910 idx = (len / segsiz) + 3; 15911 if (idx >= TCP_MSS_ACCT_ATIMER) 15912 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15913 else 15914 counter_u64_add(rack_out_size[idx], 1); 15915 } 15916 if (len <= rack->r_ctl.fsb.left_to_send) 15917 rack->r_ctl.fsb.left_to_send -= len; 15918 else 15919 rack->r_ctl.fsb.left_to_send = 0; 15920 if (rack->r_ctl.fsb.left_to_send < segsiz) { 15921 rack->r_fast_output = 0; 15922 rack->r_ctl.fsb.left_to_send = 0; 15923 /* At the end of fast_output scale up the sb */ 15924 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 15925 rack_sndbuf_autoscale(rack); 15926 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 15927 } 15928 if (tp->t_rtttime == 0) { 15929 tp->t_rtttime = ticks; 15930 tp->t_rtseq = startseq; 15931 KMOD_TCPSTAT_INC(tcps_segstimed); 15932 } 15933 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 15934 (max_val > len) && 15935 (tso == 0)) { 15936 max_val -= len; 15937 len = segsiz; 15938 th = rack->r_ctl.fsb.th; 15939 cnt_thru++; 15940 goto again; 15941 } 15942 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15943 counter_u64_add(rack_fto_send, 1); 15944 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 15945 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 15946 #ifdef TCP_ACCOUNTING 15947 crtsc = get_cyclecount(); 15948 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15949 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 15950 } 15951 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 15952 if 
(tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15953 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 15954 } 15955 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 15956 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 15957 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 15958 } 15959 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 15960 sched_unpin(); 15961 #endif 15962 return (0); 15963 failed: 15964 if (m) 15965 m_free(m); 15966 rack->r_fast_output = 0; 15967 return (-1); 15968 } 15969 15970 static int 15971 rack_output(struct tcpcb *tp) 15972 { 15973 struct socket *so; 15974 uint32_t recwin; 15975 uint32_t sb_offset, s_moff = 0; 15976 int32_t len, flags, error = 0; 15977 struct mbuf *m, *s_mb = NULL; 15978 struct mbuf *mb; 15979 uint32_t if_hw_tsomaxsegcount = 0; 15980 uint32_t if_hw_tsomaxsegsize; 15981 int32_t segsiz, minseg; 15982 long tot_len_this_send = 0; 15983 #ifdef INET 15984 struct ip *ip = NULL; 15985 #endif 15986 #ifdef TCPDEBUG 15987 struct ipovly *ipov = NULL; 15988 #endif 15989 struct udphdr *udp = NULL; 15990 struct tcp_rack *rack; 15991 struct tcphdr *th; 15992 uint8_t pass = 0; 15993 uint8_t mark = 0; 15994 uint8_t wanted_cookie = 0; 15995 u_char opt[TCP_MAXOLEN]; 15996 unsigned ipoptlen, optlen, hdrlen, ulen=0; 15997 uint32_t rack_seq; 15998 15999 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16000 unsigned ipsec_optlen = 0; 16001 16002 #endif 16003 int32_t idle, sendalot; 16004 int32_t sub_from_prr = 0; 16005 volatile int32_t sack_rxmit; 16006 struct rack_sendmap *rsm = NULL; 16007 int32_t tso, mtu; 16008 struct tcpopt to; 16009 int32_t slot = 0; 16010 int32_t sup_rack = 0; 16011 uint32_t cts, ms_cts, delayed, early; 16012 uint16_t add_flag = RACK_SENT_SP; 16013 uint8_t hpts_calling, doing_tlp = 0; 16014 uint32_t cwnd_to_use, pace_max_seg; 16015 int32_t do_a_prefetch = 0; 16016 int32_t prefetch_rsm = 0; 16017 int32_t orig_len = 0; 16018 struct timeval tv; 16019 int32_t prefetch_so_done = 0; 16020 struct tcp_log_buffer *lgb; 16021 struct inpcb *inp; 16022 struct sockbuf *sb; 16023 uint64_t ts_val = 0; 16024 #ifdef TCP_ACCOUNTING 16025 uint64_t crtsc; 16026 #endif 16027 #ifdef INET6 16028 struct ip6_hdr *ip6 = NULL; 16029 int32_t isipv6; 16030 #endif 16031 uint8_t filled_all = 0; 16032 bool hw_tls = false; 16033 16034 /* setup and take the cache hits here */ 16035 rack = (struct tcp_rack *)tp->t_fb_ptr; 16036 #ifdef TCP_ACCOUNTING 16037 sched_pin(); 16038 ts_val = get_cyclecount(); 16039 #endif 16040 hpts_calling = rack->rc_inp->inp_hpts_calls; 16041 NET_EPOCH_ASSERT(); 16042 INP_WLOCK_ASSERT(rack->rc_inp); 16043 #ifdef TCP_OFFLOAD 16044 if (tp->t_flags & TF_TOE) { 16045 #ifdef TCP_ACCOUNTING 16046 sched_unpin(); 16047 #endif 16048 return (tcp_offload_output(tp)); 16049 } 16050 #endif 16051 /* 16052 * For TFO connections in SYN_RECEIVED, only allow the initial 16053 * SYN|ACK and those sent by the retransmit timer. 
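 * snd_max having advanced past snd_una means the SYN|ACK already
 * went out once, and a NULL rc_resend means the retransmit timer
 * did not ask for this send, so we return without transmitting.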
16054 */ 16055 if (IS_FASTOPEN(tp->t_flags) && 16056 (tp->t_state == TCPS_SYN_RECEIVED) && 16057 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16058 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16059 #ifdef TCP_ACCOUNTING 16060 sched_unpin(); 16061 #endif 16062 return (0); 16063 } 16064 #ifdef INET6 16065 if (rack->r_state) { 16066 /* Use the cache line loaded if possible */ 16067 isipv6 = rack->r_is_v6; 16068 } else { 16069 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16070 } 16071 #endif 16072 early = 0; 16073 cts = tcp_get_usecs(&tv); 16074 ms_cts = tcp_tv_to_mssectick(&tv); 16075 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16076 rack->rc_inp->inp_in_hpts) { 16077 /* 16078 * We are on the hpts for some timer but not hptsi output. 16079 * Remove from the hpts unconditionally. 16080 */ 16081 rack_timer_cancel(tp, rack, cts, __LINE__); 16082 } 16083 /* Are we pacing and late? */ 16084 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16085 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16086 /* We are delayed */ 16087 delayed = cts - rack->r_ctl.rc_last_output_to; 16088 } else { 16089 delayed = 0; 16090 } 16091 /* Do the timers, which may override the pacer */ 16092 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16093 if (rack_process_timers(tp, rack, cts, hpts_calling)) { 16094 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16095 #ifdef TCP_ACCOUNTING 16096 sched_unpin(); 16097 #endif 16098 return (0); 16099 } 16100 } 16101 if (rack->rc_in_persist) { 16102 if (rack->rc_inp->inp_in_hpts == 0) { 16103 /* Timer is not running */ 16104 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16105 } 16106 #ifdef TCP_ACCOUNTING 16107 sched_unpin(); 16108 #endif 16109 return (0); 16110 } 16111 if ((rack->r_timer_override) || 16112 (rack->rc_ack_can_sendout_data) || 16113 (delayed) || 16114 (tp->t_state < TCPS_ESTABLISHED)) { 16115 rack->rc_ack_can_sendout_data = 0; 16116 if (rack->rc_inp->inp_in_hpts) 16117 tcp_hpts_remove(rack->rc_inp, HPTS_REMOVE_OUTPUT); 16118 } else if (rack->rc_inp->inp_in_hpts) { 16119 /* 16120 * On the hpts you can't pass even if ACKNOW is on, we will 16121 * when the hpts fires. 
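 * The pacer owns the next transmit opportunity, so all that happens
 * here is that the blocked time is charged to SND_BLOCKED (when
 * TCP_ACCOUNTING is compiled in) before returning.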
16122 */ 16123 #ifdef TCP_ACCOUNTING 16124 crtsc = get_cyclecount(); 16125 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16126 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16127 } 16128 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16129 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16130 tp->tcp_cnt_counters[SND_BLOCKED]++; 16131 } 16132 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16133 sched_unpin(); 16134 #endif 16135 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16136 return (0); 16137 } 16138 rack->rc_inp->inp_hpts_calls = 0; 16139 /* Finish out both pacing early and late accounting */ 16140 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16141 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16142 early = rack->r_ctl.rc_last_output_to - cts; 16143 } else 16144 early = 0; 16145 if (delayed) { 16146 rack->r_ctl.rc_agg_delayed += delayed; 16147 rack->r_late = 1; 16148 } else if (early) { 16149 rack->r_ctl.rc_agg_early += early; 16150 rack->r_early = 1; 16151 } 16152 /* Now that early/late accounting is done turn off the flag */ 16153 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16154 rack->r_wanted_output = 0; 16155 rack->r_timer_override = 0; 16156 if ((tp->t_state != rack->r_state) && 16157 TCPS_HAVEESTABLISHED(tp->t_state)) { 16158 rack_set_state(tp, rack); 16159 } 16160 if ((rack->r_fast_output) && 16161 (tp->rcv_numsacks == 0)) { 16162 int ret; 16163 16164 error = 0; 16165 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16166 if (ret >= 0) 16167 return(ret); 16168 else if (error) { 16169 inp = rack->rc_inp; 16170 so = inp->inp_socket; 16171 sb = &so->so_snd; 16172 goto nomore; 16173 } 16174 } 16175 inp = rack->rc_inp; 16176 /* 16177 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16178 * only allow the initial SYN or SYN|ACK and those sent 16179 * by the retransmit timer. 16180 */ 16181 if (IS_FASTOPEN(tp->t_flags) && 16182 ((tp->t_state == TCPS_SYN_RECEIVED) || 16183 (tp->t_state == TCPS_SYN_SENT)) && 16184 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16185 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16186 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16187 so = inp->inp_socket; 16188 sb = &so->so_snd; 16189 goto just_return_nolock; 16190 } 16191 /* 16192 * Determine length of data that should be transmitted, and flags 16193 * that will be used. If there is some data or critical controls 16194 * (SYN, RST) to send, then transmit; otherwise, investigate 16195 * further. 
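 * Before that, idle handling runs: with tp->t_idle_reduce set, a
 * connection that has been quiet for at least t_rxtcur has its
 * congestion state refreshed via rack_cc_after_idle(), and a quiet
 * period longer than rack_min_probertt_hold is counted as a probe rtt.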
16196 */ 16197 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16198 if (tp->t_idle_reduce) { 16199 if (idle && ((ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16200 rack_cc_after_idle(rack, tp); 16201 } 16202 tp->t_flags &= ~TF_LASTIDLE; 16203 if (idle) { 16204 if (tp->t_flags & TF_MORETOCOME) { 16205 tp->t_flags |= TF_LASTIDLE; 16206 idle = 0; 16207 } 16208 } 16209 if ((tp->snd_una == tp->snd_max) && 16210 rack->r_ctl.rc_went_idle_time && 16211 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16212 idle = cts - rack->r_ctl.rc_went_idle_time; 16213 if (idle > rack_min_probertt_hold) { 16214 /* Count as a probe rtt */ 16215 if (rack->in_probe_rtt == 0) { 16216 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16217 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16218 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16219 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16220 } else { 16221 rack_exit_probertt(rack, cts); 16222 } 16223 } 16224 idle = 0; 16225 } 16226 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16227 rack_init_fsb_block(tp, rack); 16228 again: 16229 /* 16230 * If we've recently taken a timeout, snd_max will be greater than 16231 * snd_nxt. There may be SACK information that allows us to avoid 16232 * resending already delivered data. Adjust snd_nxt accordingly. 16233 */ 16234 sendalot = 0; 16235 cts = tcp_get_usecs(&tv); 16236 ms_cts = tcp_tv_to_mssectick(&tv); 16237 tso = 0; 16238 mtu = 0; 16239 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16240 minseg = segsiz; 16241 if (rack->r_ctl.rc_pace_max_segs == 0) 16242 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16243 else 16244 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16245 sb_offset = tp->snd_max - tp->snd_una; 16246 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16247 flags = tcp_outflags[tp->t_state]; 16248 while (rack->rc_free_cnt < rack_free_cache) { 16249 rsm = rack_alloc(rack); 16250 if (rsm == NULL) { 16251 if (inp->inp_hpts_calls) 16252 /* Retry in a ms */ 16253 slot = (1 * HPTS_USEC_IN_MSEC); 16254 so = inp->inp_socket; 16255 sb = &so->so_snd; 16256 goto just_return_nolock; 16257 } 16258 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16259 rack->rc_free_cnt++; 16260 rsm = NULL; 16261 } 16262 if (inp->inp_hpts_calls) 16263 inp->inp_hpts_calls = 0; 16264 sack_rxmit = 0; 16265 len = 0; 16266 rsm = NULL; 16267 if (flags & TH_RST) { 16268 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16269 so = inp->inp_socket; 16270 sb = &so->so_snd; 16271 goto send; 16272 } 16273 if (rack->r_ctl.rc_resend) { 16274 /* Retransmit timer */ 16275 rsm = rack->r_ctl.rc_resend; 16276 rack->r_ctl.rc_resend = NULL; 16277 rsm->r_flags &= ~RACK_TLP; 16278 len = rsm->r_end - rsm->r_start; 16279 sack_rxmit = 1; 16280 sendalot = 0; 16281 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16282 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16283 __func__, __LINE__, 16284 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16285 sb_offset = rsm->r_start - tp->snd_una; 16286 if (len >= segsiz) 16287 len = segsiz; 16288 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16289 /* We have a retransmit that takes precedence */ 16290 rsm->r_flags &= ~RACK_TLP; 16291 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16292 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16293 /* Enter recovery if not induced by a time-out */ 16294 rack->r_ctl.rc_rsm_start = rsm->r_start; 16295 rack->r_ctl.rc_cwnd_at = tp->snd_cwnd; 16296 
rack->r_ctl.rc_ssthresh_at = tp->snd_ssthresh; 16297 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una); 16298 } 16299 #ifdef INVARIANTS 16300 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16301 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16302 tp, rack, rsm, rsm->r_start, tp->snd_una); 16303 } 16304 #endif 16305 len = rsm->r_end - rsm->r_start; 16306 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16307 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16308 __func__, __LINE__, 16309 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16310 sb_offset = rsm->r_start - tp->snd_una; 16311 sendalot = 0; 16312 if (len >= segsiz) 16313 len = segsiz; 16314 if (len > 0) { 16315 sack_rxmit = 1; 16316 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16317 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16318 min(len, segsiz)); 16319 counter_u64_add(rack_rtm_prr_retran, 1); 16320 } 16321 } else if (rack->r_ctl.rc_tlpsend) { 16322 /* Tail loss probe */ 16323 long cwin; 16324 long tlen; 16325 16326 doing_tlp = 1; 16327 /* 16328 * Check if we can do a TLP with a RACK'd packet 16329 * this can happen if we are not doing the rack 16330 * cheat and we skipped to a TLP and it 16331 * went off. 16332 */ 16333 rsm = rack->r_ctl.rc_tlpsend; 16334 rsm->r_flags |= RACK_TLP; 16335 16336 rack->r_ctl.rc_tlpsend = NULL; 16337 sack_rxmit = 1; 16338 tlen = rsm->r_end - rsm->r_start; 16339 if (tlen > segsiz) 16340 tlen = segsiz; 16341 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16342 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16343 __func__, __LINE__, 16344 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16345 sb_offset = rsm->r_start - tp->snd_una; 16346 cwin = min(tp->snd_wnd, tlen); 16347 len = cwin; 16348 } 16349 if (rack->r_must_retran && 16350 (rsm == NULL)) { 16351 /* 16352 * Non-Sack and we had a RTO or MTU change, we 16353 * need to retransmit until we reach 16354 * the former snd_max (rack->r_ctl.rc_snd_max_at_rto). 16355 */ 16356 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 16357 int sendwin, flight; 16358 16359 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 16360 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 16361 if (flight >= sendwin) { 16362 so = inp->inp_socket; 16363 sb = &so->so_snd; 16364 goto just_return_nolock; 16365 } 16366 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 16367 KASSERT(rsm != NULL, ("rsm is NULL rack:%p r_must_retran set", rack)); 16368 if (rsm == NULL) { 16369 /* TSNH */ 16370 rack->r_must_retran = 0; 16371 rack->r_ctl.rc_out_at_rto = 0; 16372 rack->r_must_retran = 0; 16373 so = inp->inp_socket; 16374 sb = &so->so_snd; 16375 goto just_return_nolock; 16376 } 16377 sack_rxmit = 1; 16378 len = rsm->r_end - rsm->r_start; 16379 sendalot = 0; 16380 sb_offset = rsm->r_start - tp->snd_una; 16381 if (len >= segsiz) 16382 len = segsiz; 16383 } else { 16384 /* We must be done if there is nothing outstanding */ 16385 rack->r_must_retran = 0; 16386 rack->r_ctl.rc_out_at_rto = 0; 16387 } 16388 } 16389 /* 16390 * Enforce a connection sendmap count limit if set 16391 * as long as we are not retransmiting. 
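 * Once rc_num_maps_alloced reaches V_tcp_map_entries_limit we stop
 * queueing new data for this connection (reporting the event once
 * per connection) rather than allocating yet more sendmap entries.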
16392 */ 16393 if ((rsm == NULL) && 16394 (rack->do_detection == 0) && 16395 (V_tcp_map_entries_limit > 0) && 16396 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 16397 counter_u64_add(rack_to_alloc_limited, 1); 16398 if (!rack->alloc_limit_reported) { 16399 rack->alloc_limit_reported = 1; 16400 counter_u64_add(rack_alloc_limited_conns, 1); 16401 } 16402 so = inp->inp_socket; 16403 sb = &so->so_snd; 16404 goto just_return_nolock; 16405 } 16406 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 16407 /* we are retransmitting the fin */ 16408 len--; 16409 if (len) { 16410 /* 16411 * When retransmitting data do *not* include the 16412 * FIN. This could happen from a TLP probe. 16413 */ 16414 flags &= ~TH_FIN; 16415 } 16416 } 16417 #ifdef INVARIANTS 16418 /* For debugging */ 16419 rack->r_ctl.rc_rsm_at_retran = rsm; 16420 #endif 16421 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 16422 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 16423 int ret; 16424 16425 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len); 16426 if (ret == 0) 16427 return (0); 16428 } 16429 so = inp->inp_socket; 16430 sb = &so->so_snd; 16431 if (do_a_prefetch == 0) { 16432 kern_prefetch(sb, &do_a_prefetch); 16433 do_a_prefetch = 1; 16434 } 16435 #ifdef NETFLIX_SHARED_CWND 16436 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 16437 rack->rack_enable_scwnd) { 16438 /* We are doing cwnd sharing */ 16439 if (rack->gp_ready && 16440 (rack->rack_attempted_scwnd == 0) && 16441 (rack->r_ctl.rc_scw == NULL) && 16442 tp->t_lib) { 16443 /* The pcbid is in, lets make an attempt */ 16444 counter_u64_add(rack_try_scwnd, 1); 16445 rack->rack_attempted_scwnd = 1; 16446 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 16447 &rack->r_ctl.rc_scw_index, 16448 segsiz); 16449 } 16450 if (rack->r_ctl.rc_scw && 16451 (rack->rack_scwnd_is_idle == 1) && 16452 sbavail(&so->so_snd)) { 16453 /* we are no longer out of data */ 16454 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 16455 rack->rack_scwnd_is_idle = 0; 16456 } 16457 if (rack->r_ctl.rc_scw) { 16458 /* First lets update and get the cwnd */ 16459 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 16460 rack->r_ctl.rc_scw_index, 16461 tp->snd_cwnd, tp->snd_wnd, segsiz); 16462 } 16463 } 16464 #endif 16465 /* 16466 * Get standard flags, and add SYN or FIN if requested by 'hidden' 16467 * state flags. 16468 */ 16469 if (tp->t_flags & TF_NEEDFIN) 16470 flags |= TH_FIN; 16471 if (tp->t_flags & TF_NEEDSYN) 16472 flags |= TH_SYN; 16473 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 16474 void *end_rsm; 16475 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 16476 if (end_rsm) 16477 kern_prefetch(end_rsm, &prefetch_rsm); 16478 prefetch_rsm = 1; 16479 } 16480 SOCKBUF_LOCK(sb); 16481 /* 16482 * If snd_nxt == snd_max and we have transmitted a FIN, the 16483 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 16484 * negative length. This can also occur when TCP opens up its 16485 * congestion window while receiving additional duplicate acks after 16486 * fast-retransmit because TCP will reset snd_nxt to snd_max after 16487 * the fast-retransmit. 16488 * 16489 * In the normal retransmit-FIN-only case, however, snd_nxt will be 16490 * set to snd_una, the sb_offset will be 0, and the length may wind 16491 * up 0. 16492 * 16493 * If sack_rxmit is true we are retransmitting from the scoreboard 16494 * in which case len is already set. 
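 * Otherwise len is derived below from what the congestion window,
 * the peer's receive window and the socket buffer allow; while PRR
 * governs a recovery episode it is further clamped to rc_prr_sndcnt,
 * and normally to a single segment unless rc_prr_sendalot is set.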
16495 */ 16496 if ((sack_rxmit == 0) && 16497 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 16498 uint32_t avail; 16499 16500 avail = sbavail(sb); 16501 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 16502 sb_offset = tp->snd_nxt - tp->snd_una; 16503 else 16504 sb_offset = 0; 16505 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 16506 if (rack->r_ctl.rc_tlp_new_data) { 16507 /* TLP is forcing out new data */ 16508 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 16509 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 16510 } 16511 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 16512 if (tp->snd_wnd > sb_offset) 16513 len = tp->snd_wnd - sb_offset; 16514 else 16515 len = 0; 16516 } else { 16517 len = rack->r_ctl.rc_tlp_new_data; 16518 } 16519 rack->r_ctl.rc_tlp_new_data = 0; 16520 doing_tlp = 1; 16521 } else { 16522 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 16523 } 16524 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 16525 /* 16526 * For prr=off, we need to send only 1 MSS 16527 * at a time. We do this because another sack could 16528 * be arriving that causes us to send retransmits and 16529 * we don't want to be on a long pace due to a larger send 16530 * that keeps us from sending out the retransmit. 16531 */ 16532 len = segsiz; 16533 } 16534 } else { 16535 uint32_t outstanding; 16536 /* 16537 * We are inside of a Fast recovery episode, this 16538 * is caused by a SACK or 3 dup acks. At this point 16539 * we have sent all the retransmissions and we rely 16540 * on PRR to dictate what we will send in the form of 16541 * new data. 16542 */ 16543 16544 outstanding = tp->snd_max - tp->snd_una; 16545 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 16546 if (tp->snd_wnd > outstanding) { 16547 len = tp->snd_wnd - outstanding; 16548 /* Check to see if we have the data */ 16549 if ((sb_offset + len) > avail) { 16550 /* It does not all fit */ 16551 if (avail > sb_offset) 16552 len = avail - sb_offset; 16553 else 16554 len = 0; 16555 } 16556 } else { 16557 len = 0; 16558 } 16559 } else if (avail > sb_offset) { 16560 len = avail - sb_offset; 16561 } else { 16562 len = 0; 16563 } 16564 if (len > 0) { 16565 if (len > rack->r_ctl.rc_prr_sndcnt) { 16566 len = rack->r_ctl.rc_prr_sndcnt; 16567 } 16568 if (len > 0) { 16569 sub_from_prr = 1; 16570 counter_u64_add(rack_rtm_prr_newdata, 1); 16571 } 16572 } 16573 if (len > segsiz) { 16574 /* 16575 * We should never send more than a MSS when 16576 * retransmitting or sending new data in prr 16577 * mode unless the override flag is on. Most 16578 * likely the PRR algorithm is not going to 16579 * let us send a lot as well :-) 16580 */ 16581 if (rack->r_ctl.rc_prr_sendalot == 0) { 16582 len = segsiz; 16583 } 16584 } else if (len < segsiz) { 16585 /* 16586 * Do we send any? The idea here is if the 16587 * send empty's the socket buffer we want to 16588 * do it. However if not then lets just wait 16589 * for our prr_sndcnt to get bigger. 16590 */ 16591 long leftinsb; 16592 16593 leftinsb = sbavail(sb) - sb_offset; 16594 if (leftinsb > len) { 16595 /* This send does not empty the sb */ 16596 len = 0; 16597 } 16598 } 16599 } 16600 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 16601 /* 16602 * If you have not established 16603 * and are not doing FAST OPEN 16604 * no data please. 
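 * (Control segments such as a bare SYN may still go out; only the
 * data payload is suppressed by zeroing len and sb_offset here.)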
16605 */ 16606 if ((sack_rxmit == 0) && 16607 (!IS_FASTOPEN(tp->t_flags))){ 16608 len = 0; 16609 sb_offset = 0; 16610 } 16611 } 16612 if (prefetch_so_done == 0) { 16613 kern_prefetch(so, &prefetch_so_done); 16614 prefetch_so_done = 1; 16615 } 16616 /* 16617 * Lop off SYN bit if it has already been sent. However, if this is 16618 * SYN-SENT state and if segment contains data and if we don't know 16619 * that foreign host supports TAO, suppress sending segment. 16620 */ 16621 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 16622 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 16623 /* 16624 * When sending additional segments following a TFO SYN|ACK, 16625 * do not include the SYN bit. 16626 */ 16627 if (IS_FASTOPEN(tp->t_flags) && 16628 (tp->t_state == TCPS_SYN_RECEIVED)) 16629 flags &= ~TH_SYN; 16630 } 16631 /* 16632 * Be careful not to send data and/or FIN on SYN segments. This 16633 * measure is needed to prevent interoperability problems with not 16634 * fully conformant TCP implementations. 16635 */ 16636 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 16637 len = 0; 16638 flags &= ~TH_FIN; 16639 } 16640 /* 16641 * On TFO sockets, ensure no data is sent in the following cases: 16642 * 16643 * - When retransmitting SYN|ACK on a passively-created socket 16644 * 16645 * - When retransmitting SYN on an actively created socket 16646 * 16647 * - When sending a zero-length cookie (cookie request) on an 16648 * actively created socket 16649 * 16650 * - When the socket is in the CLOSED state (RST is being sent) 16651 */ 16652 if (IS_FASTOPEN(tp->t_flags) && 16653 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 16654 ((tp->t_state == TCPS_SYN_SENT) && 16655 (tp->t_tfo_client_cookie_len == 0)) || 16656 (flags & TH_RST))) { 16657 sack_rxmit = 0; 16658 len = 0; 16659 } 16660 /* Without fast-open there should never be data sent on a SYN */ 16661 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 16662 tp->snd_nxt = tp->iss; 16663 len = 0; 16664 } 16665 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 16666 /* We only send 1 MSS if we have a DSACK block */ 16667 add_flag |= RACK_SENT_W_DSACK; 16668 len = segsiz; 16669 } 16670 orig_len = len; 16671 if (len <= 0) { 16672 /* 16673 * If FIN has been sent but not acked, but we haven't been 16674 * called to retransmit, len will be < 0. Otherwise, window 16675 * shrank after we sent into it. If window shrank to 0, 16676 * cancel pending retransmit, pull snd_nxt back to (closed) 16677 * window, and set the persist timer if it isn't already 16678 * going. If the window didn't close completely, just wait 16679 * for an ACK. 16680 * 16681 * We also do a general check here to ensure that we will 16682 * set the persist timer when we have data to send, but a 16683 * 0-byte window. This makes sure the persist timer is set 16684 * even if the packet hits one of the "goto send" lines 16685 * below. 16686 */ 16687 len = 0; 16688 if ((tp->snd_wnd == 0) && 16689 (TCPS_HAVEESTABLISHED(tp->t_state)) && 16690 (tp->snd_una == tp->snd_max) && 16691 (sb_offset < (int)sbavail(sb))) { 16692 rack_enter_persist(tp, rack, cts); 16693 } 16694 } else if ((rsm == NULL) && 16695 (doing_tlp == 0) && 16696 (len < pace_max_seg)) { 16697 /* 16698 * We are not sending a maximum sized segment for 16699 * some reason. Should we not send anything (think 16700 * sws or persists)? 
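 * The checks below are silly window avoidance adapted to a pacing
 * stack: a tiny peer window with nothing outstanding drops us into
 * persist, while a sub-minseg send that neither empties the socket
 * buffer nor is needed to keep data in flight is skipped so that a
 * full pacing-sized segment can be accumulated instead.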
16701 */ 16702 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 16703 (TCPS_HAVEESTABLISHED(tp->t_state)) && 16704 (len < minseg) && 16705 (len < (int)(sbavail(sb) - sb_offset))) { 16706 /* 16707 * Here the rwnd is less than 16708 * the minimum pacing size, this is not a retransmit, 16709 * we are established and 16710 * the send is not the last in the socket buffer 16711 * we send nothing, and we may enter persists 16712 * if nothing is outstanding. 16713 */ 16714 len = 0; 16715 if (tp->snd_max == tp->snd_una) { 16716 /* 16717 * Nothing out we can 16718 * go into persists. 16719 */ 16720 rack_enter_persist(tp, rack, cts); 16721 } 16722 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 16723 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 16724 (len < (int)(sbavail(sb) - sb_offset)) && 16725 (len < minseg)) { 16726 /* 16727 * Here we are not retransmitting, and 16728 * the cwnd is not so small that we could 16729 * not send at least a min size (rxt timer 16730 * not having gone off), We have 2 segments or 16731 * more already in flight, its not the tail end 16732 * of the socket buffer and the cwnd is blocking 16733 * us from sending out a minimum pacing segment size. 16734 * Lets not send anything. 16735 */ 16736 len = 0; 16737 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 16738 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 16739 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 16740 (len < (int)(sbavail(sb) - sb_offset)) && 16741 (TCPS_HAVEESTABLISHED(tp->t_state))) { 16742 /* 16743 * Here we have a send window but we have 16744 * filled it up and we can't send another pacing segment. 16745 * We also have in flight more than 2 segments 16746 * and we are not completing the sb i.e. we allow 16747 * the last bytes of the sb to go out even if 16748 * its not a full pacing segment. 16749 */ 16750 len = 0; 16751 } else if ((rack->r_ctl.crte != NULL) && 16752 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 16753 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 16754 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 16755 (len < (int)(sbavail(sb) - sb_offset))) { 16756 /* 16757 * Here we are doing hardware pacing, this is not a TLP, 16758 * we are not sending a pace max segment size, there is rwnd 16759 * room to send at least N pace_max_seg, the cwnd is greater 16760 * than or equal to a full pacing segments plus 4 mss and we have 2 or 16761 * more segments in flight and its not the tail of the socket buffer. 16762 * 16763 * We don't want to send instead we need to get more ack's in to 16764 * allow us to send a full pacing segment. Normally, if we are pacing 16765 * about the right speed, we should have finished our pacing 16766 * send as most of the acks have come back if we are at the 16767 * right rate. This is a bit fuzzy since return path delay 16768 * can delay the acks, which is why we want to make sure we 16769 * have cwnd space to have a bit more than a max pace segments in flight. 16770 * 16771 * If we have not gotten our acks back we are pacing at too high a 16772 * rate delaying will not hurt and will bring our GP estimate down by 16773 * injecting the delay. If we don't do this we will send 16774 * 2 MSS out in response to the acks being clocked in which 16775 * defeats the point of hw-pacing (i.e. to help us get 16776 * larger TSO's out). 16777 */ 16778 len = 0; 16779 16780 } 16781 16782 } 16783 /* len will be >= 0 after this point. 
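 * The send buffer is then given a chance to grow via
 * rack_sndbuf_autoscale() before the TSO decision is made.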
*/ 16784 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 16785 rack_sndbuf_autoscale(rack); 16786 /* 16787 * Decide if we can use TCP Segmentation Offloading (if supported by 16788 * hardware). 16789 * 16790 * TSO may only be used if we are in a pure bulk sending state. The 16791 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 16792 * options prevent using TSO. With TSO the TCP header is the same 16793 * (except for the sequence number) for all generated packets. This 16794 * makes it impossible to transmit any options which vary per 16795 * generated segment or packet. 16796 * 16797 * IPv4 handling has a clear separation of ip options and ip header 16798 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 16799 * the right thing below to provide length of just ip options and thus 16800 * checking for ipoptlen is enough to decide if ip options are present. 16801 */ 16802 ipoptlen = 0; 16803 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16804 /* 16805 * Pre-calculate here as we save another lookup into the darknesses 16806 * of IPsec that way and can actually decide if TSO is ok. 16807 */ 16808 #ifdef INET6 16809 if (isipv6 && IPSEC_ENABLED(ipv6)) 16810 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 16811 #ifdef INET 16812 else 16813 #endif 16814 #endif /* INET6 */ 16815 #ifdef INET 16816 if (IPSEC_ENABLED(ipv4)) 16817 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 16818 #endif /* INET */ 16819 #endif 16820 16821 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16822 ipoptlen += ipsec_optlen; 16823 #endif 16824 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 16825 (tp->t_port == 0) && 16826 ((tp->t_flags & TF_SIGNATURE) == 0) && 16827 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 16828 ipoptlen == 0) 16829 tso = 1; 16830 { 16831 uint32_t outstanding; 16832 16833 outstanding = tp->snd_max - tp->snd_una; 16834 if (tp->t_flags & TF_SENTFIN) { 16835 /* 16836 * If we sent a fin, snd_max is 1 higher than 16837 * snd_una 16838 */ 16839 outstanding--; 16840 } 16841 if (sack_rxmit) { 16842 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 16843 flags &= ~TH_FIN; 16844 } else { 16845 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 16846 sbused(sb))) 16847 flags &= ~TH_FIN; 16848 } 16849 } 16850 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 16851 (long)TCP_MAXWIN << tp->rcv_scale); 16852 16853 /* 16854 * Sender silly window avoidance. We transmit under the following 16855 * conditions when len is non-zero: 16856 * 16857 * - We have a full segment (or more with TSO) - This is the last 16858 * buffer in a write()/send() and we are either idle or running 16859 * NODELAY - we've timed out (e.g. persist timer) - we have more 16860 * then 1/2 the maximum send window's worth of data (receiver may be 16861 * limited the window size) - we need to retransmit 16862 */ 16863 if (len) { 16864 if (len >= segsiz) { 16865 goto send; 16866 } 16867 /* 16868 * NOTE! on localhost connections an 'ack' from the remote 16869 * end may occur synchronously with the output and cause us 16870 * to flush a buffer queued with moretocome. 
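 * As a practical aid when reading TCP_LOG_OUT records: each of the
 * "goto send" decisions below records a distinct 'pass' code, and that
 * code ends up in the low byte of flex7 in the BB log entry written
 * just before transmit.  Roughly (derived from the checks below, not
 * an exhaustive list):
 *
 *   pass 2  - tail of the socket buffer while idle or TF_NODELAY
 *   pass 22 - nothing is outstanding (snd_una == snd_max)
 *   pass 4  - at least half of the peer's largest offered window
 *   pass 5  - retransmission (snd_nxt < snd_max)
 *   pass 6  - SACK-driven retransmission
 *   pass 12 - small send to keep two packets in flight vs delayed ack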
XXX 16871 * 16872 */ 16873 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 16874 (idle || (tp->t_flags & TF_NODELAY)) && 16875 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 16876 (tp->t_flags & TF_NOPUSH) == 0) { 16877 pass = 2; 16878 goto send; 16879 } 16880 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 16881 pass = 22; 16882 goto send; 16883 } 16884 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 16885 pass = 4; 16886 goto send; 16887 } 16888 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 16889 pass = 5; 16890 goto send; 16891 } 16892 if (sack_rxmit) { 16893 pass = 6; 16894 goto send; 16895 } 16896 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 16897 (ctf_outstanding(tp) < (segsiz * 2))) { 16898 /* 16899 * We have less than two MSS outstanding (delayed ack) 16900 * and our rwnd will not let us send a full sized 16901 * MSS. Lets go ahead and let this small segment 16902 * out because we want to try to have at least two 16903 * packets inflight to not be caught by delayed ack. 16904 */ 16905 pass = 12; 16906 goto send; 16907 } 16908 } 16909 /* 16910 * Sending of standalone window updates. 16911 * 16912 * Window updates are important when we close our window due to a 16913 * full socket buffer and are opening it again after the application 16914 * reads data from it. Once the window has opened again and the 16915 * remote end starts to send again the ACK clock takes over and 16916 * provides the most current window information. 16917 * 16918 * We must avoid the silly window syndrome whereas every read from 16919 * the receive buffer, no matter how small, causes a window update 16920 * to be sent. We also should avoid sending a flurry of window 16921 * updates when the socket buffer had queued a lot of data and the 16922 * application is doing small reads. 16923 * 16924 * Prevent a flurry of pointless window updates by only sending an 16925 * update when we can increase the advertized window by more than 16926 * 1/4th of the socket buffer capacity. When the buffer is getting 16927 * full or is very small be more aggressive and send an update 16928 * whenever we can increase by two mss sized segments. In all other 16929 * situations the ACK's to new incoming data will carry further 16930 * window increases. 16931 * 16932 * Don't send an independent window update if a delayed ACK is 16933 * pending (it will get piggy-backed on it) or the remote side 16934 * already has done a half-close and won't send more data. Skip 16935 * this if the connection is in T/TCP half-open state. 16936 */ 16937 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 16938 !(tp->t_flags & TF_DELACK) && 16939 !TCPS_HAVERCVDFIN(tp->t_state)) { 16940 /* 16941 * "adv" is the amount we could increase the window, taking 16942 * into account that we are limited by TCP_MAXWIN << 16943 * tp->rcv_scale. 16944 */ 16945 int32_t adv; 16946 int oldwin; 16947 16948 adv = recwin; 16949 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 16950 oldwin = (tp->rcv_adv - tp->rcv_nxt); 16951 if (adv > oldwin) 16952 adv -= oldwin; 16953 else { 16954 /* We can't increase the window */ 16955 adv = 0; 16956 } 16957 } else 16958 oldwin = 0; 16959 16960 /* 16961 * If the new window size ends up being the same as or less 16962 * than the old size when it is scaled, then don't force 16963 * a window update. 
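 * As a concrete illustration of the thresholds checked below, using a
 * hypothetical 64 KB receive buffer (sb_hiwat) and a 1460 byte segsiz:
 * a standalone window update goes out once the window can grow by at
 * least 2 * 1460 = 2920 bytes and either that growth is at least
 * sb_hiwat / 4 = 16 KB or the currently advertised window has shrunk
 * to sb_hiwat / 8 = 8 KB or less; independently, an opening of half
 * the buffer (2 * adv >= sb_hiwat) always triggers an update.  The
 * scaled comparison just below exists so we do not bother when the
 * window field on the wire would not change after the >> rcv_scale
 * shift anyway.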
16964 */ 16965 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 16966 goto dontupdate; 16967 16968 if (adv >= (int32_t)(2 * segsiz) && 16969 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 16970 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 16971 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 16972 pass = 7; 16973 goto send; 16974 } 16975 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 16976 pass = 23; 16977 goto send; 16978 } 16979 } 16980 dontupdate: 16981 16982 /* 16983 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 16984 * is also a catch-all for the retransmit timer timeout case. 16985 */ 16986 if (tp->t_flags & TF_ACKNOW) { 16987 pass = 8; 16988 goto send; 16989 } 16990 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 16991 pass = 9; 16992 goto send; 16993 } 16994 /* 16995 * If our state indicates that FIN should be sent and we have not 16996 * yet done so, then we need to send. 16997 */ 16998 if ((flags & TH_FIN) && 16999 (tp->snd_nxt == tp->snd_una)) { 17000 pass = 11; 17001 goto send; 17002 } 17003 /* 17004 * No reason to send a segment, just return. 17005 */ 17006 just_return: 17007 SOCKBUF_UNLOCK(sb); 17008 just_return_nolock: 17009 { 17010 int app_limited = CTF_JR_SENT_DATA; 17011 17012 if (tot_len_this_send > 0) { 17013 /* Make sure snd_nxt is up to max */ 17014 rack->r_ctl.fsb.recwin = recwin; 17015 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17016 if ((error == 0) && 17017 rack_use_rfo && 17018 ((flags & (TH_SYN|TH_FIN)) == 0) && 17019 (ipoptlen == 0) && 17020 (tp->snd_nxt == tp->snd_max) && 17021 (tp->rcv_numsacks == 0) && 17022 rack->r_fsb_inited && 17023 TCPS_HAVEESTABLISHED(tp->t_state) && 17024 (rack->r_must_retran == 0) && 17025 ((tp->t_flags & TF_NEEDFIN) == 0) && 17026 (len > 0) && (orig_len > 0) && 17027 (orig_len > len) && 17028 ((orig_len - len) >= segsiz) && 17029 ((optlen == 0) || 17030 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17031 /* We can send at least one more MSS using our fsb */ 17032 17033 rack->r_fast_output = 1; 17034 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17035 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17036 rack->r_ctl.fsb.tcp_flags = flags; 17037 rack->r_ctl.fsb.left_to_send = orig_len - len; 17038 if (hw_tls) 17039 rack->r_ctl.fsb.hw_tls = 1; 17040 else 17041 rack->r_ctl.fsb.hw_tls = 0; 17042 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17043 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17044 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17045 (tp->snd_max - tp->snd_una))); 17046 if (rack->r_ctl.fsb.left_to_send < segsiz) 17047 rack->r_fast_output = 0; 17048 else { 17049 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17050 rack->r_ctl.fsb.rfo_apply_push = 1; 17051 else 17052 rack->r_ctl.fsb.rfo_apply_push = 0; 17053 } 17054 } else 17055 rack->r_fast_output = 0; 17056 17057 17058 rack_log_fsb(rack, tp, so, flags, 17059 ipoptlen, orig_len, len, 0, 17060 1, optlen, __LINE__, 1); 17061 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17062 tp->snd_nxt = tp->snd_max; 17063 } else { 17064 int end_window = 0; 17065 uint32_t seq = tp->gput_ack; 17066 17067 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17068 if (rsm) { 17069 /* 17070 * Mark the last sent that we just-returned (hinting 17071 * that delayed ack may play a role in any rtt measurement). 
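 * (A recap of the fast-send block arming done just above, roughly: we
 * only turn on r_fast_output when this was a clean data send with no
 * error, no SYN/FIN and no IP options, nothing is waiting to be
 * retransmitted (snd_nxt == snd_max, no queued SACK blocks, no forced
 * retransmit, no pending FIN), the connection is established, the fsb
 * template has been initialized, at least one more full segsiz of the
 * data we trimmed off (orig_len - len) remains to be sent, and the TCP
 * options are either absent or just the standard timestamp.  The
 * leftover byte count goes into fsb.left_to_send, and rfo_apply_push
 * is set when that leftover finishes off the socket buffer.)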
17072 */ 17073 rsm->r_just_ret = 1; 17074 } 17075 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17076 rack->r_ctl.rc_agg_delayed = 0; 17077 rack->r_early = 0; 17078 rack->r_late = 0; 17079 rack->r_ctl.rc_agg_early = 0; 17080 if ((ctf_outstanding(tp) + 17081 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17082 minseg)) >= tp->snd_wnd) { 17083 /* We are limited by the rwnd */ 17084 app_limited = CTF_JR_RWND_LIMITED; 17085 if (IN_FASTRECOVERY(tp->t_flags)) 17086 rack->r_ctl.rc_prr_sndcnt = 0; 17087 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17088 /* We are limited by whats available -- app limited */ 17089 app_limited = CTF_JR_APP_LIMITED; 17090 if (IN_FASTRECOVERY(tp->t_flags)) 17091 rack->r_ctl.rc_prr_sndcnt = 0; 17092 } else if ((idle == 0) && 17093 ((tp->t_flags & TF_NODELAY) == 0) && 17094 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17095 (len < segsiz)) { 17096 /* 17097 * No delay is not on and the 17098 * user is sending less than 1MSS. This 17099 * brings out SWS avoidance so we 17100 * don't send. Another app-limited case. 17101 */ 17102 app_limited = CTF_JR_APP_LIMITED; 17103 } else if (tp->t_flags & TF_NOPUSH) { 17104 /* 17105 * The user has requested no push of 17106 * the last segment and we are 17107 * at the last segment. Another app 17108 * limited case. 17109 */ 17110 app_limited = CTF_JR_APP_LIMITED; 17111 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17112 /* Its the cwnd */ 17113 app_limited = CTF_JR_CWND_LIMITED; 17114 } else if (IN_FASTRECOVERY(tp->t_flags) && 17115 (rack->rack_no_prr == 0) && 17116 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17117 app_limited = CTF_JR_PRR; 17118 } else { 17119 /* Now why here are we not sending? */ 17120 #ifdef NOW 17121 #ifdef INVARIANTS 17122 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17123 #endif 17124 #endif 17125 app_limited = CTF_JR_ASSESSING; 17126 } 17127 /* 17128 * App limited in some fashion, for our pacing GP 17129 * measurements we don't want any gap (even cwnd). 17130 * Close down the measurement window. 17131 */ 17132 if (rack_cwnd_block_ends_measure && 17133 ((app_limited == CTF_JR_CWND_LIMITED) || 17134 (app_limited == CTF_JR_PRR))) { 17135 /* 17136 * The reason we are not sending is 17137 * the cwnd (or prr). We have been configured 17138 * to end the measurement window in 17139 * this case. 17140 */ 17141 end_window = 1; 17142 } else if (rack_rwnd_block_ends_measure && 17143 (app_limited == CTF_JR_RWND_LIMITED)) { 17144 /* 17145 * We are rwnd limited and have been 17146 * configured to end the measurement 17147 * window in this case. 17148 */ 17149 end_window = 1; 17150 } else if (app_limited == CTF_JR_APP_LIMITED) { 17151 /* 17152 * A true application limited period, we have 17153 * ran out of data. 17154 */ 17155 end_window = 1; 17156 } else if (app_limited == CTF_JR_ASSESSING) { 17157 /* 17158 * In the assessing case we hit the end of 17159 * the if/else and had no known reason 17160 * This will panic us under invariants.. 17161 * 17162 * If we get this out in logs we need to 17163 * investagate which reason we missed. 
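 * For reference, the reason to end-of-measurement mapping implemented
 * above is roughly:
 *
 *   CTF_JR_CWND_LIMITED, CTF_JR_PRR - end the GP measurement window
 *                                     only if rack_cwnd_block_ends_measure
 *   CTF_JR_RWND_LIMITED             - end it only if
 *                                     rack_rwnd_block_ends_measure
 *   CTF_JR_APP_LIMITED              - always end it (we ran out of data)
 *   CTF_JR_ASSESSING                - always end it (unknown reason,
 *                                     see the note above)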
17164 */ 17165 end_window = 1; 17166 } 17167 if (end_window) { 17168 uint8_t log = 0; 17169 17170 if ((tp->t_flags & TF_GPUTINPROG) && 17171 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17172 /* Mark the last packet has app limited */ 17173 tp->gput_ack = tp->snd_max; 17174 log = 1; 17175 } 17176 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17177 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17178 if (rack->r_ctl.rc_app_limited_cnt == 0) 17179 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17180 else { 17181 /* 17182 * Go out to the end app limited and mark 17183 * this new one as next and move the end_appl up 17184 * to this guy. 17185 */ 17186 if (rack->r_ctl.rc_end_appl) 17187 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17188 rack->r_ctl.rc_end_appl = rsm; 17189 } 17190 rsm->r_flags |= RACK_APP_LIMITED; 17191 rack->r_ctl.rc_app_limited_cnt++; 17192 } 17193 if (log) 17194 rack_log_pacing_delay_calc(rack, 17195 rack->r_ctl.rc_app_limited_cnt, seq, 17196 tp->gput_ack, 0, 0, 4, __LINE__, NULL); 17197 } 17198 } 17199 if (slot) { 17200 /* set the rack tcb into the slot N */ 17201 counter_u64_add(rack_paced_segments, 1); 17202 } else if (tot_len_this_send) { 17203 counter_u64_add(rack_unpaced_segments, 1); 17204 } 17205 /* Check if we need to go into persists or not */ 17206 if ((tp->snd_max == tp->snd_una) && 17207 TCPS_HAVEESTABLISHED(tp->t_state) && 17208 sbavail(sb) && 17209 (sbavail(sb) > tp->snd_wnd) && 17210 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17211 /* Yes lets make sure to move to persist before timer-start */ 17212 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17213 } 17214 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17215 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17216 } 17217 #ifdef NETFLIX_SHARED_CWND 17218 if ((sbavail(sb) == 0) && 17219 rack->r_ctl.rc_scw) { 17220 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17221 rack->rack_scwnd_is_idle = 1; 17222 } 17223 #endif 17224 #ifdef TCP_ACCOUNTING 17225 if (tot_len_this_send > 0) { 17226 crtsc = get_cyclecount(); 17227 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17228 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17229 } 17230 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17231 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17232 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17233 } 17234 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17235 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17236 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17237 } 17238 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17239 } else { 17240 crtsc = get_cyclecount(); 17241 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17242 tp->tcp_cnt_counters[SND_LIMITED]++; 17243 } 17244 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17245 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17246 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17247 } 17248 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17249 } 17250 sched_unpin(); 17251 #endif 17252 return (0); 17253 17254 send: 17255 if (rsm || sack_rxmit) 17256 counter_u64_add(rack_nfto_resend, 1); 17257 else 17258 counter_u64_add(rack_non_fto_send, 1); 17259 if ((flags & TH_FIN) && 17260 sbavail(sb)) { 17261 /* 17262 * We do not transmit a FIN 17263 * with data outstanding. We 17264 * need to make it so all data 17265 * is acked first. 
17266 */ 17267 flags &= ~TH_FIN; 17268 } 17269 /* Enforce stack imposed max seg size if we have one */ 17270 if (rack->r_ctl.rc_pace_max_segs && 17271 (len > rack->r_ctl.rc_pace_max_segs)) { 17272 mark = 1; 17273 len = rack->r_ctl.rc_pace_max_segs; 17274 } 17275 SOCKBUF_LOCK_ASSERT(sb); 17276 if (len > 0) { 17277 if (len >= segsiz) 17278 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17279 else 17280 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17281 } 17282 /* 17283 * Before ESTABLISHED, force sending of initial options unless TCP 17284 * set not to do any options. NOTE: we assume that the IP/TCP header 17285 * plus TCP options always fit in a single mbuf, leaving room for a 17286 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17287 * + optlen <= MCLBYTES 17288 */ 17289 optlen = 0; 17290 #ifdef INET6 17291 if (isipv6) 17292 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17293 else 17294 #endif 17295 hdrlen = sizeof(struct tcpiphdr); 17296 17297 /* 17298 * Compute options for segment. We only have to care about SYN and 17299 * established connection segments. Options for SYN-ACK segments 17300 * are handled in TCP syncache. 17301 */ 17302 to.to_flags = 0; 17303 if ((tp->t_flags & TF_NOOPT) == 0) { 17304 /* Maximum segment size. */ 17305 if (flags & TH_SYN) { 17306 tp->snd_nxt = tp->iss; 17307 to.to_mss = tcp_mssopt(&inp->inp_inc); 17308 if (tp->t_port) 17309 to.to_mss -= V_tcp_udp_tunneling_overhead; 17310 to.to_flags |= TOF_MSS; 17311 17312 /* 17313 * On SYN or SYN|ACK transmits on TFO connections, 17314 * only include the TFO option if it is not a 17315 * retransmit, as the presence of the TFO option may 17316 * have caused the original SYN or SYN|ACK to have 17317 * been dropped by a middlebox. 17318 */ 17319 if (IS_FASTOPEN(tp->t_flags) && 17320 (tp->t_rxtshift == 0)) { 17321 if (tp->t_state == TCPS_SYN_RECEIVED) { 17322 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17323 to.to_tfo_cookie = 17324 (u_int8_t *)&tp->t_tfo_cookie.server; 17325 to.to_flags |= TOF_FASTOPEN; 17326 wanted_cookie = 1; 17327 } else if (tp->t_state == TCPS_SYN_SENT) { 17328 to.to_tfo_len = 17329 tp->t_tfo_client_cookie_len; 17330 to.to_tfo_cookie = 17331 tp->t_tfo_cookie.client; 17332 to.to_flags |= TOF_FASTOPEN; 17333 wanted_cookie = 1; 17334 /* 17335 * If we wind up having more data to 17336 * send with the SYN than can fit in 17337 * one segment, don't send any more 17338 * until the SYN|ACK comes back from 17339 * the other end. 17340 */ 17341 sendalot = 0; 17342 } 17343 } 17344 } 17345 /* Window scaling. */ 17346 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 17347 to.to_wscale = tp->request_r_scale; 17348 to.to_flags |= TOF_SCALE; 17349 } 17350 /* Timestamps. */ 17351 if ((tp->t_flags & TF_RCVD_TSTMP) || 17352 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 17353 to.to_tsval = ms_cts + tp->ts_offset; 17354 to.to_tsecr = tp->ts_recent; 17355 to.to_flags |= TOF_TS; 17356 } 17357 /* Set receive buffer autosizing timestamp. */ 17358 if (tp->rfbuf_ts == 0 && 17359 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 17360 tp->rfbuf_ts = tcp_ts_getticks(); 17361 /* Selective ACK's. */ 17362 if (tp->t_flags & TF_SACK_PERMIT) { 17363 if (flags & TH_SYN) 17364 to.to_flags |= TOF_SACKPERM; 17365 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 17366 tp->rcv_numsacks > 0) { 17367 to.to_flags |= TOF_SACK; 17368 to.to_nsacks = tp->rcv_numsacks; 17369 to.to_sacks = (u_char *)tp->sackblks; 17370 } 17371 } 17372 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17373 /* TCP-MD5 (RFC2385). 
*/ 17374 if (tp->t_flags & TF_SIGNATURE) 17375 to.to_flags |= TOF_SIGNATURE; 17376 #endif /* TCP_SIGNATURE */ 17377 17378 /* Processing the options. */ 17379 hdrlen += optlen = tcp_addoptions(&to, opt); 17380 /* 17381 * If we wanted a TFO option to be added, but it was unable 17382 * to fit, ensure no data is sent. 17383 */ 17384 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 17385 !(to.to_flags & TOF_FASTOPEN)) 17386 len = 0; 17387 } 17388 if (tp->t_port) { 17389 if (V_tcp_udp_tunneling_port == 0) { 17390 /* The port was removed?? */ 17391 SOCKBUF_UNLOCK(&so->so_snd); 17392 #ifdef TCP_ACCOUNTING 17393 crtsc = get_cyclecount(); 17394 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17395 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 17396 } 17397 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 17398 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17399 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 17400 } 17401 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 17402 sched_unpin(); 17403 #endif 17404 return (EHOSTUNREACH); 17405 } 17406 hdrlen += sizeof(struct udphdr); 17407 } 17408 #ifdef INET6 17409 if (isipv6) 17410 ipoptlen = ip6_optlen(tp->t_inpcb); 17411 else 17412 #endif 17413 if (tp->t_inpcb->inp_options) 17414 ipoptlen = tp->t_inpcb->inp_options->m_len - 17415 offsetof(struct ipoption, ipopt_list); 17416 else 17417 ipoptlen = 0; 17418 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17419 ipoptlen += ipsec_optlen; 17420 #endif 17421 17422 /* 17423 * Adjust data length if insertion of options will bump the packet 17424 * length beyond the t_maxseg length. Clear the FIN bit because we 17425 * cut off the tail of the segment. 17426 */ 17427 if (len + optlen + ipoptlen > tp->t_maxseg) { 17428 if (tso) { 17429 uint32_t if_hw_tsomax; 17430 uint32_t moff; 17431 int32_t max_len; 17432 17433 /* extract TSO information */ 17434 if_hw_tsomax = tp->t_tsomax; 17435 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 17436 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 17437 KASSERT(ipoptlen == 0, 17438 ("%s: TSO can't do IP options", __func__)); 17439 17440 /* 17441 * Check if we should limit by maximum payload 17442 * length: 17443 */ 17444 if (if_hw_tsomax != 0) { 17445 /* compute maximum TSO length */ 17446 max_len = (if_hw_tsomax - hdrlen - 17447 max_linkhdr); 17448 if (max_len <= 0) { 17449 len = 0; 17450 } else if (len > max_len) { 17451 sendalot = 1; 17452 len = max_len; 17453 mark = 2; 17454 } 17455 } 17456 /* 17457 * Prevent the last segment from being fractional 17458 * unless the send sockbuf can be emptied: 17459 */ 17460 max_len = (tp->t_maxseg - optlen); 17461 if ((sb_offset + len) < sbavail(sb)) { 17462 moff = len % (u_int)max_len; 17463 if (moff != 0) { 17464 mark = 3; 17465 len -= moff; 17466 } 17467 } 17468 /* 17469 * In case there are too many small fragments don't 17470 * use TSO: 17471 */ 17472 if (len <= segsiz) { 17473 mark = 4; 17474 tso = 0; 17475 } 17476 /* 17477 * Send the FIN in a separate segment after the bulk 17478 * sending is done. We don't trust the TSO 17479 * implementations to clear the FIN flag on all but 17480 * the last segment. 17481 */ 17482 if (tp->t_flags & TF_NEEDFIN) { 17483 sendalot = 4; 17484 } 17485 } else { 17486 mark = 5; 17487 if (optlen + ipoptlen >= tp->t_maxseg) { 17488 /* 17489 * Since we don't have enough space to put 17490 * the IP header chain and the TCP header in 17491 * one packet as required by RFC 7112, don't 17492 * send it. Also ensure that at least one 17493 * byte of the payload can be put into the 17494 * TCP segment. 
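 * To make the arithmetic concrete (hypothetical sizes: t_maxseg of
 * 1460, a 12 byte timestamp option, no IP options): the guard above
 * refuses to send only when optlen + ipoptlen >= 1460, i.e. when not
 * even one payload byte would fit; otherwise the non-TSO payload just
 * below is clamped to t_maxseg - optlen - ipoptlen = 1460 - 12 - 0 =
 * 1448 bytes and sendalot keeps the loop going.  The TSO branch above
 * does the analogous trim by cutting len back to a whole multiple of
 * (t_maxseg - optlen), e.g. a 10000 byte send becomes 6 * 1448 = 8688
 * bytes, unless the send would empty the socket buffer.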
17495 */ 17496 SOCKBUF_UNLOCK(&so->so_snd); 17497 error = EMSGSIZE; 17498 sack_rxmit = 0; 17499 goto out; 17500 } 17501 len = tp->t_maxseg - optlen - ipoptlen; 17502 sendalot = 5; 17503 } 17504 } else { 17505 tso = 0; 17506 mark = 6; 17507 } 17508 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 17509 ("%s: len > IP_MAXPACKET", __func__)); 17510 #ifdef DIAGNOSTIC 17511 #ifdef INET6 17512 if (max_linkhdr + hdrlen > MCLBYTES) 17513 #else 17514 if (max_linkhdr + hdrlen > MHLEN) 17515 #endif 17516 panic("tcphdr too big"); 17517 #endif 17518 17519 /* 17520 * This KASSERT is here to catch edge cases at a well defined place. 17521 * Before, those had triggered (random) panic conditions further 17522 * down. 17523 */ 17524 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17525 if ((len == 0) && 17526 (flags & TH_FIN) && 17527 (sbused(sb))) { 17528 /* 17529 * We have outstanding data, don't send a fin by itself!. 17530 */ 17531 goto just_return; 17532 } 17533 /* 17534 * Grab a header mbuf, attaching a copy of data to be transmitted, 17535 * and initialize the header from the template for sends on this 17536 * connection. 17537 */ 17538 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 17539 if (len) { 17540 uint32_t max_val; 17541 uint32_t moff; 17542 17543 if (rack->r_ctl.rc_pace_max_segs) 17544 max_val = rack->r_ctl.rc_pace_max_segs; 17545 else if (rack->rc_user_set_max_segs) 17546 max_val = rack->rc_user_set_max_segs * segsiz; 17547 else 17548 max_val = len; 17549 /* 17550 * We allow a limit on sending with hptsi. 17551 */ 17552 if (len > max_val) { 17553 mark = 7; 17554 len = max_val; 17555 } 17556 #ifdef INET6 17557 if (MHLEN < hdrlen + max_linkhdr) 17558 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 17559 else 17560 #endif 17561 m = m_gethdr(M_NOWAIT, MT_DATA); 17562 17563 if (m == NULL) { 17564 SOCKBUF_UNLOCK(sb); 17565 error = ENOBUFS; 17566 sack_rxmit = 0; 17567 goto out; 17568 } 17569 m->m_data += max_linkhdr; 17570 m->m_len = hdrlen; 17571 17572 /* 17573 * Start the m_copy functions from the closest mbuf to the 17574 * sb_offset in the socket buffer chain. 17575 */ 17576 mb = sbsndptr_noadv(sb, sb_offset, &moff); 17577 s_mb = mb; 17578 s_moff = moff; 17579 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 17580 m_copydata(mb, moff, (int)len, 17581 mtod(m, caddr_t)+hdrlen); 17582 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 17583 sbsndptr_adv(sb, mb, len); 17584 m->m_len += len; 17585 } else { 17586 struct sockbuf *msb; 17587 17588 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 17589 msb = NULL; 17590 else 17591 msb = sb; 17592 m->m_next = tcp_m_copym( 17593 mb, moff, &len, 17594 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 17595 ((rsm == NULL) ? hw_tls : 0) 17596 #ifdef NETFLIX_COPY_ARGS 17597 , &filled_all 17598 #endif 17599 ); 17600 if (len <= (tp->t_maxseg - optlen)) { 17601 /* 17602 * Must have ran out of mbufs for the copy 17603 * shorten it to no longer need tso. Lets 17604 * not put on sendalot since we are low on 17605 * mbufs. 
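 * (For context, the copy strategy chosen above is roughly: when the
 * payload fits in the header mbuf behind the link and protocol
 * headers, and this is not an ifnet-TLS send, m_copydata() copies it
 * in place so the whole packet is a single mbuf; anything bigger is
 * chained off m_next by tcp_m_copym(), which builds the chain from
 * the socket buffer data and may shrink len when mbufs are scarce,
 * which is exactly the case this comment is about.  A condensed
 * sketch:
 *
 *   if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls)
 *           m_copydata(mb, moff, len, mtod(m, caddr_t) + hdrlen);
 *   else
 *           m->m_next = tcp_m_copym(mb, moff, &len, ...);)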
17606 */ 17607 tso = 0; 17608 } 17609 if (m->m_next == NULL) { 17610 SOCKBUF_UNLOCK(sb); 17611 (void)m_free(m); 17612 error = ENOBUFS; 17613 sack_rxmit = 0; 17614 goto out; 17615 } 17616 } 17617 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 17618 if (rsm && (rsm->r_flags & RACK_TLP)) { 17619 /* 17620 * TLP should not count in retran count, but 17621 * in its own bin 17622 */ 17623 counter_u64_add(rack_tlp_retran, 1); 17624 counter_u64_add(rack_tlp_retran_bytes, len); 17625 } else { 17626 tp->t_sndrexmitpack++; 17627 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 17628 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 17629 } 17630 #ifdef STATS 17631 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 17632 len); 17633 #endif 17634 } else { 17635 KMOD_TCPSTAT_INC(tcps_sndpack); 17636 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 17637 #ifdef STATS 17638 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 17639 len); 17640 #endif 17641 } 17642 /* 17643 * If we're sending everything we've got, set PUSH. (This 17644 * will keep happy those implementations which only give 17645 * data to the user when a buffer fills or a PUSH comes in.) 17646 */ 17647 if (sb_offset + len == sbused(sb) && 17648 sbused(sb) && 17649 !(flags & TH_SYN)) { 17650 flags |= TH_PUSH; 17651 add_flag |= RACK_HAD_PUSH; 17652 } 17653 17654 SOCKBUF_UNLOCK(sb); 17655 } else { 17656 SOCKBUF_UNLOCK(sb); 17657 if (tp->t_flags & TF_ACKNOW) 17658 KMOD_TCPSTAT_INC(tcps_sndacks); 17659 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 17660 KMOD_TCPSTAT_INC(tcps_sndctrl); 17661 else 17662 KMOD_TCPSTAT_INC(tcps_sndwinup); 17663 17664 m = m_gethdr(M_NOWAIT, MT_DATA); 17665 if (m == NULL) { 17666 error = ENOBUFS; 17667 sack_rxmit = 0; 17668 goto out; 17669 } 17670 #ifdef INET6 17671 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 17672 MHLEN >= hdrlen) { 17673 M_ALIGN(m, hdrlen); 17674 } else 17675 #endif 17676 m->m_data += max_linkhdr; 17677 m->m_len = hdrlen; 17678 } 17679 SOCKBUF_UNLOCK_ASSERT(sb); 17680 m->m_pkthdr.rcvif = (struct ifnet *)0; 17681 #ifdef MAC 17682 mac_inpcb_create_mbuf(inp, m); 17683 #endif 17684 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 17685 #ifdef INET6 17686 if (isipv6) 17687 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 17688 else 17689 #endif /* INET6 */ 17690 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 17691 th = rack->r_ctl.fsb.th; 17692 udp = rack->r_ctl.fsb.udp; 17693 if (udp) { 17694 #ifdef INET6 17695 if (isipv6) 17696 ulen = hdrlen + len - sizeof(struct ip6_hdr); 17697 else 17698 #endif /* INET6 */ 17699 ulen = hdrlen + len - sizeof(struct ip); 17700 udp->uh_ulen = htons(ulen); 17701 } 17702 } else { 17703 #ifdef INET6 17704 if (isipv6) { 17705 ip6 = mtod(m, struct ip6_hdr *); 17706 if (tp->t_port) { 17707 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 17708 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 17709 udp->uh_dport = tp->t_port; 17710 ulen = hdrlen + len - sizeof(struct ip6_hdr); 17711 udp->uh_ulen = htons(ulen); 17712 th = (struct tcphdr *)(udp + 1); 17713 } else 17714 th = (struct tcphdr *)(ip6 + 1); 17715 tcpip_fillheaders(inp, tp->t_port, ip6, th); 17716 } else 17717 #endif /* INET6 */ 17718 { 17719 ip = mtod(m, struct ip *); 17720 #ifdef TCPDEBUG 17721 ipov = (struct ipovly *)ip; 17722 #endif 17723 if (tp->t_port) { 17724 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 17725 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 17726 udp->uh_dport = tp->t_port; 17727 ulen = hdrlen + len - sizeof(struct ip); 17728 udp->uh_ulen = htons(ulen); 
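/*
 * Illustrative note on the UDP length set above (hypothetical sizes):
 * hdrlen already counts the IP header, the UDP header and the TCP
 * header plus options, so subtracting sizeof(struct ip) leaves
 * exactly what the UDP length field must cover.  With a 20 byte IP
 * header, 8 byte UDP header, 20 byte TCP header, 12 bytes of options
 * and a 1400 byte payload: hdrlen = 60, and
 * ulen = 60 + 1400 - 20 = 1440 = 8 + 20 + 12 + 1400.
 */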
17729 th = (struct tcphdr *)(udp + 1); 17730 } else 17731 th = (struct tcphdr *)(ip + 1); 17732 tcpip_fillheaders(inp, tp->t_port, ip, th); 17733 } 17734 } 17735 /* 17736 * Fill in fields, remembering maximum advertised window for use in 17737 * delaying messages about window sizes. If resending a FIN, be sure 17738 * not to use a new sequence number. 17739 */ 17740 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 17741 tp->snd_nxt == tp->snd_max) 17742 tp->snd_nxt--; 17743 /* 17744 * If we are starting a connection, send ECN setup SYN packet. If we 17745 * are on a retransmit, we may resend those bits a number of times 17746 * as per RFC 3168. 17747 */ 17748 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn == 1) { 17749 if (tp->t_rxtshift >= 1) { 17750 if (tp->t_rxtshift <= V_tcp_ecn_maxretries) 17751 flags |= TH_ECE | TH_CWR; 17752 } else 17753 flags |= TH_ECE | TH_CWR; 17754 } 17755 /* Handle parallel SYN for ECN */ 17756 if ((tp->t_state == TCPS_SYN_RECEIVED) && 17757 (tp->t_flags2 & TF2_ECN_SND_ECE)) { 17758 flags |= TH_ECE; 17759 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 17760 } 17761 if (TCPS_HAVEESTABLISHED(tp->t_state) && 17762 (tp->t_flags2 & TF2_ECN_PERMIT)) { 17763 /* 17764 * If the peer has ECN, mark data packets with ECN capable 17765 * transmission (ECT). Ignore pure ack packets, 17766 * retransmissions. 17767 */ 17768 if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) && 17769 (sack_rxmit == 0)) { 17770 #ifdef INET6 17771 if (isipv6) 17772 ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); 17773 else 17774 #endif 17775 ip->ip_tos |= IPTOS_ECN_ECT0; 17776 KMOD_TCPSTAT_INC(tcps_ecn_ect0); 17777 /* 17778 * Reply with proper ECN notifications. 17779 * Only set CWR on new data segments. 17780 */ 17781 if (tp->t_flags2 & TF2_ECN_SND_CWR) { 17782 flags |= TH_CWR; 17783 tp->t_flags2 &= ~TF2_ECN_SND_CWR; 17784 } 17785 } 17786 if (tp->t_flags2 & TF2_ECN_SND_ECE) 17787 flags |= TH_ECE; 17788 } 17789 /* 17790 * If we are doing retransmissions, then snd_nxt will not reflect 17791 * the first unsent octet. For ACK only packets, we do not want the 17792 * sequence number of the retransmitted packet, we want the sequence 17793 * number of the next unsent octet. So, if there is no data (and no 17794 * SYN or FIN), use snd_max instead of snd_nxt when filling in 17795 * ti_seq. But if we are in persist state, snd_max might reflect 17796 * one byte beyond the right edge of the window, so use snd_nxt in 17797 * that case, since we know we aren't doing a retransmission. 17798 * (retransmit and persist are mutually exclusive...) 17799 */ 17800 if (sack_rxmit == 0) { 17801 if (len || (flags & (TH_SYN | TH_FIN))) { 17802 th->th_seq = htonl(tp->snd_nxt); 17803 rack_seq = tp->snd_nxt; 17804 } else { 17805 th->th_seq = htonl(tp->snd_max); 17806 rack_seq = tp->snd_max; 17807 } 17808 } else { 17809 th->th_seq = htonl(rsm->r_start); 17810 rack_seq = rsm->r_start; 17811 } 17812 th->th_ack = htonl(tp->rcv_nxt); 17813 th->th_flags = flags; 17814 /* 17815 * Calculate receive window. Don't shrink window, but avoid silly 17816 * window syndrome. 17817 * If a RST segment is sent, advertise a window of zero. 
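 * To put numbers on the clamping below (hypothetical 64 KB receive
 * buffer, 1460 byte segsiz): a computed recwin of, say, 1000 bytes is
 * both smaller than segsiz and smaller than sb_hiwat / 4 = 16 KB, so
 * it is rounded down to 0 rather than inviting the peer to dribble
 * tiny segments at us; but if we have already advertised 3000 bytes
 * past rcv_nxt, recwin is pulled back up to 3000 so the offered
 * window never appears to shrink.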
17818 */ 17819 if (flags & TH_RST) { 17820 recwin = 0; 17821 } else { 17822 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 17823 recwin < (long)segsiz) { 17824 recwin = 0; 17825 } 17826 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 17827 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 17828 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 17829 } 17830 17831 /* 17832 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 17833 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 17834 * handled in syncache. 17835 */ 17836 if (flags & TH_SYN) 17837 th->th_win = htons((u_short) 17838 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 17839 else { 17840 /* Avoid shrinking window with window scaling. */ 17841 recwin = roundup2(recwin, 1 << tp->rcv_scale); 17842 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 17843 } 17844 /* 17845 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 17846 * window. This may cause the remote transmitter to stall. This 17847 * flag tells soreceive() to disable delayed acknowledgements when 17848 * draining the buffer. This can occur if the receiver is 17849 * attempting to read more data than can be buffered prior to 17850 * transmitting on the connection. 17851 */ 17852 if (th->th_win == 0) { 17853 tp->t_sndzerowin++; 17854 tp->t_flags |= TF_RXWIN0SENT; 17855 } else 17856 tp->t_flags &= ~TF_RXWIN0SENT; 17857 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 17858 /* Now are we using fsb?, if so copy the template data to the mbuf */ 17859 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 17860 uint8_t *cpto; 17861 17862 cpto = mtod(m, uint8_t *); 17863 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 17864 /* 17865 * We have just copied in: 17866 * IP/IP6 17867 * <optional udphdr> 17868 * tcphdr (no options) 17869 * 17870 * We need to grab the correct pointers into the mbuf 17871 * for both the tcp header, and possibly the udp header (if tunneling). 17872 * We do this by using the offset in the copy buffer and adding it 17873 * to the mbuf base pointer (cpto). 17874 */ 17875 #ifdef INET6 17876 if (isipv6) 17877 ip6 = mtod(m, struct ip6_hdr *); 17878 else 17879 #endif /* INET6 */ 17880 ip = mtod(m, struct ip *); 17881 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 17882 /* If we have a udp header lets set it into the mbuf as well */ 17883 if (udp) 17884 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 17885 } 17886 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 17887 if (to.to_flags & TOF_SIGNATURE) { 17888 /* 17889 * Calculate MD5 signature and put it into the place 17890 * determined before. 17891 * NOTE: since TCP options buffer doesn't point into 17892 * mbuf's data, calculate offset and use it. 17893 */ 17894 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 17895 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 17896 /* 17897 * Do not send segment if the calculation of MD5 17898 * digest has failed. 17899 */ 17900 goto out; 17901 } 17902 } 17903 #endif 17904 if (optlen) { 17905 bcopy(opt, th + 1, optlen); 17906 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 17907 } 17908 /* 17909 * Put TCP length in extended header, and then checksum extended 17910 * header and data. 
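 * Two small notes on the header fields set above and the checksum
 * work below (descriptive only): th_off is expressed in 32-bit words,
 * so a bare header plus a 12 byte timestamp option gives
 * (20 + 12) >> 2 = 8; and only the pseudo-header sum over the
 * addresses, protocol and length is stored in th_sum or uh_sum here,
 * with CSUM_TCP or CSUM_UDP (or the IPv6 variants) left in csum_flags
 * so the NIC, or the software fallback on the output path, finishes
 * the checksum over the header and payload.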
17911 */ 17912 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 17913 #ifdef INET6 17914 if (isipv6) { 17915 /* 17916 * ip6_plen is not need to be filled now, and will be filled 17917 * in ip6_output. 17918 */ 17919 if (tp->t_port) { 17920 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 17921 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 17922 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 17923 th->th_sum = htons(0); 17924 UDPSTAT_INC(udps_opackets); 17925 } else { 17926 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 17927 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 17928 th->th_sum = in6_cksum_pseudo(ip6, 17929 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 17930 0); 17931 } 17932 } 17933 #endif 17934 #if defined(INET6) && defined(INET) 17935 else 17936 #endif 17937 #ifdef INET 17938 { 17939 if (tp->t_port) { 17940 m->m_pkthdr.csum_flags = CSUM_UDP; 17941 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 17942 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 17943 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 17944 th->th_sum = htons(0); 17945 UDPSTAT_INC(udps_opackets); 17946 } else { 17947 m->m_pkthdr.csum_flags = CSUM_TCP; 17948 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 17949 th->th_sum = in_pseudo(ip->ip_src.s_addr, 17950 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 17951 IPPROTO_TCP + len + optlen)); 17952 } 17953 /* IP version must be set here for ipv4/ipv6 checking later */ 17954 KASSERT(ip->ip_v == IPVERSION, 17955 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 17956 } 17957 #endif 17958 /* 17959 * Enable TSO and specify the size of the segments. The TCP pseudo 17960 * header checksum is always provided. XXX: Fixme: This is currently 17961 * not the case for IPv6. 17962 */ 17963 if (tso) { 17964 KASSERT(len > tp->t_maxseg - optlen, 17965 ("%s: len <= tso_segsz", __func__)); 17966 m->m_pkthdr.csum_flags |= CSUM_TSO; 17967 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 17968 } 17969 KASSERT(len + hdrlen == m_length(m, NULL), 17970 ("%s: mbuf chain different than expected: %d + %u != %u", 17971 __func__, len, hdrlen, m_length(m, NULL))); 17972 17973 #ifdef TCP_HHOOK 17974 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 17975 hhook_run_tcp_est_out(tp, th, &to, len, tso); 17976 #endif 17977 /* We're getting ready to send; log now. 
*/ 17978 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 17979 union tcp_log_stackspecific log; 17980 17981 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 17982 log.u_bbr.inhpts = rack->rc_inp->inp_in_hpts; 17983 log.u_bbr.ininput = rack->rc_inp->inp_in_input; 17984 if (rack->rack_no_prr) 17985 log.u_bbr.flex1 = 0; 17986 else 17987 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 17988 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 17989 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 17990 log.u_bbr.flex4 = orig_len; 17991 if (filled_all) 17992 log.u_bbr.flex5 = 0x80000000; 17993 else 17994 log.u_bbr.flex5 = 0; 17995 /* Save off the early/late values */ 17996 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 17997 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 17998 log.u_bbr.bw_inuse = rack_get_bw(rack); 17999 if (rsm || sack_rxmit) { 18000 if (doing_tlp) 18001 log.u_bbr.flex8 = 2; 18002 else 18003 log.u_bbr.flex8 = 1; 18004 } else { 18005 log.u_bbr.flex8 = 0; 18006 } 18007 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18008 log.u_bbr.flex7 = mark; 18009 log.u_bbr.flex7 <<= 8; 18010 log.u_bbr.flex7 |= pass; 18011 log.u_bbr.pkts_out = tp->t_maxseg; 18012 log.u_bbr.timeStamp = cts; 18013 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18014 log.u_bbr.lt_epoch = cwnd_to_use; 18015 log.u_bbr.delivered = sendalot; 18016 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18017 len, &log, false, NULL, NULL, 0, &tv); 18018 } else 18019 lgb = NULL; 18020 18021 /* 18022 * Fill in IP length and desired time to live and send to IP level. 18023 * There should be a better way to handle ttl and tos; we could keep 18024 * them in the template, but need a way to checksum without them. 18025 */ 18026 /* 18027 * m->m_pkthdr.len should have been set before cksum calcuration, 18028 * because in6_cksum() need it. 18029 */ 18030 #ifdef INET6 18031 if (isipv6) { 18032 /* 18033 * we separately set hoplimit for every segment, since the 18034 * user might want to change the value via setsockopt. Also, 18035 * desired default hop limit might be changed via Neighbor 18036 * Discovery. 18037 */ 18038 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18039 18040 /* 18041 * Set the packet size here for the benefit of DTrace 18042 * probes. ip6_output() will set it properly; it's supposed 18043 * to include the option header lengths as well. 18044 */ 18045 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18046 18047 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18048 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18049 else 18050 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18051 18052 if (tp->t_state == TCPS_SYN_SENT) 18053 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18054 18055 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18056 /* TODO: IPv6 IP6TOS_ECT bit on */ 18057 error = ip6_output(m, 18058 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18059 inp->in6p_outputopts, 18060 #else 18061 NULL, 18062 #endif 18063 &inp->inp_route6, 18064 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18065 NULL, NULL, inp); 18066 18067 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18068 mtu = inp->inp_route6.ro_nh->nh_mtu; 18069 } 18070 #endif /* INET6 */ 18071 #if defined(INET) && defined(INET6) 18072 else 18073 #endif 18074 #ifdef INET 18075 { 18076 ip->ip_len = htons(m->m_pkthdr.len); 18077 #ifdef INET6 18078 if (inp->inp_vflag & INP_IPV6PROTO) 18079 ip->ip_ttl = in6_selecthlim(inp, NULL); 18080 #endif /* INET6 */ 18081 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18082 /* 18083 * If we do path MTU discovery, then we set DF on every 18084 * packet. This might not be the best thing to do according 18085 * to RFC3390 Section 2. However the tcp hostcache migitates 18086 * the problem so it affects only the first tcp connection 18087 * with a host. 18088 * 18089 * NB: Don't set DF on small MTU/MSS to have a safe 18090 * fallback. 18091 */ 18092 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18093 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18094 if (tp->t_port == 0 || len < V_tcp_minmss) { 18095 ip->ip_off |= htons(IP_DF); 18096 } 18097 } else { 18098 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18099 } 18100 18101 if (tp->t_state == TCPS_SYN_SENT) 18102 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18103 18104 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18105 18106 error = ip_output(m, 18107 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18108 inp->inp_options, 18109 #else 18110 NULL, 18111 #endif 18112 &inp->inp_route, 18113 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18114 inp); 18115 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18116 mtu = inp->inp_route.ro_nh->nh_mtu; 18117 } 18118 #endif /* INET */ 18119 18120 out: 18121 if (lgb) { 18122 lgb->tlb_errno = error; 18123 lgb = NULL; 18124 } 18125 /* 18126 * In transmit state, time the transmission and arrange for the 18127 * retransmit. In persist state, just set snd_max. 
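 * On a successful send the block below also classifies the send for
 * the goodput estimator, roughly:
 *
 *   retransmission (rsm != NULL, not a TLP)  - rack->rc_gp_saw_rec = 1
 *   new data with cwnd_to_use > snd_ssthresh - rack->rc_gp_saw_ca = 1
 *   new data otherwise (slow start)          - rack->rc_gp_saw_ss = 1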
18128 */ 18129 if (error == 0) { 18130 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18131 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18132 if (rsm && (doing_tlp == 0)) { 18133 /* Set we retransmitted */ 18134 rack->rc_gp_saw_rec = 1; 18135 } else { 18136 if (cwnd_to_use > tp->snd_ssthresh) { 18137 /* Set we sent in CA */ 18138 rack->rc_gp_saw_ca = 1; 18139 } else { 18140 /* Set we sent in SS */ 18141 rack->rc_gp_saw_ss = 1; 18142 } 18143 } 18144 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18145 (tp->t_flags & TF_SACK_PERMIT) && 18146 tp->rcv_numsacks > 0) 18147 tcp_clean_dsack_blocks(tp); 18148 tot_len_this_send += len; 18149 if (len == 0) 18150 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18151 else if (len == 1) { 18152 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18153 } else if (len > 1) { 18154 int idx; 18155 18156 idx = (len / segsiz) + 3; 18157 if (idx >= TCP_MSS_ACCT_ATIMER) 18158 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18159 else 18160 counter_u64_add(rack_out_size[idx], 1); 18161 } 18162 } 18163 if ((rack->rack_no_prr == 0) && 18164 sub_from_prr && 18165 (error == 0)) { 18166 if (rack->r_ctl.rc_prr_sndcnt >= len) 18167 rack->r_ctl.rc_prr_sndcnt -= len; 18168 else 18169 rack->r_ctl.rc_prr_sndcnt = 0; 18170 } 18171 sub_from_prr = 0; 18172 if (doing_tlp && (rsm == NULL)) { 18173 /* New send doing a TLP */ 18174 add_flag |= RACK_TLP; 18175 } 18176 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18177 rack_to_usec_ts(&tv), 18178 rsm, add_flag, s_mb, s_moff, hw_tls); 18179 18180 18181 if ((error == 0) && 18182 (len > 0) && 18183 (tp->snd_una == tp->snd_max)) 18184 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18185 { 18186 tcp_seq startseq = tp->snd_nxt; 18187 18188 /* Track our lost count */ 18189 if (rsm && (doing_tlp == 0)) 18190 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18191 /* 18192 * Advance snd_nxt over sequence space of this segment. 18193 */ 18194 if (error) 18195 /* We don't log or do anything with errors */ 18196 goto nomore; 18197 if (doing_tlp == 0) { 18198 if (rsm == NULL) { 18199 /* 18200 * Not a retransmission of some 18201 * sort, new data is going out so 18202 * clear our TLP count and flag. 18203 */ 18204 rack->rc_tlp_in_progress = 0; 18205 rack->r_ctl.rc_tlp_cnt_out = 0; 18206 } 18207 } else { 18208 /* 18209 * We have just sent a TLP, mark that it is true 18210 * and make sure our in progress is set so we 18211 * continue to check the count. 18212 */ 18213 rack->rc_tlp_in_progress = 1; 18214 rack->r_ctl.rc_tlp_cnt_out++; 18215 } 18216 if (flags & (TH_SYN | TH_FIN)) { 18217 if (flags & TH_SYN) 18218 tp->snd_nxt++; 18219 if (flags & TH_FIN) { 18220 tp->snd_nxt++; 18221 tp->t_flags |= TF_SENTFIN; 18222 } 18223 } 18224 /* In the ENOBUFS case we do *not* update snd_max */ 18225 if (sack_rxmit) 18226 goto nomore; 18227 18228 tp->snd_nxt += len; 18229 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18230 if (tp->snd_una == tp->snd_max) { 18231 /* 18232 * Update the time we just added data since 18233 * none was outstanding. 18234 */ 18235 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18236 tp->t_acktime = ticks; 18237 } 18238 tp->snd_max = tp->snd_nxt; 18239 /* 18240 * Time this transmission if not a retransmission and 18241 * not currently timing anything. 18242 * This is only relevant in case of switching back to 18243 * the base stack. 
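 * (Aside on the rack_out_size[] accounting a little further up, with
 * hypothetical numbers: pure acks are counted in TCP_MSS_ACCT_SNDACK,
 * single-byte persist probes in TCP_MSS_ACCT_PERSIST, and data sends
 * are bucketed by idx = len / segsiz + 3, clamped to the last bucket;
 * e.g. a 4380 byte send with a 1460 byte segsiz lands in bucket 6.)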
18244 */ 18245 if (tp->t_rtttime == 0) { 18246 tp->t_rtttime = ticks; 18247 tp->t_rtseq = startseq; 18248 KMOD_TCPSTAT_INC(tcps_segstimed); 18249 } 18250 if (len && 18251 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18252 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18253 } 18254 /* 18255 * If we are doing FO we need to update the mbuf position and subtract 18256 * this happens when the peer sends us duplicate information and 18257 * we thus want to send a DSACK. 18258 * 18259 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18260 * turned off? If not then we are going to echo multiple DSACK blocks 18261 * out (with the TSO), which we should not be doing. 18262 */ 18263 if (rack->r_fast_output && len) { 18264 if (rack->r_ctl.fsb.left_to_send > len) 18265 rack->r_ctl.fsb.left_to_send -= len; 18266 else 18267 rack->r_ctl.fsb.left_to_send = 0; 18268 if (rack->r_ctl.fsb.left_to_send < segsiz) 18269 rack->r_fast_output = 0; 18270 if (rack->r_fast_output) { 18271 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18272 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18273 } 18274 } 18275 } 18276 nomore: 18277 if (error) { 18278 rack->r_ctl.rc_agg_delayed = 0; 18279 rack->r_early = 0; 18280 rack->r_late = 0; 18281 rack->r_ctl.rc_agg_early = 0; 18282 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18283 /* 18284 * Failures do not advance the seq counter above. For the 18285 * case of ENOBUFS we will fall out and retry in 1ms with 18286 * the hpts. Everything else will just have to retransmit 18287 * with the timer. 18288 * 18289 * In any case, we do not want to loop around for another 18290 * send without a good reason. 18291 */ 18292 sendalot = 0; 18293 switch (error) { 18294 case EPERM: 18295 tp->t_softerror = error; 18296 #ifdef TCP_ACCOUNTING 18297 crtsc = get_cyclecount(); 18298 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18299 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18300 } 18301 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18302 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18303 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18304 } 18305 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18306 sched_unpin(); 18307 #endif 18308 return (error); 18309 case ENOBUFS: 18310 /* 18311 * Pace us right away to retry in a some 18312 * time 18313 */ 18314 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18315 if (rack->rc_enobuf < 0x7f) 18316 rack->rc_enobuf++; 18317 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18318 slot = 10 * HPTS_USEC_IN_MSEC; 18319 if (rack->r_ctl.crte != NULL) { 18320 counter_u64_add(rack_saw_enobuf_hw, 1); 18321 tcp_rl_log_enobuf(rack->r_ctl.crte); 18322 } 18323 counter_u64_add(rack_saw_enobuf, 1); 18324 goto enobufs; 18325 case EMSGSIZE: 18326 /* 18327 * For some reason the interface we used initially 18328 * to send segments changed to another or lowered 18329 * its MTU. If TSO was active we either got an 18330 * interface without TSO capabilits or TSO was 18331 * turned off. If we obtained mtu from ip_output() 18332 * then update it and try again. 
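 * For example, if the route were to move onto a hypothetical 1400
 * byte MTU path, tcp_mss_update() below feeds that MTU back into
 * t_maxseg and the 'again' pass rebuilds a smaller segment; with no
 * MTU reported we just re-arm the hpts timer for 10 ms and give up on
 * this pass.  For comparison, the ENOBUFS case above paces the retry
 * instead:
 *
 *   slot = (1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC;   1 ms, 2 ms, ...
 *   floored at 10 ms, and since rc_enobuf stops at 0x7f the backoff
 *   tops out around 128 ms.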
18333 */ 18334 if (tso) 18335 tp->t_flags &= ~TF_TSO; 18336 if (mtu != 0) { 18337 tcp_mss_update(tp, -1, mtu, NULL, NULL); 18338 goto again; 18339 } 18340 slot = 10 * HPTS_USEC_IN_MSEC; 18341 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18342 #ifdef TCP_ACCOUNTING 18343 crtsc = get_cyclecount(); 18344 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18345 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18346 } 18347 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18348 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18349 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18350 } 18351 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18352 sched_unpin(); 18353 #endif 18354 return (error); 18355 case ENETUNREACH: 18356 counter_u64_add(rack_saw_enetunreach, 1); 18357 case EHOSTDOWN: 18358 case EHOSTUNREACH: 18359 case ENETDOWN: 18360 if (TCPS_HAVERCVDSYN(tp->t_state)) { 18361 tp->t_softerror = error; 18362 } 18363 /* FALLTHROUGH */ 18364 default: 18365 slot = 10 * HPTS_USEC_IN_MSEC; 18366 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 18367 #ifdef TCP_ACCOUNTING 18368 crtsc = get_cyclecount(); 18369 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18370 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18371 } 18372 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18373 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18374 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18375 } 18376 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18377 sched_unpin(); 18378 #endif 18379 return (error); 18380 } 18381 } else { 18382 rack->rc_enobuf = 0; 18383 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 18384 rack->r_ctl.retran_during_recovery += len; 18385 } 18386 KMOD_TCPSTAT_INC(tcps_sndtotal); 18387 18388 /* 18389 * Data sent (as far as we can tell). If this advertises a larger 18390 * window than any other segment, then remember the size of the 18391 * advertised window. Any pending ACK has now been sent. 18392 */ 18393 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 18394 tp->rcv_adv = tp->rcv_nxt + recwin; 18395 18396 tp->last_ack_sent = tp->rcv_nxt; 18397 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 18398 enobufs: 18399 if (sendalot) { 18400 /* Do we need to turn off sendalot? */ 18401 if (rack->r_ctl.rc_pace_max_segs && 18402 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 18403 /* We hit our max. */ 18404 sendalot = 0; 18405 } else if ((rack->rc_user_set_max_segs) && 18406 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 18407 /* We hit the user defined max */ 18408 sendalot = 0; 18409 } 18410 } 18411 if ((error == 0) && (flags & TH_FIN)) 18412 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 18413 if (flags & TH_RST) { 18414 /* 18415 * We don't send again after sending a RST. 18416 */ 18417 slot = 0; 18418 sendalot = 0; 18419 if (error == 0) 18420 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 18421 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 18422 /* 18423 * Get our pacing rate, if an error 18424 * occurred in sending (ENOBUF) we would 18425 * hit the else if with slot preset. Other 18426 * errors return. 18427 */ 18428 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 18429 } 18430 if (rsm && 18431 (rsm->r_flags & RACK_HAS_SYN) == 0 && 18432 rack->use_rack_rr) { 18433 /* Its a retransmit and we use the rack cheat? 
*/ 18434 if ((slot == 0) || 18435 (rack->rc_always_pace == 0) || 18436 (rack->r_rr_config == 1)) { 18437 /* 18438 * We have no pacing set or we 18439 * are using old-style rack or 18440 * we are overriden to use the old 1ms pacing. 18441 */ 18442 slot = rack->r_ctl.rc_min_to; 18443 } 18444 } 18445 /* We have sent clear the flag */ 18446 rack->r_ent_rec_ns = 0; 18447 if (rack->r_must_retran) { 18448 if (rsm) { 18449 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 18450 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 18451 /* 18452 * We have retransmitted all. 18453 */ 18454 rack->r_must_retran = 0; 18455 rack->r_ctl.rc_out_at_rto = 0; 18456 } 18457 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 18458 /* 18459 * Sending new data will also kill 18460 * the loop. 18461 */ 18462 rack->r_must_retran = 0; 18463 rack->r_ctl.rc_out_at_rto = 0; 18464 } 18465 } 18466 rack->r_ctl.fsb.recwin = recwin; 18467 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 18468 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 18469 /* 18470 * We hit an RTO and now have past snd_max at the RTO 18471 * clear all the WAS flags. 18472 */ 18473 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 18474 } 18475 if (slot) { 18476 /* set the rack tcb into the slot N */ 18477 counter_u64_add(rack_paced_segments, 1); 18478 if ((error == 0) && 18479 rack_use_rfo && 18480 ((flags & (TH_SYN|TH_FIN)) == 0) && 18481 (rsm == NULL) && 18482 (tp->snd_nxt == tp->snd_max) && 18483 (ipoptlen == 0) && 18484 (tp->rcv_numsacks == 0) && 18485 rack->r_fsb_inited && 18486 TCPS_HAVEESTABLISHED(tp->t_state) && 18487 (rack->r_must_retran == 0) && 18488 ((tp->t_flags & TF_NEEDFIN) == 0) && 18489 (len > 0) && (orig_len > 0) && 18490 (orig_len > len) && 18491 ((orig_len - len) >= segsiz) && 18492 ((optlen == 0) || 18493 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 18494 /* We can send at least one more MSS using our fsb */ 18495 18496 rack->r_fast_output = 1; 18497 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18498 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18499 rack->r_ctl.fsb.tcp_flags = flags; 18500 rack->r_ctl.fsb.left_to_send = orig_len - len; 18501 if (hw_tls) 18502 rack->r_ctl.fsb.hw_tls = 1; 18503 else 18504 rack->r_ctl.fsb.hw_tls = 0; 18505 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 18506 ("rack:%p left_to_send:%u sbavail:%u out:%u", 18507 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 18508 (tp->snd_max - tp->snd_una))); 18509 if (rack->r_ctl.fsb.left_to_send < segsiz) 18510 rack->r_fast_output = 0; 18511 else { 18512 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 18513 rack->r_ctl.fsb.rfo_apply_push = 1; 18514 else 18515 rack->r_ctl.fsb.rfo_apply_push = 0; 18516 } 18517 } else 18518 rack->r_fast_output = 0; 18519 rack_log_fsb(rack, tp, so, flags, 18520 ipoptlen, orig_len, len, error, 18521 (rsm == NULL), optlen, __LINE__, 2); 18522 } else if (sendalot) { 18523 int ret; 18524 18525 if (len) 18526 counter_u64_add(rack_unpaced_segments, 1); 18527 sack_rxmit = 0; 18528 if ((error == 0) && 18529 rack_use_rfo && 18530 ((flags & (TH_SYN|TH_FIN)) == 0) && 18531 (rsm == NULL) && 18532 (ipoptlen == 0) && 18533 (tp->rcv_numsacks == 0) && 18534 (tp->snd_nxt == tp->snd_max) && 18535 (rack->r_must_retran == 0) && 18536 rack->r_fsb_inited && 18537 TCPS_HAVEESTABLISHED(tp->t_state) && 18538 ((tp->t_flags & TF_NEEDFIN) == 0) && 18539 (len > 0) && (orig_len > 0) 
&& 18540 (orig_len > len) && 18541 ((orig_len - len) >= segsiz) && 18542 ((optlen == 0) || 18543 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 18544 /* we can use fast_output for more */ 18545 18546 rack->r_fast_output = 1; 18547 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18548 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18549 rack->r_ctl.fsb.tcp_flags = flags; 18550 rack->r_ctl.fsb.left_to_send = orig_len - len; 18551 if (hw_tls) 18552 rack->r_ctl.fsb.hw_tls = 1; 18553 else 18554 rack->r_ctl.fsb.hw_tls = 0; 18555 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 18556 ("rack:%p left_to_send:%u sbavail:%u out:%u", 18557 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 18558 (tp->snd_max - tp->snd_una))); 18559 if (rack->r_ctl.fsb.left_to_send < segsiz) { 18560 rack->r_fast_output = 0; 18561 } 18562 if (rack->r_fast_output) { 18563 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 18564 rack->r_ctl.fsb.rfo_apply_push = 1; 18565 else 18566 rack->r_ctl.fsb.rfo_apply_push = 0; 18567 rack_log_fsb(rack, tp, so, flags, 18568 ipoptlen, orig_len, len, error, 18569 (rsm == NULL), optlen, __LINE__, 3); 18570 error = 0; 18571 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 18572 if (ret >= 0) 18573 return (ret); 18574 else if (error) 18575 goto nomore; 18576 18577 } 18578 } 18579 goto again; 18580 } else if (len) { 18581 counter_u64_add(rack_unpaced_segments, 1); 18582 } 18583 /* Assure when we leave that snd_nxt will point to top */ 18584 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 18585 tp->snd_nxt = tp->snd_max; 18586 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 18587 #ifdef TCP_ACCOUNTING 18588 crtsc = get_cyclecount() - ts_val; 18589 if (tot_len_this_send) { 18590 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18591 tp->tcp_cnt_counters[SND_OUT_DATA]++; 18592 } 18593 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 18594 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18595 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 18596 } 18597 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 18598 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18599 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 18600 } 18601 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 18602 } else { 18603 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18604 tp->tcp_cnt_counters[SND_OUT_ACK]++; 18605 } 18606 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 18607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18608 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 18609 } 18610 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 18611 } 18612 sched_unpin(); 18613 #endif 18614 if (error == ENOBUFS) 18615 error = 0; 18616 return (error); 18617 } 18618 18619 static void 18620 rack_update_seg(struct tcp_rack *rack) 18621 { 18622 uint32_t orig_val; 18623 18624 orig_val = rack->r_ctl.rc_pace_max_segs; 18625 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 18626 if (orig_val != rack->r_ctl.rc_pace_max_segs) 18627 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL); 18628 } 18629 18630 static void 18631 rack_mtu_change(struct tcpcb *tp) 18632 { 18633 /* 18634 * The MSS may have changed 18635 */ 18636 struct tcp_rack *rack; 18637 18638 rack = (struct tcp_rack *)tp->t_fb_ptr; 18639 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 18640 /* 18641 * The MTU has changed we 
need to resend everything 18642 * since all we have sent is lost. We first fix 18643 * up the mtu though. 18644 */ 18645 rack_set_pace_segments(tp, rack, __LINE__, NULL); 18646 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 18647 rack_remxt_tmr(tp); 18648 rack->r_fast_output = 0; 18649 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 18650 rack->r_ctl.rc_sacked); 18651 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 18652 rack->r_must_retran = 1; 18653 18654 } 18655 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 18656 /* We don't use snd_nxt to retransmit */ 18657 tp->snd_nxt = tp->snd_max; 18658 } 18659 18660 static int 18661 rack_set_profile(struct tcp_rack *rack, int prof) 18662 { 18663 int err = EINVAL; 18664 if (prof == 1) { 18665 /* pace_always=1 */ 18666 if (rack->rc_always_pace == 0) { 18667 if (tcp_can_enable_pacing() == 0) 18668 return (EBUSY); 18669 } 18670 rack->rc_always_pace = 1; 18671 if (rack->use_fixed_rate || rack->gp_ready) 18672 rack_set_cc_pacing(rack); 18673 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 18674 rack->rack_attempt_hdwr_pace = 0; 18675 /* cmpack=1 */ 18676 if (rack_use_cmp_acks) 18677 rack->r_use_cmp_ack = 1; 18678 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 18679 rack->r_use_cmp_ack) 18680 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 18681 /* scwnd=1 */ 18682 rack->rack_enable_scwnd = 1; 18683 /* dynamic=100 */ 18684 rack->rc_gp_dyn_mul = 1; 18685 /* gp_inc_ca */ 18686 rack->r_ctl.rack_per_of_gp_ca = 100; 18687 /* rrr_conf=3 */ 18688 rack->r_rr_config = 3; 18689 /* npush=2 */ 18690 rack->r_ctl.rc_no_push_at_mrtt = 2; 18691 /* fillcw=1 */ 18692 rack->rc_pace_to_cwnd = 1; 18693 rack->rc_pace_fill_if_rttin_range = 0; 18694 rack->rtt_limit_mul = 0; 18695 /* noprr=1 */ 18696 rack->rack_no_prr = 1; 18697 /* lscwnd=1 */ 18698 rack->r_limit_scw = 1; 18699 /* gp_inc_rec */ 18700 rack->r_ctl.rack_per_of_gp_rec = 90; 18701 err = 0; 18702 18703 } else if (prof == 3) { 18704 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 18705 /* pace_always=1 */ 18706 if (rack->rc_always_pace == 0) { 18707 if (tcp_can_enable_pacing() == 0) 18708 return (EBUSY); 18709 } 18710 rack->rc_always_pace = 1; 18711 if (rack->use_fixed_rate || rack->gp_ready) 18712 rack_set_cc_pacing(rack); 18713 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 18714 rack->rack_attempt_hdwr_pace = 0; 18715 /* cmpack=1 */ 18716 if (rack_use_cmp_acks) 18717 rack->r_use_cmp_ack = 1; 18718 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 18719 rack->r_use_cmp_ack) 18720 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 18721 /* scwnd=1 */ 18722 rack->rack_enable_scwnd = 1; 18723 /* dynamic=100 */ 18724 rack->rc_gp_dyn_mul = 1; 18725 /* gp_inc_ca */ 18726 rack->r_ctl.rack_per_of_gp_ca = 100; 18727 /* rrr_conf=3 */ 18728 rack->r_rr_config = 3; 18729 /* npush=2 */ 18730 rack->r_ctl.rc_no_push_at_mrtt = 2; 18731 /* fillcw=2 */ 18732 rack->rc_pace_to_cwnd = 1; 18733 rack->r_fill_less_agg = 1; 18734 rack->rc_pace_fill_if_rttin_range = 0; 18735 rack->rtt_limit_mul = 0; 18736 /* noprr=1 */ 18737 rack->rack_no_prr = 1; 18738 /* lscwnd=1 */ 18739 rack->r_limit_scw = 1; 18740 /* gp_inc_rec */ 18741 rack->r_ctl.rack_per_of_gp_rec = 90; 18742 err = 0; 18743 18744 18745 } else if (prof == 2) { 18746 /* cmpack=1 */ 18747 if (rack->rc_always_pace == 0) { 18748 if (tcp_can_enable_pacing() == 0) 18749 return (EBUSY); 18750 } 18751 rack->rc_always_pace = 1; 18752 if (rack->use_fixed_rate || rack->gp_ready) 18753 rack_set_cc_pacing(rack); 18754 rack->r_use_cmp_ack = 1; 
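		/*
		 * Note (descriptive only): compressed (mbuf) acks are armed on
		 * the inpcb (INP_MBUF_ACKCMP) only once the connection has
		 * reached an established state; see the check that follows.
		 * Until then r_use_cmp_ack simply records the preference.
		 */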
18755 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 18756 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 18757 /* pace_always=1 */ 18758 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 18759 /* scwnd=1 */ 18760 rack->rack_enable_scwnd = 1; 18761 /* dynamic=100 */ 18762 rack->rc_gp_dyn_mul = 1; 18763 rack->r_ctl.rack_per_of_gp_ca = 100; 18764 /* rrr_conf=3 */ 18765 rack->r_rr_config = 3; 18766 /* npush=2 */ 18767 rack->r_ctl.rc_no_push_at_mrtt = 2; 18768 /* fillcw=1 */ 18769 rack->rc_pace_to_cwnd = 1; 18770 rack->rc_pace_fill_if_rttin_range = 0; 18771 rack->rtt_limit_mul = 0; 18772 /* noprr=1 */ 18773 rack->rack_no_prr = 1; 18774 /* lscwnd=0 */ 18775 rack->r_limit_scw = 0; 18776 err = 0; 18777 } else if (prof == 0) { 18778 /* This changes things back to the default settings */ 18779 err = 0; 18780 if (rack->rc_always_pace) { 18781 tcp_decrement_paced_conn(); 18782 rack_undo_cc_pacing(rack); 18783 rack->rc_always_pace = 0; 18784 } 18785 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 18786 rack->rc_always_pace = 1; 18787 if (rack->use_fixed_rate || rack->gp_ready) 18788 rack_set_cc_pacing(rack); 18789 } else 18790 rack->rc_always_pace = 0; 18791 if (rack_use_cmp_acks) 18792 rack->r_use_cmp_ack = 1; 18793 else 18794 rack->r_use_cmp_ack = 0; 18795 if (rack_disable_prr) 18796 rack->rack_no_prr = 1; 18797 else 18798 rack->rack_no_prr = 0; 18799 if (rack_gp_no_rec_chg) 18800 rack->rc_gp_no_rec_chg = 1; 18801 else 18802 rack->rc_gp_no_rec_chg = 0; 18803 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 18804 rack->r_mbuf_queue = 1; 18805 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 18806 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 18807 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 18808 } else { 18809 rack->r_mbuf_queue = 0; 18810 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 18811 } 18812 if (rack_enable_shared_cwnd) 18813 rack->rack_enable_scwnd = 1; 18814 else 18815 rack->rack_enable_scwnd = 0; 18816 if (rack_do_dyn_mul) { 18817 /* When dynamic adjustment is on CA needs to start at 100% */ 18818 rack->rc_gp_dyn_mul = 1; 18819 if (rack_do_dyn_mul >= 100) 18820 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 18821 } else { 18822 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 18823 rack->rc_gp_dyn_mul = 0; 18824 } 18825 rack->r_rr_config = 0; 18826 rack->r_ctl.rc_no_push_at_mrtt = 0; 18827 rack->rc_pace_to_cwnd = 0; 18828 rack->rc_pace_fill_if_rttin_range = 0; 18829 rack->rtt_limit_mul = 0; 18830 18831 if (rack_enable_hw_pacing) 18832 rack->rack_hdw_pace_ena = 1; 18833 else 18834 rack->rack_hdw_pace_ena = 0; 18835 if (rack_disable_prr) 18836 rack->rack_no_prr = 1; 18837 else 18838 rack->rack_no_prr = 0; 18839 if (rack_limits_scwnd) 18840 rack->r_limit_scw = 1; 18841 else 18842 rack->r_limit_scw = 0; 18843 err = 0; 18844 } 18845 return (err); 18846 } 18847 18848 static int 18849 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 18850 { 18851 struct deferred_opt_list *dol; 18852 18853 dol = malloc(sizeof(struct deferred_opt_list), 18854 M_TCPFSB, M_NOWAIT|M_ZERO); 18855 if (dol == NULL) { 18856 /* 18857 * No space yikes -- fail out.. 
18858 */ 18859 return (0); 18860 } 18861 dol->optname = sopt_name; 18862 dol->optval = loptval; 18863 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 18864 return (1); 18865 } 18866 18867 static int 18868 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 18869 uint32_t optval, uint64_t loptval) 18870 { 18871 struct epoch_tracker et; 18872 struct sockopt sopt; 18873 struct cc_newreno_opts opt; 18874 uint64_t val; 18875 int error = 0; 18876 uint16_t ca, ss; 18877 18878 switch (sopt_name) { 18879 18880 case TCP_RACK_PACING_BETA: 18881 RACK_OPTS_INC(tcp_rack_beta); 18882 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 18883 /* This only works for newreno. */ 18884 error = EINVAL; 18885 break; 18886 } 18887 if (rack->rc_pacing_cc_set) { 18888 /* 18889 * Set them into the real CC module 18890 * whats in the rack pcb is the old values 18891 * to be used on restoral/ 18892 */ 18893 sopt.sopt_dir = SOPT_SET; 18894 opt.name = CC_NEWRENO_BETA; 18895 opt.val = optval; 18896 if (CC_ALGO(tp)->ctl_output != NULL) 18897 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 18898 else { 18899 error = ENOENT; 18900 break; 18901 } 18902 } else { 18903 /* 18904 * Not pacing yet so set it into our local 18905 * rack pcb storage. 18906 */ 18907 rack->r_ctl.rc_saved_beta.beta = optval; 18908 } 18909 break; 18910 case TCP_RACK_TIMER_SLOP: 18911 RACK_OPTS_INC(tcp_rack_timer_slop); 18912 rack->r_ctl.timer_slop = optval; 18913 if (rack->rc_tp->t_srtt) { 18914 /* 18915 * If we have an SRTT lets update t_rxtcur 18916 * to have the new slop. 18917 */ 18918 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 18919 rack_rto_min, rack_rto_max, 18920 rack->r_ctl.timer_slop); 18921 } 18922 break; 18923 case TCP_RACK_PACING_BETA_ECN: 18924 RACK_OPTS_INC(tcp_rack_beta_ecn); 18925 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 18926 /* This only works for newreno. */ 18927 error = EINVAL; 18928 break; 18929 } 18930 if (rack->rc_pacing_cc_set) { 18931 /* 18932 * Set them into the real CC module 18933 * whats in the rack pcb is the old values 18934 * to be used on restoral/ 18935 */ 18936 sopt.sopt_dir = SOPT_SET; 18937 opt.name = CC_NEWRENO_BETA_ECN; 18938 opt.val = optval; 18939 if (CC_ALGO(tp)->ctl_output != NULL) 18940 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 18941 else 18942 error = ENOENT; 18943 } else { 18944 /* 18945 * Not pacing yet so set it into our local 18946 * rack pcb storage. 
18947 */ 18948 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 18949 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN; 18950 } 18951 break; 18952 case TCP_DEFER_OPTIONS: 18953 RACK_OPTS_INC(tcp_defer_opt); 18954 if (optval) { 18955 if (rack->gp_ready) { 18956 /* Too late */ 18957 error = EINVAL; 18958 break; 18959 } 18960 rack->defer_options = 1; 18961 } else 18962 rack->defer_options = 0; 18963 break; 18964 case TCP_RACK_MEASURE_CNT: 18965 RACK_OPTS_INC(tcp_rack_measure_cnt); 18966 if (optval && (optval <= 0xff)) { 18967 rack->r_ctl.req_measurements = optval; 18968 } else 18969 error = EINVAL; 18970 break; 18971 case TCP_REC_ABC_VAL: 18972 RACK_OPTS_INC(tcp_rec_abc_val); 18973 if (optval > 0) 18974 rack->r_use_labc_for_rec = 1; 18975 else 18976 rack->r_use_labc_for_rec = 0; 18977 break; 18978 case TCP_RACK_ABC_VAL: 18979 RACK_OPTS_INC(tcp_rack_abc_val); 18980 if ((optval > 0) && (optval < 255)) 18981 rack->rc_labc = optval; 18982 else 18983 error = EINVAL; 18984 break; 18985 case TCP_HDWR_UP_ONLY: 18986 RACK_OPTS_INC(tcp_pacing_up_only); 18987 if (optval) 18988 rack->r_up_only = 1; 18989 else 18990 rack->r_up_only = 0; 18991 break; 18992 case TCP_PACING_RATE_CAP: 18993 RACK_OPTS_INC(tcp_pacing_rate_cap); 18994 rack->r_ctl.bw_rate_cap = loptval; 18995 break; 18996 case TCP_RACK_PROFILE: 18997 RACK_OPTS_INC(tcp_profile); 18998 error = rack_set_profile(rack, optval); 18999 break; 19000 case TCP_USE_CMP_ACKS: 19001 RACK_OPTS_INC(tcp_use_cmp_acks); 19002 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19003 /* You can't turn it off once its on! */ 19004 error = EINVAL; 19005 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19006 rack->r_use_cmp_ack = 1; 19007 rack->r_mbuf_queue = 1; 19008 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19009 } 19010 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19011 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19012 break; 19013 case TCP_SHARED_CWND_TIME_LIMIT: 19014 RACK_OPTS_INC(tcp_lscwnd); 19015 if (optval) 19016 rack->r_limit_scw = 1; 19017 else 19018 rack->r_limit_scw = 0; 19019 break; 19020 case TCP_RACK_PACE_TO_FILL: 19021 RACK_OPTS_INC(tcp_fillcw); 19022 if (optval == 0) 19023 rack->rc_pace_to_cwnd = 0; 19024 else { 19025 rack->rc_pace_to_cwnd = 1; 19026 if (optval > 1) 19027 rack->r_fill_less_agg = 1; 19028 } 19029 if ((optval >= rack_gp_rtt_maxmul) && 19030 rack_gp_rtt_maxmul && 19031 (optval < 0xf)) { 19032 rack->rc_pace_fill_if_rttin_range = 1; 19033 rack->rtt_limit_mul = optval; 19034 } else { 19035 rack->rc_pace_fill_if_rttin_range = 0; 19036 rack->rtt_limit_mul = 0; 19037 } 19038 break; 19039 case TCP_RACK_NO_PUSH_AT_MAX: 19040 RACK_OPTS_INC(tcp_npush); 19041 if (optval == 0) 19042 rack->r_ctl.rc_no_push_at_mrtt = 0; 19043 else if (optval < 0xff) 19044 rack->r_ctl.rc_no_push_at_mrtt = optval; 19045 else 19046 error = EINVAL; 19047 break; 19048 case TCP_SHARED_CWND_ENABLE: 19049 RACK_OPTS_INC(tcp_rack_scwnd); 19050 if (optval == 0) 19051 rack->rack_enable_scwnd = 0; 19052 else 19053 rack->rack_enable_scwnd = 1; 19054 break; 19055 case TCP_RACK_MBUF_QUEUE: 19056 /* Now do we use the LRO mbuf-queue feature */ 19057 RACK_OPTS_INC(tcp_rack_mbufq); 19058 if (optval || rack->r_use_cmp_ack) 19059 rack->r_mbuf_queue = 1; 19060 else 19061 rack->r_mbuf_queue = 0; 19062 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19063 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19064 else 19065 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19066 break; 19067 case 
TCP_RACK_NONRXT_CFG_RATE: 19068 RACK_OPTS_INC(tcp_rack_cfg_rate); 19069 if (optval == 0) 19070 rack->rack_rec_nonrxt_use_cr = 0; 19071 else 19072 rack->rack_rec_nonrxt_use_cr = 1; 19073 break; 19074 case TCP_NO_PRR: 19075 RACK_OPTS_INC(tcp_rack_noprr); 19076 if (optval == 0) 19077 rack->rack_no_prr = 0; 19078 else if (optval == 1) 19079 rack->rack_no_prr = 1; 19080 else if (optval == 2) 19081 rack->no_prr_addback = 1; 19082 else 19083 error = EINVAL; 19084 break; 19085 case TCP_TIMELY_DYN_ADJ: 19086 RACK_OPTS_INC(tcp_timely_dyn); 19087 if (optval == 0) 19088 rack->rc_gp_dyn_mul = 0; 19089 else { 19090 rack->rc_gp_dyn_mul = 1; 19091 if (optval >= 100) { 19092 /* 19093 * If the user sets something 100 or more 19094 * its the gp_ca value. 19095 */ 19096 rack->r_ctl.rack_per_of_gp_ca = optval; 19097 } 19098 } 19099 break; 19100 case TCP_RACK_DO_DETECTION: 19101 RACK_OPTS_INC(tcp_rack_do_detection); 19102 if (optval == 0) 19103 rack->do_detection = 0; 19104 else 19105 rack->do_detection = 1; 19106 break; 19107 case TCP_RACK_TLP_USE: 19108 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19109 error = EINVAL; 19110 break; 19111 } 19112 RACK_OPTS_INC(tcp_tlp_use); 19113 rack->rack_tlp_threshold_use = optval; 19114 break; 19115 case TCP_RACK_TLP_REDUCE: 19116 /* RACK TLP cwnd reduction (bool) */ 19117 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19118 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19119 break; 19120 /* Pacing related ones */ 19121 case TCP_RACK_PACE_ALWAYS: 19122 /* 19123 * zero is old rack method, 1 is new 19124 * method using a pacing rate. 19125 */ 19126 RACK_OPTS_INC(tcp_rack_pace_always); 19127 if (optval > 0) { 19128 if (rack->rc_always_pace) { 19129 error = EALREADY; 19130 break; 19131 } else if (tcp_can_enable_pacing()) { 19132 rack->rc_always_pace = 1; 19133 if (rack->use_fixed_rate || rack->gp_ready) 19134 rack_set_cc_pacing(rack); 19135 } 19136 else { 19137 error = ENOSPC; 19138 break; 19139 } 19140 } else { 19141 if (rack->rc_always_pace) { 19142 tcp_decrement_paced_conn(); 19143 rack->rc_always_pace = 0; 19144 rack_undo_cc_pacing(rack); 19145 } 19146 } 19147 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19148 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19149 else 19150 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19151 /* A rate may be set irate or other, if so set seg size */ 19152 rack_update_seg(rack); 19153 break; 19154 case TCP_BBR_RACK_INIT_RATE: 19155 RACK_OPTS_INC(tcp_initial_rate); 19156 val = optval; 19157 /* Change from kbits per second to bytes per second */ 19158 val *= 1000; 19159 val /= 8; 19160 rack->r_ctl.init_rate = val; 19161 if (rack->rc_init_win != rack_default_init_window) { 19162 uint32_t win, snt; 19163 19164 /* 19165 * Options don't always get applied 19166 * in the order you think. So in order 19167 * to assure we update a cwnd we need 19168 * to check and see if we are still 19169 * where we should raise the cwnd. 
19170 */ 19171 win = rc_init_window(rack); 19172 if (SEQ_GT(tp->snd_max, tp->iss)) 19173 snt = tp->snd_max - tp->iss; 19174 else 19175 snt = 0; 19176 if ((snt < win) && 19177 (tp->snd_cwnd < win)) 19178 tp->snd_cwnd = win; 19179 } 19180 if (rack->rc_always_pace) 19181 rack_update_seg(rack); 19182 break; 19183 case TCP_BBR_IWINTSO: 19184 RACK_OPTS_INC(tcp_initial_win); 19185 if (optval && (optval <= 0xff)) { 19186 uint32_t win, snt; 19187 19188 rack->rc_init_win = optval; 19189 win = rc_init_window(rack); 19190 if (SEQ_GT(tp->snd_max, tp->iss)) 19191 snt = tp->snd_max - tp->iss; 19192 else 19193 snt = 0; 19194 if ((snt < win) && 19195 (tp->t_srtt | 19196 #ifdef NETFLIX_PEAKRATE 19197 tp->t_maxpeakrate | 19198 #endif 19199 rack->r_ctl.init_rate)) { 19200 /* 19201 * We are not past the initial window 19202 * and we have some bases for pacing, 19203 * so we need to possibly adjust up 19204 * the cwnd. Note even if we don't set 19205 * the cwnd, its still ok to raise the rc_init_win 19206 * which can be used coming out of idle when we 19207 * would have a rate. 19208 */ 19209 if (tp->snd_cwnd < win) 19210 tp->snd_cwnd = win; 19211 } 19212 if (rack->rc_always_pace) 19213 rack_update_seg(rack); 19214 } else 19215 error = EINVAL; 19216 break; 19217 case TCP_RACK_FORCE_MSEG: 19218 RACK_OPTS_INC(tcp_rack_force_max_seg); 19219 if (optval) 19220 rack->rc_force_max_seg = 1; 19221 else 19222 rack->rc_force_max_seg = 0; 19223 break; 19224 case TCP_RACK_PACE_MAX_SEG: 19225 /* Max segments size in a pace in bytes */ 19226 RACK_OPTS_INC(tcp_rack_max_seg); 19227 rack->rc_user_set_max_segs = optval; 19228 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19229 break; 19230 case TCP_RACK_PACE_RATE_REC: 19231 /* Set the fixed pacing rate in Bytes per second ca */ 19232 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19233 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19234 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19235 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19236 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19237 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19238 rack->use_fixed_rate = 1; 19239 if (rack->rc_always_pace) 19240 rack_set_cc_pacing(rack); 19241 rack_log_pacing_delay_calc(rack, 19242 rack->r_ctl.rc_fixed_pacing_rate_ss, 19243 rack->r_ctl.rc_fixed_pacing_rate_ca, 19244 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19245 __LINE__, NULL); 19246 break; 19247 19248 case TCP_RACK_PACE_RATE_SS: 19249 /* Set the fixed pacing rate in Bytes per second ca */ 19250 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19251 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19252 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19253 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19254 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19255 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19256 rack->use_fixed_rate = 1; 19257 if (rack->rc_always_pace) 19258 rack_set_cc_pacing(rack); 19259 rack_log_pacing_delay_calc(rack, 19260 rack->r_ctl.rc_fixed_pacing_rate_ss, 19261 rack->r_ctl.rc_fixed_pacing_rate_ca, 19262 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19263 __LINE__, NULL); 19264 break; 19265 19266 case TCP_RACK_PACE_RATE_CA: 19267 /* Set the fixed pacing rate in Bytes per second ca */ 19268 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19269 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19270 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19271 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19272 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19273 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19274 rack->use_fixed_rate = 1; 19275 if 
(rack->rc_always_pace) 19276 rack_set_cc_pacing(rack); 19277 rack_log_pacing_delay_calc(rack, 19278 rack->r_ctl.rc_fixed_pacing_rate_ss, 19279 rack->r_ctl.rc_fixed_pacing_rate_ca, 19280 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19281 __LINE__, NULL); 19282 break; 19283 case TCP_RACK_GP_INCREASE_REC: 19284 RACK_OPTS_INC(tcp_gp_inc_rec); 19285 rack->r_ctl.rack_per_of_gp_rec = optval; 19286 rack_log_pacing_delay_calc(rack, 19287 rack->r_ctl.rack_per_of_gp_ss, 19288 rack->r_ctl.rack_per_of_gp_ca, 19289 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19290 __LINE__, NULL); 19291 break; 19292 case TCP_RACK_GP_INCREASE_CA: 19293 RACK_OPTS_INC(tcp_gp_inc_ca); 19294 ca = optval; 19295 if (ca < 100) { 19296 /* 19297 * We don't allow any reduction 19298 * over the GP b/w. 19299 */ 19300 error = EINVAL; 19301 break; 19302 } 19303 rack->r_ctl.rack_per_of_gp_ca = ca; 19304 rack_log_pacing_delay_calc(rack, 19305 rack->r_ctl.rack_per_of_gp_ss, 19306 rack->r_ctl.rack_per_of_gp_ca, 19307 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19308 __LINE__, NULL); 19309 break; 19310 case TCP_RACK_GP_INCREASE_SS: 19311 RACK_OPTS_INC(tcp_gp_inc_ss); 19312 ss = optval; 19313 if (ss < 100) { 19314 /* 19315 * We don't allow any reduction 19316 * over the GP b/w. 19317 */ 19318 error = EINVAL; 19319 break; 19320 } 19321 rack->r_ctl.rack_per_of_gp_ss = ss; 19322 rack_log_pacing_delay_calc(rack, 19323 rack->r_ctl.rack_per_of_gp_ss, 19324 rack->r_ctl.rack_per_of_gp_ca, 19325 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19326 __LINE__, NULL); 19327 break; 19328 case TCP_RACK_RR_CONF: 19329 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 19330 if (optval && optval <= 3) 19331 rack->r_rr_config = optval; 19332 else 19333 rack->r_rr_config = 0; 19334 break; 19335 case TCP_HDWR_RATE_CAP: 19336 RACK_OPTS_INC(tcp_hdwr_rate_cap); 19337 if (optval) { 19338 if (rack->r_rack_hw_rate_caps == 0) 19339 rack->r_rack_hw_rate_caps = 1; 19340 else 19341 error = EALREADY; 19342 } else { 19343 rack->r_rack_hw_rate_caps = 0; 19344 } 19345 break; 19346 case TCP_BBR_HDWR_PACE: 19347 RACK_OPTS_INC(tcp_hdwr_pacing); 19348 if (optval){ 19349 if (rack->rack_hdrw_pacing == 0) { 19350 rack->rack_hdw_pace_ena = 1; 19351 rack->rack_attempt_hdwr_pace = 0; 19352 } else 19353 error = EALREADY; 19354 } else { 19355 rack->rack_hdw_pace_ena = 0; 19356 #ifdef RATELIMIT 19357 if (rack->r_ctl.crte != NULL) { 19358 rack->rack_hdrw_pacing = 0; 19359 rack->rack_attempt_hdwr_pace = 0; 19360 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 19361 rack->r_ctl.crte = NULL; 19362 } 19363 #endif 19364 } 19365 break; 19366 /* End Pacing related ones */ 19367 case TCP_RACK_PRR_SENDALOT: 19368 /* Allow PRR to send more than one seg */ 19369 RACK_OPTS_INC(tcp_rack_prr_sendalot); 19370 rack->r_ctl.rc_prr_sendalot = optval; 19371 break; 19372 case TCP_RACK_MIN_TO: 19373 /* Minimum time between rack t-o's in ms */ 19374 RACK_OPTS_INC(tcp_rack_min_to); 19375 rack->r_ctl.rc_min_to = optval; 19376 break; 19377 case TCP_RACK_EARLY_SEG: 19378 /* If early recovery max segments */ 19379 RACK_OPTS_INC(tcp_rack_early_seg); 19380 rack->r_ctl.rc_early_recovery_segs = optval; 19381 break; 19382 case TCP_RACK_REORD_THRESH: 19383 /* RACK reorder threshold (shift amount) */ 19384 RACK_OPTS_INC(tcp_rack_reord_thresh); 19385 if ((optval > 0) && (optval < 31)) 19386 rack->r_ctl.rc_reorder_shift = optval; 19387 else 19388 error = EINVAL; 19389 break; 19390 case TCP_RACK_REORD_FADE: 19391 /* Does reordering fade after ms time */ 19392 RACK_OPTS_INC(tcp_rack_reord_fade); 19393 rack->r_ctl.rc_reorder_fade = optval; 19394 
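		/*
		 * Illustrative sketch (not built): options like the ones in
		 * this switch normally arrive from userland as plain integers
		 * via an IPPROTO_TCP-level setsockopt(2); rack_set_sockopt()
		 * below copies the value into 'optval' before calling
		 * rack_process_option().  The value 500 here is only a
		 * hypothetical example:
		 *
		 *	int fade_ms = 500;
		 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_RACK_REORD_FADE,
		 *	    &fade_ms, sizeof(fade_ms));
		 */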
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		RACK_OPTS_INC(tcp_rack_tlp_thresh);
		if (optval)
			rack->r_ctl.rc_tlp_threshold = optval;
		else
			error = EINVAL;
		break;
	case TCP_BBR_USE_RACK_RR:
		RACK_OPTS_INC(tcp_rack_rr);
		if (optval)
			rack->use_rack_rr = 1;
		else
			rack->use_rack_rr = 0;
		break;
	case TCP_FAST_RSM_HACK:
		RACK_OPTS_INC(tcp_rack_fastrsm_hack);
		if (optval)
			rack->fast_rsm_hack = 1;
		else
			rack->fast_rsm_hack = 0;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		RACK_OPTS_INC(tcp_rack_pkt_delay);
		rack->r_ctl.rc_pkt_delay = optval;
		break;
	case TCP_DELACK:
		RACK_OPTS_INC(tcp_rack_delayed_ack);
		if (optval == 0)
			tp->t_delayed_ack = 0;
		else
			tp->t_delayed_ack = 1;
		if (tp->t_flags & TF_DELACK) {
			tp->t_flags &= ~TF_DELACK;
			tp->t_flags |= TF_ACKNOW;
			NET_EPOCH_ENTER(et);
			rack_output(tp);
			NET_EPOCH_EXIT(et);
		}
		break;

	case TCP_BBR_RACK_RTT_USE:
		RACK_OPTS_INC(tcp_rack_rtt_use);
		if ((optval != USE_RTT_HIGH) &&
		    (optval != USE_RTT_LOW) &&
		    (optval != USE_RTT_AVG))
			error = EINVAL;
		else
			rack->r_ctl.rc_rate_sample_method = optval;
		break;
	case TCP_DATA_AFTER_CLOSE:
		RACK_OPTS_INC(tcp_data_after_close);
		if (optval)
			rack->rc_allow_data_af_clo = 1;
		else
			rack->rc_allow_data_af_clo = 0;
		break;
	default:
		break;
	}
#ifdef NETFLIX_STATS
	tcp_log_socket_option(tp, sopt_name, optval, error);
#endif
	return (error);
}

static void
rack_apply_deferred_options(struct tcp_rack *rack)
{
	struct deferred_opt_list *dol, *sdol;
	uint32_t s_optval;

	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* Disadvantage of deferral is you lose the error return */
		s_optval = (uint32_t)dol->optval;
		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval);
		free(dol, M_TCPDO);
	}
}

static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/*
	 * HW TLS state has changed; fix all
	 * rsm's in flight.
19484 */ 19485 struct tcp_rack *rack; 19486 struct rack_sendmap *rsm; 19487 19488 rack = (struct tcp_rack *)tp->t_fb_ptr; 19489 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 19490 if (chg) 19491 rsm->r_hw_tls = 1; 19492 else 19493 rsm->r_hw_tls = 0; 19494 } 19495 if (chg) 19496 rack->r_ctl.fsb.hw_tls = 1; 19497 else 19498 rack->r_ctl.fsb.hw_tls = 0; 19499 } 19500 19501 static int 19502 rack_pru_options(struct tcpcb *tp, int flags) 19503 { 19504 if (flags & PRUS_OOB) 19505 return (EOPNOTSUPP); 19506 return (0); 19507 } 19508 19509 static struct tcp_function_block __tcp_rack = { 19510 .tfb_tcp_block_name = __XSTRING(STACKNAME), 19511 .tfb_tcp_output = rack_output, 19512 .tfb_do_queued_segments = ctf_do_queued_segments, 19513 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 19514 .tfb_tcp_do_segment = rack_do_segment, 19515 .tfb_tcp_ctloutput = rack_ctloutput, 19516 .tfb_tcp_fb_init = rack_init, 19517 .tfb_tcp_fb_fini = rack_fini, 19518 .tfb_tcp_timer_stop_all = rack_stopall, 19519 .tfb_tcp_timer_activate = rack_timer_activate, 19520 .tfb_tcp_timer_active = rack_timer_active, 19521 .tfb_tcp_timer_stop = rack_timer_stop, 19522 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 19523 .tfb_tcp_handoff_ok = rack_handoff_ok, 19524 .tfb_tcp_mtu_chg = rack_mtu_change, 19525 .tfb_pru_options = rack_pru_options, 19526 .tfb_hwtls_change = rack_hw_tls_change, 19527 }; 19528 19529 /* 19530 * rack_ctloutput() must drop the inpcb lock before performing copyin on 19531 * socket option arguments. When it re-acquires the lock after the copy, it 19532 * has to revalidate that the connection is still valid for the socket 19533 * option. 19534 */ 19535 static int 19536 rack_set_sockopt(struct socket *so, struct sockopt *sopt, 19537 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 19538 { 19539 uint64_t loptval; 19540 int32_t error = 0, optval; 19541 19542 switch (sopt->sopt_name) { 19543 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 19544 /* Pacing related ones */ 19545 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 19546 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 19547 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 19548 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 19549 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 19550 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 19551 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 19552 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 19553 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 19554 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 19555 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 19556 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 19557 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 19558 case TCP_HDWR_RATE_CAP: /* URL: hdwrcap boolean */ 19559 case TCP_PACING_RATE_CAP: /* URL:cap-- used by side-channel */ 19560 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 19561 /* End pacing related */ 19562 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 19563 case TCP_DELACK: /* URL:delack (in base TCP i.e. 
tcp_hints along with cc etc ) */ 19564 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 19565 case TCP_RACK_MIN_TO: /* URL:min_to */ 19566 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 19567 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 19568 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 19569 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 19570 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 19571 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 19572 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 19573 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 19574 case TCP_RACK_DO_DETECTION: /* URL:detect */ 19575 case TCP_NO_PRR: /* URL:noprr */ 19576 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 19577 case TCP_DATA_AFTER_CLOSE: /* no URL */ 19578 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 19579 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 19580 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 19581 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 19582 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 19583 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 19584 case TCP_RACK_PROFILE: /* URL:profile */ 19585 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 19586 case TCP_RACK_ABC_VAL: /* URL:labc */ 19587 case TCP_REC_ABC_VAL: /* URL:reclabc */ 19588 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 19589 case TCP_DEFER_OPTIONS: /* URL:defer */ 19590 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 19591 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 19592 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 19593 break; 19594 default: 19595 /* Filter off all unknown options to the base stack */ 19596 return (tcp_default_ctloutput(so, sopt, inp, tp)); 19597 break; 19598 } 19599 INP_WUNLOCK(inp); 19600 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 19601 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 19602 /* 19603 * We truncate it down to 32 bits for the socket-option trace this 19604 * means rates > 34Gbps won't show right, but thats probably ok. 
19605 */ 19606 optval = (uint32_t)loptval; 19607 } else { 19608 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 19609 /* Save it in 64 bit form too */ 19610 loptval = optval; 19611 } 19612 if (error) 19613 return (error); 19614 INP_WLOCK(inp); 19615 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 19616 INP_WUNLOCK(inp); 19617 return (ECONNRESET); 19618 } 19619 if (tp->t_fb != &__tcp_rack) { 19620 INP_WUNLOCK(inp); 19621 return (ENOPROTOOPT); 19622 } 19623 if (rack->defer_options && (rack->gp_ready == 0) && 19624 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 19625 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 19626 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 19627 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 19628 /* Options are beind deferred */ 19629 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 19630 INP_WUNLOCK(inp); 19631 return (0); 19632 } else { 19633 /* No memory to defer, fail */ 19634 INP_WUNLOCK(inp); 19635 return (ENOMEM); 19636 } 19637 } 19638 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 19639 INP_WUNLOCK(inp); 19640 return (error); 19641 } 19642 19643 static void 19644 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 19645 { 19646 19647 INP_WLOCK_ASSERT(tp->t_inpcb); 19648 bzero(ti, sizeof(*ti)); 19649 19650 ti->tcpi_state = tp->t_state; 19651 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 19652 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 19653 if (tp->t_flags & TF_SACK_PERMIT) 19654 ti->tcpi_options |= TCPI_OPT_SACK; 19655 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 19656 ti->tcpi_options |= TCPI_OPT_WSCALE; 19657 ti->tcpi_snd_wscale = tp->snd_scale; 19658 ti->tcpi_rcv_wscale = tp->rcv_scale; 19659 } 19660 if (tp->t_flags2 & TF2_ECN_PERMIT) 19661 ti->tcpi_options |= TCPI_OPT_ECN; 19662 if (tp->t_flags & TF_FASTOPEN) 19663 ti->tcpi_options |= TCPI_OPT_TFO; 19664 /* still kept in ticks is t_rcvtime */ 19665 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 19666 /* Since we hold everything in precise useconds this is easy */ 19667 ti->tcpi_rtt = tp->t_srtt; 19668 ti->tcpi_rttvar = tp->t_rttvar; 19669 ti->tcpi_rto = tp->t_rxtcur; 19670 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 19671 ti->tcpi_snd_cwnd = tp->snd_cwnd; 19672 /* 19673 * FreeBSD-specific extension fields for tcp_info. 19674 */ 19675 ti->tcpi_rcv_space = tp->rcv_wnd; 19676 ti->tcpi_rcv_nxt = tp->rcv_nxt; 19677 ti->tcpi_snd_wnd = tp->snd_wnd; 19678 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 19679 ti->tcpi_snd_nxt = tp->snd_nxt; 19680 ti->tcpi_snd_mss = tp->t_maxseg; 19681 ti->tcpi_rcv_mss = tp->t_maxseg; 19682 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 19683 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 19684 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 19685 #ifdef NETFLIX_STATS 19686 ti->tcpi_total_tlp = tp->t_sndtlppack; 19687 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 19688 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 19689 #endif 19690 #ifdef TCP_OFFLOAD 19691 if (tp->t_flags & TF_TOE) { 19692 ti->tcpi_options |= TCPI_OPT_TOE; 19693 tcp_offload_tcp_info(tp, ti); 19694 } 19695 #endif 19696 } 19697 19698 static int 19699 rack_get_sockopt(struct socket *so, struct sockopt *sopt, 19700 struct inpcb *inp, struct tcpcb *tp, struct tcp_rack *rack) 19701 { 19702 int32_t error, optval; 19703 uint64_t val, loptval; 19704 struct tcp_info ti; 19705 /* 19706 * Because all our options are either boolean or an int, we can just 19707 * pull everything into optval and then unlock and copy. If we ever 19708 * add a option that is not a int, then this will have quite an 19709 * impact to this routine. 19710 */ 19711 error = 0; 19712 switch (sopt->sopt_name) { 19713 case TCP_INFO: 19714 /* First get the info filled */ 19715 rack_fill_info(tp, &ti); 19716 /* Fix up the rtt related fields if needed */ 19717 INP_WUNLOCK(inp); 19718 error = sooptcopyout(sopt, &ti, sizeof ti); 19719 return (error); 19720 /* 19721 * Beta is the congestion control value for NewReno that influences how 19722 * much of a backoff happens when loss is detected. It is normally set 19723 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 19724 * when you exit recovery. 19725 */ 19726 case TCP_RACK_PACING_BETA: 19727 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 19728 error = EINVAL; 19729 else if (rack->rc_pacing_cc_set == 0) 19730 optval = rack->r_ctl.rc_saved_beta.beta; 19731 else { 19732 /* 19733 * Reach out into the CC data and report back what 19734 * I have previously set. Yeah it looks hackish but 19735 * we don't want to report the saved values. 19736 */ 19737 if (tp->ccv->cc_data) 19738 optval = ((struct newreno *)tp->ccv->cc_data)->beta; 19739 else 19740 error = EINVAL; 19741 } 19742 break; 19743 /* 19744 * Beta_ecn is the congestion control value for NewReno that influences how 19745 * much of a backoff happens when a ECN mark is detected. It is normally set 19746 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 19747 * you exit recovery. Note that classic ECN has a beta of 50, it is only 19748 * ABE Ecn that uses this "less" value, but we do too with pacing :) 19749 */ 19750 19751 case TCP_RACK_PACING_BETA_ECN: 19752 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 19753 error = EINVAL; 19754 else if (rack->rc_pacing_cc_set == 0) 19755 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 19756 else { 19757 /* 19758 * Reach out into the CC data and report back what 19759 * I have previously set. Yeah it looks hackish but 19760 * we don't want to report the saved values. 
19761 */ 19762 if (tp->ccv->cc_data) 19763 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn; 19764 else 19765 error = EINVAL; 19766 } 19767 break; 19768 case TCP_FAST_RSM_HACK: 19769 optval = rack->fast_rsm_hack; 19770 break; 19771 case TCP_DEFER_OPTIONS: 19772 optval = rack->defer_options; 19773 break; 19774 case TCP_RACK_MEASURE_CNT: 19775 optval = rack->r_ctl.req_measurements; 19776 break; 19777 case TCP_REC_ABC_VAL: 19778 optval = rack->r_use_labc_for_rec; 19779 break; 19780 case TCP_RACK_ABC_VAL: 19781 optval = rack->rc_labc; 19782 break; 19783 case TCP_HDWR_UP_ONLY: 19784 optval= rack->r_up_only; 19785 break; 19786 case TCP_PACING_RATE_CAP: 19787 loptval = rack->r_ctl.bw_rate_cap; 19788 break; 19789 case TCP_RACK_PROFILE: 19790 /* You cannot retrieve a profile, its write only */ 19791 error = EINVAL; 19792 break; 19793 case TCP_USE_CMP_ACKS: 19794 optval = rack->r_use_cmp_ack; 19795 break; 19796 case TCP_RACK_PACE_TO_FILL: 19797 optval = rack->rc_pace_to_cwnd; 19798 if (optval && rack->r_fill_less_agg) 19799 optval++; 19800 break; 19801 case TCP_RACK_NO_PUSH_AT_MAX: 19802 optval = rack->r_ctl.rc_no_push_at_mrtt; 19803 break; 19804 case TCP_SHARED_CWND_ENABLE: 19805 optval = rack->rack_enable_scwnd; 19806 break; 19807 case TCP_RACK_NONRXT_CFG_RATE: 19808 optval = rack->rack_rec_nonrxt_use_cr; 19809 break; 19810 case TCP_NO_PRR: 19811 if (rack->rack_no_prr == 1) 19812 optval = 1; 19813 else if (rack->no_prr_addback == 1) 19814 optval = 2; 19815 else 19816 optval = 0; 19817 break; 19818 case TCP_RACK_DO_DETECTION: 19819 optval = rack->do_detection; 19820 break; 19821 case TCP_RACK_MBUF_QUEUE: 19822 /* Now do we use the LRO mbuf-queue feature */ 19823 optval = rack->r_mbuf_queue; 19824 break; 19825 case TCP_TIMELY_DYN_ADJ: 19826 optval = rack->rc_gp_dyn_mul; 19827 break; 19828 case TCP_BBR_IWINTSO: 19829 optval = rack->rc_init_win; 19830 break; 19831 case TCP_RACK_TLP_REDUCE: 19832 /* RACK TLP cwnd reduction (bool) */ 19833 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 19834 break; 19835 case TCP_BBR_RACK_INIT_RATE: 19836 val = rack->r_ctl.init_rate; 19837 /* convert to kbits per sec */ 19838 val *= 8; 19839 val /= 1000; 19840 optval = (uint32_t)val; 19841 break; 19842 case TCP_RACK_FORCE_MSEG: 19843 optval = rack->rc_force_max_seg; 19844 break; 19845 case TCP_RACK_PACE_MAX_SEG: 19846 /* Max segments in a pace */ 19847 optval = rack->rc_user_set_max_segs; 19848 break; 19849 case TCP_RACK_PACE_ALWAYS: 19850 /* Use the always pace method */ 19851 optval = rack->rc_always_pace; 19852 break; 19853 case TCP_RACK_PRR_SENDALOT: 19854 /* Allow PRR to send more than one seg */ 19855 optval = rack->r_ctl.rc_prr_sendalot; 19856 break; 19857 case TCP_RACK_MIN_TO: 19858 /* Minimum time between rack t-o's in ms */ 19859 optval = rack->r_ctl.rc_min_to; 19860 break; 19861 case TCP_RACK_EARLY_SEG: 19862 /* If early recovery max segments */ 19863 optval = rack->r_ctl.rc_early_recovery_segs; 19864 break; 19865 case TCP_RACK_REORD_THRESH: 19866 /* RACK reorder threshold (shift amount) */ 19867 optval = rack->r_ctl.rc_reorder_shift; 19868 break; 19869 case TCP_RACK_REORD_FADE: 19870 /* Does reordering fade after ms time */ 19871 optval = rack->r_ctl.rc_reorder_fade; 19872 break; 19873 case TCP_BBR_USE_RACK_RR: 19874 /* Do we use the rack cheat for rxt */ 19875 optval = rack->use_rack_rr; 19876 break; 19877 case TCP_RACK_RR_CONF: 19878 optval = rack->r_rr_config; 19879 break; 19880 case TCP_HDWR_RATE_CAP: 19881 optval = rack->r_rack_hw_rate_caps; 19882 break; 19883 case TCP_BBR_HDWR_PACE: 19884 optval = 
rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(so, sopt, inp, tp));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
{
	int32_t error = EINVAL;
	struct tcp_rack *rack;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		/* Huh?
*/ 19950 goto out; 19951 } 19952 if (sopt->sopt_dir == SOPT_SET) { 19953 return (rack_set_sockopt(so, sopt, inp, tp, rack)); 19954 } else if (sopt->sopt_dir == SOPT_GET) { 19955 return (rack_get_sockopt(so, sopt, inp, tp, rack)); 19956 } 19957 out: 19958 INP_WUNLOCK(inp); 19959 return (error); 19960 } 19961 19962 static const char *rack_stack_names[] = { 19963 __XSTRING(STACKNAME), 19964 #ifdef STACKALIAS 19965 __XSTRING(STACKALIAS), 19966 #endif 19967 }; 19968 19969 static int 19970 rack_ctor(void *mem, int32_t size, void *arg, int32_t how) 19971 { 19972 memset(mem, 0, size); 19973 return (0); 19974 } 19975 19976 static void 19977 rack_dtor(void *mem, int32_t size, void *arg) 19978 { 19979 19980 } 19981 19982 static bool rack_mod_inited = false; 19983 19984 static int 19985 tcp_addrack(module_t mod, int32_t type, void *data) 19986 { 19987 int32_t err = 0; 19988 int num_stacks; 19989 19990 switch (type) { 19991 case MOD_LOAD: 19992 rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 19993 sizeof(struct rack_sendmap), 19994 rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0); 19995 19996 rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 19997 sizeof(struct tcp_rack), 19998 rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 19999 20000 sysctl_ctx_init(&rack_sysctl_ctx); 20001 rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx, 20002 SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 20003 OID_AUTO, 20004 #ifdef STACKALIAS 20005 __XSTRING(STACKALIAS), 20006 #else 20007 __XSTRING(STACKNAME), 20008 #endif 20009 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 20010 ""); 20011 if (rack_sysctl_root == NULL) { 20012 printf("Failed to add sysctl node\n"); 20013 err = EFAULT; 20014 goto free_uma; 20015 } 20016 rack_init_sysctls(); 20017 num_stacks = nitems(rack_stack_names); 20018 err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK, 20019 rack_stack_names, &num_stacks); 20020 if (err) { 20021 printf("Failed to register %s stack name for " 20022 "%s module\n", rack_stack_names[num_stacks], 20023 __XSTRING(MODNAME)); 20024 sysctl_ctx_free(&rack_sysctl_ctx); 20025 free_uma: 20026 uma_zdestroy(rack_zone); 20027 uma_zdestroy(rack_pcb_zone); 20028 rack_counter_destroy(); 20029 printf("Failed to register rack module -- err:%d\n", err); 20030 return (err); 20031 } 20032 tcp_lro_reg_mbufq(); 20033 rack_mod_inited = true; 20034 break; 20035 case MOD_QUIESCE: 20036 err = deregister_tcp_functions(&__tcp_rack, true, false); 20037 break; 20038 case MOD_UNLOAD: 20039 err = deregister_tcp_functions(&__tcp_rack, false, true); 20040 if (err == EBUSY) 20041 break; 20042 if (rack_mod_inited) { 20043 uma_zdestroy(rack_zone); 20044 uma_zdestroy(rack_pcb_zone); 20045 sysctl_ctx_free(&rack_sysctl_ctx); 20046 rack_counter_destroy(); 20047 rack_mod_inited = false; 20048 } 20049 tcp_lro_dereg_mbufq(); 20050 err = 0; 20051 break; 20052 default: 20053 return (EOPNOTSUPP); 20054 } 20055 return (err); 20056 } 20057 20058 static moduledata_t tcp_rack = { 20059 .name = __XSTRING(MODNAME), 20060 .evhand = tcp_addrack, 20061 .priv = 0 20062 }; 20063 20064 MODULE_VERSION(MODNAME, 1); 20065 DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 20066 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 20067
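
/*
 * Illustrative usage sketch (not part of the build): on a FreeBSD system
 * with TCP HPTS available (see the MODULE_DEPEND on tcphpts above), this
 * stack is typically loaded and selected along these lines:
 *
 *	# kldload tcp_rack
 *	# sysctl net.inet.tcp.functions_available
 *	# sysctl net.inet.tcp.functions_default=rack
 *
 * Individual applications can also opt in per socket with the
 * TCP_FUNCTION_BLK socket option before the connection is established,
 * and then tune the stack through the TCP_RACK_* options handled by
 * rack_process_option() above.
 */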