/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>	/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>	/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES	/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif	/* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif	/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t) (tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially, and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
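
/*
 * Illustrative sketch only (not part of the original source): the
 * per-state decomposition described above is typically wired up by
 * keeping a per-connection function pointer that is re-aimed whenever
 * the TCP FSM state changes, conceptually:
 *
 *	rack->r_substate = rack_do_established;
 *	...
 *	retval = (*rack->r_substate)(m, th, so, tp, &to, drop_hdrlen,
 *	    tlen, tiwin, thflags, nxt_pkt, iptos);
 *
 * The member name r_substate and the exact argument list above are
 * assumptions for illustration; see rack_set_state() and
 * rack_do_segment() for the actual dispatch.
 */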
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000
						 * - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;	/* 1; */
static int32_t rack_hw_rate_min = 0;	/* 1500000;*/
static int32_t rack_hw_rate_to_low = 0;	/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microseconds min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* bit field, bit 1 sets rc_rack_tmr_std_based and bit 2 sets rc_rack_use_dsack */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;	/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250usec */
static int32_t rack_persist_max = 2000000;	/* 2 Seconds in usec's */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;	/* How many max_segs the rwnd must be before we hold off sending */

/*
 * Currently regular tcp has a rto_min of 30ms; the backoff goes 12 times,
 * so that ends up being a total of 122.850 seconds before a
 * connection is killed.
 */
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;	/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;	/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;	/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;	/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;	/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;	/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;	/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combining these two gives the range of 'no change' to bw */
/* ie the up/down provide the upper and lower bound */
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;	/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;	/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;	/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;	/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;	/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;
static uint32_t rack_trace_point_config = 0;
static uint32_t rack_trace_point_bb_mode = 4;
static int32_t rack_trace_point_count = 0;

/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;

counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_collapsed_win_seen;
counter_u64_t rack_collapsed_win_rxt;
counter_u64_t rack_collapsed_win_rxt_bytes;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];


#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t *ofia, int32_t thflags, int32_t *ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack, int );
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void
rack_timer_activate(struct tcpcb *tp, uint32_t timer_type,
    uint32_t delta);
static int32_t rack_timer_active(struct tcpcb *tp, uint32_t timer_type);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static void rack_timer_stop(struct tcpcb *tp, uint32_t timer_type);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static inline void
rack_trace_point(struct tcp_rack *rack, int num)
{
	if (((rack_trace_point_config == num) ||
	     (rack_trace_point_config == 0xffffffff)) &&
	    (rack_trace_point_bb_mode != 0) &&
	    (rack_trace_point_count > 0) &&
	    (rack->rc_tp->t_logstate == 0)) {
		int res;

		res = atomic_fetchadd_int(&rack_trace_point_count, -1);
		if (res > 0) {
			rack->rc_tp->t_logstate = rack_trace_point_bb_mode;
		} else {
			/* Lost a race, assure it's zero now */
			rack_trace_point_count = 0;
		}
	}
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
	struct sockopt sopt;
	struct cc_newreno_opts opt;
	struct newreno old, *ptr;
	struct tcpcb *tp;
	int error;

	if (rack->rc_pacing_cc_set)
		return;

	tp = rack->rc_tp;
	if (tp->cc_algo == NULL) {
		/* Tcb is leaving */
		return;
	}
	rack->rc_pacing_cc_set = 1;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, we can't play games with beta! */
		goto out;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (CC_ALGO(tp)->ctl_output == NULL) {
		/* Huh, why does new_reno no longer have a set function? */
		goto out;
	}
	if (ptr == NULL) {
		/* Just the default values */
		old.beta = V_newreno_beta;
		old.beta_ecn = V_newreno_beta_ecn;
		old.newreno_flags = 0;
	} else {
		old.beta = ptr->beta;
		old.beta_ecn = ptr->beta_ecn;
		old.newreno_flags = ptr->newreno_flags;
	}
	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
	sopt.sopt_dir = SOPT_SET;
	opt.name = CC_NEWRENO_BETA;
	opt.val = rack->r_ctl.rc_saved_beta.beta;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		goto out;
	}
	/*
	 * Hack alert: we need to set in our newreno_flags
	 * so that ABE behavior is also applied.
	 */
	((struct newreno *)tp->ccv->cc_data)->newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED;
	opt.name = CC_NEWRENO_BETA_ECN;
	opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
	error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt);
	if (error) {
		goto out;
	}
	/* Save off the original values for restoration */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		if (ptr) {
			log.u_bbr.flex1 = ptr->beta;
			log.u_bbr.flex2 = ptr->beta_ecn;
			log.u_bbr.flex3 = ptr->newreno_flags;
		}
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 3;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
	struct newreno old, *ptr;
	struct tcpcb *tp;

	if (rack->rc_pacing_cc_set == 0)
		return;
	tp = rack->rc_tp;
	rack->rc_pacing_cc_set = 0;
	if (tp->cc_algo == NULL)
		/* Tcb is leaving */
		return;
	if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) {
		/* Not new-reno, nothing to do! */
		return;
	}
	ptr = ((struct newreno *)tp->ccv->cc_data);
	if (ptr == NULL) {
		/*
		 * This happens at rack_fini() if the
		 * cc module gets freed on us. In that
		 * case we lose our "new" settings but
		 * that's ok, since the tcb is going away anyway.
		 */
		return;
	}
	/* Grab out our set values */
	memcpy(&old, ptr, sizeof(struct newreno));
	/* Copy back in the original values */
	memcpy(ptr, &rack->r_ctl.rc_saved_beta, sizeof(struct newreno));
	/* Now save back the values we had set in (for when pacing is restored) */
	memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
	if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		ptr = ((struct newreno *)tp->ccv->cc_data);
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.timeStamp = tcp_get_usecs(&tv);
		log.u_bbr.flex1 = ptr->beta;
		log.u_bbr.flex2 = ptr->beta_ecn;
		log.u_bbr.flex3 = ptr->newreno_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
		log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
		log.u_bbr.flex6 = rack->r_ctl.rc_saved_beta.newreno_flags;
		log.u_bbr.flex7 = rack->gp_ready;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->use_fixed_rate;
		log.u_bbr.flex7 <<= 1;
		log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
		log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex8 = 4;
		tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
		    0, &log, false, NULL, NULL, 0, &tv);
	}
}
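
/*
 * Note (illustrative, not part of the original source): the two functions
 * above are expected to be used in pairs. rack_set_cc_pacing() stashes the
 * connection's current New Reno beta/beta_ecn in r_ctl.rc_saved_beta and
 * installs rack's pacing values through the CC module's ctl_output hook;
 * rack_undo_cc_pacing() swaps them back. Because both swap through
 * rc_saved_beta, repeated set/undo calls simply toggle between the two
 * parameter sets.
 */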
#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
	/* Keep in mind that t_maxpeakrate is in B/s. */
	uint64_t peak;

	peak = uqmax((tp->t_maxseg * 2),
	    (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
	tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
	uint32_t stat;
	int32_t error;

	error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
	if (error || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
	if (error)
		return (error);
	if (stat == 1) {
#ifdef INVARIANTS
		printf("Clearing RACK counters\n");
#endif
		counter_u64_zero(rack_tlp_tot);
		counter_u64_zero(rack_tlp_newdata);
		counter_u64_zero(rack_tlp_retran);
		counter_u64_zero(rack_tlp_retran_bytes);
		counter_u64_zero(rack_to_tot);
		counter_u64_zero(rack_saw_enobuf);
		counter_u64_zero(rack_saw_enobuf_hw);
		counter_u64_zero(rack_saw_enetunreach);
		counter_u64_zero(rack_persists_sends);
		counter_u64_zero(rack_persists_acks);
		counter_u64_zero(rack_persists_loss);
		counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
		counter_u64_zero(rack_adjust_map_bw);
#endif
		counter_u64_zero(rack_to_alloc_hard);
		counter_u64_zero(rack_to_alloc_emerg);
		counter_u64_zero(rack_sack_proc_all);
		counter_u64_zero(rack_fto_send);
		counter_u64_zero(rack_fto_rsm_send);
		counter_u64_zero(rack_extended_rfo);
		counter_u64_zero(rack_hw_pace_init_fail);
		counter_u64_zero(rack_hw_pace_lost);
		counter_u64_zero(rack_non_fto_send);
		counter_u64_zero(rack_nfto_resend);
		counter_u64_zero(rack_sack_proc_short);
		counter_u64_zero(rack_sack_proc_restart);
		counter_u64_zero(rack_to_alloc);
		counter_u64_zero(rack_to_alloc_limited);
		counter_u64_zero(rack_alloc_limited_conns);
		counter_u64_zero(rack_split_limited);
		counter_u64_zero(rack_multi_single_eq);
		counter_u64_zero(rack_proc_non_comp_ack);
		counter_u64_zero(rack_sack_attacks_detected);
		counter_u64_zero(rack_sack_attacks_reversed);
		counter_u64_zero(rack_sack_used_next_merge);
		counter_u64_zero(rack_sack_used_prev_merge);
		counter_u64_zero(rack_sack_splits);
		counter_u64_zero(rack_sack_skipped_acked);
		counter_u64_zero(rack_ack_total);
		counter_u64_zero(rack_express_sack);
		counter_u64_zero(rack_sack_total);
		counter_u64_zero(rack_move_none);
		counter_u64_zero(rack_move_some);
		counter_u64_zero(rack_try_scwnd);
		counter_u64_zero(rack_collapsed_win);
		counter_u64_zero(rack_collapsed_win_rxt);
		counter_u64_zero(rack_collapsed_win_seen);
		counter_u64_zero(rack_collapsed_win_rxt_bytes);
	}
	rack_clear_counter = 0;
	return (0);
}
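
/*
 * Usage note (illustrative, not part of the original source): writing 1 to
 * the sysctl backed by sysctl_rack_clear() zeroes the counters listed above;
 * any other value is accepted but ignored. rack_clear_counter is reset to 0
 * after every request, so reads normally return 0.
 */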
static void
rack_init_sysctls(void)
{
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;
	struct sysctl_oid *rack_pacing;
	struct sysctl_oid *rack_timely;
	struct sysctl_oid *rack_timers;
	struct sysctl_oid *rack_tlp;
	struct sysctl_oid *rack_misc;
	struct sysctl_oid *rack_features;
	struct sysctl_oid *rack_measure;
	struct sysctl_oid *rack_probertt;
	struct sysctl_oid *rack_hw_pacing;
	struct sysctl_oid *rack_tracepoint;

	rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "sack_attack",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Sack Attack Counters and Controls");
	rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "stats",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Counters");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
	    &rack_rate_sample_method, USE_RTT_LOW,
	    "What method should we use for rate sampling 0=high, 1=low");
	/* Probe rtt related controls */
	rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "probertt",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "ProbeRTT related Controls");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
	    &rack_atexit_prtt_hbp, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
	    &rack_atexit_prtt, 130,
	    "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_mul", CTLFLAG_RW,
	    &rack_per_of_gp_probertt, 60,
	    "What percentage of goodput do we pace at in probertt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
	    &rack_per_of_gp_probertt_reduce, 10,
	    "What percentage of goodput do we reduce every gp_srtt");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "gp_per_low", CTLFLAG_RW,
	    &rack_per_of_gp_lowthresh, 40,
	    "What percentage of goodput do we allow the multiplier to fall to");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "time_between", CTLFLAG_RW,
	    &rack_time_between_probertt, 96000000,
	    "How many useconds between the lowest rtt falling must pass before we enter probertt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "safety", CTLFLAG_RW,
	    &rack_probe_rtt_safety_val, 2000000,
	    "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "sets_cwnd", CTLFLAG_RW,
	    &rack_probe_rtt_sets_cwnd, 0,
	    "Do we set the cwnd too (if always_lower is on)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
	    &rack_max_drain_wait, 2,
	    "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
	    &rack_must_drain, 1,
	    "We must drain this many gp_srtt's waiting for flight to reach goal");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_entry, 1,
	    "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
	    &rack_probertt_use_min_rtt_exit, 0,
	    "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_div", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_div, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "length_mul", CTLFLAG_RW,
	    &rack_probertt_gpsrtt_cnt_mul, 0,
	    "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
	    &rack_min_probertt_hold, 200000,
	    "What is the minimum time we hold probertt at target");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "filter_life", CTLFLAG_RW,
	    &rack_probertt_filter_life, 10000000,
	    "What is the time for the filters life in useconds");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "lower_within", CTLFLAG_RW,
	    &rack_probertt_lower_within, 10,
	    "If the rtt goes lower within this percentage of the time, go into probe-rtt");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "must_move", CTLFLAG_RW,
	    &rack_min_rtt_movement, 250,
	    "How much is the minimum movement in rtt to count as a drop for probertt purposes");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
	    &rack_probertt_clear_is, 1,
	    "Do we clear I/S counts on exiting probe-rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
	    &rack_max_drain_hbp, 1,
	    "How many extra drain gpsrtt's do we get in highly buffered paths");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_probertt),
	    OID_AUTO, "hbp_threshold", CTLFLAG_RW,
	    &rack_hbp_thresh, 3,
	    "We are highly buffered if max_rtt_seen / min_rtt_seen > this-threshold");

	rack_tracepoint = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "tp",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack tracepoint facility");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tracepoint),
	    OID_AUTO, "number", CTLFLAG_RW,
	    &rack_trace_point_config, 0,
	    "What is the trace point number to activate (0=none, 0xffffffff = all)?");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tracepoint),
	    OID_AUTO, "bbmode", CTLFLAG_RW,
	    &rack_trace_point_bb_mode, 4,
	    "What is the BB logging mode that is activated?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tracepoint),
	    OID_AUTO, "count", CTLFLAG_RW,
	    &rack_trace_point_count, 0,
	    "How many connections will have BB logging turned on that hit the tracepoint?");
	/* Pacing related sysctls */
	rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "max_pace_over", CTLFLAG_RW,
	    &rack_max_per_above, 30,
	    "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_to_one", CTLFLAG_RW,
	    &rack_pace_one_seg, 0,
	    "Do we allow low b/w pacing of 1MSS instead of two");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
	    &rack_limit_time_with_srtt, 0,
	    "Do we limit pacing time based on srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "init_win", CTLFLAG_RW,
	    &rack_default_init_window, 0,
	    "Do we have a rack initial window 0 = system default");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ss", CTLFLAG_RW,
	    &rack_per_of_gp_ss, 250,
	    "If non zero, what percentage of goodput to pace at in slow start");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_ca", CTLFLAG_RW,
	    &rack_per_of_gp_ca, 150,
	    "If non zero, what percentage of goodput to pace at in congestion avoidance");
	SYSCTL_ADD_U16(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "gp_per_rec", CTLFLAG_RW,
	    &rack_per_of_gp_rec, 200,
	    "If non zero, what percentage of goodput to pace at in recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "pace_max_seg", CTLFLAG_RW,
	    &rack_hptsi_segments, 40,
	    "What size is the max for TSO segments in pacing and burst mitigation");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "burst_reduces", CTLFLAG_RW,
	    &rack_slot_reduction, 4,
	    "When doing only burst mitigation what is the reduce divisor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "use_pacing", CTLFLAG_RW,
	    &rack_pace_every_seg, 0,
	    "If set we use pacing, if clear we use only the original burst mitigation");
	SYSCTL_ADD_U64(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_bw_rate_cap, 0,
	    "If set we apply this value to the absolute rate cap used by pacing");
	SYSCTL_ADD_U8(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
	    &rack_req_measurements, 1,
	    "If doing dynamic pacing, how many measurements must be in before we start pacing?");
	/* Hardware pacing */
	rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "hdwr_pacing",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Hardware Pacing related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rwnd_factor", CTLFLAG_RW,
	    &rack_hw_rwnd_factor, 2,
	    "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
	    &rack_enobuf_hw_boost_mult, 2,
	    "By how many time_betweens should we boost the pacing time if we see ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
	    &rack_enobuf_hw_max, 2,
	    "What is the max boost of the pacing time if we see ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
	    &rack_enobuf_hw_min, 2,
	    "What is the min boost of the pacing time if we see ENOBUFS?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "enable", CTLFLAG_RW,
	    &rack_enable_hw_pacing, 0,
	    "Should RACK attempt to use hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_cap", CTLFLAG_RW,
	    &rack_hw_rate_caps, 1,
	    "Does the highest hardware pacing rate cap the rate we will send at?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_min", CTLFLAG_RW,
	    &rack_hw_rate_min, 0,
	    "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "rate_to_low", CTLFLAG_RW,
	    &rack_hw_rate_to_low, 0,
	    "If we fall below this rate, dis-engage hw pacing?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "up_only", CTLFLAG_RW,
	    &rack_hw_up_only, 1,
	    "Do we allow hw pacing to lower the rate selected?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_hw_pacing),
	    OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
	    &rack_hw_pace_extra_slots, 2,
	    "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
	rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timely",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Rack Timely RTT Controls");
	/* Timely based GP dynamics */
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upper", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_up, 2,
	    "Rack timely upper range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lower", CTLFLAG_RW,
	    &rack_gp_per_bw_mul_down, 4,
	    "Rack timely lower range for equal b/w (in percentage)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
	    &rack_gp_rtt_maxmul, 3,
	    "Rack timely multiplier of lowest rtt for rtt_max");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_div", CTLFLAG_RW,
	    &rack_gp_rtt_mindiv, 4,
	    "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
	    &rack_gp_rtt_minmul, 1,
	    "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "decrease", CTLFLAG_RW,
	    &rack_gp_decrease_per, 20,
	    "Rack timely decrease percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "increase", CTLFLAG_RW,
	    &rack_gp_increase_per, 2,
	    "Rack timely increase percentage of our GP multiplication factor");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "lowerbound", CTLFLAG_RW,
	    &rack_per_lower_bound, 50,
	    "Rack timely lowest percentage we allow GP multiplier to fall to");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundss", CTLFLAG_RW,
	    &rack_per_upper_bound_ss, 0,
	    "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "upperboundca", CTLFLAG_RW,
	    &rack_per_upper_bound_ca, 0,
	    "Rack timely highest percentage we allow GP multiplier in CA to raise to (0 is no upperbound)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dynamicgp", CTLFLAG_RW,
	    &rack_do_dyn_mul, 0,
	    "Rack timely do we enable dynamic timely goodput by default");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "no_rec_red", CTLFLAG_RW,
	    &rack_gp_no_rec_chg, 1,
	    "Rack timely do we prohibit the recovery multiplier from being lowered");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
	    &rack_timely_dec_clear, 6,
	    "Rack timely what threshold do we count to before another boost during b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_rise", CTLFLAG_RW,
	    &rack_timely_max_push_rise, 3,
	    "Rack timely how many times do we push up with b/w increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "max_push_drop", CTLFLAG_RW,
	    &rack_timely_max_push_drop, 3,
	    "Rack timely how many times do we push back on b/w descent");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "min_segs", CTLFLAG_RW,
	    &rack_timely_min_segs, 4,
	    "Rack timely when setting the cwnd what is the min num segments");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "noback_max", CTLFLAG_RW,
	    &rack_use_max_for_nobackoff, 0,
	    "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "interim_timely_only", CTLFLAG_RW,
	    &rack_timely_int_timely_only, 0,
	    "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "nonstop", CTLFLAG_RW,
	    &rack_timely_no_stopping, 0,
	    "Rack timely don't stop increase");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
	    &rack_down_raise_thresh, 100,
	    "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timely),
	    OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
	    &rack_req_segs, 1,
	    "Bottom dragging if not this many segments outstanding and room");

	/* TLP and Rack related parameters */
	rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "tlp",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "TLP and Rack related Controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_rrr", CTLFLAG_RW,
	    &use_rack_rr, 1,
	    "Do we use Rack Rapid Recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "post_rec_labc", CTLFLAG_RW,
	    &rack_max_abc_post_recovery, 2,
	    "Since we do early recovery, do we override the l_abc to a value, if so what?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
	    &rack_non_rxt_use_cr, 0,
	    "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpmethod", CTLFLAG_RW,
	    &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
	    "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "limit", CTLFLAG_RW,
	    &rack_tlp_limit, 2,
	    "How many TLP's can be sent without sending new data");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "use_greater", CTLFLAG_RW,
	    &rack_tlp_use_greater, 1,
	    "Should we use the rack_rtt time if it is greater than srtt");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlpminto", CTLFLAG_RW,
	    &rack_tlp_min, 10000,
	    "TLP minimum timeout per the specification (in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "send_oldest", CTLFLAG_RW,
	    &rack_always_send_oldest, 0,
	    "Should we always send the oldest TLP and RACK-TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rack_tlimit", CTLFLAG_RW,
	    &rack_limited_retran, 0,
	    "How many times can a rack timeout drive out sends");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
	    &rack_lower_cwnd_at_tlp, 0,
	    "When a TLP completes a retransmit, should we enter recovery");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_thresh", CTLFLAG_RW,
	    &rack_reorder_thresh, 2,
	    "What factor for rack will be added when seeing reordering (shift right)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
	    &rack_tlp_thresh, 1,
	    "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "reorder_fade", CTLFLAG_RW,
	    &rack_reorder_fade, 60000000,
	    "Does reorder detection fade, if so how many microseconds (0 means never)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_tlp),
	    OID_AUTO, "pktdelay", CTLFLAG_RW,
	    &rack_pkt_delay, 1000,
	    "Extra RACK time (in microseconds) besides reordering thresh");

	/* Timer related controls */
	rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "timers",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Timer related controls");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmin", CTLFLAG_RW,
	    &rack_persist_min, 250000,
	    "What is the minimum time in microseconds between persists");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "persmax", CTLFLAG_RW,
	    &rack_persist_max, 2000000,
	    "What is the largest delay in microseconds between persists");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "delayed_ack", CTLFLAG_RW,
	    &rack_delayed_ack_time, 40000,
	    "Delayed ack time (40ms in microseconds)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minrto", CTLFLAG_RW,
	    &rack_rto_min, 30000,
	    "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "maxrto", CTLFLAG_RW,
	    &rack_rto_max, 4000000,
	    "Maximum RTO in microseconds -- should be at least as large as min_rto");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_timers),
	    OID_AUTO, "minto", CTLFLAG_RW,
	    &rack_min_to, 1000,
	    "Minimum rack timeout in microseconds");
	/* Measure controls */
	rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "measure",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Measure related controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "wma_divisor", CTLFLAG_RW,
	    &rack_wma_divisor, 8,
	    "When doing b/w calculation what is the divisor for the WMA");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_cwnd", CTLFLAG_RW,
	    &rack_cwnd_block_ends_measure, 0,
	    "Does a cwnd just-return end the measurement window (app limited)");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "end_rwnd", CTLFLAG_RW,
	    &rack_rwnd_block_ends_measure, 0,
	    "Does an rwnd just-return end the measurement window (app limited -- not persists)");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_target", CTLFLAG_RW,
	    &rack_def_data_window, 20,
	    "What is the minimum target window (in mss) for a GP measurement");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "goal_bdp", CTLFLAG_RW,
	    &rack_goal_bdp, 2,
	    "What is the goal BDP to measure");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_srtts", CTLFLAG_RW,
	    &rack_min_srtts, 1,
	    "What is the minimum number of srtt's required for a measurement");
	SYSCTL_ADD_U32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_measure),
	    OID_AUTO, "min_measure_tim", CTLFLAG_RW,
	    &rack_min_measure_usec, 0,
	    "What is the minimum time for a measurement, if 0 this is off");
	/* Features */
	rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "features",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Feature controls");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "cmpack", CTLFLAG_RW,
	    &rack_use_cmp_acks, 1,
	    "Should RACK have LRO send compressed acks");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "fsb", CTLFLAG_RW,
	    &rack_use_fsb, 1,
	    "Should RACK use the fast send block?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rfo", CTLFLAG_RW,
	    &rack_use_rfo, 1,
	    "Should RACK use rack_fast_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "rsmrfo", CTLFLAG_RW,
	    &rack_use_rsm_rfo, 1,
	    "Should RACK use rack_fast_rsm_output()?");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
	    &rack_enable_mqueue_for_nonpaced, 0,
	    "Should RACK use mbuf queuing for non-paced connections");
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_features),
	    OID_AUTO, "hystartplusplus", CTLFLAG_RW,
	    &rack_do_hystart, 0,
	    "Should RACK enable HyStart++ on connections?");
	/* Misc rack controls */
	rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO,
	    "misc",
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "Misc related controls");
#ifdef TCP_ACCOUNTING
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "tcp_acct", CTLFLAG_RW,
	    &rack_tcp_accounting, 0,
	    "Should we turn on TCP accounting for all rack sessions?");
#endif
	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_misc),
	    OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
	    &rack_apply_rtt_with_reduced_conf, 0,
	    "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?");
&rack_apply_rtt_with_reduced_conf, 0, 1420 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1421 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1422 SYSCTL_CHILDREN(rack_misc), 1423 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1424 &rack_dsack_std_based, 3, 1425 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1426 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1427 SYSCTL_CHILDREN(rack_misc), 1428 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1429 &rack_prr_addbackmax, 2, 1430 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1431 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1432 SYSCTL_CHILDREN(rack_misc), 1433 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1434 &rack_stats_gets_ms_rtt, 1, 1435 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1436 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1437 SYSCTL_CHILDREN(rack_misc), 1438 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1439 &rack_client_low_buf, 0, 1440 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1441 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1442 SYSCTL_CHILDREN(rack_misc), 1443 OID_AUTO, "defprofile", CTLFLAG_RW, 1444 &rack_def_profile, 0, 1445 "Should RACK use a default profile (0=no, num == profile num)?"); 1446 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1447 SYSCTL_CHILDREN(rack_misc), 1448 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1449 &rack_enable_shared_cwnd, 1, 1450 "Should RACK try to use the shared cwnd on connections where allowed"); 1451 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1452 SYSCTL_CHILDREN(rack_misc), 1453 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1454 &rack_limits_scwnd, 1, 1455 "Should RACK place low end time limits on the shared cwnd feature"); 1456 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1457 SYSCTL_CHILDREN(rack_misc), 1458 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1459 &rack_use_imac_dack, 0, 1460 "Should RACK try to emulate iMac delayed ack"); 1461 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1462 SYSCTL_CHILDREN(rack_misc), 1463 OID_AUTO, "no_prr", CTLFLAG_RW, 1464 &rack_disable_prr, 0, 1465 "Should RACK not use prr and only pace (must have pacing on)"); 1466 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1467 SYSCTL_CHILDREN(rack_misc), 1468 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1469 &rack_verbose_logging, 0, 1470 "Should RACK black box logging be verbose"); 1471 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1472 SYSCTL_CHILDREN(rack_misc), 1473 OID_AUTO, "data_after_close", CTLFLAG_RW, 1474 &rack_ignore_data_after_close, 1, 1475 "Do we hold off sending a RST until all pending data is ack'd"); 1476 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1477 SYSCTL_CHILDREN(rack_misc), 1478 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1479 &rack_sack_not_required, 1, 1480 "Do we allow rack to run on connections not supporting SACK"); 1481 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1482 SYSCTL_CHILDREN(rack_misc), 1483 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1484 &rack_send_a_lot_in_prr, 1, 1485 "Send a lot in prr"); 1486 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1487 SYSCTL_CHILDREN(rack_misc), 1488 OID_AUTO, "autoscale", CTLFLAG_RW, 1489 &rack_autosndbuf_inc, 20, 1490 "What percentage should rack scale up its snd buffer by?"); 1491 /* Sack Attacker detection stuff */ 1492 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1493 SYSCTL_CHILDREN(rack_attack), 1494 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1495 &rack_highest_sack_thresh_seen, 0, 1496 "Highest sack to ack ratio seen"); 1497 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1498 SYSCTL_CHILDREN(rack_attack), 1499 OID_AUTO, 
"detect_highmoveratio", CTLFLAG_RW, 1500 &rack_highest_move_thresh_seen, 0, 1501 "Highest move to non-move ratio seen"); 1502 rack_ack_total = counter_u64_alloc(M_WAITOK); 1503 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1504 SYSCTL_CHILDREN(rack_attack), 1505 OID_AUTO, "acktotal", CTLFLAG_RD, 1506 &rack_ack_total, 1507 "Total number of Ack's"); 1508 rack_express_sack = counter_u64_alloc(M_WAITOK); 1509 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1510 SYSCTL_CHILDREN(rack_attack), 1511 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1512 &rack_express_sack, 1513 "Total expresss number of Sack's"); 1514 rack_sack_total = counter_u64_alloc(M_WAITOK); 1515 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1516 SYSCTL_CHILDREN(rack_attack), 1517 OID_AUTO, "sacktotal", CTLFLAG_RD, 1518 &rack_sack_total, 1519 "Total number of SACKs"); 1520 rack_move_none = counter_u64_alloc(M_WAITOK); 1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_attack), 1523 OID_AUTO, "move_none", CTLFLAG_RD, 1524 &rack_move_none, 1525 "Total number of SACK index reuse of positions under threshold"); 1526 rack_move_some = counter_u64_alloc(M_WAITOK); 1527 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1528 SYSCTL_CHILDREN(rack_attack), 1529 OID_AUTO, "move_some", CTLFLAG_RD, 1530 &rack_move_some, 1531 "Total number of SACK index reuse of positions over threshold"); 1532 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1533 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1534 SYSCTL_CHILDREN(rack_attack), 1535 OID_AUTO, "attacks", CTLFLAG_RD, 1536 &rack_sack_attacks_detected, 1537 "Total number of SACK attackers that had sack disabled"); 1538 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1539 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1540 SYSCTL_CHILDREN(rack_attack), 1541 OID_AUTO, "reversed", CTLFLAG_RD, 1542 &rack_sack_attacks_reversed, 1543 "Total number of SACK attackers that were later determined false positive"); 1544 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1545 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1546 SYSCTL_CHILDREN(rack_attack), 1547 OID_AUTO, "nextmerge", CTLFLAG_RD, 1548 &rack_sack_used_next_merge, 1549 "Total number of times we used the next merge"); 1550 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1551 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1552 SYSCTL_CHILDREN(rack_attack), 1553 OID_AUTO, "prevmerge", CTLFLAG_RD, 1554 &rack_sack_used_prev_merge, 1555 "Total number of times we used the prev merge"); 1556 /* Counters */ 1557 rack_fto_send = counter_u64_alloc(M_WAITOK); 1558 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1559 SYSCTL_CHILDREN(rack_counters), 1560 OID_AUTO, "fto_send", CTLFLAG_RD, 1561 &rack_fto_send, "Total number of rack_fast_output sends"); 1562 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1563 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1564 SYSCTL_CHILDREN(rack_counters), 1565 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1566 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1567 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1568 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1569 SYSCTL_CHILDREN(rack_counters), 1570 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1571 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1572 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1573 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1574 SYSCTL_CHILDREN(rack_counters), 1575 OID_AUTO, "nfto_send", CTLFLAG_RD, 1576 &rack_non_fto_send, "Total number of rack_output first sends"); 1577 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1578 
SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1579 SYSCTL_CHILDREN(rack_counters), 1580 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1581 &rack_extended_rfo, "Total number of times we extended rfo"); 1582 1583 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1584 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1585 SYSCTL_CHILDREN(rack_counters), 1586 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1587 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1588 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1589 1590 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1591 SYSCTL_CHILDREN(rack_counters), 1592 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1593 &rack_hw_pace_lost, "Total number of times we lost our hw pacing rate after initializing it"); 1594 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1595 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1596 SYSCTL_CHILDREN(rack_counters), 1597 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1598 &rack_tlp_tot, 1599 "Total number of tail loss probe expirations"); 1600 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1601 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1602 SYSCTL_CHILDREN(rack_counters), 1603 OID_AUTO, "tlp_new", CTLFLAG_RD, 1604 &rack_tlp_newdata, 1605 "Total number of tail loss probes sending new data"); 1606 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1607 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1608 SYSCTL_CHILDREN(rack_counters), 1609 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1610 &rack_tlp_retran, 1611 "Total number of tail loss probes sending retransmitted data"); 1612 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1613 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1614 SYSCTL_CHILDREN(rack_counters), 1615 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1616 &rack_tlp_retran_bytes, 1617 "Total bytes of tail loss probes sending retransmitted data"); 1618 rack_to_tot = counter_u64_alloc(M_WAITOK); 1619 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1620 SYSCTL_CHILDREN(rack_counters), 1621 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1622 &rack_to_tot, 1623 "Total number of times the rack timeout expired"); 1624 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1625 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1626 SYSCTL_CHILDREN(rack_counters), 1627 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1628 &rack_saw_enobuf, 1629 "Total number of times a send returned ENOBUFS for non-hdwr paced connections"); 1630 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1631 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1632 SYSCTL_CHILDREN(rack_counters), 1633 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1634 &rack_saw_enobuf_hw, 1635 "Total number of times a send returned ENOBUFS for hdwr paced connections"); 1636 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1637 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1638 SYSCTL_CHILDREN(rack_counters), 1639 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1640 &rack_saw_enetunreach, 1641 "Total number of times a send received an ENETUNREACH"); 1642 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1643 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1644 SYSCTL_CHILDREN(rack_counters), 1645 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1646 &rack_hot_alloc, 1647 "Total allocations from the top of our list"); 1648 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1649 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1650 SYSCTL_CHILDREN(rack_counters), 1651 OID_AUTO, "allocs", CTLFLAG_RD, 1652 &rack_to_alloc, 1653 "Total allocations of tracking structures"); 1654 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1655 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1656 SYSCTL_CHILDREN(rack_counters), 1657 OID_AUTO, "allochard",
CTLFLAG_RD, 1658 &rack_to_alloc_hard, 1659 "Total allocations done the hard way (with sleeping)"); 1660 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1661 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1662 SYSCTL_CHILDREN(rack_counters), 1663 OID_AUTO, "allocemerg", CTLFLAG_RD, 1664 &rack_to_alloc_emerg, 1665 "Total allocations done from emergency cache"); 1666 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1667 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1668 SYSCTL_CHILDREN(rack_counters), 1669 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1670 &rack_to_alloc_limited, 1671 "Total allocations dropped due to limit"); 1672 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_counters), 1675 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1676 &rack_alloc_limited_conns, 1677 "Connections with allocations dropped due to limit"); 1678 rack_split_limited = counter_u64_alloc(M_WAITOK); 1679 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1680 SYSCTL_CHILDREN(rack_counters), 1681 OID_AUTO, "split_limited", CTLFLAG_RD, 1682 &rack_split_limited, 1683 "Split allocations dropped due to limit"); 1684 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1685 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1686 SYSCTL_CHILDREN(rack_counters), 1687 OID_AUTO, "persist_sends", CTLFLAG_RD, 1688 &rack_persists_sends, 1689 "Number of times we sent a persist probe"); 1690 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1691 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1692 SYSCTL_CHILDREN(rack_counters), 1693 OID_AUTO, "persist_acks", CTLFLAG_RD, 1694 &rack_persists_acks, 1695 "Number of times a persist probe was acked"); 1696 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1697 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1698 SYSCTL_CHILDREN(rack_counters), 1699 OID_AUTO, "persist_loss", CTLFLAG_RD, 1700 &rack_persists_loss, 1701 "Number of times we detected a lost persist probe (no ack)"); 1702 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1703 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1704 SYSCTL_CHILDREN(rack_counters), 1705 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1706 &rack_persists_lost_ends, 1707 "Number of lost persist probes (no ack) where the run ended with a PERSIST abort"); 1708 #ifdef INVARIANTS 1709 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1710 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1711 SYSCTL_CHILDREN(rack_counters), 1712 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1713 &rack_adjust_map_bw, 1714 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1715 #endif 1716 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1717 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1718 SYSCTL_CHILDREN(rack_counters), 1719 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1720 &rack_multi_single_eq, 1721 "Total number of ACKs represented by compressed ACKs"); 1722 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1723 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1724 SYSCTL_CHILDREN(rack_counters), 1725 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1726 &rack_proc_non_comp_ack, 1727 "Number of non-compressed ACKs that we processed"); 1728 1729 1730 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1731 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1732 SYSCTL_CHILDREN(rack_counters), 1733 OID_AUTO, "sack_long", CTLFLAG_RD, 1734 &rack_sack_proc_all, 1735 "Total times we had to walk the whole list for sack processing"); 1736 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters),
1739 OID_AUTO, "sack_restart", CTLFLAG_RD, 1740 &rack_sack_proc_restart, 1741 "Total times we had to walk whole list due to a restart"); 1742 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "sack_short", CTLFLAG_RD, 1746 &rack_sack_proc_short, 1747 "Total times we took shortcut for sack processing"); 1748 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_attack), 1751 OID_AUTO, "skipacked", CTLFLAG_RD, 1752 &rack_sack_skipped_acked, 1753 "Total number of times we skipped previously sacked"); 1754 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_attack), 1757 OID_AUTO, "ofsplit", CTLFLAG_RD, 1758 &rack_sack_splits, 1759 "Total number of times we did the old fashion tree split"); 1760 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1762 SYSCTL_CHILDREN(rack_counters), 1763 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1764 &rack_input_idle_reduces, 1765 "Total number of idle reductions on input"); 1766 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1767 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1768 SYSCTL_CHILDREN(rack_counters), 1769 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1770 &rack_collapsed_win_seen, 1771 "Total number of collapsed window events seen (where our window shrinks)"); 1772 1773 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1774 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1775 SYSCTL_CHILDREN(rack_counters), 1776 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1777 &rack_collapsed_win, 1778 "Total number of collapsed window events where we mark packets"); 1779 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1780 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1781 SYSCTL_CHILDREN(rack_counters), 1782 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1783 &rack_collapsed_win_rxt, 1784 "Total number of packets that were retransmitted"); 1785 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1786 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1787 SYSCTL_CHILDREN(rack_counters), 1788 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1789 &rack_collapsed_win_rxt_bytes, 1790 "Total number of bytes that were retransmitted"); 1791 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1792 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1793 SYSCTL_CHILDREN(rack_counters), 1794 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1795 &rack_try_scwnd, 1796 "Total number of scwnd attempts"); 1797 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1798 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1799 OID_AUTO, "outsize", CTLFLAG_RD, 1800 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1801 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1802 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1803 OID_AUTO, "opts", CTLFLAG_RD, 1804 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1805 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1806 SYSCTL_CHILDREN(rack_sysctl_root), 1807 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1808 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1809 } 1810 1811 static __inline int 1812 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1813 { 1814 if (SEQ_GEQ(b->r_start, a->r_start) && 1815 SEQ_LT(b->r_start, a->r_end)) { 1816 /* 1817 * The entry b is within the 
1818 * block a. i.e.: 1819 * a -- |-------------| 1820 * b -- |----| 1821 * <or> 1822 * b -- |------| 1823 * <or> 1824 * b -- |-----------| 1825 */ 1826 return (0); 1827 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1828 /* 1829 * b falls as either the next 1830 * sequence block after a so a 1831 * is said to be smaller than b. 1832 * i.e: 1833 * a -- |------| 1834 * b -- |--------| 1835 * or 1836 * b -- |-----| 1837 */ 1838 return (1); 1839 } 1840 /* 1841 * Whats left is where a is 1842 * larger than b. i.e: 1843 * a -- |-------| 1844 * b -- |---| 1845 * or even possibly 1846 * b -- |--------------| 1847 */ 1848 return (-1); 1849 } 1850 1851 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1852 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1853 1854 static uint32_t 1855 rc_init_window(struct tcp_rack *rack) 1856 { 1857 uint32_t win; 1858 1859 if (rack->rc_init_win == 0) { 1860 /* 1861 * Nothing set by the user, use the system stack 1862 * default. 1863 */ 1864 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1865 } 1866 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1867 return (win); 1868 } 1869 1870 static uint64_t 1871 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1872 { 1873 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1874 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1875 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1876 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1877 else 1878 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1879 } 1880 1881 static uint64_t 1882 rack_get_bw(struct tcp_rack *rack) 1883 { 1884 if (rack->use_fixed_rate) { 1885 /* Return the fixed pacing rate */ 1886 return (rack_get_fixed_pacing_bw(rack)); 1887 } 1888 if (rack->r_ctl.gp_bw == 0) { 1889 /* 1890 * We have yet no b/w measurement, 1891 * if we have a user set initial bw 1892 * return it. If we don't have that and 1893 * we have an srtt, use the tcp IW (10) to 1894 * calculate a fictional b/w over the SRTT 1895 * which is more or less a guess. Note 1896 * we don't use our IW from rack on purpose 1897 * so if we have like IW=30, we are not 1898 * calculating a "huge" b/w. 1899 */ 1900 uint64_t bw, srtt; 1901 if (rack->r_ctl.init_rate) 1902 return (rack->r_ctl.init_rate); 1903 1904 /* Has the user set a max peak rate? */ 1905 #ifdef NETFLIX_PEAKRATE 1906 if (rack->rc_tp->t_maxpeakrate) 1907 return (rack->rc_tp->t_maxpeakrate); 1908 #endif 1909 /* Ok lets come up with the IW guess, if we have a srtt */ 1910 if (rack->rc_tp->t_srtt == 0) { 1911 /* 1912 * Go with old pacing method 1913 * i.e. burst mitigation only. 1914 */ 1915 return (0); 1916 } 1917 /* Ok lets get the initial TCP win (not racks) */ 1918 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1919 srtt = (uint64_t)rack->rc_tp->t_srtt; 1920 bw *= (uint64_t)USECS_IN_SECOND; 1921 bw /= srtt; 1922 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1923 bw = rack->r_ctl.bw_rate_cap; 1924 return (bw); 1925 } else { 1926 uint64_t bw; 1927 1928 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1929 /* Averaging is done, we can return the value */ 1930 bw = rack->r_ctl.gp_bw; 1931 } else { 1932 /* Still doing initial average must calculate */ 1933 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1934 } 1935 #ifdef NETFLIX_PEAKRATE 1936 if ((rack->rc_tp->t_maxpeakrate) && 1937 (bw > rack->rc_tp->t_maxpeakrate)) { 1938 /* The user has set a peak rate to pace at 1939 * don't allow us to pace faster than that. 
1940 */ 1941 return (rack->rc_tp->t_maxpeakrate); 1942 } 1943 #endif 1944 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1945 bw = rack->r_ctl.bw_rate_cap; 1946 return (bw); 1947 } 1948 } 1949 1950 static uint16_t 1951 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1952 { 1953 if (rack->use_fixed_rate) { 1954 return (100); 1955 } else if (rack->in_probe_rtt && (rsm == NULL)) 1956 return (rack->r_ctl.rack_per_of_gp_probertt); 1957 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1958 rack->r_ctl.rack_per_of_gp_rec)) { 1959 if (rsm) { 1960 /* a retransmission always use the recovery rate */ 1961 return (rack->r_ctl.rack_per_of_gp_rec); 1962 } else if (rack->rack_rec_nonrxt_use_cr) { 1963 /* Directed to use the configured rate */ 1964 goto configured_rate; 1965 } else if (rack->rack_no_prr && 1966 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1967 /* No PRR, lets just use the b/w estimate only */ 1968 return (100); 1969 } else { 1970 /* 1971 * Here we may have a non-retransmit but we 1972 * have no overrides, so just use the recovery 1973 * rate (prr is in effect). 1974 */ 1975 return (rack->r_ctl.rack_per_of_gp_rec); 1976 } 1977 } 1978 configured_rate: 1979 /* For the configured rate we look at our cwnd vs the ssthresh */ 1980 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1981 return (rack->r_ctl.rack_per_of_gp_ss); 1982 else 1983 return (rack->r_ctl.rack_per_of_gp_ca); 1984 } 1985 1986 static void 1987 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1988 { 1989 /* 1990 * Types of logs (mod value) 1991 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1992 * 2 = a dsack round begins, persist is reset to 16. 1993 * 3 = a dsack round ends 1994 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1995 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1996 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
1997 */ 1998 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1999 union tcp_log_stackspecific log; 2000 struct timeval tv; 2001 2002 memset(&log, 0, sizeof(log)); 2003 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 2004 log.u_bbr.flex1 <<= 1; 2005 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 2006 log.u_bbr.flex1 <<= 1; 2007 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 2008 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 2009 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 2010 log.u_bbr.flex4 = flex4; 2011 log.u_bbr.flex5 = flex5; 2012 log.u_bbr.flex6 = flex6; 2013 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 2014 log.u_bbr.flex8 = mod; 2015 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2016 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2017 &rack->rc_inp->inp_socket->so_rcv, 2018 &rack->rc_inp->inp_socket->so_snd, 2019 RACK_DSACK_HANDLING, 0, 2020 0, &log, false, &tv); 2021 } 2022 } 2023 2024 static void 2025 rack_log_hdwr_pacing(struct tcp_rack *rack, 2026 uint64_t rate, uint64_t hw_rate, int line, 2027 int error, uint16_t mod) 2028 { 2029 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2030 union tcp_log_stackspecific log; 2031 struct timeval tv; 2032 const struct ifnet *ifp; 2033 2034 memset(&log, 0, sizeof(log)); 2035 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2036 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2037 if (rack->r_ctl.crte) { 2038 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2039 } else if (rack->rc_inp->inp_route.ro_nh && 2040 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2041 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2042 } else 2043 ifp = NULL; 2044 if (ifp) { 2045 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2046 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2047 } 2048 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2049 log.u_bbr.bw_inuse = rate; 2050 log.u_bbr.flex5 = line; 2051 log.u_bbr.flex6 = error; 2052 log.u_bbr.flex7 = mod; 2053 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2054 log.u_bbr.flex8 = rack->use_fixed_rate; 2055 log.u_bbr.flex8 <<= 1; 2056 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2057 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2058 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2059 if (rack->r_ctl.crte) 2060 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2061 else 2062 log.u_bbr.cur_del_rate = 0; 2063 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2064 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2065 &rack->rc_inp->inp_socket->so_rcv, 2066 &rack->rc_inp->inp_socket->so_snd, 2067 BBR_LOG_HDWR_PACE, 0, 2068 0, &log, false, &tv); 2069 } 2070 } 2071 2072 static uint64_t 2073 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2074 { 2075 /* 2076 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
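 * (Illustration with made-up numbers, not from the original comment:
 * an estimated bw of 1,000,000 bytes/sec and a gain of 150 from
 * rack_get_output_gain() yield a bw_est of 1,500,000 bytes/sec below,
 * which is then floored at RACK_MIN_BW and possibly clamped to the
 * highest hardware rate when rate caps are enabled.)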
2077 */ 2078 uint64_t bw_est, high_rate; 2079 uint64_t gain; 2080 2081 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2082 bw_est = bw * gain; 2083 bw_est /= (uint64_t)100; 2084 /* Never fall below the minimum (def 64kbps) */ 2085 if (bw_est < RACK_MIN_BW) 2086 bw_est = RACK_MIN_BW; 2087 if (rack->r_rack_hw_rate_caps) { 2088 /* Rate caps are in place */ 2089 if (rack->r_ctl.crte != NULL) { 2090 /* We have a hdwr rate already */ 2091 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2092 if (bw_est >= high_rate) { 2093 /* We are capping bw at the highest rate table entry */ 2094 rack_log_hdwr_pacing(rack, 2095 bw_est, high_rate, __LINE__, 2096 0, 3); 2097 bw_est = high_rate; 2098 if (capped) 2099 *capped = 1; 2100 } 2101 } else if ((rack->rack_hdrw_pacing == 0) && 2102 (rack->rack_hdw_pace_ena) && 2103 (rack->rack_attempt_hdwr_pace == 0) && 2104 (rack->rc_inp->inp_route.ro_nh != NULL) && 2105 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2106 /* 2107 * Special case, we have not yet attempted hardware 2108 * pacing, and yet we may, when we do, find out if we are 2109 * above the highest rate. We need to know the maxbw for the interface 2110 * in question (if it supports ratelimiting). We get back 2111 * a 0, if the interface is not found in the RL lists. 2112 */ 2113 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2114 if (high_rate) { 2115 /* Yep, we have a rate is it above this rate? */ 2116 if (bw_est > high_rate) { 2117 bw_est = high_rate; 2118 if (capped) 2119 *capped = 1; 2120 } 2121 } 2122 } 2123 } 2124 return (bw_est); 2125 } 2126 2127 static void 2128 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2129 { 2130 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2131 union tcp_log_stackspecific log; 2132 struct timeval tv; 2133 2134 if ((mod != 1) && (rack_verbose_logging == 0)) { 2135 /* 2136 * We get 3 values currently for mod 2137 * 1 - We are retransmitting and this tells the reason. 2138 * 2 - We are clearing a dup-ack count. 2139 * 3 - We are incrementing a dup-ack count. 2140 * 2141 * The clear/increment are only logged 2142 * if you have BBverbose on. 
2143 */ 2144 return; 2145 } 2146 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2147 log.u_bbr.flex1 = tsused; 2148 log.u_bbr.flex2 = thresh; 2149 log.u_bbr.flex3 = rsm->r_flags; 2150 log.u_bbr.flex4 = rsm->r_dupack; 2151 log.u_bbr.flex5 = rsm->r_start; 2152 log.u_bbr.flex6 = rsm->r_end; 2153 log.u_bbr.flex8 = mod; 2154 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2155 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2156 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2157 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2158 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2159 log.u_bbr.pacing_gain = rack->r_must_retran; 2160 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2161 &rack->rc_inp->inp_socket->so_rcv, 2162 &rack->rc_inp->inp_socket->so_snd, 2163 BBR_LOG_SETTINGS_CHG, 0, 2164 0, &log, false, &tv); 2165 } 2166 } 2167 2168 static void 2169 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2170 { 2171 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2172 union tcp_log_stackspecific log; 2173 struct timeval tv; 2174 2175 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2176 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2177 log.u_bbr.flex2 = to; 2178 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2179 log.u_bbr.flex4 = slot; 2180 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2181 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2182 log.u_bbr.flex7 = rack->rc_in_persist; 2183 log.u_bbr.flex8 = which; 2184 if (rack->rack_no_prr) 2185 log.u_bbr.pkts_out = 0; 2186 else 2187 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2189 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2190 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2191 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2192 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2193 log.u_bbr.pacing_gain = rack->r_must_retran; 2194 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2195 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2196 log.u_bbr.lost = rack_rto_min; 2197 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2198 &rack->rc_inp->inp_socket->so_rcv, 2199 &rack->rc_inp->inp_socket->so_snd, 2200 BBR_LOG_TIMERSTAR, 0, 2201 0, &log, false, &tv); 2202 } 2203 } 2204 2205 static void 2206 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2207 { 2208 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2209 union tcp_log_stackspecific log; 2210 struct timeval tv; 2211 2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2213 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2214 log.u_bbr.flex8 = to_num; 2215 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2216 log.u_bbr.flex2 = rack->rc_rack_rtt; 2217 if (rsm == NULL) 2218 log.u_bbr.flex3 = 0; 2219 else 2220 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2221 if (rack->rack_no_prr) 2222 log.u_bbr.flex5 = 0; 2223 else 2224 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2225 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2226 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2227 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2228 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2229 log.u_bbr.pacing_gain = rack->r_must_retran; 2230 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2231 &rack->rc_inp->inp_socket->so_rcv, 2232 &rack->rc_inp->inp_socket->so_snd, 2233 BBR_LOG_RTO, 0, 2234 0, &log, false, &tv); 2235 } 2236 } 2237 2238 static void 2239 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2240 struct rack_sendmap *prev, 2241 struct rack_sendmap *rsm, 2242 
struct rack_sendmap *next, 2243 int flag, uint32_t th_ack, int line) 2244 { 2245 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2246 union tcp_log_stackspecific log; 2247 struct timeval tv; 2248 2249 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2250 log.u_bbr.flex8 = flag; 2251 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2252 log.u_bbr.cur_del_rate = (uint64_t)prev; 2253 log.u_bbr.delRate = (uint64_t)rsm; 2254 log.u_bbr.rttProp = (uint64_t)next; 2255 log.u_bbr.flex7 = 0; 2256 if (prev) { 2257 log.u_bbr.flex1 = prev->r_start; 2258 log.u_bbr.flex2 = prev->r_end; 2259 log.u_bbr.flex7 |= 0x4; 2260 } 2261 if (rsm) { 2262 log.u_bbr.flex3 = rsm->r_start; 2263 log.u_bbr.flex4 = rsm->r_end; 2264 log.u_bbr.flex7 |= 0x2; 2265 } 2266 if (next) { 2267 log.u_bbr.flex5 = next->r_start; 2268 log.u_bbr.flex6 = next->r_end; 2269 log.u_bbr.flex7 |= 0x1; 2270 } 2271 log.u_bbr.applimited = line; 2272 log.u_bbr.pkts_out = th_ack; 2273 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2274 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2275 if (rack->rack_no_prr) 2276 log.u_bbr.lost = 0; 2277 else 2278 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2279 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2280 &rack->rc_inp->inp_socket->so_rcv, 2281 &rack->rc_inp->inp_socket->so_snd, 2282 TCP_LOG_MAPCHG, 0, 2283 0, &log, false, &tv); 2284 } 2285 } 2286 2287 static void 2288 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2289 struct rack_sendmap *rsm, int conf) 2290 { 2291 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2292 union tcp_log_stackspecific log; 2293 struct timeval tv; 2294 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2295 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2296 log.u_bbr.flex1 = t; 2297 log.u_bbr.flex2 = len; 2298 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2299 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2300 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2301 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2302 log.u_bbr.flex7 = conf; 2303 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2304 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2305 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2306 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2307 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2308 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2309 if (rsm) { 2310 log.u_bbr.pkt_epoch = rsm->r_start; 2311 log.u_bbr.lost = rsm->r_end; 2312 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2313 /* We loose any upper of the 24 bits */ 2314 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2315 } else { 2316 /* Its a SYN */ 2317 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2318 log.u_bbr.lost = 0; 2319 log.u_bbr.cwnd_gain = 0; 2320 log.u_bbr.pacing_gain = 0; 2321 } 2322 /* Write out general bits of interest rrs here */ 2323 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2324 log.u_bbr.use_lt_bw <<= 1; 2325 log.u_bbr.use_lt_bw |= rack->forced_ack; 2326 log.u_bbr.use_lt_bw <<= 1; 2327 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2328 log.u_bbr.use_lt_bw <<= 1; 2329 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2330 log.u_bbr.use_lt_bw <<= 1; 2331 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2332 log.u_bbr.use_lt_bw <<= 1; 2333 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2334 log.u_bbr.use_lt_bw <<= 1; 2335 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2336 log.u_bbr.use_lt_bw <<= 1; 2337 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2338 log.u_bbr.applimited = 
rack->r_ctl.rc_target_probertt_flight; 2339 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2340 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2341 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2342 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2343 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2344 log.u_bbr.bw_inuse <<= 32; 2345 if (rsm) 2346 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2347 TCP_LOG_EVENTP(tp, NULL, 2348 &rack->rc_inp->inp_socket->so_rcv, 2349 &rack->rc_inp->inp_socket->so_snd, 2350 BBR_LOG_BBRRTT, 0, 2351 0, &log, false, &tv); 2352 2353 2354 } 2355 } 2356 2357 static void 2358 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2359 { 2360 /* 2361 * Log the rtt sample we are 2362 * applying to the srtt algorithm in 2363 * useconds. 2364 */ 2365 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2366 union tcp_log_stackspecific log; 2367 struct timeval tv; 2368 2369 /* Convert our ms to a microsecond */ 2370 memset(&log, 0, sizeof(log)); 2371 log.u_bbr.flex1 = rtt; 2372 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2373 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2374 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2375 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2376 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2377 log.u_bbr.flex7 = 1; 2378 log.u_bbr.flex8 = rack->sack_attack_disable; 2379 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2380 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2381 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2382 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2383 log.u_bbr.pacing_gain = rack->r_must_retran; 2384 /* 2385 * We capture in delRate the upper 32 bits as 2386 * the confidence level we had declared, and the 2387 * lower 32 bits as the actual RTT using the arrival 2388 * timestamp. 
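 * (For instance, with illustrative values, a confidence of 3 and an
 * rs_us_rtt of 25000 usec are stored as (3ULL << 32) | 25000.)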
2389 */ 2390 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2391 log.u_bbr.delRate <<= 32; 2392 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2393 /* Lets capture all the things that make up t_rtxcur */ 2394 log.u_bbr.applimited = rack_rto_min; 2395 log.u_bbr.epoch = rack_rto_max; 2396 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2397 log.u_bbr.lost = rack_rto_min; 2398 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2399 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2400 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2401 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2402 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2403 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2404 &rack->rc_inp->inp_socket->so_rcv, 2405 &rack->rc_inp->inp_socket->so_snd, 2406 TCP_LOG_RTT, 0, 2407 0, &log, false, &tv); 2408 } 2409 } 2410 2411 static void 2412 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2413 { 2414 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2415 union tcp_log_stackspecific log; 2416 struct timeval tv; 2417 2418 /* Convert our ms to a microsecond */ 2419 memset(&log, 0, sizeof(log)); 2420 log.u_bbr.flex1 = rtt; 2421 log.u_bbr.flex2 = send_time; 2422 log.u_bbr.flex3 = ack_time; 2423 log.u_bbr.flex4 = where; 2424 log.u_bbr.flex7 = 2; 2425 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2426 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2427 &rack->rc_inp->inp_socket->so_rcv, 2428 &rack->rc_inp->inp_socket->so_snd, 2429 TCP_LOG_RTT, 0, 2430 0, &log, false, &tv); 2431 } 2432 } 2433 2434 2435 2436 static inline void 2437 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2438 { 2439 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2440 union tcp_log_stackspecific log; 2441 struct timeval tv; 2442 2443 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2444 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2445 log.u_bbr.flex1 = line; 2446 log.u_bbr.flex2 = tick; 2447 log.u_bbr.flex3 = tp->t_maxunacktime; 2448 log.u_bbr.flex4 = tp->t_acktime; 2449 log.u_bbr.flex8 = event; 2450 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2451 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2452 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2453 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2454 log.u_bbr.pacing_gain = rack->r_must_retran; 2455 TCP_LOG_EVENTP(tp, NULL, 2456 &rack->rc_inp->inp_socket->so_rcv, 2457 &rack->rc_inp->inp_socket->so_snd, 2458 BBR_LOG_PROGRESS, 0, 2459 0, &log, false, &tv); 2460 } 2461 } 2462 2463 static void 2464 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2465 { 2466 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2467 union tcp_log_stackspecific log; 2468 2469 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2470 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2471 log.u_bbr.flex1 = slot; 2472 if (rack->rack_no_prr) 2473 log.u_bbr.flex2 = 0; 2474 else 2475 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2476 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2477 log.u_bbr.flex8 = rack->rc_in_persist; 2478 log.u_bbr.timeStamp = cts; 2479 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2480 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2481 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2482 log.u_bbr.pacing_gain = rack->r_must_retran; 2483 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2484 &rack->rc_inp->inp_socket->so_rcv, 
2485 &rack->rc_inp->inp_socket->so_snd, 2486 BBR_LOG_BBRSND, 0, 2487 0, &log, false, tv); 2488 } 2489 } 2490 2491 static void 2492 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2493 { 2494 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2495 union tcp_log_stackspecific log; 2496 struct timeval tv; 2497 2498 memset(&log, 0, sizeof(log)); 2499 log.u_bbr.flex1 = did_out; 2500 log.u_bbr.flex2 = nxt_pkt; 2501 log.u_bbr.flex3 = way_out; 2502 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2503 if (rack->rack_no_prr) 2504 log.u_bbr.flex5 = 0; 2505 else 2506 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2507 log.u_bbr.flex6 = nsegs; 2508 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2509 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2510 log.u_bbr.flex7 <<= 1; 2511 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2512 log.u_bbr.flex7 <<= 1; 2513 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2514 log.u_bbr.flex8 = rack->rc_in_persist; 2515 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2516 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2517 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2518 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2519 log.u_bbr.use_lt_bw <<= 1; 2520 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2521 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2522 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2523 log.u_bbr.pacing_gain = rack->r_must_retran; 2524 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2525 &rack->rc_inp->inp_socket->so_rcv, 2526 &rack->rc_inp->inp_socket->so_snd, 2527 BBR_LOG_DOSEG_DONE, 0, 2528 0, &log, false, &tv); 2529 } 2530 } 2531 2532 static void 2533 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2534 { 2535 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2536 union tcp_log_stackspecific log; 2537 struct timeval tv; 2538 2539 memset(&log, 0, sizeof(log)); 2540 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2541 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2542 log.u_bbr.flex4 = arg1; 2543 log.u_bbr.flex5 = arg2; 2544 log.u_bbr.flex6 = arg3; 2545 log.u_bbr.flex8 = frm; 2546 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2547 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2548 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2549 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2550 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2551 log.u_bbr.pacing_gain = rack->r_must_retran; 2552 TCP_LOG_EVENTP(tp, NULL, 2553 &tp->t_inpcb->inp_socket->so_rcv, 2554 &tp->t_inpcb->inp_socket->so_snd, 2555 TCP_HDWR_PACE_SIZE, 0, 2556 0, &log, false, &tv); 2557 } 2558 } 2559 2560 static void 2561 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2562 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2563 { 2564 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2565 union tcp_log_stackspecific log; 2566 struct timeval tv; 2567 2568 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2569 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2570 log.u_bbr.flex1 = slot; 2571 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2572 log.u_bbr.flex4 = reason; 2573 if (rack->rack_no_prr) 2574 log.u_bbr.flex5 = 0; 2575 else 2576 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2577 log.u_bbr.flex7 = hpts_calling; 2578 log.u_bbr.flex8 = rack->rc_in_persist; 2579 log.u_bbr.lt_epoch = cwnd_to_use; 2580 
log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2581 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2582 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2583 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2584 log.u_bbr.pacing_gain = rack->r_must_retran; 2585 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2586 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2587 &rack->rc_inp->inp_socket->so_rcv, 2588 &rack->rc_inp->inp_socket->so_snd, 2589 BBR_LOG_JUSTRET, 0, 2590 tlen, &log, false, &tv); 2591 } 2592 } 2593 2594 static void 2595 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2596 struct timeval *tv, uint32_t flags_on_entry) 2597 { 2598 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2599 union tcp_log_stackspecific log; 2600 2601 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2602 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2603 log.u_bbr.flex1 = line; 2604 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2605 log.u_bbr.flex3 = flags_on_entry; 2606 log.u_bbr.flex4 = us_cts; 2607 if (rack->rack_no_prr) 2608 log.u_bbr.flex5 = 0; 2609 else 2610 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2611 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2612 log.u_bbr.flex7 = hpts_removed; 2613 log.u_bbr.flex8 = 1; 2614 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2615 log.u_bbr.timeStamp = us_cts; 2616 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2617 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2618 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2619 log.u_bbr.pacing_gain = rack->r_must_retran; 2620 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2621 &rack->rc_inp->inp_socket->so_rcv, 2622 &rack->rc_inp->inp_socket->so_snd, 2623 BBR_LOG_TIMERCANC, 0, 2624 0, &log, false, tv); 2625 } 2626 } 2627 2628 static void 2629 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2630 uint32_t flex1, uint32_t flex2, 2631 uint32_t flex3, uint32_t flex4, 2632 uint32_t flex5, uint32_t flex6, 2633 uint16_t flex7, uint8_t mod) 2634 { 2635 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2636 union tcp_log_stackspecific log; 2637 struct timeval tv; 2638 2639 if (mod == 1) { 2640 /* No you can't use 1, its for the real to cancel */ 2641 return; 2642 } 2643 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2644 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2645 log.u_bbr.flex1 = flex1; 2646 log.u_bbr.flex2 = flex2; 2647 log.u_bbr.flex3 = flex3; 2648 log.u_bbr.flex4 = flex4; 2649 log.u_bbr.flex5 = flex5; 2650 log.u_bbr.flex6 = flex6; 2651 log.u_bbr.flex7 = flex7; 2652 log.u_bbr.flex8 = mod; 2653 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2654 &rack->rc_inp->inp_socket->so_rcv, 2655 &rack->rc_inp->inp_socket->so_snd, 2656 BBR_LOG_TIMERCANC, 0, 2657 0, &log, false, &tv); 2658 } 2659 } 2660 2661 static void 2662 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2663 { 2664 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2665 union tcp_log_stackspecific log; 2666 struct timeval tv; 2667 2668 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2669 log.u_bbr.flex1 = timers; 2670 log.u_bbr.flex2 = ret; 2671 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2672 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2673 log.u_bbr.flex5 = cts; 2674 if (rack->rack_no_prr) 2675 log.u_bbr.flex6 = 0; 2676 else 2677 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2678 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2679 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2680 log.u_bbr.pacing_gain = rack->r_must_retran; 2681 log.u_bbr.timeStamp = 
tcp_get_usecs(&tv); 2682 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2683 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2684 &rack->rc_inp->inp_socket->so_rcv, 2685 &rack->rc_inp->inp_socket->so_snd, 2686 BBR_LOG_TO_PROCESS, 0, 2687 0, &log, false, &tv); 2688 } 2689 } 2690 2691 static void 2692 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2693 { 2694 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2695 union tcp_log_stackspecific log; 2696 struct timeval tv; 2697 2698 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2699 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2700 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2701 if (rack->rack_no_prr) 2702 log.u_bbr.flex3 = 0; 2703 else 2704 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2705 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2706 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2707 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2708 log.u_bbr.flex7 = line; 2709 log.u_bbr.flex8 = frm; 2710 log.u_bbr.pkts_out = orig_cwnd; 2711 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2712 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2713 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2714 log.u_bbr.use_lt_bw <<= 1; 2715 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2716 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2717 &rack->rc_inp->inp_socket->so_rcv, 2718 &rack->rc_inp->inp_socket->so_snd, 2719 BBR_LOG_BBRUPD, 0, 2720 0, &log, false, &tv); 2721 } 2722 } 2723 2724 #ifdef NETFLIX_EXP_DETECTION 2725 static void 2726 rack_log_sad(struct tcp_rack *rack, int event) 2727 { 2728 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2729 union tcp_log_stackspecific log; 2730 struct timeval tv; 2731 2732 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2733 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2734 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2735 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2736 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2737 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2738 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2739 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2740 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2741 log.u_bbr.lt_epoch |= rack->do_detection; 2742 log.u_bbr.applimited = tcp_map_minimum; 2743 log.u_bbr.flex7 = rack->sack_attack_disable; 2744 log.u_bbr.flex8 = event; 2745 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2746 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2747 log.u_bbr.delivered = tcp_sad_decay_val; 2748 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2749 &rack->rc_inp->inp_socket->so_rcv, 2750 &rack->rc_inp->inp_socket->so_snd, 2751 TCP_SAD_DETECTION, 0, 2752 0, &log, false, &tv); 2753 } 2754 } 2755 #endif 2756 2757 static void 2758 rack_counter_destroy(void) 2759 { 2760 counter_u64_free(rack_fto_send); 2761 counter_u64_free(rack_fto_rsm_send); 2762 counter_u64_free(rack_nfto_resend); 2763 counter_u64_free(rack_hw_pace_init_fail); 2764 counter_u64_free(rack_hw_pace_lost); 2765 counter_u64_free(rack_non_fto_send); 2766 counter_u64_free(rack_extended_rfo); 2767 counter_u64_free(rack_ack_total); 2768 counter_u64_free(rack_express_sack); 2769 counter_u64_free(rack_sack_total); 2770 counter_u64_free(rack_move_none); 2771 counter_u64_free(rack_move_some); 2772 counter_u64_free(rack_sack_attacks_detected); 2773 counter_u64_free(rack_sack_attacks_reversed); 2774 counter_u64_free(rack_sack_used_next_merge); 2775 counter_u64_free(rack_sack_used_prev_merge); 2776 counter_u64_free(rack_tlp_tot); 2777 counter_u64_free(rack_tlp_newdata); 2778 
counter_u64_free(rack_tlp_retran); 2779 counter_u64_free(rack_tlp_retran_bytes); 2780 counter_u64_free(rack_to_tot); 2781 counter_u64_free(rack_saw_enobuf); 2782 counter_u64_free(rack_saw_enobuf_hw); 2783 counter_u64_free(rack_saw_enetunreach); 2784 counter_u64_free(rack_hot_alloc); 2785 counter_u64_free(rack_to_alloc); 2786 counter_u64_free(rack_to_alloc_hard); 2787 counter_u64_free(rack_to_alloc_emerg); 2788 counter_u64_free(rack_to_alloc_limited); 2789 counter_u64_free(rack_alloc_limited_conns); 2790 counter_u64_free(rack_split_limited); 2791 counter_u64_free(rack_multi_single_eq); 2792 counter_u64_free(rack_proc_non_comp_ack); 2793 counter_u64_free(rack_sack_proc_all); 2794 counter_u64_free(rack_sack_proc_restart); 2795 counter_u64_free(rack_sack_proc_short); 2796 counter_u64_free(rack_sack_skipped_acked); 2797 counter_u64_free(rack_sack_splits); 2798 counter_u64_free(rack_input_idle_reduces); 2799 counter_u64_free(rack_collapsed_win); 2800 counter_u64_free(rack_collapsed_win_rxt); 2801 counter_u64_free(rack_collapsed_win_rxt_bytes); 2802 counter_u64_free(rack_collapsed_win_seen); 2803 counter_u64_free(rack_try_scwnd); 2804 counter_u64_free(rack_persists_sends); 2805 counter_u64_free(rack_persists_acks); 2806 counter_u64_free(rack_persists_loss); 2807 counter_u64_free(rack_persists_lost_ends); 2808 #ifdef INVARIANTS 2809 counter_u64_free(rack_adjust_map_bw); 2810 #endif 2811 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2812 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2813 } 2814 2815 static struct rack_sendmap * 2816 rack_alloc(struct tcp_rack *rack) 2817 { 2818 struct rack_sendmap *rsm; 2819 2820 /* 2821 * First get the top of the list it in 2822 * theory is the "hottest" rsm we have, 2823 * possibly just freed by ack processing. 2824 */ 2825 if (rack->rc_free_cnt > rack_free_cache) { 2826 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2827 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2828 counter_u64_add(rack_hot_alloc, 1); 2829 rack->rc_free_cnt--; 2830 return (rsm); 2831 } 2832 /* 2833 * Once we get under our free cache we probably 2834 * no longer have a "hot" one available. Lets 2835 * get one from UMA. 2836 */ 2837 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2838 if (rsm) { 2839 rack->r_ctl.rc_num_maps_alloced++; 2840 counter_u64_add(rack_to_alloc, 1); 2841 return (rsm); 2842 } 2843 /* 2844 * Dig in to our aux rsm's (the last two) since 2845 * UMA failed to get us one. 
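 * To summarize the fallback order: the hot free list is tried first,
 * then a UMA allocation (both above), and finally these last cached
 * entries below; NULL is returned only when all three fail.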
2846 */ 2847 if (rack->rc_free_cnt) { 2848 counter_u64_add(rack_to_alloc_emerg, 1); 2849 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2850 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2851 rack->rc_free_cnt--; 2852 return (rsm); 2853 } 2854 return (NULL); 2855 } 2856 2857 static struct rack_sendmap * 2858 rack_alloc_full_limit(struct tcp_rack *rack) 2859 { 2860 if ((V_tcp_map_entries_limit > 0) && 2861 (rack->do_detection == 0) && 2862 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2863 counter_u64_add(rack_to_alloc_limited, 1); 2864 if (!rack->alloc_limit_reported) { 2865 rack->alloc_limit_reported = 1; 2866 counter_u64_add(rack_alloc_limited_conns, 1); 2867 } 2868 return (NULL); 2869 } 2870 return (rack_alloc(rack)); 2871 } 2872 2873 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2874 static struct rack_sendmap * 2875 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2876 { 2877 struct rack_sendmap *rsm; 2878 2879 if (limit_type) { 2880 /* currently there is only one limit type */ 2881 if (V_tcp_map_split_limit > 0 && 2882 (rack->do_detection == 0) && 2883 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2884 counter_u64_add(rack_split_limited, 1); 2885 if (!rack->alloc_limit_reported) { 2886 rack->alloc_limit_reported = 1; 2887 counter_u64_add(rack_alloc_limited_conns, 1); 2888 } 2889 return (NULL); 2890 } 2891 } 2892 2893 /* allocate and mark in the limit type, if set */ 2894 rsm = rack_alloc(rack); 2895 if (rsm != NULL && limit_type) { 2896 rsm->r_limit_type = limit_type; 2897 rack->r_ctl.rc_num_split_allocs++; 2898 } 2899 return (rsm); 2900 } 2901 2902 static void 2903 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2904 { 2905 if (rsm->r_flags & RACK_APP_LIMITED) { 2906 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2907 rack->r_ctl.rc_app_limited_cnt--; 2908 } 2909 } 2910 if (rsm->r_limit_type) { 2911 /* currently there is only one limit type */ 2912 rack->r_ctl.rc_num_split_allocs--; 2913 } 2914 if (rsm == rack->r_ctl.rc_first_appl) { 2915 if (rack->r_ctl.rc_app_limited_cnt == 0) 2916 rack->r_ctl.rc_first_appl = NULL; 2917 else { 2918 /* Follow the next one out */ 2919 struct rack_sendmap fe; 2920 2921 fe.r_start = rsm->r_nseq_appl; 2922 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2923 } 2924 } 2925 if (rsm == rack->r_ctl.rc_resend) 2926 rack->r_ctl.rc_resend = NULL; 2927 if (rsm == rack->r_ctl.rc_end_appl) 2928 rack->r_ctl.rc_end_appl = NULL; 2929 if (rack->r_ctl.rc_tlpsend == rsm) 2930 rack->r_ctl.rc_tlpsend = NULL; 2931 if (rack->r_ctl.rc_sacklast == rsm) 2932 rack->r_ctl.rc_sacklast = NULL; 2933 memset(rsm, 0, sizeof(struct rack_sendmap)); 2934 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2935 rack->rc_free_cnt++; 2936 } 2937 2938 static void 2939 rack_free_trim(struct tcp_rack *rack) 2940 { 2941 struct rack_sendmap *rsm; 2942 2943 /* 2944 * Free up all the tail entries until 2945 * we get our list down to the limit. 
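 * Trimmed entries are handed back to the UMA zone (rack_zone).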
2946 */ 2947 while (rack->rc_free_cnt > rack_free_cache) { 2948 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2949 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2950 rack->rc_free_cnt--; 2951 uma_zfree(rack_zone, rsm); 2952 } 2953 } 2954 2955 2956 static uint32_t 2957 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2958 { 2959 uint64_t srtt, bw, len, tim; 2960 uint32_t segsiz, def_len, minl; 2961 2962 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2963 def_len = rack_def_data_window * segsiz; 2964 if (rack->rc_gp_filled == 0) { 2965 /* 2966 * We have no measurement (IW is in flight?) so 2967 * we can only guess using our data_window sysctl 2968 * value (usually 20MSS). 2969 */ 2970 return (def_len); 2971 } 2972 /* 2973 * Now we have a number of factors to consider. 2974 * 2975 * 1) We have a desired BDP which is usually 2976 * at least 2. 2977 * 2) We have a minimum number of rtt's usually 1 SRTT 2978 * but we allow it too to be more. 2979 * 3) We want to make sure a measurement last N useconds (if 2980 * we have set rack_min_measure_usec. 2981 * 2982 * We handle the first concern here by trying to create a data 2983 * window of max(rack_def_data_window, DesiredBDP). The 2984 * second concern we handle in not letting the measurement 2985 * window end normally until at least the required SRTT's 2986 * have gone by which is done further below in 2987 * rack_enough_for_measurement(). Finally the third concern 2988 * we also handle here by calculating how long that time 2989 * would take at the current BW and then return the 2990 * max of our first calculation and that length. Note 2991 * that if rack_min_measure_usec is 0, we don't deal 2992 * with concern 3. Also for both Concern 1 and 3 an 2993 * application limited period could end the measurement 2994 * earlier. 2995 * 2996 * So lets calculate the BDP with the "known" b/w using 2997 * the SRTT has our rtt and then multiply it by the 2998 * goal. 2999 */ 3000 bw = rack_get_bw(rack); 3001 srtt = (uint64_t)tp->t_srtt; 3002 len = bw * srtt; 3003 len /= (uint64_t)HPTS_USEC_IN_SEC; 3004 len *= max(1, rack_goal_bdp); 3005 /* Now we need to round up to the nearest MSS */ 3006 len = roundup(len, segsiz); 3007 if (rack_min_measure_usec) { 3008 /* Now calculate our min length for this b/w */ 3009 tim = rack_min_measure_usec; 3010 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 3011 if (minl == 0) 3012 minl = 1; 3013 minl = roundup(minl, segsiz); 3014 if (len < minl) 3015 len = minl; 3016 } 3017 /* 3018 * Now if we have a very small window we want 3019 * to attempt to get the window that is 3020 * as small as possible. This happens on 3021 * low b/w connections and we don't want to 3022 * span huge numbers of rtt's between measurements. 3023 * 3024 * We basically include 2 over our "MIN window" so 3025 * that the measurement can be shortened (possibly) by 3026 * an ack'ed packet. 3027 */ 3028 if (len < def_len) 3029 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 3030 else 3031 return (max((uint32_t)len, def_len)); 3032 3033 } 3034 3035 static int 3036 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 3037 { 3038 uint32_t tim, srtts, segsiz; 3039 3040 /* 3041 * Has enough time passed for the GP measurement to be valid? 
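 * Roughly: the measurement is accepted right away when everything is
 * acked or the ack reaches the first app-limited point; otherwise it
 * must reach tp->gput_ack or cover at least the larger of the initial
 * window and MIN_GP_WIN segments, and span at least rack_min_srtts
 * smoothed RTTs.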
3042 */ 3043 if ((tp->snd_max == tp->snd_una) || 3044 (th_ack == tp->snd_max)){ 3045 /* All is acked */ 3046 *quality = RACK_QUALITY_ALLACKED; 3047 return (1); 3048 } 3049 if (SEQ_LT(th_ack, tp->gput_seq)) { 3050 /* Not enough bytes yet */ 3051 return (0); 3052 } 3053 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3054 if (SEQ_LT(th_ack, tp->gput_ack) && 3055 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3056 /* Not enough bytes yet */ 3057 return (0); 3058 } 3059 if (rack->r_ctl.rc_first_appl && 3060 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3061 /* 3062 * We are up to the app limited send point 3063 * we have to measure irrespective of the time.. 3064 */ 3065 *quality = RACK_QUALITY_APPLIMITED; 3066 return (1); 3067 } 3068 /* Now what about time? */ 3069 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3070 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3071 if (tim >= srtts) { 3072 *quality = RACK_QUALITY_HIGH; 3073 return (1); 3074 } 3075 /* Nope not even a full SRTT has passed */ 3076 return (0); 3077 } 3078 3079 static void 3080 rack_log_timely(struct tcp_rack *rack, 3081 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3082 uint64_t up_bnd, int line, uint8_t method) 3083 { 3084 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3085 union tcp_log_stackspecific log; 3086 struct timeval tv; 3087 3088 memset(&log, 0, sizeof(log)); 3089 log.u_bbr.flex1 = logged; 3090 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3091 log.u_bbr.flex2 <<= 4; 3092 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3093 log.u_bbr.flex2 <<= 4; 3094 log.u_bbr.flex2 |= rack->rc_gp_incr; 3095 log.u_bbr.flex2 <<= 4; 3096 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3097 log.u_bbr.flex3 = rack->rc_gp_incr; 3098 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3099 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3100 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3101 log.u_bbr.flex7 = rack->rc_gp_bwred; 3102 log.u_bbr.flex8 = method; 3103 log.u_bbr.cur_del_rate = cur_bw; 3104 log.u_bbr.delRate = low_bnd; 3105 log.u_bbr.bw_inuse = up_bnd; 3106 log.u_bbr.rttProp = rack_get_bw(rack); 3107 log.u_bbr.pkt_epoch = line; 3108 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3109 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3110 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3111 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3112 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3113 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3114 log.u_bbr.cwnd_gain <<= 1; 3115 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3116 log.u_bbr.cwnd_gain <<= 1; 3117 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3118 log.u_bbr.cwnd_gain <<= 1; 3119 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3120 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3121 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3122 &rack->rc_inp->inp_socket->so_rcv, 3123 &rack->rc_inp->inp_socket->so_snd, 3124 TCP_TIMELY_WORK, 0, 3125 0, &log, false, &tv); 3126 } 3127 } 3128 3129 static int 3130 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3131 { 3132 /* 3133 * Before we increase we need to know if 3134 * the estimate just made was less than 3135 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3136 * 3137 * If we already are pacing at a fast enough 3138 * rate to push us faster there is no sense of 3139 * increasing. 3140 * 3141 * We first caculate our actual pacing rate (ss or ca multiplier 3142 * times our cur_bw). 
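 *
 * (Hypothetical worked example of the comparison made below: with
 * cur_bw = 10 Mbps and mult = 120 the actual pacing rate is 12 Mbps;
 * with last_bw_est = 11 Mbps and rack_max_per_above = 10 the maximum
 * allowable rate is 11 * 110 / 100 = 12.1 Mbps, so a further increase
 * would still be allowed.)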
3143 * 3144 * Then we take the last measured rate and multipy by our 3145 * maximum pacing overage to give us a max allowable rate. 3146 * 3147 * If our act_rate is smaller than our max_allowable rate 3148 * then we should increase. Else we should hold steady. 3149 * 3150 */ 3151 uint64_t act_rate, max_allow_rate; 3152 3153 if (rack_timely_no_stopping) 3154 return (1); 3155 3156 if ((cur_bw == 0) || (last_bw_est == 0)) { 3157 /* 3158 * Initial startup case or 3159 * everything is acked case. 3160 */ 3161 rack_log_timely(rack, mult, cur_bw, 0, 0, 3162 __LINE__, 9); 3163 return (1); 3164 } 3165 if (mult <= 100) { 3166 /* 3167 * We can always pace at or slightly above our rate. 3168 */ 3169 rack_log_timely(rack, mult, cur_bw, 0, 0, 3170 __LINE__, 9); 3171 return (1); 3172 } 3173 act_rate = cur_bw * (uint64_t)mult; 3174 act_rate /= 100; 3175 max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100); 3176 max_allow_rate /= 100; 3177 if (act_rate < max_allow_rate) { 3178 /* 3179 * Here the rate we are actually pacing at 3180 * is smaller than 10% above our last measurement. 3181 * This means we are pacing below what we would 3182 * like to try to achieve (plus some wiggle room). 3183 */ 3184 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3185 __LINE__, 9); 3186 return (1); 3187 } else { 3188 /* 3189 * Here we are already pacing at least rack_max_per_above(10%) 3190 * what we are getting back. This indicates most likely 3191 * that we are being limited (cwnd/rwnd/app) and can't 3192 * get any more b/w. There is no sense of trying to 3193 * raise up the pacing rate its not speeding us up 3194 * and we already are pacing faster than we are getting. 3195 */ 3196 rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate, 3197 __LINE__, 8); 3198 return (0); 3199 } 3200 } 3201 3202 static void 3203 rack_validate_multipliers_at_or_above100(struct tcp_rack *rack) 3204 { 3205 /* 3206 * When we drag bottom, we want to assure 3207 * that no multiplier is below 1.0, if so 3208 * we want to restore it to at least that. 3209 */ 3210 if (rack->r_ctl.rack_per_of_gp_rec < 100) { 3211 /* This is unlikely we usually do not touch recovery */ 3212 rack->r_ctl.rack_per_of_gp_rec = 100; 3213 } 3214 if (rack->r_ctl.rack_per_of_gp_ca < 100) { 3215 rack->r_ctl.rack_per_of_gp_ca = 100; 3216 } 3217 if (rack->r_ctl.rack_per_of_gp_ss < 100) { 3218 rack->r_ctl.rack_per_of_gp_ss = 100; 3219 } 3220 } 3221 3222 static void 3223 rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack) 3224 { 3225 if (rack->r_ctl.rack_per_of_gp_ca > 100) { 3226 rack->r_ctl.rack_per_of_gp_ca = 100; 3227 } 3228 if (rack->r_ctl.rack_per_of_gp_ss > 100) { 3229 rack->r_ctl.rack_per_of_gp_ss = 100; 3230 } 3231 } 3232 3233 static void 3234 rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override) 3235 { 3236 int32_t calc, logged, plus; 3237 3238 logged = 0; 3239 3240 if (override) { 3241 /* 3242 * override is passed when we are 3243 * loosing b/w and making one last 3244 * gasp at trying to not loose out 3245 * to a new-reno flow. 3246 */ 3247 goto extra_boost; 3248 } 3249 /* In classic timely we boost by 5x if we have 5 increases in a row, lets not */ 3250 if (rack->rc_gp_incr && 3251 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { 3252 /* 3253 * Reset and get 5 strokes more before the boost. Note 3254 * that the count is 0 based so we have to add one. 
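 * In effect this replaces classic timely's 5x compounding: after
 * RACK_TIMELY_CNT_BOOST consecutive increases the next step is one
 * larger bump of rack_gp_increase_per * RACK_TIMELY_CNT_BOOST percent
 * and the streak counter starts over at zero.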
3255 */ 3256 extra_boost: 3257 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3258 rack->rc_gp_timely_inc_cnt = 0; 3259 } else 3260 plus = (uint32_t)rack_gp_increase_per; 3261 /* Must be at least 1% increase for true timely increases */ 3262 if ((plus < 1) && 3263 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3264 plus = 1; 3265 if (rack->rc_gp_saw_rec && 3266 (rack->rc_gp_no_rec_chg == 0) && 3267 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3268 rack->r_ctl.rack_per_of_gp_rec)) { 3269 /* We have been in recovery ding it too */ 3270 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3271 if (calc > 0xffff) 3272 calc = 0xffff; 3273 logged |= 1; 3274 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3275 if (rack_per_upper_bound_ss && 3276 (rack->rc_dragged_bottom == 0) && 3277 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3278 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3279 } 3280 if (rack->rc_gp_saw_ca && 3281 (rack->rc_gp_saw_ss == 0) && 3282 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3283 rack->r_ctl.rack_per_of_gp_ca)) { 3284 /* In CA */ 3285 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3286 if (calc > 0xffff) 3287 calc = 0xffff; 3288 logged |= 2; 3289 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3290 if (rack_per_upper_bound_ca && 3291 (rack->rc_dragged_bottom == 0) && 3292 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3293 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3294 } 3295 if (rack->rc_gp_saw_ss && 3296 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3297 rack->r_ctl.rack_per_of_gp_ss)) { 3298 /* In SS */ 3299 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3300 if (calc > 0xffff) 3301 calc = 0xffff; 3302 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3303 if (rack_per_upper_bound_ss && 3304 (rack->rc_dragged_bottom == 0) && 3305 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3306 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3307 logged |= 4; 3308 } 3309 if (logged && 3310 (rack->rc_gp_incr == 0)){ 3311 /* Go into increment mode */ 3312 rack->rc_gp_incr = 1; 3313 rack->rc_gp_timely_inc_cnt = 0; 3314 } 3315 if (rack->rc_gp_incr && 3316 logged && 3317 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3318 rack->rc_gp_timely_inc_cnt++; 3319 } 3320 rack_log_timely(rack, logged, plus, 0, 0, 3321 __LINE__, 1); 3322 } 3323 3324 static uint32_t 3325 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3326 { 3327 /* 3328 * norm_grad = rtt_diff / minrtt; 3329 * new_per = curper * (1 - B * norm_grad) 3330 * 3331 * B = rack_gp_decrease_per (default 10%) 3332 * rtt_dif = input var current rtt-diff 3333 * curper = input var current percentage 3334 * minrtt = from rack filter 3335 * 3336 */ 3337 uint64_t perf; 3338 3339 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3340 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3341 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3342 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3343 (uint64_t)1000000)) / 3344 (uint64_t)1000000); 3345 if (perf > curper) { 3346 /* TSNH */ 3347 perf = curper - 1; 3348 } 3349 return ((uint32_t)perf); 3350 } 3351 3352 static uint32_t 3353 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3354 { 3355 /* 3356 * highrttthresh 3357 * result = curper * (1 - (B * ( 1 - ------ )) 3358 * gp_srtt 3359 * 3360 * B = rack_gp_decrease_per (default 10%) 3361 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3362 */ 3363 uint64_t perf; 3364 uint32_t 
highrttthresh; 3365 3366 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3367 3368 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3369 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3370 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3371 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3372 return (perf); 3373 } 3374 3375 static void 3376 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3377 { 3378 uint64_t logvar, logvar2, logvar3; 3379 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3380 3381 if (rack->rc_gp_incr) { 3382 /* Turn off increment counting */ 3383 rack->rc_gp_incr = 0; 3384 rack->rc_gp_timely_inc_cnt = 0; 3385 } 3386 ss_red = ca_red = rec_red = 0; 3387 logged = 0; 3388 /* Calculate the reduction value */ 3389 if (rtt_diff < 0) { 3390 rtt_diff *= -1; 3391 } 3392 /* Must be at least 1% reduction */ 3393 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3394 /* We have been in recovery ding it too */ 3395 if (timely_says == 2) { 3396 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3397 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3398 if (alt < new_per) 3399 val = alt; 3400 else 3401 val = new_per; 3402 } else 3403 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3404 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3405 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3406 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3407 } else { 3408 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3409 rec_red = 0; 3410 } 3411 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3412 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3413 logged |= 1; 3414 } 3415 if (rack->rc_gp_saw_ss) { 3416 /* Sent in SS */ 3417 if (timely_says == 2) { 3418 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3419 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3420 if (alt < new_per) 3421 val = alt; 3422 else 3423 val = new_per; 3424 } else 3425 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3426 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3427 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3428 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3429 } else { 3430 ss_red = new_per; 3431 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3432 logvar = new_per; 3433 logvar <<= 32; 3434 logvar |= alt; 3435 logvar2 = (uint32_t)rtt; 3436 logvar2 <<= 32; 3437 logvar2 |= (uint32_t)rtt_diff; 3438 logvar3 = rack_gp_rtt_maxmul; 3439 logvar3 <<= 32; 3440 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3441 rack_log_timely(rack, timely_says, 3442 logvar2, logvar3, 3443 logvar, __LINE__, 10); 3444 } 3445 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3446 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3447 logged |= 4; 3448 } else if (rack->rc_gp_saw_ca) { 3449 /* Sent in CA */ 3450 if (timely_says == 2) { 3451 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3452 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3453 if (alt < new_per) 3454 val = alt; 3455 else 3456 val = new_per; 3457 } else 3458 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3459 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3460 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3461 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3462 } else { 3463 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3464 ca_red = 0; 3465 logvar = new_per; 3466 logvar <<= 32; 3467 logvar |= alt; 3468 logvar2 = (uint32_t)rtt; 3469 logvar2 <<= 32; 3470 logvar2 |= (uint32_t)rtt_diff; 3471 logvar3 = rack_gp_rtt_maxmul; 3472 logvar3 <<= 32; 3473 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3474 rack_log_timely(rack, timely_says, 3475 logvar2, logvar3, 3476 logvar, __LINE__, 10); 3477 } 3478 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3479 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3480 logged |= 2; 3481 } 3482 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3483 rack->rc_gp_timely_dec_cnt++; 3484 if (rack_timely_dec_clear && 3485 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3486 rack->rc_gp_timely_dec_cnt = 0; 3487 } 3488 logvar = ss_red; 3489 logvar <<= 32; 3490 logvar |= ca_red; 3491 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3492 __LINE__, 2); 3493 } 3494 3495 static void 3496 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3497 uint32_t rtt, uint32_t line, uint8_t reas) 3498 { 3499 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3500 union tcp_log_stackspecific log; 3501 struct timeval tv; 3502 3503 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3504 log.u_bbr.flex1 = line; 3505 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3506 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3507 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3508 log.u_bbr.flex5 = rtt; 3509 log.u_bbr.flex6 = rack->rc_highly_buffered; 3510 log.u_bbr.flex6 <<= 1; 3511 log.u_bbr.flex6 |= rack->forced_ack; 3512 log.u_bbr.flex6 <<= 1; 3513 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3514 log.u_bbr.flex6 <<= 1; 3515 log.u_bbr.flex6 |= rack->in_probe_rtt; 3516 log.u_bbr.flex6 <<= 1; 3517 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3518 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3519 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3520 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3521 log.u_bbr.flex8 = reas; 3522 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3523 log.u_bbr.delRate = rack_get_bw(rack); 3524 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3525 log.u_bbr.cur_del_rate <<= 32; 3526 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3527 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3528 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3529 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3530 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3531 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3532 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3533 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3534 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3535 log.u_bbr.rttProp = us_cts; 3536 log.u_bbr.rttProp <<= 32; 3537 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3538 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3539 &rack->rc_inp->inp_socket->so_rcv, 3540 &rack->rc_inp->inp_socket->so_snd, 3541 BBR_LOG_RTT_SHRINKS, 0, 3542 0, &log, false, &rack->r_ctl.act_rcv_time); 3543 } 3544 } 3545 3546 static void 3547 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3548 { 3549 uint64_t bwdp; 3550 3551 bwdp = rack_get_bw(rack); 3552 bwdp *= (uint64_t)rtt; 3553 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3554 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3555 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3556 /* 3557 * A window protocol must be able to have 4 packets 3558 * outstanding as the floor in order to function 3559 * (especially considering delayed ack :D). 3560 */ 3561 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3562 } 3563 } 3564 3565 static void 3566 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3567 { 3568 /** 3569 * ProbeRTT is a bit different in rack_pacing than in 3570 * BBR. It is like BBR in that it uses the lowering of 3571 * the RTT as a signal that we saw something new and 3572 * counts from there for how long between. But it is 3573 * different in that its quite simple. It does not 3574 * play with the cwnd and wait until we get down 3575 * to N segments outstanding and hold that for 3576 * 200ms. Instead it just sets the pacing reduction 3577 * rate to a set percentage (70 by default) and hold 3578 * that for a number of recent GP Srtt's. 3579 */ 3580 uint32_t segsiz; 3581 3582 if (rack->rc_gp_dyn_mul == 0) 3583 return; 3584 3585 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3586 /* We are idle */ 3587 return; 3588 } 3589 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3590 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3591 /* 3592 * Stop the goodput now, the idea here is 3593 * that future measurements with in_probe_rtt 3594 * won't register if they are not greater so 3595 * we want to get what info (if any) is available 3596 * now. 3597 */ 3598 rack_do_goodput_measurement(rack->rc_tp, rack, 3599 rack->rc_tp->snd_una, __LINE__, 3600 RACK_QUALITY_PROBERTT); 3601 } 3602 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3603 rack->r_ctl.rc_time_probertt_entered = us_cts; 3604 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3605 rack->r_ctl.rc_pace_min_segs); 3606 rack->in_probe_rtt = 1; 3607 rack->measure_saw_probe_rtt = 1; 3608 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3609 rack->r_ctl.rc_time_probertt_starts = 0; 3610 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3611 if (rack_probertt_use_min_rtt_entry) 3612 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3613 else 3614 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3615 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3616 __LINE__, RACK_RTTS_ENTERPROBE); 3617 } 3618 3619 static void 3620 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3621 { 3622 struct rack_sendmap *rsm; 3623 uint32_t segsiz; 3624 3625 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3626 rack->r_ctl.rc_pace_min_segs); 3627 rack->in_probe_rtt = 0; 3628 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3629 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3630 /* 3631 * Stop the goodput now, the idea here is 3632 * that future measurements with in_probe_rtt 3633 * won't register if they are not greater so 3634 * we want to get what info (if any) is available 3635 * now. 3636 */ 3637 rack_do_goodput_measurement(rack->rc_tp, rack, 3638 rack->rc_tp->snd_una, __LINE__, 3639 RACK_QUALITY_PROBERTT); 3640 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3641 /* 3642 * We don't have enough data to make a measurement. 3643 * So lets just stop and start here after exiting 3644 * probe-rtt. We probably are not interested in 3645 * the results anyway. 3646 */ 3647 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3648 } 3649 /* 3650 * Measurements through the current snd_max are going 3651 * to be limited by the slower pacing rate. 
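 * (Everything already in flight was paced at the reduced probe-rtt
 * rate, so a goodput sample spanning those bytes would understate
 * what the path can actually deliver.)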
3652 * 3653 * We need to mark these as app-limited so we 3654 * don't collapse the b/w. 3655 */ 3656 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3657 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3658 if (rack->r_ctl.rc_app_limited_cnt == 0) 3659 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3660 else { 3661 /* 3662 * Go out to the end app limited and mark 3663 * this new one as next and move the end_appl up 3664 * to this guy. 3665 */ 3666 if (rack->r_ctl.rc_end_appl) 3667 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3668 rack->r_ctl.rc_end_appl = rsm; 3669 } 3670 rsm->r_flags |= RACK_APP_LIMITED; 3671 rack->r_ctl.rc_app_limited_cnt++; 3672 } 3673 /* 3674 * Now, we need to examine our pacing rate multipliers. 3675 * If its under 100%, we need to kick it back up to 3676 * 100%. We also don't let it be over our "max" above 3677 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3678 * Note setting clamp_atexit_prtt to 0 has the effect 3679 * of setting CA/SS to 100% always at exit (which is 3680 * the default behavior). 3681 */ 3682 if (rack_probertt_clear_is) { 3683 rack->rc_gp_incr = 0; 3684 rack->rc_gp_bwred = 0; 3685 rack->rc_gp_timely_inc_cnt = 0; 3686 rack->rc_gp_timely_dec_cnt = 0; 3687 } 3688 /* Do we do any clamping at exit? */ 3689 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3690 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3691 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3692 } 3693 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3694 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3695 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3696 } 3697 /* 3698 * Lets set rtt_diff to 0, so that we will get a "boost" 3699 * after exiting. 3700 */ 3701 rack->r_ctl.rc_rtt_diff = 0; 3702 3703 /* Clear all flags so we start fresh */ 3704 rack->rc_tp->t_bytes_acked = 0; 3705 rack->rc_tp->ccv->flags &= ~CCF_ABC_SENTAWND; 3706 /* 3707 * If configured to, set the cwnd and ssthresh to 3708 * our targets. 3709 */ 3710 if (rack_probe_rtt_sets_cwnd) { 3711 uint64_t ebdp; 3712 uint32_t setto; 3713 3714 /* Set ssthresh so we get into CA once we hit our target */ 3715 if (rack_probertt_use_min_rtt_exit == 1) { 3716 /* Set to min rtt */ 3717 rack_set_prtt_target(rack, segsiz, 3718 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3719 } else if (rack_probertt_use_min_rtt_exit == 2) { 3720 /* Set to current gp rtt */ 3721 rack_set_prtt_target(rack, segsiz, 3722 rack->r_ctl.rc_gp_srtt); 3723 } else if (rack_probertt_use_min_rtt_exit == 3) { 3724 /* Set to entry gp rtt */ 3725 rack_set_prtt_target(rack, segsiz, 3726 rack->r_ctl.rc_entry_gp_rtt); 3727 } else { 3728 uint64_t sum; 3729 uint32_t setval; 3730 3731 sum = rack->r_ctl.rc_entry_gp_rtt; 3732 sum *= 10; 3733 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3734 if (sum >= 20) { 3735 /* 3736 * A highly buffered path needs 3737 * cwnd space for timely to work. 3738 * Lets set things up as if 3739 * we are heading back here again. 3740 */ 3741 setval = rack->r_ctl.rc_entry_gp_rtt; 3742 } else if (sum >= 15) { 3743 /* 3744 * Lets take the smaller of the 3745 * two since we are just somewhat 3746 * buffered. 3747 */ 3748 setval = rack->r_ctl.rc_gp_srtt; 3749 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3750 setval = rack->r_ctl.rc_entry_gp_rtt; 3751 } else { 3752 /* 3753 * Here we are not highly buffered 3754 * and should pick the min we can to 3755 * keep from causing loss. 
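 *
 * (Recap of the three-way choice, using the ratio
 * sum = (rc_entry_gp_rtt * 10) / rc_gp_srtt computed above: sum >= 20,
 * i.e. the entry rtt was at least 2x the current gp srtt, is treated
 * as highly buffered and keeps the entry rtt; sum >= 15 is somewhat
 * buffered and takes the smaller of the two; anything less lands here
 * and uses the filtered min rtt.)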
3756 */ 3757 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3758 } 3759 rack_set_prtt_target(rack, segsiz, 3760 setval); 3761 } 3762 if (rack_probe_rtt_sets_cwnd > 1) { 3763 /* There is a percentage here to boost */ 3764 ebdp = rack->r_ctl.rc_target_probertt_flight; 3765 ebdp *= rack_probe_rtt_sets_cwnd; 3766 ebdp /= 100; 3767 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3768 } else 3769 setto = rack->r_ctl.rc_target_probertt_flight; 3770 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3771 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3772 /* Enforce a min */ 3773 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3774 } 3775 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3776 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3777 } 3778 rack_log_rtt_shrinks(rack, us_cts, 3779 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3780 __LINE__, RACK_RTTS_EXITPROBE); 3781 /* Clear times last so log has all the info */ 3782 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3783 rack->r_ctl.rc_time_probertt_entered = us_cts; 3784 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3785 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3786 } 3787 3788 static void 3789 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3790 { 3791 /* Check in on probe-rtt */ 3792 if (rack->rc_gp_filled == 0) { 3793 /* We do not do p-rtt unless we have gp measurements */ 3794 return; 3795 } 3796 if (rack->in_probe_rtt) { 3797 uint64_t no_overflow; 3798 uint32_t endtime, must_stay; 3799 3800 if (rack->r_ctl.rc_went_idle_time && 3801 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3802 /* 3803 * We went idle during prtt, just exit now. 3804 */ 3805 rack_exit_probertt(rack, us_cts); 3806 } else if (rack_probe_rtt_safety_val && 3807 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3808 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3809 /* 3810 * Probe RTT safety value triggered! 3811 */ 3812 rack_log_rtt_shrinks(rack, us_cts, 3813 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3814 __LINE__, RACK_RTTS_SAFETY); 3815 rack_exit_probertt(rack, us_cts); 3816 } 3817 /* Calculate the max we will wait */ 3818 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3819 if (rack->rc_highly_buffered) 3820 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3821 /* Calculate the min we must wait */ 3822 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3823 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3824 TSTMP_LT(us_cts, endtime)) { 3825 uint32_t calc; 3826 /* Do we lower more? 
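 * Each gp_srtt that passes while we remain above the target flight,
 * the probe-rtt pacing percentage is walked down by another
 * rack_per_of_gp_probertt_reduce, but never below
 * rack_per_of_gp_lowthresh.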
*/ 3827 no_exit: 3828 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3829 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3830 else 3831 calc = 0; 3832 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3833 if (calc) { 3834 /* Maybe */ 3835 calc *= rack_per_of_gp_probertt_reduce; 3836 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3837 /* Limit it too */ 3838 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3839 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3840 } 3841 /* We must reach target or the time set */ 3842 return; 3843 } 3844 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3845 if ((TSTMP_LT(us_cts, must_stay) && 3846 rack->rc_highly_buffered) || 3847 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3848 rack->r_ctl.rc_target_probertt_flight)) { 3849 /* We are not past the must_stay time */ 3850 goto no_exit; 3851 } 3852 rack_log_rtt_shrinks(rack, us_cts, 3853 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3854 __LINE__, RACK_RTTS_REACHTARGET); 3855 rack->r_ctl.rc_time_probertt_starts = us_cts; 3856 if (rack->r_ctl.rc_time_probertt_starts == 0) 3857 rack->r_ctl.rc_time_probertt_starts = 1; 3858 /* Restore back to our rate we want to pace at in prtt */ 3859 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3860 } 3861 /* 3862 * Setup our end time, some number of gp_srtts plus 200ms. 3863 */ 3864 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3865 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3866 if (rack_probertt_gpsrtt_cnt_div) 3867 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3868 else 3869 endtime = 0; 3870 endtime += rack_min_probertt_hold; 3871 endtime += rack->r_ctl.rc_time_probertt_starts; 3872 if (TSTMP_GEQ(us_cts, endtime)) { 3873 /* yes, exit probertt */ 3874 rack_exit_probertt(rack, us_cts); 3875 } 3876 3877 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3878 /* Go into probertt, its been too long since we went lower */ 3879 rack_enter_probertt(rack, us_cts); 3880 } 3881 } 3882 3883 static void 3884 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3885 uint32_t rtt, int32_t rtt_diff) 3886 { 3887 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3888 uint32_t losses; 3889 3890 if ((rack->rc_gp_dyn_mul == 0) || 3891 (rack->use_fixed_rate) || 3892 (rack->in_probe_rtt) || 3893 (rack->rc_always_pace == 0)) { 3894 /* No dynamic GP multiplier in play */ 3895 return; 3896 } 3897 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3898 cur_bw = rack_get_bw(rack); 3899 /* Calculate our up and down range */ 3900 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3901 up_bnd /= 100; 3902 up_bnd += rack->r_ctl.last_gp_comp_bw; 3903 3904 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3905 subfr /= 100; 3906 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3907 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3908 /* 3909 * This is the case where our RTT is above 3910 * the max target and we have been configured 3911 * to just do timely no bonus up stuff in that case. 3912 * 3913 * There are two configurations, set to 1, and we 3914 * just do timely if we are over our max. If its 3915 * set above 1 then we slam the multipliers down 3916 * to 100 and then decrement per timely. 
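 * (That is: rc_no_push_at_mrtt == 1 leaves the current multipliers in
 * place and only applies the timely decrease; a value above 1 first
 * clamps the CA/SS multipliers to 100% via
 * rack_validate_multipliers_at_or_below_100() and then decreases.)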
3917 */ 3918 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3919 __LINE__, 3); 3920 if (rack->r_ctl.rc_no_push_at_mrtt > 1) 3921 rack_validate_multipliers_at_or_below_100(rack); 3922 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 3923 } else if ((last_bw_est < low_bnd) && !losses) { 3924 /* 3925 * We are decreasing this is a bit complicated this 3926 * means we are loosing ground. This could be 3927 * because another flow entered and we are competing 3928 * for b/w with it. This will push the RTT up which 3929 * makes timely unusable unless we want to get shoved 3930 * into a corner and just be backed off (the age 3931 * old problem with delay based CC). 3932 * 3933 * On the other hand if it was a route change we 3934 * would like to stay somewhat contained and not 3935 * blow out the buffers. 3936 */ 3937 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3938 __LINE__, 3); 3939 rack->r_ctl.last_gp_comp_bw = cur_bw; 3940 if (rack->rc_gp_bwred == 0) { 3941 /* Go into reduction counting */ 3942 rack->rc_gp_bwred = 1; 3943 rack->rc_gp_timely_dec_cnt = 0; 3944 } 3945 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) || 3946 (timely_says == 0)) { 3947 /* 3948 * Push another time with a faster pacing 3949 * to try to gain back (we include override to 3950 * get a full raise factor). 3951 */ 3952 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || 3953 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || 3954 (timely_says == 0) || 3955 (rack_down_raise_thresh == 0)) { 3956 /* 3957 * Do an override up in b/w if we were 3958 * below the threshold or if the threshold 3959 * is zero we always do the raise. 3960 */ 3961 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1); 3962 } else { 3963 /* Log it stays the same */ 3964 rack_log_timely(rack, 0, last_bw_est, low_bnd, 0, 3965 __LINE__, 11); 3966 } 3967 rack->rc_gp_timely_dec_cnt++; 3968 /* We are not incrementing really no-count */ 3969 rack->rc_gp_incr = 0; 3970 rack->rc_gp_timely_inc_cnt = 0; 3971 } else { 3972 /* 3973 * Lets just use the RTT 3974 * information and give up 3975 * pushing. 3976 */ 3977 goto use_timely; 3978 } 3979 } else if ((timely_says != 2) && 3980 !losses && 3981 (last_bw_est > up_bnd)) { 3982 /* 3983 * We are increasing b/w lets keep going, updating 3984 * our b/w and ignoring any timely input, unless 3985 * of course we are at our max raise (if there is one). 3986 */ 3987 3988 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 3989 __LINE__, 3); 3990 rack->r_ctl.last_gp_comp_bw = cur_bw; 3991 if (rack->rc_gp_saw_ss && 3992 rack_per_upper_bound_ss && 3993 (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) { 3994 /* 3995 * In cases where we can't go higher 3996 * we should just use timely. 3997 */ 3998 goto use_timely; 3999 } 4000 if (rack->rc_gp_saw_ca && 4001 rack_per_upper_bound_ca && 4002 (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) { 4003 /* 4004 * In cases where we can't go higher 4005 * we should just use timely. 
4006 */ 4007 goto use_timely; 4008 } 4009 rack->rc_gp_bwred = 0; 4010 rack->rc_gp_timely_dec_cnt = 0; 4011 /* You get a set number of pushes if timely is trying to reduce */ 4012 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { 4013 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4014 } else { 4015 /* Log it stays the same */ 4016 rack_log_timely(rack, 0, last_bw_est, up_bnd, 0, 4017 __LINE__, 12); 4018 } 4019 return; 4020 } else { 4021 /* 4022 * We are staying between the lower and upper range bounds 4023 * so use timely to decide. 4024 */ 4025 rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd, 4026 __LINE__, 3); 4027 use_timely: 4028 if (timely_says) { 4029 rack->rc_gp_incr = 0; 4030 rack->rc_gp_timely_inc_cnt = 0; 4031 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && 4032 !losses && 4033 (last_bw_est < low_bnd)) { 4034 /* We are loosing ground */ 4035 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4036 rack->rc_gp_timely_dec_cnt++; 4037 /* We are not incrementing really no-count */ 4038 rack->rc_gp_incr = 0; 4039 rack->rc_gp_timely_inc_cnt = 0; 4040 } else 4041 rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff); 4042 } else { 4043 rack->rc_gp_bwred = 0; 4044 rack->rc_gp_timely_dec_cnt = 0; 4045 rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0); 4046 } 4047 } 4048 } 4049 4050 static int32_t 4051 rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt) 4052 { 4053 int32_t timely_says; 4054 uint64_t log_mult, log_rtt_a_diff; 4055 4056 log_rtt_a_diff = rtt; 4057 log_rtt_a_diff <<= 32; 4058 log_rtt_a_diff |= (uint32_t)rtt_diff; 4059 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * 4060 rack_gp_rtt_maxmul)) { 4061 /* Reduce the b/w multiplier */ 4062 timely_says = 2; 4063 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 4064 log_mult <<= 32; 4065 log_mult |= prev_rtt; 4066 rack_log_timely(rack, timely_says, log_mult, 4067 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4068 log_rtt_a_diff, __LINE__, 4); 4069 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4070 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4071 max(rack_gp_rtt_mindiv , 1)))) { 4072 /* Increase the b/w multiplier */ 4073 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + 4074 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / 4075 max(rack_gp_rtt_mindiv , 1)); 4076 log_mult <<= 32; 4077 log_mult |= prev_rtt; 4078 timely_says = 0; 4079 rack_log_timely(rack, timely_says, log_mult , 4080 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 4081 log_rtt_a_diff, __LINE__, 5); 4082 } else { 4083 /* 4084 * Use a gradient to find it the timely gradient 4085 * is: 4086 * grad = rc_rtt_diff / min_rtt; 4087 * 4088 * anything below or equal to 0 will be 4089 * a increase indication. Anything above 4090 * zero is a decrease. Note we take care 4091 * of the actual gradient calculation 4092 * in the reduction (its not needed for 4093 * increase). 
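 *
 * Hypothetical worked example: with a min_rtt of 10000 usec, a
 * rc_rtt_diff of +2000 gives grad = 0.2 and we signal a decrease
 * (timely_says = 1); a rc_rtt_diff of -1000 gives a negative gradient
 * and we signal an increase (timely_says = 0).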
4094 */ 4095 log_mult = prev_rtt; 4096 if (rtt_diff <= 0) { 4097 /* 4098 * Rttdiff is less than zero, increase the 4099 * b/w multiplier (its 0 or negative) 4100 */ 4101 timely_says = 0; 4102 rack_log_timely(rack, timely_says, log_mult, 4103 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4104 } else { 4105 /* Reduce the b/w multiplier */ 4106 timely_says = 1; 4107 rack_log_timely(rack, timely_says, log_mult, 4108 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4109 } 4110 } 4111 return (timely_says); 4112 } 4113 4114 static void 4115 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4116 tcp_seq th_ack, int line, uint8_t quality) 4117 { 4118 uint64_t tim, bytes_ps, ltim, stim, utim; 4119 uint32_t segsiz, bytes, reqbytes, us_cts; 4120 int32_t gput, new_rtt_diff, timely_says; 4121 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4122 int did_add = 0; 4123 4124 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4125 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4126 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4127 tim = us_cts - tp->gput_ts; 4128 else 4129 tim = 0; 4130 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4131 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4132 else 4133 stim = 0; 4134 /* 4135 * Use the larger of the send time or ack time. This prevents us 4136 * from being influenced by ack artifacts to come up with too 4137 * high of measurement. Note that since we are spanning over many more 4138 * bytes in most of our measurements hopefully that is less likely to 4139 * occur. 4140 */ 4141 if (tim > stim) 4142 utim = max(tim, 1); 4143 else 4144 utim = max(stim, 1); 4145 /* Lets get a msec time ltim too for the old stuff */ 4146 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4147 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4148 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4149 if ((tim == 0) && (stim == 0)) { 4150 /* 4151 * Invalid measurement time, maybe 4152 * all on one ack/one send? 4153 */ 4154 bytes = 0; 4155 bytes_ps = 0; 4156 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4157 0, 0, 0, 10, __LINE__, NULL, quality); 4158 goto skip_measurement; 4159 } 4160 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4161 /* We never made a us_rtt measurement? */ 4162 bytes = 0; 4163 bytes_ps = 0; 4164 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4165 0, 0, 0, 10, __LINE__, NULL, quality); 4166 goto skip_measurement; 4167 } 4168 /* 4169 * Calculate the maximum possible b/w this connection 4170 * could have. We base our calculation on the lowest 4171 * rtt we have seen during the measurement and the 4172 * largest rwnd the client has given us in that time. This 4173 * forms a BDP that is the maximum that we could ever 4174 * get to the client. Anything larger is not valid. 4175 * 4176 * I originally had code here that rejected measurements 4177 * where the time was less than 1/2 the latest us_rtt. 4178 * But after thinking on that I realized its wrong since 4179 * say you had a 150Mbps or even 1Gbps link, and you 4180 * were a long way away.. example I am in Europe (100ms rtt) 4181 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4182 * bytes my time would be 1.2ms, and yet my rtt would say 4183 * the measurement was invalid the time was < 50ms. The 4184 * same thing is true for 150Mb (8ms of time). 
4185 * 4186 * A better way I realized is to look at what the maximum 4187 * the connection could possibly do. This is gated on 4188 * the lowest RTT we have seen and the highest rwnd. 4189 * We should in theory never exceed that, if we are 4190 * then something on the path is storing up packets 4191 * and then feeding them all at once to our endpoint 4192 * messing up our measurement. 4193 */ 4194 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4195 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4196 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4197 if (SEQ_LT(th_ack, tp->gput_seq)) { 4198 /* No measurement can be made */ 4199 bytes = 0; 4200 bytes_ps = 0; 4201 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4202 0, 0, 0, 10, __LINE__, NULL, quality); 4203 goto skip_measurement; 4204 } else 4205 bytes = (th_ack - tp->gput_seq); 4206 bytes_ps = (uint64_t)bytes; 4207 /* 4208 * Don't measure a b/w for pacing unless we have gotten at least 4209 * an initial windows worth of data in this measurement interval. 4210 * 4211 * Small numbers of bytes get badly influenced by delayed ack and 4212 * other artifacts. Note we take the initial window or our 4213 * defined minimum GP (defaulting to 10 which hopefully is the 4214 * IW). 4215 */ 4216 if (rack->rc_gp_filled == 0) { 4217 /* 4218 * The initial estimate is special. We 4219 * have blasted out an IW worth of packets 4220 * without a real valid ack ts results. We 4221 * then setup the app_limited_needs_set flag, 4222 * this should get the first ack in (probably 2 4223 * MSS worth) to be recorded as the timestamp. 4224 * We thus allow a smaller number of bytes i.e. 4225 * IW - 2MSS. 4226 */ 4227 reqbytes -= (2 * segsiz); 4228 /* Also lets fill previous for our first measurement to be neutral */ 4229 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4230 } 4231 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4232 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4233 rack->r_ctl.rc_app_limited_cnt, 4234 0, 0, 10, __LINE__, NULL, quality); 4235 goto skip_measurement; 4236 } 4237 /* 4238 * We now need to calculate the Timely like status so 4239 * we can update (possibly) the b/w multipliers. 4240 */ 4241 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4242 if (rack->rc_gp_filled == 0) { 4243 /* No previous reading */ 4244 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4245 } else { 4246 if (rack->measure_saw_probe_rtt == 0) { 4247 /* 4248 * We don't want a probertt to be counted 4249 * since it will be negative incorrectly. We 4250 * expect to be reducing the RTT when we 4251 * pace at a slower rate. 4252 */ 4253 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4254 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4255 } 4256 } 4257 timely_says = rack_make_timely_judgement(rack, 4258 rack->r_ctl.rc_gp_srtt, 4259 rack->r_ctl.rc_rtt_diff, 4260 rack->r_ctl.rc_prev_gp_srtt 4261 ); 4262 bytes_ps *= HPTS_USEC_IN_SEC; 4263 bytes_ps /= utim; 4264 if (bytes_ps > rack->r_ctl.last_max_bw) { 4265 /* 4266 * Something is on path playing 4267 * since this b/w is not possible based 4268 * on our BDP (highest rwnd and lowest rtt 4269 * we saw in the measurement window). 4270 * 4271 * Another option here would be to 4272 * instead skip the measurement. 
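 *
 * Hypothetical worked example of the cap: with a highest rwnd of
 * 1,000,000 bytes and a lowest measured rtt of 10,000 usecs,
 * last_max_bw = 1,000,000 * HPTS_USEC_IN_SEC / 10,000 =
 * 100,000,000 bytes/sec, and any bytes_ps above that is clamped
 * down to it.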
4273 */ 4274 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4275 bytes_ps, rack->r_ctl.last_max_bw, 0, 4276 11, __LINE__, NULL, quality); 4277 bytes_ps = rack->r_ctl.last_max_bw; 4278 } 4279 /* We store gp for b/w in bytes per second */ 4280 if (rack->rc_gp_filled == 0) { 4281 /* Initial measurement */ 4282 if (bytes_ps) { 4283 rack->r_ctl.gp_bw = bytes_ps; 4284 rack->rc_gp_filled = 1; 4285 rack->r_ctl.num_measurements = 1; 4286 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4287 } else { 4288 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4289 rack->r_ctl.rc_app_limited_cnt, 4290 0, 0, 10, __LINE__, NULL, quality); 4291 } 4292 if (tcp_in_hpts(rack->rc_inp) && 4293 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4294 /* 4295 * Ok we can't trust the pacer in this case 4296 * where we transition from un-paced to paced. 4297 * Or for that matter when the burst mitigation 4298 * was making a wild guess and got it wrong. 4299 * Stop the pacer and clear up all the aggregate 4300 * delays etc. 4301 */ 4302 tcp_hpts_remove(rack->rc_inp); 4303 rack->r_ctl.rc_hpts_flags = 0; 4304 rack->r_ctl.rc_last_output_to = 0; 4305 } 4306 did_add = 2; 4307 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4308 /* Still a small number run an average */ 4309 rack->r_ctl.gp_bw += bytes_ps; 4310 addpart = rack->r_ctl.num_measurements; 4311 rack->r_ctl.num_measurements++; 4312 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4313 /* We have collected enough to move forward */ 4314 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4315 } 4316 did_add = 3; 4317 } else { 4318 /* 4319 * We want to take 1/wma of the goodput and add in to 7/8th 4320 * of the old value weighted by the srtt. So if your measurement 4321 * period is say 2 SRTT's long you would get 1/4 as the 4322 * value, if it was like 1/2 SRTT then you would get 1/16th. 4323 * 4324 * But we must be careful not to take too much i.e. if the 4325 * srtt is say 20ms and the measurement is taken over 4326 * 400ms our weight would be 400/20 i.e. 20. On the 4327 * other hand if we get a measurement over 1ms with a 4328 * 10ms rtt we only want to take a much smaller portion. 4329 */ 4330 if (rack->r_ctl.num_measurements < 0xff) { 4331 rack->r_ctl.num_measurements++; 4332 } 4333 srtt = (uint64_t)tp->t_srtt; 4334 if (srtt == 0) { 4335 /* 4336 * Strange why did t_srtt go back to zero? 4337 */ 4338 if (rack->r_ctl.rc_rack_min_rtt) 4339 srtt = rack->r_ctl.rc_rack_min_rtt; 4340 else 4341 srtt = HPTS_USEC_IN_MSEC; 4342 } 4343 /* 4344 * XXXrrs: Note for reviewers, in playing with 4345 * dynamic pacing I discovered this GP calculation 4346 * as done originally leads to some undesired results. 4347 * Basically you can get longer measurements contributing 4348 * too much to the WMA. Thus I changed it if you are doing 4349 * dynamic adjustments to only do the aportioned adjustment 4350 * if we have a very small (time wise) measurement. Longer 4351 * measurements just get there weight (defaulting to 1/8) 4352 * add to the WMA. We may want to think about changing 4353 * this to always do that for both sides i.e. dynamic 4354 * and non-dynamic... but considering lots of folks 4355 * were playing with this I did not want to change the 4356 * calculation per.se. without your thoughts.. Lawerence? 4357 * Peter?? 
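 *
 * (Summary of the WMA below: with dynamic multipliers off, a sample
 * is folded in proportionally to utim / (srtt * 8), capped so that no
 * single sample displaces more than half of the running gp_bw; with
 * dynamic multipliers on, short samples (utim <= srtt) are apportioned
 * by utim / (srtt * rack_wma_divisor), longer ones contribute a flat
 * 1/rack_wma_divisor, and a probe-rtt sample is only folded in when it
 * would raise gp_bw.)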
4358 */ 4359 if (rack->rc_gp_dyn_mul == 0) { 4360 subpart = rack->r_ctl.gp_bw * utim; 4361 subpart /= (srtt * 8); 4362 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4363 /* 4364 * The b/w update takes no more 4365 * away then 1/2 our running total 4366 * so factor it in. 4367 */ 4368 addpart = bytes_ps * utim; 4369 addpart /= (srtt * 8); 4370 } else { 4371 /* 4372 * Don't allow a single measurement 4373 * to account for more than 1/2 of the 4374 * WMA. This could happen on a retransmission 4375 * where utim becomes huge compared to 4376 * srtt (multiple retransmissions when using 4377 * the sending rate which factors in all the 4378 * transmissions from the first one). 4379 */ 4380 subpart = rack->r_ctl.gp_bw / 2; 4381 addpart = bytes_ps / 2; 4382 } 4383 resid_bw = rack->r_ctl.gp_bw - subpart; 4384 rack->r_ctl.gp_bw = resid_bw + addpart; 4385 did_add = 1; 4386 } else { 4387 if ((utim / srtt) <= 1) { 4388 /* 4389 * The b/w update was over a small period 4390 * of time. The idea here is to prevent a small 4391 * measurement time period from counting 4392 * too much. So we scale it based on the 4393 * time so it attributes less than 1/rack_wma_divisor 4394 * of its measurement. 4395 */ 4396 subpart = rack->r_ctl.gp_bw * utim; 4397 subpart /= (srtt * rack_wma_divisor); 4398 addpart = bytes_ps * utim; 4399 addpart /= (srtt * rack_wma_divisor); 4400 } else { 4401 /* 4402 * The scaled measurement was long 4403 * enough so lets just add in the 4404 * portion of the measurement i.e. 1/rack_wma_divisor 4405 */ 4406 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4407 addpart = bytes_ps / rack_wma_divisor; 4408 } 4409 if ((rack->measure_saw_probe_rtt == 0) || 4410 (bytes_ps > rack->r_ctl.gp_bw)) { 4411 /* 4412 * For probe-rtt we only add it in 4413 * if its larger, all others we just 4414 * add in. 4415 */ 4416 did_add = 1; 4417 resid_bw = rack->r_ctl.gp_bw - subpart; 4418 rack->r_ctl.gp_bw = resid_bw + addpart; 4419 } 4420 } 4421 } 4422 if ((rack->gp_ready == 0) && 4423 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4424 /* We have enough measurements now */ 4425 rack->gp_ready = 1; 4426 rack_set_cc_pacing(rack); 4427 if (rack->defer_options) 4428 rack_apply_deferred_options(rack); 4429 } 4430 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4431 rack_get_bw(rack), 22, did_add, NULL, quality); 4432 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4433 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4434 rack_update_multiplier(rack, timely_says, bytes_ps, 4435 rack->r_ctl.rc_gp_srtt, 4436 rack->r_ctl.rc_rtt_diff); 4437 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4438 rack_get_bw(rack), 3, line, NULL, quality); 4439 /* reset the gp srtt and setup the new prev */ 4440 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4441 /* Record the lost count for the next measurement */ 4442 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4443 /* 4444 * We restart our diffs based on the gpsrtt in the 4445 * measurement window. 4446 */ 4447 rack->rc_gp_rtt_set = 0; 4448 rack->rc_gp_saw_rec = 0; 4449 rack->rc_gp_saw_ca = 0; 4450 rack->rc_gp_saw_ss = 0; 4451 rack->rc_dragged_bottom = 0; 4452 skip_measurement: 4453 4454 #ifdef STATS 4455 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4456 gput); 4457 /* 4458 * XXXLAS: This is a temporary hack, and should be 4459 * chained off VOI_TCP_GPUT when stats(9) grows an 4460 * API to deal with chained VOIs. 
4461 */ 4462 if (tp->t_stats_gput_prev > 0) 4463 stats_voi_update_abs_s32(tp->t_stats, 4464 VOI_TCP_GPUT_ND, 4465 ((gput - tp->t_stats_gput_prev) * 100) / 4466 tp->t_stats_gput_prev); 4467 #endif 4468 tp->t_flags &= ~TF_GPUTINPROG; 4469 tp->t_stats_gput_prev = gput; 4470 /* 4471 * Now are we app limited now and there is space from where we 4472 * were to where we want to go? 4473 * 4474 * We don't do the other case i.e. non-applimited here since 4475 * the next send will trigger us picking up the missing data. 4476 */ 4477 if (rack->r_ctl.rc_first_appl && 4478 TCPS_HAVEESTABLISHED(tp->t_state) && 4479 rack->r_ctl.rc_app_limited_cnt && 4480 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4481 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4482 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4483 /* 4484 * Yep there is enough outstanding to make a measurement here. 4485 */ 4486 struct rack_sendmap *rsm, fe; 4487 4488 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4489 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4490 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4491 rack->app_limited_needs_set = 0; 4492 tp->gput_seq = th_ack; 4493 if (rack->in_probe_rtt) 4494 rack->measure_saw_probe_rtt = 1; 4495 else if ((rack->measure_saw_probe_rtt) && 4496 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4497 rack->measure_saw_probe_rtt = 0; 4498 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4499 /* There is a full window to gain info from */ 4500 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4501 } else { 4502 /* We can only measure up to the applimited point */ 4503 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4504 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4505 /* 4506 * We don't have enough to make a measurement. 4507 */ 4508 tp->t_flags &= ~TF_GPUTINPROG; 4509 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4510 0, 0, 0, 6, __LINE__, NULL, quality); 4511 return; 4512 } 4513 } 4514 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4515 /* 4516 * We will get no more data into the SB 4517 * this means we need to have the data available 4518 * before we start a measurement. 4519 */ 4520 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4521 /* Nope not enough data. */ 4522 return; 4523 } 4524 } 4525 tp->t_flags |= TF_GPUTINPROG; 4526 /* 4527 * Now we need to find the timestamp of the send at tp->gput_seq 4528 * for the send based measurement. 4529 */ 4530 fe.r_start = tp->gput_seq; 4531 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4532 if (rsm) { 4533 /* Ok send-based limit is set */ 4534 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4535 /* 4536 * Move back to include the earlier part 4537 * so our ack time lines up right (this may 4538 * make an overlapping measurement but thats 4539 * ok). 4540 */ 4541 tp->gput_seq = rsm->r_start; 4542 } 4543 if (rsm->r_flags & RACK_ACKED) 4544 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4545 else 4546 rack->app_limited_needs_set = 1; 4547 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4548 } else { 4549 /* 4550 * If we don't find the rsm due to some 4551 * send-limit set the current time, which 4552 * basically disables the send-limit. 
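 * (rc_gp_output_ts then reflects the moment the measurement was armed
 * rather than the original transmit time of gput_seq.)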
4553 */ 4554 struct timeval tv; 4555 4556 microuptime(&tv); 4557 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4558 } 4559 rack_log_pacing_delay_calc(rack, 4560 tp->gput_seq, 4561 tp->gput_ack, 4562 (uint64_t)rsm, 4563 tp->gput_ts, 4564 rack->r_ctl.rc_app_limited_cnt, 4565 9, 4566 __LINE__, NULL, quality); 4567 } 4568 } 4569 4570 /* 4571 * CC wrapper hook functions 4572 */ 4573 static void 4574 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4575 uint16_t type, int32_t recovery) 4576 { 4577 uint32_t prior_cwnd, acked; 4578 struct tcp_log_buffer *lgb = NULL; 4579 uint8_t labc_to_use, quality; 4580 4581 INP_WLOCK_ASSERT(tp->t_inpcb); 4582 tp->ccv->nsegs = nsegs; 4583 acked = tp->ccv->bytes_this_ack = (th_ack - tp->snd_una); 4584 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4585 uint32_t max; 4586 4587 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4588 if (tp->ccv->bytes_this_ack > max) { 4589 tp->ccv->bytes_this_ack = max; 4590 } 4591 } 4592 #ifdef STATS 4593 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4594 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4595 #endif 4596 quality = RACK_QUALITY_NONE; 4597 if ((tp->t_flags & TF_GPUTINPROG) && 4598 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4599 /* Measure the Goodput */ 4600 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4601 #ifdef NETFLIX_PEAKRATE 4602 if ((type == CC_ACK) && 4603 (tp->t_maxpeakrate)) { 4604 /* 4605 * We update t_peakrate_thr. This gives us roughly 4606 * one update per round trip time. Note 4607 * it will only be used if pace_always is off i.e 4608 * we don't do this for paced flows. 4609 */ 4610 rack_update_peakrate_thr(tp); 4611 } 4612 #endif 4613 } 4614 /* Which way our we limited, if not cwnd limited no advance in CA */ 4615 if (tp->snd_cwnd <= tp->snd_wnd) 4616 tp->ccv->flags |= CCF_CWND_LIMITED; 4617 else 4618 tp->ccv->flags &= ~CCF_CWND_LIMITED; 4619 if (tp->snd_cwnd > tp->snd_ssthresh) { 4620 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 4621 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4622 /* For the setting of a window past use the actual scwnd we are using */ 4623 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4624 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4625 tp->ccv->flags |= CCF_ABC_SENTAWND; 4626 } 4627 } else { 4628 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 4629 tp->t_bytes_acked = 0; 4630 } 4631 prior_cwnd = tp->snd_cwnd; 4632 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4633 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4634 labc_to_use = rack->rc_labc; 4635 else 4636 labc_to_use = rack_max_abc_post_recovery; 4637 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4638 union tcp_log_stackspecific log; 4639 struct timeval tv; 4640 4641 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4642 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4643 log.u_bbr.flex1 = th_ack; 4644 log.u_bbr.flex2 = tp->ccv->flags; 4645 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4646 log.u_bbr.flex4 = tp->ccv->nsegs; 4647 log.u_bbr.flex5 = labc_to_use; 4648 log.u_bbr.flex6 = prior_cwnd; 4649 log.u_bbr.flex7 = V_tcp_do_newsack; 4650 log.u_bbr.flex8 = 1; 4651 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4652 0, &log, false, NULL, NULL, 0, &tv); 4653 } 4654 if (CC_ALGO(tp)->ack_received != NULL) { 4655 /* XXXLAS: Find a way to live without this */ 4656 tp->ccv->curack = th_ack; 4657 
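		/*
		 * Hand the CC module our chosen local ABC limit; the
		 * CCF_USE_LOCAL_ABC flag set below asks it to honor
		 * ccv->labc (labc_to_use) for this ack, typically in
		 * place of the global V_tcp_abc_l_var.
		 */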
        tp->ccv->labc = labc_to_use;
        tp->ccv->flags |= CCF_USE_LOCAL_ABC;
        CC_ALGO(tp)->ack_received(tp->ccv, type);
    }
    if (lgb) {
        lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd;
    }
    if (rack->r_must_retran) {
        if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) {
            /*
             * We are now beyond the rxt point so let's disable
             * the flag.
             */
            rack->r_ctl.rc_out_at_rto = 0;
            rack->r_must_retran = 0;
        } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) {
            /*
             * Only decrement rc_out_at_rto if the cwnd advances
             * by at least a whole segment. Otherwise, the next time
             * the peer acks we won't be able to send. This generally
             * happens when we are in Congestion Avoidance.
             */
            if (acked <= rack->r_ctl.rc_out_at_rto) {
                rack->r_ctl.rc_out_at_rto -= acked;
            } else {
                rack->r_ctl.rc_out_at_rto = 0;
            }
        }
    }
#ifdef STATS
    stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use);
#endif
    if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) {
        rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use;
    }
#ifdef NETFLIX_PEAKRATE
    /* we enforce max peak rate if it is set and we are not pacing */
    if ((rack->rc_always_pace == 0) &&
        tp->t_peakrate_thr &&
        (tp->snd_cwnd > tp->t_peakrate_thr)) {
        tp->snd_cwnd = tp->t_peakrate_thr;
    }
#endif
}

static void
tcp_rack_partialack(struct tcpcb *tp)
{
    struct tcp_rack *rack;

    rack = (struct tcp_rack *)tp->t_fb_ptr;
    INP_WLOCK_ASSERT(tp->t_inpcb);
    /*
     * If we are doing PRR and have enough
     * room to send <or> we are pacing and prr
     * is disabled we will want to see if we
     * can send data (by setting r_wanted_output to
     * true).
     */
    if ((rack->r_ctl.rc_prr_sndcnt > 0) ||
        rack->rack_no_prr)
        rack->r_wanted_output = 1;
}

static void
rack_post_recovery(struct tcpcb *tp, uint32_t th_ack)
{
    struct tcp_rack *rack;
    uint32_t orig_cwnd;

    orig_cwnd = tp->snd_cwnd;
    INP_WLOCK_ASSERT(tp->t_inpcb);
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* only alert CC if we alerted when we entered */
    if (CC_ALGO(tp)->post_recovery != NULL) {
        tp->ccv->curack = th_ack;
        CC_ALGO(tp)->post_recovery(tp->ccv);
        if (tp->snd_cwnd < tp->snd_ssthresh) {
            /*
             * Rack has burst control and pacing
             * so let's not set this any lower than
             * snd_ssthresh per RFC-6582 (option 2).
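             * (RFC 6582 option 2 sets cwnd to ssthresh when recovery
             * completes; deflating below that would only hurt, since
             * pacing and burst control already keep us from dumping a
             * full window at once.)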
4739 */ 4740 tp->snd_cwnd = tp->snd_ssthresh; 4741 } 4742 } 4743 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4744 union tcp_log_stackspecific log; 4745 struct timeval tv; 4746 4747 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4748 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4749 log.u_bbr.flex1 = th_ack; 4750 log.u_bbr.flex2 = tp->ccv->flags; 4751 log.u_bbr.flex3 = tp->ccv->bytes_this_ack; 4752 log.u_bbr.flex4 = tp->ccv->nsegs; 4753 log.u_bbr.flex5 = V_tcp_abc_l_var; 4754 log.u_bbr.flex6 = orig_cwnd; 4755 log.u_bbr.flex7 = V_tcp_do_newsack; 4756 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4757 log.u_bbr.flex8 = 2; 4758 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4759 0, &log, false, NULL, NULL, 0, &tv); 4760 } 4761 if ((rack->rack_no_prr == 0) && 4762 (rack->no_prr_addback == 0) && 4763 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4764 /* 4765 * Suck the next prr cnt back into cwnd, but 4766 * only do that if we are not application limited. 4767 */ 4768 if (ctf_outstanding(tp) <= sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 4769 /* 4770 * We are allowed to add back to the cwnd the amount we did 4771 * not get out if: 4772 * a) no_prr_addback is off. 4773 * b) we are not app limited 4774 * c) we are doing prr 4775 * <and> 4776 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4777 */ 4778 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4779 rack->r_ctl.rc_prr_sndcnt); 4780 } 4781 rack->r_ctl.rc_prr_sndcnt = 0; 4782 rack_log_to_prr(rack, 1, 0, __LINE__); 4783 } 4784 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4785 tp->snd_recover = tp->snd_una; 4786 if (rack->r_ctl.dsack_persist) { 4787 rack->r_ctl.dsack_persist--; 4788 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4789 rack->r_ctl.num_dsack = 0; 4790 } 4791 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4792 } 4793 EXIT_RECOVERY(tp->t_flags); 4794 } 4795 4796 static void 4797 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4798 { 4799 struct tcp_rack *rack; 4800 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4801 4802 INP_WLOCK_ASSERT(tp->t_inpcb); 4803 #ifdef STATS 4804 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4805 #endif 4806 if (IN_RECOVERY(tp->t_flags) == 0) { 4807 in_rec_at_entry = 0; 4808 ssthresh_enter = tp->snd_ssthresh; 4809 cwnd_enter = tp->snd_cwnd; 4810 } else 4811 in_rec_at_entry = 1; 4812 rack = (struct tcp_rack *)tp->t_fb_ptr; 4813 switch (type) { 4814 case CC_NDUPACK: 4815 tp->t_flags &= ~TF_WASFRECOVERY; 4816 tp->t_flags &= ~TF_WASCRECOVERY; 4817 if (!IN_FASTRECOVERY(tp->t_flags)) { 4818 rack->r_ctl.rc_prr_delivered = 0; 4819 rack->r_ctl.rc_prr_out = 0; 4820 if (rack->rack_no_prr == 0) { 4821 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4822 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4823 } 4824 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4825 tp->snd_recover = tp->snd_max; 4826 if (tp->t_flags2 & TF2_ECN_PERMIT) 4827 tp->t_flags2 |= TF2_ECN_SND_CWR; 4828 } 4829 break; 4830 case CC_ECN: 4831 if (!IN_CONGRECOVERY(tp->t_flags) || 4832 /* 4833 * Allow ECN reaction on ACK to CWR, if 4834 * that data segment was also CE marked. 
4835 */ 4836 SEQ_GEQ(ack, tp->snd_recover)) { 4837 EXIT_CONGRECOVERY(tp->t_flags); 4838 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4839 tp->snd_recover = tp->snd_max + 1; 4840 if (tp->t_flags2 & TF2_ECN_PERMIT) 4841 tp->t_flags2 |= TF2_ECN_SND_CWR; 4842 } 4843 break; 4844 case CC_RTO: 4845 tp->t_dupacks = 0; 4846 tp->t_bytes_acked = 0; 4847 EXIT_RECOVERY(tp->t_flags); 4848 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4849 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4850 orig_cwnd = tp->snd_cwnd; 4851 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4852 rack_log_to_prr(rack, 16, orig_cwnd, line); 4853 if (tp->t_flags2 & TF2_ECN_PERMIT) 4854 tp->t_flags2 |= TF2_ECN_SND_CWR; 4855 break; 4856 case CC_RTO_ERR: 4857 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4858 /* RTO was unnecessary, so reset everything. */ 4859 tp->snd_cwnd = tp->snd_cwnd_prev; 4860 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4861 tp->snd_recover = tp->snd_recover_prev; 4862 if (tp->t_flags & TF_WASFRECOVERY) { 4863 ENTER_FASTRECOVERY(tp->t_flags); 4864 tp->t_flags &= ~TF_WASFRECOVERY; 4865 } 4866 if (tp->t_flags & TF_WASCRECOVERY) { 4867 ENTER_CONGRECOVERY(tp->t_flags); 4868 tp->t_flags &= ~TF_WASCRECOVERY; 4869 } 4870 tp->snd_nxt = tp->snd_max; 4871 tp->t_badrxtwin = 0; 4872 break; 4873 } 4874 if ((CC_ALGO(tp)->cong_signal != NULL) && 4875 (type != CC_RTO)){ 4876 tp->ccv->curack = ack; 4877 CC_ALGO(tp)->cong_signal(tp->ccv, type); 4878 } 4879 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4880 rack_log_to_prr(rack, 15, cwnd_enter, line); 4881 rack->r_ctl.dsack_byte_cnt = 0; 4882 rack->r_ctl.retran_during_recovery = 0; 4883 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4884 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4885 rack->r_ent_rec_ns = 1; 4886 } 4887 } 4888 4889 static inline void 4890 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4891 { 4892 uint32_t i_cwnd; 4893 4894 INP_WLOCK_ASSERT(tp->t_inpcb); 4895 4896 #ifdef NETFLIX_STATS 4897 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4898 if (tp->t_state == TCPS_ESTABLISHED) 4899 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4900 #endif 4901 if (CC_ALGO(tp)->after_idle != NULL) 4902 CC_ALGO(tp)->after_idle(tp->ccv); 4903 4904 if (tp->snd_cwnd == 1) 4905 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4906 else 4907 i_cwnd = rc_init_window(rack); 4908 4909 /* 4910 * Being idle is no different than the initial window. If the cc 4911 * clamps it down below the initial window raise it to the initial 4912 * window. 4913 */ 4914 if (tp->snd_cwnd < i_cwnd) { 4915 tp->snd_cwnd = i_cwnd; 4916 } 4917 } 4918 4919 /* 4920 * Indicate whether this ack should be delayed. We can delay the ack if 4921 * following conditions are met: 4922 * - There is no delayed ack timer in progress. 4923 * - Our last ack wasn't a 0-sized window. We never want to delay 4924 * the ack that opens up a 0-sized window. 4925 * - LRO wasn't used for this segment. We make sure by checking that the 4926 * segment size is not larger than the MSS. 4927 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4928 * connection. 4929 */ 4930 #define DELAY_ACK(tp, tlen) \ 4931 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4932 ((tp->t_flags & TF_DELACK) == 0) && \ 4933 (tlen <= tp->t_maxseg) && \ 4934 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4935 4936 static struct rack_sendmap * 4937 rack_find_lowest_rsm(struct tcp_rack *rack) 4938 { 4939 struct rack_sendmap *rsm; 4940 4941 /* 4942 * Walk the time-order transmitted list looking for an rsm that is 4943 * not acked. 
This will be the one that was sent the longest time
     * ago that is still outstanding.
     */
    TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
        if (rsm->r_flags & RACK_ACKED) {
            continue;
        }
        goto finish;
    }
finish:
    return (rsm);
}

static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm)
{
    struct rack_sendmap *prsm;

    /*
     * Walk the sequence order list backward until we arrive at
     * the highest seq not acked. In theory when this is called it
     * should be the last segment (which it was not).
     */
    prsm = rsm;
    RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) {
        if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) {
            continue;
        }
        return (prsm);
    }
    return (NULL);
}

static uint32_t
rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts)
{
    int32_t lro;
    uint32_t thresh;

    /*
     * lro is the flag we use to determine if we have seen reordering.
     * If it gets set we have seen reordering. The reorder logic
     * works in one of two ways:
     *
     * If reorder-fade is configured, then we track the last time we saw
     * re-ordering occur. If we reach the point where enough time has
     * passed we no longer consider reordering as occurring.
     *
     * Or if reorder-fade is 0, then once we see reordering we consider
     * the connection to always be subject to reordering and just set lro
     * to 1.
     *
     * In the end, if lro is non-zero we add the extra time for
     * reordering in.
     */
    if (srtt == 0)
        srtt = 1;
    if (rack->r_ctl.rc_reorder_ts) {
        if (rack->r_ctl.rc_reorder_fade) {
            if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) {
                lro = cts - rack->r_ctl.rc_reorder_ts;
                if (lro == 0) {
                    /*
                     * No time has passed since the last
                     * reorder, mark it as reordering.
                     */
                    lro = 1;
                }
            } else {
                /* Negative time? */
                lro = 0;
            }
            if (lro > rack->r_ctl.rc_reorder_fade) {
                /* Turn off reordering seen too */
                rack->r_ctl.rc_reorder_ts = 0;
                lro = 0;
            }
        } else {
            /* Reordering does not fade */
            lro = 1;
        }
    } else {
        lro = 0;
    }
    if (rack->rc_rack_tmr_std_based == 0) {
        thresh = srtt + rack->r_ctl.rc_pkt_delay;
    } else {
        /* Standards based pkt-delay is 1/4 srtt */
        thresh = srtt + (srtt >> 2);
    }
    if (lro && (rack->rc_rack_tmr_std_based == 0)) {
        /* It must be set, if not you get 1/4 rtt */
        if (rack->r_ctl.rc_reorder_shift)
            thresh += (srtt >> rack->r_ctl.rc_reorder_shift);
        else
            thresh += (srtt >> 2);
    }
    if (rack->rc_rack_use_dsack &&
        lro &&
        (rack->r_ctl.num_dsack > 0)) {
        /*
         * We only increase the reordering window if we
         * have seen reordering <and> we have a DSACK count.
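         * Each DSACK adds another srtt/4; for example, with an srtt of
         * 40000 usecs and num_dsack == 2 the threshold grows by 20000
         * usecs, still subject to the 2 * srtt ceiling applied below.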
5046 */ 5047 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5048 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5049 } 5050 /* SRTT * 2 is the ceiling */ 5051 if (thresh > (srtt * 2)) { 5052 thresh = srtt * 2; 5053 } 5054 /* And we don't want it above the RTO max either */ 5055 if (thresh > rack_rto_max) { 5056 thresh = rack_rto_max; 5057 } 5058 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5059 return (thresh); 5060 } 5061 5062 static uint32_t 5063 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5064 struct rack_sendmap *rsm, uint32_t srtt) 5065 { 5066 struct rack_sendmap *prsm; 5067 uint32_t thresh, len; 5068 int segsiz; 5069 5070 if (srtt == 0) 5071 srtt = 1; 5072 if (rack->r_ctl.rc_tlp_threshold) 5073 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5074 else 5075 thresh = (srtt * 2); 5076 5077 /* Get the previous sent packet, if any */ 5078 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5079 len = rsm->r_end - rsm->r_start; 5080 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5081 /* Exactly like the ID */ 5082 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5083 uint32_t alt_thresh; 5084 /* 5085 * Compensate for delayed-ack with the d-ack time. 5086 */ 5087 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5088 if (alt_thresh > thresh) 5089 thresh = alt_thresh; 5090 } 5091 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5092 /* 2.1 behavior */ 5093 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5094 if (prsm && (len <= segsiz)) { 5095 /* 5096 * Two packets outstanding, thresh should be (2*srtt) + 5097 * possible inter-packet delay (if any). 5098 */ 5099 uint32_t inter_gap = 0; 5100 int idx, nidx; 5101 5102 idx = rsm->r_rtr_cnt - 1; 5103 nidx = prsm->r_rtr_cnt - 1; 5104 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5105 /* Yes it was sent later (or at the same time) */ 5106 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5107 } 5108 thresh += inter_gap; 5109 } else if (len <= segsiz) { 5110 /* 5111 * Possibly compensate for delayed-ack. 5112 */ 5113 uint32_t alt_thresh; 5114 5115 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5116 if (alt_thresh > thresh) 5117 thresh = alt_thresh; 5118 } 5119 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5120 /* 2.2 behavior */ 5121 if (len <= segsiz) { 5122 uint32_t alt_thresh; 5123 /* 5124 * Compensate for delayed-ack with the d-ack time. 5125 */ 5126 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5127 if (alt_thresh > thresh) 5128 thresh = alt_thresh; 5129 } 5130 } 5131 /* Not above an RTO */ 5132 if (thresh > tp->t_rxtcur) { 5133 thresh = tp->t_rxtcur; 5134 } 5135 /* Not above a RTO max */ 5136 if (thresh > rack_rto_max) { 5137 thresh = rack_rto_max; 5138 } 5139 /* Apply user supplied min TLP */ 5140 if (thresh < rack_tlp_min) { 5141 thresh = rack_tlp_min; 5142 } 5143 return (thresh); 5144 } 5145 5146 static uint32_t 5147 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5148 { 5149 /* 5150 * We want the rack_rtt which is the 5151 * last rtt we measured. However if that 5152 * does not exist we fallback to the srtt (which 5153 * we probably will never do) and then as a last 5154 * resort we use RACK_INITIAL_RTO if no srtt is 5155 * yet set. 
5156 */ 5157 if (rack->rc_rack_rtt) 5158 return (rack->rc_rack_rtt); 5159 else if (tp->t_srtt == 0) 5160 return (RACK_INITIAL_RTO); 5161 return (tp->t_srtt); 5162 } 5163 5164 static struct rack_sendmap * 5165 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5166 { 5167 /* 5168 * Check to see that we don't need to fall into recovery. We will 5169 * need to do so if our oldest transmit is past the time we should 5170 * have had an ack. 5171 */ 5172 struct tcp_rack *rack; 5173 struct rack_sendmap *rsm; 5174 int32_t idx; 5175 uint32_t srtt, thresh; 5176 5177 rack = (struct tcp_rack *)tp->t_fb_ptr; 5178 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5179 return (NULL); 5180 } 5181 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5182 if (rsm == NULL) 5183 return (NULL); 5184 5185 5186 if (rsm->r_flags & RACK_ACKED) { 5187 rsm = rack_find_lowest_rsm(rack); 5188 if (rsm == NULL) 5189 return (NULL); 5190 } 5191 idx = rsm->r_rtr_cnt - 1; 5192 srtt = rack_grab_rtt(tp, rack); 5193 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5194 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5195 return (NULL); 5196 } 5197 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5198 return (NULL); 5199 } 5200 /* Ok if we reach here we are over-due and this guy can be sent */ 5201 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5202 return (rsm); 5203 } 5204 5205 static uint32_t 5206 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5207 { 5208 int32_t t; 5209 int32_t tt; 5210 uint32_t ret_val; 5211 5212 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5213 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5214 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5215 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5216 ret_val = (uint32_t)tt; 5217 return (ret_val); 5218 } 5219 5220 static uint32_t 5221 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5222 { 5223 /* 5224 * Start the FR timer, we do this based on getting the first one in 5225 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5226 * events we need to stop the running timer (if its running) before 5227 * starting the new one. 5228 */ 5229 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5230 uint32_t srtt_cur; 5231 int32_t idx; 5232 int32_t is_tlp_timer = 0; 5233 struct rack_sendmap *rsm; 5234 5235 if (rack->t_timers_stopped) { 5236 /* All timers have been stopped none are to run */ 5237 return (0); 5238 } 5239 if (rack->rc_in_persist) { 5240 /* We can't start any timer in persists */ 5241 return (rack_get_persists_timer_val(tp, rack)); 5242 } 5243 rack->rc_on_min_to = 0; 5244 if ((tp->t_state < TCPS_ESTABLISHED) || 5245 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5246 goto activate_rxt; 5247 } 5248 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5249 if ((rsm == NULL) || sup_rack) { 5250 /* Nothing on the send map or no rack */ 5251 activate_rxt: 5252 time_since_sent = 0; 5253 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5254 if (rsm) { 5255 /* 5256 * Should we discount the RTX timer any? 5257 * 5258 * We want to discount it the smallest amount. 5259 * If a timer (Rack/TLP or RXT) has gone off more 5260 * recently thats the discount we want to use (now - timer time). 5261 * If the retransmit of the oldest packet was more recent then 5262 * we want to use that (now - oldest-packet-last_transmit_time). 
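         * For example, if t_rxtcur is 200000 usecs and the most recent
         * of those two events was 60000 usecs ago, we arm the RXT timer
         * for the remaining 140000 usecs instead of a full RTO from now.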
5263 * 5264 */ 5265 idx = rsm->r_rtr_cnt - 1; 5266 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5267 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5268 else 5269 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5270 if (TSTMP_GT(cts, tstmp_touse)) 5271 time_since_sent = cts - tstmp_touse; 5272 } 5273 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 5274 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5275 to = tp->t_rxtcur; 5276 if (to > time_since_sent) 5277 to -= time_since_sent; 5278 else 5279 to = rack->r_ctl.rc_min_to; 5280 if (to == 0) 5281 to = 1; 5282 /* Special case for KEEPINIT */ 5283 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5284 (TP_KEEPINIT(tp) != 0) && 5285 rsm) { 5286 /* 5287 * We have to put a ceiling on the rxt timer 5288 * of the keep-init timeout. 5289 */ 5290 uint32_t max_time, red; 5291 5292 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5293 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5294 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5295 if (red < max_time) 5296 max_time -= red; 5297 else 5298 max_time = 1; 5299 } 5300 /* Reduce timeout to the keep value if needed */ 5301 if (max_time < to) 5302 to = max_time; 5303 } 5304 return (to); 5305 } 5306 return (0); 5307 } 5308 if (rsm->r_flags & RACK_ACKED) { 5309 rsm = rack_find_lowest_rsm(rack); 5310 if (rsm == NULL) { 5311 /* No lowest? */ 5312 goto activate_rxt; 5313 } 5314 } 5315 if (rack->sack_attack_disable) { 5316 /* 5317 * We don't want to do 5318 * any TLP's if you are an attacker. 5319 * Though if you are doing what 5320 * is expected you may still have 5321 * SACK-PASSED marks. 5322 */ 5323 goto activate_rxt; 5324 } 5325 /* Convert from ms to usecs */ 5326 if ((rsm->r_flags & RACK_SACK_PASSED) || 5327 (rsm->r_flags & RACK_RWND_COLLAPSED) || 5328 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5329 if ((tp->t_flags & TF_SENTFIN) && 5330 ((tp->snd_max - tp->snd_una) == 1) && 5331 (rsm->r_flags & RACK_HAS_FIN)) { 5332 /* 5333 * We don't start a rack timer if all we have is a 5334 * FIN outstanding. 5335 */ 5336 goto activate_rxt; 5337 } 5338 if ((rack->use_rack_rr == 0) && 5339 (IN_FASTRECOVERY(tp->t_flags)) && 5340 (rack->rack_no_prr == 0) && 5341 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5342 /* 5343 * We are not cheating, in recovery and 5344 * not enough ack's to yet get our next 5345 * retransmission out. 5346 * 5347 * Note that classified attackers do not 5348 * get to use the rack-cheat. 5349 */ 5350 goto activate_tlp; 5351 } 5352 srtt = rack_grab_rtt(tp, rack); 5353 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5354 idx = rsm->r_rtr_cnt - 1; 5355 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5356 if (SEQ_GEQ(exp, cts)) { 5357 to = exp - cts; 5358 if (to < rack->r_ctl.rc_min_to) { 5359 to = rack->r_ctl.rc_min_to; 5360 if (rack->r_rr_config == 3) 5361 rack->rc_on_min_to = 1; 5362 } 5363 } else { 5364 to = rack->r_ctl.rc_min_to; 5365 if (rack->r_rr_config == 3) 5366 rack->rc_on_min_to = 1; 5367 } 5368 } else { 5369 /* Ok we need to do a TLP not RACK */ 5370 activate_tlp: 5371 if ((rack->rc_tlp_in_progress != 0) && 5372 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5373 /* 5374 * The previous send was a TLP and we have sent 5375 * N TLP's without sending new data. 5376 */ 5377 goto activate_rxt; 5378 } 5379 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5380 if (rsm == NULL) { 5381 /* We found no rsm to TLP with. 
*/ 5382 goto activate_rxt; 5383 } 5384 if (rsm->r_flags & RACK_HAS_FIN) { 5385 /* If its a FIN we dont do TLP */ 5386 rsm = NULL; 5387 goto activate_rxt; 5388 } 5389 idx = rsm->r_rtr_cnt - 1; 5390 time_since_sent = 0; 5391 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5392 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5393 else 5394 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5395 if (TSTMP_GT(cts, tstmp_touse)) 5396 time_since_sent = cts - tstmp_touse; 5397 is_tlp_timer = 1; 5398 if (tp->t_srtt) { 5399 if ((rack->rc_srtt_measure_made == 0) && 5400 (tp->t_srtt == 1)) { 5401 /* 5402 * If another stack as run and set srtt to 1, 5403 * then the srtt was 0, so lets use the initial. 5404 */ 5405 srtt = RACK_INITIAL_RTO; 5406 } else { 5407 srtt_cur = tp->t_srtt; 5408 srtt = srtt_cur; 5409 } 5410 } else 5411 srtt = RACK_INITIAL_RTO; 5412 /* 5413 * If the SRTT is not keeping up and the 5414 * rack RTT has spiked we want to use 5415 * the last RTT not the smoothed one. 5416 */ 5417 if (rack_tlp_use_greater && 5418 tp->t_srtt && 5419 (srtt < rack_grab_rtt(tp, rack))) { 5420 srtt = rack_grab_rtt(tp, rack); 5421 } 5422 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5423 if (thresh > time_since_sent) { 5424 to = thresh - time_since_sent; 5425 } else { 5426 to = rack->r_ctl.rc_min_to; 5427 rack_log_alt_to_to_cancel(rack, 5428 thresh, /* flex1 */ 5429 time_since_sent, /* flex2 */ 5430 tstmp_touse, /* flex3 */ 5431 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5432 (uint32_t)rsm->r_tim_lastsent[idx], 5433 srtt, 5434 idx, 99); 5435 } 5436 if (to < rack_tlp_min) { 5437 to = rack_tlp_min; 5438 } 5439 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5440 /* 5441 * If the TLP time works out to larger than the max 5442 * RTO lets not do TLP.. just RTO. 5443 */ 5444 goto activate_rxt; 5445 } 5446 } 5447 if (is_tlp_timer == 0) { 5448 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5449 } else { 5450 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5451 } 5452 if (to == 0) 5453 to = 1; 5454 return (to); 5455 } 5456 5457 static void 5458 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5459 { 5460 if (rack->rc_in_persist == 0) { 5461 if (tp->t_flags & TF_GPUTINPROG) { 5462 /* 5463 * Stop the goodput now, the calling of the 5464 * measurement function clears the flag. 
5465 */ 5466 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5467 RACK_QUALITY_PERSIST); 5468 } 5469 #ifdef NETFLIX_SHARED_CWND 5470 if (rack->r_ctl.rc_scw) { 5471 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5472 rack->rack_scwnd_is_idle = 1; 5473 } 5474 #endif 5475 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5476 if (rack->r_ctl.rc_went_idle_time == 0) 5477 rack->r_ctl.rc_went_idle_time = 1; 5478 rack_timer_cancel(tp, rack, cts, __LINE__); 5479 rack->r_ctl.persist_lost_ends = 0; 5480 rack->probe_not_answered = 0; 5481 rack->forced_ack = 0; 5482 tp->t_rxtshift = 0; 5483 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5484 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5485 rack->rc_in_persist = 1; 5486 } 5487 } 5488 5489 static void 5490 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5491 { 5492 if (tcp_in_hpts(rack->rc_inp)) { 5493 tcp_hpts_remove(rack->rc_inp); 5494 rack->r_ctl.rc_hpts_flags = 0; 5495 } 5496 #ifdef NETFLIX_SHARED_CWND 5497 if (rack->r_ctl.rc_scw) { 5498 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5499 rack->rack_scwnd_is_idle = 0; 5500 } 5501 #endif 5502 if (rack->rc_gp_dyn_mul && 5503 (rack->use_fixed_rate == 0) && 5504 (rack->rc_always_pace)) { 5505 /* 5506 * Do we count this as if a probe-rtt just 5507 * finished? 5508 */ 5509 uint32_t time_idle, idle_min; 5510 5511 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5512 idle_min = rack_min_probertt_hold; 5513 if (rack_probertt_gpsrtt_cnt_div) { 5514 uint64_t extra; 5515 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5516 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5517 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5518 idle_min += (uint32_t)extra; 5519 } 5520 if (time_idle >= idle_min) { 5521 /* Yes, we count it as a probe-rtt. 
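         * If we were not already in probe-rtt, the timestamps below are
         * simply reset as if a probe-rtt had just completed; if we were,
         * rack_exit_probertt() finishes it using the current usec clock.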
*/ 5522 uint32_t us_cts; 5523 5524 us_cts = tcp_get_usecs(NULL); 5525 if (rack->in_probe_rtt == 0) { 5526 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5527 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5528 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5529 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5530 } else { 5531 rack_exit_probertt(rack, us_cts); 5532 } 5533 } 5534 } 5535 rack->rc_in_persist = 0; 5536 rack->r_ctl.rc_went_idle_time = 0; 5537 tp->t_rxtshift = 0; 5538 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5539 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5540 rack->r_ctl.rc_agg_delayed = 0; 5541 rack->r_early = 0; 5542 rack->r_late = 0; 5543 rack->r_ctl.rc_agg_early = 0; 5544 } 5545 5546 static void 5547 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5548 struct hpts_diag *diag, struct timeval *tv) 5549 { 5550 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5551 union tcp_log_stackspecific log; 5552 5553 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5554 log.u_bbr.flex1 = diag->p_nxt_slot; 5555 log.u_bbr.flex2 = diag->p_cur_slot; 5556 log.u_bbr.flex3 = diag->slot_req; 5557 log.u_bbr.flex4 = diag->inp_hptsslot; 5558 log.u_bbr.flex5 = diag->slot_remaining; 5559 log.u_bbr.flex6 = diag->need_new_to; 5560 log.u_bbr.flex7 = diag->p_hpts_active; 5561 log.u_bbr.flex8 = diag->p_on_min_sleep; 5562 /* Hijack other fields as needed */ 5563 log.u_bbr.epoch = diag->have_slept; 5564 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5565 log.u_bbr.pkts_out = diag->co_ret; 5566 log.u_bbr.applimited = diag->hpts_sleep_time; 5567 log.u_bbr.delivered = diag->p_prev_slot; 5568 log.u_bbr.inflight = diag->p_runningslot; 5569 log.u_bbr.bw_inuse = diag->wheel_slot; 5570 log.u_bbr.rttProp = diag->wheel_cts; 5571 log.u_bbr.timeStamp = cts; 5572 log.u_bbr.delRate = diag->maxslots; 5573 log.u_bbr.cur_del_rate = diag->p_curtick; 5574 log.u_bbr.cur_del_rate <<= 32; 5575 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5576 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5577 &rack->rc_inp->inp_socket->so_rcv, 5578 &rack->rc_inp->inp_socket->so_snd, 5579 BBR_LOG_HPTSDIAG, 0, 5580 0, &log, false, tv); 5581 } 5582 5583 } 5584 5585 static void 5586 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5587 { 5588 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5589 union tcp_log_stackspecific log; 5590 struct timeval tv; 5591 5592 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5593 log.u_bbr.flex1 = sb->sb_flags; 5594 log.u_bbr.flex2 = len; 5595 log.u_bbr.flex3 = sb->sb_state; 5596 log.u_bbr.flex8 = type; 5597 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5598 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5599 &rack->rc_inp->inp_socket->so_rcv, 5600 &rack->rc_inp->inp_socket->so_snd, 5601 TCP_LOG_SB_WAKE, 0, 5602 len, &log, false, &tv); 5603 } 5604 } 5605 5606 static void 5607 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5608 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5609 { 5610 struct hpts_diag diag; 5611 struct inpcb *inp; 5612 struct timeval tv; 5613 uint32_t delayed_ack = 0; 5614 uint32_t hpts_timeout; 5615 uint32_t entry_slot = slot; 5616 uint8_t stopped; 5617 uint32_t left = 0; 5618 uint32_t us_cts; 5619 5620 inp = tp->t_inpcb; 5621 if ((tp->t_state == TCPS_CLOSED) || 5622 (tp->t_state == TCPS_LISTEN)) { 5623 return; 5624 } 5625 if (tcp_in_hpts(inp)) { 5626 /* Already on the pacer */ 5627 return; 5628 } 5629 stopped 
= rack->rc_tmr_stopped;
    if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
        left = rack->r_ctl.rc_timer_exp - cts;
    }
    rack->r_ctl.rc_timer_exp = 0;
    rack->r_ctl.rc_hpts_flags = 0;
    us_cts = tcp_get_usecs(&tv);
    /* Now early/late accounting */
    rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
    if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
        /*
         * We have an early carry-over set,
         * we can always add more time so we
         * can always make this compensation.
         *
         * Note if acks are allowed to wake us, do not
         * penalize the next timer for being awakened
         * by an ack, a.k.a. the rc_agg_early (non-paced mode).
         */
        slot += rack->r_ctl.rc_agg_early;
        rack->r_early = 0;
        rack->r_ctl.rc_agg_early = 0;
    }
    if (rack->r_late) {
        /*
         * This is harder, we can
         * compensate some but it
         * really depends on what
         * the current pacing time is.
         */
        if (rack->r_ctl.rc_agg_delayed >= slot) {
            /*
             * We can't compensate for it all.
             * And we have to have some time
             * on the clock. We always have a min
             * 10 slots (10 x 10 i.e. 100 usecs).
             */
            if (slot <= HPTS_TICKS_PER_SLOT) {
                /* We gain delay */
                rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
                slot = HPTS_TICKS_PER_SLOT;
            } else {
                /* We take off some */
                rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
                slot = HPTS_TICKS_PER_SLOT;
            }
        } else {
            slot -= rack->r_ctl.rc_agg_delayed;
            rack->r_ctl.rc_agg_delayed = 0;
            /* Make sure we have 100 useconds at minimum */
            if (slot < HPTS_TICKS_PER_SLOT) {
                rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
                slot = HPTS_TICKS_PER_SLOT;
            }
            if (rack->r_ctl.rc_agg_delayed == 0)
                rack->r_late = 0;
        }
    }
    if (slot) {
        /* We are pacing too */
        rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
    }
    hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack);
#ifdef NETFLIX_EXP_DETECTION
    if (rack->sack_attack_disable &&
        (slot < tcp_sad_pacing_interval)) {
        /*
         * We have a potential attacker on
         * the line. We may have some (or no)
         * pacing time set. We want to
         * slow down the processing of sacks by some
         * amount (if it is an attacker). Set the default
         * slot for attackers in place (unless the original
         * interval is longer). It is stored in
         * micro-seconds.
         */
        slot = tcp_sad_pacing_interval;
    }
#endif
    if (tp->t_flags & TF_DELACK) {
        delayed_ack = TICKS_2_USEC(tcp_delacktime);
        rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK;
    }
    if (delayed_ack && ((hpts_timeout == 0) ||
                        (delayed_ack < hpts_timeout)))
        hpts_timeout = delayed_ack;
    else
        rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
    /*
     * If no timers are going to run and we will fall off the hptsi
     * wheel, we resort to a keep-alive timer if it's configured.
     */
    if ((hpts_timeout == 0) &&
        (slot == 0)) {
        if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
            (tp->t_state <= TCPS_CLOSING)) {
            /*
             * Ok we have no timer (persists, rack, tlp, rxt or
             * del-ack), we don't have segments being paced. So
             * all that is left is the keepalive timer.
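             * Note TP_KEEPIDLE/TP_KEEPINIT are kept in ticks, hence
             * the TICKS_2_USEC() conversions below before the value
             * is handed to the hpts wheel.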
5729 */ 5730 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5731 /* Get the established keep-alive time */ 5732 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5733 } else { 5734 /* 5735 * Get the initial setup keep-alive time, 5736 * note that this is probably not going to 5737 * happen, since rack will be running a rxt timer 5738 * if a SYN of some sort is outstanding. It is 5739 * actually handled in rack_timeout_rxt(). 5740 */ 5741 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5742 } 5743 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5744 if (rack->in_probe_rtt) { 5745 /* 5746 * We want to instead not wake up a long time from 5747 * now but to wake up about the time we would 5748 * exit probe-rtt and initiate a keep-alive ack. 5749 * This will get us out of probe-rtt and update 5750 * our min-rtt. 5751 */ 5752 hpts_timeout = rack_min_probertt_hold; 5753 } 5754 } 5755 } 5756 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5757 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5758 /* 5759 * RACK, TLP, persists and RXT timers all are restartable 5760 * based on actions input .. i.e we received a packet (ack 5761 * or sack) and that changes things (rw, or snd_una etc). 5762 * Thus we can restart them with a new value. For 5763 * keep-alive, delayed_ack we keep track of what was left 5764 * and restart the timer with a smaller value. 5765 */ 5766 if (left < hpts_timeout) 5767 hpts_timeout = left; 5768 } 5769 if (hpts_timeout) { 5770 /* 5771 * Hack alert for now we can't time-out over 2,147,483 5772 * seconds (a bit more than 596 hours), which is probably ok 5773 * :). 5774 */ 5775 if (hpts_timeout > 0x7ffffffe) 5776 hpts_timeout = 0x7ffffffe; 5777 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5778 } 5779 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5780 if ((rack->gp_ready == 0) && 5781 (rack->use_fixed_rate == 0) && 5782 (hpts_timeout < slot) && 5783 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5784 /* 5785 * We have no good estimate yet for the 5786 * old clunky burst mitigation or the 5787 * real pacing. And the tlp or rxt is smaller 5788 * than the pacing calculation. Lets not 5789 * pace that long since we know the calculation 5790 * so far is not accurate. 5791 */ 5792 slot = hpts_timeout; 5793 } 5794 /** 5795 * Turn off all the flags for queuing by default. The 5796 * flags have important meanings to what happens when 5797 * LRO interacts with the transport. Most likely (by default now) 5798 * mbuf_queueing and ack compression are on. So the transport 5799 * has a couple of flags that control what happens (if those 5800 * are not on then these flags won't have any effect since it 5801 * won't go through the queuing LRO path). 5802 * 5803 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5804 * pacing output, so don't disturb. But 5805 * it also means LRO can wake me if there 5806 * is a SACK arrival. 5807 * 5808 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5809 * with the above flag (QUEUE_READY) and 5810 * when present it says don't even wake me 5811 * if a SACK arrives. 5812 * 5813 * The idea behind these flags is that if we are pacing we 5814 * set the MBUF_QUEUE_READY and only get woken up if 5815 * a SACK arrives (which could change things) or if 5816 * our pacing timer expires. If, however, we have a rack 5817 * timer running, then we don't even want a sack to wake 5818 * us since the rack timer has to expire before we can send. 
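     * Concretely: with only a pacing slot armed, INP_MBUF_QUEUE_READY
     * is set so a SACK can still wake us early; once a rack timer is
     * also pending (and r_rr_config != 3), INP_DONT_SACK_QUEUE is added
     * and even SACKs are held until the timer pops.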
5819 * 5820 * Other cases should usually have none of the flags set 5821 * so LRO can call into us. 5822 */ 5823 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5824 if (slot) { 5825 rack->r_ctl.rc_last_output_to = us_cts + slot; 5826 /* 5827 * A pacing timer (slot) is being set, in 5828 * such a case we cannot send (we are blocked by 5829 * the timer). So lets tell LRO that it should not 5830 * wake us unless there is a SACK. Note this only 5831 * will be effective if mbuf queueing is on or 5832 * compressed acks are being processed. 5833 */ 5834 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5835 /* 5836 * But wait if we have a Rack timer running 5837 * even a SACK should not disturb us (with 5838 * the exception of r_rr_config 3). 5839 */ 5840 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5841 (rack->r_rr_config != 3)) 5842 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5843 if (rack->rc_ack_can_sendout_data) { 5844 /* 5845 * Ahh but wait, this is that special case 5846 * where the pacing timer can be disturbed 5847 * backout the changes (used for non-paced 5848 * burst limiting). 5849 */ 5850 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5851 } 5852 if ((rack->use_rack_rr) && 5853 (rack->r_rr_config < 2) && 5854 ((hpts_timeout) && (hpts_timeout < slot))) { 5855 /* 5856 * Arrange for the hpts to kick back in after the 5857 * t-o if the t-o does not cause a send. 5858 */ 5859 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5860 __LINE__, &diag); 5861 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5862 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5863 } else { 5864 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 5865 __LINE__, &diag); 5866 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5867 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5868 } 5869 } else if (hpts_timeout) { 5870 /* 5871 * With respect to inp_flags2 here, lets let any new acks wake 5872 * us up here. Since we are not pacing (no pacing timer), output 5873 * can happen so we should let it. If its a Rack timer, then any inbound 5874 * packet probably won't change the sending (we will be blocked) 5875 * but it may change the prr stats so letting it in (the set defaults 5876 * at the start of this block) are good enough. 5877 */ 5878 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 5879 __LINE__, &diag); 5880 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5881 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5882 } else { 5883 /* No timer starting */ 5884 #ifdef INVARIANTS 5885 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5886 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5887 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5888 } 5889 #endif 5890 } 5891 rack->rc_tmr_stopped = 0; 5892 if (slot) 5893 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5894 } 5895 5896 /* 5897 * RACK Timer, here we simply do logging and house keeping. 5898 * the normal rack_output() function will call the 5899 * appropriate thing to check if we need to do a RACK retransmit. 5900 * We return 1, saying don't proceed with rack_output only 5901 * when all timers have been stopped (destroyed PCB?). 5902 */ 5903 static int 5904 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5905 { 5906 /* 5907 * This timer simply provides an internal trigger to send out data. 
5908 * The check_recovery_mode call will see if there are needed 5909 * retransmissions, if so we will enter fast-recovery. The output 5910 * call may or may not do the same thing depending on sysctl 5911 * settings. 5912 */ 5913 struct rack_sendmap *rsm; 5914 5915 if (tp->t_timers->tt_flags & TT_STOPPED) { 5916 return (1); 5917 } 5918 counter_u64_add(rack_to_tot, 1); 5919 if (rack->r_state && (rack->r_state != tp->t_state)) 5920 rack_set_state(tp, rack); 5921 rack->rc_on_min_to = 0; 5922 rsm = rack_check_recovery_mode(tp, cts); 5923 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5924 if (rsm) { 5925 rack->r_ctl.rc_resend = rsm; 5926 rack->r_timer_override = 1; 5927 if (rack->use_rack_rr) { 5928 /* 5929 * Don't accumulate extra pacing delay 5930 * we are allowing the rack timer to 5931 * over-ride pacing i.e. rrr takes precedence 5932 * if the pacing interval is longer than the rrr 5933 * time (in other words we get the min pacing 5934 * time versus rrr pacing time). 5935 */ 5936 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5937 } 5938 } 5939 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5940 if (rsm == NULL) { 5941 /* restart a timer and return 1 */ 5942 rack_start_hpts_timer(rack, tp, cts, 5943 0, 0, 0); 5944 return (1); 5945 } 5946 return (0); 5947 } 5948 5949 static void 5950 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5951 { 5952 if (rsm->m->m_len > rsm->orig_m_len) { 5953 /* 5954 * Mbuf grew, caused by sbcompress, our offset does 5955 * not change. 5956 */ 5957 rsm->orig_m_len = rsm->m->m_len; 5958 } else if (rsm->m->m_len < rsm->orig_m_len) { 5959 /* 5960 * Mbuf shrank, trimmed off the top by an ack, our 5961 * offset changes. 5962 */ 5963 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5964 rsm->orig_m_len = rsm->m->m_len; 5965 } 5966 } 5967 5968 static void 5969 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5970 { 5971 struct mbuf *m; 5972 uint32_t soff; 5973 5974 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5975 /* Fix up the orig_m_len and possibly the mbuf offset */ 5976 rack_adjust_orig_mlen(src_rsm); 5977 } 5978 m = src_rsm->m; 5979 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5980 while (soff >= m->m_len) { 5981 /* Move out past this mbuf */ 5982 soff -= m->m_len; 5983 m = m->m_next; 5984 KASSERT((m != NULL), 5985 ("rsm:%p nrsm:%p hit at soff:%u null m", 5986 src_rsm, rsm, soff)); 5987 } 5988 rsm->m = m; 5989 rsm->soff = soff; 5990 rsm->orig_m_len = m->m_len; 5991 } 5992 5993 static __inline void 5994 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5995 struct rack_sendmap *rsm, uint32_t start) 5996 { 5997 int idx; 5998 5999 nrsm->r_start = start; 6000 nrsm->r_end = rsm->r_end; 6001 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 6002 nrsm->r_flags = rsm->r_flags; 6003 nrsm->r_dupack = rsm->r_dupack; 6004 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 6005 nrsm->r_rtr_bytes = 0; 6006 nrsm->r_fas = rsm->r_fas; 6007 rsm->r_end = nrsm->r_start; 6008 nrsm->r_just_ret = rsm->r_just_ret; 6009 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 6010 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 6011 } 6012 /* Now if we have SYN flag we keep it on the left edge */ 6013 if (nrsm->r_flags & RACK_HAS_SYN) 6014 nrsm->r_flags &= ~RACK_HAS_SYN; 6015 /* Now if we have a FIN flag we keep it on the right edge */ 6016 if (rsm->r_flags & RACK_HAS_FIN) 6017 rsm->r_flags &= ~RACK_HAS_FIN; 6018 /* Push bit must go to the right edge as well */ 6019 if (rsm->r_flags & RACK_HAD_PUSH) 6020 rsm->r_flags &= 
~RACK_HAD_PUSH; 6021 /* Clone over the state of the hw_tls flag */ 6022 nrsm->r_hw_tls = rsm->r_hw_tls; 6023 /* 6024 * Now we need to find nrsm's new location in the mbuf chain 6025 * we basically calculate a new offset, which is soff + 6026 * how much is left in original rsm. Then we walk out the mbuf 6027 * chain to find the righ position, it may be the same mbuf 6028 * or maybe not. 6029 */ 6030 KASSERT(((rsm->m != NULL) || 6031 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 6032 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 6033 if (rsm->m) 6034 rack_setup_offset_for_rsm(rsm, nrsm); 6035 } 6036 6037 static struct rack_sendmap * 6038 rack_merge_rsm(struct tcp_rack *rack, 6039 struct rack_sendmap *l_rsm, 6040 struct rack_sendmap *r_rsm) 6041 { 6042 /* 6043 * We are merging two ack'd RSM's, 6044 * the l_rsm is on the left (lower seq 6045 * values) and the r_rsm is on the right 6046 * (higher seq value). The simplest way 6047 * to merge these is to move the right 6048 * one into the left. I don't think there 6049 * is any reason we need to try to find 6050 * the oldest (or last oldest retransmitted). 6051 */ 6052 #ifdef INVARIANTS 6053 struct rack_sendmap *rm; 6054 #endif 6055 rack_log_map_chg(rack->rc_tp, rack, NULL, 6056 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6057 l_rsm->r_end = r_rsm->r_end; 6058 if (l_rsm->r_dupack < r_rsm->r_dupack) 6059 l_rsm->r_dupack = r_rsm->r_dupack; 6060 if (r_rsm->r_rtr_bytes) 6061 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6062 if (r_rsm->r_in_tmap) { 6063 /* This really should not happen */ 6064 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6065 r_rsm->r_in_tmap = 0; 6066 } 6067 6068 /* Now the flags */ 6069 if (r_rsm->r_flags & RACK_HAS_FIN) 6070 l_rsm->r_flags |= RACK_HAS_FIN; 6071 if (r_rsm->r_flags & RACK_TLP) 6072 l_rsm->r_flags |= RACK_TLP; 6073 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6074 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6075 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6076 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6077 /* 6078 * If both are app-limited then let the 6079 * free lower the count. If right is app 6080 * limited and left is not, transfer. 6081 */ 6082 l_rsm->r_flags |= RACK_APP_LIMITED; 6083 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6084 if (r_rsm == rack->r_ctl.rc_first_appl) 6085 rack->r_ctl.rc_first_appl = l_rsm; 6086 } 6087 #ifndef INVARIANTS 6088 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6089 #else 6090 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6091 if (rm != r_rsm) { 6092 panic("removing head in rack:%p rsm:%p rm:%p", 6093 rack, r_rsm, rm); 6094 } 6095 #endif 6096 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6097 /* Transfer the split limit to the map we free */ 6098 r_rsm->r_limit_type = l_rsm->r_limit_type; 6099 l_rsm->r_limit_type = 0; 6100 } 6101 rack_free(rack, r_rsm); 6102 return (l_rsm); 6103 } 6104 6105 /* 6106 * TLP Timer, here we simply setup what segment we want to 6107 * have the TLP expire on, the normal rack_output() will then 6108 * send it out. 6109 * 6110 * We return 1, saying don't proceed with rack_output only 6111 * when all timers have been stopped (destroyed PCB?). 6112 */ 6113 static int 6114 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6115 { 6116 /* 6117 * Tail Loss Probe. 
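 * A TLP transmits a single segment (new data when some is available
 * and fits in the window, otherwise the tail of what is outstanding)
 * so that the resulting ACK or SACK can trigger recovery without
 * waiting out a full RTO.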
6118 */ 6119 struct rack_sendmap *rsm = NULL; 6120 #ifdef INVARIANTS 6121 struct rack_sendmap *insret; 6122 #endif 6123 struct socket *so; 6124 uint32_t amm; 6125 uint32_t out, avail; 6126 int collapsed_win = 0; 6127 6128 if (tp->t_timers->tt_flags & TT_STOPPED) { 6129 return (1); 6130 } 6131 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6132 /* Its not time yet */ 6133 return (0); 6134 } 6135 if (ctf_progress_timeout_check(tp, true)) { 6136 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6137 return (-ETIMEDOUT); /* tcp_drop() */ 6138 } 6139 /* 6140 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6141 * need to figure out how to force a full MSS segment out. 6142 */ 6143 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6144 rack->r_ctl.retran_during_recovery = 0; 6145 rack->r_ctl.dsack_byte_cnt = 0; 6146 counter_u64_add(rack_tlp_tot, 1); 6147 if (rack->r_state && (rack->r_state != tp->t_state)) 6148 rack_set_state(tp, rack); 6149 so = tp->t_inpcb->inp_socket; 6150 avail = sbavail(&so->so_snd); 6151 out = tp->snd_max - tp->snd_una; 6152 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6153 /* special case, we need a retransmission */ 6154 collapsed_win = 1; 6155 goto need_retran; 6156 } 6157 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6158 rack->r_ctl.dsack_persist--; 6159 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6160 rack->r_ctl.num_dsack = 0; 6161 } 6162 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6163 } 6164 if ((tp->t_flags & TF_GPUTINPROG) && 6165 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6166 /* 6167 * If this is the second in a row 6168 * TLP and we are doing a measurement 6169 * its time to abandon the measurement. 6170 * Something is likely broken on 6171 * the clients network and measuring a 6172 * broken network does us no good. 6173 */ 6174 tp->t_flags &= ~TF_GPUTINPROG; 6175 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6176 rack->r_ctl.rc_gp_srtt /*flex1*/, 6177 tp->gput_seq, 6178 0, 0, 18, __LINE__, NULL, 0); 6179 } 6180 /* 6181 * Check our send oldest always settings, and if 6182 * there is an oldest to send jump to the need_retran. 6183 */ 6184 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6185 goto need_retran; 6186 6187 if (avail > out) { 6188 /* New data is available */ 6189 amm = avail - out; 6190 if (amm > ctf_fixed_maxseg(tp)) { 6191 amm = ctf_fixed_maxseg(tp); 6192 if ((amm + out) > tp->snd_wnd) { 6193 /* We are rwnd limited */ 6194 goto need_retran; 6195 } 6196 } else if (amm < ctf_fixed_maxseg(tp)) { 6197 /* not enough to fill a MTU */ 6198 goto need_retran; 6199 } 6200 if (IN_FASTRECOVERY(tp->t_flags)) { 6201 /* Unlikely */ 6202 if (rack->rack_no_prr == 0) { 6203 if (out + amm <= tp->snd_wnd) { 6204 rack->r_ctl.rc_prr_sndcnt = amm; 6205 rack->r_ctl.rc_tlp_new_data = amm; 6206 rack_log_to_prr(rack, 4, 0, __LINE__); 6207 } 6208 } else 6209 goto need_retran; 6210 } else { 6211 /* Set the send-new override */ 6212 if (out + amm <= tp->snd_wnd) 6213 rack->r_ctl.rc_tlp_new_data = amm; 6214 else 6215 goto need_retran; 6216 } 6217 rack->r_ctl.rc_tlpsend = NULL; 6218 counter_u64_add(rack_tlp_newdata, 1); 6219 goto send; 6220 } 6221 need_retran: 6222 /* 6223 * Ok we need to arrange the last un-acked segment to be re-sent, or 6224 * optionally the first un-acked segment. 
6225 */ 6226 if (collapsed_win == 0) { 6227 if (rack_always_send_oldest) 6228 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6229 else { 6230 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6231 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6232 rsm = rack_find_high_nonack(rack, rsm); 6233 } 6234 } 6235 if (rsm == NULL) { 6236 #ifdef TCP_BLACKBOX 6237 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6238 #endif 6239 goto out; 6240 } 6241 } else { 6242 /* 6243 * We must find the last segment 6244 * that was acceptable by the client. 6245 */ 6246 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6247 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6248 /* Found one */ 6249 break; 6250 } 6251 } 6252 if (rsm == NULL) { 6253 /* None? if so send the first */ 6254 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6255 if (rsm == NULL) { 6256 #ifdef TCP_BLACKBOX 6257 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6258 #endif 6259 goto out; 6260 } 6261 } 6262 } 6263 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6264 /* 6265 * We need to split this the last segment in two. 6266 */ 6267 struct rack_sendmap *nrsm; 6268 6269 nrsm = rack_alloc_full_limit(rack); 6270 if (nrsm == NULL) { 6271 /* 6272 * No memory to split, we will just exit and punt 6273 * off to the RXT timer. 6274 */ 6275 goto out; 6276 } 6277 rack_clone_rsm(rack, nrsm, rsm, 6278 (rsm->r_end - ctf_fixed_maxseg(tp))); 6279 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6280 #ifndef INVARIANTS 6281 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6282 #else 6283 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6284 if (insret != NULL) { 6285 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6286 nrsm, insret, rack, rsm); 6287 } 6288 #endif 6289 if (rsm->r_in_tmap) { 6290 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6291 nrsm->r_in_tmap = 1; 6292 } 6293 rsm = nrsm; 6294 } 6295 rack->r_ctl.rc_tlpsend = rsm; 6296 send: 6297 /* Make sure output path knows we are doing a TLP */ 6298 *doing_tlp = 1; 6299 rack->r_timer_override = 1; 6300 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6301 return (0); 6302 out: 6303 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6304 return (0); 6305 } 6306 6307 /* 6308 * Delayed ack Timer, here we simply need to setup the 6309 * ACK_NOW flag and remove the DELACK flag. From there 6310 * the output routine will send the ack out. 6311 * 6312 * We only return 1, saying don't proceed, if all timers 6313 * are stopped (destroyed PCB?). 6314 */ 6315 static int 6316 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6317 { 6318 if (tp->t_timers->tt_flags & TT_STOPPED) { 6319 return (1); 6320 } 6321 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6322 tp->t_flags &= ~TF_DELACK; 6323 tp->t_flags |= TF_ACKNOW; 6324 KMOD_TCPSTAT_INC(tcps_delack); 6325 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6326 return (0); 6327 } 6328 6329 /* 6330 * Persists timer, here we simply send the 6331 * same thing as a keepalive will. 6332 * the one byte send. 6333 * 6334 * We only return 1, saying don't proceed, if all timers 6335 * are stopped (destroyed PCB?). 
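 * The probe itself is built with tcpip_maketemplate() and sent with
 * tcp_respond() using snd_una - 1, the classic zero-window probe;
 * being outside the receive window, it forces the peer to respond
 * with its current window.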
6336 */ 6337 static int 6338 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6339 { 6340 struct tcptemp *t_template; 6341 #ifdef INVARIANTS 6342 struct inpcb *inp = tp->t_inpcb; 6343 #endif 6344 int32_t retval = 1; 6345 6346 if (tp->t_timers->tt_flags & TT_STOPPED) { 6347 return (1); 6348 } 6349 if (rack->rc_in_persist == 0) 6350 return (0); 6351 if (ctf_progress_timeout_check(tp, false)) { 6352 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6353 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6354 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6355 return (-ETIMEDOUT); /* tcp_drop() */ 6356 } 6357 KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 6358 /* 6359 * Persistence timer into zero window. Force a byte to be output, if 6360 * possible. 6361 */ 6362 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6363 /* 6364 * Hack: if the peer is dead/unreachable, we do not time out if the 6365 * window is closed. After a full backoff, drop the connection if 6366 * the idle time (no responses to probes) reaches the maximum 6367 * backoff that we would use if retransmitting. 6368 */ 6369 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6370 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6371 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6372 KMOD_TCPSTAT_INC(tcps_persistdrop); 6373 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6374 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6375 retval = -ETIMEDOUT; /* tcp_drop() */ 6376 goto out; 6377 } 6378 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6379 tp->snd_una == tp->snd_max) 6380 rack_exit_persist(tp, rack, cts); 6381 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6382 /* 6383 * If the user has closed the socket then drop a persisting 6384 * connection after a much reduced timeout. 6385 */ 6386 if (tp->t_state > TCPS_CLOSE_WAIT && 6387 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6388 KMOD_TCPSTAT_INC(tcps_persistdrop); 6389 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6390 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6391 retval = -ETIMEDOUT; /* tcp_drop() */ 6392 goto out; 6393 } 6394 t_template = tcpip_maketemplate(rack->rc_inp); 6395 if (t_template) { 6396 /* only set it if we were answered */ 6397 if (rack->forced_ack == 0) { 6398 rack->forced_ack = 1; 6399 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6400 } else { 6401 rack->probe_not_answered = 1; 6402 counter_u64_add(rack_persists_loss, 1); 6403 rack->r_ctl.persist_lost_ends++; 6404 } 6405 counter_u64_add(rack_persists_sends, 1); 6406 tcp_respond(tp, t_template->tt_ipgen, 6407 &t_template->tt_t, (struct mbuf *)NULL, 6408 tp->rcv_nxt, tp->snd_una - 1, 0); 6409 /* This sends an ack */ 6410 if (tp->t_flags & TF_DELACK) 6411 tp->t_flags &= ~TF_DELACK; 6412 free(t_template, M_TEMP); 6413 } 6414 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6415 tp->t_rxtshift++; 6416 out: 6417 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6418 rack_start_hpts_timer(rack, tp, cts, 6419 0, 0, 0); 6420 return (retval); 6421 } 6422 6423 /* 6424 * If a keepalive goes off, we had no other timers 6425 * happening. We always return 1 here since this 6426 * routine either drops the connection or sends 6427 * out a segment with respond. 
6428 */ 6429 static int 6430 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6431 { 6432 struct tcptemp *t_template; 6433 struct inpcb *inp; 6434 6435 if (tp->t_timers->tt_flags & TT_STOPPED) { 6436 return (1); 6437 } 6438 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6439 inp = tp->t_inpcb; 6440 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6441 /* 6442 * Keep-alive timer went off; send something or drop connection if 6443 * idle for too long. 6444 */ 6445 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6446 if (tp->t_state < TCPS_ESTABLISHED) 6447 goto dropit; 6448 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6449 tp->t_state <= TCPS_CLOSING) { 6450 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6451 goto dropit; 6452 /* 6453 * Send a packet designed to force a response if the peer is 6454 * up and reachable: either an ACK if the connection is 6455 * still alive, or an RST if the peer has closed the 6456 * connection due to timeout or reboot. Using sequence 6457 * number tp->snd_una-1 causes the transmitted zero-length 6458 * segment to lie outside the receive window; by the 6459 * protocol spec, this requires the correspondent TCP to 6460 * respond. 6461 */ 6462 KMOD_TCPSTAT_INC(tcps_keepprobe); 6463 t_template = tcpip_maketemplate(inp); 6464 if (t_template) { 6465 if (rack->forced_ack == 0) { 6466 rack->forced_ack = 1; 6467 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6468 } else { 6469 rack->probe_not_answered = 1; 6470 } 6471 tcp_respond(tp, t_template->tt_ipgen, 6472 &t_template->tt_t, (struct mbuf *)NULL, 6473 tp->rcv_nxt, tp->snd_una - 1, 0); 6474 free(t_template, M_TEMP); 6475 } 6476 } 6477 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6478 return (1); 6479 dropit: 6480 KMOD_TCPSTAT_INC(tcps_keepdrops); 6481 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6482 return (-ETIMEDOUT); /* tcp_drop() */ 6483 } 6484 6485 /* 6486 * Retransmit helper function, clear up all the ack 6487 * flags and take care of important book keeping. 6488 */ 6489 static void 6490 rack_remxt_tmr(struct tcpcb *tp) 6491 { 6492 /* 6493 * The retransmit timer went off, all sack'd blocks must be 6494 * un-acked. 6495 */ 6496 struct rack_sendmap *rsm, *trsm = NULL; 6497 struct tcp_rack *rack; 6498 6499 rack = (struct tcp_rack *)tp->t_fb_ptr; 6500 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6501 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6502 if (rack->r_state && (rack->r_state != tp->t_state)) 6503 rack_set_state(tp, rack); 6504 /* 6505 * Ideally we would like to be able to 6506 * mark SACK-PASS on anything not acked here. 6507 * 6508 * However, if we do that we would burst out 6509 * all that data 1ms apart. This would be unwise, 6510 * so for now we will just let the normal rxt timer 6511 * and tlp timer take care of it. 6512 * 6513 * Also we really need to stick them back in sequence 6514 * order. This way we send in the proper order and any 6515 * sacks that come floating in will "re-ack" the data. 6516 * To do this we zap the tmap with an INIT and then 6517 * walk through and place every rsm in the RB tree 6518 * back in its seq ordered place. 
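 * The map tree (rc_mtree) is keyed on r_start, so the RB_FOREACH below
 * already visits the rsm's in sequence order; re-threading rc_tmap off that
 * walk is what restores the send order described above.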
6519 */ 6520 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6521 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6522 rsm->r_dupack = 0; 6523 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6524 /* We must re-add it back to the tlist */ 6525 if (trsm == NULL) { 6526 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6527 } else { 6528 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6529 } 6530 rsm->r_in_tmap = 1; 6531 trsm = rsm; 6532 if (rsm->r_flags & RACK_ACKED) 6533 rsm->r_flags |= RACK_WAS_ACKED; 6534 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 6535 rsm->r_flags |= RACK_MUST_RXT; 6536 } 6537 /* Clear the count (we just un-acked them) */ 6538 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6539 rack->r_ctl.rc_sacked = 0; 6540 rack->r_ctl.rc_sacklast = NULL; 6541 rack->r_ctl.rc_agg_delayed = 0; 6542 rack->r_early = 0; 6543 rack->r_ctl.rc_agg_early = 0; 6544 rack->r_late = 0; 6545 /* Clear the tlp rtx mark */ 6546 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6547 if (rack->r_ctl.rc_resend != NULL) 6548 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6549 rack->r_ctl.rc_prr_sndcnt = 0; 6550 rack_log_to_prr(rack, 6, 0, __LINE__); 6551 rack->r_timer_override = 1; 6552 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6553 #ifdef NETFLIX_EXP_DETECTION 6554 || (rack->sack_attack_disable != 0) 6555 #endif 6556 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6557 /* 6558 * For non-sack customers new data 6559 * needs to go out as retransmits until 6560 * we retransmit up to snd_max. 6561 */ 6562 rack->r_must_retran = 1; 6563 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6564 rack->r_ctl.rc_sacked); 6565 } 6566 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6567 } 6568 6569 static void 6570 rack_convert_rtts(struct tcpcb *tp) 6571 { 6572 if (tp->t_srtt > 1) { 6573 uint32_t val, frac; 6574 6575 val = tp->t_srtt >> TCP_RTT_SHIFT; 6576 frac = tp->t_srtt & 0x1f; 6577 tp->t_srtt = TICKS_2_USEC(val); 6578 /* 6579 * frac is the fractional part of the srtt (if any) 6580 * but its in ticks and every bit represents 6581 * 1/32nd of a hz. 6582 */ 6583 if (frac) { 6584 if (hz == 1000) { 6585 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6586 } else { 6587 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6588 } 6589 tp->t_srtt += frac; 6590 } 6591 } 6592 if (tp->t_rttvar) { 6593 uint32_t val, frac; 6594 6595 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6596 frac = tp->t_rttvar & 0x1f; 6597 tp->t_rttvar = TICKS_2_USEC(val); 6598 /* 6599 * frac is the fractional part of the srtt (if any) 6600 * but its in ticks and every bit represents 6601 * 1/32nd of a hz. 
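 * (The same conversion applied to t_srtt above is applied here to t_rttvar.)
 * For example, assuming hz = 1000 so that one tick is 1 ms, a fractional
 * part of frac == 16 (16/32 of a tick) converts to 16 * 1000 / 32 = 500 usec;
 * in general the fraction contributes frac * 1000000 / (hz * 32) usec, which
 * is what the two branches below compute.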
6602 */ 6603 if (frac) { 6604 if (hz == 1000) { 6605 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6606 } else { 6607 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6608 } 6609 tp->t_rttvar += frac; 6610 } 6611 } 6612 tp->t_rxtcur = RACK_REXMTVAL(tp); 6613 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6614 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6615 } 6616 if (tp->t_rxtcur > rack_rto_max) { 6617 tp->t_rxtcur = rack_rto_max; 6618 } 6619 } 6620 6621 static void 6622 rack_cc_conn_init(struct tcpcb *tp) 6623 { 6624 struct tcp_rack *rack; 6625 uint32_t srtt; 6626 6627 rack = (struct tcp_rack *)tp->t_fb_ptr; 6628 srtt = tp->t_srtt; 6629 cc_conn_init(tp); 6630 /* 6631 * Now convert to rack's internal format, 6632 * if required. 6633 */ 6634 if ((srtt == 0) && (tp->t_srtt != 0)) 6635 rack_convert_rtts(tp); 6636 /* 6637 * We want a chance to stay in slowstart as 6638 * we create a connection. TCP spec says that 6639 * initially ssthresh is infinite. For our 6640 * purposes that is the snd_wnd. 6641 */ 6642 if (tp->snd_ssthresh < tp->snd_wnd) { 6643 tp->snd_ssthresh = tp->snd_wnd; 6644 } 6645 /* 6646 * We also want to assure a IW worth of 6647 * data can get inflight. 6648 */ 6649 if (rc_init_window(rack) < tp->snd_cwnd) 6650 tp->snd_cwnd = rc_init_window(rack); 6651 } 6652 6653 /* 6654 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6655 * we will setup to retransmit the lowest seq number outstanding. 6656 */ 6657 static int 6658 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6659 { 6660 int32_t rexmt; 6661 int32_t retval = 0; 6662 bool isipv6; 6663 6664 if (tp->t_timers->tt_flags & TT_STOPPED) { 6665 return (1); 6666 } 6667 if ((tp->t_flags & TF_GPUTINPROG) && 6668 (tp->t_rxtshift)) { 6669 /* 6670 * We have had a second timeout 6671 * measurements on successive rxt's are not profitable. 6672 * It is unlikely to be of any use (the network is 6673 * broken or the client went away). 6674 */ 6675 tp->t_flags &= ~TF_GPUTINPROG; 6676 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6677 rack->r_ctl.rc_gp_srtt /*flex1*/, 6678 tp->gput_seq, 6679 0, 0, 18, __LINE__, NULL, 0); 6680 } 6681 if (ctf_progress_timeout_check(tp, false)) { 6682 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6683 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6684 return (-ETIMEDOUT); /* tcp_drop() */ 6685 } 6686 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6687 rack->r_ctl.retran_during_recovery = 0; 6688 rack->rc_ack_required = 1; 6689 rack->r_ctl.dsack_byte_cnt = 0; 6690 if (IN_FASTRECOVERY(tp->t_flags)) 6691 tp->t_flags |= TF_WASFRECOVERY; 6692 else 6693 tp->t_flags &= ~TF_WASFRECOVERY; 6694 if (IN_CONGRECOVERY(tp->t_flags)) 6695 tp->t_flags |= TF_WASCRECOVERY; 6696 else 6697 tp->t_flags &= ~TF_WASCRECOVERY; 6698 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6699 (tp->snd_una == tp->snd_max)) { 6700 /* Nothing outstanding .. nothing to do */ 6701 return (0); 6702 } 6703 if (rack->r_ctl.dsack_persist) { 6704 rack->r_ctl.dsack_persist--; 6705 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6706 rack->r_ctl.num_dsack = 0; 6707 } 6708 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6709 } 6710 /* 6711 * Rack can only run one timer at a time, so we cannot 6712 * run a KEEPINIT (gating SYN sending) and a retransmit 6713 * timer for the SYN. 
So if we are in a front state and 6714 * have a KEEPINIT timer we need to check the first transmit 6715 * against now to see if we have exceeded the KEEPINIT time 6716 * (if one is set). 6717 */ 6718 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6719 (TP_KEEPINIT(tp) != 0)) { 6720 struct rack_sendmap *rsm; 6721 6722 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6723 if (rsm) { 6724 /* Ok we have something outstanding to test keepinit with */ 6725 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6726 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6727 /* We have exceeded the KEEPINIT time */ 6728 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6729 goto drop_it; 6730 } 6731 } 6732 } 6733 /* 6734 * Retransmission timer went off. Message has not been acked within 6735 * retransmit interval. Back off to a longer retransmit interval 6736 * and retransmit one segment. 6737 */ 6738 rack_remxt_tmr(tp); 6739 if ((rack->r_ctl.rc_resend == NULL) || 6740 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6741 /* 6742 * If the rwnd collapsed on 6743 * the one we are retransmitting 6744 * it does not count against the 6745 * rxt count. 6746 */ 6747 tp->t_rxtshift++; 6748 } 6749 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6750 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6751 drop_it: 6752 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6753 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6754 /* XXXGL: previously t_softerror was casted to uint16_t */ 6755 MPASS(tp->t_softerror >= 0); 6756 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6757 goto out; /* tcp_drop() */ 6758 } 6759 if (tp->t_state == TCPS_SYN_SENT) { 6760 /* 6761 * If the SYN was retransmitted, indicate CWND to be limited 6762 * to 1 segment in cc_conn_init(). 6763 */ 6764 tp->snd_cwnd = 1; 6765 } else if (tp->t_rxtshift == 1) { 6766 /* 6767 * first retransmit; record ssthresh and cwnd so they can be 6768 * recovered if this turns out to be a "bad" retransmit. A 6769 * retransmit is considered "bad" if an ACK for this segment 6770 * is received within RTT/2 interval; the assumption here is 6771 * that the ACK was already in flight. See "On Estimating 6772 * End-to-End Network Path Properties" by Allman and Paxson 6773 * for more details. 6774 */ 6775 tp->snd_cwnd_prev = tp->snd_cwnd; 6776 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6777 tp->snd_recover_prev = tp->snd_recover; 6778 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6779 tp->t_flags |= TF_PREVVALID; 6780 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6781 tp->t_flags &= ~TF_PREVVALID; 6782 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6783 if ((tp->t_state == TCPS_SYN_SENT) || 6784 (tp->t_state == TCPS_SYN_RECEIVED)) 6785 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6786 else 6787 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6788 6789 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6790 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6791 /* 6792 * We enter the path for PLMTUD if connection is established or, if 6793 * connection is FIN_WAIT_1 status, reason for the last is that if 6794 * amount of data we send is very small, we could send it in couple 6795 * of packets and process straight to FIN. In that case we won't 6796 * catch ESTABLISHED state. 6797 */ 6798 #ifdef INET6 6799 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 
true : false; 6800 #else 6801 isipv6 = false; 6802 #endif 6803 if (((V_tcp_pmtud_blackhole_detect == 1) || 6804 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6805 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6806 ((tp->t_state == TCPS_ESTABLISHED) || 6807 (tp->t_state == TCPS_FIN_WAIT_1))) { 6808 /* 6809 * Idea here is that at each stage of mtu probe (usually, 6810 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6811 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6812 * should take care of that. 6813 */ 6814 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6815 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6816 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6817 tp->t_rxtshift % 2 == 0)) { 6818 /* 6819 * Enter Path MTU Black-hole Detection mechanism: - 6820 * Disable Path MTU Discovery (IP "DF" bit). - 6821 * Reduce MTU to lower value than what we negotiated 6822 * with peer. 6823 */ 6824 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6825 /* Record that we may have found a black hole. */ 6826 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6827 /* Keep track of previous MSS. */ 6828 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6829 } 6830 6831 /* 6832 * Reduce the MSS to blackhole value or to the 6833 * default in an attempt to retransmit. 6834 */ 6835 #ifdef INET6 6836 if (isipv6 && 6837 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6838 /* Use the sysctl tuneable blackhole MSS. */ 6839 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6840 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6841 } else if (isipv6) { 6842 /* Use the default MSS. */ 6843 tp->t_maxseg = V_tcp_v6mssdflt; 6844 /* 6845 * Disable Path MTU Discovery when we switch 6846 * to minmss. 6847 */ 6848 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6849 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6850 } 6851 #endif 6852 #if defined(INET6) && defined(INET) 6853 else 6854 #endif 6855 #ifdef INET 6856 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6857 /* Use the sysctl tuneable blackhole MSS. */ 6858 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6859 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6860 } else { 6861 /* Use the default MSS. */ 6862 tp->t_maxseg = V_tcp_mssdflt; 6863 /* 6864 * Disable Path MTU Discovery when we switch 6865 * to minmss. 6866 */ 6867 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6868 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6869 } 6870 #endif 6871 } else { 6872 /* 6873 * If further retransmissions are still unsuccessful 6874 * with a lowered MTU, maybe this isn't a blackhole 6875 * and we restore the previous MSS and blackhole 6876 * detection flags. The limit '6' is determined by 6877 * giving each probe stage (1448, 1188, 524) 2 6878 * chances to recover. 6879 */ 6880 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6881 (tp->t_rxtshift >= 6)) { 6882 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6883 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6884 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6885 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6886 } 6887 } 6888 } 6889 /* 6890 * Disable RFC1323 and SACK if we haven't got any response to 6891 * our third SYN to work-around some broken terminal servers 6892 * (most of which have hopefully been retired) that have bad VJ 6893 * header compression code which trashes TCP segments containing 6894 * unknown-to-them TCP options. 
6895 */ 6896 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6897 (tp->t_rxtshift == 3)) 6898 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6899 /* 6900 * If we backed off this far, our srtt estimate is probably bogus. 6901 * Clobber it so we'll take the next rtt measurement as our srtt; 6902 * move the current srtt into rttvar to keep the current retransmit 6903 * times until then. 6904 */ 6905 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6906 #ifdef INET6 6907 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 6908 in6_losing(tp->t_inpcb); 6909 else 6910 #endif 6911 in_losing(tp->t_inpcb); 6912 tp->t_rttvar += tp->t_srtt; 6913 tp->t_srtt = 0; 6914 } 6915 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6916 tp->snd_recover = tp->snd_max; 6917 tp->t_flags |= TF_ACKNOW; 6918 tp->t_rtttime = 0; 6919 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6920 out: 6921 return (retval); 6922 } 6923 6924 static int 6925 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6926 { 6927 int32_t ret = 0; 6928 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6929 6930 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6931 (tp->t_flags & TF_GPUTINPROG)) { 6932 /* 6933 * We have a goodput in progress 6934 * and we have entered a late state. 6935 * Do we have enough data in the sb 6936 * to handle the GPUT request? 6937 */ 6938 uint32_t bytes; 6939 6940 bytes = tp->gput_ack - tp->gput_seq; 6941 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6942 bytes += tp->gput_seq - tp->snd_una; 6943 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 6944 /* 6945 * There are not enough bytes in the socket 6946 * buffer that have been sent to cover this 6947 * measurement. Cancel it. 6948 */ 6949 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6950 rack->r_ctl.rc_gp_srtt /*flex1*/, 6951 tp->gput_seq, 6952 0, 0, 18, __LINE__, NULL, 0); 6953 tp->t_flags &= ~TF_GPUTINPROG; 6954 } 6955 } 6956 if (timers == 0) { 6957 return (0); 6958 } 6959 if (tp->t_state == TCPS_LISTEN) { 6960 /* no timers on listen sockets */ 6961 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6962 return (0); 6963 return (1); 6964 } 6965 if ((timers & PACE_TMR_RACK) && 6966 rack->rc_on_min_to) { 6967 /* 6968 * For the rack timer when we 6969 * are on a min-timeout (which means rrr_conf = 3) 6970 * we don't want to check the timer. It may 6971 * be going off for a pace and thats ok we 6972 * want to send the retransmit (if its ready). 6973 * 6974 * If its on a normal rack timer (non-min) then 6975 * we will check if its expired. 6976 */ 6977 goto skip_time_check; 6978 } 6979 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6980 uint32_t left; 6981 6982 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6983 ret = -1; 6984 rack_log_to_processing(rack, cts, ret, 0); 6985 return (0); 6986 } 6987 if (hpts_calling == 0) { 6988 /* 6989 * A user send or queued mbuf (sack) has called us? We 6990 * return 0 and let the pacing guards 6991 * deal with it if they should or 6992 * should not cause a send. 6993 */ 6994 ret = -2; 6995 rack_log_to_processing(rack, cts, ret, 0); 6996 return (0); 6997 } 6998 /* 6999 * Ok our timer went off early and we are not paced false 7000 * alarm, go back to sleep. 
7001 */ 7002 ret = -3; 7003 left = rack->r_ctl.rc_timer_exp - cts; 7004 tcp_hpts_insert(tp->t_inpcb, HPTS_MS_TO_SLOTS(left)); 7005 rack_log_to_processing(rack, cts, ret, left); 7006 return (1); 7007 } 7008 skip_time_check: 7009 rack->rc_tmr_stopped = 0; 7010 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 7011 if (timers & PACE_TMR_DELACK) { 7012 ret = rack_timeout_delack(tp, rack, cts); 7013 } else if (timers & PACE_TMR_RACK) { 7014 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7015 rack->r_fast_output = 0; 7016 ret = rack_timeout_rack(tp, rack, cts); 7017 } else if (timers & PACE_TMR_TLP) { 7018 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7019 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 7020 } else if (timers & PACE_TMR_RXT) { 7021 rack->r_ctl.rc_tlp_rxt_last_time = cts; 7022 rack->r_fast_output = 0; 7023 ret = rack_timeout_rxt(tp, rack, cts); 7024 } else if (timers & PACE_TMR_PERSIT) { 7025 ret = rack_timeout_persist(tp, rack, cts); 7026 } else if (timers & PACE_TMR_KEEP) { 7027 ret = rack_timeout_keepalive(tp, rack, cts); 7028 } 7029 rack_log_to_processing(rack, cts, ret, timers); 7030 return (ret); 7031 } 7032 7033 static void 7034 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 7035 { 7036 struct timeval tv; 7037 uint32_t us_cts, flags_on_entry; 7038 uint8_t hpts_removed = 0; 7039 7040 flags_on_entry = rack->r_ctl.rc_hpts_flags; 7041 us_cts = tcp_get_usecs(&tv); 7042 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 7043 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 7044 ((tp->snd_max - tp->snd_una) == 0))) { 7045 tcp_hpts_remove(rack->rc_inp); 7046 hpts_removed = 1; 7047 /* If we were not delayed cancel out the flag. */ 7048 if ((tp->snd_max - tp->snd_una) == 0) 7049 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 7050 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7051 } 7052 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 7053 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 7054 if (tcp_in_hpts(rack->rc_inp) && 7055 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 7056 /* 7057 * Canceling timer's when we have no output being 7058 * paced. We also must remove ourselves from the 7059 * hpts. 7060 */ 7061 tcp_hpts_remove(rack->rc_inp); 7062 hpts_removed = 1; 7063 } 7064 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7065 } 7066 if (hpts_removed == 0) 7067 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7068 } 7069 7070 static void 7071 rack_timer_stop(struct tcpcb *tp, uint32_t timer_type) 7072 { 7073 return; 7074 } 7075 7076 static int 7077 rack_stopall(struct tcpcb *tp) 7078 { 7079 struct tcp_rack *rack; 7080 rack = (struct tcp_rack *)tp->t_fb_ptr; 7081 rack->t_timers_stopped = 1; 7082 return (0); 7083 } 7084 7085 static void 7086 rack_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 7087 { 7088 return; 7089 } 7090 7091 static int 7092 rack_timer_active(struct tcpcb *tp, uint32_t timer_type) 7093 { 7094 return (0); 7095 } 7096 7097 static void 7098 rack_stop_all_timers(struct tcpcb *tp) 7099 { 7100 struct tcp_rack *rack; 7101 7102 /* 7103 * Assure no timers are running. 
7104 */ 7105 if (tcp_timer_active(tp, TT_PERSIST)) { 7106 /* We enter in persists, set the flag appropriately */ 7107 rack = (struct tcp_rack *)tp->t_fb_ptr; 7108 rack->rc_in_persist = 1; 7109 } 7110 tcp_timer_suspend(tp, TT_PERSIST); 7111 tcp_timer_suspend(tp, TT_REXMT); 7112 tcp_timer_suspend(tp, TT_KEEP); 7113 tcp_timer_suspend(tp, TT_DELACK); 7114 } 7115 7116 static void 7117 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7118 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7119 { 7120 int32_t idx; 7121 7122 rsm->r_rtr_cnt++; 7123 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7124 rsm->r_dupack = 0; 7125 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7126 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7127 rsm->r_flags |= RACK_OVERMAX; 7128 } 7129 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7130 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7131 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7132 } 7133 idx = rsm->r_rtr_cnt - 1; 7134 rsm->r_tim_lastsent[idx] = ts; 7135 /* 7136 * Here we don't add in the len of send, since its already 7137 * in snduna <->snd_max. 7138 */ 7139 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7140 rack->r_ctl.rc_sacked); 7141 if (rsm->r_flags & RACK_ACKED) { 7142 /* Problably MTU discovery messing with us */ 7143 rsm->r_flags &= ~RACK_ACKED; 7144 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7145 } 7146 if (rsm->r_in_tmap) { 7147 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7148 rsm->r_in_tmap = 0; 7149 } 7150 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7151 rsm->r_in_tmap = 1; 7152 /* Take off the must retransmit flag, if its on */ 7153 if (rsm->r_flags & RACK_MUST_RXT) { 7154 if (rack->r_must_retran) 7155 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7156 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7157 /* 7158 * We have retransmitted all we need. Clear 7159 * any must retransmit flags. 7160 */ 7161 rack->r_must_retran = 0; 7162 rack->r_ctl.rc_out_at_rto = 0; 7163 } 7164 rsm->r_flags &= ~RACK_MUST_RXT; 7165 } 7166 if (rsm->r_flags & RACK_SACK_PASSED) { 7167 /* We have retransmitted due to the SACK pass */ 7168 rsm->r_flags &= ~RACK_SACK_PASSED; 7169 rsm->r_flags |= RACK_WAS_SACKPASS; 7170 } 7171 } 7172 7173 static uint32_t 7174 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7175 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7176 { 7177 /* 7178 * We (re-)transmitted starting at rsm->r_start for some length 7179 * (possibly less than r_end. 7180 */ 7181 struct rack_sendmap *nrsm; 7182 #ifdef INVARIANTS 7183 struct rack_sendmap *insret; 7184 #endif 7185 uint32_t c_end; 7186 int32_t len; 7187 7188 len = *lenp; 7189 c_end = rsm->r_start + len; 7190 if (SEQ_GEQ(c_end, rsm->r_end)) { 7191 /* 7192 * We retransmitted the whole piece or more than the whole 7193 * slopping into the next rsm. 7194 */ 7195 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7196 if (c_end == rsm->r_end) { 7197 *lenp = 0; 7198 return (0); 7199 } else { 7200 int32_t act_len; 7201 7202 /* Hangs over the end return whats left */ 7203 act_len = rsm->r_end - rsm->r_start; 7204 *lenp = (len - act_len); 7205 return (rsm->r_end); 7206 } 7207 /* We don't get out of this block. */ 7208 } 7209 /* 7210 * Here we retransmitted less than the whole thing which means we 7211 * have to split this into what was transmitted and what was not. 7212 */ 7213 nrsm = rack_alloc_full_limit(rack); 7214 if (nrsm == NULL) { 7215 /* 7216 * We can't get memory, so lets not proceed. 
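 * Returning 0 with *lenp cleared makes the caller stop accounting for this
 * (re)transmission, so the un-split portion is simply left as-is.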
7217 */ 7218 *lenp = 0; 7219 return (0); 7220 } 7221 /* 7222 * So here we are going to take the original rsm and make it what we 7223 * retransmitted. nrsm will be the tail portion we did not 7224 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7225 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7226 * 1, 6 and the new piece will be 6, 11. 7227 */ 7228 rack_clone_rsm(rack, nrsm, rsm, c_end); 7229 nrsm->r_dupack = 0; 7230 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7231 #ifndef INVARIANTS 7232 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7233 #else 7234 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7235 if (insret != NULL) { 7236 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7237 nrsm, insret, rack, rsm); 7238 } 7239 #endif 7240 if (rsm->r_in_tmap) { 7241 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7242 nrsm->r_in_tmap = 1; 7243 } 7244 rsm->r_flags &= (~RACK_HAS_FIN); 7245 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7246 /* Log a split of rsm into rsm and nrsm */ 7247 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7248 *lenp = 0; 7249 return (0); 7250 } 7251 7252 static void 7253 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7254 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7255 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7256 { 7257 struct tcp_rack *rack; 7258 struct rack_sendmap *rsm, *nrsm, fe; 7259 #ifdef INVARIANTS 7260 struct rack_sendmap *insret; 7261 #endif 7262 register uint32_t snd_max, snd_una; 7263 7264 /* 7265 * Add to the RACK log of packets in flight or retransmitted. If 7266 * there is a TS option we will use the TS echoed, if not we will 7267 * grab a TS. 7268 * 7269 * Retransmissions will increment the count and move the ts to its 7270 * proper place. Note that if options do not include TS's then we 7271 * won't be able to effectively use the ACK for an RTT on a retran. 7272 * 7273 * Notes about r_start and r_end. Lets consider a send starting at 7274 * sequence 1 for 10 bytes. In such an example the r_start would be 7275 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7276 * This means that r_end is actually the first sequence for the next 7277 * slot (11). 7278 * 7279 */ 7280 /* 7281 * If err is set what do we do XXXrrs? should we not add the thing? 7282 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7283 * i.e. proceed with add ** do this for now. 7284 */ 7285 INP_WLOCK_ASSERT(tp->t_inpcb); 7286 if (err) 7287 /* 7288 * We don't log errors -- we could but snd_max does not 7289 * advance in this case either. 7290 */ 7291 return; 7292 7293 if (th_flags & TH_RST) { 7294 /* 7295 * We don't log resets and we return immediately from 7296 * sending 7297 */ 7298 return; 7299 } 7300 rack = (struct tcp_rack *)tp->t_fb_ptr; 7301 snd_una = tp->snd_una; 7302 snd_max = tp->snd_max; 7303 if (th_flags & (TH_SYN | TH_FIN)) { 7304 /* 7305 * The call to rack_log_output is made before bumping 7306 * snd_max. This means we can record one extra byte on a SYN 7307 * or FIN if seq_out is adding more on and a FIN is present 7308 * (and we are not resending). 7309 */ 7310 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7311 len++; 7312 if (th_flags & TH_FIN) 7313 len++; 7314 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7315 /* 7316 * The add/update as not been done for the FIN/SYN 7317 * yet. 
7318 */ 7319 snd_max = tp->snd_nxt; 7320 } 7321 } 7322 if (SEQ_LEQ((seq_out + len), snd_una)) { 7323 /* Are sending an old segment to induce an ack (keep-alive)? */ 7324 return; 7325 } 7326 if (SEQ_LT(seq_out, snd_una)) { 7327 /* huh? should we panic? */ 7328 uint32_t end; 7329 7330 end = seq_out + len; 7331 seq_out = snd_una; 7332 if (SEQ_GEQ(end, seq_out)) 7333 len = end - seq_out; 7334 else 7335 len = 0; 7336 } 7337 if (len == 0) { 7338 /* We don't log zero window probes */ 7339 return; 7340 } 7341 if (IN_FASTRECOVERY(tp->t_flags)) { 7342 rack->r_ctl.rc_prr_out += len; 7343 } 7344 /* First question is it a retransmission or new? */ 7345 if (seq_out == snd_max) { 7346 /* Its new */ 7347 again: 7348 rsm = rack_alloc(rack); 7349 if (rsm == NULL) { 7350 /* 7351 * Hmm out of memory and the tcb got destroyed while 7352 * we tried to wait. 7353 */ 7354 return; 7355 } 7356 if (th_flags & TH_FIN) { 7357 rsm->r_flags = RACK_HAS_FIN|add_flag; 7358 } else { 7359 rsm->r_flags = add_flag; 7360 } 7361 if (hw_tls) 7362 rsm->r_hw_tls = 1; 7363 rsm->r_tim_lastsent[0] = cts; 7364 rsm->r_rtr_cnt = 1; 7365 rsm->r_rtr_bytes = 0; 7366 if (th_flags & TH_SYN) { 7367 /* The data space is one beyond snd_una */ 7368 rsm->r_flags |= RACK_HAS_SYN; 7369 } 7370 rsm->r_start = seq_out; 7371 rsm->r_end = rsm->r_start + len; 7372 rsm->r_dupack = 0; 7373 /* 7374 * save off the mbuf location that 7375 * sndmbuf_noadv returned (which is 7376 * where we started copying from).. 7377 */ 7378 rsm->m = s_mb; 7379 rsm->soff = s_moff; 7380 /* 7381 * Here we do add in the len of send, since its not yet 7382 * reflected in in snduna <->snd_max 7383 */ 7384 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7385 rack->r_ctl.rc_sacked) + 7386 (rsm->r_end - rsm->r_start)); 7387 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7388 if (rsm->m) { 7389 if (rsm->m->m_len <= rsm->soff) { 7390 /* 7391 * XXXrrs Question, will this happen? 7392 * 7393 * If sbsndptr is set at the correct place 7394 * then s_moff should always be somewhere 7395 * within rsm->m. But if the sbsndptr was 7396 * off then that won't be true. If it occurs 7397 * we need to walkout to the correct location. 7398 */ 7399 struct mbuf *lm; 7400 7401 lm = rsm->m; 7402 while (lm->m_len <= rsm->soff) { 7403 rsm->soff -= lm->m_len; 7404 lm = lm->m_next; 7405 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7406 __func__, rack, s_moff, s_mb, rsm->soff)); 7407 } 7408 rsm->m = lm; 7409 } 7410 rsm->orig_m_len = rsm->m->m_len; 7411 } else 7412 rsm->orig_m_len = 0; 7413 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7414 /* Log a new rsm */ 7415 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7416 #ifndef INVARIANTS 7417 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7418 #else 7419 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7420 if (insret != NULL) { 7421 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7422 nrsm, insret, rack, rsm); 7423 } 7424 #endif 7425 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7426 rsm->r_in_tmap = 1; 7427 /* 7428 * Special case detection, is there just a single 7429 * packet outstanding when we are not in recovery? 7430 * 7431 * If this is true mark it so. 
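 * The r_one_out_nr mark is consumed later in tcp_rack_xmit_timer(), where
 * (together with r_just_ret) it lowers the confidence of an RTT sample
 * taken from such a lone send when judging how buffered the path is.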
7432 */ 7433 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7434 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7435 struct rack_sendmap *prsm; 7436 7437 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7438 if (prsm) 7439 prsm->r_one_out_nr = 1; 7440 } 7441 return; 7442 } 7443 /* 7444 * If we reach here its a retransmission and we need to find it. 7445 */ 7446 memset(&fe, 0, sizeof(fe)); 7447 more: 7448 if (hintrsm && (hintrsm->r_start == seq_out)) { 7449 rsm = hintrsm; 7450 hintrsm = NULL; 7451 } else { 7452 /* No hints sorry */ 7453 rsm = NULL; 7454 } 7455 if ((rsm) && (rsm->r_start == seq_out)) { 7456 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7457 if (len == 0) { 7458 return; 7459 } else { 7460 goto more; 7461 } 7462 } 7463 /* Ok it was not the last pointer go through it the hard way. */ 7464 refind: 7465 fe.r_start = seq_out; 7466 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7467 if (rsm) { 7468 if (rsm->r_start == seq_out) { 7469 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7470 if (len == 0) { 7471 return; 7472 } else { 7473 goto refind; 7474 } 7475 } 7476 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7477 /* Transmitted within this piece */ 7478 /* 7479 * Ok we must split off the front and then let the 7480 * update do the rest 7481 */ 7482 nrsm = rack_alloc_full_limit(rack); 7483 if (nrsm == NULL) { 7484 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7485 return; 7486 } 7487 /* 7488 * copy rsm to nrsm and then trim the front of rsm 7489 * to not include this part. 7490 */ 7491 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7492 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7493 #ifndef INVARIANTS 7494 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7495 #else 7496 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7497 if (insret != NULL) { 7498 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7499 nrsm, insret, rack, rsm); 7500 } 7501 #endif 7502 if (rsm->r_in_tmap) { 7503 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7504 nrsm->r_in_tmap = 1; 7505 } 7506 rsm->r_flags &= (~RACK_HAS_FIN); 7507 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7508 if (len == 0) { 7509 return; 7510 } else if (len > 0) 7511 goto refind; 7512 } 7513 } 7514 /* 7515 * Hmm not found in map did they retransmit both old and on into the 7516 * new? 7517 */ 7518 if (seq_out == tp->snd_max) { 7519 goto again; 7520 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7521 #ifdef INVARIANTS 7522 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7523 seq_out, len, tp->snd_una, tp->snd_max); 7524 printf("Starting Dump of all rack entries\n"); 7525 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7526 printf("rsm:%p start:%u end:%u\n", 7527 rsm, rsm->r_start, rsm->r_end); 7528 } 7529 printf("Dump complete\n"); 7530 panic("seq_out not found rack:%p tp:%p", 7531 rack, tp); 7532 #endif 7533 } else { 7534 #ifdef INVARIANTS 7535 /* 7536 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7537 * flag) 7538 */ 7539 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7540 seq_out, len, tp->snd_max, tp); 7541 #endif 7542 } 7543 } 7544 7545 /* 7546 * Record one of the RTT updates from an ack into 7547 * our sample structure. 
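 * The confidence argument is a small scale: 2 means the sample came from a
 * SACK'd segment, 1 means a cum-ack sample we trust, and 0 means we are not
 * confident (e.g. an app-limited tail, or a retransmission matched only by
 * its timestamp).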
7548  */
7549
7550 static void
7551 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt,
7552     int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt)
7553 {
7554 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7555 	    (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) {
7556 		rack->r_ctl.rack_rs.rs_rtt_lowest = rtt;
7557 	}
7558 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7559 	    (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) {
7560 		rack->r_ctl.rack_rs.rs_rtt_highest = rtt;
7561 	}
7562 	if (rack->rc_tp->t_flags & TF_GPUTINPROG) {
7563 		if (us_rtt < rack->r_ctl.rc_gp_lowrtt)
7564 			rack->r_ctl.rc_gp_lowrtt = us_rtt;
7565 		if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd)
7566 			rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd;
7567 	}
7568 	if ((confidence == 1) &&
7569 	    ((rsm == NULL) ||
7570 	    (rsm->r_just_ret) ||
7571 	    (rsm->r_one_out_nr &&
7572 	    len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) {
7573 		/*
7574 		 * If the rsm had the just-return flag set
7575 		 * then we can't trust the rtt measurement
7576 		 * for buffer determination.
7577 		 * Note that a confidence of 2 indicates a
7578 		 * SACK'd sample, which overrides the r_just_ret and
7579 		 * the r_one_out_nr checks. If it was a CUM-ACK and
7580 		 * we had only two outstanding, but got an
7581 		 * ack for only one, then that also lowers our
7582 		 * confidence.
7583 		 */
7584 		confidence = 0;
7585 	}
7586 	if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) ||
7587 	    (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) {
7588 		if (rack->r_ctl.rack_rs.confidence == 0) {
7589 			/*
7590 			 * We take anything with no current confidence
7591 			 * saved.
7592 			 */
7593 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7594 			rack->r_ctl.rack_rs.confidence = confidence;
7595 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7596 		} else if (confidence || rack->r_ctl.rack_rs.confidence) {
7597 			/*
7598 			 * Once we have a confident number,
7599 			 * we can update it with a smaller
7600 			 * value since this confident number
7601 			 * may include the DSACK time until
7602 			 * the next segment (the second one) arrived.
7603 			 */
7604 			rack->r_ctl.rack_rs.rs_us_rtt = us_rtt;
7605 			rack->r_ctl.rack_rs.confidence = confidence;
7606 			rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt;
7607 		}
7608 	}
7609 	rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence);
7610 	rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID;
7611 	rack->r_ctl.rack_rs.rs_rtt_tot += rtt;
7612 	rack->r_ctl.rack_rs.rs_rtt_cnt++;
7613 }
7614
7615 /*
7616  * Collect new round-trip time estimate
7617  * and update averages and current timeout.
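 * Which of the lowest, highest, or average RTT seen in the ack is used is
 * selected by rc_rate_sample_method (USE_RTT_LOW / USE_RTT_HIGH /
 * USE_RTT_AVG) below.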
7618 */ 7619 static void 7620 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7621 { 7622 int32_t delta; 7623 int32_t rtt; 7624 7625 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7626 /* No valid sample */ 7627 return; 7628 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7629 /* We are to use the lowest RTT seen in a single ack */ 7630 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7631 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7632 /* We are to use the highest RTT seen in a single ack */ 7633 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7634 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7635 /* We are to use the average RTT seen in a single ack */ 7636 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7637 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7638 } else { 7639 #ifdef INVARIANTS 7640 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7641 #endif 7642 return; 7643 } 7644 if (rtt == 0) 7645 rtt = 1; 7646 if (rack->rc_gp_rtt_set == 0) { 7647 /* 7648 * With no RTT we have to accept 7649 * even one we are not confident of. 7650 */ 7651 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7652 rack->rc_gp_rtt_set = 1; 7653 } else if (rack->r_ctl.rack_rs.confidence) { 7654 /* update the running gp srtt */ 7655 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7656 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7657 } 7658 if (rack->r_ctl.rack_rs.confidence) { 7659 /* 7660 * record the low and high for highly buffered path computation, 7661 * we only do this if we are confident (not a retransmission). 7662 */ 7663 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7664 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7665 } 7666 if (rack->rc_highly_buffered == 0) { 7667 /* 7668 * Currently once we declare a path has 7669 * highly buffered there is no going 7670 * back, which may be a problem... 7671 */ 7672 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7673 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7674 rack->r_ctl.rc_highest_us_rtt, 7675 rack->r_ctl.rc_lowest_us_rtt, 7676 RACK_RTTS_SEEHBP); 7677 rack->rc_highly_buffered = 1; 7678 } 7679 } 7680 } 7681 if ((rack->r_ctl.rack_rs.confidence) || 7682 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7683 /* 7684 * If we are highly confident of it <or> it was 7685 * never retransmitted we accept it as the last us_rtt. 7686 */ 7687 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7688 /* The lowest rtt can be set if its was not retransmited */ 7689 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7690 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7691 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7692 rack->r_ctl.rc_lowest_us_rtt = 1; 7693 } 7694 } 7695 rack = (struct tcp_rack *)tp->t_fb_ptr; 7696 if (tp->t_srtt != 0) { 7697 /* 7698 * We keep a simple srtt in microseconds, like our rtt 7699 * measurement. We don't need to do any tricks with shifting 7700 * etc. Instead we just add in 1/8th of the new measurement 7701 * and subtract out 1/8 of the old srtt. We do the same with 7702 * the variance after finding the absolute value of the 7703 * difference between this sample and the current srtt. 
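 * In equation form (ignoring the truncation of the two independent shifts):
 *   srtt'   = srtt   + (rtt - srtt) / 8            = 7/8 * srtt   + 1/8 * rtt
 *   rttvar' = rttvar + (|srtt - rtt| - rttvar) / 8 = 7/8 * rttvar + 1/8 * |srtt - rtt|
 * where srtt on the right-hand side is the value before this update.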
7704 */ 7705 delta = tp->t_srtt - rtt; 7706 /* Take off 1/8th of the current sRTT */ 7707 tp->t_srtt -= (tp->t_srtt >> 3); 7708 /* Add in 1/8th of the new RTT just measured */ 7709 tp->t_srtt += (rtt >> 3); 7710 if (tp->t_srtt <= 0) 7711 tp->t_srtt = 1; 7712 /* Now lets make the absolute value of the variance */ 7713 if (delta < 0) 7714 delta = -delta; 7715 /* Subtract out 1/8th */ 7716 tp->t_rttvar -= (tp->t_rttvar >> 3); 7717 /* Add in 1/8th of the new variance we just saw */ 7718 tp->t_rttvar += (delta >> 3); 7719 if (tp->t_rttvar <= 0) 7720 tp->t_rttvar = 1; 7721 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 7722 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7723 } else { 7724 /* 7725 * No rtt measurement yet - use the unsmoothed rtt. Set the 7726 * variance to half the rtt (so our first retransmit happens 7727 * at 3*rtt). 7728 */ 7729 tp->t_srtt = rtt; 7730 tp->t_rttvar = rtt >> 1; 7731 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 7732 } 7733 rack->rc_srtt_measure_made = 1; 7734 KMOD_TCPSTAT_INC(tcps_rttupdated); 7735 tp->t_rttupdated++; 7736 #ifdef STATS 7737 if (rack_stats_gets_ms_rtt == 0) { 7738 /* Send in the microsecond rtt used for rxt timeout purposes */ 7739 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7740 } else if (rack_stats_gets_ms_rtt == 1) { 7741 /* Send in the millisecond rtt used for rxt timeout purposes */ 7742 int32_t ms_rtt; 7743 7744 /* Round up */ 7745 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7746 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7747 } else if (rack_stats_gets_ms_rtt == 2) { 7748 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7749 int32_t ms_rtt; 7750 7751 /* Round up */ 7752 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7753 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7754 } else { 7755 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7756 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7757 } 7758 7759 #endif 7760 /* 7761 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7762 * way we do the smoothing, srtt and rttvar will each average +1/2 7763 * tick of bias. When we compute the retransmit timer, we want 1/2 7764 * tick of rounding and 1 extra tick because of +-1/2 tick 7765 * uncertainty in the firing of the timer. The bias will give us 7766 * exactly the 1.5 tick we need. But, because the bias is 7767 * statistical, we have to test that we don't drop below the minimum 7768 * feasible timer (which is 2 ticks). 7769 */ 7770 tp->t_rxtshift = 0; 7771 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7772 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7773 rack_log_rtt_sample(rack, rtt); 7774 tp->t_softerror = 0; 7775 } 7776 7777 7778 static void 7779 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7780 { 7781 /* 7782 * Apply to filter the inbound us-rtt at us_cts. 7783 */ 7784 uint32_t old_rtt; 7785 7786 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7787 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7788 us_rtt, us_cts); 7789 if (old_rtt > us_rtt) { 7790 /* We just hit a new lower rtt time */ 7791 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7792 __LINE__, RACK_RTTS_NEWRTT); 7793 /* 7794 * Only count it if its lower than what we saw within our 7795 * calculated range. 
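 * "Within our calculated range" here means the drop must exceed
 * rack_min_rtt_movement; only then do we record the time of the new low
 * (rc_lower_rtt_us_cts) and consider following a peer into probe-rtt.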
7796 */ 7797 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7798 if (rack_probertt_lower_within && 7799 rack->rc_gp_dyn_mul && 7800 (rack->use_fixed_rate == 0) && 7801 (rack->rc_always_pace)) { 7802 /* 7803 * We are seeing a new lower rtt very close 7804 * to the time that we would have entered probe-rtt. 7805 * This is probably due to the fact that a peer flow 7806 * has entered probe-rtt. Lets go in now too. 7807 */ 7808 uint32_t val; 7809 7810 val = rack_probertt_lower_within * rack_time_between_probertt; 7811 val /= 100; 7812 if ((rack->in_probe_rtt == 0) && 7813 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7814 rack_enter_probertt(rack, us_cts); 7815 } 7816 } 7817 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7818 } 7819 } 7820 } 7821 7822 static int 7823 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7824 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7825 { 7826 uint32_t us_rtt; 7827 int32_t i, all; 7828 uint32_t t, len_acked; 7829 7830 if ((rsm->r_flags & RACK_ACKED) || 7831 (rsm->r_flags & RACK_WAS_ACKED)) 7832 /* Already done */ 7833 return (0); 7834 if (rsm->r_no_rtt_allowed) { 7835 /* Not allowed */ 7836 return (0); 7837 } 7838 if (ack_type == CUM_ACKED) { 7839 if (SEQ_GT(th_ack, rsm->r_end)) { 7840 len_acked = rsm->r_end - rsm->r_start; 7841 all = 1; 7842 } else { 7843 len_acked = th_ack - rsm->r_start; 7844 all = 0; 7845 } 7846 } else { 7847 len_acked = rsm->r_end - rsm->r_start; 7848 all = 0; 7849 } 7850 if (rsm->r_rtr_cnt == 1) { 7851 7852 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7853 if ((int)t <= 0) 7854 t = 1; 7855 if (!tp->t_rttlow || tp->t_rttlow > t) 7856 tp->t_rttlow = t; 7857 if (!rack->r_ctl.rc_rack_min_rtt || 7858 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7859 rack->r_ctl.rc_rack_min_rtt = t; 7860 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7861 rack->r_ctl.rc_rack_min_rtt = 1; 7862 } 7863 } 7864 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7865 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7866 else 7867 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7868 if (us_rtt == 0) 7869 us_rtt = 1; 7870 if (CC_ALGO(tp)->rttsample != NULL) { 7871 /* Kick the RTT to the CC */ 7872 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7873 } 7874 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7875 if (ack_type == SACKED) { 7876 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7877 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7878 } else { 7879 /* 7880 * We need to setup what our confidence 7881 * is in this ack. 7882 * 7883 * If the rsm was app limited and it is 7884 * less than a mss in length (the end 7885 * of the send) then we have a gap. If we 7886 * were app limited but say we were sending 7887 * multiple MSS's then we are more confident 7888 * int it. 7889 * 7890 * When we are not app-limited then we see if 7891 * the rsm is being included in the current 7892 * measurement, we tell this by the app_limited_needs_set 7893 * flag. 7894 * 7895 * Note that being cwnd blocked is not applimited 7896 * as well as the pacing delay between packets which 7897 * are sending only 1 or 2 MSS's also will show up 7898 * in the RTT. 
We probably need to examine this algorithm 7899 * a bit more and enhance it to account for the delay 7900 * between rsm's. We could do that by saving off the 7901 * pacing delay of each rsm (in an rsm) and then 7902 * factoring that in somehow though for now I am 7903 * not sure how :) 7904 */ 7905 int calc_conf = 0; 7906 7907 if (rsm->r_flags & RACK_APP_LIMITED) { 7908 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7909 calc_conf = 0; 7910 else 7911 calc_conf = 1; 7912 } else if (rack->app_limited_needs_set == 0) { 7913 calc_conf = 1; 7914 } else { 7915 calc_conf = 0; 7916 } 7917 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7918 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7919 calc_conf, rsm, rsm->r_rtr_cnt); 7920 } 7921 if ((rsm->r_flags & RACK_TLP) && 7922 (!IN_FASTRECOVERY(tp->t_flags))) { 7923 /* Segment was a TLP and our retrans matched */ 7924 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7925 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7926 } 7927 } 7928 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7929 /* New more recent rack_tmit_time */ 7930 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7931 rack->rc_rack_rtt = t; 7932 } 7933 return (1); 7934 } 7935 /* 7936 * We clear the soft/rxtshift since we got an ack. 7937 * There is no assurance we will call the commit() function 7938 * so we need to clear these to avoid incorrect handling. 7939 */ 7940 tp->t_rxtshift = 0; 7941 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7942 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7943 tp->t_softerror = 0; 7944 if (to && (to->to_flags & TOF_TS) && 7945 (ack_type == CUM_ACKED) && 7946 (to->to_tsecr) && 7947 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7948 /* 7949 * Now which timestamp does it match? In this block the ACK 7950 * must be coming from a previous transmission. 7951 */ 7952 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7953 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7954 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7955 if ((int)t <= 0) 7956 t = 1; 7957 if (CC_ALGO(tp)->rttsample != NULL) { 7958 /* 7959 * Kick the RTT to the CC, here 7960 * we lie a bit in that we know the 7961 * retransmission is correct even though 7962 * we retransmitted. This is because 7963 * we match the timestamps. 7964 */ 7965 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7966 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7967 else 7968 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7969 CC_ALGO(tp)->rttsample(tp->ccv, us_rtt, 1, rsm->r_fas); 7970 } 7971 if ((i + 1) < rsm->r_rtr_cnt) { 7972 /* 7973 * The peer ack'd from our previous 7974 * transmission. We have a spurious 7975 * retransmission and thus we dont 7976 * want to update our rack_rtt. 7977 * 7978 * Hmm should there be a CC revert here? 
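 * (For now we only skip the rack_rtt update; the snd_cwnd_prev /
 * snd_ssthresh_prev state saved under TF_PREVVALID at RTO time is the
 * existing mechanism for backing out a "bad" retransmit.)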
7979 * 7980 */ 7981 return (0); 7982 } 7983 if (!tp->t_rttlow || tp->t_rttlow > t) 7984 tp->t_rttlow = t; 7985 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7986 rack->r_ctl.rc_rack_min_rtt = t; 7987 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7988 rack->r_ctl.rc_rack_min_rtt = 1; 7989 } 7990 } 7991 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7992 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7993 /* New more recent rack_tmit_time */ 7994 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7995 rack->rc_rack_rtt = t; 7996 } 7997 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7998 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7999 rsm->r_rtr_cnt); 8000 return (1); 8001 } 8002 } 8003 goto ts_not_found; 8004 } else { 8005 /* 8006 * Ok its a SACK block that we retransmitted. or a windows 8007 * machine without timestamps. We can tell nothing from the 8008 * time-stamp since its not there or the time the peer last 8009 * recieved a segment that moved forward its cum-ack point. 8010 */ 8011 ts_not_found: 8012 i = rsm->r_rtr_cnt - 1; 8013 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 8014 if ((int)t <= 0) 8015 t = 1; 8016 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8017 /* 8018 * We retransmitted and the ack came back in less 8019 * than the smallest rtt we have observed. We most 8020 * likely did an improper retransmit as outlined in 8021 * 6.2 Step 2 point 2 in the rack-draft so we 8022 * don't want to update our rack_rtt. We in 8023 * theory (in future) might want to think about reverting our 8024 * cwnd state but we won't for now. 8025 */ 8026 return (0); 8027 } else if (rack->r_ctl.rc_rack_min_rtt) { 8028 /* 8029 * We retransmitted it and the retransmit did the 8030 * job. 8031 */ 8032 if (!rack->r_ctl.rc_rack_min_rtt || 8033 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 8034 rack->r_ctl.rc_rack_min_rtt = t; 8035 if (rack->r_ctl.rc_rack_min_rtt == 0) { 8036 rack->r_ctl.rc_rack_min_rtt = 1; 8037 } 8038 } 8039 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 8040 /* New more recent rack_tmit_time */ 8041 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 8042 rack->rc_rack_rtt = t; 8043 } 8044 return (1); 8045 } 8046 } 8047 return (0); 8048 } 8049 8050 /* 8051 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 8052 */ 8053 static void 8054 rack_log_sack_passed(struct tcpcb *tp, 8055 struct tcp_rack *rack, struct rack_sendmap *rsm) 8056 { 8057 struct rack_sendmap *nrsm; 8058 8059 nrsm = rsm; 8060 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 8061 rack_head, r_tnext) { 8062 if (nrsm == rsm) { 8063 /* Skip orginal segment he is acked */ 8064 continue; 8065 } 8066 if (nrsm->r_flags & RACK_ACKED) { 8067 /* 8068 * Skip ack'd segments, though we 8069 * should not see these, since tmap 8070 * should not have ack'd segments. 8071 */ 8072 continue; 8073 } 8074 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 8075 /* 8076 * If the peer dropped the rwnd on 8077 * these then we don't worry about them. 8078 */ 8079 continue; 8080 } 8081 if (nrsm->r_flags & RACK_SACK_PASSED) { 8082 /* 8083 * We found one that is already marked 8084 * passed, we have been here before and 8085 * so all others below this are marked. 
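 * A segment carrying RACK_SACK_PASSED was sent before data that has since
 * been SACKed; that ordering is what the rack (time-based) retransmit logic
 * keys on when picking candidates for retransmission.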
8086  */
8087 			break;
8088 		}
8089 		nrsm->r_flags |= RACK_SACK_PASSED;
8090 		nrsm->r_flags &= ~RACK_WAS_SACKPASS;
8091 	}
8092 }
8093
8094 static void
8095 rack_need_set_test(struct tcpcb *tp,
8096 		   struct tcp_rack *rack,
8097 		   struct rack_sendmap *rsm,
8098 		   tcp_seq th_ack,
8099 		   int line,
8100 		   int use_which)
8101 {
8102
8103 	if ((tp->t_flags & TF_GPUTINPROG) &&
8104 	    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8105 		/*
8106 		 * We were app limited, and this ack
8107 		 * butts up or goes beyond the point where we want
8108 		 * to start our next measurement. We need
8109 		 * to record the new gput_ts as here and
8110 		 * possibly update the start sequence.
8111 		 */
8112 		uint32_t seq, ts;
8113
8114 		if (rsm->r_rtr_cnt > 1) {
8115 			/*
8116 			 * This is a retransmit, can we
8117 			 * really make any assessment at this
8118 			 * point? We are not really sure of
8119 			 * the timestamp, is it this or the
8120 			 * previous transmission?
8121 			 *
8122 			 * Let's wait for something better that
8123 			 * is not retransmitted.
8124 			 */
8125 			return;
8126 		}
8127 		seq = tp->gput_seq;
8128 		ts = tp->gput_ts;
8129 		rack->app_limited_needs_set = 0;
8130 		tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
8131 		/* Do we start at a new end? */
8132 		if ((use_which == RACK_USE_BEG) &&
8133 		    SEQ_GEQ(rsm->r_start, tp->gput_seq)) {
8134 			/*
8135 			 * When we get an ACK that just eats
8136 			 * up some of the rsm, we set RACK_USE_BEG
8137 			 * since what's at r_start (i.e. th_ack)
8138 			 * is left unacked and that's where the
8139 			 * measurement now starts.
8140 			 */
8141 			tp->gput_seq = rsm->r_start;
8142 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8143 		}
8144 		if ((use_which == RACK_USE_END) &&
8145 		    SEQ_GEQ(rsm->r_end, tp->gput_seq)) {
8146 			/*
8147 			 * We use the end when the cumack
8148 			 * is moving forward and completely
8149 			 * deleting the rsm passed, so basically
8150 			 * r_end holds th_ack.
8151 			 *
8152 			 * For SACKs we also want to use the end
8153 			 * since this piece just got sacked and
8154 			 * we want to target anything after that
8155 			 * in our measurement.
8156 			 */
8157 			tp->gput_seq = rsm->r_end;
8158 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8159 		}
8160 		if (use_which == RACK_USE_END_OR_THACK) {
8161 			/*
8162 			 * Special case for the ack moving forward,
8163 			 * not a sack; we need to move all the
8164 			 * way up to where this ack cum-ack moves
8165 			 * to.
8166 			 */
8167 			if (SEQ_GT(th_ack, rsm->r_end))
8168 				tp->gput_seq = th_ack;
8169 			else
8170 				tp->gput_seq = rsm->r_end;
8171 			rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)];
8172 		}
8173 		if (SEQ_GT(tp->gput_seq, tp->gput_ack)) {
8174 			/*
8175 			 * We moved beyond this guy's range, re-calculate
8176 			 * the new end point.
8177 			 */
8178 			if (rack->rc_gp_filled == 0) {
8179 				tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp)));
8180 			} else {
8181 				tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack);
8182 			}
8183 		}
8184 		/*
8185 		 * We are moving the goal post; we may be able to clear the
8186 		 * measure_saw_probe_rtt flag.
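 * That only happens once we are out of probe-rtt and the new gput_seq is at
 * or beyond rc_probertt_sndmax_atexit (the snd_max recorded when probe-rtt
 * exited), so the pending measurement can no longer include data that was
 * paced down during probe-rtt.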
8187 */ 8188 if ((rack->in_probe_rtt == 0) && 8189 (rack->measure_saw_probe_rtt) && 8190 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8191 rack->measure_saw_probe_rtt = 0; 8192 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8193 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8194 if (rack->rc_gp_filled && 8195 ((tp->gput_ack - tp->gput_seq) < 8196 max(rc_init_window(rack), (MIN_GP_WIN * 8197 ctf_fixed_maxseg(tp))))) { 8198 uint32_t ideal_amount; 8199 8200 ideal_amount = rack_get_measure_window(tp, rack); 8201 if (ideal_amount > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 8202 /* 8203 * There is no sense of continuing this measurement 8204 * because its too small to gain us anything we 8205 * trust. Skip it and that way we can start a new 8206 * measurement quicker. 8207 */ 8208 tp->t_flags &= ~TF_GPUTINPROG; 8209 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8210 0, 0, 0, 6, __LINE__, NULL, 0); 8211 } else { 8212 /* 8213 * Reset the window further out. 8214 */ 8215 tp->gput_ack = tp->gput_seq + ideal_amount; 8216 } 8217 } 8218 } 8219 } 8220 8221 static inline int 8222 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8223 { 8224 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8225 /* Behind our TLP definition or right at */ 8226 return (0); 8227 } 8228 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8229 /* The start is beyond or right at our end of TLP definition */ 8230 return (0); 8231 } 8232 /* It has to be a sub-part of the original TLP recorded */ 8233 return (1); 8234 } 8235 8236 8237 static uint32_t 8238 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8239 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8240 { 8241 uint32_t start, end, changed = 0; 8242 struct rack_sendmap stack_map; 8243 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8244 #ifdef INVARIANTS 8245 struct rack_sendmap *insret; 8246 #endif 8247 int32_t used_ref = 1; 8248 int moved = 0; 8249 8250 start = sack->start; 8251 end = sack->end; 8252 rsm = *prsm; 8253 memset(&fe, 0, sizeof(fe)); 8254 do_rest_ofb: 8255 if ((rsm == NULL) || 8256 (SEQ_LT(end, rsm->r_start)) || 8257 (SEQ_GEQ(start, rsm->r_end)) || 8258 (SEQ_LT(start, rsm->r_start))) { 8259 /* 8260 * We are not in the right spot, 8261 * find the correct spot in the tree. 8262 */ 8263 used_ref = 0; 8264 fe.r_start = start; 8265 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8266 moved++; 8267 } 8268 if (rsm == NULL) { 8269 /* TSNH */ 8270 goto out; 8271 } 8272 /* Ok we have an ACK for some piece of this rsm */ 8273 if (rsm->r_start != start) { 8274 if ((rsm->r_flags & RACK_ACKED) == 0) { 8275 /* 8276 * Before any splitting or hookery is 8277 * done is it a TLP of interest i.e. rxt? 8278 */ 8279 if ((rsm->r_flags & RACK_TLP) && 8280 (rsm->r_rtr_cnt > 1)) { 8281 /* 8282 * We are splitting a rxt TLP, check 8283 * if we need to save off the start/end 8284 */ 8285 if (rack->rc_last_tlp_acked_set && 8286 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8287 /* 8288 * We already turned this on since we are inside 8289 * the previous one was a partially sack now we 8290 * are getting another one (maybe all of it). 8291 * 8292 */ 8293 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8294 /* 8295 * Lets make sure we have all of it though. 
8296 */ 8297 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8298 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8299 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8300 rack->r_ctl.last_tlp_acked_end); 8301 } 8302 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8303 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8304 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8305 rack->r_ctl.last_tlp_acked_end); 8306 } 8307 } else { 8308 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8309 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8310 rack->rc_last_tlp_past_cumack = 0; 8311 rack->rc_last_tlp_acked_set = 1; 8312 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8313 } 8314 } 8315 /** 8316 * Need to split this in two pieces the before and after, 8317 * the before remains in the map, the after must be 8318 * added. In other words we have: 8319 * rsm |--------------| 8320 * sackblk |-------> 8321 * rsm will become 8322 * rsm |---| 8323 * and nrsm will be the sacked piece 8324 * nrsm |----------| 8325 * 8326 * But before we start down that path lets 8327 * see if the sack spans over on top of 8328 * the next guy and it is already sacked. 8329 * 8330 */ 8331 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8332 if (next && (next->r_flags & RACK_ACKED) && 8333 SEQ_GEQ(end, next->r_start)) { 8334 /** 8335 * So the next one is already acked, and 8336 * we can thus by hookery use our stack_map 8337 * to reflect the piece being sacked and 8338 * then adjust the two tree entries moving 8339 * the start and ends around. So we start like: 8340 * rsm |------------| (not-acked) 8341 * next |-----------| (acked) 8342 * sackblk |--------> 8343 * We want to end like so: 8344 * rsm |------| (not-acked) 8345 * next |-----------------| (acked) 8346 * nrsm |-----| 8347 * Where nrsm is a temporary stack piece we 8348 * use to update all the gizmos. 8349 */ 8350 /* Copy up our fudge block */ 8351 nrsm = &stack_map; 8352 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8353 /* Now adjust our tree blocks */ 8354 rsm->r_end = start; 8355 next->r_start = start; 8356 /* Now we must adjust back where next->m is */ 8357 rack_setup_offset_for_rsm(rsm, next); 8358 8359 /* We don't need to adjust rsm, it did not change */ 8360 /* Clear out the dup ack count of the remainder */ 8361 rsm->r_dupack = 0; 8362 rsm->r_just_ret = 0; 8363 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8364 /* Now lets make sure our fudge block is right */ 8365 nrsm->r_start = start; 8366 /* Now lets update all the stats and such */ 8367 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8368 if (rack->app_limited_needs_set) 8369 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8370 changed += (nrsm->r_end - nrsm->r_start); 8371 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8372 if (nrsm->r_flags & RACK_SACK_PASSED) { 8373 rack->r_ctl.rc_reorder_ts = cts; 8374 } 8375 /* 8376 * Now we want to go up from rsm (the 8377 * one left un-acked) to the next one 8378 * in the tmap. We do this so when 8379 * we walk backwards we include marking 8380 * sack-passed on rsm (The one passed in 8381 * is skipped since it is generally called 8382 * on something sacked before removing it 8383 * from the tmap). 8384 */ 8385 if (rsm->r_in_tmap) { 8386 nrsm = TAILQ_NEXT(rsm, r_tnext); 8387 /* 8388 * Now that we have the next 8389 * one walk backwards from there. 
8390 */ 8391 if (nrsm && nrsm->r_in_tmap) 8392 rack_log_sack_passed(tp, rack, nrsm); 8393 } 8394 /* Now are we done? */ 8395 if (SEQ_LT(end, next->r_end) || 8396 (end == next->r_end)) { 8397 /* Done with block */ 8398 goto out; 8399 } 8400 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8401 counter_u64_add(rack_sack_used_next_merge, 1); 8402 /* Postion for the next block */ 8403 start = next->r_end; 8404 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8405 if (rsm == NULL) 8406 goto out; 8407 } else { 8408 /** 8409 * We can't use any hookery here, so we 8410 * need to split the map. We enter like 8411 * so: 8412 * rsm |--------| 8413 * sackblk |-----> 8414 * We will add the new block nrsm and 8415 * that will be the new portion, and then 8416 * fall through after reseting rsm. So we 8417 * split and look like this: 8418 * rsm |----| 8419 * sackblk |-----> 8420 * nrsm |---| 8421 * We then fall through reseting 8422 * rsm to nrsm, so the next block 8423 * picks it up. 8424 */ 8425 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8426 if (nrsm == NULL) { 8427 /* 8428 * failed XXXrrs what can we do but loose the sack 8429 * info? 8430 */ 8431 goto out; 8432 } 8433 counter_u64_add(rack_sack_splits, 1); 8434 rack_clone_rsm(rack, nrsm, rsm, start); 8435 rsm->r_just_ret = 0; 8436 #ifndef INVARIANTS 8437 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8438 #else 8439 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8440 if (insret != NULL) { 8441 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8442 nrsm, insret, rack, rsm); 8443 } 8444 #endif 8445 if (rsm->r_in_tmap) { 8446 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8447 nrsm->r_in_tmap = 1; 8448 } 8449 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8450 rsm->r_flags &= (~RACK_HAS_FIN); 8451 /* Position us to point to the new nrsm that starts the sack blk */ 8452 rsm = nrsm; 8453 } 8454 } else { 8455 /* Already sacked this piece */ 8456 counter_u64_add(rack_sack_skipped_acked, 1); 8457 moved++; 8458 if (end == rsm->r_end) { 8459 /* Done with block */ 8460 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8461 goto out; 8462 } else if (SEQ_LT(end, rsm->r_end)) { 8463 /* A partial sack to a already sacked block */ 8464 moved++; 8465 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8466 goto out; 8467 } else { 8468 /* 8469 * The end goes beyond this guy 8470 * reposition the start to the 8471 * next block. 8472 */ 8473 start = rsm->r_end; 8474 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8475 if (rsm == NULL) 8476 goto out; 8477 } 8478 } 8479 } 8480 if (SEQ_GEQ(end, rsm->r_end)) { 8481 /** 8482 * The end of this block is either beyond this guy or right 8483 * at this guy. I.e.: 8484 * rsm --- |-----| 8485 * end |-----| 8486 * <or> 8487 * end |---------| 8488 */ 8489 if ((rsm->r_flags & RACK_ACKED) == 0) { 8490 /* 8491 * Is it a TLP of interest? 8492 */ 8493 if ((rsm->r_flags & RACK_TLP) && 8494 (rsm->r_rtr_cnt > 1)) { 8495 /* 8496 * We are splitting a rxt TLP, check 8497 * if we need to save off the start/end 8498 */ 8499 if (rack->rc_last_tlp_acked_set && 8500 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8501 /* 8502 * We already turned this on since we are inside 8503 * the previous one was a partially sack now we 8504 * are getting another one (maybe all of it). 
8505 */ 8506 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8507 /* 8508 * Lets make sure we have all of it though. 8509 */ 8510 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8511 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8512 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8513 rack->r_ctl.last_tlp_acked_end); 8514 } 8515 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8516 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8517 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8518 rack->r_ctl.last_tlp_acked_end); 8519 } 8520 } else { 8521 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8522 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8523 rack->rc_last_tlp_past_cumack = 0; 8524 rack->rc_last_tlp_acked_set = 1; 8525 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8526 } 8527 } 8528 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8529 changed += (rsm->r_end - rsm->r_start); 8530 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8531 if (rsm->r_in_tmap) /* should be true */ 8532 rack_log_sack_passed(tp, rack, rsm); 8533 /* Is Reordering occuring? */ 8534 if (rsm->r_flags & RACK_SACK_PASSED) { 8535 rsm->r_flags &= ~RACK_SACK_PASSED; 8536 rack->r_ctl.rc_reorder_ts = cts; 8537 } 8538 if (rack->app_limited_needs_set) 8539 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8540 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8541 rsm->r_flags |= RACK_ACKED; 8542 if (rsm->r_in_tmap) { 8543 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8544 rsm->r_in_tmap = 0; 8545 } 8546 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8547 } else { 8548 counter_u64_add(rack_sack_skipped_acked, 1); 8549 moved++; 8550 } 8551 if (end == rsm->r_end) { 8552 /* This block only - done, setup for next */ 8553 goto out; 8554 } 8555 /* 8556 * There is more not coverend by this rsm move on 8557 * to the next block in the RB tree. 8558 */ 8559 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8560 start = rsm->r_end; 8561 rsm = nrsm; 8562 if (rsm == NULL) 8563 goto out; 8564 goto do_rest_ofb; 8565 } 8566 /** 8567 * The end of this sack block is smaller than 8568 * our rsm i.e.: 8569 * rsm --- |-----| 8570 * end |--| 8571 */ 8572 if ((rsm->r_flags & RACK_ACKED) == 0) { 8573 /* 8574 * Is it a TLP of interest? 8575 */ 8576 if ((rsm->r_flags & RACK_TLP) && 8577 (rsm->r_rtr_cnt > 1)) { 8578 /* 8579 * We are splitting a rxt TLP, check 8580 * if we need to save off the start/end 8581 */ 8582 if (rack->rc_last_tlp_acked_set && 8583 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8584 /* 8585 * We already turned this on since we are inside 8586 * the previous one was a partially sack now we 8587 * are getting another one (maybe all of it). 8588 */ 8589 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8590 /* 8591 * Lets make sure we have all of it though. 
8592 */ 8593 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8594 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8595 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8596 rack->r_ctl.last_tlp_acked_end); 8597 } 8598 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8599 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8600 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8601 rack->r_ctl.last_tlp_acked_end); 8602 } 8603 } else { 8604 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8605 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8606 rack->rc_last_tlp_past_cumack = 0; 8607 rack->rc_last_tlp_acked_set = 1; 8608 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8609 } 8610 } 8611 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8612 if (prev && 8613 (prev->r_flags & RACK_ACKED)) { 8614 /** 8615 * Goal, we want the right remainder of rsm to shrink 8616 * in place and span from (rsm->r_start = end) to rsm->r_end. 8617 * We want to expand prev to go all the way 8618 * to prev->r_end <- end. 8619 * so in the tree we have before: 8620 * prev |--------| (acked) 8621 * rsm |-------| (non-acked) 8622 * sackblk |-| 8623 * We churn it so we end up with 8624 * prev |----------| (acked) 8625 * rsm |-----| (non-acked) 8626 * nrsm |-| (temporary) 8627 * 8628 * Note if either prev/rsm is a TLP we don't 8629 * do this. 8630 */ 8631 nrsm = &stack_map; 8632 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8633 prev->r_end = end; 8634 rsm->r_start = end; 8635 /* Now adjust nrsm (stack copy) to be 8636 * the one that is the small 8637 * piece that was "sacked". 8638 */ 8639 nrsm->r_end = end; 8640 rsm->r_dupack = 0; 8641 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8642 /* 8643 * Now that the rsm has had its start moved forward 8644 * lets go ahead and get its new place in the world. 8645 */ 8646 rack_setup_offset_for_rsm(prev, rsm); 8647 /* 8648 * Now nrsm is our new little piece 8649 * that is acked (which was merged 8650 * to prev). Update the rtt and changed 8651 * based on that. Also check for reordering. 8652 */ 8653 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8654 if (rack->app_limited_needs_set) 8655 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8656 changed += (nrsm->r_end - nrsm->r_start); 8657 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8658 if (nrsm->r_flags & RACK_SACK_PASSED) { 8659 rack->r_ctl.rc_reorder_ts = cts; 8660 } 8661 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8662 rsm = prev; 8663 counter_u64_add(rack_sack_used_prev_merge, 1); 8664 } else { 8665 /** 8666 * This is the case where our previous 8667 * block is not acked either, so we must 8668 * split the block in two. 8669 */ 8670 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8671 if (nrsm == NULL) { 8672 /* failed rrs what can we do but loose the sack info? */ 8673 goto out; 8674 } 8675 if ((rsm->r_flags & RACK_TLP) && 8676 (rsm->r_rtr_cnt > 1)) { 8677 /* 8678 * We are splitting a rxt TLP, check 8679 * if we need to save off the start/end 8680 */ 8681 if (rack->rc_last_tlp_acked_set && 8682 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8683 /* 8684 * We already turned this on since this block is inside 8685 * the previous one was a partially sack now we 8686 * are getting another one (maybe all of it). 8687 */ 8688 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8689 /* 8690 * Lets make sure we have all of it though. 
8691 */ 8692 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8693 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8694 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8695 rack->r_ctl.last_tlp_acked_end); 8696 } 8697 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8698 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8699 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8700 rack->r_ctl.last_tlp_acked_end); 8701 } 8702 } else { 8703 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8704 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8705 rack->rc_last_tlp_acked_set = 1; 8706 rack->rc_last_tlp_past_cumack = 0; 8707 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8708 } 8709 } 8710 /** 8711 * In this case nrsm becomes 8712 * nrsm->r_start = end; 8713 * nrsm->r_end = rsm->r_end; 8714 * which is un-acked. 8715 * <and> 8716 * rsm->r_end = nrsm->r_start; 8717 * i.e. the remaining un-acked 8718 * piece is left on the left 8719 * hand side. 8720 * 8721 * So we start like this 8722 * rsm |----------| (not acked) 8723 * sackblk |---| 8724 * build it so we have 8725 * rsm |---| (acked) 8726 * nrsm |------| (not acked) 8727 */ 8728 counter_u64_add(rack_sack_splits, 1); 8729 rack_clone_rsm(rack, nrsm, rsm, end); 8730 rsm->r_flags &= (~RACK_HAS_FIN); 8731 rsm->r_just_ret = 0; 8732 #ifndef INVARIANTS 8733 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8734 #else 8735 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8736 if (insret != NULL) { 8737 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8738 nrsm, insret, rack, rsm); 8739 } 8740 #endif 8741 if (rsm->r_in_tmap) { 8742 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8743 nrsm->r_in_tmap = 1; 8744 } 8745 nrsm->r_dupack = 0; 8746 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8747 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8748 changed += (rsm->r_end - rsm->r_start); 8749 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8750 if (rsm->r_in_tmap) /* should be true */ 8751 rack_log_sack_passed(tp, rack, rsm); 8752 /* Is Reordering occuring? */ 8753 if (rsm->r_flags & RACK_SACK_PASSED) { 8754 rsm->r_flags &= ~RACK_SACK_PASSED; 8755 rack->r_ctl.rc_reorder_ts = cts; 8756 } 8757 if (rack->app_limited_needs_set) 8758 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8759 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8760 rsm->r_flags |= RACK_ACKED; 8761 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8762 if (rsm->r_in_tmap) { 8763 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8764 rsm->r_in_tmap = 0; 8765 } 8766 } 8767 } else if (start != end){ 8768 /* 8769 * The block was already acked. 8770 */ 8771 counter_u64_add(rack_sack_skipped_acked, 1); 8772 moved++; 8773 } 8774 out: 8775 if (rsm && 8776 ((rsm->r_flags & RACK_TLP) == 0) && 8777 (rsm->r_flags & RACK_ACKED)) { 8778 /* 8779 * Now can we merge where we worked 8780 * with either the previous or 8781 * next block? 8782 */ 8783 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8784 while (next) { 8785 if (next->r_flags & RACK_TLP) 8786 break; 8787 if (next->r_flags & RACK_ACKED) { 8788 /* yep this and next can be merged */ 8789 rsm = rack_merge_rsm(rack, rsm, next); 8790 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8791 } else 8792 break; 8793 } 8794 /* Now what about the previous? 
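* Same idea in the other direction: keep folding any acked, non-TLP blocks that sit just before us into one larger acked block.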
*/ 8795 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8796 while (prev) { 8797 if (prev->r_flags & RACK_TLP) 8798 break; 8799 if (prev->r_flags & RACK_ACKED) { 8800 /* yep the previous and this can be merged */ 8801 rsm = rack_merge_rsm(rack, prev, rsm); 8802 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8803 } else 8804 break; 8805 } 8806 } 8807 if (used_ref == 0) { 8808 counter_u64_add(rack_sack_proc_all, 1); 8809 } else { 8810 counter_u64_add(rack_sack_proc_short, 1); 8811 } 8812 /* Save off the next one for quick reference. */ 8813 if (rsm) 8814 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8815 else 8816 nrsm = NULL; 8817 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8818 /* Pass back the moved. */ 8819 *moved_two = moved; 8820 return (changed); 8821 } 8822 8823 static void inline 8824 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8825 { 8826 struct rack_sendmap *tmap; 8827 8828 tmap = NULL; 8829 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8830 /* Its no longer sacked, mark it so */ 8831 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8832 #ifdef INVARIANTS 8833 if (rsm->r_in_tmap) { 8834 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8835 rack, rsm, rsm->r_flags); 8836 } 8837 #endif 8838 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8839 /* Rebuild it into our tmap */ 8840 if (tmap == NULL) { 8841 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8842 tmap = rsm; 8843 } else { 8844 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8845 tmap = rsm; 8846 } 8847 tmap->r_in_tmap = 1; 8848 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8849 } 8850 /* 8851 * Now lets possibly clear the sack filter so we start 8852 * recognizing sacks that cover this area. 8853 */ 8854 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8855 8856 } 8857 8858 static void 8859 rack_do_decay(struct tcp_rack *rack) 8860 { 8861 struct timeval res; 8862 8863 #define timersub(tvp, uvp, vvp) \ 8864 do { \ 8865 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8866 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8867 if ((vvp)->tv_usec < 0) { \ 8868 (vvp)->tv_sec--; \ 8869 (vvp)->tv_usec += 1000000; \ 8870 } \ 8871 } while (0) 8872 8873 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8874 #undef timersub 8875 8876 rack->r_ctl.input_pkt++; 8877 if ((rack->rc_in_persist) || 8878 (res.tv_sec >= 1) || 8879 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8880 /* 8881 * Check for decay of non-SAD, 8882 * we want all SAD detection metrics to 8883 * decay 1/4 per second (or more) passed. 8884 */ 8885 #ifdef NETFLIX_EXP_DETECTION 8886 uint32_t pkt_delta; 8887 8888 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8889 #endif 8890 /* Update our saved tracking values */ 8891 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8892 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8893 /* Now do we escape without decay? */ 8894 #ifdef NETFLIX_EXP_DETECTION 8895 if (rack->rc_in_persist || 8896 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8897 (pkt_delta < tcp_sad_low_pps)){ 8898 /* 8899 * We don't decay idle connections 8900 * or ones that have a low input pps. 
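* Otherwise the SAD counters below (ack_count, sack_count, sack_moved_extra and sack_noextra_move) are all shrunk via ctf_decay_count() using tcp_sad_decay_val, so roughly once a second the sack-to-ack ratios are re-based on recent behavior.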
8901 */ 8902 return; 8903 } 8904 /* Decay the counters */ 8905 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8906 tcp_sad_decay_val); 8907 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8908 tcp_sad_decay_val); 8909 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8910 tcp_sad_decay_val); 8911 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8912 tcp_sad_decay_val); 8913 #endif 8914 } 8915 } 8916 8917 static void 8918 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8919 { 8920 struct rack_sendmap *rsm; 8921 #ifdef INVARIANTS 8922 struct rack_sendmap *rm; 8923 #endif 8924 8925 /* 8926 * The ACK point is advancing to th_ack, we must drop off 8927 * the packets in the rack log and calculate any eligible 8928 * RTT's. 8929 */ 8930 rack->r_wanted_output = 1; 8931 8932 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */ 8933 if ((rack->rc_last_tlp_acked_set == 1) && 8934 (rack->rc_last_tlp_past_cumack == 1) && 8935 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8936 /* 8937 * We have reached the point where our last rack 8938 * TLP retransmit sequence is ahead of the cum-ack. 8939 * This can only happen when the cum-ack moves all 8940 * the way around (it has been a full 2^31 + 1 bytes 8941 * or more since we sent a retransmitted TLP). Let's 8942 * turn off the valid flag since it's not really valid. 8943 * 8944 * Note that since SACKs also turn on this event we have 8945 * a complication, we have to wait to age it out until 8946 * the cum-ack has gone by the TLP before checking, which is 8947 * what the next else clause does. 8948 */ 8949 rack_log_dsack_event(rack, 9, __LINE__, 8950 rack->r_ctl.last_tlp_acked_start, 8951 rack->r_ctl.last_tlp_acked_end); 8952 rack->rc_last_tlp_acked_set = 0; 8953 rack->rc_last_tlp_past_cumack = 0; 8954 } else if ((rack->rc_last_tlp_acked_set == 1) && 8955 (rack->rc_last_tlp_past_cumack == 0) && 8956 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8957 /* 8958 * It is safe to start aging TLPs out. 8959 */ 8960 rack->rc_last_tlp_past_cumack = 1; 8961 } 8962 /* We do the same for the TLP send seq as well */ 8963 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8964 (rack->rc_last_sent_tlp_past_cumack == 1) && 8965 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8966 rack_log_dsack_event(rack, 9, __LINE__, 8967 rack->r_ctl.last_sent_tlp_seq, 8968 (rack->r_ctl.last_sent_tlp_seq + 8969 rack->r_ctl.last_sent_tlp_len)); 8970 rack->rc_last_sent_tlp_seq_valid = 0; 8971 rack->rc_last_sent_tlp_past_cumack = 0; 8972 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8973 (rack->rc_last_sent_tlp_past_cumack == 0) && 8974 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8975 /* 8976 * It is safe to start aging the TLP send seq out. 8977 */ 8978 rack->rc_last_sent_tlp_past_cumack = 1; 8979 } 8980 more: 8981 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8982 if (rsm == NULL) { 8983 if ((th_ack - 1) == tp->iss) { 8984 /* 8985 * For the SYN incoming case we will not 8986 * have called tcp_output for the sending of 8987 * the SYN, so there will be no map. All 8988 * other cases should probably be a panic.
8989 */ 8990 return; 8991 } 8992 if (tp->t_flags & TF_SENTFIN) { 8993 /* if we sent a FIN we often will not have a map */ 8994 return; 8995 } 8996 #ifdef INVARIANTS 8997 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8998 tp, 8999 tp->t_state, th_ack, rack, 9000 tp->snd_una, tp->snd_max, tp->snd_nxt); 9001 #endif 9002 return; 9003 } 9004 if (SEQ_LT(th_ack, rsm->r_start)) { 9005 /* Huh, the map is missing this */ 9006 #ifdef INVARIANTS 9007 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 9008 rsm->r_start, 9009 th_ack, tp->t_state, rack->r_state); 9010 #endif 9011 return; 9012 } 9013 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 9014 9015 /* Now was it a retransmitted TLP? */ 9016 if ((rsm->r_flags & RACK_TLP) && 9017 (rsm->r_rtr_cnt > 1)) { 9018 /* 9019 * Yes, this rsm was a TLP and retransmitted, remember that 9020 * since if a DSACK comes back on this we don't want 9021 * to think of it as a reordered segment. This may 9022 * get updated again with possibly even other TLPs 9023 * in flight, but that's ok. Only when we don't send 9024 * a retransmitted TLP for 1/2 the sequence space 9025 * will it get turned off (above). 9026 */ 9027 if (rack->rc_last_tlp_acked_set && 9028 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 9029 /* 9030 * We already turned this on since the end matches; 9031 * the previous one was partially acked and now we 9032 * are getting another one (maybe all of it). 9033 */ 9034 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 9035 /* 9036 * Let's make sure we have all of it though. 9037 */ 9038 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 9039 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9040 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9041 rack->r_ctl.last_tlp_acked_end); 9042 } 9043 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 9044 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9045 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 9046 rack->r_ctl.last_tlp_acked_end); 9047 } 9048 } else { 9049 rack->rc_last_tlp_past_cumack = 1; 9050 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 9051 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 9052 rack->rc_last_tlp_acked_set = 1; 9053 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 9054 } 9055 } 9056 /* Now do we consume the whole thing? */ 9057 if (SEQ_GEQ(th_ack, rsm->r_end)) { 9058 /* It's all consumed.
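* th_ack covers this entire rsm: below we pull it out of the tree and the tmap, settle any retransmitted-byte accounting, and free it; if th_ack reaches beyond r_end we loop back to more: for the next entry.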
*/ 9059 uint32_t left; 9060 uint8_t newly_acked; 9061 9062 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 9063 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 9064 rsm->r_rtr_bytes = 0; 9065 /* Record the time of highest cumack sent */ 9066 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9067 #ifndef INVARIANTS 9068 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9069 #else 9070 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 9071 if (rm != rsm) { 9072 panic("removing head in rack:%p rsm:%p rm:%p", 9073 rack, rsm, rm); 9074 } 9075 #endif 9076 if (rsm->r_in_tmap) { 9077 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 9078 rsm->r_in_tmap = 0; 9079 } 9080 newly_acked = 1; 9081 if (rsm->r_flags & RACK_ACKED) { 9082 /* 9083 * It was acked on the scoreboard -- remove 9084 * it from total 9085 */ 9086 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9087 newly_acked = 0; 9088 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9089 /* 9090 * There are segments ACKED on the 9091 * scoreboard further up. We are seeing 9092 * reordering. 9093 */ 9094 rsm->r_flags &= ~RACK_SACK_PASSED; 9095 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9096 rsm->r_flags |= RACK_ACKED; 9097 rack->r_ctl.rc_reorder_ts = cts; 9098 if (rack->r_ent_rec_ns) { 9099 /* 9100 * We have sent no more, and we saw an sack 9101 * then ack arrive. 9102 */ 9103 rack->r_might_revert = 1; 9104 } 9105 } 9106 if ((rsm->r_flags & RACK_TO_REXT) && 9107 (tp->t_flags & TF_RCVD_TSTMP) && 9108 (to->to_flags & TOF_TS) && 9109 (to->to_tsecr != 0) && 9110 (tp->t_flags & TF_PREVVALID)) { 9111 /* 9112 * We can use the timestamp to see 9113 * if this retransmission was from the 9114 * first transmit. If so we made a mistake. 9115 */ 9116 tp->t_flags &= ~TF_PREVVALID; 9117 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9118 /* The first transmit is what this ack is for */ 9119 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 9120 } 9121 } 9122 left = th_ack - rsm->r_end; 9123 if (rack->app_limited_needs_set && newly_acked) 9124 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9125 /* Free back to zone */ 9126 rack_free(rack, rsm); 9127 if (left) { 9128 goto more; 9129 } 9130 /* Check for reneging */ 9131 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9132 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9133 /* 9134 * The peer has moved snd_una up to 9135 * the edge of this send, i.e. one 9136 * that it had previously acked. The only 9137 * way that can be true if the peer threw 9138 * away data (space issues) that it had 9139 * previously sacked (else it would have 9140 * given us snd_una up to (rsm->r_end). 9141 * We need to undo the acked markings here. 9142 * 9143 * Note we have to look to make sure th_ack is 9144 * our rsm->r_start in case we get an old ack 9145 * where th_ack is behind snd_una. 9146 */ 9147 rack_peer_reneges(rack, rsm, th_ack); 9148 } 9149 return; 9150 } 9151 if (rsm->r_flags & RACK_ACKED) { 9152 /* 9153 * It was acked on the scoreboard -- remove it from 9154 * total for the part being cum-acked. 9155 */ 9156 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9157 } 9158 /* 9159 * Clear the dup ack count for 9160 * the piece that remains. 9161 */ 9162 rsm->r_dupack = 0; 9163 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9164 if (rsm->r_rtr_bytes) { 9165 /* 9166 * It was retransmitted adjust the 9167 * sack holes for what was acked. 
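* That is, the newly cum-acked span (th_ack - r_start) is taken out of both this rsm's r_rtr_bytes and the connection-wide rc_holes_rxt, but only when it is at least as large as the remaining retransmitted bytes.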
9168 */ 9169 int ack_am; 9170 9171 ack_am = (th_ack - rsm->r_start); 9172 if (ack_am >= rsm->r_rtr_bytes) { 9173 rack->r_ctl.rc_holes_rxt -= ack_am; 9174 rsm->r_rtr_bytes -= ack_am; 9175 } 9176 } 9177 /* 9178 * Update where the piece starts and record 9179 * the time of send of highest cumack sent. 9180 */ 9181 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9182 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9183 /* Now we need to move our offset forward too */ 9184 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9185 /* Fix up the orig_m_len and possibly the mbuf offset */ 9186 rack_adjust_orig_mlen(rsm); 9187 } 9188 rsm->soff += (th_ack - rsm->r_start); 9189 rsm->r_start = th_ack; 9190 /* Now do we need to move the mbuf fwd too? */ 9191 if (rsm->m) { 9192 while (rsm->soff >= rsm->m->m_len) { 9193 rsm->soff -= rsm->m->m_len; 9194 rsm->m = rsm->m->m_next; 9195 KASSERT((rsm->m != NULL), 9196 (" nrsm:%p hit at soff:%u null m", 9197 rsm, rsm->soff)); 9198 } 9199 rsm->orig_m_len = rsm->m->m_len; 9200 } 9201 if (rack->app_limited_needs_set) 9202 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9203 } 9204 9205 static void 9206 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9207 { 9208 struct rack_sendmap *rsm; 9209 int sack_pass_fnd = 0; 9210 9211 if (rack->r_might_revert) { 9212 /* 9213 * Ok we have reordering, have not sent anything, we 9214 * might want to revert the congestion state if nothing 9215 * further has SACK_PASSED on it. Lets check. 9216 * 9217 * We also get here when we have DSACKs come in for 9218 * all the data that we FR'd. Note that a rxt or tlp 9219 * timer clears this from happening. 9220 */ 9221 9222 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9223 if (rsm->r_flags & RACK_SACK_PASSED) { 9224 sack_pass_fnd = 1; 9225 break; 9226 } 9227 } 9228 if (sack_pass_fnd == 0) { 9229 /* 9230 * We went into recovery 9231 * incorrectly due to reordering! 9232 */ 9233 int orig_cwnd; 9234 9235 rack->r_ent_rec_ns = 0; 9236 orig_cwnd = tp->snd_cwnd; 9237 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9238 tp->snd_recover = tp->snd_una; 9239 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9240 EXIT_RECOVERY(tp->t_flags); 9241 } 9242 rack->r_might_revert = 0; 9243 } 9244 } 9245 9246 #ifdef NETFLIX_EXP_DETECTION 9247 static void 9248 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9249 { 9250 if ((rack->do_detection || tcp_force_detection) && 9251 tcp_sack_to_ack_thresh && 9252 tcp_sack_to_move_thresh && 9253 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9254 /* 9255 * We have thresholds set to find 9256 * possible attackers and disable sack. 9257 * Check them. 
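* Roughly: ackratio = 1000 * sack_count / ack_count and moveratio = 1000 * sack_moved_extra / (sack_moved_extra + sack_noextra_move). When both cross their sysctl thresholds we disable sack processing and clamp cwnd to the current flight size; once the ack ratio drops back under tcp_restoral_thresh (or the map shrinks below tcp_map_minimum) we re-enable sack and restore the saved cwnd if it is larger.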
9258 */ 9259 uint64_t ackratio, moveratio, movetotal; 9260 9261 /* Log detecting */ 9262 rack_log_sad(rack, 1); 9263 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9264 ackratio *= (uint64_t)(1000); 9265 if (rack->r_ctl.ack_count) 9266 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9267 else { 9268 /* We really should not hit here */ 9269 ackratio = 1000; 9270 } 9271 if ((rack->sack_attack_disable == 0) && 9272 (ackratio > rack_highest_sack_thresh_seen)) 9273 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9274 movetotal = rack->r_ctl.sack_moved_extra; 9275 movetotal += rack->r_ctl.sack_noextra_move; 9276 moveratio = rack->r_ctl.sack_moved_extra; 9277 moveratio *= (uint64_t)1000; 9278 if (movetotal) 9279 moveratio /= movetotal; 9280 else { 9281 /* No moves, thats pretty good */ 9282 moveratio = 0; 9283 } 9284 if ((rack->sack_attack_disable == 0) && 9285 (moveratio > rack_highest_move_thresh_seen)) 9286 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9287 if (rack->sack_attack_disable == 0) { 9288 if ((ackratio > tcp_sack_to_ack_thresh) && 9289 (moveratio > tcp_sack_to_move_thresh)) { 9290 /* Disable sack processing */ 9291 rack->sack_attack_disable = 1; 9292 if (rack->r_rep_attack == 0) { 9293 rack->r_rep_attack = 1; 9294 counter_u64_add(rack_sack_attacks_detected, 1); 9295 } 9296 if (tcp_attack_on_turns_on_logging) { 9297 /* 9298 * Turn on logging, used for debugging 9299 * false positives. 9300 */ 9301 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9302 } 9303 /* Clamp the cwnd at flight size */ 9304 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9305 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9306 rack_log_sad(rack, 2); 9307 } 9308 } else { 9309 /* We are sack-disabled check for false positives */ 9310 if ((ackratio <= tcp_restoral_thresh) || 9311 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9312 rack->sack_attack_disable = 0; 9313 rack_log_sad(rack, 3); 9314 /* Restart counting */ 9315 rack->r_ctl.sack_count = 0; 9316 rack->r_ctl.sack_moved_extra = 0; 9317 rack->r_ctl.sack_noextra_move = 1; 9318 rack->r_ctl.ack_count = max(1, 9319 (bytes_this_ack / segsiz)); 9320 9321 if (rack->r_rep_reverse == 0) { 9322 rack->r_rep_reverse = 1; 9323 counter_u64_add(rack_sack_attacks_reversed, 1); 9324 } 9325 /* Restore the cwnd */ 9326 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9327 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9328 } 9329 } 9330 } 9331 } 9332 #endif 9333 9334 static int 9335 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9336 { 9337 9338 uint32_t am, l_end; 9339 int was_tlp = 0; 9340 9341 if (SEQ_GT(end, start)) 9342 am = end - start; 9343 else 9344 am = 0; 9345 if ((rack->rc_last_tlp_acked_set ) && 9346 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9347 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9348 /* 9349 * The DSACK is because of a TLP which we don't 9350 * do anything with the reordering window over since 9351 * it was not reordering that caused the DSACK but 9352 * our previous retransmit TLP. 9353 */ 9354 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9355 was_tlp = 1; 9356 goto skip_dsack_round; 9357 } 9358 if (rack->rc_last_sent_tlp_seq_valid) { 9359 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9360 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9361 (SEQ_LEQ(end, l_end))) { 9362 /* 9363 * This dsack is from the last sent TLP, ignore it 9364 * for reordering purposes. 
9365 */ 9366 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9367 was_tlp = 1; 9368 goto skip_dsack_round; 9369 } 9370 } 9371 if (rack->rc_dsack_round_seen == 0) { 9372 rack->rc_dsack_round_seen = 1; 9373 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9374 rack->r_ctl.num_dsack++; 9375 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9376 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9377 } 9378 skip_dsack_round: 9379 /* 9380 * We keep track of how many DSACK blocks we get 9381 * after a recovery incident. 9382 */ 9383 rack->r_ctl.dsack_byte_cnt += am; 9384 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9385 rack->r_ctl.retran_during_recovery && 9386 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9387 /* 9388 * False recovery; the most likely culprit is reordering. If 9389 * nothing else is missing we need to revert. 9390 */ 9391 rack->r_might_revert = 1; 9392 rack_handle_might_revert(rack->rc_tp, rack); 9393 rack->r_might_revert = 0; 9394 rack->r_ctl.retran_during_recovery = 0; 9395 rack->r_ctl.dsack_byte_cnt = 0; 9396 } 9397 return (was_tlp); 9398 } 9399 9400 static uint32_t 9401 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 9402 { 9403 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 9404 } 9405 9406 static int32_t 9407 rack_compute_pipe(struct tcpcb *tp) 9408 { 9409 return ((int32_t)do_rack_compute_pipe(tp, 9410 (struct tcp_rack *)tp->t_fb_ptr, 9411 tp->snd_una)); 9412 } 9413 9414 static void 9415 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9416 { 9417 /* Deal with changed and PRR here (in recovery only) */ 9418 uint32_t pipe, snd_una; 9419 9420 rack->r_ctl.rc_prr_delivered += changed; 9421 9422 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9423 /* 9424 * It is all outstanding, we are application limited 9425 * and thus we don't need more room to send anything. 9426 * Note we use tp->snd_una here and not th_ack because 9427 * the data has not yet been cut from the sb.
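* When there is room, the code below mirrors the RFC 6937 proportional rate reduction calculation: while pipe > ssthresh we allow roughly sndcnt = (prr_delivered * ssthresh) / recovery_fs + 1 - prr_out, and once pipe falls to or below ssthresh we switch to the bounded form min(ssthresh - pipe, limit).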
9428 */ 9429 rack->r_ctl.rc_prr_sndcnt = 0; 9430 return; 9431 } 9432 /* Compute prr_sndcnt */ 9433 if (SEQ_GT(tp->snd_una, th_ack)) { 9434 snd_una = tp->snd_una; 9435 } else { 9436 snd_una = th_ack; 9437 } 9438 pipe = do_rack_compute_pipe(tp, rack, snd_una); 9439 if (pipe > tp->snd_ssthresh) { 9440 long sndcnt; 9441 9442 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9443 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9444 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9445 else { 9446 rack->r_ctl.rc_prr_sndcnt = 0; 9447 rack_log_to_prr(rack, 9, 0, __LINE__); 9448 sndcnt = 0; 9449 } 9450 sndcnt++; 9451 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9452 sndcnt -= rack->r_ctl.rc_prr_out; 9453 else 9454 sndcnt = 0; 9455 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9456 rack_log_to_prr(rack, 10, 0, __LINE__); 9457 } else { 9458 uint32_t limit; 9459 9460 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9461 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9462 else 9463 limit = 0; 9464 if (changed > limit) 9465 limit = changed; 9466 limit += ctf_fixed_maxseg(tp); 9467 if (tp->snd_ssthresh > pipe) { 9468 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9469 rack_log_to_prr(rack, 11, 0, __LINE__); 9470 } else { 9471 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9472 rack_log_to_prr(rack, 12, 0, __LINE__); 9473 } 9474 } 9475 } 9476 9477 static void 9478 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9479 { 9480 uint32_t changed; 9481 struct tcp_rack *rack; 9482 struct rack_sendmap *rsm; 9483 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9484 register uint32_t th_ack; 9485 int32_t i, j, k, num_sack_blks = 0; 9486 uint32_t cts, acked, ack_point; 9487 int loop_start = 0, moved_two = 0; 9488 uint32_t tsused; 9489 9490 9491 INP_WLOCK_ASSERT(tp->t_inpcb); 9492 if (tcp_get_flags(th) & TH_RST) { 9493 /* We don't log resets */ 9494 return; 9495 } 9496 rack = (struct tcp_rack *)tp->t_fb_ptr; 9497 cts = tcp_get_usecs(NULL); 9498 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9499 changed = 0; 9500 th_ack = th->th_ack; 9501 if (rack->sack_attack_disable == 0) 9502 rack_do_decay(rack); 9503 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9504 /* 9505 * You only get credit for 9506 * MSS and greater (and you get extra 9507 * credit for larger cum-ack moves). 9508 */ 9509 int ac; 9510 9511 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9512 rack->r_ctl.ack_count += ac; 9513 counter_u64_add(rack_ack_total, ac); 9514 } 9515 if (rack->r_ctl.ack_count > 0xfff00000) { 9516 /* 9517 * reduce the number to keep us under 9518 * a uint32_t. 9519 */ 9520 rack->r_ctl.ack_count /= 2; 9521 rack->r_ctl.sack_count /= 2; 9522 } 9523 if (SEQ_GT(th_ack, tp->snd_una)) { 9524 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9525 tp->t_acktime = ticks; 9526 } 9527 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9528 changed = th_ack - rsm->r_start; 9529 if (changed) { 9530 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9531 } 9532 if ((to->to_flags & TOF_SACK) == 0) { 9533 /* We are done nothing left and no sack. */ 9534 rack_handle_might_revert(tp, rack); 9535 /* 9536 * For cases where we struck a dup-ack 9537 * with no SACK, add to the changes so 9538 * PRR will work right. 
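* Crediting one MSS worth of 'changed' here lets the PRR logic release at least a segment of send quota even though the dup-ack carried no sack information.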
9539 */ 9540 if (dup_ack_struck && (changed == 0)) { 9541 changed += ctf_fixed_maxseg(rack->rc_tp); 9542 } 9543 goto out; 9544 } 9545 /* Sack block processing */ 9546 if (SEQ_GT(th_ack, tp->snd_una)) 9547 ack_point = th_ack; 9548 else 9549 ack_point = tp->snd_una; 9550 for (i = 0; i < to->to_nsacks; i++) { 9551 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9552 &sack, sizeof(sack)); 9553 sack.start = ntohl(sack.start); 9554 sack.end = ntohl(sack.end); 9555 if (SEQ_GT(sack.end, sack.start) && 9556 SEQ_GT(sack.start, ack_point) && 9557 SEQ_LT(sack.start, tp->snd_max) && 9558 SEQ_GT(sack.end, ack_point) && 9559 SEQ_LEQ(sack.end, tp->snd_max)) { 9560 sack_blocks[num_sack_blks] = sack; 9561 num_sack_blks++; 9562 } else if (SEQ_LEQ(sack.start, th_ack) && 9563 SEQ_LEQ(sack.end, th_ack)) { 9564 int was_tlp; 9565 9566 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9567 /* 9568 * Its a D-SACK block. 9569 */ 9570 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9571 } 9572 } 9573 if (rack->rc_dsack_round_seen) { 9574 /* Is the dsack roound over? */ 9575 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9576 /* Yes it is */ 9577 rack->rc_dsack_round_seen = 0; 9578 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9579 } 9580 } 9581 /* 9582 * Sort the SACK blocks so we can update the rack scoreboard with 9583 * just one pass. 9584 */ 9585 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9586 num_sack_blks, th->th_ack); 9587 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9588 if (num_sack_blks == 0) { 9589 /* Nothing to sack (DSACKs?) */ 9590 goto out_with_totals; 9591 } 9592 if (num_sack_blks < 2) { 9593 /* Only one, we don't need to sort */ 9594 goto do_sack_work; 9595 } 9596 /* Sort the sacks */ 9597 for (i = 0; i < num_sack_blks; i++) { 9598 for (j = i + 1; j < num_sack_blks; j++) { 9599 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9600 sack = sack_blocks[i]; 9601 sack_blocks[i] = sack_blocks[j]; 9602 sack_blocks[j] = sack; 9603 } 9604 } 9605 } 9606 /* 9607 * Now are any of the sack block ends the same (yes some 9608 * implementations send these)? 9609 */ 9610 again: 9611 if (num_sack_blks == 0) 9612 goto out_with_totals; 9613 if (num_sack_blks > 1) { 9614 for (i = 0; i < num_sack_blks; i++) { 9615 for (j = i + 1; j < num_sack_blks; j++) { 9616 if (sack_blocks[i].end == sack_blocks[j].end) { 9617 /* 9618 * Ok these two have the same end we 9619 * want the smallest end and then 9620 * throw away the larger and start 9621 * again. 9622 */ 9623 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9624 /* 9625 * The second block covers 9626 * more area use that 9627 */ 9628 sack_blocks[i].start = sack_blocks[j].start; 9629 } 9630 /* 9631 * Now collapse out the dup-sack and 9632 * lower the count 9633 */ 9634 for (k = (j + 1); k < num_sack_blks; k++) { 9635 sack_blocks[j].start = sack_blocks[k].start; 9636 sack_blocks[j].end = sack_blocks[k].end; 9637 j++; 9638 } 9639 num_sack_blks--; 9640 goto again; 9641 } 9642 } 9643 } 9644 } 9645 do_sack_work: 9646 /* 9647 * First lets look to see if 9648 * we have retransmitted and 9649 * can use the transmit next? 9650 */ 9651 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9652 if (rsm && 9653 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9654 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9655 /* 9656 * We probably did the FR and the next 9657 * SACK in continues as we would expect. 
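* This is the common fast path: after a retransmit the lowest sack block usually lines up with the head of the tmap, so we process it directly and, if it turns out to be the only block, credit it like cum-acked data and skip the full per-block loop.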
9658 */ 9659 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9660 if (acked) { 9661 rack->r_wanted_output = 1; 9662 changed += acked; 9663 } 9664 if (num_sack_blks == 1) { 9665 /* 9666 * This is what we would expect from 9667 * a normal implementation to happen 9668 * after we have retransmitted the FR, 9669 * i.e the sack-filter pushes down 9670 * to 1 block and the next to be retransmitted 9671 * is the sequence in the sack block (has more 9672 * are acked). Count this as ACK'd data to boost 9673 * up the chances of recovering any false positives. 9674 */ 9675 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9676 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9677 counter_u64_add(rack_express_sack, 1); 9678 if (rack->r_ctl.ack_count > 0xfff00000) { 9679 /* 9680 * reduce the number to keep us under 9681 * a uint32_t. 9682 */ 9683 rack->r_ctl.ack_count /= 2; 9684 rack->r_ctl.sack_count /= 2; 9685 } 9686 goto out_with_totals; 9687 } else { 9688 /* 9689 * Start the loop through the 9690 * rest of blocks, past the first block. 9691 */ 9692 moved_two = 0; 9693 loop_start = 1; 9694 } 9695 } 9696 /* Its a sack of some sort */ 9697 rack->r_ctl.sack_count++; 9698 if (rack->r_ctl.sack_count > 0xfff00000) { 9699 /* 9700 * reduce the number to keep us under 9701 * a uint32_t. 9702 */ 9703 rack->r_ctl.ack_count /= 2; 9704 rack->r_ctl.sack_count /= 2; 9705 } 9706 counter_u64_add(rack_sack_total, 1); 9707 if (rack->sack_attack_disable) { 9708 /* An attacker disablement is in place */ 9709 if (num_sack_blks > 1) { 9710 rack->r_ctl.sack_count += (num_sack_blks - 1); 9711 rack->r_ctl.sack_moved_extra++; 9712 counter_u64_add(rack_move_some, 1); 9713 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9714 rack->r_ctl.sack_moved_extra /= 2; 9715 rack->r_ctl.sack_noextra_move /= 2; 9716 } 9717 } 9718 goto out; 9719 } 9720 rsm = rack->r_ctl.rc_sacklast; 9721 for (i = loop_start; i < num_sack_blks; i++) { 9722 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9723 if (acked) { 9724 rack->r_wanted_output = 1; 9725 changed += acked; 9726 } 9727 if (moved_two) { 9728 /* 9729 * If we did not get a SACK for at least a MSS and 9730 * had to move at all, or if we moved more than our 9731 * threshold, it counts against the "extra" move. 9732 */ 9733 rack->r_ctl.sack_moved_extra += moved_two; 9734 counter_u64_add(rack_move_some, 1); 9735 } else { 9736 /* 9737 * else we did not have to move 9738 * any more than we would expect. 9739 */ 9740 rack->r_ctl.sack_noextra_move++; 9741 counter_u64_add(rack_move_none, 1); 9742 } 9743 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9744 /* 9745 * If the SACK was not a full MSS then 9746 * we add to sack_count the number of 9747 * MSS's (or possibly more than 9748 * a MSS if its a TSO send) we had to skip by. 9749 */ 9750 rack->r_ctl.sack_count += moved_two; 9751 counter_u64_add(rack_sack_total, moved_two); 9752 } 9753 /* 9754 * Now we need to setup for the next 9755 * round. First we make sure we won't 9756 * exceed the size of our uint32_t on 9757 * the various counts, and then clear out 9758 * moved_two. 
9759 */ 9760 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9761 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9762 rack->r_ctl.sack_moved_extra /= 2; 9763 rack->r_ctl.sack_noextra_move /= 2; 9764 } 9765 if (rack->r_ctl.sack_count > 0xfff00000) { 9766 rack->r_ctl.ack_count /= 2; 9767 rack->r_ctl.sack_count /= 2; 9768 } 9769 moved_two = 0; 9770 } 9771 out_with_totals: 9772 if (num_sack_blks > 1) { 9773 /* 9774 * You get an extra stroke if 9775 * you have more than one sack-blk, this 9776 * could be where we are skipping forward 9777 * and the sack-filter is still working, or 9778 * it could be an attacker constantly 9779 * moving us. 9780 */ 9781 rack->r_ctl.sack_moved_extra++; 9782 counter_u64_add(rack_move_some, 1); 9783 } 9784 out: 9785 #ifdef NETFLIX_EXP_DETECTION 9786 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9787 #endif 9788 if (changed) { 9789 /* Something changed cancel the rack timer */ 9790 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9791 } 9792 tsused = tcp_get_usecs(NULL); 9793 rsm = tcp_rack_output(tp, rack, tsused); 9794 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9795 rsm && 9796 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9797 /* Enter recovery */ 9798 entered_recovery = 1; 9799 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9800 /* 9801 * When we enter recovery we need to assure we send 9802 * one packet. 9803 */ 9804 if (rack->rack_no_prr == 0) { 9805 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9806 rack_log_to_prr(rack, 8, 0, __LINE__); 9807 } 9808 rack->r_timer_override = 1; 9809 rack->r_early = 0; 9810 rack->r_ctl.rc_agg_early = 0; 9811 } else if (IN_FASTRECOVERY(tp->t_flags) && 9812 rsm && 9813 (rack->r_rr_config == 3)) { 9814 /* 9815 * Assure we can output and we get no 9816 * remembered pace time except the retransmit. 9817 */ 9818 rack->r_timer_override = 1; 9819 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9820 rack->r_ctl.rc_resend = rsm; 9821 } 9822 if (IN_FASTRECOVERY(tp->t_flags) && 9823 (rack->rack_no_prr == 0) && 9824 (entered_recovery == 0)) { 9825 rack_update_prr(tp, rack, changed, th_ack); 9826 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9827 ((tcp_in_hpts(rack->rc_inp) == 0) && 9828 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9829 /* 9830 * If you are pacing output you don't want 9831 * to override. 9832 */ 9833 rack->r_early = 0; 9834 rack->r_ctl.rc_agg_early = 0; 9835 rack->r_timer_override = 1; 9836 } 9837 } 9838 } 9839 9840 static void 9841 rack_strike_dupack(struct tcp_rack *rack) 9842 { 9843 struct rack_sendmap *rsm; 9844 9845 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9846 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9847 rsm = TAILQ_NEXT(rsm, r_tnext); 9848 if (rsm->r_flags & RACK_MUST_RXT) { 9849 /* Sendmap entries that are marked to 9850 * be retransmitted do not need dupack's 9851 * struck. We get these marks for a number 9852 * of reasons (rxt timeout with no sack, 9853 * mtu change, or rwnd collapses). When 9854 * these events occur, we know we must retransmit 9855 * them and mark the sendmap entries. Dupack counting 9856 * is not needed since we are already set to retransmit 9857 * it as soon as we can. 9858 */ 9859 continue; 9860 } 9861 } 9862 if (rsm && (rsm->r_dupack < 0xff)) { 9863 rsm->r_dupack++; 9864 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9865 struct timeval tv; 9866 uint32_t cts; 9867 /* 9868 * Here we see if we need to retransmit. 
For 9869 * a SACK type connection if enough time has passed 9870 * we will get a return of the rsm. For a non-sack 9871 * connection we will get the rsm returned if the 9872 * dupack value is 3 or more. 9873 */ 9874 cts = tcp_get_usecs(&tv); 9875 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9876 if (rack->r_ctl.rc_resend != NULL) { 9877 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9878 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9879 rack->rc_tp->snd_una, __LINE__); 9880 } 9881 rack->r_wanted_output = 1; 9882 rack->r_timer_override = 1; 9883 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9884 } 9885 } else { 9886 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9887 } 9888 } 9889 } 9890 9891 static void 9892 rack_check_bottom_drag(struct tcpcb *tp, 9893 struct tcp_rack *rack, 9894 struct socket *so, int32_t acked) 9895 { 9896 uint32_t segsiz, minseg; 9897 9898 segsiz = ctf_fixed_maxseg(tp); 9899 minseg = segsiz; 9900 9901 if (tp->snd_max == tp->snd_una) { 9902 /* 9903 * We are doing dynamic pacing and we are way 9904 * under. Basically everything got acked while 9905 * we were still waiting on the pacer to expire. 9906 * 9907 * This means we need to boost the b/w in 9908 * addition to any earlier boosting of 9909 * the multiplier. 9910 */ 9911 rack->rc_dragged_bottom = 1; 9912 rack_validate_multipliers_at_or_above100(rack); 9913 /* 9914 * Lets use the segment bytes acked plus 9915 * the lowest RTT seen as the basis to 9916 * form a b/w estimate. This will be off 9917 * due to the fact that the true estimate 9918 * should be around 1/2 the time of the RTT 9919 * but we can settle for that. 9920 */ 9921 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9922 acked) { 9923 uint64_t bw, calc_bw, rtt; 9924 9925 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9926 if (rtt == 0) { 9927 /* no us sample is there a ms one? */ 9928 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9929 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9930 } else { 9931 goto no_measurement; 9932 } 9933 } 9934 bw = acked; 9935 calc_bw = bw * 1000000; 9936 calc_bw /= rtt; 9937 if (rack->r_ctl.last_max_bw && 9938 (rack->r_ctl.last_max_bw < calc_bw)) { 9939 /* 9940 * If we have a last calculated max bw 9941 * enforce it. 9942 */ 9943 calc_bw = rack->r_ctl.last_max_bw; 9944 } 9945 /* now plop it in */ 9946 if (rack->rc_gp_filled == 0) { 9947 if (calc_bw > ONE_POINT_TWO_MEG) { 9948 /* 9949 * If we have no measurement 9950 * don't let us set in more than 9951 * 1.2Mbps. If we are still too 9952 * low after pacing with this we 9953 * will hopefully have a max b/w 9954 * available to sanity check things. 
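* The seed estimate above is simply calc_bw = acked * 1000000 / rtt (bytes acked over the measured rtt, capped by last_max_bw), so it is deliberately rough; the clamp to ONE_POINT_TWO_MEG keeps that first guess conservative until real goodput measurements fill in gp_bw.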
9955 */ 9956 calc_bw = ONE_POINT_TWO_MEG; 9957 } 9958 rack->r_ctl.rc_rtt_diff = 0; 9959 rack->r_ctl.gp_bw = calc_bw; 9960 rack->rc_gp_filled = 1; 9961 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9962 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9963 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9964 } else if (calc_bw > rack->r_ctl.gp_bw) { 9965 rack->r_ctl.rc_rtt_diff = 0; 9966 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9967 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9968 rack->r_ctl.gp_bw = calc_bw; 9969 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9970 } else 9971 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9972 if ((rack->gp_ready == 0) && 9973 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9974 /* We have enough measurements now */ 9975 rack->gp_ready = 1; 9976 rack_set_cc_pacing(rack); 9977 if (rack->defer_options) 9978 rack_apply_deferred_options(rack); 9979 } 9980 /* 9981 * For acks over 1mss we do a extra boost to simulate 9982 * where we would get 2 acks (we want 110 for the mul). 9983 */ 9984 if (acked > segsiz) 9985 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9986 } else { 9987 /* 9988 * zero rtt possibly?, settle for just an old increase. 9989 */ 9990 no_measurement: 9991 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9992 } 9993 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9994 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9995 minseg)) && 9996 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9997 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9998 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9999 (segsiz * rack_req_segs))) { 10000 /* 10001 * We are doing dynamic GP pacing and 10002 * we have everything except 1MSS or less 10003 * bytes left out. We are still pacing away. 10004 * And there is data that could be sent, This 10005 * means we are inserting delayed ack time in 10006 * our measurements because we are pacing too slow. 10007 */ 10008 rack_validate_multipliers_at_or_above100(rack); 10009 rack->rc_dragged_bottom = 1; 10010 rack_increase_bw_mul(rack, -1, 0, 0, 1); 10011 } 10012 } 10013 10014 10015 10016 static void 10017 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 10018 { 10019 /* 10020 * The fast output path is enabled and we 10021 * have moved the cumack forward. Lets see if 10022 * we can expand forward the fast path length by 10023 * that amount. What we would ideally like to 10024 * do is increase the number of bytes in the 10025 * fast path block (left_to_send) by the 10026 * acked amount. However we have to gate that 10027 * by two factors: 10028 * 1) The amount outstanding and the rwnd of the peer 10029 * (i.e. we don't want to exceed the rwnd of the peer). 10030 * <and> 10031 * 2) The amount of data left in the socket buffer (i.e. 10032 * we can't send beyond what is in the buffer). 10033 * 10034 * Note that this does not take into account any increase 10035 * in the cwnd. We will only extend the fast path by 10036 * what was acked. 
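 * As a small illustrative example (hypothetical numbers): with 20000
 * bytes sitting in the socket buffer, 8000 bytes outstanding and a
 * 12000 byte peer window, the gate computed below is
 * min(20000 - 8000, 12000 - 8000) = 4000, so left_to_send is only
 * grown when the new total stays at or under 4000.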
 */
	uint32_t new_total, gating_val;

	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
			 (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
	if (new_total <= gating_val) {
		/* We can increase left_to_send by the acked amount */
		counter_u64_add(rack_extended_rfo, 1);
		rack->r_ctl.fsb.left_to_send = new_total;
		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
			("rack:%p left_to_send:%u sbavail:%u out:%u",
			 rack, rack->r_ctl.fsb.left_to_send,
			 sbavail(&rack->rc_inp->inp_socket->so_snd),
			 (tp->snd_max - tp->snd_una)));

	}
}

static void
rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
{
	/*
	 * Here any sendmap entry that points to the
	 * beginning mbuf must be adjusted to the correct
	 * offset. This must be called with:
	 * 1) The socket buffer locked
	 * 2) snd_una adjusted to its new position.
	 *
	 * Note that (2) implies rack_ack_received has also
	 * been called.
	 *
	 * We grab the first mbuf in the socket buffer and
	 * then go through the front of the sendmap, recalculating
	 * the stored offset for any sendmap entry that has
	 * that mbuf. We must use the sb functions to do this
	 * since it is possible an add was done as well as
	 * the subtraction we may have just completed. This should
	 * not be a penalty though, since we just referenced the sb
	 * to go in and trim off the mbufs that we freed (of course
	 * there will be a penalty for the sendmap references though).
	 */
	struct mbuf *m;
	struct rack_sendmap *rsm;

	SOCKBUF_LOCK_ASSERT(sb);
	m = sb->sb_mb;
	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
	if ((rsm == NULL) || (m == NULL)) {
		/* Nothing outstanding */
		return;
	}
	while (rsm->m && (rsm->m == m)) {
		/* one to adjust */
#ifdef INVARIANTS
		struct mbuf *tm;
		uint32_t soff;

		tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
		if (rsm->orig_m_len != m->m_len) {
			rack_adjust_orig_mlen(rsm);
		}
		if (rsm->soff != soff) {
			/*
			 * This is not a fatal error, we anticipate it
			 * might happen (the else code), so we count it here
			 * so that under INVARIANTS we can see that it really
			 * does happen.
			 */
			counter_u64_add(rack_adjust_map_bw, 1);
		}
		rsm->m = tm;
		rsm->soff = soff;
		if (tm)
			rsm->orig_m_len = rsm->m->m_len;
		else
			rsm->orig_m_len = 0;
#else
		rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
		if (rsm->m)
			rsm->orig_m_len = rsm->m->m_len;
		else
			rsm->orig_m_len = 0;
#endif
		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
			      rsm);
		if (rsm == NULL)
			break;
	}
}

/*
 * Return value of 1: we do not need to call rack_process_data().
 * Return value of 0: rack_process_data() can be called.
 * For ret_val, if it is 0 the TCP is locked; if it is non-zero
 * it is unlocked and probably unsafe to touch the TCB.
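 * Callers therefore check this return value first and only fall
 * through to rack_process_data() when 0 is returned (see for example
 * rack_do_established() below).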
10133 */ 10134 static int 10135 rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10136 struct tcpcb *tp, struct tcpopt *to, 10137 uint32_t tiwin, int32_t tlen, 10138 int32_t * ofia, int32_t thflags, int32_t *ret_val) 10139 { 10140 int32_t ourfinisacked = 0; 10141 int32_t nsegs, acked_amount; 10142 int32_t acked; 10143 struct mbuf *mfree; 10144 struct tcp_rack *rack; 10145 int32_t under_pacing = 0; 10146 int32_t recovery = 0; 10147 10148 rack = (struct tcp_rack *)tp->t_fb_ptr; 10149 if (SEQ_GT(th->th_ack, tp->snd_max)) { 10150 __ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val, 10151 &rack->r_ctl.challenge_ack_ts, 10152 &rack->r_ctl.challenge_ack_cnt); 10153 rack->r_wanted_output = 1; 10154 return (1); 10155 } 10156 if (rack->gp_ready && 10157 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10158 under_pacing = 1; 10159 } 10160 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 10161 int in_rec, dup_ack_struck = 0; 10162 10163 in_rec = IN_FASTRECOVERY(tp->t_flags); 10164 if (rack->rc_in_persist) { 10165 tp->t_rxtshift = 0; 10166 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10167 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10168 } 10169 if ((th->th_ack == tp->snd_una) && 10170 (tiwin == tp->snd_wnd) && 10171 ((to->to_flags & TOF_SACK) == 0)) { 10172 rack_strike_dupack(rack); 10173 dup_ack_struck = 1; 10174 } 10175 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck); 10176 } 10177 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10178 /* 10179 * Old ack, behind (or duplicate to) the last one rcv'd 10180 * Note: We mark reordering is occuring if its 10181 * less than and we have not closed our window. 10182 */ 10183 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { 10184 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 10185 } 10186 return (0); 10187 } 10188 /* 10189 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 10190 * something we sent. 10191 */ 10192 if (tp->t_flags & TF_NEEDSYN) { 10193 /* 10194 * T/TCP: Connection was half-synchronized, and our SYN has 10195 * been ACK'd (so connection is now fully synchronized). Go 10196 * to non-starred state, increment snd_una for ACK of SYN, 10197 * and check if we can do window scaling. 10198 */ 10199 tp->t_flags &= ~TF_NEEDSYN; 10200 tp->snd_una++; 10201 /* Do window scaling? */ 10202 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 10203 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 10204 tp->rcv_scale = tp->request_r_scale; 10205 /* Send window already scaled. */ 10206 } 10207 } 10208 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10209 INP_WLOCK_ASSERT(tp->t_inpcb); 10210 10211 acked = BYTES_THIS_ACK(tp, th); 10212 if (acked) { 10213 /* 10214 * Any time we move the cum-ack forward clear 10215 * keep-alive tied probe-not-answered. The 10216 * persists clears its own on entry. 10217 */ 10218 rack->probe_not_answered = 0; 10219 } 10220 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 10221 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 10222 /* 10223 * If we just performed our first retransmit, and the ACK arrives 10224 * within our recovery window, then it was a mistake to do the 10225 * retransmit in the first place. Recover our original cwnd and 10226 * ssthresh, and proceed to transmit where we left off. 
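 * The check below treats the ACK as falling inside that window when
 * t_rxtshift is exactly 1 and (ticks - t_badrxtwin) is still negative,
 * i.e. t_badrxtwin has not yet been reached.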
10227 */ 10228 if ((tp->t_flags & TF_PREVVALID) && 10229 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10230 tp->t_flags &= ~TF_PREVVALID; 10231 if (tp->t_rxtshift == 1 && 10232 (int)(ticks - tp->t_badrxtwin) < 0) 10233 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10234 } 10235 if (acked) { 10236 /* assure we are not backed off */ 10237 tp->t_rxtshift = 0; 10238 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10239 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10240 rack->rc_tlp_in_progress = 0; 10241 rack->r_ctl.rc_tlp_cnt_out = 0; 10242 /* 10243 * If it is the RXT timer we want to 10244 * stop it, so we can restart a TLP. 10245 */ 10246 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10247 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10248 #ifdef NETFLIX_HTTP_LOGGING 10249 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10250 #endif 10251 } 10252 /* 10253 * If we have a timestamp reply, update smoothed round trip time. If 10254 * no timestamp is present but transmit timer is running and timed 10255 * sequence number was acked, update smoothed round trip time. Since 10256 * we now have an rtt measurement, cancel the timer backoff (cf., 10257 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10258 * timer. 10259 * 10260 * Some boxes send broken timestamp replies during the SYN+ACK 10261 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10262 * and blow up the retransmit timer. 10263 */ 10264 /* 10265 * If all outstanding data is acked, stop retransmit timer and 10266 * remember to restart (more output or persist). If there is more 10267 * data to be acked, restart retransmit timer, using current 10268 * (possibly backed-off) value. 10269 */ 10270 if (acked == 0) { 10271 if (ofia) 10272 *ofia = ourfinisacked; 10273 return (0); 10274 } 10275 if (IN_RECOVERY(tp->t_flags)) { 10276 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10277 (SEQ_LT(th->th_ack, tp->snd_max))) { 10278 tcp_rack_partialack(tp); 10279 } else { 10280 rack_post_recovery(tp, th->th_ack); 10281 recovery = 1; 10282 } 10283 } 10284 /* 10285 * Let the congestion control algorithm update congestion control 10286 * related information. This typically means increasing the 10287 * congestion window. 10288 */ 10289 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10290 SOCKBUF_LOCK(&so->so_snd); 10291 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10292 tp->snd_wnd -= acked_amount; 10293 mfree = sbcut_locked(&so->so_snd, acked_amount); 10294 if ((sbused(&so->so_snd) == 0) && 10295 (acked > acked_amount) && 10296 (tp->t_state >= TCPS_FIN_WAIT_1) && 10297 (tp->t_flags & TF_SENTFIN)) { 10298 /* 10299 * We must be sure our fin 10300 * was sent and acked (we can be 10301 * in FIN_WAIT_1 without having 10302 * sent the fin). 10303 */ 10304 ourfinisacked = 1; 10305 } 10306 tp->snd_una = th->th_ack; 10307 if (acked_amount && sbavail(&so->so_snd)) 10308 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10309 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10310 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10311 sowwakeup_locked(so); 10312 m_freem(mfree); 10313 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10314 tp->snd_recover = tp->snd_una; 10315 10316 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10317 tp->snd_nxt = tp->snd_una; 10318 } 10319 if (under_pacing && 10320 (rack->use_fixed_rate == 0) && 10321 (rack->in_probe_rtt == 0) && 10322 rack->rc_gp_dyn_mul && 10323 rack->rc_always_pace) { 10324 /* Check if we are dragging bottom */ 10325 rack_check_bottom_drag(tp, rack, so, acked); 10326 } 10327 if (tp->snd_una == tp->snd_max) { 10328 /* Nothing left outstanding */ 10329 tp->t_flags &= ~TF_PREVVALID; 10330 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10331 rack->r_ctl.retran_during_recovery = 0; 10332 rack->r_ctl.dsack_byte_cnt = 0; 10333 if (rack->r_ctl.rc_went_idle_time == 0) 10334 rack->r_ctl.rc_went_idle_time = 1; 10335 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10336 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 10337 tp->t_acktime = 0; 10338 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10339 /* Set need output so persist might get set */ 10340 rack->r_wanted_output = 1; 10341 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10342 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10343 (sbavail(&so->so_snd) == 0) && 10344 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10345 /* 10346 * The socket was gone and the 10347 * peer sent data (now or in the past), time to 10348 * reset him. 10349 */ 10350 *ret_val = 1; 10351 /* tcp_close will kill the inp pre-log the Reset */ 10352 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10353 tp = tcp_close(tp); 10354 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10355 return (1); 10356 } 10357 } 10358 if (ofia) 10359 *ofia = ourfinisacked; 10360 return (0); 10361 } 10362 10363 10364 static void 10365 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 10366 int dir, uint32_t flags, struct rack_sendmap *rsm) 10367 { 10368 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 10369 union tcp_log_stackspecific log; 10370 struct timeval tv; 10371 10372 memset(&log, 0, sizeof(log)); 10373 log.u_bbr.flex1 = cnt; 10374 log.u_bbr.flex2 = split; 10375 log.u_bbr.flex3 = out; 10376 log.u_bbr.flex4 = line; 10377 log.u_bbr.flex5 = rack->r_must_retran; 10378 log.u_bbr.flex6 = flags; 10379 log.u_bbr.flex7 = rack->rc_has_collapsed; 10380 log.u_bbr.flex8 = dir; /* 10381 * 1 is collapsed, 0 is uncollapsed, 10382 * 2 is log of a rsm being marked, 3 is a split. 10383 */ 10384 if (rsm == NULL) 10385 log.u_bbr.rttProp = 0; 10386 else 10387 log.u_bbr.rttProp = (uint64_t)rsm; 10388 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10389 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10390 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10391 &rack->rc_inp->inp_socket->so_rcv, 10392 &rack->rc_inp->inp_socket->so_snd, 10393 TCP_RACK_LOG_COLLAPSE, 0, 10394 0, &log, false, &tv); 10395 } 10396 } 10397 10398 static void 10399 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line) 10400 { 10401 /* 10402 * Here all we do is mark the collapsed point and set the flag. 10403 * This may happen again and again, but there is no 10404 * sense splitting our map until we know where the 10405 * peer finally lands in the collapse. 
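 * The collapse point recorded below is snd_una + snd_wnd (the new
 * right edge the peer is willing to accept), and high_collapse_point
 * remembers snd_max at the time the window collapsed.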
10406 */ 10407 rack_trace_point(rack, RACK_TP_COLLAPSED_WND); 10408 if ((rack->rc_has_collapsed == 0) || 10409 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd))) 10410 counter_u64_add(rack_collapsed_win_seen, 1); 10411 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10412 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 10413 rack->rc_has_collapsed = 1; 10414 rack->r_collapse_point_valid = 1; 10415 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 10416 } 10417 10418 static void 10419 rack_un_collapse_window(struct tcp_rack *rack, int line) 10420 { 10421 struct rack_sendmap *nrsm, *rsm, fe; 10422 int cnt = 0, split = 0; 10423 #ifdef INVARIANTS 10424 struct rack_sendmap *insret; 10425 #endif 10426 10427 memset(&fe, 0, sizeof(fe)); 10428 rack->rc_has_collapsed = 0; 10429 fe.r_start = rack->r_ctl.last_collapse_point; 10430 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10431 if (rsm == NULL) { 10432 /* Nothing to do maybe the peer ack'ed it all */ 10433 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10434 return; 10435 } 10436 /* Now do we need to split this one? */ 10437 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 10438 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 10439 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 10440 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10441 if (nrsm == NULL) { 10442 /* We can't get a rsm, mark all? */ 10443 nrsm = rsm; 10444 goto no_split; 10445 } 10446 /* Clone it */ 10447 split = 1; 10448 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 10449 #ifndef INVARIANTS 10450 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10451 #else 10452 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10453 if (insret != NULL) { 10454 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10455 nrsm, insret, rack, rsm); 10456 } 10457 #endif 10458 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 10459 rack->r_ctl.last_collapse_point, __LINE__); 10460 if (rsm->r_in_tmap) { 10461 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10462 nrsm->r_in_tmap = 1; 10463 } 10464 /* 10465 * Set in the new RSM as the 10466 * collapsed starting point 10467 */ 10468 rsm = nrsm; 10469 } 10470 no_split: 10471 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10472 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10473 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 10474 cnt++; 10475 } 10476 if (cnt) { 10477 counter_u64_add(rack_collapsed_win, 1); 10478 } 10479 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10480 } 10481 10482 static void 10483 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10484 int32_t tlen, int32_t tfo_syn) 10485 { 10486 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10487 if (rack->rc_dack_mode && 10488 (tlen > 500) && 10489 (rack->rc_dack_toggle == 1)) { 10490 goto no_delayed_ack; 10491 } 10492 rack_timer_cancel(tp, rack, 10493 rack->r_ctl.rc_rcvtime, __LINE__); 10494 tp->t_flags |= TF_DELACK; 10495 } else { 10496 no_delayed_ack: 10497 rack->r_wanted_output = 1; 10498 tp->t_flags |= TF_ACKNOW; 10499 if (rack->rc_dack_mode) { 10500 if (tp->t_flags & TF_DELACK) 10501 rack->rc_dack_toggle = 1; 10502 else 10503 rack->rc_dack_toggle = 0; 10504 } 10505 } 10506 } 10507 10508 static void 10509 rack_validate_fo_sendwin_up(struct tcpcb 
*tp, struct tcp_rack *rack) 10510 { 10511 /* 10512 * If fast output is in progress, lets validate that 10513 * the new window did not shrink on us and make it 10514 * so fast output should end. 10515 */ 10516 if (rack->r_fast_output) { 10517 uint32_t out; 10518 10519 /* 10520 * Calculate what we will send if left as is 10521 * and compare that to our send window. 10522 */ 10523 out = ctf_outstanding(tp); 10524 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10525 /* ok we have an issue */ 10526 if (out >= tp->snd_wnd) { 10527 /* Turn off fast output the window is met or collapsed */ 10528 rack->r_fast_output = 0; 10529 } else { 10530 /* we have some room left */ 10531 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10532 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10533 /* If not at least 1 full segment never mind */ 10534 rack->r_fast_output = 0; 10535 } 10536 } 10537 } 10538 } 10539 } 10540 10541 10542 /* 10543 * Return value of 1, the TCB is unlocked and most 10544 * likely gone, return value of 0, the TCP is still 10545 * locked. 10546 */ 10547 static int 10548 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10549 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10550 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10551 { 10552 /* 10553 * Update window information. Don't look at window if no ACK: TAC's 10554 * send garbage on first SYN. 10555 */ 10556 int32_t nsegs; 10557 int32_t tfo_syn; 10558 struct tcp_rack *rack; 10559 10560 rack = (struct tcp_rack *)tp->t_fb_ptr; 10561 INP_WLOCK_ASSERT(tp->t_inpcb); 10562 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10563 if ((thflags & TH_ACK) && 10564 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10565 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10566 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10567 /* keep track of pure window updates */ 10568 if (tlen == 0 && 10569 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10570 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10571 tp->snd_wnd = tiwin; 10572 rack_validate_fo_sendwin_up(tp, rack); 10573 tp->snd_wl1 = th->th_seq; 10574 tp->snd_wl2 = th->th_ack; 10575 if (tp->snd_wnd > tp->max_sndwnd) 10576 tp->max_sndwnd = tp->snd_wnd; 10577 rack->r_wanted_output = 1; 10578 } else if (thflags & TH_ACK) { 10579 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10580 tp->snd_wnd = tiwin; 10581 rack_validate_fo_sendwin_up(tp, rack); 10582 tp->snd_wl1 = th->th_seq; 10583 tp->snd_wl2 = th->th_ack; 10584 } 10585 } 10586 if (tp->snd_wnd < ctf_outstanding(tp)) 10587 /* The peer collapsed the window */ 10588 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10589 else if (rack->rc_has_collapsed) 10590 rack_un_collapse_window(rack, __LINE__); 10591 if ((rack->r_collapse_point_valid) && 10592 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 10593 rack->r_collapse_point_valid = 0; 10594 /* Was persist timer active and now we have window space? */ 10595 if ((rack->rc_in_persist != 0) && 10596 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10597 rack->r_ctl.rc_pace_min_segs))) { 10598 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10599 tp->snd_nxt = tp->snd_max; 10600 /* Make sure we output to start the timer */ 10601 rack->r_wanted_output = 1; 10602 } 10603 /* Do we enter persists? 
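 * (we do so below when snd_wnd has dropped under the smaller of
 * rc_high_rwnd/2 and rc_pace_min_segs, the connection is established,
 * nothing is outstanding or the window has collapsed, and more data
 * is queued in the socket buffer than the window will hold)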
*/ 10604 if ((rack->rc_in_persist == 0) && 10605 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10606 TCPS_HAVEESTABLISHED(tp->t_state) && 10607 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10608 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 10609 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 10610 /* 10611 * Here the rwnd is less than 10612 * the pacing size, we are established, 10613 * nothing is outstanding, and there is 10614 * data to send. Enter persists. 10615 */ 10616 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10617 } 10618 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10619 m_freem(m); 10620 return (0); 10621 } 10622 /* 10623 * don't process the URG bit, ignore them drag 10624 * along the up. 10625 */ 10626 tp->rcv_up = tp->rcv_nxt; 10627 INP_WLOCK_ASSERT(tp->t_inpcb); 10628 10629 /* 10630 * Process the segment text, merging it into the TCP sequencing 10631 * queue, and arranging for acknowledgment of receipt if necessary. 10632 * This process logically involves adjusting tp->rcv_wnd as data is 10633 * presented to the user (this happens in tcp_usrreq.c, case 10634 * PRU_RCVD). If a FIN has already been received on this connection 10635 * then we just ignore the text. 10636 */ 10637 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10638 IS_FASTOPEN(tp->t_flags)); 10639 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10640 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10641 tcp_seq save_start = th->th_seq; 10642 tcp_seq save_rnxt = tp->rcv_nxt; 10643 int save_tlen = tlen; 10644 10645 m_adj(m, drop_hdrlen); /* delayed header drop */ 10646 /* 10647 * Insert segment which includes th into TCP reassembly 10648 * queue with control block tp. Set thflags to whether 10649 * reassembly now includes a segment with FIN. This handles 10650 * the common case inline (segment is the next to be 10651 * received on an established connection, and the queue is 10652 * empty), avoiding linkage into and removal from the queue 10653 * and repetition of various conversions. Set DELACK for 10654 * segments received in order, but ack immediately when 10655 * segments are out of order (so fast retransmit can work). 10656 */ 10657 if (th->th_seq == tp->rcv_nxt && 10658 SEGQ_EMPTY(tp) && 10659 (TCPS_HAVEESTABLISHED(tp->t_state) || 10660 tfo_syn)) { 10661 #ifdef NETFLIX_SB_LIMITS 10662 u_int mcnt, appended; 10663 10664 if (so->so_rcv.sb_shlim) { 10665 mcnt = m_memcnt(m); 10666 appended = 0; 10667 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10668 CFO_NOSLEEP, NULL) == false) { 10669 counter_u64_add(tcp_sb_shlim_fails, 1); 10670 m_freem(m); 10671 return (0); 10672 } 10673 } 10674 #endif 10675 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10676 tp->rcv_nxt += tlen; 10677 if (tlen && 10678 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10679 (tp->t_fbyte_in == 0)) { 10680 tp->t_fbyte_in = ticks; 10681 if (tp->t_fbyte_in == 0) 10682 tp->t_fbyte_in = 1; 10683 if (tp->t_fbyte_out && tp->t_fbyte_in) 10684 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10685 } 10686 thflags = tcp_get_flags(th) & TH_FIN; 10687 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10688 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10689 SOCKBUF_LOCK(&so->so_rcv); 10690 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10691 m_freem(m); 10692 } else 10693 #ifdef NETFLIX_SB_LIMITS 10694 appended = 10695 #endif 10696 sbappendstream_locked(&so->so_rcv, m, 0); 10697 10698 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10699 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10700 sorwakeup_locked(so); 10701 #ifdef NETFLIX_SB_LIMITS 10702 if (so->so_rcv.sb_shlim && appended != mcnt) 10703 counter_fo_release(so->so_rcv.sb_shlim, 10704 mcnt - appended); 10705 #endif 10706 } else { 10707 /* 10708 * XXX: Due to the header drop above "th" is 10709 * theoretically invalid by now. Fortunately 10710 * m_adj() doesn't actually frees any mbufs when 10711 * trimming from the head. 10712 */ 10713 tcp_seq temp = save_start; 10714 10715 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10716 tp->t_flags |= TF_ACKNOW; 10717 if (tp->t_flags & TF_WAKESOR) { 10718 tp->t_flags &= ~TF_WAKESOR; 10719 /* NB: sorwakeup_locked() does an implicit unlock. */ 10720 sorwakeup_locked(so); 10721 } 10722 } 10723 if ((tp->t_flags & TF_SACK_PERMIT) && 10724 (save_tlen > 0) && 10725 TCPS_HAVEESTABLISHED(tp->t_state)) { 10726 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10727 /* 10728 * DSACK actually handled in the fastpath 10729 * above. 10730 */ 10731 RACK_OPTS_INC(tcp_sack_path_1); 10732 tcp_update_sack_list(tp, save_start, 10733 save_start + save_tlen); 10734 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10735 if ((tp->rcv_numsacks >= 1) && 10736 (tp->sackblks[0].end == save_start)) { 10737 /* 10738 * Partial overlap, recorded at todrop 10739 * above. 10740 */ 10741 RACK_OPTS_INC(tcp_sack_path_2a); 10742 tcp_update_sack_list(tp, 10743 tp->sackblks[0].start, 10744 tp->sackblks[0].end); 10745 } else { 10746 RACK_OPTS_INC(tcp_sack_path_2b); 10747 tcp_update_dsack_list(tp, save_start, 10748 save_start + save_tlen); 10749 } 10750 } else if (tlen >= save_tlen) { 10751 /* Update of sackblks. */ 10752 RACK_OPTS_INC(tcp_sack_path_3); 10753 tcp_update_dsack_list(tp, save_start, 10754 save_start + save_tlen); 10755 } else if (tlen > 0) { 10756 RACK_OPTS_INC(tcp_sack_path_4); 10757 tcp_update_dsack_list(tp, save_start, 10758 save_start + tlen); 10759 } 10760 } 10761 } else { 10762 m_freem(m); 10763 thflags &= ~TH_FIN; 10764 } 10765 10766 /* 10767 * If FIN is received ACK the FIN and let the user know that the 10768 * connection is closing. 10769 */ 10770 if (thflags & TH_FIN) { 10771 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10772 /* The socket upcall is handled by socantrcvmore. */ 10773 socantrcvmore(so); 10774 /* 10775 * If connection is half-synchronized (ie NEEDSYN 10776 * flag on) then delay ACK, so it may be piggybacked 10777 * when SYN is sent. Otherwise, since we received a 10778 * FIN then no more input can be expected, send ACK 10779 * now. 10780 */ 10781 if (tp->t_flags & TF_NEEDSYN) { 10782 rack_timer_cancel(tp, rack, 10783 rack->r_ctl.rc_rcvtime, __LINE__); 10784 tp->t_flags |= TF_DELACK; 10785 } else { 10786 tp->t_flags |= TF_ACKNOW; 10787 } 10788 tp->rcv_nxt++; 10789 } 10790 switch (tp->t_state) { 10791 /* 10792 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10793 * CLOSE_WAIT state. 10794 */ 10795 case TCPS_SYN_RECEIVED: 10796 tp->t_starttime = ticks; 10797 /* FALLTHROUGH */ 10798 case TCPS_ESTABLISHED: 10799 rack_timer_cancel(tp, rack, 10800 rack->r_ctl.rc_rcvtime, __LINE__); 10801 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10802 break; 10803 10804 /* 10805 * If still in FIN_WAIT_1 STATE FIN has not been 10806 * acked so enter the CLOSING state. 10807 */ 10808 case TCPS_FIN_WAIT_1: 10809 rack_timer_cancel(tp, rack, 10810 rack->r_ctl.rc_rcvtime, __LINE__); 10811 tcp_state_change(tp, TCPS_CLOSING); 10812 break; 10813 10814 /* 10815 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10816 * starting the time-wait timer, turning off the 10817 * other standard timers. 
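 * Note that tcp_twstart() below hands the connection over to the
 * time-wait handling, so we return 1 to tell the caller (per this
 * function's contract above) that the tcpcb must not be touched
 * further.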
10818 */ 10819 case TCPS_FIN_WAIT_2: 10820 rack_timer_cancel(tp, rack, 10821 rack->r_ctl.rc_rcvtime, __LINE__); 10822 tcp_twstart(tp); 10823 return (1); 10824 } 10825 } 10826 /* 10827 * Return any desired output. 10828 */ 10829 if ((tp->t_flags & TF_ACKNOW) || 10830 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10831 rack->r_wanted_output = 1; 10832 } 10833 INP_WLOCK_ASSERT(tp->t_inpcb); 10834 return (0); 10835 } 10836 10837 /* 10838 * Here nothing is really faster, its just that we 10839 * have broken out the fast-data path also just like 10840 * the fast-ack. 10841 */ 10842 static int 10843 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10844 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10845 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10846 { 10847 int32_t nsegs; 10848 int32_t newsize = 0; /* automatic sockbuf scaling */ 10849 struct tcp_rack *rack; 10850 #ifdef NETFLIX_SB_LIMITS 10851 u_int mcnt, appended; 10852 #endif 10853 #ifdef TCPDEBUG 10854 /* 10855 * The size of tcp_saveipgen must be the size of the max ip header, 10856 * now IPv6. 10857 */ 10858 u_char tcp_saveipgen[IP6_HDR_LEN]; 10859 struct tcphdr tcp_savetcp; 10860 short ostate = 0; 10861 10862 #endif 10863 /* 10864 * If last ACK falls within this segment's sequence numbers, record 10865 * the timestamp. NOTE that the test is modified according to the 10866 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10867 */ 10868 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10869 return (0); 10870 } 10871 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10872 return (0); 10873 } 10874 if (tiwin && tiwin != tp->snd_wnd) { 10875 return (0); 10876 } 10877 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10878 return (0); 10879 } 10880 if (__predict_false((to->to_flags & TOF_TS) && 10881 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10882 return (0); 10883 } 10884 if (__predict_false((th->th_ack != tp->snd_una))) { 10885 return (0); 10886 } 10887 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10888 return (0); 10889 } 10890 if ((to->to_flags & TOF_TS) != 0 && 10891 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10892 tp->ts_recent_age = tcp_ts_getticks(); 10893 tp->ts_recent = to->to_tsval; 10894 } 10895 rack = (struct tcp_rack *)tp->t_fb_ptr; 10896 /* 10897 * This is a pure, in-sequence data packet with nothing on the 10898 * reassembly queue and we have enough buffer space to take it. 10899 */ 10900 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10901 10902 #ifdef NETFLIX_SB_LIMITS 10903 if (so->so_rcv.sb_shlim) { 10904 mcnt = m_memcnt(m); 10905 appended = 0; 10906 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10907 CFO_NOSLEEP, NULL) == false) { 10908 counter_u64_add(tcp_sb_shlim_fails, 1); 10909 m_freem(m); 10910 return (1); 10911 } 10912 } 10913 #endif 10914 /* Clean receiver SACK report if present */ 10915 if (tp->rcv_numsacks) 10916 tcp_clean_sackreport(tp); 10917 KMOD_TCPSTAT_INC(tcps_preddat); 10918 tp->rcv_nxt += tlen; 10919 if (tlen && 10920 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10921 (tp->t_fbyte_in == 0)) { 10922 tp->t_fbyte_in = ticks; 10923 if (tp->t_fbyte_in == 0) 10924 tp->t_fbyte_in = 1; 10925 if (tp->t_fbyte_out && tp->t_fbyte_in) 10926 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10927 } 10928 /* 10929 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10930 */ 10931 tp->snd_wl1 = th->th_seq; 10932 /* 10933 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 
10934 */ 10935 tp->rcv_up = tp->rcv_nxt; 10936 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10937 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10938 #ifdef TCPDEBUG 10939 if (so->so_options & SO_DEBUG) 10940 tcp_trace(TA_INPUT, ostate, tp, 10941 (void *)tcp_saveipgen, &tcp_savetcp, 0); 10942 #endif 10943 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10944 10945 /* Add data to socket buffer. */ 10946 SOCKBUF_LOCK(&so->so_rcv); 10947 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10948 m_freem(m); 10949 } else { 10950 /* 10951 * Set new socket buffer size. Give up when limit is 10952 * reached. 10953 */ 10954 if (newsize) 10955 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 10956 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10957 m_adj(m, drop_hdrlen); /* delayed header drop */ 10958 #ifdef NETFLIX_SB_LIMITS 10959 appended = 10960 #endif 10961 sbappendstream_locked(&so->so_rcv, m, 0); 10962 ctf_calc_rwin(so, tp); 10963 } 10964 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10965 /* NB: sorwakeup_locked() does an implicit unlock. */ 10966 sorwakeup_locked(so); 10967 #ifdef NETFLIX_SB_LIMITS 10968 if (so->so_rcv.sb_shlim && mcnt != appended) 10969 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10970 #endif 10971 rack_handle_delayed_ack(tp, rack, tlen, 0); 10972 if (tp->snd_una == tp->snd_max) 10973 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10974 return (1); 10975 } 10976 10977 /* 10978 * This subfunction is used to try to highly optimize the 10979 * fast path. We again allow window updates that are 10980 * in sequence to remain in the fast-path. We also add 10981 * in the __predict's to attempt to help the compiler. 10982 * Note that if we return a 0, then we can *not* process 10983 * it and the caller should push the packet into the 10984 * slow-path. 10985 */ 10986 static int 10987 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10988 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10989 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10990 { 10991 int32_t acked; 10992 int32_t nsegs; 10993 #ifdef TCPDEBUG 10994 /* 10995 * The size of tcp_saveipgen must be the size of the max ip header, 10996 * now IPv6. 10997 */ 10998 u_char tcp_saveipgen[IP6_HDR_LEN]; 10999 struct tcphdr tcp_savetcp; 11000 short ostate = 0; 11001 #endif 11002 int32_t under_pacing = 0; 11003 struct tcp_rack *rack; 11004 11005 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 11006 /* Old ack, behind (or duplicate to) the last one rcv'd */ 11007 return (0); 11008 } 11009 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 11010 /* Above what we have sent? */ 11011 return (0); 11012 } 11013 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 11014 /* We are retransmitting */ 11015 return (0); 11016 } 11017 if (__predict_false(tiwin == 0)) { 11018 /* zero window */ 11019 return (0); 11020 } 11021 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 11022 /* We need a SYN or a FIN, unlikely.. */ 11023 return (0); 11024 } 11025 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 11026 /* Timestamp is behind .. old ack with seq wrap? 
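 * As with the other early-return checks in this function, returning 0
 * here only pushes the segment to the slow path rather than deciding
 * its fate in the fast path.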
*/ 11027 return (0); 11028 } 11029 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 11030 /* Still recovering */ 11031 return (0); 11032 } 11033 rack = (struct tcp_rack *)tp->t_fb_ptr; 11034 if (rack->r_ctl.rc_sacked) { 11035 /* We have sack holes on our scoreboard */ 11036 return (0); 11037 } 11038 /* Ok if we reach here, we can process a fast-ack */ 11039 if (rack->gp_ready && 11040 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11041 under_pacing = 1; 11042 } 11043 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11044 rack_log_ack(tp, to, th, 0, 0); 11045 /* Did the window get updated? */ 11046 if (tiwin != tp->snd_wnd) { 11047 tp->snd_wnd = tiwin; 11048 rack_validate_fo_sendwin_up(tp, rack); 11049 tp->snd_wl1 = th->th_seq; 11050 if (tp->snd_wnd > tp->max_sndwnd) 11051 tp->max_sndwnd = tp->snd_wnd; 11052 } 11053 /* Do we exit persists? */ 11054 if ((rack->rc_in_persist != 0) && 11055 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 11056 rack->r_ctl.rc_pace_min_segs))) { 11057 rack_exit_persist(tp, rack, cts); 11058 } 11059 /* Do we enter persists? */ 11060 if ((rack->rc_in_persist == 0) && 11061 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 11062 TCPS_HAVEESTABLISHED(tp->t_state) && 11063 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 11064 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 11065 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 11066 /* 11067 * Here the rwnd is less than 11068 * the pacing size, we are established, 11069 * nothing is outstanding, and there is 11070 * data to send. Enter persists. 11071 */ 11072 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 11073 } 11074 /* 11075 * If last ACK falls within this segment's sequence numbers, record 11076 * the timestamp. NOTE that the test is modified according to the 11077 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 11078 */ 11079 if ((to->to_flags & TOF_TS) != 0 && 11080 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 11081 tp->ts_recent_age = tcp_ts_getticks(); 11082 tp->ts_recent = to->to_tsval; 11083 } 11084 /* 11085 * This is a pure ack for outstanding data. 11086 */ 11087 KMOD_TCPSTAT_INC(tcps_predack); 11088 11089 /* 11090 * "bad retransmit" recovery. 11091 */ 11092 if ((tp->t_flags & TF_PREVVALID) && 11093 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 11094 tp->t_flags &= ~TF_PREVVALID; 11095 if (tp->t_rxtshift == 1 && 11096 (int)(ticks - tp->t_badrxtwin) < 0) 11097 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 11098 } 11099 /* 11100 * Recalculate the transmit timer / rtt. 11101 * 11102 * Some boxes send broken timestamp replies during the SYN+ACK 11103 * phase, ignore timestamps of 0 or we could calculate a huge RTT 11104 * and blow up the retransmit timer. 11105 */ 11106 acked = BYTES_THIS_ACK(tp, th); 11107 11108 #ifdef TCP_HHOOK 11109 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. 
*/ 11110 hhook_run_tcp_est_in(tp, th, to); 11111 #endif 11112 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11113 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11114 if (acked) { 11115 struct mbuf *mfree; 11116 11117 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11118 SOCKBUF_LOCK(&so->so_snd); 11119 mfree = sbcut_locked(&so->so_snd, acked); 11120 tp->snd_una = th->th_ack; 11121 /* Note we want to hold the sb lock through the sendmap adjust */ 11122 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11123 /* Wake up the socket if we have room to write more */ 11124 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11125 sowwakeup_locked(so); 11126 m_freem(mfree); 11127 tp->t_rxtshift = 0; 11128 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11129 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11130 rack->rc_tlp_in_progress = 0; 11131 rack->r_ctl.rc_tlp_cnt_out = 0; 11132 /* 11133 * If it is the RXT timer we want to 11134 * stop it, so we can restart a TLP. 11135 */ 11136 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11137 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11138 #ifdef NETFLIX_HTTP_LOGGING 11139 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11140 #endif 11141 } 11142 /* 11143 * Let the congestion control algorithm update congestion control 11144 * related information. This typically means increasing the 11145 * congestion window. 11146 */ 11147 if (tp->snd_wnd < ctf_outstanding(tp)) { 11148 /* The peer collapsed the window */ 11149 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 11150 } else if (rack->rc_has_collapsed) 11151 rack_un_collapse_window(rack, __LINE__); 11152 if ((rack->r_collapse_point_valid) && 11153 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 11154 rack->r_collapse_point_valid = 0; 11155 /* 11156 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11157 */ 11158 tp->snd_wl2 = th->th_ack; 11159 tp->t_dupacks = 0; 11160 m_freem(m); 11161 /* ND6_HINT(tp); *//* Some progress has been made. */ 11162 11163 /* 11164 * If all outstanding data are acked, stop retransmit timer, 11165 * otherwise restart timer using current (possibly backed-off) 11166 * value. If process is waiting for space, wakeup/selwakeup/signal. 11167 * If data are ready to send, let tcp_output decide between more 11168 * output or persist. 
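 * In this stack that decision is left to the rack output path: below
 * we note whether output is wanted (r_wanted_output) and, when the
 * fast-output path is active, extend it by the amount just acked via
 * rack_gain_for_fastoutput().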
11169 */ 11170 #ifdef TCPDEBUG 11171 if (so->so_options & SO_DEBUG) 11172 tcp_trace(TA_INPUT, ostate, tp, 11173 (void *)tcp_saveipgen, 11174 &tcp_savetcp, 0); 11175 #endif 11176 if (under_pacing && 11177 (rack->use_fixed_rate == 0) && 11178 (rack->in_probe_rtt == 0) && 11179 rack->rc_gp_dyn_mul && 11180 rack->rc_always_pace) { 11181 /* Check if we are dragging bottom */ 11182 rack_check_bottom_drag(tp, rack, so, acked); 11183 } 11184 if (tp->snd_una == tp->snd_max) { 11185 tp->t_flags &= ~TF_PREVVALID; 11186 rack->r_ctl.retran_during_recovery = 0; 11187 rack->r_ctl.dsack_byte_cnt = 0; 11188 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11189 if (rack->r_ctl.rc_went_idle_time == 0) 11190 rack->r_ctl.rc_went_idle_time = 1; 11191 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11192 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 11193 tp->t_acktime = 0; 11194 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11195 } 11196 if (acked && rack->r_fast_output) 11197 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11198 if (sbavail(&so->so_snd)) { 11199 rack->r_wanted_output = 1; 11200 } 11201 return (1); 11202 } 11203 11204 /* 11205 * Return value of 1, the TCB is unlocked and most 11206 * likely gone, return value of 0, the TCP is still 11207 * locked. 11208 */ 11209 static int 11210 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11211 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11212 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11213 { 11214 int32_t ret_val = 0; 11215 int32_t todrop; 11216 int32_t ourfinisacked = 0; 11217 struct tcp_rack *rack; 11218 11219 ctf_calc_rwin(so, tp); 11220 /* 11221 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11222 * SYN, drop the input. if seg contains a RST, then drop the 11223 * connection. if seg does not contain SYN, then drop it. Otherwise 11224 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11225 * tp->irs if seg contains ack then advance tp->snd_una if seg 11226 * contains an ECE and ECN support is enabled, the stream is ECN 11227 * capable. if SYN has been acked change to ESTABLISHED else 11228 * SYN_RCVD state arrange for segment to be acked (eventually) 11229 * continue processing rest of data/controls. 11230 */ 11231 if ((thflags & TH_ACK) && 11232 (SEQ_LEQ(th->th_ack, tp->iss) || 11233 SEQ_GT(th->th_ack, tp->snd_max))) { 11234 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11235 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11236 return (1); 11237 } 11238 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11239 TCP_PROBE5(connect__refused, NULL, tp, 11240 mtod(m, const char *), tp, th); 11241 tp = tcp_drop(tp, ECONNREFUSED); 11242 ctf_do_drop(m, tp); 11243 return (1); 11244 } 11245 if (thflags & TH_RST) { 11246 ctf_do_drop(m, tp); 11247 return (1); 11248 } 11249 if (!(thflags & TH_SYN)) { 11250 ctf_do_drop(m, tp); 11251 return (1); 11252 } 11253 tp->irs = th->th_seq; 11254 tcp_rcvseqinit(tp); 11255 rack = (struct tcp_rack *)tp->t_fb_ptr; 11256 if (thflags & TH_ACK) { 11257 int tfo_partial = 0; 11258 11259 KMOD_TCPSTAT_INC(tcps_connects); 11260 soisconnected(so); 11261 #ifdef MAC 11262 mac_socketpeer_set_from_mbuf(m, so); 11263 #endif 11264 /* Do window scaling on this connection? 
*/ 11265 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11266 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11267 tp->rcv_scale = tp->request_r_scale; 11268 } 11269 tp->rcv_adv += min(tp->rcv_wnd, 11270 TCP_MAXWIN << tp->rcv_scale); 11271 /* 11272 * If not all the data that was sent in the TFO SYN 11273 * has been acked, resend the remainder right away. 11274 */ 11275 if (IS_FASTOPEN(tp->t_flags) && 11276 (tp->snd_una != tp->snd_max)) { 11277 tp->snd_nxt = th->th_ack; 11278 tfo_partial = 1; 11279 } 11280 /* 11281 * If there's data, delay ACK; if there's also a FIN ACKNOW 11282 * will be turned on later. 11283 */ 11284 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11285 rack_timer_cancel(tp, rack, 11286 rack->r_ctl.rc_rcvtime, __LINE__); 11287 tp->t_flags |= TF_DELACK; 11288 } else { 11289 rack->r_wanted_output = 1; 11290 tp->t_flags |= TF_ACKNOW; 11291 rack->rc_dack_toggle = 0; 11292 } 11293 11294 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11295 11296 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11297 /* 11298 * We advance snd_una for the 11299 * fast open case. If th_ack is 11300 * acknowledging data beyond 11301 * snd_una we can't just call 11302 * ack-processing since the 11303 * data stream in our send-map 11304 * will start at snd_una + 1 (one 11305 * beyond the SYN). If its just 11306 * equal we don't need to do that 11307 * and there is no send_map. 11308 */ 11309 tp->snd_una++; 11310 } 11311 /* 11312 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11313 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11314 */ 11315 tp->t_starttime = ticks; 11316 if (tp->t_flags & TF_NEEDFIN) { 11317 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11318 tp->t_flags &= ~TF_NEEDFIN; 11319 thflags &= ~TH_SYN; 11320 } else { 11321 tcp_state_change(tp, TCPS_ESTABLISHED); 11322 TCP_PROBE5(connect__established, NULL, tp, 11323 mtod(m, const char *), tp, th); 11324 rack_cc_conn_init(tp); 11325 } 11326 } else { 11327 /* 11328 * Received initial SYN in SYN-SENT[*] state => simultaneous 11329 * open. If segment contains CC option and there is a 11330 * cached CC, apply TAO test. If it succeeds, connection is * 11331 * half-synchronized. Otherwise, do 3-way handshake: 11332 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11333 * there was no CC option, clear cached CC value. 11334 */ 11335 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 11336 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11337 } 11338 INP_WLOCK_ASSERT(tp->t_inpcb); 11339 /* 11340 * Advance th->th_seq to correspond to first data byte. If data, 11341 * trim to stay within window, dropping FIN if necessary. 11342 */ 11343 th->th_seq++; 11344 if (tlen > tp->rcv_wnd) { 11345 todrop = tlen - tp->rcv_wnd; 11346 m_adj(m, -todrop); 11347 tlen = tp->rcv_wnd; 11348 thflags &= ~TH_FIN; 11349 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11350 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11351 } 11352 tp->snd_wl1 = th->th_seq - 1; 11353 tp->rcv_up = th->th_seq; 11354 /* 11355 * Client side of transaction: already sent SYN and data. If the 11356 * remote host used T/TCP to validate the SYN, our data will be 11357 * ACK'd; if so, enter normal data segment processing in the middle 11358 * of step 5, ack processing. Otherwise, goto step 6. 
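 * When a timestamp echo is present, the code below also takes an RTT
 * sample for the SYN exchange: t = (tcp_ts_getticks() - to->to_tsecr),
 * scaled from millisecond ticks to microseconds by HPTS_USEC_IN_MSEC,
 * is fed into the rack RTT/timer machinery.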
11359 */ 11360 if (thflags & TH_ACK) { 11361 /* For syn-sent we need to possibly update the rtt */ 11362 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11363 uint32_t t, mcts; 11364 11365 mcts = tcp_ts_getticks(); 11366 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11367 if (!tp->t_rttlow || tp->t_rttlow > t) 11368 tp->t_rttlow = t; 11369 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11370 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11371 tcp_rack_xmit_timer_commit(rack, tp); 11372 } 11373 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11374 return (ret_val); 11375 /* We may have changed to FIN_WAIT_1 above */ 11376 if (tp->t_state == TCPS_FIN_WAIT_1) { 11377 /* 11378 * In FIN_WAIT_1 STATE in addition to the processing 11379 * for the ESTABLISHED state if our FIN is now 11380 * acknowledged then enter FIN_WAIT_2. 11381 */ 11382 if (ourfinisacked) { 11383 /* 11384 * If we can't receive any more data, then 11385 * closing user can proceed. Starting the 11386 * timer is contrary to the specification, 11387 * but if we don't get a FIN we'll hang 11388 * forever. 11389 * 11390 * XXXjl: we should release the tp also, and 11391 * use a compressed state. 11392 */ 11393 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11394 soisdisconnected(so); 11395 tcp_timer_activate(tp, TT_2MSL, 11396 (tcp_fast_finwait2_recycle ? 11397 tcp_finwait2_timeout : 11398 TP_MAXIDLE(tp))); 11399 } 11400 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11401 } 11402 } 11403 } 11404 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11405 tiwin, thflags, nxt_pkt)); 11406 } 11407 11408 /* 11409 * Return value of 1, the TCB is unlocked and most 11410 * likely gone, return value of 0, the TCP is still 11411 * locked. 11412 */ 11413 static int 11414 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11415 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11416 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11417 { 11418 struct tcp_rack *rack; 11419 int32_t ret_val = 0; 11420 int32_t ourfinisacked = 0; 11421 11422 ctf_calc_rwin(so, tp); 11423 if ((thflags & TH_ACK) && 11424 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11425 SEQ_GT(th->th_ack, tp->snd_max))) { 11426 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11427 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11428 return (1); 11429 } 11430 rack = (struct tcp_rack *)tp->t_fb_ptr; 11431 if (IS_FASTOPEN(tp->t_flags)) { 11432 /* 11433 * When a TFO connection is in SYN_RECEIVED, the 11434 * only valid packets are the initial SYN, a 11435 * retransmit/copy of the initial SYN (possibly with 11436 * a subset of the original data), a valid ACK, a 11437 * FIN, or a RST. 
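 * Anything else is dealt with just below: a SYN|ACK draws a reset,
 * a bare SYN is dropped while one of our retransmit timers is
 * pending, and segments carrying none of ACK/FIN/RST are dropped.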
11438 */ 11439 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11440 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11441 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11442 return (1); 11443 } else if (thflags & TH_SYN) { 11444 /* non-initial SYN is ignored */ 11445 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11446 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11447 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11448 ctf_do_drop(m, NULL); 11449 return (0); 11450 } 11451 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11452 ctf_do_drop(m, NULL); 11453 return (0); 11454 } 11455 } 11456 11457 if ((thflags & TH_RST) || 11458 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11459 return (__ctf_process_rst(m, th, so, tp, 11460 &rack->r_ctl.challenge_ack_ts, 11461 &rack->r_ctl.challenge_ack_cnt)); 11462 /* 11463 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11464 * it's less than ts_recent, drop it. 11465 */ 11466 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11467 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11468 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11469 return (ret_val); 11470 } 11471 /* 11472 * In the SYN-RECEIVED state, validate that the packet belongs to 11473 * this connection before trimming the data to fit the receive 11474 * window. Check the sequence number versus IRS since we know the 11475 * sequence numbers haven't wrapped. This is a partial fix for the 11476 * "LAND" DoS attack. 11477 */ 11478 if (SEQ_LT(th->th_seq, tp->irs)) { 11479 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11480 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11481 return (1); 11482 } 11483 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11484 &rack->r_ctl.challenge_ack_ts, 11485 &rack->r_ctl.challenge_ack_cnt)) { 11486 return (ret_val); 11487 } 11488 /* 11489 * If last ACK falls within this segment's sequence numbers, record 11490 * its timestamp. NOTE: 1) That the test incorporates suggestions 11491 * from the latest proposal of the tcplw@cray.com list (Braden 11492 * 1993/04/26). 2) That updating only on newer timestamps interferes 11493 * with our earlier PAWS tests, so this check should be solely 11494 * predicated on the sequence space of this segment. 3) That we 11495 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11496 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11497 * SEG.Len, This modified check allows us to overcome RFC1323's 11498 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11499 * p.869. In such cases, we can still calculate the RTT correctly 11500 * when RCV.NXT == Last.ACK.Sent. 11501 */ 11502 if ((to->to_flags & TOF_TS) != 0 && 11503 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11504 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11505 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11506 tp->ts_recent_age = tcp_ts_getticks(); 11507 tp->ts_recent = to->to_tsval; 11508 } 11509 tp->snd_wnd = tiwin; 11510 rack_validate_fo_sendwin_up(tp, rack); 11511 /* 11512 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11513 * is on (half-synchronized state), then queue data for later 11514 * processing; else drop segment and return. 
11515 */ 11516 if ((thflags & TH_ACK) == 0) { 11517 if (IS_FASTOPEN(tp->t_flags)) { 11518 rack_cc_conn_init(tp); 11519 } 11520 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11521 tiwin, thflags, nxt_pkt)); 11522 } 11523 KMOD_TCPSTAT_INC(tcps_connects); 11524 if (tp->t_flags & TF_SONOTCONN) { 11525 tp->t_flags &= ~TF_SONOTCONN; 11526 soisconnected(so); 11527 } 11528 /* Do window scaling? */ 11529 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11530 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11531 tp->rcv_scale = tp->request_r_scale; 11532 } 11533 /* 11534 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11535 * FIN-WAIT-1 11536 */ 11537 tp->t_starttime = ticks; 11538 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11539 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11540 tp->t_tfo_pending = NULL; 11541 } 11542 if (tp->t_flags & TF_NEEDFIN) { 11543 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11544 tp->t_flags &= ~TF_NEEDFIN; 11545 } else { 11546 tcp_state_change(tp, TCPS_ESTABLISHED); 11547 TCP_PROBE5(accept__established, NULL, tp, 11548 mtod(m, const char *), tp, th); 11549 /* 11550 * TFO connections call cc_conn_init() during SYN 11551 * processing. Calling it again here for such connections 11552 * is not harmless as it would undo the snd_cwnd reduction 11553 * that occurs when a TFO SYN|ACK is retransmitted. 11554 */ 11555 if (!IS_FASTOPEN(tp->t_flags)) 11556 rack_cc_conn_init(tp); 11557 } 11558 /* 11559 * Account for the ACK of our SYN prior to 11560 * regular ACK processing below, except for 11561 * simultaneous SYN, which is handled later. 11562 */ 11563 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11564 tp->snd_una++; 11565 /* 11566 * If segment contains data or ACK, will call tcp_reass() later; if 11567 * not, do so now to pass queued data to user. 11568 */ 11569 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11570 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11571 (struct mbuf *)0); 11572 if (tp->t_flags & TF_WAKESOR) { 11573 tp->t_flags &= ~TF_WAKESOR; 11574 /* NB: sorwakeup_locked() does an implicit unlock. */ 11575 sorwakeup_locked(so); 11576 } 11577 } 11578 tp->snd_wl1 = th->th_seq - 1; 11579 /* For syn-recv we need to possibly update the rtt */ 11580 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11581 uint32_t t, mcts; 11582 11583 mcts = tcp_ts_getticks(); 11584 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11585 if (!tp->t_rttlow || tp->t_rttlow > t) 11586 tp->t_rttlow = t; 11587 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11588 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11589 tcp_rack_xmit_timer_commit(rack, tp); 11590 } 11591 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11592 return (ret_val); 11593 } 11594 if (tp->t_state == TCPS_FIN_WAIT_1) { 11595 /* We could have went to FIN_WAIT_1 (or EST) above */ 11596 /* 11597 * In FIN_WAIT_1 STATE in addition to the processing for the 11598 * ESTABLISHED state if our FIN is now acknowledged then 11599 * enter FIN_WAIT_2. 11600 */ 11601 if (ourfinisacked) { 11602 /* 11603 * If we can't receive any more data, then closing 11604 * user can proceed. Starting the timer is contrary 11605 * to the specification, but if we don't get a FIN 11606 * we'll hang forever. 11607 * 11608 * XXXjl: we should release the tp also, and use a 11609 * compressed state. 
11610 */ 11611 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11612 soisdisconnected(so); 11613 tcp_timer_activate(tp, TT_2MSL, 11614 (tcp_fast_finwait2_recycle ? 11615 tcp_finwait2_timeout : 11616 TP_MAXIDLE(tp))); 11617 } 11618 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11619 } 11620 } 11621 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11622 tiwin, thflags, nxt_pkt)); 11623 } 11624 11625 /* 11626 * Return value of 1, the TCB is unlocked and most 11627 * likely gone, return value of 0, the TCP is still 11628 * locked. 11629 */ 11630 static int 11631 rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 11632 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11633 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11634 { 11635 int32_t ret_val = 0; 11636 struct tcp_rack *rack; 11637 11638 /* 11639 * Header prediction: check for the two common cases of a 11640 * uni-directional data xfer. If the packet has no control flags, 11641 * is in-sequence, the window didn't change and we're not 11642 * retransmitting, it's a candidate. If the length is zero and the 11643 * ack moved forward, we're the sender side of the xfer. Just free 11644 * the data acked & wake any higher level process that was blocked 11645 * waiting for space. If the length is non-zero and the ack didn't 11646 * move, we're the receiver side. If we're getting packets in-order 11647 * (the reassembly queue is empty), add the data toc The socket 11648 * buffer and note that we need a delayed ack. Make sure that the 11649 * hidden state-flags are also off. Since we check for 11650 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 11651 */ 11652 rack = (struct tcp_rack *)tp->t_fb_ptr; 11653 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 11654 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) && 11655 __predict_true(SEGQ_EMPTY(tp)) && 11656 __predict_true(th->th_seq == tp->rcv_nxt)) { 11657 if (tlen == 0) { 11658 if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 11659 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { 11660 return (0); 11661 } 11662 } else { 11663 if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 11664 tiwin, nxt_pkt, iptos)) { 11665 return (0); 11666 } 11667 } 11668 } 11669 ctf_calc_rwin(so, tp); 11670 11671 if ((thflags & TH_RST) || 11672 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11673 return (__ctf_process_rst(m, th, so, tp, 11674 &rack->r_ctl.challenge_ack_ts, 11675 &rack->r_ctl.challenge_ack_cnt)); 11676 11677 /* 11678 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11679 * synchronized state. 11680 */ 11681 if (thflags & TH_SYN) { 11682 ctf_challenge_ack(m, th, tp, &ret_val); 11683 return (ret_val); 11684 } 11685 /* 11686 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11687 * it's less than ts_recent, drop it. 11688 */ 11689 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11690 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11691 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11692 return (ret_val); 11693 } 11694 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11695 &rack->r_ctl.challenge_ack_ts, 11696 &rack->r_ctl.challenge_ack_cnt)) { 11697 return (ret_val); 11698 } 11699 /* 11700 * If last ACK falls within this segment's sequence numbers, record 11701 * its timestamp. NOTE: 1) That the test incorporates suggestions 11702 * from the latest proposal of the tcplw@cray.com list (Braden 11703 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11704 * with our earlier PAWS tests, so this check should be solely 11705 * predicated on the sequence space of this segment. 3) That we 11706 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11707 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11708 * SEG.Len, This modified check allows us to overcome RFC1323's 11709 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11710 * p.869. In such cases, we can still calculate the RTT correctly 11711 * when RCV.NXT == Last.ACK.Sent. 11712 */ 11713 if ((to->to_flags & TOF_TS) != 0 && 11714 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11715 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11716 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11717 tp->ts_recent_age = tcp_ts_getticks(); 11718 tp->ts_recent = to->to_tsval; 11719 } 11720 /* 11721 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11722 * is on (half-synchronized state), then queue data for later 11723 * processing; else drop segment and return. 11724 */ 11725 if ((thflags & TH_ACK) == 0) { 11726 if (tp->t_flags & TF_NEEDSYN) { 11727 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11728 tiwin, thflags, nxt_pkt)); 11729 11730 } else if (tp->t_flags & TF_ACKNOW) { 11731 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11732 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11733 return (ret_val); 11734 } else { 11735 ctf_do_drop(m, NULL); 11736 return (0); 11737 } 11738 } 11739 /* 11740 * Ack processing. 11741 */ 11742 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11743 return (ret_val); 11744 } 11745 if (sbavail(&so->so_snd)) { 11746 if (ctf_progress_timeout_check(tp, true)) { 11747 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11748 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11749 return (1); 11750 } 11751 } 11752 /* State changes only happen in rack_process_data() */ 11753 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11754 tiwin, thflags, nxt_pkt)); 11755 } 11756 11757 /* 11758 * Return value of 1, the TCB is unlocked and most 11759 * likely gone, return value of 0, the TCP is still 11760 * locked. 11761 */ 11762 static int 11763 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11764 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11765 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11766 { 11767 int32_t ret_val = 0; 11768 struct tcp_rack *rack; 11769 11770 rack = (struct tcp_rack *)tp->t_fb_ptr; 11771 ctf_calc_rwin(so, tp); 11772 if ((thflags & TH_RST) || 11773 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11774 return (__ctf_process_rst(m, th, so, tp, 11775 &rack->r_ctl.challenge_ack_ts, 11776 &rack->r_ctl.challenge_ack_cnt)); 11777 /* 11778 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11779 * synchronized state. 11780 */ 11781 if (thflags & TH_SYN) { 11782 ctf_challenge_ack(m, th, tp, &ret_val); 11783 return (ret_val); 11784 } 11785 /* 11786 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11787 * it's less than ts_recent, drop it. 
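 *
 * For illustration: if ts_recent is 500 and the arriving segment
 * carries TSval 480, TSTMP_LT(480, 500) is true and the segment
 * fails PAWS; ctf_ts_check() then either drops it (after acking)
 * or, if ts_recent has gone stale for longer than the PAWS idle
 * window, invalidates ts_recent and lets the segment through.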
11788 */ 11789 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11790 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11791 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11792 return (ret_val); 11793 } 11794 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11795 &rack->r_ctl.challenge_ack_ts, 11796 &rack->r_ctl.challenge_ack_cnt)) { 11797 return (ret_val); 11798 } 11799 /* 11800 * If last ACK falls within this segment's sequence numbers, record 11801 * its timestamp. NOTE: 1) That the test incorporates suggestions 11802 * from the latest proposal of the tcplw@cray.com list (Braden 11803 * 1993/04/26). 2) That updating only on newer timestamps interferes 11804 * with our earlier PAWS tests, so this check should be solely 11805 * predicated on the sequence space of this segment. 3) That we 11806 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11807 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11808 * SEG.Len, This modified check allows us to overcome RFC1323's 11809 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11810 * p.869. In such cases, we can still calculate the RTT correctly 11811 * when RCV.NXT == Last.ACK.Sent. 11812 */ 11813 if ((to->to_flags & TOF_TS) != 0 && 11814 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11815 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11816 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11817 tp->ts_recent_age = tcp_ts_getticks(); 11818 tp->ts_recent = to->to_tsval; 11819 } 11820 /* 11821 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11822 * is on (half-synchronized state), then queue data for later 11823 * processing; else drop segment and return. 11824 */ 11825 if ((thflags & TH_ACK) == 0) { 11826 if (tp->t_flags & TF_NEEDSYN) { 11827 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11828 tiwin, thflags, nxt_pkt)); 11829 11830 } else if (tp->t_flags & TF_ACKNOW) { 11831 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11832 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11833 return (ret_val); 11834 } else { 11835 ctf_do_drop(m, NULL); 11836 return (0); 11837 } 11838 } 11839 /* 11840 * Ack processing. 
11841 */ 11842 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11843 return (ret_val); 11844 } 11845 if (sbavail(&so->so_snd)) { 11846 if (ctf_progress_timeout_check(tp, true)) { 11847 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11848 tp, tick, PROGRESS_DROP, __LINE__); 11849 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11850 return (1); 11851 } 11852 } 11853 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11854 tiwin, thflags, nxt_pkt)); 11855 } 11856 11857 static int 11858 rack_check_data_after_close(struct mbuf *m, 11859 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11860 { 11861 struct tcp_rack *rack; 11862 11863 rack = (struct tcp_rack *)tp->t_fb_ptr; 11864 if (rack->rc_allow_data_af_clo == 0) { 11865 close_now: 11866 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11867 /* tcp_close will kill the inp pre-log the Reset */ 11868 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11869 tp = tcp_close(tp); 11870 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11871 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11872 return (1); 11873 } 11874 if (sbavail(&so->so_snd) == 0) 11875 goto close_now; 11876 /* Ok we allow data that is ignored and a followup reset */ 11877 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11878 tp->rcv_nxt = th->th_seq + *tlen; 11879 tp->t_flags2 |= TF2_DROP_AF_DATA; 11880 rack->r_wanted_output = 1; 11881 *tlen = 0; 11882 return (0); 11883 } 11884 11885 /* 11886 * Return value of 1, the TCB is unlocked and most 11887 * likely gone, return value of 0, the TCP is still 11888 * locked. 11889 */ 11890 static int 11891 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11892 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11893 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11894 { 11895 int32_t ret_val = 0; 11896 int32_t ourfinisacked = 0; 11897 struct tcp_rack *rack; 11898 11899 rack = (struct tcp_rack *)tp->t_fb_ptr; 11900 ctf_calc_rwin(so, tp); 11901 11902 if ((thflags & TH_RST) || 11903 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11904 return (__ctf_process_rst(m, th, so, tp, 11905 &rack->r_ctl.challenge_ack_ts, 11906 &rack->r_ctl.challenge_ack_cnt)); 11907 /* 11908 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11909 * synchronized state. 11910 */ 11911 if (thflags & TH_SYN) { 11912 ctf_challenge_ack(m, th, tp, &ret_val); 11913 return (ret_val); 11914 } 11915 /* 11916 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11917 * it's less than ts_recent, drop it. 11918 */ 11919 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11920 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11921 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11922 return (ret_val); 11923 } 11924 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11925 &rack->r_ctl.challenge_ack_ts, 11926 &rack->r_ctl.challenge_ack_cnt)) { 11927 return (ret_val); 11928 } 11929 /* 11930 * If new data are received on a connection after the user processes 11931 * are gone, then RST the other end. 11932 */ 11933 if ((tp->t_flags & TF_CLOSED) && tlen && 11934 rack_check_data_after_close(m, tp, &tlen, th, so)) 11935 return (1); 11936 /* 11937 * If last ACK falls within this segment's sequence numbers, record 11938 * its timestamp. NOTE: 1) That the test incorporates suggestions 11939 * from the latest proposal of the tcplw@cray.com list (Braden 11940 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11941 * with our earlier PAWS tests, so this check should be solely 11942 * predicated on the sequence space of this segment. 3) That we 11943 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11944 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11945 * SEG.Len, This modified check allows us to overcome RFC1323's 11946 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11947 * p.869. In such cases, we can still calculate the RTT correctly 11948 * when RCV.NXT == Last.ACK.Sent. 11949 */ 11950 if ((to->to_flags & TOF_TS) != 0 && 11951 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11952 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11953 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11954 tp->ts_recent_age = tcp_ts_getticks(); 11955 tp->ts_recent = to->to_tsval; 11956 } 11957 /* 11958 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11959 * is on (half-synchronized state), then queue data for later 11960 * processing; else drop segment and return. 11961 */ 11962 if ((thflags & TH_ACK) == 0) { 11963 if (tp->t_flags & TF_NEEDSYN) { 11964 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11965 tiwin, thflags, nxt_pkt)); 11966 } else if (tp->t_flags & TF_ACKNOW) { 11967 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11968 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11969 return (ret_val); 11970 } else { 11971 ctf_do_drop(m, NULL); 11972 return (0); 11973 } 11974 } 11975 /* 11976 * Ack processing. 11977 */ 11978 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11979 return (ret_val); 11980 } 11981 if (ourfinisacked) { 11982 /* 11983 * If we can't receive any more data, then closing user can 11984 * proceed. Starting the timer is contrary to the 11985 * specification, but if we don't get a FIN we'll hang 11986 * forever. 11987 * 11988 * XXXjl: we should release the tp also, and use a 11989 * compressed state. 11990 */ 11991 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11992 soisdisconnected(so); 11993 tcp_timer_activate(tp, TT_2MSL, 11994 (tcp_fast_finwait2_recycle ? 11995 tcp_finwait2_timeout : 11996 TP_MAXIDLE(tp))); 11997 } 11998 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11999 } 12000 if (sbavail(&so->so_snd)) { 12001 if (ctf_progress_timeout_check(tp, true)) { 12002 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12003 tp, tick, PROGRESS_DROP, __LINE__); 12004 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12005 return (1); 12006 } 12007 } 12008 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12009 tiwin, thflags, nxt_pkt)); 12010 } 12011 12012 /* 12013 * Return value of 1, the TCB is unlocked and most 12014 * likely gone, return value of 0, the TCP is still 12015 * locked. 
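 *
 * (In CLOSING both sides have sent a FIN and we are waiting for the
 * ack of ours; once rack_process_ack() reports our FIN as acked the
 * connection is moved to TIME-WAIT via tcp_twstart() below.)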
12016 */ 12017 static int 12018 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 12019 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12020 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12021 { 12022 int32_t ret_val = 0; 12023 int32_t ourfinisacked = 0; 12024 struct tcp_rack *rack; 12025 12026 rack = (struct tcp_rack *)tp->t_fb_ptr; 12027 ctf_calc_rwin(so, tp); 12028 12029 if ((thflags & TH_RST) || 12030 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12031 return (__ctf_process_rst(m, th, so, tp, 12032 &rack->r_ctl.challenge_ack_ts, 12033 &rack->r_ctl.challenge_ack_cnt)); 12034 /* 12035 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12036 * synchronized state. 12037 */ 12038 if (thflags & TH_SYN) { 12039 ctf_challenge_ack(m, th, tp, &ret_val); 12040 return (ret_val); 12041 } 12042 /* 12043 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12044 * it's less than ts_recent, drop it. 12045 */ 12046 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12047 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12048 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12049 return (ret_val); 12050 } 12051 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12052 &rack->r_ctl.challenge_ack_ts, 12053 &rack->r_ctl.challenge_ack_cnt)) { 12054 return (ret_val); 12055 } 12056 /* 12057 * If new data are received on a connection after the user processes 12058 * are gone, then RST the other end. 12059 */ 12060 if ((tp->t_flags & TF_CLOSED) && tlen && 12061 rack_check_data_after_close(m, tp, &tlen, th, so)) 12062 return (1); 12063 /* 12064 * If last ACK falls within this segment's sequence numbers, record 12065 * its timestamp. NOTE: 1) That the test incorporates suggestions 12066 * from the latest proposal of the tcplw@cray.com list (Braden 12067 * 1993/04/26). 2) That updating only on newer timestamps interferes 12068 * with our earlier PAWS tests, so this check should be solely 12069 * predicated on the sequence space of this segment. 3) That we 12070 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12071 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12072 * SEG.Len, This modified check allows us to overcome RFC1323's 12073 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12074 * p.869. In such cases, we can still calculate the RTT correctly 12075 * when RCV.NXT == Last.ACK.Sent. 12076 */ 12077 if ((to->to_flags & TOF_TS) != 0 && 12078 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12079 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12080 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12081 tp->ts_recent_age = tcp_ts_getticks(); 12082 tp->ts_recent = to->to_tsval; 12083 } 12084 /* 12085 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12086 * is on (half-synchronized state), then queue data for later 12087 * processing; else drop segment and return. 12088 */ 12089 if ((thflags & TH_ACK) == 0) { 12090 if (tp->t_flags & TF_NEEDSYN) { 12091 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12092 tiwin, thflags, nxt_pkt)); 12093 } else if (tp->t_flags & TF_ACKNOW) { 12094 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12095 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12096 return (ret_val); 12097 } else { 12098 ctf_do_drop(m, NULL); 12099 return (0); 12100 } 12101 } 12102 /* 12103 * Ack processing. 
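 *
 * (rack_process_ack() sets ourfinisacked once the cumulative ack
 * covers our FIN, which is what permits the CLOSING -> TIME-WAIT
 * transition just below.)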
12104 */ 12105 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12106 return (ret_val); 12107 } 12108 if (ourfinisacked) { 12109 tcp_twstart(tp); 12110 m_freem(m); 12111 return (1); 12112 } 12113 if (sbavail(&so->so_snd)) { 12114 if (ctf_progress_timeout_check(tp, true)) { 12115 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12116 tp, tick, PROGRESS_DROP, __LINE__); 12117 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12118 return (1); 12119 } 12120 } 12121 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12122 tiwin, thflags, nxt_pkt)); 12123 } 12124 12125 /* 12126 * Return value of 1, the TCB is unlocked and most 12127 * likely gone, return value of 0, the TCP is still 12128 * locked. 12129 */ 12130 static int 12131 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12132 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12133 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12134 { 12135 int32_t ret_val = 0; 12136 int32_t ourfinisacked = 0; 12137 struct tcp_rack *rack; 12138 12139 rack = (struct tcp_rack *)tp->t_fb_ptr; 12140 ctf_calc_rwin(so, tp); 12141 12142 if ((thflags & TH_RST) || 12143 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12144 return (__ctf_process_rst(m, th, so, tp, 12145 &rack->r_ctl.challenge_ack_ts, 12146 &rack->r_ctl.challenge_ack_cnt)); 12147 /* 12148 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12149 * synchronized state. 12150 */ 12151 if (thflags & TH_SYN) { 12152 ctf_challenge_ack(m, th, tp, &ret_val); 12153 return (ret_val); 12154 } 12155 /* 12156 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12157 * it's less than ts_recent, drop it. 12158 */ 12159 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12160 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12161 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12162 return (ret_val); 12163 } 12164 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12165 &rack->r_ctl.challenge_ack_ts, 12166 &rack->r_ctl.challenge_ack_cnt)) { 12167 return (ret_val); 12168 } 12169 /* 12170 * If new data are received on a connection after the user processes 12171 * are gone, then RST the other end. 12172 */ 12173 if ((tp->t_flags & TF_CLOSED) && tlen && 12174 rack_check_data_after_close(m, tp, &tlen, th, so)) 12175 return (1); 12176 /* 12177 * If last ACK falls within this segment's sequence numbers, record 12178 * its timestamp. NOTE: 1) That the test incorporates suggestions 12179 * from the latest proposal of the tcplw@cray.com list (Braden 12180 * 1993/04/26). 2) That updating only on newer timestamps interferes 12181 * with our earlier PAWS tests, so this check should be solely 12182 * predicated on the sequence space of this segment. 3) That we 12183 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12184 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12185 * SEG.Len, This modified check allows us to overcome RFC1323's 12186 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12187 * p.869. In such cases, we can still calculate the RTT correctly 12188 * when RCV.NXT == Last.ACK.Sent. 
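 *
 * For illustration: with SEG.SEQ = 1000, SEG.Len = 100 and
 * Last.ACK.Sent = 1100 (i.e. RCV.NXT after acking that data), the
 * relaxed test 1000 <= 1100 <= 1100 accepts the segment and updates
 * ts_recent, whereas the strict RFC1323 form (Last.ACK.Sent <
 * SEG.SEQ + SEG.Len) would reject it and lose the RTT sample.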
12189 */ 12190 if ((to->to_flags & TOF_TS) != 0 && 12191 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12192 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12193 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12194 tp->ts_recent_age = tcp_ts_getticks(); 12195 tp->ts_recent = to->to_tsval; 12196 } 12197 /* 12198 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12199 * is on (half-synchronized state), then queue data for later 12200 * processing; else drop segment and return. 12201 */ 12202 if ((thflags & TH_ACK) == 0) { 12203 if (tp->t_flags & TF_NEEDSYN) { 12204 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12205 tiwin, thflags, nxt_pkt)); 12206 } else if (tp->t_flags & TF_ACKNOW) { 12207 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12208 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12209 return (ret_val); 12210 } else { 12211 ctf_do_drop(m, NULL); 12212 return (0); 12213 } 12214 } 12215 /* 12216 * case TCPS_LAST_ACK: Ack processing. 12217 */ 12218 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12219 return (ret_val); 12220 } 12221 if (ourfinisacked) { 12222 tp = tcp_close(tp); 12223 ctf_do_drop(m, tp); 12224 return (1); 12225 } 12226 if (sbavail(&so->so_snd)) { 12227 if (ctf_progress_timeout_check(tp, true)) { 12228 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12229 tp, tick, PROGRESS_DROP, __LINE__); 12230 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12231 return (1); 12232 } 12233 } 12234 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12235 tiwin, thflags, nxt_pkt)); 12236 } 12237 12238 /* 12239 * Return value of 1, the TCB is unlocked and most 12240 * likely gone, return value of 0, the TCP is still 12241 * locked. 12242 */ 12243 static int 12244 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12245 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12246 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12247 { 12248 int32_t ret_val = 0; 12249 int32_t ourfinisacked = 0; 12250 struct tcp_rack *rack; 12251 12252 rack = (struct tcp_rack *)tp->t_fb_ptr; 12253 ctf_calc_rwin(so, tp); 12254 12255 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12256 if ((thflags & TH_RST) || 12257 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12258 return (__ctf_process_rst(m, th, so, tp, 12259 &rack->r_ctl.challenge_ack_ts, 12260 &rack->r_ctl.challenge_ack_cnt)); 12261 /* 12262 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12263 * synchronized state. 12264 */ 12265 if (thflags & TH_SYN) { 12266 ctf_challenge_ack(m, th, tp, &ret_val); 12267 return (ret_val); 12268 } 12269 /* 12270 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12271 * it's less than ts_recent, drop it. 12272 */ 12273 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12274 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12275 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12276 return (ret_val); 12277 } 12278 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12279 &rack->r_ctl.challenge_ack_ts, 12280 &rack->r_ctl.challenge_ack_cnt)) { 12281 return (ret_val); 12282 } 12283 /* 12284 * If new data are received on a connection after the user processes 12285 * are gone, then RST the other end. 
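 *
 * (rack_check_data_after_close() either closes and resets the
 * connection immediately, or, when rc_allow_data_af_clo is set and
 * data remains in the send buffer, silently ignores the incoming
 * data and sets TF2_DROP_AF_DATA so a follow-up reset is sent.)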
12286 */ 12287 if ((tp->t_flags & TF_CLOSED) && tlen && 12288 rack_check_data_after_close(m, tp, &tlen, th, so)) 12289 return (1); 12290 /* 12291 * If last ACK falls within this segment's sequence numbers, record 12292 * its timestamp. NOTE: 1) That the test incorporates suggestions 12293 * from the latest proposal of the tcplw@cray.com list (Braden 12294 * 1993/04/26). 2) That updating only on newer timestamps interferes 12295 * with our earlier PAWS tests, so this check should be solely 12296 * predicated on the sequence space of this segment. 3) That we 12297 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12298 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12299 * SEG.Len, This modified check allows us to overcome RFC1323's 12300 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12301 * p.869. In such cases, we can still calculate the RTT correctly 12302 * when RCV.NXT == Last.ACK.Sent. 12303 */ 12304 if ((to->to_flags & TOF_TS) != 0 && 12305 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12306 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12307 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12308 tp->ts_recent_age = tcp_ts_getticks(); 12309 tp->ts_recent = to->to_tsval; 12310 } 12311 /* 12312 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12313 * is on (half-synchronized state), then queue data for later 12314 * processing; else drop segment and return. 12315 */ 12316 if ((thflags & TH_ACK) == 0) { 12317 if (tp->t_flags & TF_NEEDSYN) { 12318 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12319 tiwin, thflags, nxt_pkt)); 12320 } else if (tp->t_flags & TF_ACKNOW) { 12321 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12322 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12323 return (ret_val); 12324 } else { 12325 ctf_do_drop(m, NULL); 12326 return (0); 12327 } 12328 } 12329 /* 12330 * Ack processing. 
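 *
 * (In FIN-WAIT-2 our FIN has already been acknowledged, so the
 * ourfinisacked result collected below is not acted upon here.)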
12331 */ 12332 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12333 return (ret_val); 12334 } 12335 if (sbavail(&so->so_snd)) { 12336 if (ctf_progress_timeout_check(tp, true)) { 12337 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12338 tp, tick, PROGRESS_DROP, __LINE__); 12339 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12340 return (1); 12341 } 12342 } 12343 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12344 tiwin, thflags, nxt_pkt)); 12345 } 12346 12347 static void inline 12348 rack_clear_rate_sample(struct tcp_rack *rack) 12349 { 12350 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12351 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12352 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12353 } 12354 12355 static void 12356 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12357 { 12358 uint64_t bw_est, rate_wanted; 12359 int chged = 0; 12360 uint32_t user_max, orig_min, orig_max; 12361 12362 orig_min = rack->r_ctl.rc_pace_min_segs; 12363 orig_max = rack->r_ctl.rc_pace_max_segs; 12364 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12365 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12366 chged = 1; 12367 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12368 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12369 if (user_max != rack->r_ctl.rc_pace_max_segs) 12370 chged = 1; 12371 } 12372 if (rack->rc_force_max_seg) { 12373 rack->r_ctl.rc_pace_max_segs = user_max; 12374 } else if (rack->use_fixed_rate) { 12375 bw_est = rack_get_bw(rack); 12376 if ((rack->r_ctl.crte == NULL) || 12377 (bw_est != rack->r_ctl.crte->rate)) { 12378 rack->r_ctl.rc_pace_max_segs = user_max; 12379 } else { 12380 /* We are pacing right at the hardware rate */ 12381 uint32_t segsiz; 12382 12383 segsiz = min(ctf_fixed_maxseg(tp), 12384 rack->r_ctl.rc_pace_min_segs); 12385 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12386 tp, bw_est, segsiz, 0, 12387 rack->r_ctl.crte, NULL); 12388 } 12389 } else if (rack->rc_always_pace) { 12390 if (rack->r_ctl.gp_bw || 12391 #ifdef NETFLIX_PEAKRATE 12392 rack->rc_tp->t_maxpeakrate || 12393 #endif 12394 rack->r_ctl.init_rate) { 12395 /* We have a rate of some sort set */ 12396 uint32_t orig; 12397 12398 bw_est = rack_get_bw(rack); 12399 orig = rack->r_ctl.rc_pace_max_segs; 12400 if (fill_override) 12401 rate_wanted = *fill_override; 12402 else 12403 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12404 if (rate_wanted) { 12405 /* We have something */ 12406 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12407 rate_wanted, 12408 ctf_fixed_maxseg(rack->rc_tp)); 12409 } else 12410 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12411 if (orig != rack->r_ctl.rc_pace_max_segs) 12412 chged = 1; 12413 } else if ((rack->r_ctl.gp_bw == 0) && 12414 (rack->r_ctl.rc_pace_max_segs == 0)) { 12415 /* 12416 * If we have nothing limit us to bursting 12417 * out IW sized pieces. 
12418 */ 12419 chged = 1; 12420 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12421 } 12422 } 12423 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12424 chged = 1; 12425 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12426 } 12427 if (chged) 12428 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12429 } 12430 12431 12432 static void 12433 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12434 { 12435 #ifdef INET6 12436 struct ip6_hdr *ip6 = NULL; 12437 #endif 12438 #ifdef INET 12439 struct ip *ip = NULL; 12440 #endif 12441 struct udphdr *udp = NULL; 12442 12443 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12444 #ifdef INET6 12445 if (rack->r_is_v6) { 12446 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12447 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12448 if (tp->t_port) { 12449 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12450 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12451 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12452 udp->uh_dport = tp->t_port; 12453 rack->r_ctl.fsb.udp = udp; 12454 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12455 } else 12456 { 12457 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12458 rack->r_ctl.fsb.udp = NULL; 12459 } 12460 tcpip_fillheaders(rack->rc_inp, 12461 tp->t_port, 12462 ip6, rack->r_ctl.fsb.th); 12463 } else 12464 #endif /* INET6 */ 12465 { 12466 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12467 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12468 if (tp->t_port) { 12469 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12470 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12471 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12472 udp->uh_dport = tp->t_port; 12473 rack->r_ctl.fsb.udp = udp; 12474 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12475 } else 12476 { 12477 rack->r_ctl.fsb.udp = NULL; 12478 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12479 } 12480 tcpip_fillheaders(rack->rc_inp, 12481 tp->t_port, 12482 ip, rack->r_ctl.fsb.th); 12483 } 12484 rack->r_fsb_inited = 1; 12485 } 12486 12487 static int 12488 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12489 { 12490 /* 12491 * Allocate the larger of spaces V6 if available else just 12492 * V4 and include udphdr (overbook) 12493 */ 12494 #ifdef INET6 12495 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12496 #else 12497 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12498 #endif 12499 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12500 M_TCPFSB, M_NOWAIT|M_ZERO); 12501 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12502 return (ENOMEM); 12503 } 12504 rack->r_fsb_inited = 0; 12505 return (0); 12506 } 12507 12508 static int 12509 rack_init(struct tcpcb *tp) 12510 { 12511 struct tcp_rack *rack = NULL; 12512 #ifdef INVARIANTS 12513 struct rack_sendmap *insret; 12514 #endif 12515 uint32_t iwin, snt, us_cts; 12516 int err; 12517 12518 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12519 if (tp->t_fb_ptr == NULL) { 12520 /* 12521 * We need to allocate memory but cant. The INP and INP_INFO 12522 * locks and they are recursive (happens during setup. 
So a 12523 * scheme to drop the locks fails :( 12524 * 12525 */ 12526 return (ENOMEM); 12527 } 12528 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12529 12530 rack = (struct tcp_rack *)tp->t_fb_ptr; 12531 RB_INIT(&rack->r_ctl.rc_mtree); 12532 TAILQ_INIT(&rack->r_ctl.rc_free); 12533 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12534 rack->rc_tp = tp; 12535 rack->rc_inp = tp->t_inpcb; 12536 /* Set the flag */ 12537 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12538 /* Probably not needed but lets be sure */ 12539 rack_clear_rate_sample(rack); 12540 /* 12541 * Save off the default values, socket options will poke 12542 * at these if pacing is not on or we have not yet 12543 * reached where pacing is on (gp_ready/fixed enabled). 12544 * When they get set into the CC module (when gp_ready 12545 * is enabled or we enable fixed) then we will set these 12546 * values into the CC and place in here the old values 12547 * so we have a restoral. Then we will set the flag 12548 * rc_pacing_cc_set. That way whenever we turn off pacing 12549 * or switch off this stack, we will know to go restore 12550 * the saved values. 12551 */ 12552 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12553 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12554 /* We want abe like behavior as well */ 12555 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12556 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12557 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12558 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12559 rack->r_ctl.roundends = tp->snd_max; 12560 if (use_rack_rr) 12561 rack->use_rack_rr = 1; 12562 if (V_tcp_delack_enabled) 12563 tp->t_delayed_ack = 1; 12564 else 12565 tp->t_delayed_ack = 0; 12566 #ifdef TCP_ACCOUNTING 12567 if (rack_tcp_accounting) { 12568 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12569 } 12570 #endif 12571 if (rack_enable_shared_cwnd) 12572 rack->rack_enable_scwnd = 1; 12573 rack->rc_user_set_max_segs = rack_hptsi_segments; 12574 rack->rc_force_max_seg = 0; 12575 if (rack_use_imac_dack) 12576 rack->rc_dack_mode = 1; 12577 TAILQ_INIT(&rack->r_ctl.opt_list); 12578 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12579 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12580 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12581 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12582 rack->r_ctl.rc_highest_us_rtt = 0; 12583 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12584 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12585 if (rack_use_cmp_acks) 12586 rack->r_use_cmp_ack = 1; 12587 if (rack_disable_prr) 12588 rack->rack_no_prr = 1; 12589 if (rack_gp_no_rec_chg) 12590 rack->rc_gp_no_rec_chg = 1; 12591 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12592 rack->rc_always_pace = 1; 12593 if (rack->use_fixed_rate || rack->gp_ready) 12594 rack_set_cc_pacing(rack); 12595 } else 12596 rack->rc_always_pace = 0; 12597 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12598 rack->r_mbuf_queue = 1; 12599 else 12600 rack->r_mbuf_queue = 0; 12601 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12602 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12603 else 12604 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12605 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12606 if (rack_limits_scwnd) 12607 rack->r_limit_scw = 1; 12608 else 12609 rack->r_limit_scw = 0; 12610 rack->rc_labc = V_tcp_abc_l_var; 12611 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12612 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12613 
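	/*
	 * The block below seeds this connection's rack state from the
	 * module-wide sysctl defaults (the rack_* globals); later rack
	 * socket options may adjust these on a per-connection basis.
	 */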
rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; 12614 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12615 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12616 rack->r_ctl.rc_min_to = rack_min_to; 12617 microuptime(&rack->r_ctl.act_rcv_time); 12618 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12619 rack->rc_init_win = rack_default_init_window; 12620 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12621 if (rack_hw_up_only) 12622 rack->r_up_only = 1; 12623 if (rack_do_dyn_mul) { 12624 /* When dynamic adjustment is on CA needs to start at 100% */ 12625 rack->rc_gp_dyn_mul = 1; 12626 if (rack_do_dyn_mul >= 100) 12627 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12628 } else 12629 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12630 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12631 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12632 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12633 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12634 rack_probertt_filter_life); 12635 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12636 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12637 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12638 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12639 rack->r_ctl.rc_time_probertt_starts = 0; 12640 if (rack_dsack_std_based & 0x1) { 12641 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12642 rack->rc_rack_tmr_std_based = 1; 12643 } 12644 if (rack_dsack_std_based & 0x2) { 12645 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12646 rack->rc_rack_use_dsack = 1; 12647 } 12648 /* We require at least one measurement, even if the sysctl is 0 */ 12649 if (rack_req_measurements) 12650 rack->r_ctl.req_measurements = rack_req_measurements; 12651 else 12652 rack->r_ctl.req_measurements = 1; 12653 if (rack_enable_hw_pacing) 12654 rack->rack_hdw_pace_ena = 1; 12655 if (rack_hw_rate_caps) 12656 rack->r_rack_hw_rate_caps = 1; 12657 /* Do we force on detection? */ 12658 #ifdef NETFLIX_EXP_DETECTION 12659 if (tcp_force_detection) 12660 rack->do_detection = 1; 12661 else 12662 #endif 12663 rack->do_detection = 0; 12664 if (rack_non_rxt_use_cr) 12665 rack->rack_rec_nonrxt_use_cr = 1; 12666 err = rack_init_fsb(tp, rack); 12667 if (err) { 12668 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12669 tp->t_fb_ptr = NULL; 12670 return (err); 12671 } 12672 if (tp->snd_una != tp->snd_max) { 12673 /* Create a send map for the current outstanding data */ 12674 struct rack_sendmap *rsm; 12675 12676 rsm = rack_alloc(rack); 12677 if (rsm == NULL) { 12678 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12679 tp->t_fb_ptr = NULL; 12680 return (ENOMEM); 12681 } 12682 rsm->r_no_rtt_allowed = 1; 12683 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12684 rsm->r_rtr_cnt = 1; 12685 rsm->r_rtr_bytes = 0; 12686 if (tp->t_flags & TF_SENTFIN) 12687 rsm->r_flags |= RACK_HAS_FIN; 12688 if ((tp->snd_una == tp->iss) && 12689 !TCPS_HAVEESTABLISHED(tp->t_state)) 12690 rsm->r_flags |= RACK_HAS_SYN; 12691 rsm->r_start = tp->snd_una; 12692 rsm->r_end = tp->snd_max; 12693 rsm->r_dupack = 0; 12694 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12695 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12696 if (rsm->m) 12697 rsm->orig_m_len = rsm->m->m_len; 12698 else 12699 rsm->orig_m_len = 0; 12700 } else { 12701 /* 12702 * This can happen if we have a stand-alone FIN or 12703 * SYN. 
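 *
 * (A bare SYN or FIN occupies sequence space but carries no
 * socket-buffer data, so there is no mbuf to reference from the
 * sendmap entry.)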
12704 */ 12705 rsm->m = NULL; 12706 rsm->orig_m_len = 0; 12707 rsm->soff = 0; 12708 } 12709 #ifndef INVARIANTS 12710 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12711 #else 12712 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12713 if (insret != NULL) { 12714 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12715 insret, rack, rsm); 12716 } 12717 #endif 12718 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12719 rsm->r_in_tmap = 1; 12720 } 12721 /* 12722 * Timers in Rack are kept in microseconds so lets 12723 * convert any initial incoming variables 12724 * from ticks into usecs. Note that we 12725 * also change the values of t_srtt and t_rttvar, if 12726 * they are non-zero. They are kept with a 5 12727 * bit decimal so we have to carefully convert 12728 * these to get the full precision. 12729 */ 12730 rack_convert_rtts(tp); 12731 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12732 if (rack_do_hystart) { 12733 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 12734 if (rack_do_hystart > 1) 12735 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 12736 if (rack_do_hystart > 2) 12737 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 12738 } 12739 if (rack_def_profile) 12740 rack_set_profile(rack, rack_def_profile); 12741 /* Cancel the GP measurement in progress */ 12742 tp->t_flags &= ~TF_GPUTINPROG; 12743 if (SEQ_GT(tp->snd_max, tp->iss)) 12744 snt = tp->snd_max - tp->iss; 12745 else 12746 snt = 0; 12747 iwin = rc_init_window(rack); 12748 if (snt < iwin) { 12749 /* We are not past the initial window 12750 * so we need to make sure cwnd is 12751 * correct. 12752 */ 12753 if (tp->snd_cwnd < iwin) 12754 tp->snd_cwnd = iwin; 12755 /* 12756 * If we are within the initial window 12757 * we want ssthresh to be unlimited. Setting 12758 * it to the rwnd (which the default stack does 12759 * and older racks) is not really a good idea 12760 * since we want to be in SS and grow both the 12761 * cwnd and the rwnd (via dynamic rwnd growth). If 12762 * we set it to the rwnd then as the peer grows its 12763 * rwnd we will be stuck in CA and never hit SS. 12764 * 12765 * Its far better to raise it up high (this takes the 12766 * risk that there as been a loss already, probably 12767 * we should have an indicator in all stacks of loss 12768 * but we don't), but considering the normal use this 12769 * is a risk worth taking. The consequences of not 12770 * hitting SS are far worse than going one more time 12771 * into it early on (before we have sent even a IW). 12772 * It is highly unlikely that we will have had a loss 12773 * before getting the IW out. 12774 */ 12775 tp->snd_ssthresh = 0xffffffff; 12776 } 12777 rack_stop_all_timers(tp); 12778 /* Lets setup the fsb block */ 12779 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12780 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12781 __LINE__, RACK_RTTS_INIT); 12782 return (0); 12783 } 12784 12785 static int 12786 rack_handoff_ok(struct tcpcb *tp) 12787 { 12788 if ((tp->t_state == TCPS_CLOSED) || 12789 (tp->t_state == TCPS_LISTEN)) { 12790 /* Sure no problem though it may not stick */ 12791 return (0); 12792 } 12793 if ((tp->t_state == TCPS_SYN_SENT) || 12794 (tp->t_state == TCPS_SYN_RECEIVED)) { 12795 /* 12796 * We really don't know if you support sack, 12797 * you have to get to ESTAB or beyond to tell. 12798 */ 12799 return (EAGAIN); 12800 } 12801 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12802 /* 12803 * Rack will only send a FIN after all data is acknowledged. 
12804 * So in this case we have more data outstanding. We can't 12805 * switch stacks until either all data and only the FIN 12806 * is left (in which case rack_init() now knows how 12807 * to deal with that) <or> all is acknowledged and we 12808 * are only left with incoming data, though why you 12809 * would want to switch to rack after all data is acknowledged 12810 * I have no idea (rrs)! 12811 */ 12812 return (EAGAIN); 12813 } 12814 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12815 return (0); 12816 } 12817 /* 12818 * If we reach here we don't do SACK on this connection so we can 12819 * never do rack. 12820 */ 12821 return (EINVAL); 12822 } 12823 12824 12825 static void 12826 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12827 { 12828 if (tp->t_fb_ptr) { 12829 struct tcp_rack *rack; 12830 struct rack_sendmap *rsm, *nrsm; 12831 #ifdef INVARIANTS 12832 struct rack_sendmap *rm; 12833 #endif 12834 12835 rack = (struct tcp_rack *)tp->t_fb_ptr; 12836 if (tp->t_in_pkt) { 12837 /* 12838 * It is unsafe to process the packets since a 12839 * reset may be lurking in them (its rare but it 12840 * can occur). If we were to find a RST, then we 12841 * would end up dropping the connection and the 12842 * INP lock, so when we return the caller (tcp_usrreq) 12843 * will blow up when it trys to unlock the inp. 12844 */ 12845 struct mbuf *save, *m; 12846 12847 m = tp->t_in_pkt; 12848 tp->t_in_pkt = NULL; 12849 tp->t_tail_pkt = NULL; 12850 while (m) { 12851 save = m->m_nextpkt; 12852 m->m_nextpkt = NULL; 12853 m_freem(m); 12854 m = save; 12855 } 12856 } 12857 tp->t_flags &= ~TF_FORCEDATA; 12858 #ifdef NETFLIX_SHARED_CWND 12859 if (rack->r_ctl.rc_scw) { 12860 uint32_t limit; 12861 12862 if (rack->r_limit_scw) 12863 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12864 else 12865 limit = 0; 12866 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12867 rack->r_ctl.rc_scw_index, 12868 limit); 12869 rack->r_ctl.rc_scw = NULL; 12870 } 12871 #endif 12872 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12873 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12874 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12875 rack->r_ctl.fsb.th = NULL; 12876 } 12877 /* Convert back to ticks, with */ 12878 if (tp->t_srtt > 1) { 12879 uint32_t val, frac; 12880 12881 val = USEC_2_TICKS(tp->t_srtt); 12882 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12883 tp->t_srtt = val << TCP_RTT_SHIFT; 12884 /* 12885 * frac is the fractional part here is left 12886 * over from converting to hz and shifting. 12887 * We need to convert this to the 5 bit 12888 * remainder. 12889 */ 12890 if (frac) { 12891 if (hz == 1000) { 12892 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12893 } else { 12894 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12895 } 12896 tp->t_srtt += frac; 12897 } 12898 } 12899 if (tp->t_rttvar) { 12900 uint32_t val, frac; 12901 12902 val = USEC_2_TICKS(tp->t_rttvar); 12903 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12904 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12905 /* 12906 * frac is the fractional part here is left 12907 * over from converting to hz and shifting. 12908 * We need to convert this to the 5 bit 12909 * remainder. 
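 *
 * For illustration, assuming hz == 1000: converting 30250 usec
 * gives val = 30 ticks and frac = 250 usec, and the remainder
 * becomes (250 * TCP_RTT_SCALE) / HPTS_USEC_IN_MSEC = 8, which is
 * added onto the shifted tick value (e.g. (30 << TCP_RTT_SHIFT) +
 * 8 = 968 for the t_srtt case above, i.e. 30.25 ticks).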
12910 */ 12911 if (frac) { 12912 if (hz == 1000) { 12913 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12914 } else { 12915 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12916 } 12917 tp->t_rttvar += frac; 12918 } 12919 } 12920 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12921 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12922 if (rack->rc_always_pace) { 12923 tcp_decrement_paced_conn(); 12924 rack_undo_cc_pacing(rack); 12925 rack->rc_always_pace = 0; 12926 } 12927 /* Clean up any options if they were not applied */ 12928 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12929 struct deferred_opt_list *dol; 12930 12931 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12932 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12933 free(dol, M_TCPDO); 12934 } 12935 /* rack does not use force data but other stacks may clear it */ 12936 if (rack->r_ctl.crte != NULL) { 12937 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12938 rack->rack_hdrw_pacing = 0; 12939 rack->r_ctl.crte = NULL; 12940 } 12941 #ifdef TCP_BLACKBOX 12942 tcp_log_flowend(tp); 12943 #endif 12944 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12945 #ifndef INVARIANTS 12946 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12947 #else 12948 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12949 if (rm != rsm) { 12950 panic("At fini, rack:%p rsm:%p rm:%p", 12951 rack, rsm, rm); 12952 } 12953 #endif 12954 uma_zfree(rack_zone, rsm); 12955 } 12956 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12957 while (rsm) { 12958 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12959 uma_zfree(rack_zone, rsm); 12960 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12961 } 12962 rack->rc_free_cnt = 0; 12963 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12964 tp->t_fb_ptr = NULL; 12965 } 12966 if (tp->t_inpcb) { 12967 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12968 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12969 tp->t_inpcb->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12970 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_ACKCMP; 12971 /* Cancel the GP measurement in progress */ 12972 tp->t_flags &= ~TF_GPUTINPROG; 12973 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_L_ACKS; 12974 } 12975 /* Make sure snd_nxt is correctly set */ 12976 tp->snd_nxt = tp->snd_max; 12977 } 12978 12979 static void 12980 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12981 { 12982 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12983 rack->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 12984 } 12985 switch (tp->t_state) { 12986 case TCPS_SYN_SENT: 12987 rack->r_state = TCPS_SYN_SENT; 12988 rack->r_substate = rack_do_syn_sent; 12989 break; 12990 case TCPS_SYN_RECEIVED: 12991 rack->r_state = TCPS_SYN_RECEIVED; 12992 rack->r_substate = rack_do_syn_recv; 12993 break; 12994 case TCPS_ESTABLISHED: 12995 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12996 rack->r_state = TCPS_ESTABLISHED; 12997 rack->r_substate = rack_do_established; 12998 break; 12999 case TCPS_CLOSE_WAIT: 13000 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13001 rack->r_state = TCPS_CLOSE_WAIT; 13002 rack->r_substate = rack_do_close_wait; 13003 break; 13004 case TCPS_FIN_WAIT_1: 13005 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13006 rack->r_state = TCPS_FIN_WAIT_1; 13007 rack->r_substate = rack_do_fin_wait_1; 13008 break; 13009 case TCPS_CLOSING: 13010 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13011 rack->r_state = TCPS_CLOSING; 13012 rack->r_substate = 
rack_do_closing; 13013 break; 13014 case TCPS_LAST_ACK: 13015 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13016 rack->r_state = TCPS_LAST_ACK; 13017 rack->r_substate = rack_do_lastack; 13018 break; 13019 case TCPS_FIN_WAIT_2: 13020 rack_set_pace_segments(tp, rack, __LINE__, NULL); 13021 rack->r_state = TCPS_FIN_WAIT_2; 13022 rack->r_substate = rack_do_fin_wait_2; 13023 break; 13024 case TCPS_LISTEN: 13025 case TCPS_CLOSED: 13026 case TCPS_TIME_WAIT: 13027 default: 13028 break; 13029 }; 13030 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 13031 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 13032 13033 } 13034 13035 static void 13036 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 13037 { 13038 /* 13039 * We received an ack, and then did not 13040 * call send or were bounced out due to the 13041 * hpts was running. Now a timer is up as well, is 13042 * it the right timer? 13043 */ 13044 struct rack_sendmap *rsm; 13045 int tmr_up; 13046 13047 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 13048 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 13049 return; 13050 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 13051 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 13052 (tmr_up == PACE_TMR_RXT)) { 13053 /* Should be an RXT */ 13054 return; 13055 } 13056 if (rsm == NULL) { 13057 /* Nothing outstanding? */ 13058 if (tp->t_flags & TF_DELACK) { 13059 if (tmr_up == PACE_TMR_DELACK) 13060 /* We are supposed to have delayed ack up and we do */ 13061 return; 13062 } else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) { 13063 /* 13064 * if we hit enobufs then we would expect the possibility 13065 * of nothing outstanding and the RXT up (and the hptsi timer). 13066 */ 13067 return; 13068 } else if (((V_tcp_always_keepalive || 13069 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 13070 (tp->t_state <= TCPS_CLOSING)) && 13071 (tmr_up == PACE_TMR_KEEP) && 13072 (tp->snd_max == tp->snd_una)) { 13073 /* We should have keep alive up and we do */ 13074 return; 13075 } 13076 } 13077 if (SEQ_GT(tp->snd_max, tp->snd_una) && 13078 ((tmr_up == PACE_TMR_TLP) || 13079 (tmr_up == PACE_TMR_RACK) || 13080 (tmr_up == PACE_TMR_RXT))) { 13081 /* 13082 * Either a Rack, TLP or RXT is fine if we 13083 * have outstanding data. 13084 */ 13085 return; 13086 } else if (tmr_up == PACE_TMR_DELACK) { 13087 /* 13088 * If the delayed ack was going to go off 13089 * before the rtx/tlp/rack timer were going to 13090 * expire, then that would be the timer in control. 13091 * Note we don't check the time here trusting the 13092 * code is correct. 13093 */ 13094 return; 13095 } 13096 /* 13097 * Ok the timer originally started is not what we want now. 13098 * We will force the hpts to be stopped if any, and restart 13099 * with the slot set to what was in the saved slot. 
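 *
 * (If a pacer-scheduled transmit was still pending and its deadline
 * has not yet been reached, the unused time is credited to
 * rc_agg_early below before the hpts slot is torn down and the
 * timer restarted.)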
13100 */ 13101 if (tcp_in_hpts(rack->rc_inp)) { 13102 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 13103 uint32_t us_cts; 13104 13105 us_cts = tcp_get_usecs(NULL); 13106 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 13107 rack->r_early = 1; 13108 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 13109 } 13110 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13111 } 13112 tcp_hpts_remove(tp->t_inpcb); 13113 } 13114 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13115 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13116 } 13117 13118 13119 static void 13120 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13121 { 13122 if ((SEQ_LT(tp->snd_wl1, seq) || 13123 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13124 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13125 /* keep track of pure window updates */ 13126 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13127 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13128 tp->snd_wnd = tiwin; 13129 rack_validate_fo_sendwin_up(tp, rack); 13130 tp->snd_wl1 = seq; 13131 tp->snd_wl2 = ack; 13132 if (tp->snd_wnd > tp->max_sndwnd) 13133 tp->max_sndwnd = tp->snd_wnd; 13134 rack->r_wanted_output = 1; 13135 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13136 tp->snd_wnd = tiwin; 13137 rack_validate_fo_sendwin_up(tp, rack); 13138 tp->snd_wl1 = seq; 13139 tp->snd_wl2 = ack; 13140 } else { 13141 /* Not a valid win update */ 13142 return; 13143 } 13144 /* Do we exit persists? */ 13145 if ((rack->rc_in_persist != 0) && 13146 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13147 rack->r_ctl.rc_pace_min_segs))) { 13148 rack_exit_persist(tp, rack, cts); 13149 } 13150 /* Do we enter persists? */ 13151 if ((rack->rc_in_persist == 0) && 13152 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13153 TCPS_HAVEESTABLISHED(tp->t_state) && 13154 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13155 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 13156 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 13157 /* 13158 * Here the rwnd is less than 13159 * the pacing size, we are established, 13160 * nothing is outstanding, and there is 13161 * data to send. Enter persists. 
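 *
 * For example, with rc_high_rwnd = 64000 and rc_pace_min_segs =
 * 1448 the threshold is min(64000 / 2, 1448) = 1448 bytes, so
 * persists are only entered once the peer's window can no longer
 * hold even a single pacing-minimum segment.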
13162 */ 13163 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 13164 } 13165 } 13166 13167 static void 13168 rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq) 13169 { 13170 13171 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13172 union tcp_log_stackspecific log; 13173 struct timeval ltv; 13174 char tcp_hdr_buf[60]; 13175 struct tcphdr *th; 13176 struct timespec ts; 13177 uint32_t orig_snd_una; 13178 uint8_t xx = 0; 13179 13180 #ifdef NETFLIX_HTTP_LOGGING 13181 struct http_sendfile_track *http_req; 13182 13183 if (SEQ_GT(ae->ack, tp->snd_una)) { 13184 http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1)); 13185 } else { 13186 http_req = tcp_http_find_req_for_seq(tp, ae->ack); 13187 } 13188 #endif 13189 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13190 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 13191 if (rack->rack_no_prr == 0) 13192 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 13193 else 13194 log.u_bbr.flex1 = 0; 13195 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 13196 log.u_bbr.use_lt_bw <<= 1; 13197 log.u_bbr.use_lt_bw |= rack->r_might_revert; 13198 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 13199 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 13200 log.u_bbr.pkts_out = tp->t_maxseg; 13201 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 13202 log.u_bbr.flex7 = 1; 13203 log.u_bbr.lost = ae->flags; 13204 log.u_bbr.cwnd_gain = ackval; 13205 log.u_bbr.pacing_gain = 0x2; 13206 if (ae->flags & TSTMP_HDWR) { 13207 /* Record the hardware timestamp if present */ 13208 log.u_bbr.flex3 = M_TSTMP; 13209 ts.tv_sec = ae->timestamp / 1000000000; 13210 ts.tv_nsec = ae->timestamp % 1000000000; 13211 ltv.tv_sec = ts.tv_sec; 13212 ltv.tv_usec = ts.tv_nsec / 1000; 13213 log.u_bbr.lt_epoch = tcp_tv_to_usectick(<v); 13214 } else if (ae->flags & TSTMP_LRO) { 13215 /* Record the LRO the arrival timestamp */ 13216 log.u_bbr.flex3 = M_TSTMP_LRO; 13217 ts.tv_sec = ae->timestamp / 1000000000; 13218 ts.tv_nsec = ae->timestamp % 1000000000; 13219 ltv.tv_sec = ts.tv_sec; 13220 ltv.tv_usec = ts.tv_nsec / 1000; 13221 log.u_bbr.flex5 = tcp_tv_to_usectick(<v); 13222 } 13223 log.u_bbr.timeStamp = tcp_get_usecs(<v); 13224 /* Log the rcv time */ 13225 log.u_bbr.delRate = ae->timestamp; 13226 #ifdef NETFLIX_HTTP_LOGGING 13227 log.u_bbr.applimited = tp->t_http_closed; 13228 log.u_bbr.applimited <<= 8; 13229 log.u_bbr.applimited |= tp->t_http_open; 13230 log.u_bbr.applimited <<= 8; 13231 log.u_bbr.applimited |= tp->t_http_req; 13232 if (http_req) { 13233 /* Copy out any client req info */ 13234 /* seconds */ 13235 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 13236 /* useconds */ 13237 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 13238 log.u_bbr.rttProp = http_req->timestamp; 13239 log.u_bbr.cur_del_rate = http_req->start; 13240 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 13241 log.u_bbr.flex8 |= 1; 13242 } else { 13243 log.u_bbr.flex8 |= 2; 13244 log.u_bbr.bw_inuse = http_req->end; 13245 } 13246 log.u_bbr.flex6 = http_req->start_seq; 13247 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 13248 log.u_bbr.flex8 |= 4; 13249 log.u_bbr.epoch = http_req->end_seq; 13250 } 13251 } 13252 #endif 13253 memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf)); 13254 th = (struct tcphdr *)tcp_hdr_buf; 13255 th->th_seq = ae->seq; 13256 th->th_ack = ae->ack; 13257 th->th_win = ae->win; 13258 /* Now fill in the ports */ 13259 th->th_sport = tp->t_inpcb->inp_fport; 13260 th->th_dport = tp->t_inpcb->inp_lport; 13261 
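		/*
		 * The header assembled here (and the flags/timestamp
		 * option filled in below) exists purely so that
		 * TCP_LOG_EVENTP() can log the compressed ack entry as
		 * if it were an ordinary inbound segment.
		 */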
tcp_set_flags(th, ae->flags); 13262 /* Now do we have a timestamp option? */ 13263 if (ae->flags & HAS_TSTMP) { 13264 u_char *cp; 13265 uint32_t val; 13266 13267 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); 13268 cp = (u_char *)(th + 1); 13269 *cp = TCPOPT_NOP; 13270 cp++; 13271 *cp = TCPOPT_NOP; 13272 cp++; 13273 *cp = TCPOPT_TIMESTAMP; 13274 cp++; 13275 *cp = TCPOLEN_TIMESTAMP; 13276 cp++; 13277 val = htonl(ae->ts_value); 13278 bcopy((char *)&val, 13279 (char *)cp, sizeof(uint32_t)); 13280 val = htonl(ae->ts_echo); 13281 bcopy((char *)&val, 13282 (char *)(cp + 4), sizeof(uint32_t)); 13283 } else 13284 th->th_off = (sizeof(struct tcphdr) >> 2); 13285 13286 /* 13287 * For sane logging we need to play a little trick. 13288 * If the ack were fully processed we would have moved 13289 * snd_una to high_seq, but since compressed acks are 13290 * processed in two phases, at this point (logging) snd_una 13291 * won't be advanced. So we would see multiple acks showing 13292 * the advancement. We can prevent that by "pretending" that 13293 * snd_una was advanced and then un-advancing it so that the 13294 * logging code has the right value for tlb_snd_una. 13295 */ 13296 if (tp->snd_una != high_seq) { 13297 orig_snd_una = tp->snd_una; 13298 tp->snd_una = high_seq; 13299 xx = 1; 13300 } else 13301 xx = 0; 13302 TCP_LOG_EVENTP(tp, th, 13303 &tp->t_inpcb->inp_socket->so_rcv, 13304 &tp->t_inpcb->inp_socket->so_snd, TCP_LOG_IN, 0, 13305 0, &log, true, &ltv); 13306 if (xx) { 13307 tp->snd_una = orig_snd_una; 13308 } 13309 } 13310 13311 } 13312 13313 static void 13314 rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts) 13315 { 13316 uint32_t us_rtt; 13317 /* 13318 * A persist or keep-alive was forced out, update our 13319 * min rtt time. Note we now worry about lost responses. 13320 * When a subsequent keep-alive or persist times out 13321 * and forced_ack is still on, then the last probe 13322 * was not responded to. In such cases we have a 13323 * sysctl that controls the behavior. Either we apply 13324 * the rtt but with reduced confidence (0). Or we just 13325 * plain don't apply the rtt estimate. Having data flow 13326 * will clear the probe_not_answered flag i.e. cum-ack 13327 * move forward <or> exiting and reentering persists. 13328 */ 13329 13330 rack->forced_ack = 0; 13331 rack->rc_tp->t_rxtshift = 0; 13332 if ((rack->rc_in_persist && 13333 (tiwin == rack->rc_tp->snd_wnd)) || 13334 (rack->rc_in_persist == 0)) { 13335 /* 13336 * In persists only apply the RTT update if this is 13337 * a response to our window probe. And that 13338 * means the rwnd sent must match the current 13339 * snd_wnd. If it does not, then we got a 13340 * window update ack instead. For keepalive 13341 * we allow the answer no matter what the window. 13342 * 13343 * Note that if the probe_not_answered is set then 13344 * the forced_ack_ts is the oldest one i.e. the first 13345 * probe sent that might have been lost. This assures 13346 * us that if we do calculate an RTT it is longer, not 13347 * some artificially short value.
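 *
 * Added illustrative note (not in the original source): a probe that
 * is answered on the first try is applied with confidence 3 via
 * tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); if the
 * probe had to be re-sent (probe_not_answered is set) the sample is
 * only used when rack_apply_rtt_with_reduced_conf is non-zero, and
 * then with confidence 0, because forced_ack_ts refers to the oldest
 * probe and may therefore overstate the RTT.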
13348 */ 13349 if (rack->rc_in_persist) 13350 counter_u64_add(rack_persists_acks, 1); 13351 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13352 if (us_rtt == 0) 13353 us_rtt = 1; 13354 if (rack->probe_not_answered == 0) { 13355 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13356 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13357 } else { 13358 /* We have a retransmitted probe here too */ 13359 if (rack_apply_rtt_with_reduced_conf) { 13360 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13361 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13362 } 13363 } 13364 } 13365 } 13366 13367 static int 13368 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13369 { 13370 /* 13371 * Handle a "special" compressed ack mbuf. Each incoming 13372 * ack has only four possible dispositions: 13373 * 13374 * A) It moves the cum-ack forward 13375 * B) It is behind the cum-ack. 13376 * C) It is a window-update ack. 13377 * D) It is a dup-ack. 13378 * 13379 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13380 * in the incoming mbuf. We also need to still pay attention 13381 * to nxt_pkt since there may be another packet after this 13382 * one. 13383 */ 13384 #ifdef TCP_ACCOUNTING 13385 uint64_t ts_val; 13386 uint64_t rdstc; 13387 #endif 13388 int segsiz; 13389 struct timespec ts; 13390 struct tcp_rack *rack; 13391 struct tcp_ackent *ae; 13392 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13393 int cnt, i, did_out, ourfinisacked = 0; 13394 struct tcpopt to_holder, *to = NULL; 13395 #ifdef TCP_ACCOUNTING 13396 int win_up_req = 0; 13397 #endif 13398 int nsegs = 0; 13399 int under_pacing = 1; 13400 int recovery = 0; 13401 #ifdef TCP_ACCOUNTING 13402 sched_pin(); 13403 #endif 13404 rack = (struct tcp_rack *)tp->t_fb_ptr; 13405 if (rack->gp_ready && 13406 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13407 under_pacing = 0; 13408 else 13409 under_pacing = 1; 13410 13411 if (rack->r_state != tp->t_state) 13412 rack_set_state(tp, rack); 13413 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13414 (tp->t_flags & TF_GPUTINPROG)) { 13415 /* 13416 * We have a goodput in progress 13417 * and we have entered a late state. 13418 * Do we have enough data in the sb 13419 * to handle the GPUT request? 13420 */ 13421 uint32_t bytes; 13422 13423 bytes = tp->gput_ack - tp->gput_seq; 13424 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13425 bytes += tp->gput_seq - tp->snd_una; 13426 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 13427 /* 13428 * There are not enough bytes in the socket 13429 * buffer that have been sent to cover this 13430 * measurement. Cancel it. 
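 *
 * Added worked example (not in the original source; the values are
 * hypothetical): with gput_seq = 1000, gput_ack = 21000 and
 * snd_una = 500, bytes = (21000 - 1000) + (1000 - 500) = 20500; if
 * sbavail() on the send buffer is only 15000 the measurement can
 * never complete, so it is logged with reason 18 and TF_GPUTINPROG
 * is cleared.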
13431 */ 13432 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13433 rack->r_ctl.rc_gp_srtt /*flex1*/, 13434 tp->gput_seq, 13435 0, 0, 18, __LINE__, NULL, 0); 13436 tp->t_flags &= ~TF_GPUTINPROG; 13437 } 13438 } 13439 to = &to_holder; 13440 to->to_flags = 0; 13441 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13442 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13443 cnt = m->m_len / sizeof(struct tcp_ackent); 13444 counter_u64_add(rack_multi_single_eq, cnt); 13445 high_seq = tp->snd_una; 13446 the_win = tp->snd_wnd; 13447 win_seq = tp->snd_wl1; 13448 win_upd_ack = tp->snd_wl2; 13449 cts = tcp_tv_to_usectick(tv); 13450 ms_cts = tcp_tv_to_mssectick(tv); 13451 rack->r_ctl.rc_rcvtime = cts; 13452 segsiz = ctf_fixed_maxseg(tp); 13453 if ((rack->rc_gp_dyn_mul) && 13454 (rack->use_fixed_rate == 0) && 13455 (rack->rc_always_pace)) { 13456 /* Check in on probertt */ 13457 rack_check_probe_rtt(rack, cts); 13458 } 13459 for (i = 0; i < cnt; i++) { 13460 #ifdef TCP_ACCOUNTING 13461 ts_val = get_cyclecount(); 13462 #endif 13463 rack_clear_rate_sample(rack); 13464 ae = ((mtod(m, struct tcp_ackent *)) + i); 13465 /* Setup the window */ 13466 tiwin = ae->win << tp->snd_scale; 13467 if (tiwin > rack->r_ctl.rc_high_rwnd) 13468 rack->r_ctl.rc_high_rwnd = tiwin; 13469 /* figure out the type of ack */ 13470 if (SEQ_LT(ae->ack, high_seq)) { 13471 /* Case B*/ 13472 ae->ack_val_set = ACK_BEHIND; 13473 } else if (SEQ_GT(ae->ack, high_seq)) { 13474 /* Case A */ 13475 ae->ack_val_set = ACK_CUMACK; 13476 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13477 /* Case D */ 13478 ae->ack_val_set = ACK_DUPACK; 13479 } else { 13480 /* Case C */ 13481 ae->ack_val_set = ACK_RWND; 13482 } 13483 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13484 /* Validate timestamp */ 13485 if (ae->flags & HAS_TSTMP) { 13486 /* Setup for a timestamp */ 13487 to->to_flags = TOF_TS; 13488 ae->ts_echo -= tp->ts_offset; 13489 to->to_tsecr = ae->ts_echo; 13490 to->to_tsval = ae->ts_value; 13491 /* 13492 * If echoed timestamp is later than the current time, fall back to 13493 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13494 * were used when this connection was established. 13495 */ 13496 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13497 to->to_tsecr = 0; 13498 if (tp->ts_recent && 13499 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13500 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13501 #ifdef TCP_ACCOUNTING 13502 rdstc = get_cyclecount(); 13503 if (rdstc > ts_val) { 13504 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13505 (rdstc - ts_val)); 13506 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13507 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13508 } 13509 } 13510 #endif 13511 continue; 13512 } 13513 } 13514 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13515 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13516 tp->ts_recent_age = tcp_ts_getticks(); 13517 tp->ts_recent = ae->ts_value; 13518 } 13519 } else { 13520 /* Setup for a no options */ 13521 to->to_flags = 0; 13522 } 13523 /* Update the rcv time and perform idle reduction possibly */ 13524 if (tp->t_idle_reduce && 13525 (tp->snd_max == tp->snd_una) && 13526 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13527 counter_u64_add(rack_input_idle_reduces, 1); 13528 rack_cc_after_idle(rack, tp); 13529 } 13530 tp->t_rcvtime = ticks; 13531 /* Now what about ECN? 
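 * Added note (not in the original source): tcp_ecn_input_segment()
 * below returns non-zero when the incoming codepoint or flags signal
 * congestion, and in that case CC_ECN is fed into rack_cong_signal(),
 * mirroring what the non-compressed input path does further down in
 * rack_do_segment_nounlock().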
*/ 13532 if (tcp_ecn_input_segment(tp, ae->flags, ae->codepoint)) 13533 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13534 #ifdef TCP_ACCOUNTING 13535 /* Count for the specific type of ack in */ 13536 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13537 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13538 tp->tcp_cnt_counters[ae->ack_val_set]++; 13539 } 13540 #endif 13541 /* 13542 * Note how we could move up these in the determination 13543 * above, but we don't so that way the timestamp checks (and ECN) 13544 * is done first before we do any processing on the ACK. 13545 * The non-compressed path through the code has this 13546 * weakness (noted by @jtl) that it actually does some 13547 * processing before verifying the timestamp information. 13548 * We don't take that path here which is why we set 13549 * the ack_val_set first, do the timestamp and ecn 13550 * processing, and then look at what we have setup. 13551 */ 13552 if (ae->ack_val_set == ACK_BEHIND) { 13553 /* 13554 * Case B flag reordering, if window is not closed 13555 * or it could be a keep-alive or persists 13556 */ 13557 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13558 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13559 } 13560 } else if (ae->ack_val_set == ACK_DUPACK) { 13561 /* Case D */ 13562 rack_strike_dupack(rack); 13563 } else if (ae->ack_val_set == ACK_RWND) { 13564 /* Case C */ 13565 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13566 ts.tv_sec = ae->timestamp / 1000000000; 13567 ts.tv_nsec = ae->timestamp % 1000000000; 13568 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13569 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13570 } else { 13571 rack->r_ctl.act_rcv_time = *tv; 13572 } 13573 if (rack->forced_ack) { 13574 rack_handle_probe_response(rack, tiwin, 13575 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13576 } 13577 #ifdef TCP_ACCOUNTING 13578 win_up_req = 1; 13579 #endif 13580 win_upd_ack = ae->ack; 13581 win_seq = ae->seq; 13582 the_win = tiwin; 13583 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13584 } else { 13585 /* Case A */ 13586 if (SEQ_GT(ae->ack, tp->snd_max)) { 13587 /* 13588 * We just send an ack since the incoming 13589 * ack is beyond the largest seq we sent. 
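 *
 * Added note (not in the original source): ctf_ack_war_checks() below
 * uses challenge_ack_ts/challenge_ack_cnt to rate-limit how often
 * TF_ACKNOW gets set, so a stream of acks above snd_max cannot force
 * us into responding to every single one of them.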
13590 */ 13591 if ((tp->t_flags & TF_ACKNOW) == 0) { 13592 ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt); 13593 if (tp->t_flags & TF_ACKNOW) 13594 rack->r_wanted_output = 1; 13595 } 13596 } else { 13597 nsegs++; 13598 /* If the window changed setup to update */ 13599 if (tiwin != tp->snd_wnd) { 13600 win_upd_ack = ae->ack; 13601 win_seq = ae->seq; 13602 the_win = tiwin; 13603 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13604 } 13605 #ifdef TCP_ACCOUNTING 13606 /* Account for the acks */ 13607 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13608 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); 13609 } 13610 counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN], 13611 (((ae->ack - high_seq) + segsiz - 1) / segsiz)); 13612 #endif 13613 high_seq = ae->ack; 13614 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 13615 union tcp_log_stackspecific log; 13616 struct timeval tv; 13617 13618 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 13619 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 13620 log.u_bbr.flex1 = high_seq; 13621 log.u_bbr.flex2 = rack->r_ctl.roundends; 13622 log.u_bbr.flex3 = rack->r_ctl.current_round; 13623 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 13624 log.u_bbr.flex8 = 8; 13625 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 13626 0, &log, false, NULL, NULL, 0, &tv); 13627 } 13628 /* 13629 * The draft (v3) calls for us to use SEQ_GEQ, but that 13630 * causes issues when we are just going app limited. Lets 13631 * instead use SEQ_GT <or> where it's equal but more data 13632 * is outstanding. 13633 */ 13634 if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) || 13635 ((high_seq == rack->r_ctl.roundends) && 13636 SEQ_GT(tp->snd_max, tp->snd_una))) { 13637 rack->r_ctl.current_round++; 13638 rack->r_ctl.roundends = tp->snd_max; 13639 if (CC_ALGO(tp)->newround != NULL) { 13640 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 13641 } 13642 } 13643 /* Setup our act_rcv_time */ 13644 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13645 ts.tv_sec = ae->timestamp / 1000000000; 13646 ts.tv_nsec = ae->timestamp % 1000000000; 13647 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13648 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13649 } else { 13650 rack->r_ctl.act_rcv_time = *tv; 13651 } 13652 rack_process_to_cumack(tp, rack, ae->ack, cts, to); 13653 if (rack->rc_dsack_round_seen) { 13654 /* Is the dsack round over?
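 * Added note (not in the original source): r_ctl.dsack_round_end was
 * captured (roughly at the then-current snd_max) when the first DSACK
 * of this round was seen; once the cum-ack reaches it the round is
 * closed and logged, so any later DSACK starts a fresh round.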
*/ 13655 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13656 /* Yes it is */ 13657 rack->rc_dsack_round_seen = 0; 13658 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13659 } 13660 } 13661 } 13662 } 13663 /* And lets be sure to commit the rtt measurements for this ack */ 13664 tcp_rack_xmit_timer_commit(rack, tp); 13665 #ifdef TCP_ACCOUNTING 13666 rdstc = get_cyclecount(); 13667 if (rdstc > ts_val) { 13668 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13669 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13670 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13671 if (ae->ack_val_set == ACK_CUMACK) 13672 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13673 } 13674 } 13675 #endif 13676 } 13677 #ifdef TCP_ACCOUNTING 13678 ts_val = get_cyclecount(); 13679 #endif 13680 /* Tend to any collapsed window */ 13681 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 13682 /* The peer collapsed the window */ 13683 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__); 13684 } else if (rack->rc_has_collapsed) 13685 rack_un_collapse_window(rack, __LINE__); 13686 if ((rack->r_collapse_point_valid) && 13687 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 13688 rack->r_collapse_point_valid = 0; 13689 acked_amount = acked = (high_seq - tp->snd_una); 13690 if (acked) { 13691 /* 13692 * Clear the probe not answered flag 13693 * since cum-ack moved forward. 13694 */ 13695 rack->probe_not_answered = 0; 13696 if (rack->sack_attack_disable == 0) 13697 rack_do_decay(rack); 13698 if (acked >= segsiz) { 13699 /* 13700 * You only get credit for 13701 * MSS and greater (and you get extra 13702 * credit for larger cum-ack moves). 13703 */ 13704 int ac; 13705 13706 ac = acked / segsiz; 13707 rack->r_ctl.ack_count += ac; 13708 counter_u64_add(rack_ack_total, ac); 13709 } 13710 if (rack->r_ctl.ack_count > 0xfff00000) { 13711 /* 13712 * reduce the number to keep us under 13713 * a uint32_t. 13714 */ 13715 rack->r_ctl.ack_count /= 2; 13716 rack->r_ctl.sack_count /= 2; 13717 } 13718 if (tp->t_flags & TF_NEEDSYN) { 13719 /* 13720 * T/TCP: Connection was half-synchronized, and our SYN has 13721 * been ACK'd (so connection is now fully synchronized). Go 13722 * to non-starred state, increment snd_una for ACK of SYN, 13723 * and check if we can do window scaling. 13724 */ 13725 tp->t_flags &= ~TF_NEEDSYN; 13726 tp->snd_una++; 13727 acked_amount = acked = (high_seq - tp->snd_una); 13728 } 13729 if (acked > sbavail(&so->so_snd)) 13730 acked_amount = sbavail(&so->so_snd); 13731 #ifdef NETFLIX_EXP_DETECTION 13732 /* 13733 * We only care on a cum-ack move if we are in a sack-disabled 13734 * state. We have already added in to the ack_count, and we never 13735 * would disable on a cum-ack move, so we only care to do the 13736 * detection if it may "undo" it, i.e. we were in disabled already. 
13737 */ 13738 if (rack->sack_attack_disable) 13739 rack_do_detection(tp, rack, acked_amount, segsiz); 13740 #endif 13741 if (IN_FASTRECOVERY(tp->t_flags) && 13742 (rack->rack_no_prr == 0)) 13743 rack_update_prr(tp, rack, acked_amount, high_seq); 13744 if (IN_RECOVERY(tp->t_flags)) { 13745 if (SEQ_LT(high_seq, tp->snd_recover) && 13746 (SEQ_LT(high_seq, tp->snd_max))) { 13747 tcp_rack_partialack(tp); 13748 } else { 13749 rack_post_recovery(tp, high_seq); 13750 recovery = 1; 13751 } 13752 } 13753 /* Handle the rack-log-ack part (sendmap) */ 13754 if ((sbused(&so->so_snd) == 0) && 13755 (acked > acked_amount) && 13756 (tp->t_state >= TCPS_FIN_WAIT_1) && 13757 (tp->t_flags & TF_SENTFIN)) { 13758 /* 13759 * We must be sure our fin 13760 * was sent and acked (we can be 13761 * in FIN_WAIT_1 without having 13762 * sent the fin). 13763 */ 13764 ourfinisacked = 1; 13765 /* 13766 * Lets make sure snd_una is updated 13767 * since most likely acked_amount = 0 (it 13768 * should be). 13769 */ 13770 tp->snd_una = high_seq; 13771 } 13772 /* Did we make a RTO error? */ 13773 if ((tp->t_flags & TF_PREVVALID) && 13774 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13775 tp->t_flags &= ~TF_PREVVALID; 13776 if (tp->t_rxtshift == 1 && 13777 (int)(ticks - tp->t_badrxtwin) < 0) 13778 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13779 } 13780 /* Handle the data in the socket buffer */ 13781 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13782 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13783 if (acked_amount > 0) { 13784 struct mbuf *mfree; 13785 13786 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13787 SOCKBUF_LOCK(&so->so_snd); 13788 mfree = sbcut_locked(&so->so_snd, acked_amount); 13789 tp->snd_una = high_seq; 13790 /* Note we want to hold the sb lock through the sendmap adjust */ 13791 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13792 /* Wake up the socket if we have room to write more */ 13793 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13794 sowwakeup_locked(so); 13795 m_freem(mfree); 13796 } 13797 /* update progress */ 13798 tp->t_acktime = ticks; 13799 rack_log_progress_event(rack, tp, tp->t_acktime, 13800 PROGRESS_UPDATE, __LINE__); 13801 /* Clear out shifts and such */ 13802 tp->t_rxtshift = 0; 13803 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13804 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13805 rack->rc_tlp_in_progress = 0; 13806 rack->r_ctl.rc_tlp_cnt_out = 0; 13807 /* Send recover and snd_nxt must be dragged along */ 13808 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13809 tp->snd_recover = tp->snd_una; 13810 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13811 tp->snd_nxt = tp->snd_una; 13812 /* 13813 * If the RXT timer is running we want to 13814 * stop it, so we can restart a TLP (or new RXT). 
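 *
 * Added illustrative note (not in the original source):
 * RACK_TCPT_RANGESET() above recomputes t_rxtcur from RACK_REXMTVAL()
 * (a classic smoothed "srtt plus four times rttvar" style value),
 * clamps it into [rack_rto_min, rack_rto_max] and pads it with
 * r_ctl.timer_slop, so any new RXT or TLP is timed from fresh state
 * now that the cum-ack has moved.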
13815 */ 13816 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13817 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13818 #ifdef NETFLIX_HTTP_LOGGING 13819 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13820 #endif 13821 tp->snd_wl2 = high_seq; 13822 tp->t_dupacks = 0; 13823 if (under_pacing && 13824 (rack->use_fixed_rate == 0) && 13825 (rack->in_probe_rtt == 0) && 13826 rack->rc_gp_dyn_mul && 13827 rack->rc_always_pace) { 13828 /* Check if we are dragging bottom */ 13829 rack_check_bottom_drag(tp, rack, so, acked); 13830 } 13831 if (tp->snd_una == tp->snd_max) { 13832 tp->t_flags &= ~TF_PREVVALID; 13833 rack->r_ctl.retran_during_recovery = 0; 13834 rack->r_ctl.dsack_byte_cnt = 0; 13835 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13836 if (rack->r_ctl.rc_went_idle_time == 0) 13837 rack->r_ctl.rc_went_idle_time = 1; 13838 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13839 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 13840 tp->t_acktime = 0; 13841 /* Set so we might enter persists... */ 13842 rack->r_wanted_output = 1; 13843 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13844 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13845 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13846 (sbavail(&so->so_snd) == 0) && 13847 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13848 /* 13849 * The socket was gone and the 13850 * peer sent data (not now in the past), time to 13851 * reset him. 13852 */ 13853 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13854 /* tcp_close will kill the inp pre-log the Reset */ 13855 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13856 #ifdef TCP_ACCOUNTING 13857 rdstc = get_cyclecount(); 13858 if (rdstc > ts_val) { 13859 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13860 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13861 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13862 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13863 } 13864 } 13865 #endif 13866 m_freem(m); 13867 tp = tcp_close(tp); 13868 if (tp == NULL) { 13869 #ifdef TCP_ACCOUNTING 13870 sched_unpin(); 13871 #endif 13872 return (1); 13873 } 13874 /* 13875 * We would normally do drop-with-reset which would 13876 * send back a reset. We can't since we don't have 13877 * all the needed bits. Instead lets arrange for 13878 * a call to tcp_output(). That way since we 13879 * are in the closed state we will generate a reset. 13880 * 13881 * Note if tcp_accounting is on we don't unpin since 13882 * we do that after the goto label. 13883 */ 13884 goto send_out_a_rst; 13885 } 13886 if ((sbused(&so->so_snd) == 0) && 13887 (tp->t_state >= TCPS_FIN_WAIT_1) && 13888 (tp->t_flags & TF_SENTFIN)) { 13889 /* 13890 * If we can't receive any more data, then closing user can 13891 * proceed. Starting the timer is contrary to the 13892 * specification, but if we don't get a FIN we'll hang 13893 * forever. 13894 * 13895 */ 13896 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13897 soisdisconnected(so); 13898 tcp_timer_activate(tp, TT_2MSL, 13899 (tcp_fast_finwait2_recycle ? 13900 tcp_finwait2_timeout : 13901 TP_MAXIDLE(tp))); 13902 } 13903 if (ourfinisacked == 0) { 13904 /* 13905 * We don't change to fin-wait-2 if we have our fin acked 13906 * which means we are probably in TCPS_CLOSING. 
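 *
 * Added note (not in the original source): the TT_2MSL timer armed
 * above uses the short tcp_finwait2_timeout when
 * net.inet.tcp.fast_finwait2_recycle is enabled and otherwise the
 * keep-idle based TP_MAXIDLE(tp), so a peer that never sends its FIN
 * cannot hold us in FIN_WAIT_2 forever.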
13907 */ 13908 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13909 } 13910 } 13911 } 13912 /* Wake up the socket if we have room to write more */ 13913 if (sbavail(&so->so_snd)) { 13914 rack->r_wanted_output = 1; 13915 if (ctf_progress_timeout_check(tp, true)) { 13916 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13917 tp, tick, PROGRESS_DROP, __LINE__); 13918 /* 13919 * We cheat here and don't send a RST, we should send one 13920 * when the pacer drops the connection. 13921 */ 13922 #ifdef TCP_ACCOUNTING 13923 rdstc = get_cyclecount(); 13924 if (rdstc > ts_val) { 13925 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13926 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13927 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13928 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13929 } 13930 } 13931 sched_unpin(); 13932 #endif 13933 (void)tcp_drop(tp, ETIMEDOUT); 13934 m_freem(m); 13935 return (1); 13936 } 13937 } 13938 if (ourfinisacked) { 13939 switch(tp->t_state) { 13940 case TCPS_CLOSING: 13941 #ifdef TCP_ACCOUNTING 13942 rdstc = get_cyclecount(); 13943 if (rdstc > ts_val) { 13944 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13945 (rdstc - ts_val)); 13946 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13947 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13948 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13949 } 13950 } 13951 sched_unpin(); 13952 #endif 13953 tcp_twstart(tp); 13954 m_freem(m); 13955 return (1); 13956 break; 13957 case TCPS_LAST_ACK: 13958 #ifdef TCP_ACCOUNTING 13959 rdstc = get_cyclecount(); 13960 if (rdstc > ts_val) { 13961 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13962 (rdstc - ts_val)); 13963 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13964 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13965 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13966 } 13967 } 13968 sched_unpin(); 13969 #endif 13970 tp = tcp_close(tp); 13971 ctf_do_drop(m, tp); 13972 return (1); 13973 break; 13974 case TCPS_FIN_WAIT_1: 13975 #ifdef TCP_ACCOUNTING 13976 rdstc = get_cyclecount(); 13977 if (rdstc > ts_val) { 13978 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13979 (rdstc - ts_val)); 13980 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13981 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13982 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13983 } 13984 } 13985 #endif 13986 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13987 soisdisconnected(so); 13988 tcp_timer_activate(tp, TT_2MSL, 13989 (tcp_fast_finwait2_recycle ? 13990 tcp_finwait2_timeout : 13991 TP_MAXIDLE(tp))); 13992 } 13993 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13994 break; 13995 default: 13996 break; 13997 } 13998 } 13999 if (rack->r_fast_output) { 14000 /* 14001 * We re doing fast output.. can we expand that? 
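 *
 * Added note (not in the original source; behavior inferred from the
 * call below): rack_gain_for_fastoutput() credits the newly cum-acked
 * bytes back to the prepared fast-send block, so a following
 * rack_fast_output() pass can keep sending without rebuilding its
 * state from the sendmap.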
14002 */ 14003 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 14004 } 14005 #ifdef TCP_ACCOUNTING 14006 rdstc = get_cyclecount(); 14007 if (rdstc > ts_val) { 14008 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 14009 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14010 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 14011 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 14012 } 14013 } 14014 14015 } else if (win_up_req) { 14016 rdstc = get_cyclecount(); 14017 if (rdstc > ts_val) { 14018 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 14019 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14020 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 14021 } 14022 } 14023 #endif 14024 } 14025 /* Now is there a next packet, if so we are done */ 14026 m_freem(m); 14027 did_out = 0; 14028 if (nxt_pkt) { 14029 #ifdef TCP_ACCOUNTING 14030 sched_unpin(); 14031 #endif 14032 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 14033 return (0); 14034 } 14035 rack_handle_might_revert(tp, rack); 14036 ctf_calc_rwin(so, tp); 14037 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14038 send_out_a_rst: 14039 if (tcp_output(tp) < 0) { 14040 #ifdef TCP_ACCOUNTING 14041 sched_unpin(); 14042 #endif 14043 return (1); 14044 } 14045 did_out = 1; 14046 } 14047 rack_free_trim(rack); 14048 #ifdef TCP_ACCOUNTING 14049 sched_unpin(); 14050 #endif 14051 rack_timer_audit(tp, rack, &so->so_snd); 14052 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 14053 return (0); 14054 } 14055 14056 14057 static int 14058 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 14059 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 14060 int32_t nxt_pkt, struct timeval *tv) 14061 { 14062 #ifdef TCP_ACCOUNTING 14063 uint64_t ts_val; 14064 #endif 14065 int32_t thflags, retval, did_out = 0; 14066 int32_t way_out = 0; 14067 /* 14068 * cts - is the current time from tv (caller gets ts) in microseconds. 14069 * ms_cts - is the current time from tv in milliseconds. 14070 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 14071 */ 14072 uint32_t cts, us_cts, ms_cts; 14073 uint32_t tiwin, high_seq; 14074 struct timespec ts; 14075 struct tcpopt to; 14076 struct tcp_rack *rack; 14077 struct rack_sendmap *rsm; 14078 int32_t prev_state = 0; 14079 #ifdef TCP_ACCOUNTING 14080 int ack_val_set = 0xf; 14081 #endif 14082 int nsegs; 14083 /* 14084 * tv passed from common code is from either M_TSTMP_LRO or 14085 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 14086 */ 14087 rack = (struct tcp_rack *)tp->t_fb_ptr; 14088 if (m->m_flags & M_ACKCMP) { 14089 /* 14090 * All compressed ack's are ack's by definition so 14091 * remove any ack required flag and then do the processing. 14092 */ 14093 rack->rc_ack_required = 0; 14094 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 14095 } 14096 if (m->m_flags & M_ACKCMP) { 14097 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 14098 } 14099 cts = tcp_tv_to_usectick(tv); 14100 ms_cts = tcp_tv_to_mssectick(tv); 14101 nsegs = m->m_pkthdr.lro_nsegs; 14102 counter_u64_add(rack_proc_non_comp_ack, 1); 14103 thflags = tcp_get_flags(th); 14104 #ifdef TCP_ACCOUNTING 14105 sched_pin(); 14106 if (thflags & TH_ACK) 14107 ts_val = get_cyclecount(); 14108 #endif 14109 if ((m->m_flags & M_TSTMP) || 14110 (m->m_flags & M_TSTMP_LRO)) { 14111 mbuf_tstmp2timespec(m, &ts); 14112 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14113 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14114 } else 14115 rack->r_ctl.act_rcv_time = *tv; 14116 kern_prefetch(rack, &prev_state); 14117 prev_state = 0; 14118 /* 14119 * Unscale the window into a 32-bit value. For the SYN_SENT state 14120 * the scale is zero. 14121 */ 14122 tiwin = th->th_win << tp->snd_scale; 14123 #ifdef TCP_ACCOUNTING 14124 if (thflags & TH_ACK) { 14125 /* 14126 * We have a tradeoff here. We can either do what we are 14127 * doing i.e. pinning to this CPU and then doing the accounting 14128 * <or> we could do a critical enter, setup the rdtsc and cpu 14129 * as in below, and then validate we are on the same CPU on 14130 * exit. I have choosen to not do the critical enter since 14131 * that often will gain you a context switch, and instead lock 14132 * us (line above this if) to the same CPU with sched_pin(). This 14133 * means we may be context switched out for a higher priority 14134 * interupt but we won't be moved to another CPU. 14135 * 14136 * If this occurs (which it won't very often since we most likely 14137 * are running this code in interupt context and only a higher 14138 * priority will bump us ... clock?) we will falsely add in 14139 * to the time the interupt processing time plus the ack processing 14140 * time. This is ok since its a rare event. 14141 */ 14142 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14143 ctf_fixed_maxseg(tp)); 14144 } 14145 #endif 14146 /* 14147 * Parse options on any incoming segment. 14148 */ 14149 memset(&to, 0, sizeof(to)); 14150 tcp_dooptions(&to, (u_char *)(th + 1), 14151 (th->th_off << 2) - sizeof(struct tcphdr), 14152 (thflags & TH_SYN) ? TO_SYN : 0); 14153 NET_EPOCH_ASSERT(); 14154 INP_WLOCK_ASSERT(tp->t_inpcb); 14155 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14156 __func__)); 14157 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14158 __func__)); 14159 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14160 (tp->t_flags & TF_GPUTINPROG)) { 14161 /* 14162 * We have a goodput in progress 14163 * and we have entered a late state. 14164 * Do we have enough data in the sb 14165 * to handle the GPUT request? 14166 */ 14167 uint32_t bytes; 14168 14169 bytes = tp->gput_ack - tp->gput_seq; 14170 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14171 bytes += tp->gput_seq - tp->snd_una; 14172 if (bytes > sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 14173 /* 14174 * There are not enough bytes in the socket 14175 * buffer that have been sent to cover this 14176 * measurement. Cancel it. 
14177 */ 14178 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 14179 rack->r_ctl.rc_gp_srtt /*flex1*/, 14180 tp->gput_seq, 14181 0, 0, 18, __LINE__, NULL, 0); 14182 tp->t_flags &= ~TF_GPUTINPROG; 14183 } 14184 } 14185 high_seq = th->th_ack; 14186 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 14187 union tcp_log_stackspecific log; 14188 struct timeval ltv; 14189 #ifdef NETFLIX_HTTP_LOGGING 14190 struct http_sendfile_track *http_req; 14191 14192 if (SEQ_GT(th->th_ack, tp->snd_una)) { 14193 http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1)); 14194 } else { 14195 http_req = tcp_http_find_req_for_seq(tp, th->th_ack); 14196 } 14197 #endif 14198 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14199 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 14200 if (rack->rack_no_prr == 0) 14201 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 14202 else 14203 log.u_bbr.flex1 = 0; 14204 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 14205 log.u_bbr.use_lt_bw <<= 1; 14206 log.u_bbr.use_lt_bw |= rack->r_might_revert; 14207 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; 14208 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14209 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 14210 log.u_bbr.flex3 = m->m_flags; 14211 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 14212 log.u_bbr.lost = thflags; 14213 log.u_bbr.pacing_gain = 0x1; 14214 #ifdef TCP_ACCOUNTING 14215 log.u_bbr.cwnd_gain = ack_val_set; 14216 #endif 14217 log.u_bbr.flex7 = 2; 14218 if (m->m_flags & M_TSTMP) { 14219 /* Record the hardware timestamp if present */ 14220 mbuf_tstmp2timespec(m, &ts); 14221 ltv.tv_sec = ts.tv_sec; 14222 ltv.tv_usec = ts.tv_nsec / 1000; 14223 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv); 14224 } else if (m->m_flags & M_TSTMP_LRO) { 14225 /* Record the LRO arrival timestamp */ 14226 mbuf_tstmp2timespec(m, &ts); 14227 ltv.tv_sec = ts.tv_sec; 14228 ltv.tv_usec = ts.tv_nsec / 1000; 14229 log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv); 14230 } 14231 log.u_bbr.timeStamp = tcp_get_usecs(&ltv); 14232 /* Log the rcv time */ 14233 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; 14234 #ifdef NETFLIX_HTTP_LOGGING 14235 log.u_bbr.applimited = tp->t_http_closed; 14236 log.u_bbr.applimited <<= 8; 14237 log.u_bbr.applimited |= tp->t_http_open; 14238 log.u_bbr.applimited <<= 8; 14239 log.u_bbr.applimited |= tp->t_http_req; 14240 if (http_req) { 14241 /* Copy out any client req info */ 14242 /* seconds */ 14243 log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC); 14244 /* useconds */ 14245 log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC); 14246 log.u_bbr.rttProp = http_req->timestamp; 14247 log.u_bbr.cur_del_rate = http_req->start; 14248 if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) { 14249 log.u_bbr.flex8 |= 1; 14250 } else { 14251 log.u_bbr.flex8 |= 2; 14252 log.u_bbr.bw_inuse = http_req->end; 14253 } 14254 log.u_bbr.flex6 = http_req->start_seq; 14255 if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) { 14256 log.u_bbr.flex8 |= 4; 14257 log.u_bbr.epoch = http_req->end_seq; 14258 } 14259 } 14260 #endif 14261 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, 14262 tlen, &log, true, &ltv); 14263 } 14264 /* Remove ack required flag if set, we have one */ 14265 if (thflags & TH_ACK) 14266 rack->rc_ack_required = 0; 14267 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 14268 way_out = 4; 14269 retval = 0; 14270 m_freem(m); 14271 goto done_with_input; 14272 } 14273 /* 14274 * If a segment with the ACK-bit set arrives in the SYN-SENT state 14275 * check SEQ.ACK first as
described on page 66 of RFC 793, section 3.9. 14276 */ 14277 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14278 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14279 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14280 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14281 #ifdef TCP_ACCOUNTING 14282 sched_unpin(); 14283 #endif 14284 return (1); 14285 } 14286 /* 14287 * If timestamps were negotiated during SYN/ACK and a 14288 * segment without a timestamp is received, silently drop 14289 * the segment, unless it is a RST segment or missing timestamps are 14290 * tolerated. 14291 * See section 3.2 of RFC 7323. 14292 */ 14293 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14294 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14295 way_out = 5; 14296 retval = 0; 14297 m_freem(m); 14298 goto done_with_input; 14299 } 14300 14301 /* 14302 * Segment received on connection. Reset idle time and keep-alive 14303 * timer. XXX: This should be done after segment validation to 14304 * ignore broken/spoofed segs. 14305 */ 14306 if (tp->t_idle_reduce && 14307 (tp->snd_max == tp->snd_una) && 14308 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14309 counter_u64_add(rack_input_idle_reduces, 1); 14310 rack_cc_after_idle(rack, tp); 14311 } 14312 tp->t_rcvtime = ticks; 14313 #ifdef STATS 14314 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14315 #endif 14316 if (tiwin > rack->r_ctl.rc_high_rwnd) 14317 rack->r_ctl.rc_high_rwnd = tiwin; 14318 /* 14319 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14320 * this to occur after we've validated the segment. 14321 */ 14322 if (tcp_ecn_input_segment(tp, thflags, iptos)) 14323 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14324 14325 /* 14326 * If echoed timestamp is later than the current time, fall back to 14327 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14328 * were used when this connection was established. 14329 */ 14330 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14331 to.to_tsecr -= tp->ts_offset; 14332 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14333 to.to_tsecr = 0; 14334 } 14335 14336 /* 14337 * If its the first time in we need to take care of options and 14338 * verify we can do SACK for rack! 14339 */ 14340 if (rack->r_state == 0) { 14341 /* Should be init'd by rack_init() */ 14342 KASSERT(rack->rc_inp != NULL, 14343 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14344 if (rack->rc_inp == NULL) { 14345 rack->rc_inp = tp->t_inpcb; 14346 } 14347 14348 /* 14349 * Process options only when we get SYN/ACK back. The SYN 14350 * case for incoming connections is handled in tcp_syncache. 14351 * According to RFC1323 the window field in a SYN (i.e., a 14352 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14353 * this is traditional behavior, may need to be cleaned up. 14354 */ 14355 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14356 /* Handle parallel SYN for ECN */ 14357 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14358 if ((to.to_flags & TOF_SCALE) && 14359 (tp->t_flags & TF_REQ_SCALE)) { 14360 tp->t_flags |= TF_RCVD_SCALE; 14361 tp->snd_scale = to.to_wscale; 14362 } else 14363 tp->t_flags &= ~TF_REQ_SCALE; 14364 /* 14365 * Initial send window. It will be updated with the 14366 * next incoming segment to the scaled value. 
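 *
 * Added example (not in the original source): the window field in the
 * SYN or SYN/ACK itself is never scaled, so a peer advertising 65535
 * with a wscale of 7 gives snd_wnd = 65535 here, and only later
 * segments have their 16-bit window shifted by snd_scale, allowing
 * windows of up to 65535 << 7 = 8388480 bytes.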
14367 */ 14368 tp->snd_wnd = th->th_win; 14369 rack_validate_fo_sendwin_up(tp, rack); 14370 if ((to.to_flags & TOF_TS) && 14371 (tp->t_flags & TF_REQ_TSTMP)) { 14372 tp->t_flags |= TF_RCVD_TSTMP; 14373 tp->ts_recent = to.to_tsval; 14374 tp->ts_recent_age = cts; 14375 } else 14376 tp->t_flags &= ~TF_REQ_TSTMP; 14377 if (to.to_flags & TOF_MSS) { 14378 tcp_mss(tp, to.to_mss); 14379 } 14380 if ((tp->t_flags & TF_SACK_PERMIT) && 14381 (to.to_flags & TOF_SACKPERM) == 0) 14382 tp->t_flags &= ~TF_SACK_PERMIT; 14383 if (IS_FASTOPEN(tp->t_flags)) { 14384 if (to.to_flags & TOF_FASTOPEN) { 14385 uint16_t mss; 14386 14387 if (to.to_flags & TOF_MSS) 14388 mss = to.to_mss; 14389 else 14390 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 14391 mss = TCP6_MSS; 14392 else 14393 mss = TCP_MSS; 14394 tcp_fastopen_update_cache(tp, mss, 14395 to.to_tfo_len, to.to_tfo_cookie); 14396 } else 14397 tcp_fastopen_disable_path(tp); 14398 } 14399 } 14400 /* 14401 * At this point we are at the initial call. Here we decide 14402 * if we are doing RACK or not. We do this by seeing if 14403 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14404 * The code now does do dup-ack counting so if you don't 14405 * switch back you won't get rack & TLP, but you will still 14406 * get this stack. 14407 */ 14408 14409 if ((rack_sack_not_required == 0) && 14410 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14411 tcp_switch_back_to_default(tp); 14412 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14413 tlen, iptos); 14414 #ifdef TCP_ACCOUNTING 14415 sched_unpin(); 14416 #endif 14417 return (1); 14418 } 14419 tcp_set_hpts(tp->t_inpcb); 14420 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14421 } 14422 if (thflags & TH_FIN) 14423 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14424 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14425 if ((rack->rc_gp_dyn_mul) && 14426 (rack->use_fixed_rate == 0) && 14427 (rack->rc_always_pace)) { 14428 /* Check in on probertt */ 14429 rack_check_probe_rtt(rack, us_cts); 14430 } 14431 rack_clear_rate_sample(rack); 14432 if ((rack->forced_ack) && 14433 ((tcp_get_flags(th) & TH_RST) == 0)) { 14434 rack_handle_probe_response(rack, tiwin, us_cts); 14435 } 14436 /* 14437 * This is the one exception case where we set the rack state 14438 * always. All other times (timers etc) we must have a rack-state 14439 * set (so we assure we have done the checks above for SACK). 14440 */ 14441 rack->r_ctl.rc_rcvtime = cts; 14442 if (rack->r_state != tp->t_state) 14443 rack_set_state(tp, rack); 14444 if (SEQ_GT(th->th_ack, tp->snd_una) && 14445 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14446 kern_prefetch(rsm, &prev_state); 14447 prev_state = rack->r_state; 14448 retval = (*rack->r_substate) (m, th, so, 14449 tp, &to, drop_hdrlen, 14450 tlen, tiwin, thflags, nxt_pkt, iptos); 14451 #ifdef INVARIANTS 14452 if ((retval == 0) && 14453 (tp->t_inpcb == NULL)) { 14454 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 14455 retval, tp, prev_state); 14456 } 14457 #endif 14458 if (retval == 0) { 14459 /* 14460 * If retval is 1 the tcb is unlocked and most likely the tp 14461 * is gone. 14462 */ 14463 INP_WLOCK_ASSERT(tp->t_inpcb); 14464 if ((rack->rc_gp_dyn_mul) && 14465 (rack->rc_always_pace) && 14466 (rack->use_fixed_rate == 0) && 14467 rack->in_probe_rtt && 14468 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14469 /* 14470 * If we are going for target, lets recheck before 14471 * we output. 
14472 */ 14473 rack_check_probe_rtt(rack, us_cts); 14474 } 14475 if (rack->set_pacing_done_a_iw == 0) { 14476 /* How much has been acked? */ 14477 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14478 /* We have enough to set in the pacing segment size */ 14479 rack->set_pacing_done_a_iw = 1; 14480 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14481 } 14482 } 14483 tcp_rack_xmit_timer_commit(rack, tp); 14484 #ifdef TCP_ACCOUNTING 14485 /* 14486 * If we set the ack_val_se to what ack processing we are doing 14487 * we also want to track how many cycles we burned. Note 14488 * the bits after tcp_output we let be "free". This is because 14489 * we are also tracking the tcp_output times as well. Note the 14490 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14491 * 0xf cannot be returned and is what we initialize it too to 14492 * indicate we are not doing the tabulations. 14493 */ 14494 if (ack_val_set != 0xf) { 14495 uint64_t crtsc; 14496 14497 crtsc = get_cyclecount(); 14498 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14499 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14500 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14501 } 14502 } 14503 #endif 14504 if (nxt_pkt == 0) { 14505 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14506 do_output_now: 14507 if (tcp_output(tp) < 0) 14508 return (1); 14509 did_out = 1; 14510 } 14511 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14512 rack_free_trim(rack); 14513 } 14514 /* Update any rounds needed */ 14515 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14516 union tcp_log_stackspecific log; 14517 struct timeval tv; 14518 14519 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14520 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14521 log.u_bbr.flex1 = high_seq; 14522 log.u_bbr.flex2 = rack->r_ctl.roundends; 14523 log.u_bbr.flex3 = rack->r_ctl.current_round; 14524 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14525 log.u_bbr.flex8 = 9; 14526 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14527 0, &log, false, NULL, NULL, 0, &tv); 14528 } 14529 /* 14530 * The draft (v3) calls for us to use SEQ_GEQ, but that 14531 * causes issues when we are just going app limited. Lets 14532 * instead use SEQ_GT <or> where its equal but more data 14533 * is outstanding. 14534 */ 14535 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14536 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14537 rack->r_ctl.current_round++; 14538 rack->r_ctl.roundends = tp->snd_max; 14539 if (CC_ALGO(tp)->newround != NULL) { 14540 CC_ALGO(tp)->newround(tp->ccv, rack->r_ctl.current_round); 14541 } 14542 } 14543 if ((nxt_pkt == 0) && 14544 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14545 (SEQ_GT(tp->snd_max, tp->snd_una) || 14546 (tp->t_flags & TF_DELACK) || 14547 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14548 (tp->t_state <= TCPS_CLOSING)))) { 14549 /* We could not send (probably in the hpts but stopped the timer earlier)? 
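 * Added note (not in the original source): this branch only runs when
 * no pacing timer flag is pending yet one is still needed (data is
 * outstanding, a delayed ACK is owed, or keep-alives apply), so below
 * we either leave things alone because a paced output is already
 * scheduled, or account for being early/late and re-arm the timer via
 * rack_start_hpts_timer().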
*/ 14550 if ((tp->snd_max == tp->snd_una) && 14551 ((tp->t_flags & TF_DELACK) == 0) && 14552 (tcp_in_hpts(rack->rc_inp)) && 14553 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14554 /* keep alive not needed if we are hptsi output yet */ 14555 ; 14556 } else { 14557 int late = 0; 14558 if (tcp_in_hpts(rack->rc_inp)) { 14559 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14560 us_cts = tcp_get_usecs(NULL); 14561 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14562 rack->r_early = 1; 14563 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14564 } else 14565 late = 1; 14566 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14567 } 14568 tcp_hpts_remove(tp->t_inpcb); 14569 } 14570 if (late && (did_out == 0)) { 14571 /* 14572 * We are late in the sending 14573 * and we did not call the output 14574 * (this probably should not happen). 14575 */ 14576 goto do_output_now; 14577 } 14578 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14579 } 14580 way_out = 1; 14581 } else if (nxt_pkt == 0) { 14582 /* Do we have the correct timer running? */ 14583 rack_timer_audit(tp, rack, &so->so_snd); 14584 way_out = 2; 14585 } 14586 done_with_input: 14587 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14588 if (did_out) 14589 rack->r_wanted_output = 0; 14590 #ifdef INVARIANTS 14591 if (tp->t_inpcb == NULL) { 14592 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 14593 did_out, 14594 retval, tp, prev_state); 14595 } 14596 #endif 14597 #ifdef TCP_ACCOUNTING 14598 } else { 14599 /* 14600 * Track the time (see above). 14601 */ 14602 if (ack_val_set != 0xf) { 14603 uint64_t crtsc; 14604 14605 crtsc = get_cyclecount(); 14606 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14607 /* 14608 * Note we *DO NOT* increment the per-tcb counters since 14609 * in the else the TP may be gone!! 14610 */ 14611 } 14612 #endif 14613 } 14614 #ifdef TCP_ACCOUNTING 14615 sched_unpin(); 14616 #endif 14617 return (retval); 14618 } 14619 14620 void 14621 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14622 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14623 { 14624 struct timeval tv; 14625 14626 /* First lets see if we have old packets */ 14627 if (tp->t_in_pkt) { 14628 if (ctf_do_queued_segments(so, tp, 1)) { 14629 m_freem(m); 14630 return; 14631 } 14632 } 14633 if (m->m_flags & M_TSTMP_LRO) { 14634 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 14635 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 14636 } else { 14637 /* Should not be should we kassert instead? */ 14638 tcp_get_usecs(&tv); 14639 } 14640 if (rack_do_segment_nounlock(m, th, so, tp, 14641 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14642 INP_WUNLOCK(tp->t_inpcb); 14643 } 14644 } 14645 14646 struct rack_sendmap * 14647 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14648 { 14649 struct rack_sendmap *rsm = NULL; 14650 int32_t idx; 14651 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14652 14653 /* Return the next guy to be re-transmitted */ 14654 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14655 return (NULL); 14656 } 14657 if (tp->t_flags & TF_SENTFIN) { 14658 /* retran the end FIN? 
*/ 14659 return (NULL); 14660 } 14661 /* ok lets look at this one */ 14662 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14663 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14664 return (rsm); 14665 } 14666 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14667 goto check_it; 14668 } 14669 rsm = rack_find_lowest_rsm(rack); 14670 if (rsm == NULL) { 14671 return (NULL); 14672 } 14673 check_it: 14674 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14675 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14676 /* 14677 * No sack so we automatically do the 3 strikes and 14678 * retransmit (no rack timer would be started). 14679 */ 14680 14681 return (rsm); 14682 } 14683 if (rsm->r_flags & RACK_ACKED) { 14684 return (NULL); 14685 } 14686 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14687 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14688 /* Its not yet ready */ 14689 return (NULL); 14690 } 14691 srtt = rack_grab_rtt(tp, rack); 14692 idx = rsm->r_rtr_cnt - 1; 14693 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14694 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14695 if ((tsused == ts_low) || 14696 (TSTMP_LT(tsused, ts_low))) { 14697 /* No time since sending */ 14698 return (NULL); 14699 } 14700 if ((tsused - ts_low) < thresh) { 14701 /* It has not been long enough yet */ 14702 return (NULL); 14703 } 14704 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14705 ((rsm->r_flags & RACK_SACK_PASSED) && 14706 (rack->sack_attack_disable == 0))) { 14707 /* 14708 * We have passed the dup-ack threshold <or> 14709 * a SACK has indicated this is missing. 14710 * Note that if you are a declared attacker 14711 * it is only the dup-ack threshold that 14712 * will cause retransmits. 14713 */ 14714 /* log retransmit reason */ 14715 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14716 rack->r_fast_output = 0; 14717 return (rsm); 14718 } 14719 return (NULL); 14720 } 14721 14722 static void 14723 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14724 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14725 int line, struct rack_sendmap *rsm, uint8_t quality) 14726 { 14727 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14728 union tcp_log_stackspecific log; 14729 struct timeval tv; 14730 14731 memset(&log, 0, sizeof(log)); 14732 log.u_bbr.flex1 = slot; 14733 log.u_bbr.flex2 = len; 14734 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14735 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14736 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14737 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14738 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14739 log.u_bbr.use_lt_bw <<= 1; 14740 log.u_bbr.use_lt_bw |= rack->r_late; 14741 log.u_bbr.use_lt_bw <<= 1; 14742 log.u_bbr.use_lt_bw |= rack->r_early; 14743 log.u_bbr.use_lt_bw <<= 1; 14744 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14745 log.u_bbr.use_lt_bw <<= 1; 14746 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14747 log.u_bbr.use_lt_bw <<= 1; 14748 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14749 log.u_bbr.use_lt_bw <<= 1; 14750 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14751 log.u_bbr.use_lt_bw <<= 1; 14752 log.u_bbr.use_lt_bw |= rack->gp_ready; 14753 log.u_bbr.pkt_epoch = line; 14754 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14755 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14756 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14757 log.u_bbr.bw_inuse = bw_est; 14758 log.u_bbr.delRate = bw; 14759 if (rack->r_ctl.gp_bw == 0) 14760 log.u_bbr.cur_del_rate = 0; 
14761 else 14762 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14763 log.u_bbr.rttProp = len_time; 14764 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14765 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14766 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14767 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14768 /* We are in slow start */ 14769 log.u_bbr.flex7 = 1; 14770 } else { 14771 /* we are on congestion avoidance */ 14772 log.u_bbr.flex7 = 0; 14773 } 14774 log.u_bbr.flex8 = method; 14775 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14776 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14777 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14778 log.u_bbr.cwnd_gain <<= 1; 14779 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14780 log.u_bbr.cwnd_gain <<= 1; 14781 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14782 log.u_bbr.bbr_substate = quality; 14783 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14784 &rack->rc_inp->inp_socket->so_rcv, 14785 &rack->rc_inp->inp_socket->so_snd, 14786 BBR_LOG_HPTSI_CALC, 0, 14787 0, &log, false, &tv); 14788 } 14789 } 14790 14791 static uint32_t 14792 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14793 { 14794 uint32_t new_tso, user_max; 14795 14796 user_max = rack->rc_user_set_max_segs * mss; 14797 if (rack->rc_force_max_seg) { 14798 return (user_max); 14799 } 14800 if (rack->use_fixed_rate && 14801 ((rack->r_ctl.crte == NULL) || 14802 (bw != rack->r_ctl.crte->rate))) { 14803 /* Use the user mss since we are not exactly matched */ 14804 return (user_max); 14805 } 14806 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14807 if (new_tso > user_max) 14808 new_tso = user_max; 14809 return (new_tso); 14810 } 14811 14812 static int32_t 14813 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14814 { 14815 uint64_t lentim, fill_bw; 14816 14817 /* Lets first see if we are full, if so continue with normal rate */ 14818 rack->r_via_fill_cw = 0; 14819 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14820 return (slot); 14821 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14822 return (slot); 14823 if (rack->r_ctl.rc_last_us_rtt == 0) 14824 return (slot); 14825 if (rack->rc_pace_fill_if_rttin_range && 14826 (rack->r_ctl.rc_last_us_rtt >= 14827 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14828 /* The rtt is huge, N * smallest, lets not fill */ 14829 return (slot); 14830 } 14831 /* 14832 * first lets calculate the b/w based on the last us-rtt 14833 * and the sndwnd. 14834 */ 14835 fill_bw = rack->r_ctl.cwnd_to_use; 14836 /* Take the rwnd if its smaller */ 14837 if (fill_bw > rack->rc_tp->snd_wnd) 14838 fill_bw = rack->rc_tp->snd_wnd; 14839 if (rack->r_fill_less_agg) { 14840 /* 14841 * Now take away the inflight (this will reduce our 14842 * aggressiveness and yeah, if we get that much out in 1RTT 14843 * we will have had acks come back and still be behind). 
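 *
 * Added worked example (not in the original source; the values are
 * hypothetical): with cwnd_to_use = 100000, snd_wnd = 80000, 40000
 * bytes in flight and rc_last_us_rtt = 20000 usecs, fill_bw starts at
 * 80000, drops to 40000 once the inflight is subtracted, and then
 * becomes 40000 * 1000000 / 20000 = 2000000 bytes/sec, i.e. the rate
 * needed to move the rest of the window in one RTT.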
14844 */ 14845 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14846 } 14847 /* Now lets make it into a b/w */ 14848 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14849 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14850 /* We are below the min b/w */ 14851 if (non_paced) 14852 *rate_wanted = fill_bw; 14853 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14854 return (slot); 14855 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14856 fill_bw = rack->r_ctl.bw_rate_cap; 14857 rack->r_via_fill_cw = 1; 14858 if (rack->r_rack_hw_rate_caps && 14859 (rack->r_ctl.crte != NULL)) { 14860 uint64_t high_rate; 14861 14862 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14863 if (fill_bw > high_rate) { 14864 /* We are capping bw at the highest rate table entry */ 14865 if (*rate_wanted > high_rate) { 14866 /* The original rate was also capped */ 14867 rack->r_via_fill_cw = 0; 14868 } 14869 rack_log_hdwr_pacing(rack, 14870 fill_bw, high_rate, __LINE__, 14871 0, 3); 14872 fill_bw = high_rate; 14873 if (capped) 14874 *capped = 1; 14875 } 14876 } else if ((rack->r_ctl.crte == NULL) && 14877 (rack->rack_hdrw_pacing == 0) && 14878 (rack->rack_hdw_pace_ena) && 14879 rack->r_rack_hw_rate_caps && 14880 (rack->rack_attempt_hdwr_pace == 0) && 14881 (rack->rc_inp->inp_route.ro_nh != NULL) && 14882 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14883 /* 14884 * Ok we may have a first attempt that is greater than our top rate 14885 * lets check. 14886 */ 14887 uint64_t high_rate; 14888 14889 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14890 if (high_rate) { 14891 if (fill_bw > high_rate) { 14892 fill_bw = high_rate; 14893 if (capped) 14894 *capped = 1; 14895 } 14896 } 14897 } 14898 /* 14899 * Ok fill_bw holds our mythical b/w to fill the cwnd 14900 * in a rtt, what does that time wise equate too? 14901 */ 14902 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14903 lentim /= fill_bw; 14904 *rate_wanted = fill_bw; 14905 if (non_paced || (lentim < slot)) { 14906 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14907 0, lentim, 12, __LINE__, NULL, 0); 14908 return ((int32_t)lentim); 14909 } else 14910 return (slot); 14911 } 14912 14913 static int32_t 14914 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14915 { 14916 uint64_t srtt; 14917 int32_t slot = 0; 14918 int can_start_hw_pacing = 1; 14919 int err; 14920 14921 if (rack->rc_always_pace == 0) { 14922 /* 14923 * We use the most optimistic possible cwnd/srtt for 14924 * sending calculations. This will make our 14925 * calculation anticipate getting more through 14926 * quicker then possible. But thats ok we don't want 14927 * the peer to have a gap in data sending. 14928 */ 14929 uint64_t cwnd, tr_perms = 0; 14930 int32_t reduce = 0; 14931 14932 old_method: 14933 /* 14934 * We keep no precise pacing with the old method 14935 * instead we use the pacer to mitigate bursts. 
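 *
 * Added worked example of the computation below (not in the original
 * source; the values are hypothetical): with cwnd = 50000 bytes and
 * srtt = 25000 usecs, tr_perms = (50000 * 1000) / 25000 = 2000 bytes
 * per msec, so a 14600 byte send gets slot = 14600 / 2000 = 7 msecs,
 * optionally reduced by slot / rack_slot_reduction, and finally
 * scaled to usecs by HPTS_USEC_IN_MSEC.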
14936 */ 14937 if (rack->r_ctl.rc_rack_min_rtt) 14938 srtt = rack->r_ctl.rc_rack_min_rtt; 14939 else 14940 srtt = max(tp->t_srtt, 1); 14941 if (rack->r_ctl.rc_rack_largest_cwnd) 14942 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14943 else 14944 cwnd = rack->r_ctl.cwnd_to_use; 14945 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14946 tr_perms = (cwnd * 1000) / srtt; 14947 if (tr_perms == 0) { 14948 tr_perms = ctf_fixed_maxseg(tp); 14949 } 14950 /* 14951 * Calculate how long this will take to drain, if 14952 * the calculation comes out to zero, thats ok we 14953 * will use send_a_lot to possibly spin around for 14954 * more increasing tot_len_this_send to the point 14955 * that its going to require a pace, or we hit the 14956 * cwnd. Which in that case we are just waiting for 14957 * a ACK. 14958 */ 14959 slot = len / tr_perms; 14960 /* Now do we reduce the time so we don't run dry? */ 14961 if (slot && rack_slot_reduction) { 14962 reduce = (slot / rack_slot_reduction); 14963 if (reduce < slot) { 14964 slot -= reduce; 14965 } else 14966 slot = 0; 14967 } 14968 slot *= HPTS_USEC_IN_MSEC; 14969 if (rack->rc_pace_to_cwnd) { 14970 uint64_t rate_wanted = 0; 14971 14972 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14973 rack->rc_ack_can_sendout_data = 1; 14974 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14975 } else 14976 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14977 } else { 14978 uint64_t bw_est, res, lentim, rate_wanted; 14979 uint32_t orig_val, segs, oh; 14980 int capped = 0; 14981 int prev_fill; 14982 14983 if ((rack->r_rr_config == 1) && rsm) { 14984 return (rack->r_ctl.rc_min_to); 14985 } 14986 if (rack->use_fixed_rate) { 14987 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14988 } else if ((rack->r_ctl.init_rate == 0) && 14989 #ifdef NETFLIX_PEAKRATE 14990 (rack->rc_tp->t_maxpeakrate == 0) && 14991 #endif 14992 (rack->r_ctl.gp_bw == 0)) { 14993 /* no way to yet do an estimate */ 14994 bw_est = rate_wanted = 0; 14995 } else { 14996 bw_est = rack_get_bw(rack); 14997 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14998 } 14999 if ((bw_est == 0) || (rate_wanted == 0) || 15000 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 15001 /* 15002 * No way yet to make a b/w estimate or 15003 * our raise is set incorrectly. 15004 */ 15005 goto old_method; 15006 } 15007 /* We need to account for all the overheads */ 15008 segs = (len + segsiz - 1) / segsiz; 15009 /* 15010 * We need the diff between 1514 bytes (e-mtu with e-hdr) 15011 * and how much data we put in each packet. Yes this 15012 * means we may be off if we are larger than 1500 bytes 15013 * or smaller. But this just makes us more conservative. 15014 */ 15015 if (rack_hw_rate_min && 15016 (bw_est < rack_hw_rate_min)) 15017 can_start_hw_pacing = 0; 15018 if (ETHERNET_SEGMENT_SIZE > segsiz) 15019 oh = ETHERNET_SEGMENT_SIZE - segsiz; 15020 else 15021 oh = 0; 15022 segs *= oh; 15023 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 15024 res = lentim / rate_wanted; 15025 slot = (uint32_t)res; 15026 orig_val = rack->r_ctl.rc_pace_max_segs; 15027 if (rack->r_ctl.crte == NULL) { 15028 /* 15029 * Only do this if we are not hardware pacing 15030 * since if we are doing hw-pacing below we will 15031 * set make a call after setting up or changing 15032 * the rate. 
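 * (rack_set_pace_segments() recomputes rc_pace_max_segs for the software
 * pacer; the hardware-pacing code below instead derives its burst size
 * from the established crte via tcp_get_pacing_burst_size().)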
15033 */ 15034 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 15035 } else if (rack->rc_inp->inp_snd_tag == NULL) { 15036 /* 15037 * We lost our rate somehow, this can happen 15038 * if the interface changed underneath us. 15039 */ 15040 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15041 rack->r_ctl.crte = NULL; 15042 /* Lets re-allow attempting to setup pacing */ 15043 rack->rack_hdrw_pacing = 0; 15044 rack->rack_attempt_hdwr_pace = 0; 15045 rack_log_hdwr_pacing(rack, 15046 rate_wanted, bw_est, __LINE__, 15047 0, 6); 15048 } 15049 /* Did we change the TSO size, if so log it */ 15050 if (rack->r_ctl.rc_pace_max_segs != orig_val) 15051 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 15052 prev_fill = rack->r_via_fill_cw; 15053 if ((rack->rc_pace_to_cwnd) && 15054 (capped == 0) && 15055 (rack->use_fixed_rate == 0) && 15056 (rack->in_probe_rtt == 0) && 15057 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 15058 /* 15059 * We want to pace at our rate *or* faster to 15060 * fill the cwnd to the max if its not full. 15061 */ 15062 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 15063 } 15064 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 15065 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 15066 if ((rack->rack_hdw_pace_ena) && 15067 (can_start_hw_pacing > 0) && 15068 (rack->rack_hdrw_pacing == 0) && 15069 (rack->rack_attempt_hdwr_pace == 0)) { 15070 /* 15071 * Lets attempt to turn on hardware pacing 15072 * if we can. 15073 */ 15074 rack->rack_attempt_hdwr_pace = 1; 15075 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 15076 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15077 rate_wanted, 15078 RS_PACING_GEQ, 15079 &err, &rack->r_ctl.crte_prev_rate); 15080 if (rack->r_ctl.crte) { 15081 rack->rack_hdrw_pacing = 1; 15082 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 15083 0, rack->r_ctl.crte, 15084 NULL); 15085 rack_log_hdwr_pacing(rack, 15086 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15087 err, 0); 15088 rack->r_ctl.last_hw_bw_req = rate_wanted; 15089 } else { 15090 counter_u64_add(rack_hw_pace_init_fail, 1); 15091 } 15092 } else if (rack->rack_hdrw_pacing && 15093 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 15094 /* Do we need to adjust our rate? */ 15095 const struct tcp_hwrate_limit_table *nrte; 15096 15097 if (rack->r_up_only && 15098 (rate_wanted < rack->r_ctl.crte->rate)) { 15099 /** 15100 * We have four possible states here 15101 * having to do with the previous time 15102 * and this time. 15103 * previous | this-time 15104 * A) 0 | 0 -- fill_cw not in the picture 15105 * B) 1 | 0 -- we were doing a fill-cw but now are not 15106 * C) 1 | 1 -- all rates from fill_cw 15107 * D) 0 | 1 -- we were doing non-fill and now we are filling 15108 * 15109 * For case A, C and D we don't allow a drop. But for 15110 * case B where we now our on our steady rate we do 15111 * allow a drop. 15112 * 15113 */ 15114 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 15115 goto done_w_hdwr; 15116 } 15117 if ((rate_wanted > rack->r_ctl.crte->rate) || 15118 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15119 if (rack_hw_rate_to_low && 15120 (bw_est < rack_hw_rate_to_low)) { 15121 /* 15122 * The pacing rate is too low for hardware, but 15123 * do allow hardware pacing to be restarted. 
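 * Releasing the rate entry and clearing rack_hdrw_pacing and
 * rack_attempt_hdwr_pace below drops the connection back to software
 * pacing while still permitting a later attempt to re-acquire a
 * hardware rate.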
15124 */ 15125 rack_log_hdwr_pacing(rack, 15126 bw_est, rack->r_ctl.crte->rate, __LINE__, 15127 0, 5); 15128 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15129 rack->r_ctl.crte = NULL; 15130 rack->rack_attempt_hdwr_pace = 0; 15131 rack->rack_hdrw_pacing = 0; 15132 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15133 goto done_w_hdwr; 15134 } 15135 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15136 rack->rc_tp, 15137 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15138 rate_wanted, 15139 RS_PACING_GEQ, 15140 &err, &rack->r_ctl.crte_prev_rate); 15141 if (nrte == NULL) { 15142 /* Lost the rate */ 15143 rack->rack_hdrw_pacing = 0; 15144 rack->r_ctl.crte = NULL; 15145 rack_log_hdwr_pacing(rack, 15146 rate_wanted, 0, __LINE__, 15147 err, 1); 15148 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15149 counter_u64_add(rack_hw_pace_lost, 1); 15150 } else if (nrte != rack->r_ctl.crte) { 15151 rack->r_ctl.crte = nrte; 15152 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15153 segsiz, 0, 15154 rack->r_ctl.crte, 15155 NULL); 15156 rack_log_hdwr_pacing(rack, 15157 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15158 err, 2); 15159 rack->r_ctl.last_hw_bw_req = rate_wanted; 15160 } 15161 } else { 15162 /* We just need to adjust the segment size */ 15163 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15164 rack_log_hdwr_pacing(rack, 15165 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15166 0, 4); 15167 rack->r_ctl.last_hw_bw_req = rate_wanted; 15168 } 15169 } 15170 } 15171 if ((rack->r_ctl.crte != NULL) && 15172 (rack->r_ctl.crte->rate == rate_wanted)) { 15173 /* 15174 * We need to add a extra if the rates 15175 * are exactly matched. The idea is 15176 * we want the software to make sure the 15177 * queue is empty before adding more, this 15178 * gives us N MSS extra pace times where 15179 * N is our sysctl 15180 */ 15181 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15182 } 15183 done_w_hdwr: 15184 if (rack_limit_time_with_srtt && 15185 (rack->use_fixed_rate == 0) && 15186 #ifdef NETFLIX_PEAKRATE 15187 (rack->rc_tp->t_maxpeakrate == 0) && 15188 #endif 15189 (rack->rack_hdrw_pacing == 0)) { 15190 /* 15191 * Sanity check, we do not allow the pacing delay 15192 * to be longer than the SRTT of the path. If it is 15193 * a slow path, then adding a packet should increase 15194 * the RTT and compensate for this i.e. the srtt will 15195 * be greater so the allowed pacing time will be greater. 15196 * 15197 * Note this restriction is not for where a peak rate 15198 * is set, we are doing fixed pacing or hardware pacing. 15199 */ 15200 if (rack->rc_tp->t_srtt) 15201 srtt = rack->rc_tp->t_srtt; 15202 else 15203 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15204 if (srtt < (uint64_t)slot) { 15205 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15206 slot = srtt; 15207 } 15208 } 15209 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15210 } 15211 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15212 /* 15213 * If this rate is seeing enobufs when it 15214 * goes to send then either the nic is out 15215 * of gas or we are mis-estimating the time 15216 * somehow and not letting the queue empty 15217 * completely. Lets add to the pacing time. 
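 * The boost below is crte->time_between scaled by
 * rack_enobuf_hw_boost_mult and clamped to the rack_enobuf_hw_min /
 * rack_enobuf_hw_max bounds; with purely illustrative numbers, a
 * time_between of 100 usecs and a multiplier of 2 adds 200 usecs to
 * the pacing slot.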
15218 */ 15219 int hw_boost_delay; 15220 15221 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15222 if (hw_boost_delay > rack_enobuf_hw_max) 15223 hw_boost_delay = rack_enobuf_hw_max; 15224 else if (hw_boost_delay < rack_enobuf_hw_min) 15225 hw_boost_delay = rack_enobuf_hw_min; 15226 slot += hw_boost_delay; 15227 } 15228 return (slot); 15229 } 15230 15231 static void 15232 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15233 tcp_seq startseq, uint32_t sb_offset) 15234 { 15235 struct rack_sendmap *my_rsm = NULL; 15236 struct rack_sendmap fe; 15237 15238 if (tp->t_state < TCPS_ESTABLISHED) { 15239 /* 15240 * We don't start any measurements if we are 15241 * not at least established. 15242 */ 15243 return; 15244 } 15245 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15246 /* 15247 * We will get no more data into the SB 15248 * this means we need to have the data available 15249 * before we start a measurement. 15250 */ 15251 15252 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) < 15253 max(rc_init_window(rack), 15254 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15255 /* Nope not enough data */ 15256 return; 15257 } 15258 } 15259 tp->t_flags |= TF_GPUTINPROG; 15260 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15261 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15262 tp->gput_seq = startseq; 15263 rack->app_limited_needs_set = 0; 15264 if (rack->in_probe_rtt) 15265 rack->measure_saw_probe_rtt = 1; 15266 else if ((rack->measure_saw_probe_rtt) && 15267 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15268 rack->measure_saw_probe_rtt = 0; 15269 if (rack->rc_gp_filled) 15270 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15271 else { 15272 /* Special case initial measurement */ 15273 struct timeval tv; 15274 15275 tp->gput_ts = tcp_get_usecs(&tv); 15276 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15277 } 15278 /* 15279 * We take a guess out into the future, 15280 * if we have no measurement and no 15281 * initial rate, we measure the first 15282 * initial-windows worth of data to 15283 * speed up getting some GP measurement and 15284 * thus start pacing. 15285 */ 15286 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15287 rack->app_limited_needs_set = 1; 15288 tp->gput_ack = startseq + max(rc_init_window(rack), 15289 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15290 rack_log_pacing_delay_calc(rack, 15291 tp->gput_seq, 15292 tp->gput_ack, 15293 0, 15294 tp->gput_ts, 15295 rack->r_ctl.rc_app_limited_cnt, 15296 9, 15297 __LINE__, NULL, 0); 15298 return; 15299 } 15300 if (sb_offset) { 15301 /* 15302 * We are out somewhere in the sb 15303 * can we use the already outstanding data? 15304 */ 15305 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15306 /* 15307 * Yes first one is good and in this case 15308 * the tp->gput_ts is correctly set based on 15309 * the last ack that arrived (no need to 15310 * set things up when an ack comes in). 15311 */ 15312 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15313 if ((my_rsm == NULL) || 15314 (my_rsm->r_rtr_cnt != 1)) { 15315 /* retransmission? */ 15316 goto use_latest; 15317 } 15318 } else { 15319 if (rack->r_ctl.rc_first_appl == NULL) { 15320 /* 15321 * If rc_first_appl is NULL 15322 * then the cnt should be 0. 15323 * This is probably an error, maybe 15324 * a KASSERT would be approprate. 
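 * One possible form of that assertion (a sketch only, not present in
 * the code) would capture the invariant described above:
 *	KASSERT((rack->r_ctl.rc_app_limited_cnt == 0) ||
 *	    (rack->r_ctl.rc_first_appl != NULL),
 *	    ("%s: app_limited_cnt %u but rc_first_appl is NULL",
 *	    __func__, rack->r_ctl.rc_app_limited_cnt));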
15325 */ 15326 goto use_latest; 15327 } 15328 /* 15329 * If we have a marker pointer to the last one that is 15330 * app limited we can use that, but we need to set 15331 * things up so that when it gets ack'ed we record 15332 * the ack time (if its not already acked). 15333 */ 15334 rack->app_limited_needs_set = 1; 15335 /* 15336 * We want to get to the rsm that is either 15337 * next with space i.e. over 1 MSS or the one 15338 * after that (after the app-limited). 15339 */ 15340 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15341 rack->r_ctl.rc_first_appl); 15342 if (my_rsm) { 15343 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15344 /* Have to use the next one */ 15345 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15346 my_rsm); 15347 else { 15348 /* Use after the first MSS of it is acked */ 15349 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15350 goto start_set; 15351 } 15352 } 15353 if ((my_rsm == NULL) || 15354 (my_rsm->r_rtr_cnt != 1)) { 15355 /* 15356 * Either its a retransmit or 15357 * the last is the app-limited one. 15358 */ 15359 goto use_latest; 15360 } 15361 } 15362 tp->gput_seq = my_rsm->r_start; 15363 start_set: 15364 if (my_rsm->r_flags & RACK_ACKED) { 15365 /* 15366 * This one has been acked use the arrival ack time 15367 */ 15368 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15369 rack->app_limited_needs_set = 0; 15370 } 15371 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15372 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15373 rack_log_pacing_delay_calc(rack, 15374 tp->gput_seq, 15375 tp->gput_ack, 15376 (uint64_t)my_rsm, 15377 tp->gput_ts, 15378 rack->r_ctl.rc_app_limited_cnt, 15379 9, 15380 __LINE__, NULL, 0); 15381 return; 15382 } 15383 15384 use_latest: 15385 /* 15386 * We don't know how long we may have been 15387 * idle or if this is the first-send. Lets 15388 * setup the flag so we will trim off 15389 * the first ack'd data so we get a true 15390 * measurement. 15391 */ 15392 rack->app_limited_needs_set = 1; 15393 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15394 /* Find this guy so we can pull the send time */ 15395 fe.r_start = startseq; 15396 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15397 if (my_rsm) { 15398 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15399 if (my_rsm->r_flags & RACK_ACKED) { 15400 /* 15401 * Unlikely since its probably what was 15402 * just transmitted (but I am paranoid). 15403 */ 15404 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15405 rack->app_limited_needs_set = 0; 15406 } 15407 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15408 /* This also is unlikely */ 15409 tp->gput_seq = my_rsm->r_start; 15410 } 15411 } else { 15412 /* 15413 * TSNH unless we have some send-map limit, 15414 * and even at that it should not be hitting 15415 * that limit (we should have stopped sending). 
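 * (TSNH is shorthand for "this should not happen".)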
15416 */ 15417 struct timeval tv; 15418 15419 microuptime(&tv); 15420 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15421 } 15422 rack_log_pacing_delay_calc(rack, 15423 tp->gput_seq, 15424 tp->gput_ack, 15425 (uint64_t)my_rsm, 15426 tp->gput_ts, 15427 rack->r_ctl.rc_app_limited_cnt, 15428 9, __LINE__, NULL, 0); 15429 } 15430 15431 static inline uint32_t 15432 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15433 uint32_t avail, int32_t sb_offset) 15434 { 15435 uint32_t len; 15436 uint32_t sendwin; 15437 15438 if (tp->snd_wnd > cwnd_to_use) 15439 sendwin = cwnd_to_use; 15440 else 15441 sendwin = tp->snd_wnd; 15442 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15443 /* We never want to go over our peers rcv-window */ 15444 len = 0; 15445 } else { 15446 uint32_t flight; 15447 15448 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15449 if (flight >= sendwin) { 15450 /* 15451 * We have in flight what we are allowed by cwnd (if 15452 * it was rwnd blocking it would have hit above out 15453 * >= tp->snd_wnd). 15454 */ 15455 return (0); 15456 } 15457 len = sendwin - flight; 15458 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15459 /* We would send too much (beyond the rwnd) */ 15460 len = tp->snd_wnd - ctf_outstanding(tp); 15461 } 15462 if ((len + sb_offset) > avail) { 15463 /* 15464 * We don't have that much in the SB, how much is 15465 * there? 15466 */ 15467 len = avail - sb_offset; 15468 } 15469 } 15470 return (len); 15471 } 15472 15473 static void 15474 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15475 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15476 int rsm_is_null, int optlen, int line, uint16_t mode) 15477 { 15478 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15479 union tcp_log_stackspecific log; 15480 struct timeval tv; 15481 15482 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15483 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15484 log.u_bbr.flex1 = error; 15485 log.u_bbr.flex2 = flags; 15486 log.u_bbr.flex3 = rsm_is_null; 15487 log.u_bbr.flex4 = ipoptlen; 15488 log.u_bbr.flex5 = tp->rcv_numsacks; 15489 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15490 log.u_bbr.flex7 = optlen; 15491 log.u_bbr.flex8 = rack->r_fsb_inited; 15492 log.u_bbr.applimited = rack->r_fast_output; 15493 log.u_bbr.bw_inuse = rack_get_bw(rack); 15494 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15495 log.u_bbr.cwnd_gain = mode; 15496 log.u_bbr.pkts_out = orig_len; 15497 log.u_bbr.lt_epoch = len; 15498 log.u_bbr.delivered = line; 15499 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15500 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15501 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15502 len, &log, false, NULL, NULL, 0, &tv); 15503 } 15504 } 15505 15506 15507 static struct mbuf * 15508 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15509 struct rack_fast_send_blk *fsb, 15510 int32_t seglimit, int32_t segsize, int hw_tls) 15511 { 15512 #ifdef KERN_TLS 15513 struct ktls_session *tls, *ntls; 15514 #ifdef INVARIANTS 15515 struct mbuf *start; 15516 #endif 15517 #endif 15518 struct mbuf *m, *n, **np, *smb; 15519 struct mbuf *top; 15520 int32_t off, soff; 15521 int32_t len = *plen; 15522 int32_t fragsize; 15523 int32_t len_cp = 0; 15524 uint32_t mlen, frags; 15525 15526 soff = off = the_off; 15527 smb = m = the_m; 15528 np = ⊤ 15529 top = NULL; 15530 #ifdef KERN_TLS 15531 if (hw_tls && (m->m_flags & M_EXTPG)) 15532 tls = m->m_epg_tls; 
15533 else 15534 tls = NULL; 15535 #ifdef INVARIANTS 15536 start = m; 15537 #endif 15538 #endif 15539 while (len > 0) { 15540 if (m == NULL) { 15541 *plen = len_cp; 15542 break; 15543 } 15544 #ifdef KERN_TLS 15545 if (hw_tls) { 15546 if (m->m_flags & M_EXTPG) 15547 ntls = m->m_epg_tls; 15548 else 15549 ntls = NULL; 15550 15551 /* 15552 * Avoid mixing TLS records with handshake 15553 * data or TLS records from different 15554 * sessions. 15555 */ 15556 if (tls != ntls) { 15557 MPASS(m != start); 15558 *plen = len_cp; 15559 break; 15560 } 15561 } 15562 #endif 15563 mlen = min(len, m->m_len - off); 15564 if (seglimit) { 15565 /* 15566 * For M_EXTPG mbufs, add 3 segments 15567 * + 1 in case we are crossing page boundaries 15568 * + 2 in case the TLS hdr/trailer are used 15569 * It is cheaper to just add the segments 15570 * than it is to take the cache miss to look 15571 * at the mbuf ext_pgs state in detail. 15572 */ 15573 if (m->m_flags & M_EXTPG) { 15574 fragsize = min(segsize, PAGE_SIZE); 15575 frags = 3; 15576 } else { 15577 fragsize = segsize; 15578 frags = 0; 15579 } 15580 15581 /* Break if we really can't fit anymore. */ 15582 if ((frags + 1) >= seglimit) { 15583 *plen = len_cp; 15584 break; 15585 } 15586 15587 /* 15588 * Reduce size if you can't copy the whole 15589 * mbuf. If we can't copy the whole mbuf, also 15590 * adjust len so the loop will end after this 15591 * mbuf. 15592 */ 15593 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15594 mlen = (seglimit - frags - 1) * fragsize; 15595 len = mlen; 15596 *plen = len_cp + len; 15597 } 15598 frags += howmany(mlen, fragsize); 15599 if (frags == 0) 15600 frags++; 15601 seglimit -= frags; 15602 KASSERT(seglimit > 0, 15603 ("%s: seglimit went too low", __func__)); 15604 } 15605 n = m_get(M_NOWAIT, m->m_type); 15606 *np = n; 15607 if (n == NULL) 15608 goto nospace; 15609 n->m_len = mlen; 15610 soff += mlen; 15611 len_cp += n->m_len; 15612 if (m->m_flags & (M_EXT|M_EXTPG)) { 15613 n->m_data = m->m_data + off; 15614 mb_dupcl(n, m); 15615 } else { 15616 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15617 (u_int)n->m_len); 15618 } 15619 len -= n->m_len; 15620 off = 0; 15621 m = m->m_next; 15622 np = &n->m_next; 15623 if (len || (soff == smb->m_len)) { 15624 /* 15625 * We have more so we move forward or 15626 * we have consumed the entire mbuf and 15627 * len has fell to 0. 15628 */ 15629 soff = 0; 15630 smb = m; 15631 } 15632 15633 } 15634 if (fsb != NULL) { 15635 fsb->m = smb; 15636 fsb->off = soff; 15637 if (smb) { 15638 /* 15639 * Save off the size of the mbuf. We do 15640 * this so that we can recognize when it 15641 * has been trimmed by sbcut() as acks 15642 * come in. 15643 */ 15644 fsb->o_m_len = smb->m_len; 15645 } else { 15646 /* 15647 * This is the case where the next mbuf went to NULL. This 15648 * means with this copy we have sent everything in the sb. 15649 * In theory we could clear the fast_output flag, but lets 15650 * not since its possible that we could get more added 15651 * and acks that call the extend function which would let 15652 * us send more. 15653 */ 15654 fsb->o_m_len = 0; 15655 } 15656 } 15657 return (top); 15658 nospace: 15659 if (top) 15660 m_freem(top); 15661 return (NULL); 15662 15663 } 15664 15665 /* 15666 * This is a copy of m_copym(), taking the TSO segment size/limit 15667 * constraints into account, and advancing the sndptr as it goes. 
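 * The seglimit and segsize arguments are the interface TSO limits the
 * caller gathered (when TSO is in play) from tp->t_tsomaxsegcount and
 * tp->t_tsomaxsegsize, and *plen is trimmed down to the number of bytes
 * actually copied whenever the walk has to stop early.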
 */
static struct mbuf *
rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
{
	struct mbuf *m, *n;
	int32_t soff;

	soff = rack->r_ctl.fsb.off;
	m = rack->r_ctl.fsb.m;
	if (rack->r_ctl.fsb.o_m_len > m->m_len) {
		/*
		 * The mbuf had the front of it chopped off by an ack;
		 * we need to adjust the soff/off by that difference.
		 */
		uint32_t delta;

		delta = rack->r_ctl.fsb.o_m_len - m->m_len;
		soff -= delta;
	} else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
		/*
		 * The mbuf was expanded, probably by
		 * an m_compress. Just update o_m_len.
		 */
		rack->r_ctl.fsb.o_m_len = m->m_len;
	}
	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
		 __FUNCTION__,
		 rack, *plen, m, m->m_len));
	/* Save off the right location before we copy and advance */
	*s_soff = soff;
	*s_mb = rack->r_ctl.fsb.m;
	n = rack_fo_base_copym(m, soff, plen,
			       &rack->r_ctl.fsb,
			       seglimit, segsize, rack->r_ctl.fsb.hw_tls);
	return (n);
}

static int
rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
		     uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
{
	/*
	 * Enter the fast retransmit path. We are given that a sched_pin is
	 * in place (if accounting is compiled in) and the cycle count taken
	 * at entry is in ts_val. The concept here is that the rsm
	 * now holds the mbuf offsets and such so we can directly transmit
	 * without a lot of overhead; the len field is already set
	 * to prohibit us from sending too much (usually it's 1 MSS).
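	 * On success the frame is handed straight to ip_output()/ip6_output()
	 * and 0 is returned; any failure returns -1, in which case the caller
	 * is expected to fall back to the regular (slower) output path.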
15719 */ 15720 struct ip *ip = NULL; 15721 struct udphdr *udp = NULL; 15722 struct tcphdr *th = NULL; 15723 struct mbuf *m = NULL; 15724 struct inpcb *inp; 15725 uint8_t *cpto; 15726 struct tcp_log_buffer *lgb; 15727 #ifdef TCP_ACCOUNTING 15728 uint64_t crtsc; 15729 int cnt_thru = 1; 15730 #endif 15731 struct tcpopt to; 15732 u_char opt[TCP_MAXOLEN]; 15733 uint32_t hdrlen, optlen; 15734 int32_t slot, segsiz, max_val, tso = 0, error, ulen = 0; 15735 uint16_t flags; 15736 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15737 uint32_t if_hw_tsomaxsegsize; 15738 15739 #ifdef INET6 15740 struct ip6_hdr *ip6 = NULL; 15741 15742 if (rack->r_is_v6) { 15743 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15744 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15745 } else 15746 #endif /* INET6 */ 15747 { 15748 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15749 hdrlen = sizeof(struct tcpiphdr); 15750 } 15751 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15752 goto failed; 15753 } 15754 if (doing_tlp) { 15755 /* Its a TLP add the flag, it may already be there but be sure */ 15756 rsm->r_flags |= RACK_TLP; 15757 } else { 15758 /* If it was a TLP it is not not on this retransmit */ 15759 rsm->r_flags &= ~RACK_TLP; 15760 } 15761 startseq = rsm->r_start; 15762 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15763 inp = rack->rc_inp; 15764 to.to_flags = 0; 15765 flags = tcp_outflags[tp->t_state]; 15766 if (flags & (TH_SYN|TH_RST)) { 15767 goto failed; 15768 } 15769 if (rsm->r_flags & RACK_HAS_FIN) { 15770 /* We can't send a FIN here */ 15771 goto failed; 15772 } 15773 if (flags & TH_FIN) { 15774 /* We never send a FIN */ 15775 flags &= ~TH_FIN; 15776 } 15777 if (tp->t_flags & TF_RCVD_TSTMP) { 15778 to.to_tsval = ms_cts + tp->ts_offset; 15779 to.to_tsecr = tp->ts_recent; 15780 to.to_flags = TOF_TS; 15781 } 15782 optlen = tcp_addoptions(&to, opt); 15783 hdrlen += optlen; 15784 udp = rack->r_ctl.fsb.udp; 15785 if (udp) 15786 hdrlen += sizeof(struct udphdr); 15787 if (rack->r_ctl.rc_pace_max_segs) 15788 max_val = rack->r_ctl.rc_pace_max_segs; 15789 else if (rack->rc_user_set_max_segs) 15790 max_val = rack->rc_user_set_max_segs * segsiz; 15791 else 15792 max_val = len; 15793 if ((tp->t_flags & TF_TSO) && 15794 V_tcp_do_tso && 15795 (len > segsiz) && 15796 (tp->t_port == 0)) 15797 tso = 1; 15798 #ifdef INET6 15799 if (MHLEN < hdrlen + max_linkhdr) 15800 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15801 else 15802 #endif 15803 m = m_gethdr(M_NOWAIT, MT_DATA); 15804 if (m == NULL) 15805 goto failed; 15806 m->m_data += max_linkhdr; 15807 m->m_len = hdrlen; 15808 th = rack->r_ctl.fsb.th; 15809 /* Establish the len to send */ 15810 if (len > max_val) 15811 len = max_val; 15812 if ((tso) && (len + optlen > tp->t_maxseg)) { 15813 uint32_t if_hw_tsomax; 15814 int32_t max_len; 15815 15816 /* extract TSO information */ 15817 if_hw_tsomax = tp->t_tsomax; 15818 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15819 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15820 /* 15821 * Check if we should limit by maximum payload 15822 * length: 15823 */ 15824 if (if_hw_tsomax != 0) { 15825 /* compute maximum TSO length */ 15826 max_len = (if_hw_tsomax - hdrlen - 15827 max_linkhdr); 15828 if (max_len <= 0) { 15829 goto failed; 15830 } else if (len > max_len) { 15831 len = max_len; 15832 } 15833 } 15834 if (len <= segsiz) { 15835 /* 15836 * In case there are too many small fragments don't 15837 * use TSO: 15838 */ 15839 tso = 0; 15840 } 15841 } else { 15842 tso = 0; 15843 } 15844 if ((tso == 0) && (len > 
segsiz)) 15845 len = segsiz; 15846 if ((len == 0) || 15847 (len <= MHLEN - hdrlen - max_linkhdr)) { 15848 goto failed; 15849 } 15850 th->th_seq = htonl(rsm->r_start); 15851 th->th_ack = htonl(tp->rcv_nxt); 15852 /* 15853 * The PUSH bit should only be applied 15854 * if the full retransmission is made. If 15855 * we are sending less than this is the 15856 * left hand edge and should not have 15857 * the PUSH bit. 15858 */ 15859 if ((rsm->r_flags & RACK_HAD_PUSH) && 15860 (len == (rsm->r_end - rsm->r_start))) 15861 flags |= TH_PUSH; 15862 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15863 if (th->th_win == 0) { 15864 tp->t_sndzerowin++; 15865 tp->t_flags |= TF_RXWIN0SENT; 15866 } else 15867 tp->t_flags &= ~TF_RXWIN0SENT; 15868 if (rsm->r_flags & RACK_TLP) { 15869 /* 15870 * TLP should not count in retran count, but 15871 * in its own bin 15872 */ 15873 counter_u64_add(rack_tlp_retran, 1); 15874 counter_u64_add(rack_tlp_retran_bytes, len); 15875 } else { 15876 tp->t_sndrexmitpack++; 15877 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15878 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15879 } 15880 #ifdef STATS 15881 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15882 len); 15883 #endif 15884 if (rsm->m == NULL) 15885 goto failed; 15886 if (rsm->orig_m_len != rsm->m->m_len) { 15887 /* Fix up the orig_m_len and possibly the mbuf offset */ 15888 rack_adjust_orig_mlen(rsm); 15889 } 15890 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15891 if (len <= segsiz) { 15892 /* 15893 * Must have ran out of mbufs for the copy 15894 * shorten it to no longer need tso. Lets 15895 * not put on sendalot since we are low on 15896 * mbufs. 15897 */ 15898 tso = 0; 15899 } 15900 if ((m->m_next == NULL) || (len <= 0)){ 15901 goto failed; 15902 } 15903 if (udp) { 15904 if (rack->r_is_v6) 15905 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15906 else 15907 ulen = hdrlen + len - sizeof(struct ip); 15908 udp->uh_ulen = htons(ulen); 15909 } 15910 m->m_pkthdr.rcvif = (struct ifnet *)0; 15911 if (TCPS_HAVERCVDSYN(tp->t_state) && 15912 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 15913 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15914 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15915 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15916 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15917 #ifdef INET6 15918 if (rack->r_is_v6) { 15919 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15920 ip6->ip6_flow |= htonl(ect << 20); 15921 } 15922 else 15923 #endif 15924 { 15925 ip->ip_tos &= ~IPTOS_ECN_MASK; 15926 ip->ip_tos |= ect; 15927 } 15928 } 15929 tcp_set_flags(th, flags); 15930 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15931 #ifdef INET6 15932 if (rack->r_is_v6) { 15933 if (tp->t_port) { 15934 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15935 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15936 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15937 th->th_sum = htons(0); 15938 UDPSTAT_INC(udps_opackets); 15939 } else { 15940 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15941 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15942 th->th_sum = in6_cksum_pseudo(ip6, 15943 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15944 0); 15945 } 15946 } 15947 #endif 15948 #if defined(INET6) && defined(INET) 15949 else 15950 #endif 15951 #ifdef INET 15952 { 15953 if (tp->t_port) { 15954 m->m_pkthdr.csum_flags = CSUM_UDP; 15955 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15956 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15957 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15958 th->th_sum = htons(0); 15959 UDPSTAT_INC(udps_opackets); 15960 } else { 15961 m->m_pkthdr.csum_flags = CSUM_TCP; 15962 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15963 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15964 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15965 IPPROTO_TCP + len + optlen)); 15966 } 15967 /* IP version must be set here for ipv4/ipv6 checking later */ 15968 KASSERT(ip->ip_v == IPVERSION, 15969 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15970 } 15971 #endif 15972 if (tso) { 15973 KASSERT(len > tp->t_maxseg - optlen, 15974 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15975 m->m_pkthdr.csum_flags |= CSUM_TSO; 15976 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15977 } 15978 #ifdef INET6 15979 if (rack->r_is_v6) { 15980 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15981 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15982 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15983 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15984 else 15985 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15986 } 15987 #endif 15988 #if defined(INET) && defined(INET6) 15989 else 15990 #endif 15991 #ifdef INET 15992 { 15993 ip->ip_len = htons(m->m_pkthdr.len); 15994 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15995 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15996 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15997 if (tp->t_port == 0 || len < V_tcp_minmss) { 15998 ip->ip_off |= htons(IP_DF); 15999 } 16000 } else { 16001 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16002 } 16003 } 16004 #endif 16005 /* Time to copy in our header */ 16006 cpto = mtod(m, uint8_t *); 16007 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16008 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16009 if (optlen) { 16010 bcopy(opt, th + 1, optlen); 16011 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16012 } else { 16013 th->th_off = sizeof(struct tcphdr) >> 2; 16014 } 16015 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16016 union tcp_log_stackspecific log; 16017 16018 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 16019 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 16020 counter_u64_add(rack_collapsed_win_rxt, 1); 16021 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 16022 } 16023 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16024 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16025 if (rack->rack_no_prr) 16026 log.u_bbr.flex1 = 0; 16027 else 16028 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16029 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16030 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16031 log.u_bbr.flex4 = max_val; 16032 log.u_bbr.flex5 = 0; 16033 /* Save off the early/late values */ 16034 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16035 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16036 log.u_bbr.bw_inuse = rack_get_bw(rack); 16037 if (doing_tlp == 0) 16038 log.u_bbr.flex8 = 1; 16039 else 16040 log.u_bbr.flex8 = 2; 16041 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16042 log.u_bbr.flex7 = 55; 16043 log.u_bbr.pkts_out = tp->t_maxseg; 16044 log.u_bbr.timeStamp = cts; 16045 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16046 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16047 log.u_bbr.delivered = 0; 16048 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16049 len, &log, false, NULL, NULL, 0, tv); 16050 } else 16051 
lgb = NULL; 16052 #ifdef INET6 16053 if (rack->r_is_v6) { 16054 error = ip6_output(m, NULL, 16055 &inp->inp_route6, 16056 0, NULL, NULL, inp); 16057 } 16058 #endif 16059 #if defined(INET) && defined(INET6) 16060 else 16061 #endif 16062 #ifdef INET 16063 { 16064 error = ip_output(m, NULL, 16065 &inp->inp_route, 16066 0, 0, inp); 16067 } 16068 #endif 16069 m = NULL; 16070 if (lgb) { 16071 lgb->tlb_errno = error; 16072 lgb = NULL; 16073 } 16074 if (error) { 16075 goto failed; 16076 } 16077 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 16078 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 16079 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 16080 rack->rc_tlp_in_progress = 1; 16081 rack->r_ctl.rc_tlp_cnt_out++; 16082 } 16083 if (error == 0) { 16084 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 16085 if (doing_tlp) { 16086 rack->rc_last_sent_tlp_past_cumack = 0; 16087 rack->rc_last_sent_tlp_seq_valid = 1; 16088 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 16089 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 16090 } 16091 } 16092 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16093 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16094 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 16095 rack->r_ctl.retran_during_recovery += len; 16096 { 16097 int idx; 16098 16099 idx = (len / segsiz) + 3; 16100 if (idx >= TCP_MSS_ACCT_ATIMER) 16101 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16102 else 16103 counter_u64_add(rack_out_size[idx], 1); 16104 } 16105 if (tp->t_rtttime == 0) { 16106 tp->t_rtttime = ticks; 16107 tp->t_rtseq = startseq; 16108 KMOD_TCPSTAT_INC(tcps_segstimed); 16109 } 16110 counter_u64_add(rack_fto_rsm_send, 1); 16111 if (error && (error == ENOBUFS)) { 16112 if (rack->r_ctl.crte != NULL) { 16113 rack_trace_point(rack, RACK_TP_HWENOBUF); 16114 } else 16115 rack_trace_point(rack, RACK_TP_ENOBUF); 16116 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 16117 if (rack->rc_enobuf < 0x7f) 16118 rack->rc_enobuf++; 16119 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16120 slot = 10 * HPTS_USEC_IN_MSEC; 16121 } else 16122 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16123 if ((slot == 0) || 16124 (rack->rc_always_pace == 0) || 16125 (rack->r_rr_config == 1)) { 16126 /* 16127 * We have no pacing set or we 16128 * are using old-style rack or 16129 * we are overridden to use the old 1ms pacing. 16130 */ 16131 slot = rack->r_ctl.rc_min_to; 16132 } 16133 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16134 #ifdef TCP_ACCOUNTING 16135 crtsc = get_cyclecount(); 16136 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16137 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16138 } 16139 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16140 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16141 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16142 } 16143 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16144 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16145 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16146 } 16147 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16148 sched_unpin(); 16149 #endif 16150 return (0); 16151 failed: 16152 if (m) 16153 m_free(m); 16154 return (-1); 16155 } 16156 16157 static void 16158 rack_sndbuf_autoscale(struct tcp_rack *rack) 16159 { 16160 /* 16161 * Automatic sizing of send socket buffer. 
Often the send buffer 16162 * size is not optimally adjusted to the actual network conditions 16163 * at hand (delay bandwidth product). Setting the buffer size too 16164 * small limits throughput on links with high bandwidth and high 16165 * delay (eg. trans-continental/oceanic links). Setting the 16166 * buffer size too big consumes too much real kernel memory, 16167 * especially with many connections on busy servers. 16168 * 16169 * The criteria to step up the send buffer one notch are: 16170 * 1. receive window of remote host is larger than send buffer 16171 * (with a fudge factor of 5/4th); 16172 * 2. send buffer is filled to 7/8th with data (so we actually 16173 * have data to make use of it); 16174 * 3. send buffer fill has not hit maximal automatic size; 16175 * 4. our send window (slow start and cogestion controlled) is 16176 * larger than sent but unacknowledged data in send buffer. 16177 * 16178 * Note that the rack version moves things much faster since 16179 * we want to avoid hitting cache lines in the rack_fast_output() 16180 * path so this is called much less often and thus moves 16181 * the SB forward by a percentage. 16182 */ 16183 struct socket *so; 16184 struct tcpcb *tp; 16185 uint32_t sendwin, scaleup; 16186 16187 tp = rack->rc_tp; 16188 so = rack->rc_inp->inp_socket; 16189 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); 16190 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { 16191 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && 16192 sbused(&so->so_snd) >= 16193 (so->so_snd.sb_hiwat / 8 * 7) && 16194 sbused(&so->so_snd) < V_tcp_autosndbuf_max && 16195 sendwin >= (sbused(&so->so_snd) - 16196 (tp->snd_nxt - tp->snd_una))) { 16197 if (rack_autosndbuf_inc) 16198 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; 16199 else 16200 scaleup = V_tcp_autosndbuf_inc; 16201 if (scaleup < V_tcp_autosndbuf_inc) 16202 scaleup = V_tcp_autosndbuf_inc; 16203 scaleup += so->so_snd.sb_hiwat; 16204 if (scaleup > V_tcp_autosndbuf_max) 16205 scaleup = V_tcp_autosndbuf_max; 16206 if (!sbreserve_locked(so, SO_SND, scaleup, curthread)) 16207 so->so_snd.sb_flags &= ~SB_AUTOSIZE; 16208 } 16209 } 16210 } 16211 16212 static int 16213 rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val, 16214 uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err) 16215 { 16216 /* 16217 * Enter to do fast output. We are given that the sched_pin is 16218 * in place (if accounting is compiled in) and the cycle count taken 16219 * at entry is in place in ts_val. The idea here is that 16220 * we know how many more bytes needs to be sent (presumably either 16221 * during pacing or to fill the cwnd and that was greater than 16222 * the max-burst). We have how much to send and all the info we 16223 * need to just send. 
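 * left_to_send is consumed in bursts of at most the pacing max-segment
 * amount; while more than a segment remains (and TSO is not in use) the
 * code loops back to the again label to emit the next chunk before the
 * pacing timer is finally started.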
16224 */ 16225 struct ip *ip = NULL; 16226 struct udphdr *udp = NULL; 16227 struct tcphdr *th = NULL; 16228 struct mbuf *m, *s_mb; 16229 struct inpcb *inp; 16230 uint8_t *cpto; 16231 struct tcp_log_buffer *lgb; 16232 #ifdef TCP_ACCOUNTING 16233 uint64_t crtsc; 16234 #endif 16235 struct tcpopt to; 16236 u_char opt[TCP_MAXOLEN]; 16237 uint32_t hdrlen, optlen; 16238 #ifdef TCP_ACCOUNTING 16239 int cnt_thru = 1; 16240 #endif 16241 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16242 uint16_t flags; 16243 uint32_t s_soff; 16244 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16245 uint32_t if_hw_tsomaxsegsize; 16246 uint16_t add_flag = RACK_SENT_FP; 16247 #ifdef INET6 16248 struct ip6_hdr *ip6 = NULL; 16249 16250 if (rack->r_is_v6) { 16251 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16252 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16253 } else 16254 #endif /* INET6 */ 16255 { 16256 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16257 hdrlen = sizeof(struct tcpiphdr); 16258 } 16259 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16260 m = NULL; 16261 goto failed; 16262 } 16263 startseq = tp->snd_max; 16264 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16265 inp = rack->rc_inp; 16266 len = rack->r_ctl.fsb.left_to_send; 16267 to.to_flags = 0; 16268 flags = rack->r_ctl.fsb.tcp_flags; 16269 if (tp->t_flags & TF_RCVD_TSTMP) { 16270 to.to_tsval = ms_cts + tp->ts_offset; 16271 to.to_tsecr = tp->ts_recent; 16272 to.to_flags = TOF_TS; 16273 } 16274 optlen = tcp_addoptions(&to, opt); 16275 hdrlen += optlen; 16276 udp = rack->r_ctl.fsb.udp; 16277 if (udp) 16278 hdrlen += sizeof(struct udphdr); 16279 if (rack->r_ctl.rc_pace_max_segs) 16280 max_val = rack->r_ctl.rc_pace_max_segs; 16281 else if (rack->rc_user_set_max_segs) 16282 max_val = rack->rc_user_set_max_segs * segsiz; 16283 else 16284 max_val = len; 16285 if ((tp->t_flags & TF_TSO) && 16286 V_tcp_do_tso && 16287 (len > segsiz) && 16288 (tp->t_port == 0)) 16289 tso = 1; 16290 again: 16291 #ifdef INET6 16292 if (MHLEN < hdrlen + max_linkhdr) 16293 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16294 else 16295 #endif 16296 m = m_gethdr(M_NOWAIT, MT_DATA); 16297 if (m == NULL) 16298 goto failed; 16299 m->m_data += max_linkhdr; 16300 m->m_len = hdrlen; 16301 th = rack->r_ctl.fsb.th; 16302 /* Establish the len to send */ 16303 if (len > max_val) 16304 len = max_val; 16305 if ((tso) && (len + optlen > tp->t_maxseg)) { 16306 uint32_t if_hw_tsomax; 16307 int32_t max_len; 16308 16309 /* extract TSO information */ 16310 if_hw_tsomax = tp->t_tsomax; 16311 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16312 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16313 /* 16314 * Check if we should limit by maximum payload 16315 * length: 16316 */ 16317 if (if_hw_tsomax != 0) { 16318 /* compute maximum TSO length */ 16319 max_len = (if_hw_tsomax - hdrlen - 16320 max_linkhdr); 16321 if (max_len <= 0) { 16322 goto failed; 16323 } else if (len > max_len) { 16324 len = max_len; 16325 } 16326 } 16327 if (len <= segsiz) { 16328 /* 16329 * In case there are too many small fragments don't 16330 * use TSO: 16331 */ 16332 tso = 0; 16333 } 16334 } else { 16335 tso = 0; 16336 } 16337 if ((tso == 0) && (len > segsiz)) 16338 len = segsiz; 16339 if ((len == 0) || 16340 (len <= MHLEN - hdrlen - max_linkhdr)) { 16341 goto failed; 16342 } 16343 sb_offset = tp->snd_max - tp->snd_una; 16344 th->th_seq = htonl(tp->snd_max); 16345 th->th_ack = htonl(tp->rcv_nxt); 16346 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> 
tp->rcv_scale)); 16347 if (th->th_win == 0) { 16348 tp->t_sndzerowin++; 16349 tp->t_flags |= TF_RXWIN0SENT; 16350 } else 16351 tp->t_flags &= ~TF_RXWIN0SENT; 16352 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16353 KMOD_TCPSTAT_INC(tcps_sndpack); 16354 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16355 #ifdef STATS 16356 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16357 len); 16358 #endif 16359 if (rack->r_ctl.fsb.m == NULL) 16360 goto failed; 16361 16362 /* s_mb and s_soff are saved for rack_log_output */ 16363 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16364 &s_mb, &s_soff); 16365 if (len <= segsiz) { 16366 /* 16367 * Must have ran out of mbufs for the copy 16368 * shorten it to no longer need tso. Lets 16369 * not put on sendalot since we are low on 16370 * mbufs. 16371 */ 16372 tso = 0; 16373 } 16374 if (rack->r_ctl.fsb.rfo_apply_push && 16375 (len == rack->r_ctl.fsb.left_to_send)) { 16376 flags |= TH_PUSH; 16377 add_flag |= RACK_HAD_PUSH; 16378 } 16379 if ((m->m_next == NULL) || (len <= 0)){ 16380 goto failed; 16381 } 16382 if (udp) { 16383 if (rack->r_is_v6) 16384 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16385 else 16386 ulen = hdrlen + len - sizeof(struct ip); 16387 udp->uh_ulen = htons(ulen); 16388 } 16389 m->m_pkthdr.rcvif = (struct ifnet *)0; 16390 if (TCPS_HAVERCVDSYN(tp->t_state) && 16391 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 16392 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16393 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16394 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16395 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16396 #ifdef INET6 16397 if (rack->r_is_v6) { 16398 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16399 ip6->ip6_flow |= htonl(ect << 20); 16400 } 16401 else 16402 #endif 16403 { 16404 ip->ip_tos &= ~IPTOS_ECN_MASK; 16405 ip->ip_tos |= ect; 16406 } 16407 } 16408 tcp_set_flags(th, flags); 16409 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16410 #ifdef INET6 16411 if (rack->r_is_v6) { 16412 if (tp->t_port) { 16413 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16414 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16415 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16416 th->th_sum = htons(0); 16417 UDPSTAT_INC(udps_opackets); 16418 } else { 16419 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16420 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16421 th->th_sum = in6_cksum_pseudo(ip6, 16422 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16423 0); 16424 } 16425 } 16426 #endif 16427 #if defined(INET6) && defined(INET) 16428 else 16429 #endif 16430 #ifdef INET 16431 { 16432 if (tp->t_port) { 16433 m->m_pkthdr.csum_flags = CSUM_UDP; 16434 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16435 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16436 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16437 th->th_sum = htons(0); 16438 UDPSTAT_INC(udps_opackets); 16439 } else { 16440 m->m_pkthdr.csum_flags = CSUM_TCP; 16441 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16442 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16443 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16444 IPPROTO_TCP + len + optlen)); 16445 } 16446 /* IP version must be set here for ipv4/ipv6 checking later */ 16447 KASSERT(ip->ip_v == IPVERSION, 16448 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16449 } 16450 #endif 16451 if (tso) { 16452 KASSERT(len > tp->t_maxseg - optlen, 16453 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 16454 m->m_pkthdr.csum_flags |= CSUM_TSO; 
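		/*
		 * Each TSO segment the hardware emits carries at most
		 * (t_maxseg - optlen) bytes of payload, matching what a
		 * software-segmented packet would have carried.
		 */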
16455 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16456 } 16457 #ifdef INET6 16458 if (rack->r_is_v6) { 16459 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16460 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16461 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16462 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16463 else 16464 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16465 } 16466 #endif 16467 #if defined(INET) && defined(INET6) 16468 else 16469 #endif 16470 #ifdef INET 16471 { 16472 ip->ip_len = htons(m->m_pkthdr.len); 16473 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16474 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16475 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16476 if (tp->t_port == 0 || len < V_tcp_minmss) { 16477 ip->ip_off |= htons(IP_DF); 16478 } 16479 } else { 16480 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16481 } 16482 } 16483 #endif 16484 /* Time to copy in our header */ 16485 cpto = mtod(m, uint8_t *); 16486 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16487 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16488 if (optlen) { 16489 bcopy(opt, th + 1, optlen); 16490 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16491 } else { 16492 th->th_off = sizeof(struct tcphdr) >> 2; 16493 } 16494 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16495 union tcp_log_stackspecific log; 16496 16497 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16498 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16499 if (rack->rack_no_prr) 16500 log.u_bbr.flex1 = 0; 16501 else 16502 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16503 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16504 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16505 log.u_bbr.flex4 = max_val; 16506 log.u_bbr.flex5 = 0; 16507 /* Save off the early/late values */ 16508 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16509 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16510 log.u_bbr.bw_inuse = rack_get_bw(rack); 16511 log.u_bbr.flex8 = 0; 16512 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16513 log.u_bbr.flex7 = 44; 16514 log.u_bbr.pkts_out = tp->t_maxseg; 16515 log.u_bbr.timeStamp = cts; 16516 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16517 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16518 log.u_bbr.delivered = 0; 16519 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16520 len, &log, false, NULL, NULL, 0, tv); 16521 } else 16522 lgb = NULL; 16523 #ifdef INET6 16524 if (rack->r_is_v6) { 16525 error = ip6_output(m, NULL, 16526 &inp->inp_route6, 16527 0, NULL, NULL, inp); 16528 } 16529 #endif 16530 #if defined(INET) && defined(INET6) 16531 else 16532 #endif 16533 #ifdef INET 16534 { 16535 error = ip_output(m, NULL, 16536 &inp->inp_route, 16537 0, 0, inp); 16538 } 16539 #endif 16540 if (lgb) { 16541 lgb->tlb_errno = error; 16542 lgb = NULL; 16543 } 16544 if (error) { 16545 *send_err = error; 16546 m = NULL; 16547 goto failed; 16548 } 16549 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16550 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16551 m = NULL; 16552 if (tp->snd_una == tp->snd_max) { 16553 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16554 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16555 tp->t_acktime = ticks; 16556 } 16557 if (error == 0) 16558 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16559 16560 rack->forced_ack = 0; /* If we send something zap the FA flag */ 16561 tot_len += len; 16562 if ((tp->t_flags & TF_GPUTINPROG) == 
0) 16563 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16564 tp->snd_max += len; 16565 tp->snd_nxt = tp->snd_max; 16566 { 16567 int idx; 16568 16569 idx = (len / segsiz) + 3; 16570 if (idx >= TCP_MSS_ACCT_ATIMER) 16571 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16572 else 16573 counter_u64_add(rack_out_size[idx], 1); 16574 } 16575 if (len <= rack->r_ctl.fsb.left_to_send) 16576 rack->r_ctl.fsb.left_to_send -= len; 16577 else 16578 rack->r_ctl.fsb.left_to_send = 0; 16579 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16580 rack->r_fast_output = 0; 16581 rack->r_ctl.fsb.left_to_send = 0; 16582 /* At the end of fast_output scale up the sb */ 16583 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16584 rack_sndbuf_autoscale(rack); 16585 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16586 } 16587 if (tp->t_rtttime == 0) { 16588 tp->t_rtttime = ticks; 16589 tp->t_rtseq = startseq; 16590 KMOD_TCPSTAT_INC(tcps_segstimed); 16591 } 16592 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16593 (max_val > len) && 16594 (tso == 0)) { 16595 max_val -= len; 16596 len = segsiz; 16597 th = rack->r_ctl.fsb.th; 16598 #ifdef TCP_ACCOUNTING 16599 cnt_thru++; 16600 #endif 16601 goto again; 16602 } 16603 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16604 counter_u64_add(rack_fto_send, 1); 16605 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16606 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16607 #ifdef TCP_ACCOUNTING 16608 crtsc = get_cyclecount(); 16609 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16610 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16611 } 16612 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16613 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16614 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16615 } 16616 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16617 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16618 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16619 } 16620 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16621 sched_unpin(); 16622 #endif 16623 return (0); 16624 failed: 16625 if (m) 16626 m_free(m); 16627 rack->r_fast_output = 0; 16628 return (-1); 16629 } 16630 16631 static struct rack_sendmap * 16632 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 16633 { 16634 struct rack_sendmap *rsm = NULL; 16635 struct rack_sendmap fe; 16636 int thresh; 16637 16638 restart: 16639 fe.r_start = rack->r_ctl.last_collapse_point; 16640 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 16641 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 16642 /* Nothing, strange turn off validity */ 16643 rack->r_collapse_point_valid = 0; 16644 return (NULL); 16645 } 16646 /* Can we send it yet? */ 16647 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 16648 /* 16649 * Receiver window has not grown enough for 16650 * the segment to be put on the wire. 16651 */ 16652 return (NULL); 16653 } 16654 if (rsm->r_flags & RACK_ACKED) { 16655 /* 16656 * It has been sacked, lets move to the 16657 * next one if possible. 16658 */ 16659 rack->r_ctl.last_collapse_point = rsm->r_end; 16660 /* Are we done? */ 16661 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16662 rack->r_ctl.high_collapse_point)) { 16663 rack->r_collapse_point_valid = 0; 16664 return (NULL); 16665 } 16666 goto restart; 16667 } 16668 /* Now has it been long enough ? 
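 * We only retransmit from the collapse point once the segment has aged
 * past the normal RACK reordering/retransmit threshold computed below.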
*/ 16669 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 16670 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 16671 rack_log_collapse(rack, rsm->r_start, 16672 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16673 thresh, __LINE__, 6, rsm->r_flags, rsm); 16674 return (rsm); 16675 } 16676 /* Not enough time */ 16677 rack_log_collapse(rack, rsm->r_start, 16678 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16679 thresh, __LINE__, 7, rsm->r_flags, rsm); 16680 return (NULL); 16681 } 16682 16683 static int 16684 rack_output(struct tcpcb *tp) 16685 { 16686 struct socket *so; 16687 uint32_t recwin; 16688 uint32_t sb_offset, s_moff = 0; 16689 int32_t len, error = 0; 16690 uint16_t flags; 16691 struct mbuf *m, *s_mb = NULL; 16692 struct mbuf *mb; 16693 uint32_t if_hw_tsomaxsegcount = 0; 16694 uint32_t if_hw_tsomaxsegsize; 16695 int32_t segsiz, minseg; 16696 long tot_len_this_send = 0; 16697 #ifdef INET 16698 struct ip *ip = NULL; 16699 #endif 16700 struct udphdr *udp = NULL; 16701 struct tcp_rack *rack; 16702 struct tcphdr *th; 16703 uint8_t pass = 0; 16704 uint8_t mark = 0; 16705 uint8_t wanted_cookie = 0; 16706 u_char opt[TCP_MAXOLEN]; 16707 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16708 uint32_t rack_seq; 16709 16710 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16711 unsigned ipsec_optlen = 0; 16712 16713 #endif 16714 int32_t idle, sendalot; 16715 int32_t sub_from_prr = 0; 16716 volatile int32_t sack_rxmit; 16717 struct rack_sendmap *rsm = NULL; 16718 int32_t tso, mtu; 16719 struct tcpopt to; 16720 int32_t slot = 0; 16721 int32_t sup_rack = 0; 16722 uint32_t cts, ms_cts, delayed, early; 16723 uint16_t add_flag = RACK_SENT_SP; 16724 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16725 uint8_t hpts_calling, doing_tlp = 0; 16726 uint32_t cwnd_to_use, pace_max_seg; 16727 int32_t do_a_prefetch = 0; 16728 int32_t prefetch_rsm = 0; 16729 int32_t orig_len = 0; 16730 struct timeval tv; 16731 int32_t prefetch_so_done = 0; 16732 struct tcp_log_buffer *lgb; 16733 struct inpcb *inp; 16734 struct sockbuf *sb; 16735 uint64_t ts_val = 0; 16736 #ifdef TCP_ACCOUNTING 16737 uint64_t crtsc; 16738 #endif 16739 #ifdef INET6 16740 struct ip6_hdr *ip6 = NULL; 16741 int32_t isipv6; 16742 #endif 16743 bool hw_tls = false; 16744 16745 /* setup and take the cache hits here */ 16746 rack = (struct tcp_rack *)tp->t_fb_ptr; 16747 #ifdef TCP_ACCOUNTING 16748 sched_pin(); 16749 ts_val = get_cyclecount(); 16750 #endif 16751 hpts_calling = rack->rc_inp->inp_hpts_calls; 16752 NET_EPOCH_ASSERT(); 16753 INP_WLOCK_ASSERT(rack->rc_inp); 16754 #ifdef TCP_OFFLOAD 16755 if (tp->t_flags & TF_TOE) { 16756 #ifdef TCP_ACCOUNTING 16757 sched_unpin(); 16758 #endif 16759 return (tcp_offload_output(tp)); 16760 } 16761 #endif 16762 /* 16763 * For TFO connections in SYN_RECEIVED, only allow the initial 16764 * SYN|ACK and those sent by the retransmit timer. 
16765 */ 16766 if (IS_FASTOPEN(tp->t_flags) && 16767 (tp->t_state == TCPS_SYN_RECEIVED) && 16768 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16769 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16770 #ifdef TCP_ACCOUNTING 16771 sched_unpin(); 16772 #endif 16773 return (0); 16774 } 16775 #ifdef INET6 16776 if (rack->r_state) { 16777 /* Use the cache line loaded if possible */ 16778 isipv6 = rack->r_is_v6; 16779 } else { 16780 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16781 } 16782 #endif 16783 early = 0; 16784 cts = tcp_get_usecs(&tv); 16785 ms_cts = tcp_tv_to_mssectick(&tv); 16786 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16787 tcp_in_hpts(rack->rc_inp)) { 16788 /* 16789 * We are on the hpts for some timer but not hptsi output. 16790 * Remove from the hpts unconditionally. 16791 */ 16792 rack_timer_cancel(tp, rack, cts, __LINE__); 16793 } 16794 /* Are we pacing and late? */ 16795 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16796 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16797 /* We are delayed */ 16798 delayed = cts - rack->r_ctl.rc_last_output_to; 16799 } else { 16800 delayed = 0; 16801 } 16802 /* Do the timers, which may override the pacer */ 16803 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16804 int retval; 16805 16806 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16807 &doing_tlp); 16808 if (retval != 0) { 16809 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16810 #ifdef TCP_ACCOUNTING 16811 sched_unpin(); 16812 #endif 16813 /* 16814 * If timers want tcp_drop(), then pass error out, 16815 * otherwise suppress it. 16816 */ 16817 return (retval < 0 ? retval : 0); 16818 } 16819 } 16820 if (rack->rc_in_persist) { 16821 if (tcp_in_hpts(rack->rc_inp) == 0) { 16822 /* Timer is not running */ 16823 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16824 } 16825 #ifdef TCP_ACCOUNTING 16826 sched_unpin(); 16827 #endif 16828 return (0); 16829 } 16830 if ((rack->rc_ack_required == 1) && 16831 (rack->r_timer_override == 0)){ 16832 /* A timeout occurred and no ack has arrived */ 16833 if (tcp_in_hpts(rack->rc_inp) == 0) { 16834 /* Timer is not running */ 16835 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16836 } 16837 #ifdef TCP_ACCOUNTING 16838 sched_unpin(); 16839 #endif 16840 return (0); 16841 } 16842 if ((rack->r_timer_override) || 16843 (rack->rc_ack_can_sendout_data) || 16844 (delayed) || 16845 (tp->t_state < TCPS_ESTABLISHED)) { 16846 rack->rc_ack_can_sendout_data = 0; 16847 if (tcp_in_hpts(rack->rc_inp)) 16848 tcp_hpts_remove(rack->rc_inp); 16849 } else if (tcp_in_hpts(rack->rc_inp)) { 16850 /* 16851 * On the hpts you can't pass even if ACKNOW is on, we will 16852 * when the hpts fires. 
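 * Once we are allowed past this point, the code below finishes the
 * pacer early/late accounting started above: "delayed" was measured
 * before the timers ran, "early" is measured here, and both are rolled
 * into running aggregates.  Condensed, with the PACE_PKT_OUTPUT flag
 * handling omitted:
 *
 *   if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts))
 *           early = rack->r_ctl.rc_last_output_to - cts;
 *   if (delayed)
 *           rack->r_ctl.rc_agg_delayed += delayed;
 *   else if (early)
 *           rack->r_ctl.rc_agg_early += early;
 *
 * Those aggregates feed back into the next pacing-slot calculation so
 * that a timer firing off-schedule does not skew the send rate.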
16853 */ 16854 #ifdef TCP_ACCOUNTING 16855 crtsc = get_cyclecount(); 16856 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16857 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16858 } 16859 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16860 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16861 tp->tcp_cnt_counters[SND_BLOCKED]++; 16862 } 16863 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16864 sched_unpin(); 16865 #endif 16866 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16867 return (0); 16868 } 16869 rack->rc_inp->inp_hpts_calls = 0; 16870 /* Finish out both pacing early and late accounting */ 16871 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16872 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16873 early = rack->r_ctl.rc_last_output_to - cts; 16874 } else 16875 early = 0; 16876 if (delayed) { 16877 rack->r_ctl.rc_agg_delayed += delayed; 16878 rack->r_late = 1; 16879 } else if (early) { 16880 rack->r_ctl.rc_agg_early += early; 16881 rack->r_early = 1; 16882 } 16883 /* Now that early/late accounting is done turn off the flag */ 16884 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16885 rack->r_wanted_output = 0; 16886 rack->r_timer_override = 0; 16887 if ((tp->t_state != rack->r_state) && 16888 TCPS_HAVEESTABLISHED(tp->t_state)) { 16889 rack_set_state(tp, rack); 16890 } 16891 if ((rack->r_fast_output) && 16892 (doing_tlp == 0) && 16893 (tp->rcv_numsacks == 0)) { 16894 int ret; 16895 16896 error = 0; 16897 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16898 if (ret >= 0) 16899 return(ret); 16900 else if (error) { 16901 inp = rack->rc_inp; 16902 so = inp->inp_socket; 16903 sb = &so->so_snd; 16904 goto nomore; 16905 } 16906 } 16907 inp = rack->rc_inp; 16908 /* 16909 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16910 * only allow the initial SYN or SYN|ACK and those sent 16911 * by the retransmit timer. 16912 */ 16913 if (IS_FASTOPEN(tp->t_flags) && 16914 ((tp->t_state == TCPS_SYN_RECEIVED) || 16915 (tp->t_state == TCPS_SYN_SENT)) && 16916 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16917 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16918 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16919 so = inp->inp_socket; 16920 sb = &so->so_snd; 16921 goto just_return_nolock; 16922 } 16923 /* 16924 * Determine length of data that should be transmitted, and flags 16925 * that will be used. If there is some data or critical controls 16926 * (SYN, RST) to send, then transmit; otherwise, investigate 16927 * further. 
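 * A note on the idle handling that comes first: the connection counts
 * as having gone idle when nothing is outstanding (or TF_LASTIDLE was
 * left set by a previous pass), and, when the idle-reduce option is
 * enabled, a full retransmit-timeout's worth of silence restarts the
 * congestion window.  Condensed from the code below:
 *
 *   idle = (tp->t_flags & TF_LASTIDLE) ||
 *          (tp->snd_max == tp->snd_una);
 *   if (tp->t_idle_reduce && idle &&
 *       (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
 *           rack_cc_after_idle(rack, tp);
 *
 * Separately, an idle gap longer than rack_min_probertt_hold is
 * credited as a probe-rtt interval so the pacer's min-RTT state does
 * not go stale while the connection was quiet.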
16928 */ 16929 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16930 if (tp->t_idle_reduce) { 16931 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16932 rack_cc_after_idle(rack, tp); 16933 } 16934 tp->t_flags &= ~TF_LASTIDLE; 16935 if (idle) { 16936 if (tp->t_flags & TF_MORETOCOME) { 16937 tp->t_flags |= TF_LASTIDLE; 16938 idle = 0; 16939 } 16940 } 16941 if ((tp->snd_una == tp->snd_max) && 16942 rack->r_ctl.rc_went_idle_time && 16943 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16944 idle = cts - rack->r_ctl.rc_went_idle_time; 16945 if (idle > rack_min_probertt_hold) { 16946 /* Count as a probe rtt */ 16947 if (rack->in_probe_rtt == 0) { 16948 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16949 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16950 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16951 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16952 } else { 16953 rack_exit_probertt(rack, cts); 16954 } 16955 } 16956 idle = 0; 16957 } 16958 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16959 rack_init_fsb_block(tp, rack); 16960 again: 16961 /* 16962 * If we've recently taken a timeout, snd_max will be greater than 16963 * snd_nxt. There may be SACK information that allows us to avoid 16964 * resending already delivered data. Adjust snd_nxt accordingly. 16965 */ 16966 sendalot = 0; 16967 cts = tcp_get_usecs(&tv); 16968 ms_cts = tcp_tv_to_mssectick(&tv); 16969 tso = 0; 16970 mtu = 0; 16971 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16972 minseg = segsiz; 16973 if (rack->r_ctl.rc_pace_max_segs == 0) 16974 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16975 else 16976 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16977 sb_offset = tp->snd_max - tp->snd_una; 16978 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16979 flags = tcp_outflags[tp->t_state]; 16980 while (rack->rc_free_cnt < rack_free_cache) { 16981 rsm = rack_alloc(rack); 16982 if (rsm == NULL) { 16983 if (inp->inp_hpts_calls) 16984 /* Retry in a ms */ 16985 slot = (1 * HPTS_USEC_IN_MSEC); 16986 so = inp->inp_socket; 16987 sb = &so->so_snd; 16988 goto just_return_nolock; 16989 } 16990 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16991 rack->rc_free_cnt++; 16992 rsm = NULL; 16993 } 16994 if (inp->inp_hpts_calls) 16995 inp->inp_hpts_calls = 0; 16996 sack_rxmit = 0; 16997 len = 0; 16998 rsm = NULL; 16999 if (flags & TH_RST) { 17000 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 17001 so = inp->inp_socket; 17002 sb = &so->so_snd; 17003 goto send; 17004 } 17005 if (rack->r_ctl.rc_resend) { 17006 /* Retransmit timer */ 17007 rsm = rack->r_ctl.rc_resend; 17008 rack->r_ctl.rc_resend = NULL; 17009 len = rsm->r_end - rsm->r_start; 17010 sack_rxmit = 1; 17011 sendalot = 0; 17012 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17013 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17014 __func__, __LINE__, 17015 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17016 sb_offset = rsm->r_start - tp->snd_una; 17017 if (len >= segsiz) 17018 len = segsiz; 17019 } else if (rack->r_collapse_point_valid && 17020 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 17021 /* 17022 * If an RSM is returned then enough time has passed 17023 * for us to retransmit it. Move up the collapse point, 17024 * since this rsm has its chance to retransmit now. 
17025 */ 17026 rack_trace_point(rack, RACK_TP_COLLAPSED_RXT); 17027 rack->r_ctl.last_collapse_point = rsm->r_end; 17028 /* Are we done? */ 17029 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 17030 rack->r_ctl.high_collapse_point)) 17031 rack->r_collapse_point_valid = 0; 17032 sack_rxmit = 1; 17033 /* We are not doing a TLP */ 17034 doing_tlp = 0; 17035 len = rsm->r_end - rsm->r_start; 17036 sb_offset = rsm->r_start - tp->snd_una; 17037 sendalot = 0; 17038 if ((rack->full_size_rxt == 0) && 17039 (rack->shape_rxt_to_pacing_min == 0) && 17040 (len >= segsiz)) 17041 len = segsiz; 17042 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 17043 /* We have a retransmit that takes precedence */ 17044 if ((!IN_FASTRECOVERY(tp->t_flags)) && 17045 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 17046 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 17047 /* Enter recovery if not induced by a time-out */ 17048 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 17049 } 17050 #ifdef INVARIANTS 17051 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 17052 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 17053 tp, rack, rsm, rsm->r_start, tp->snd_una); 17054 } 17055 #endif 17056 len = rsm->r_end - rsm->r_start; 17057 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17058 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17059 __func__, __LINE__, 17060 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17061 sb_offset = rsm->r_start - tp->snd_una; 17062 sendalot = 0; 17063 if (len >= segsiz) 17064 len = segsiz; 17065 if (len > 0) { 17066 sack_rxmit = 1; 17067 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 17068 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 17069 min(len, segsiz)); 17070 } 17071 } else if (rack->r_ctl.rc_tlpsend) { 17072 /* Tail loss probe */ 17073 long cwin; 17074 long tlen; 17075 17076 /* 17077 * Check if we can do a TLP with a RACK'd packet 17078 * this can happen if we are not doing the rack 17079 * cheat and we skipped to a TLP and it 17080 * went off. 17081 */ 17082 rsm = rack->r_ctl.rc_tlpsend; 17083 /* We are doing a TLP make sure the flag is preent */ 17084 rsm->r_flags |= RACK_TLP; 17085 rack->r_ctl.rc_tlpsend = NULL; 17086 sack_rxmit = 1; 17087 tlen = rsm->r_end - rsm->r_start; 17088 if (tlen > segsiz) 17089 tlen = segsiz; 17090 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 17091 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 17092 __func__, __LINE__, 17093 rsm->r_start, tp->snd_una, tp, rack, rsm)); 17094 sb_offset = rsm->r_start - tp->snd_una; 17095 cwin = min(tp->snd_wnd, tlen); 17096 len = cwin; 17097 } 17098 if (rack->r_must_retran && 17099 (doing_tlp == 0) && 17100 (SEQ_GT(tp->snd_max, tp->snd_una)) && 17101 (rsm == NULL)) { 17102 /* 17103 * There are two different ways that we 17104 * can get into this block: 17105 * a) This is a non-sack connection, we had a time-out 17106 * and thus r_must_retran was set and everything 17107 * left outstanding as been marked for retransmit. 17108 * b) The MTU of the path shrank, so that everything 17109 * was marked to be retransmitted with the smaller 17110 * mtu and r_must_retran was set. 17111 * 17112 * This means that we expect the sendmap (outstanding) 17113 * to all be marked must. We can use the tmap to 17114 * look at them. 17115 * 17116 */ 17117 int sendwin, flight; 17118 17119 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 17120 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 17121 if (flight >= sendwin) { 17122 /* 17123 * We can't send yet. 
17124 */ 17125 so = inp->inp_socket; 17126 sb = &so->so_snd; 17127 goto just_return_nolock; 17128 } 17129 /* 17130 * This is the case a/b mentioned above. All 17131 * outstanding/not-acked should be marked. 17132 * We can use the tmap to find them. 17133 */ 17134 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17135 if (rsm == NULL) { 17136 /* TSNH */ 17137 rack->r_must_retran = 0; 17138 rack->r_ctl.rc_out_at_rto = 0; 17139 so = inp->inp_socket; 17140 sb = &so->so_snd; 17141 goto just_return_nolock; 17142 } 17143 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 17144 /* 17145 * The first one does not have the flag, did we collapse 17146 * further up in our list? 17147 */ 17148 rack->r_must_retran = 0; 17149 rack->r_ctl.rc_out_at_rto = 0; 17150 rsm = NULL; 17151 sack_rxmit = 0; 17152 } else { 17153 sack_rxmit = 1; 17154 len = rsm->r_end - rsm->r_start; 17155 sb_offset = rsm->r_start - tp->snd_una; 17156 sendalot = 0; 17157 if ((rack->full_size_rxt == 0) && 17158 (rack->shape_rxt_to_pacing_min == 0) && 17159 (len >= segsiz)) 17160 len = segsiz; 17161 /* 17162 * Delay removing the flag RACK_MUST_RXT so 17163 * that the fastpath for retransmit will 17164 * work with this rsm. 17165 */ 17166 } 17167 } 17168 /* 17169 * Enforce a connection sendmap count limit if set 17170 * as long as we are not retransmiting. 17171 */ 17172 if ((rsm == NULL) && 17173 (rack->do_detection == 0) && 17174 (V_tcp_map_entries_limit > 0) && 17175 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17176 counter_u64_add(rack_to_alloc_limited, 1); 17177 if (!rack->alloc_limit_reported) { 17178 rack->alloc_limit_reported = 1; 17179 counter_u64_add(rack_alloc_limited_conns, 1); 17180 } 17181 so = inp->inp_socket; 17182 sb = &so->so_snd; 17183 goto just_return_nolock; 17184 } 17185 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17186 /* we are retransmitting the fin */ 17187 len--; 17188 if (len) { 17189 /* 17190 * When retransmitting data do *not* include the 17191 * FIN. This could happen from a TLP probe. 
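 * Note that once the FIN has been stripped there is a shortcut just
 * below: when the fast-send block has been initialized and the
 * rack_use_rsm_rfo knob is on, the retransmission is handed to
 * rack_fast_rsm_output(), which builds the packet from the prebuilt
 * header template and skips the rest of this (much heavier) routine:
 *
 *   if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo &&
 *       ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
 *           if (rack_fast_rsm_output(tp, rack, rsm, ts_val, cts,
 *               ms_cts, &tv, len, doing_tlp) == 0)
 *                   return (0);
 *   }
 *
 * A non-zero return from the fast path means it could not handle this
 * particular rsm, and we simply fall through to the slow path.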
17192 */ 17193 flags &= ~TH_FIN; 17194 } 17195 } 17196 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17197 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17198 int ret; 17199 17200 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17201 if (ret == 0) 17202 return (0); 17203 } 17204 so = inp->inp_socket; 17205 sb = &so->so_snd; 17206 if (do_a_prefetch == 0) { 17207 kern_prefetch(sb, &do_a_prefetch); 17208 do_a_prefetch = 1; 17209 } 17210 #ifdef NETFLIX_SHARED_CWND 17211 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17212 rack->rack_enable_scwnd) { 17213 /* We are doing cwnd sharing */ 17214 if (rack->gp_ready && 17215 (rack->rack_attempted_scwnd == 0) && 17216 (rack->r_ctl.rc_scw == NULL) && 17217 tp->t_lib) { 17218 /* The pcbid is in, lets make an attempt */ 17219 counter_u64_add(rack_try_scwnd, 1); 17220 rack->rack_attempted_scwnd = 1; 17221 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17222 &rack->r_ctl.rc_scw_index, 17223 segsiz); 17224 } 17225 if (rack->r_ctl.rc_scw && 17226 (rack->rack_scwnd_is_idle == 1) && 17227 sbavail(&so->so_snd)) { 17228 /* we are no longer out of data */ 17229 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17230 rack->rack_scwnd_is_idle = 0; 17231 } 17232 if (rack->r_ctl.rc_scw) { 17233 /* First lets update and get the cwnd */ 17234 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17235 rack->r_ctl.rc_scw_index, 17236 tp->snd_cwnd, tp->snd_wnd, segsiz); 17237 } 17238 } 17239 #endif 17240 /* 17241 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17242 * state flags. 17243 */ 17244 if (tp->t_flags & TF_NEEDFIN) 17245 flags |= TH_FIN; 17246 if (tp->t_flags & TF_NEEDSYN) 17247 flags |= TH_SYN; 17248 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17249 void *end_rsm; 17250 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17251 if (end_rsm) 17252 kern_prefetch(end_rsm, &prefetch_rsm); 17253 prefetch_rsm = 1; 17254 } 17255 SOCKBUF_LOCK(sb); 17256 /* 17257 * If snd_nxt == snd_max and we have transmitted a FIN, the 17258 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17259 * negative length. This can also occur when TCP opens up its 17260 * congestion window while receiving additional duplicate acks after 17261 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17262 * the fast-retransmit. 17263 * 17264 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17265 * set to snd_una, the sb_offset will be 0, and the length may wind 17266 * up 0. 17267 * 17268 * If sack_rxmit is true we are retransmitting from the scoreboard 17269 * in which case len is already set. 
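 * For the new-data case the amount we may send is bounded three ways:
 * by what the application has queued, by the peer's receive window,
 * and, while in recovery with PRR enabled, by the PRR send count.
 * Heavily condensed from the code below (the TLP forced-data case and
 * the underflow/empty-the-sockbuf checks are omitted):
 *
 *   avail = sbavail(sb);
 *   out   = tp->snd_max - tp->snd_una;
 *   if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr)
 *           len = rack_what_can_we_send(tp, rack, cwnd_to_use,
 *                     avail, sb_offset);
 *   else {
 *           len = min(avail - sb_offset, tp->snd_wnd - out);
 *           len = min(len, rack->r_ctl.rc_prr_sndcnt);
 *   }
 *
 * In the PRR branch the send is further clamped to one MSS unless the
 * rc_prr_sendalot override is set, keeping recovery sends on a short
 * pacing leash.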
17270 */ 17271 if ((sack_rxmit == 0) && 17272 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17273 uint32_t avail; 17274 17275 avail = sbavail(sb); 17276 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17277 sb_offset = tp->snd_nxt - tp->snd_una; 17278 else 17279 sb_offset = 0; 17280 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17281 if (rack->r_ctl.rc_tlp_new_data) { 17282 /* TLP is forcing out new data */ 17283 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17284 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17285 } 17286 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17287 if (tp->snd_wnd > sb_offset) 17288 len = tp->snd_wnd - sb_offset; 17289 else 17290 len = 0; 17291 } else { 17292 len = rack->r_ctl.rc_tlp_new_data; 17293 } 17294 rack->r_ctl.rc_tlp_new_data = 0; 17295 } else { 17296 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17297 } 17298 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17299 /* 17300 * For prr=off, we need to send only 1 MSS 17301 * at a time. We do this because another sack could 17302 * be arriving that causes us to send retransmits and 17303 * we don't want to be on a long pace due to a larger send 17304 * that keeps us from sending out the retransmit. 17305 */ 17306 len = segsiz; 17307 } 17308 } else { 17309 uint32_t outstanding; 17310 /* 17311 * We are inside of a Fast recovery episode, this 17312 * is caused by a SACK or 3 dup acks. At this point 17313 * we have sent all the retransmissions and we rely 17314 * on PRR to dictate what we will send in the form of 17315 * new data. 17316 */ 17317 17318 outstanding = tp->snd_max - tp->snd_una; 17319 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17320 if (tp->snd_wnd > outstanding) { 17321 len = tp->snd_wnd - outstanding; 17322 /* Check to see if we have the data */ 17323 if ((sb_offset + len) > avail) { 17324 /* It does not all fit */ 17325 if (avail > sb_offset) 17326 len = avail - sb_offset; 17327 else 17328 len = 0; 17329 } 17330 } else { 17331 len = 0; 17332 } 17333 } else if (avail > sb_offset) { 17334 len = avail - sb_offset; 17335 } else { 17336 len = 0; 17337 } 17338 if (len > 0) { 17339 if (len > rack->r_ctl.rc_prr_sndcnt) { 17340 len = rack->r_ctl.rc_prr_sndcnt; 17341 } 17342 if (len > 0) { 17343 sub_from_prr = 1; 17344 } 17345 } 17346 if (len > segsiz) { 17347 /* 17348 * We should never send more than a MSS when 17349 * retransmitting or sending new data in prr 17350 * mode unless the override flag is on. Most 17351 * likely the PRR algorithm is not going to 17352 * let us send a lot as well :-) 17353 */ 17354 if (rack->r_ctl.rc_prr_sendalot == 0) { 17355 len = segsiz; 17356 } 17357 } else if (len < segsiz) { 17358 /* 17359 * Do we send any? The idea here is if the 17360 * send empty's the socket buffer we want to 17361 * do it. However if not then lets just wait 17362 * for our prr_sndcnt to get bigger. 17363 */ 17364 long leftinsb; 17365 17366 leftinsb = sbavail(sb) - sb_offset; 17367 if (leftinsb > len) { 17368 /* This send does not empty the sb */ 17369 len = 0; 17370 } 17371 } 17372 } 17373 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17374 /* 17375 * If you have not established 17376 * and are not doing FAST OPEN 17377 * no data please. 
17378 */ 17379 if ((sack_rxmit == 0) && 17380 (!IS_FASTOPEN(tp->t_flags))){ 17381 len = 0; 17382 sb_offset = 0; 17383 } 17384 } 17385 if (prefetch_so_done == 0) { 17386 kern_prefetch(so, &prefetch_so_done); 17387 prefetch_so_done = 1; 17388 } 17389 /* 17390 * Lop off SYN bit if it has already been sent. However, if this is 17391 * SYN-SENT state and if segment contains data and if we don't know 17392 * that foreign host supports TAO, suppress sending segment. 17393 */ 17394 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17395 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17396 /* 17397 * When sending additional segments following a TFO SYN|ACK, 17398 * do not include the SYN bit. 17399 */ 17400 if (IS_FASTOPEN(tp->t_flags) && 17401 (tp->t_state == TCPS_SYN_RECEIVED)) 17402 flags &= ~TH_SYN; 17403 } 17404 /* 17405 * Be careful not to send data and/or FIN on SYN segments. This 17406 * measure is needed to prevent interoperability problems with not 17407 * fully conformant TCP implementations. 17408 */ 17409 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17410 len = 0; 17411 flags &= ~TH_FIN; 17412 } 17413 /* 17414 * On TFO sockets, ensure no data is sent in the following cases: 17415 * 17416 * - When retransmitting SYN|ACK on a passively-created socket 17417 * 17418 * - When retransmitting SYN on an actively created socket 17419 * 17420 * - When sending a zero-length cookie (cookie request) on an 17421 * actively created socket 17422 * 17423 * - When the socket is in the CLOSED state (RST is being sent) 17424 */ 17425 if (IS_FASTOPEN(tp->t_flags) && 17426 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17427 ((tp->t_state == TCPS_SYN_SENT) && 17428 (tp->t_tfo_client_cookie_len == 0)) || 17429 (flags & TH_RST))) { 17430 sack_rxmit = 0; 17431 len = 0; 17432 } 17433 /* Without fast-open there should never be data sent on a SYN */ 17434 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17435 tp->snd_nxt = tp->iss; 17436 len = 0; 17437 } 17438 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17439 /* We only send 1 MSS if we have a DSACK block */ 17440 add_flag |= RACK_SENT_W_DSACK; 17441 len = segsiz; 17442 } 17443 orig_len = len; 17444 if (len <= 0) { 17445 /* 17446 * If FIN has been sent but not acked, but we haven't been 17447 * called to retransmit, len will be < 0. Otherwise, window 17448 * shrank after we sent into it. If window shrank to 0, 17449 * cancel pending retransmit, pull snd_nxt back to (closed) 17450 * window, and set the persist timer if it isn't already 17451 * going. If the window didn't close completely, just wait 17452 * for an ACK. 17453 * 17454 * We also do a general check here to ensure that we will 17455 * set the persist timer when we have data to send, but a 17456 * 0-byte window. This makes sure the persist timer is set 17457 * even if the packet hits one of the "goto send" lines 17458 * below. 17459 */ 17460 len = 0; 17461 if ((tp->snd_wnd == 0) && 17462 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17463 (tp->snd_una == tp->snd_max) && 17464 (sb_offset < (int)sbavail(sb))) { 17465 rack_enter_persist(tp, rack, cts); 17466 } 17467 } else if ((rsm == NULL) && 17468 (doing_tlp == 0) && 17469 (len < pace_max_seg)) { 17470 /* 17471 * We are not sending a maximum sized segment for 17472 * some reason. Should we not send anything (think 17473 * sws or persists)? 
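 * The cases that follow all reduce to "sending a runt now would hurt
 * pacing more than waiting helps latency", so len is zeroed and we
 * wait for more window, more acks, or more data.  The side effect that
 * matters is persist entry: if the send is suppressed because the
 * peer's window is tiny and nothing is in flight, the persist timer
 * must be armed or the connection could stall forever.  Condensed from
 * the first case below:
 *
 *   if ((tp->snd_wnd < min(rack->r_ctl.rc_high_rwnd / 2, minseg)) &&
 *       TCPS_HAVEESTABLISHED(tp->t_state) && (len < minseg)) {
 *           len = 0;
 *           if (tp->snd_max == tp->snd_una)
 *                   rack_enter_persist(tp, rack, cts);
 *   }
 *
 * (The real test also requires that this send would not have emptied
 * the socket buffer.)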
17474 */ 17475 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17476 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17477 (len < minseg) && 17478 (len < (int)(sbavail(sb) - sb_offset))) { 17479 /* 17480 * Here the rwnd is less than 17481 * the minimum pacing size, this is not a retransmit, 17482 * we are established and 17483 * the send is not the last in the socket buffer 17484 * we send nothing, and we may enter persists 17485 * if nothing is outstanding. 17486 */ 17487 len = 0; 17488 if (tp->snd_max == tp->snd_una) { 17489 /* 17490 * Nothing out we can 17491 * go into persists. 17492 */ 17493 rack_enter_persist(tp, rack, cts); 17494 } 17495 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17496 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17497 (len < (int)(sbavail(sb) - sb_offset)) && 17498 (len < minseg)) { 17499 /* 17500 * Here we are not retransmitting, and 17501 * the cwnd is not so small that we could 17502 * not send at least a min size (rxt timer 17503 * not having gone off), We have 2 segments or 17504 * more already in flight, its not the tail end 17505 * of the socket buffer and the cwnd is blocking 17506 * us from sending out a minimum pacing segment size. 17507 * Lets not send anything. 17508 */ 17509 len = 0; 17510 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17511 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17512 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17513 (len < (int)(sbavail(sb) - sb_offset)) && 17514 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17515 /* 17516 * Here we have a send window but we have 17517 * filled it up and we can't send another pacing segment. 17518 * We also have in flight more than 2 segments 17519 * and we are not completing the sb i.e. we allow 17520 * the last bytes of the sb to go out even if 17521 * its not a full pacing segment. 17522 */ 17523 len = 0; 17524 } else if ((rack->r_ctl.crte != NULL) && 17525 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17526 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17527 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17528 (len < (int)(sbavail(sb) - sb_offset))) { 17529 /* 17530 * Here we are doing hardware pacing, this is not a TLP, 17531 * we are not sending a pace max segment size, there is rwnd 17532 * room to send at least N pace_max_seg, the cwnd is greater 17533 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17534 * more segments in flight and its not the tail of the socket buffer. 17535 * 17536 * We don't want to send instead we need to get more ack's in to 17537 * allow us to send a full pacing segment. Normally, if we are pacing 17538 * about the right speed, we should have finished our pacing 17539 * send as most of the acks have come back if we are at the 17540 * right rate. This is a bit fuzzy since return path delay 17541 * can delay the acks, which is why we want to make sure we 17542 * have cwnd space to have a bit more than a max pace segments in flight. 17543 * 17544 * If we have not gotten our acks back we are pacing at too high a 17545 * rate delaying will not hurt and will bring our GP estimate down by 17546 * injecting the delay. If we don't do this we will send 17547 * 2 MSS out in response to the acks being clocked in which 17548 * defeats the point of hw-pacing (i.e. to help us get 17549 * larger TSO's out). 17550 */ 17551 len = 0; 17552 17553 } 17554 17555 } 17556 /* len will be >= 0 after this point. 
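 * The TSO decision below collapses to: the NIC offers TSO, there is
 * more than one segment of payload, and nothing would force the
 * headers to differ per segment (no UDP tunneling port, no MD5
 * signature, no SACK blocks to advertise, not a SACK retransmission,
 * and no IP options).  As a single boolean, equivalent to the code
 * that follows:
 *
 *   tso = (tp->t_flags & TF_TSO) && V_tcp_do_tso && (len > segsiz) &&
 *       (tp->t_port == 0) && ((tp->t_flags & TF_SIGNATURE) == 0) &&
 *       (tp->rcv_numsacks == 0) && (sack_rxmit == 0) &&
 *       (ipoptlen == 0);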
*/ 17557 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 17558 rack_sndbuf_autoscale(rack); 17559 /* 17560 * Decide if we can use TCP Segmentation Offloading (if supported by 17561 * hardware). 17562 * 17563 * TSO may only be used if we are in a pure bulk sending state. The 17564 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 17565 * options prevent using TSO. With TSO the TCP header is the same 17566 * (except for the sequence number) for all generated packets. This 17567 * makes it impossible to transmit any options which vary per 17568 * generated segment or packet. 17569 * 17570 * IPv4 handling has a clear separation of ip options and ip header 17571 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does 17572 * the right thing below to provide length of just ip options and thus 17573 * checking for ipoptlen is enough to decide if ip options are present. 17574 */ 17575 ipoptlen = 0; 17576 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17577 /* 17578 * Pre-calculate here as we save another lookup into the darknesses 17579 * of IPsec that way and can actually decide if TSO is ok. 17580 */ 17581 #ifdef INET6 17582 if (isipv6 && IPSEC_ENABLED(ipv6)) 17583 ipsec_optlen = IPSEC_HDRSIZE(ipv6, tp->t_inpcb); 17584 #ifdef INET 17585 else 17586 #endif 17587 #endif /* INET6 */ 17588 #ifdef INET 17589 if (IPSEC_ENABLED(ipv4)) 17590 ipsec_optlen = IPSEC_HDRSIZE(ipv4, tp->t_inpcb); 17591 #endif /* INET */ 17592 #endif 17593 17594 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 17595 ipoptlen += ipsec_optlen; 17596 #endif 17597 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && 17598 (tp->t_port == 0) && 17599 ((tp->t_flags & TF_SIGNATURE) == 0) && 17600 tp->rcv_numsacks == 0 && sack_rxmit == 0 && 17601 ipoptlen == 0) 17602 tso = 1; 17603 { 17604 uint32_t outstanding __unused; 17605 17606 outstanding = tp->snd_max - tp->snd_una; 17607 if (tp->t_flags & TF_SENTFIN) { 17608 /* 17609 * If we sent a fin, snd_max is 1 higher than 17610 * snd_una 17611 */ 17612 outstanding--; 17613 } 17614 if (sack_rxmit) { 17615 if ((rsm->r_flags & RACK_HAS_FIN) == 0) 17616 flags &= ~TH_FIN; 17617 } else { 17618 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + 17619 sbused(sb))) 17620 flags &= ~TH_FIN; 17621 } 17622 } 17623 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), 17624 (long)TCP_MAXWIN << tp->rcv_scale); 17625 17626 /* 17627 * Sender silly window avoidance. We transmit under the following 17628 * conditions when len is non-zero: 17629 * 17630 * - We have a full segment (or more with TSO) - This is the last 17631 * buffer in a write()/send() and we are either idle or running 17632 * NODELAY - we've timed out (e.g. persist timer) - we have more 17633 * then 1/2 the maximum send window's worth of data (receiver may be 17634 * limited the window size) - we need to retransmit 17635 */ 17636 if (len) { 17637 if (len >= segsiz) { 17638 goto send; 17639 } 17640 /* 17641 * NOTE! on localhost connections an 'ack' from the remote 17642 * end may occur synchronously with the output and cause us 17643 * to flush a buffer queued with moretocome. 
XXX 17644 * 17645 */ 17646 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ 17647 (idle || (tp->t_flags & TF_NODELAY)) && 17648 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17649 (tp->t_flags & TF_NOPUSH) == 0) { 17650 pass = 2; 17651 goto send; 17652 } 17653 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 17654 pass = 22; 17655 goto send; 17656 } 17657 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 17658 pass = 4; 17659 goto send; 17660 } 17661 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { /* retransmit case */ 17662 pass = 5; 17663 goto send; 17664 } 17665 if (sack_rxmit) { 17666 pass = 6; 17667 goto send; 17668 } 17669 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && 17670 (ctf_outstanding(tp) < (segsiz * 2))) { 17671 /* 17672 * We have less than two MSS outstanding (delayed ack) 17673 * and our rwnd will not let us send a full sized 17674 * MSS. Lets go ahead and let this small segment 17675 * out because we want to try to have at least two 17676 * packets inflight to not be caught by delayed ack. 17677 */ 17678 pass = 12; 17679 goto send; 17680 } 17681 } 17682 /* 17683 * Sending of standalone window updates. 17684 * 17685 * Window updates are important when we close our window due to a 17686 * full socket buffer and are opening it again after the application 17687 * reads data from it. Once the window has opened again and the 17688 * remote end starts to send again the ACK clock takes over and 17689 * provides the most current window information. 17690 * 17691 * We must avoid the silly window syndrome whereas every read from 17692 * the receive buffer, no matter how small, causes a window update 17693 * to be sent. We also should avoid sending a flurry of window 17694 * updates when the socket buffer had queued a lot of data and the 17695 * application is doing small reads. 17696 * 17697 * Prevent a flurry of pointless window updates by only sending an 17698 * update when we can increase the advertized window by more than 17699 * 1/4th of the socket buffer capacity. When the buffer is getting 17700 * full or is very small be more aggressive and send an update 17701 * whenever we can increase by two mss sized segments. In all other 17702 * situations the ACK's to new incoming data will carry further 17703 * window increases. 17704 * 17705 * Don't send an independent window update if a delayed ACK is 17706 * pending (it will get piggy-backed on it) or the remote side 17707 * already has done a half-close and won't send more data. Skip 17708 * this if the connection is in T/TCP half-open state. 17709 */ 17710 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 17711 !(tp->t_flags & TF_DELACK) && 17712 !TCPS_HAVERCVDFIN(tp->t_state)) { 17713 /* 17714 * "adv" is the amount we could increase the window, taking 17715 * into account that we are limited by TCP_MAXWIN << 17716 * tp->rcv_scale. 17717 */ 17718 int32_t adv; 17719 int oldwin; 17720 17721 adv = recwin; 17722 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 17723 oldwin = (tp->rcv_adv - tp->rcv_nxt); 17724 if (adv > oldwin) 17725 adv -= oldwin; 17726 else { 17727 /* We can't increase the window */ 17728 adv = 0; 17729 } 17730 } else 17731 oldwin = 0; 17732 17733 /* 17734 * If the new window size ends up being the same as or less 17735 * than the old size when it is scaled, then don't force 17736 * a window update. 
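 * Putting the whole update policy in one place: after the scaled-size
 * check just below, a standalone window update is sent only when the
 * advertisable increase is worth a segment on the wire.  Equivalent to
 * the tests that follow:
 *
 *   if (adv >= (int32_t)(2 * segsiz) &&
 *       (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
 *        recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
 *        so->so_rcv.sb_hiwat <= 8 * segsiz))
 *           goto send;
 *   if (2 * adv >= (int32_t)so->so_rcv.sb_hiwat)
 *           goto send;
 *
 * In words: update when we can open the window by at least two
 * segments and either the gain is a quarter of the buffer, the buffer
 * is nearly full, or the buffer is small; or whenever the advertised
 * window would at least double.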
17737 */ 17738 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17739 goto dontupdate; 17740 17741 if (adv >= (int32_t)(2 * segsiz) && 17742 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17743 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17744 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17745 pass = 7; 17746 goto send; 17747 } 17748 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17749 pass = 23; 17750 goto send; 17751 } 17752 } 17753 dontupdate: 17754 17755 /* 17756 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17757 * is also a catch-all for the retransmit timer timeout case. 17758 */ 17759 if (tp->t_flags & TF_ACKNOW) { 17760 pass = 8; 17761 goto send; 17762 } 17763 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17764 pass = 9; 17765 goto send; 17766 } 17767 /* 17768 * If our state indicates that FIN should be sent and we have not 17769 * yet done so, then we need to send. 17770 */ 17771 if ((flags & TH_FIN) && 17772 (tp->snd_nxt == tp->snd_una)) { 17773 pass = 11; 17774 goto send; 17775 } 17776 /* 17777 * No reason to send a segment, just return. 17778 */ 17779 just_return: 17780 SOCKBUF_UNLOCK(sb); 17781 just_return_nolock: 17782 { 17783 int app_limited = CTF_JR_SENT_DATA; 17784 17785 if (tot_len_this_send > 0) { 17786 /* Make sure snd_nxt is up to max */ 17787 rack->r_ctl.fsb.recwin = recwin; 17788 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17789 if ((error == 0) && 17790 rack_use_rfo && 17791 ((flags & (TH_SYN|TH_FIN)) == 0) && 17792 (ipoptlen == 0) && 17793 (tp->snd_nxt == tp->snd_max) && 17794 (tp->rcv_numsacks == 0) && 17795 rack->r_fsb_inited && 17796 TCPS_HAVEESTABLISHED(tp->t_state) && 17797 (rack->r_must_retran == 0) && 17798 ((tp->t_flags & TF_NEEDFIN) == 0) && 17799 (len > 0) && (orig_len > 0) && 17800 (orig_len > len) && 17801 ((orig_len - len) >= segsiz) && 17802 ((optlen == 0) || 17803 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17804 /* We can send at least one more MSS using our fsb */ 17805 17806 rack->r_fast_output = 1; 17807 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17808 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17809 rack->r_ctl.fsb.tcp_flags = flags; 17810 rack->r_ctl.fsb.left_to_send = orig_len - len; 17811 if (hw_tls) 17812 rack->r_ctl.fsb.hw_tls = 1; 17813 else 17814 rack->r_ctl.fsb.hw_tls = 0; 17815 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17816 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17817 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17818 (tp->snd_max - tp->snd_una))); 17819 if (rack->r_ctl.fsb.left_to_send < segsiz) 17820 rack->r_fast_output = 0; 17821 else { 17822 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17823 rack->r_ctl.fsb.rfo_apply_push = 1; 17824 else 17825 rack->r_ctl.fsb.rfo_apply_push = 0; 17826 } 17827 } else 17828 rack->r_fast_output = 0; 17829 17830 17831 rack_log_fsb(rack, tp, so, flags, 17832 ipoptlen, orig_len, len, 0, 17833 1, optlen, __LINE__, 1); 17834 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17835 tp->snd_nxt = tp->snd_max; 17836 } else { 17837 int end_window = 0; 17838 uint32_t seq = tp->gput_ack; 17839 17840 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17841 if (rsm) { 17842 /* 17843 * Mark the last sent that we just-returned (hinting 17844 * that delayed ack may play a role in any rtt measurement). 
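 * The remainder of this just-return block classifies *why* nothing is
 * being sent, because pacing and goodput measurement treat the reasons
 * differently.  In the order the code below checks them:
 *
 *   CTF_JR_RWND_LIMITED - the peer's receive window is the limit
 *   CTF_JR_APP_LIMITED  - the socket buffer ran dry, or Nagle/SWS or
 *                         TF_NOPUSH is holding back a sub-MSS tail
 *   CTF_JR_CWND_LIMITED - the congestion window is the limit
 *   CTF_JR_PRR          - PRR is withholding sends during recovery
 *   CTF_JR_ASSESSING    - none of the above matched (worth a log)
 *
 * Cwnd-, PRR- and rwnd-limited periods end the in-progress goodput
 * measurement window only when the corresponding
 * rack_*_block_ends_measure knob says so; app-limited periods (and the
 * assessing fallback) always end it.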
17845 */ 17846 rsm->r_just_ret = 1; 17847 } 17848 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17849 rack->r_ctl.rc_agg_delayed = 0; 17850 rack->r_early = 0; 17851 rack->r_late = 0; 17852 rack->r_ctl.rc_agg_early = 0; 17853 if ((ctf_outstanding(tp) + 17854 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17855 minseg)) >= tp->snd_wnd) { 17856 /* We are limited by the rwnd */ 17857 app_limited = CTF_JR_RWND_LIMITED; 17858 if (IN_FASTRECOVERY(tp->t_flags)) 17859 rack->r_ctl.rc_prr_sndcnt = 0; 17860 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17861 /* We are limited by whats available -- app limited */ 17862 app_limited = CTF_JR_APP_LIMITED; 17863 if (IN_FASTRECOVERY(tp->t_flags)) 17864 rack->r_ctl.rc_prr_sndcnt = 0; 17865 } else if ((idle == 0) && 17866 ((tp->t_flags & TF_NODELAY) == 0) && 17867 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17868 (len < segsiz)) { 17869 /* 17870 * No delay is not on and the 17871 * user is sending less than 1MSS. This 17872 * brings out SWS avoidance so we 17873 * don't send. Another app-limited case. 17874 */ 17875 app_limited = CTF_JR_APP_LIMITED; 17876 } else if (tp->t_flags & TF_NOPUSH) { 17877 /* 17878 * The user has requested no push of 17879 * the last segment and we are 17880 * at the last segment. Another app 17881 * limited case. 17882 */ 17883 app_limited = CTF_JR_APP_LIMITED; 17884 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17885 /* Its the cwnd */ 17886 app_limited = CTF_JR_CWND_LIMITED; 17887 } else if (IN_FASTRECOVERY(tp->t_flags) && 17888 (rack->rack_no_prr == 0) && 17889 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17890 app_limited = CTF_JR_PRR; 17891 } else { 17892 /* Now why here are we not sending? */ 17893 #ifdef NOW 17894 #ifdef INVARIANTS 17895 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17896 #endif 17897 #endif 17898 app_limited = CTF_JR_ASSESSING; 17899 } 17900 /* 17901 * App limited in some fashion, for our pacing GP 17902 * measurements we don't want any gap (even cwnd). 17903 * Close down the measurement window. 17904 */ 17905 if (rack_cwnd_block_ends_measure && 17906 ((app_limited == CTF_JR_CWND_LIMITED) || 17907 (app_limited == CTF_JR_PRR))) { 17908 /* 17909 * The reason we are not sending is 17910 * the cwnd (or prr). We have been configured 17911 * to end the measurement window in 17912 * this case. 17913 */ 17914 end_window = 1; 17915 } else if (rack_rwnd_block_ends_measure && 17916 (app_limited == CTF_JR_RWND_LIMITED)) { 17917 /* 17918 * We are rwnd limited and have been 17919 * configured to end the measurement 17920 * window in this case. 17921 */ 17922 end_window = 1; 17923 } else if (app_limited == CTF_JR_APP_LIMITED) { 17924 /* 17925 * A true application limited period, we have 17926 * ran out of data. 17927 */ 17928 end_window = 1; 17929 } else if (app_limited == CTF_JR_ASSESSING) { 17930 /* 17931 * In the assessing case we hit the end of 17932 * the if/else and had no known reason 17933 * This will panic us under invariants.. 17934 * 17935 * If we get this out in logs we need to 17936 * investagate which reason we missed. 17937 */ 17938 end_window = 1; 17939 } 17940 if (end_window) { 17941 uint8_t log = 0; 17942 17943 /* Adjust the Gput measurement */ 17944 if ((tp->t_flags & TF_GPUTINPROG) && 17945 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17946 tp->gput_ack = tp->snd_max; 17947 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17948 /* 17949 * There is not enough to measure. 
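 * ("Not enough" means the ack range this measurement would cover,
 * tp->gput_ack - tp->gput_seq, is smaller than MIN_GP_WIN segments;
 * such a sample would be too noisy to use, so TF_GPUTINPROG is cleared
 * and the aborted measurement is only logged.)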
17950 */ 17951 tp->t_flags &= ~TF_GPUTINPROG; 17952 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17953 rack->r_ctl.rc_gp_srtt /*flex1*/, 17954 tp->gput_seq, 17955 0, 0, 18, __LINE__, NULL, 0); 17956 } else 17957 log = 1; 17958 } 17959 /* Mark the last packet has app limited */ 17960 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17961 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17962 if (rack->r_ctl.rc_app_limited_cnt == 0) 17963 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17964 else { 17965 /* 17966 * Go out to the end app limited and mark 17967 * this new one as next and move the end_appl up 17968 * to this guy. 17969 */ 17970 if (rack->r_ctl.rc_end_appl) 17971 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17972 rack->r_ctl.rc_end_appl = rsm; 17973 } 17974 rsm->r_flags |= RACK_APP_LIMITED; 17975 rack->r_ctl.rc_app_limited_cnt++; 17976 } 17977 if (log) 17978 rack_log_pacing_delay_calc(rack, 17979 rack->r_ctl.rc_app_limited_cnt, seq, 17980 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17981 } 17982 } 17983 /* Check if we need to go into persists or not */ 17984 if ((tp->snd_max == tp->snd_una) && 17985 TCPS_HAVEESTABLISHED(tp->t_state) && 17986 sbavail(sb) && 17987 (sbavail(sb) > tp->snd_wnd) && 17988 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17989 /* Yes lets make sure to move to persist before timer-start */ 17990 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17991 } 17992 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17993 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17994 } 17995 #ifdef NETFLIX_SHARED_CWND 17996 if ((sbavail(sb) == 0) && 17997 rack->r_ctl.rc_scw) { 17998 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17999 rack->rack_scwnd_is_idle = 1; 18000 } 18001 #endif 18002 #ifdef TCP_ACCOUNTING 18003 if (tot_len_this_send > 0) { 18004 crtsc = get_cyclecount(); 18005 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18006 tp->tcp_cnt_counters[SND_OUT_DATA]++; 18007 } 18008 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 18009 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18010 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 18011 } 18012 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 18013 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18014 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 18015 } 18016 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 18017 } else { 18018 crtsc = get_cyclecount(); 18019 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18020 tp->tcp_cnt_counters[SND_LIMITED]++; 18021 } 18022 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 18023 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18024 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 18025 } 18026 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 18027 } 18028 sched_unpin(); 18029 #endif 18030 return (0); 18031 18032 send: 18033 if (rsm || sack_rxmit) 18034 counter_u64_add(rack_nfto_resend, 1); 18035 else 18036 counter_u64_add(rack_non_fto_send, 1); 18037 if ((flags & TH_FIN) && 18038 sbavail(sb)) { 18039 /* 18040 * We do not transmit a FIN 18041 * with data outstanding. We 18042 * need to make it so all data 18043 * is acked first. 
18044 */ 18045 flags &= ~TH_FIN; 18046 } 18047 /* Enforce stack imposed max seg size if we have one */ 18048 if (rack->r_ctl.rc_pace_max_segs && 18049 (len > rack->r_ctl.rc_pace_max_segs)) { 18050 mark = 1; 18051 len = rack->r_ctl.rc_pace_max_segs; 18052 } 18053 SOCKBUF_LOCK_ASSERT(sb); 18054 if (len > 0) { 18055 if (len >= segsiz) 18056 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 18057 else 18058 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 18059 } 18060 /* 18061 * Before ESTABLISHED, force sending of initial options unless TCP 18062 * set not to do any options. NOTE: we assume that the IP/TCP header 18063 * plus TCP options always fit in a single mbuf, leaving room for a 18064 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 18065 * + optlen <= MCLBYTES 18066 */ 18067 optlen = 0; 18068 #ifdef INET6 18069 if (isipv6) 18070 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 18071 else 18072 #endif 18073 hdrlen = sizeof(struct tcpiphdr); 18074 18075 /* 18076 * Compute options for segment. We only have to care about SYN and 18077 * established connection segments. Options for SYN-ACK segments 18078 * are handled in TCP syncache. 18079 */ 18080 to.to_flags = 0; 18081 if ((tp->t_flags & TF_NOOPT) == 0) { 18082 /* Maximum segment size. */ 18083 if (flags & TH_SYN) { 18084 tp->snd_nxt = tp->iss; 18085 to.to_mss = tcp_mssopt(&inp->inp_inc); 18086 if (tp->t_port) 18087 to.to_mss -= V_tcp_udp_tunneling_overhead; 18088 to.to_flags |= TOF_MSS; 18089 18090 /* 18091 * On SYN or SYN|ACK transmits on TFO connections, 18092 * only include the TFO option if it is not a 18093 * retransmit, as the presence of the TFO option may 18094 * have caused the original SYN or SYN|ACK to have 18095 * been dropped by a middlebox. 18096 */ 18097 if (IS_FASTOPEN(tp->t_flags) && 18098 (tp->t_rxtshift == 0)) { 18099 if (tp->t_state == TCPS_SYN_RECEIVED) { 18100 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 18101 to.to_tfo_cookie = 18102 (u_int8_t *)&tp->t_tfo_cookie.server; 18103 to.to_flags |= TOF_FASTOPEN; 18104 wanted_cookie = 1; 18105 } else if (tp->t_state == TCPS_SYN_SENT) { 18106 to.to_tfo_len = 18107 tp->t_tfo_client_cookie_len; 18108 to.to_tfo_cookie = 18109 tp->t_tfo_cookie.client; 18110 to.to_flags |= TOF_FASTOPEN; 18111 wanted_cookie = 1; 18112 /* 18113 * If we wind up having more data to 18114 * send with the SYN than can fit in 18115 * one segment, don't send any more 18116 * until the SYN|ACK comes back from 18117 * the other end. 18118 */ 18119 sendalot = 0; 18120 } 18121 } 18122 } 18123 /* Window scaling. */ 18124 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18125 to.to_wscale = tp->request_r_scale; 18126 to.to_flags |= TOF_SCALE; 18127 } 18128 /* Timestamps. */ 18129 if ((tp->t_flags & TF_RCVD_TSTMP) || 18130 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18131 to.to_tsval = ms_cts + tp->ts_offset; 18132 to.to_tsecr = tp->ts_recent; 18133 to.to_flags |= TOF_TS; 18134 } 18135 /* Set receive buffer autosizing timestamp. */ 18136 if (tp->rfbuf_ts == 0 && 18137 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18138 tp->rfbuf_ts = tcp_ts_getticks(); 18139 /* Selective ACK's. */ 18140 if (tp->t_flags & TF_SACK_PERMIT) { 18141 if (flags & TH_SYN) 18142 to.to_flags |= TOF_SACKPERM; 18143 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18144 tp->rcv_numsacks > 0) { 18145 to.to_flags |= TOF_SACK; 18146 to.to_nsacks = tp->rcv_numsacks; 18147 to.to_sacks = (u_char *)tp->sackblks; 18148 } 18149 } 18150 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18151 /* TCP-MD5 (RFC2385). 
*/ 18152 if (tp->t_flags & TF_SIGNATURE) 18153 to.to_flags |= TOF_SIGNATURE; 18154 #endif /* TCP_SIGNATURE */ 18155 18156 /* Processing the options. */ 18157 hdrlen += optlen = tcp_addoptions(&to, opt); 18158 /* 18159 * If we wanted a TFO option to be added, but it was unable 18160 * to fit, ensure no data is sent. 18161 */ 18162 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18163 !(to.to_flags & TOF_FASTOPEN)) 18164 len = 0; 18165 } 18166 if (tp->t_port) { 18167 if (V_tcp_udp_tunneling_port == 0) { 18168 /* The port was removed?? */ 18169 SOCKBUF_UNLOCK(&so->so_snd); 18170 #ifdef TCP_ACCOUNTING 18171 crtsc = get_cyclecount(); 18172 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18173 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18174 } 18175 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18176 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18177 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18178 } 18179 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18180 sched_unpin(); 18181 #endif 18182 return (EHOSTUNREACH); 18183 } 18184 hdrlen += sizeof(struct udphdr); 18185 } 18186 #ifdef INET6 18187 if (isipv6) 18188 ipoptlen = ip6_optlen(tp->t_inpcb); 18189 else 18190 #endif 18191 if (tp->t_inpcb->inp_options) 18192 ipoptlen = tp->t_inpcb->inp_options->m_len - 18193 offsetof(struct ipoption, ipopt_list); 18194 else 18195 ipoptlen = 0; 18196 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18197 ipoptlen += ipsec_optlen; 18198 #endif 18199 18200 /* 18201 * Adjust data length if insertion of options will bump the packet 18202 * length beyond the t_maxseg length. Clear the FIN bit because we 18203 * cut off the tail of the segment. 18204 */ 18205 if (len + optlen + ipoptlen > tp->t_maxseg) { 18206 if (tso) { 18207 uint32_t if_hw_tsomax; 18208 uint32_t moff; 18209 int32_t max_len; 18210 18211 /* extract TSO information */ 18212 if_hw_tsomax = tp->t_tsomax; 18213 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18214 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18215 KASSERT(ipoptlen == 0, 18216 ("%s: TSO can't do IP options", __func__)); 18217 18218 /* 18219 * Check if we should limit by maximum payload 18220 * length: 18221 */ 18222 if (if_hw_tsomax != 0) { 18223 /* compute maximum TSO length */ 18224 max_len = (if_hw_tsomax - hdrlen - 18225 max_linkhdr); 18226 if (max_len <= 0) { 18227 len = 0; 18228 } else if (len > max_len) { 18229 sendalot = 1; 18230 len = max_len; 18231 mark = 2; 18232 } 18233 } 18234 /* 18235 * Prevent the last segment from being fractional 18236 * unless the send sockbuf can be emptied: 18237 */ 18238 max_len = (tp->t_maxseg - optlen); 18239 if ((sb_offset + len) < sbavail(sb)) { 18240 moff = len % (u_int)max_len; 18241 if (moff != 0) { 18242 mark = 3; 18243 len -= moff; 18244 } 18245 } 18246 /* 18247 * In case there are too many small fragments don't 18248 * use TSO: 18249 */ 18250 if (len <= segsiz) { 18251 mark = 4; 18252 tso = 0; 18253 } 18254 /* 18255 * Send the FIN in a separate segment after the bulk 18256 * sending is done. We don't trust the TSO 18257 * implementations to clear the FIN flag on all but 18258 * the last segment. 18259 */ 18260 if (tp->t_flags & TF_NEEDFIN) { 18261 sendalot = 4; 18262 } 18263 } else { 18264 mark = 5; 18265 if (optlen + ipoptlen >= tp->t_maxseg) { 18266 /* 18267 * Since we don't have enough space to put 18268 * the IP header chain and the TCP header in 18269 * one packet as required by RFC 7112, don't 18270 * send it. Also ensure that at least one 18271 * byte of the payload can be put into the 18272 * TCP segment. 
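 * For this non-TSO overflow case the adjustment is simply (unlocking
 * and bookkeeping omitted, as the code below does):
 *
 *   if (optlen + ipoptlen >= tp->t_maxseg) {
 *           error = EMSGSIZE;
 *           goto out;
 *   }
 *   len = tp->t_maxseg - optlen - ipoptlen;
 *   sendalot = 5;
 *
 * i.e. trim the payload so headers plus options plus data fit in one
 * segment, and let a later pass pick up the remainder.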
18273 */ 18274 SOCKBUF_UNLOCK(&so->so_snd); 18275 error = EMSGSIZE; 18276 sack_rxmit = 0; 18277 goto out; 18278 } 18279 len = tp->t_maxseg - optlen - ipoptlen; 18280 sendalot = 5; 18281 } 18282 } else { 18283 tso = 0; 18284 mark = 6; 18285 } 18286 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18287 ("%s: len > IP_MAXPACKET", __func__)); 18288 #ifdef DIAGNOSTIC 18289 #ifdef INET6 18290 if (max_linkhdr + hdrlen > MCLBYTES) 18291 #else 18292 if (max_linkhdr + hdrlen > MHLEN) 18293 #endif 18294 panic("tcphdr too big"); 18295 #endif 18296 18297 /* 18298 * This KASSERT is here to catch edge cases at a well defined place. 18299 * Before, those had triggered (random) panic conditions further 18300 * down. 18301 */ 18302 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18303 if ((len == 0) && 18304 (flags & TH_FIN) && 18305 (sbused(sb))) { 18306 /* 18307 * We have outstanding data, don't send a fin by itself!. 18308 */ 18309 goto just_return; 18310 } 18311 /* 18312 * Grab a header mbuf, attaching a copy of data to be transmitted, 18313 * and initialize the header from the template for sends on this 18314 * connection. 18315 */ 18316 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18317 if (len) { 18318 uint32_t max_val; 18319 uint32_t moff; 18320 18321 if (rack->r_ctl.rc_pace_max_segs) 18322 max_val = rack->r_ctl.rc_pace_max_segs; 18323 else if (rack->rc_user_set_max_segs) 18324 max_val = rack->rc_user_set_max_segs * segsiz; 18325 else 18326 max_val = len; 18327 /* 18328 * We allow a limit on sending with hptsi. 18329 */ 18330 if (len > max_val) { 18331 mark = 7; 18332 len = max_val; 18333 } 18334 #ifdef INET6 18335 if (MHLEN < hdrlen + max_linkhdr) 18336 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18337 else 18338 #endif 18339 m = m_gethdr(M_NOWAIT, MT_DATA); 18340 18341 if (m == NULL) { 18342 SOCKBUF_UNLOCK(sb); 18343 error = ENOBUFS; 18344 sack_rxmit = 0; 18345 goto out; 18346 } 18347 m->m_data += max_linkhdr; 18348 m->m_len = hdrlen; 18349 18350 /* 18351 * Start the m_copy functions from the closest mbuf to the 18352 * sb_offset in the socket buffer chain. 18353 */ 18354 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18355 s_mb = mb; 18356 s_moff = moff; 18357 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18358 m_copydata(mb, moff, (int)len, 18359 mtod(m, caddr_t)+hdrlen); 18360 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18361 sbsndptr_adv(sb, mb, len); 18362 m->m_len += len; 18363 } else { 18364 struct sockbuf *msb; 18365 18366 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18367 msb = NULL; 18368 else 18369 msb = sb; 18370 m->m_next = tcp_m_copym( 18371 mb, moff, &len, 18372 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18373 ((rsm == NULL) ? hw_tls : 0) 18374 #ifdef NETFLIX_COPY_ARGS 18375 , &s_mb, &s_moff 18376 #endif 18377 ); 18378 if (len <= (tp->t_maxseg - optlen)) { 18379 /* 18380 * Must have ran out of mbufs for the copy 18381 * shorten it to no longer need tso. Lets 18382 * not put on sendalot since we are low on 18383 * mbufs. 
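 * For reference, the copy strategy used above is: a payload that fits
 * in the header mbuf (and is not hardware TLS) is copied flat with
 * m_copydata(); anything larger is chained on with tcp_m_copym(),
 * which honors the driver's TSO segment-count and segment-size limits
 * and may hand back fewer bytes than requested when mbufs are scarce,
 * which is the case being handled here.  A failed chain copy is
 * treated like any other ENOBUFS and the send is aborted.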
18384 */ 18385 tso = 0; 18386 } 18387 if (m->m_next == NULL) { 18388 SOCKBUF_UNLOCK(sb); 18389 (void)m_free(m); 18390 error = ENOBUFS; 18391 sack_rxmit = 0; 18392 goto out; 18393 } 18394 } 18395 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18396 if (rsm && (rsm->r_flags & RACK_TLP)) { 18397 /* 18398 * TLP should not count in retran count, but 18399 * in its own bin 18400 */ 18401 counter_u64_add(rack_tlp_retran, 1); 18402 counter_u64_add(rack_tlp_retran_bytes, len); 18403 } else { 18404 tp->t_sndrexmitpack++; 18405 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18406 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18407 } 18408 #ifdef STATS 18409 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18410 len); 18411 #endif 18412 } else { 18413 KMOD_TCPSTAT_INC(tcps_sndpack); 18414 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18415 #ifdef STATS 18416 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18417 len); 18418 #endif 18419 } 18420 /* 18421 * If we're sending everything we've got, set PUSH. (This 18422 * will keep happy those implementations which only give 18423 * data to the user when a buffer fills or a PUSH comes in.) 18424 */ 18425 if (sb_offset + len == sbused(sb) && 18426 sbused(sb) && 18427 !(flags & TH_SYN)) { 18428 flags |= TH_PUSH; 18429 add_flag |= RACK_HAD_PUSH; 18430 } 18431 18432 SOCKBUF_UNLOCK(sb); 18433 } else { 18434 SOCKBUF_UNLOCK(sb); 18435 if (tp->t_flags & TF_ACKNOW) 18436 KMOD_TCPSTAT_INC(tcps_sndacks); 18437 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18438 KMOD_TCPSTAT_INC(tcps_sndctrl); 18439 else 18440 KMOD_TCPSTAT_INC(tcps_sndwinup); 18441 18442 m = m_gethdr(M_NOWAIT, MT_DATA); 18443 if (m == NULL) { 18444 error = ENOBUFS; 18445 sack_rxmit = 0; 18446 goto out; 18447 } 18448 #ifdef INET6 18449 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18450 MHLEN >= hdrlen) { 18451 M_ALIGN(m, hdrlen); 18452 } else 18453 #endif 18454 m->m_data += max_linkhdr; 18455 m->m_len = hdrlen; 18456 } 18457 SOCKBUF_UNLOCK_ASSERT(sb); 18458 m->m_pkthdr.rcvif = (struct ifnet *)0; 18459 #ifdef MAC 18460 mac_inpcb_create_mbuf(inp, m); 18461 #endif 18462 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18463 #ifdef INET6 18464 if (isipv6) 18465 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18466 else 18467 #endif /* INET6 */ 18468 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18469 th = rack->r_ctl.fsb.th; 18470 udp = rack->r_ctl.fsb.udp; 18471 if (udp) { 18472 #ifdef INET6 18473 if (isipv6) 18474 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18475 else 18476 #endif /* INET6 */ 18477 ulen = hdrlen + len - sizeof(struct ip); 18478 udp->uh_ulen = htons(ulen); 18479 } 18480 } else { 18481 #ifdef INET6 18482 if (isipv6) { 18483 ip6 = mtod(m, struct ip6_hdr *); 18484 if (tp->t_port) { 18485 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18486 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18487 udp->uh_dport = tp->t_port; 18488 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18489 udp->uh_ulen = htons(ulen); 18490 th = (struct tcphdr *)(udp + 1); 18491 } else 18492 th = (struct tcphdr *)(ip6 + 1); 18493 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18494 } else 18495 #endif /* INET6 */ 18496 { 18497 ip = mtod(m, struct ip *); 18498 if (tp->t_port) { 18499 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18500 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18501 udp->uh_dport = tp->t_port; 18502 ulen = hdrlen + len - sizeof(struct ip); 18503 udp->uh_ulen = htons(ulen); 18504 th = (struct tcphdr *)(udp + 1); 18505 } else 18506 th = (struct 
tcphdr *)(ip + 1); 18507 tcpip_fillheaders(inp, tp->t_port, ip, th); 18508 } 18509 } 18510 /* 18511 * Fill in fields, remembering maximum advertised window for use in 18512 * delaying messages about window sizes. If resending a FIN, be sure 18513 * not to use a new sequence number. 18514 */ 18515 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18516 tp->snd_nxt == tp->snd_max) 18517 tp->snd_nxt--; 18518 /* 18519 * If we are starting a connection, send ECN setup SYN packet. If we 18520 * are on a retransmit, we may resend those bits a number of times 18521 * as per RFC 3168. 18522 */ 18523 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18524 flags |= tcp_ecn_output_syn_sent(tp); 18525 } 18526 /* Also handle parallel SYN for ECN */ 18527 if (TCPS_HAVERCVDSYN(tp->t_state) && 18528 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18529 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18530 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18531 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18532 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18533 #ifdef INET6 18534 if (isipv6) { 18535 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18536 ip6->ip6_flow |= htonl(ect << 20); 18537 } 18538 else 18539 #endif 18540 { 18541 ip->ip_tos &= ~IPTOS_ECN_MASK; 18542 ip->ip_tos |= ect; 18543 } 18544 } 18545 /* 18546 * If we are doing retransmissions, then snd_nxt will not reflect 18547 * the first unsent octet. For ACK only packets, we do not want the 18548 * sequence number of the retransmitted packet, we want the sequence 18549 * number of the next unsent octet. So, if there is no data (and no 18550 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18551 * ti_seq. But if we are in persist state, snd_max might reflect 18552 * one byte beyond the right edge of the window, so use snd_nxt in 18553 * that case, since we know we aren't doing a retransmission. 18554 * (retransmit and persist are mutually exclusive...) 18555 */ 18556 if (sack_rxmit == 0) { 18557 if (len || (flags & (TH_SYN | TH_FIN))) { 18558 th->th_seq = htonl(tp->snd_nxt); 18559 rack_seq = tp->snd_nxt; 18560 } else { 18561 th->th_seq = htonl(tp->snd_max); 18562 rack_seq = tp->snd_max; 18563 } 18564 } else { 18565 th->th_seq = htonl(rsm->r_start); 18566 rack_seq = rsm->r_start; 18567 } 18568 th->th_ack = htonl(tp->rcv_nxt); 18569 tcp_set_flags(th, flags); 18570 /* 18571 * Calculate receive window. Don't shrink window, but avoid silly 18572 * window syndrome. 18573 * If a RST segment is sent, advertise a window of zero. 18574 */ 18575 if (flags & TH_RST) { 18576 recwin = 0; 18577 } else { 18578 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18579 recwin < (long)segsiz) { 18580 recwin = 0; 18581 } 18582 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18583 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18584 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18585 } 18586 18587 /* 18588 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18589 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18590 * handled in syncache. 18591 */ 18592 if (flags & TH_SYN) 18593 th->th_win = htons((u_short) 18594 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18595 else { 18596 /* Avoid shrinking window with window scaling. */ 18597 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18598 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18599 } 18600 /* 18601 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18602 * window. This may cause the remote transmitter to stall. 
This 18603 * flag tells soreceive() to disable delayed acknowledgements when 18604 * draining the buffer. This can occur if the receiver is 18605 * attempting to read more data than can be buffered prior to 18606 * transmitting on the connection. 18607 */ 18608 if (th->th_win == 0) { 18609 tp->t_sndzerowin++; 18610 tp->t_flags |= TF_RXWIN0SENT; 18611 } else 18612 tp->t_flags &= ~TF_RXWIN0SENT; 18613 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 18614 /* Now are we using fsb?, if so copy the template data to the mbuf */ 18615 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18616 uint8_t *cpto; 18617 18618 cpto = mtod(m, uint8_t *); 18619 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 18620 /* 18621 * We have just copied in: 18622 * IP/IP6 18623 * <optional udphdr> 18624 * tcphdr (no options) 18625 * 18626 * We need to grab the correct pointers into the mbuf 18627 * for both the tcp header, and possibly the udp header (if tunneling). 18628 * We do this by using the offset in the copy buffer and adding it 18629 * to the mbuf base pointer (cpto). 18630 */ 18631 #ifdef INET6 18632 if (isipv6) 18633 ip6 = mtod(m, struct ip6_hdr *); 18634 else 18635 #endif /* INET6 */ 18636 ip = mtod(m, struct ip *); 18637 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 18638 /* If we have a udp header lets set it into the mbuf as well */ 18639 if (udp) 18640 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); 18641 } 18642 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18643 if (to.to_flags & TOF_SIGNATURE) { 18644 /* 18645 * Calculate MD5 signature and put it into the place 18646 * determined before. 18647 * NOTE: since TCP options buffer doesn't point into 18648 * mbuf's data, calculate offset and use it. 18649 */ 18650 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 18651 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 18652 /* 18653 * Do not send segment if the calculation of MD5 18654 * digest has failed. 18655 */ 18656 goto out; 18657 } 18658 } 18659 #endif 18660 if (optlen) { 18661 bcopy(opt, th + 1, optlen); 18662 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 18663 } 18664 /* 18665 * Put TCP length in extended header, and then checksum extended 18666 * header and data. 18667 */ 18668 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 18669 #ifdef INET6 18670 if (isipv6) { 18671 /* 18672 * ip6_plen is not need to be filled now, and will be filled 18673 * in ip6_output. 
18674 */ 18675 if (tp->t_port) { 18676 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18677 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18678 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18679 th->th_sum = htons(0); 18680 UDPSTAT_INC(udps_opackets); 18681 } else { 18682 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18683 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18684 th->th_sum = in6_cksum_pseudo(ip6, 18685 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18686 0); 18687 } 18688 } 18689 #endif 18690 #if defined(INET6) && defined(INET) 18691 else 18692 #endif 18693 #ifdef INET 18694 { 18695 if (tp->t_port) { 18696 m->m_pkthdr.csum_flags = CSUM_UDP; 18697 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18698 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18699 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18700 th->th_sum = htons(0); 18701 UDPSTAT_INC(udps_opackets); 18702 } else { 18703 m->m_pkthdr.csum_flags = CSUM_TCP; 18704 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18705 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18706 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18707 IPPROTO_TCP + len + optlen)); 18708 } 18709 /* IP version must be set here for ipv4/ipv6 checking later */ 18710 KASSERT(ip->ip_v == IPVERSION, 18711 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18712 } 18713 #endif 18714 /* 18715 * Enable TSO and specify the size of the segments. The TCP pseudo 18716 * header checksum is always provided. XXX: Fixme: This is currently 18717 * not the case for IPv6. 18718 */ 18719 if (tso) { 18720 KASSERT(len > tp->t_maxseg - optlen, 18721 ("%s: len <= tso_segsz", __func__)); 18722 m->m_pkthdr.csum_flags |= CSUM_TSO; 18723 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18724 } 18725 KASSERT(len + hdrlen == m_length(m, NULL), 18726 ("%s: mbuf chain different than expected: %d + %u != %u", 18727 __func__, len, hdrlen, m_length(m, NULL))); 18728 18729 #ifdef TCP_HHOOK 18730 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18731 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18732 #endif 18733 /* We're getting ready to send; log now. 
*/ 18734 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18735 union tcp_log_stackspecific log; 18736 18737 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18738 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18739 if (rack->rack_no_prr) 18740 log.u_bbr.flex1 = 0; 18741 else 18742 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18743 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18744 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18745 log.u_bbr.flex4 = orig_len; 18746 /* Save off the early/late values */ 18747 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18748 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18749 log.u_bbr.bw_inuse = rack_get_bw(rack); 18750 log.u_bbr.flex8 = 0; 18751 if (rsm) { 18752 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18753 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18754 counter_u64_add(rack_collapsed_win_rxt, 1); 18755 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18756 } 18757 if (doing_tlp) 18758 log.u_bbr.flex8 = 2; 18759 else 18760 log.u_bbr.flex8 = 1; 18761 } else { 18762 if (doing_tlp) 18763 log.u_bbr.flex8 = 3; 18764 else 18765 log.u_bbr.flex8 = 0; 18766 } 18767 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18768 log.u_bbr.flex7 = mark; 18769 log.u_bbr.flex7 <<= 8; 18770 log.u_bbr.flex7 |= pass; 18771 log.u_bbr.pkts_out = tp->t_maxseg; 18772 log.u_bbr.timeStamp = cts; 18773 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18774 log.u_bbr.lt_epoch = cwnd_to_use; 18775 log.u_bbr.delivered = sendalot; 18776 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18777 len, &log, false, NULL, NULL, 0, &tv); 18778 } else 18779 lgb = NULL; 18780 18781 /* 18782 * Fill in IP length and desired time to live and send to IP level. 18783 * There should be a better way to handle ttl and tos; we could keep 18784 * them in the template, but need a way to checksum without them. 18785 */ 18786 /* 18787 * m->m_pkthdr.len should have been set before cksum calculation, 18788 * because in6_cksum() needs it. 18789 */ 18790 #ifdef INET6 18791 if (isipv6) { 18792 /* 18793 * we separately set hoplimit for every segment, since the 18794 * user might want to change the value via setsockopt. Also, 18795 * desired default hop limit might be changed via Neighbor 18796 * Discovery. 18797 */ 18798 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18799 18800 /* 18801 * Set the packet size here for the benefit of DTrace 18802 * probes. ip6_output() will set it properly; it's supposed 18803 * to include the option header lengths as well. 18804 */ 18805 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18806 18807 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18808 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18809 else 18810 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18811 18812 if (tp->t_state == TCPS_SYN_SENT) 18813 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18814 18815 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18816 /* TODO: IPv6 IP6TOS_ECT bit on */ 18817 error = ip6_output(m, 18818 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18819 inp->in6p_outputopts, 18820 #else 18821 NULL, 18822 #endif 18823 &inp->inp_route6, 18824 ((rsm || sack_rxmit) ? 
IP_NO_SND_TAG_RL : 0), 18825 NULL, NULL, inp); 18826 18827 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 18828 mtu = inp->inp_route6.ro_nh->nh_mtu; 18829 } 18830 #endif /* INET6 */ 18831 #if defined(INET) && defined(INET6) 18832 else 18833 #endif 18834 #ifdef INET 18835 { 18836 ip->ip_len = htons(m->m_pkthdr.len); 18837 #ifdef INET6 18838 if (inp->inp_vflag & INP_IPV6PROTO) 18839 ip->ip_ttl = in6_selecthlim(inp, NULL); 18840 #endif /* INET6 */ 18841 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; 18842 /* 18843 * If we do path MTU discovery, then we set DF on every 18844 * packet. This might not be the best thing to do according 18845 * to RFC3390 Section 2. However the tcp hostcache mitigates 18846 * the problem so it affects only the first tcp connection 18847 * with a host. 18848 * 18849 * NB: Don't set DF on small MTU/MSS to have a safe 18850 * fallback. 18851 */ 18852 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 18853 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18854 if (tp->t_port == 0 || len < V_tcp_minmss) { 18855 ip->ip_off |= htons(IP_DF); 18856 } 18857 } else { 18858 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18859 } 18860 18861 if (tp->t_state == TCPS_SYN_SENT) 18862 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 18863 18864 TCP_PROBE5(send, NULL, tp, ip, tp, th); 18865 18866 error = ip_output(m, 18867 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18868 inp->inp_options, 18869 #else 18870 NULL, 18871 #endif 18872 &inp->inp_route, 18873 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 18874 inp); 18875 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 18876 mtu = inp->inp_route.ro_nh->nh_mtu; 18877 } 18878 #endif /* INET */ 18879 18880 out: 18881 if (lgb) { 18882 lgb->tlb_errno = error; 18883 lgb = NULL; 18884 } 18885 /* 18886 * In transmit state, time the transmission and arrange for the 18887 * retransmit. In persist state, just set snd_max. 
18888 */ 18889 if (error == 0) { 18890 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18891 if (rsm && doing_tlp) { 18892 rack->rc_last_sent_tlp_past_cumack = 0; 18893 rack->rc_last_sent_tlp_seq_valid = 1; 18894 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18895 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18896 } 18897 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18898 if (rsm && (doing_tlp == 0)) { 18899 /* Set we retransmitted */ 18900 rack->rc_gp_saw_rec = 1; 18901 } else { 18902 if (cwnd_to_use > tp->snd_ssthresh) { 18903 /* Set we sent in CA */ 18904 rack->rc_gp_saw_ca = 1; 18905 } else { 18906 /* Set we sent in SS */ 18907 rack->rc_gp_saw_ss = 1; 18908 } 18909 } 18910 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18911 (tp->t_flags & TF_SACK_PERMIT) && 18912 tp->rcv_numsacks > 0) 18913 tcp_clean_dsack_blocks(tp); 18914 tot_len_this_send += len; 18915 if (len == 0) 18916 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18917 else if (len == 1) { 18918 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18919 } else if (len > 1) { 18920 int idx; 18921 18922 idx = (len / segsiz) + 3; 18923 if (idx >= TCP_MSS_ACCT_ATIMER) 18924 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18925 else 18926 counter_u64_add(rack_out_size[idx], 1); 18927 } 18928 } 18929 if ((rack->rack_no_prr == 0) && 18930 sub_from_prr && 18931 (error == 0)) { 18932 if (rack->r_ctl.rc_prr_sndcnt >= len) 18933 rack->r_ctl.rc_prr_sndcnt -= len; 18934 else 18935 rack->r_ctl.rc_prr_sndcnt = 0; 18936 } 18937 sub_from_prr = 0; 18938 if (doing_tlp) { 18939 /* Make sure the TLP is added */ 18940 add_flag |= RACK_TLP; 18941 } else if (rsm) { 18942 /* If its a resend without TLP then it must not have the flag */ 18943 rsm->r_flags &= ~RACK_TLP; 18944 } 18945 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18946 rack_to_usec_ts(&tv), 18947 rsm, add_flag, s_mb, s_moff, hw_tls); 18948 18949 18950 if ((error == 0) && 18951 (len > 0) && 18952 (tp->snd_una == tp->snd_max)) 18953 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18954 { 18955 tcp_seq startseq = tp->snd_nxt; 18956 18957 /* Track our lost count */ 18958 if (rsm && (doing_tlp == 0)) 18959 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18960 /* 18961 * Advance snd_nxt over sequence space of this segment. 18962 */ 18963 if (error) 18964 /* We don't log or do anything with errors */ 18965 goto nomore; 18966 if (doing_tlp == 0) { 18967 if (rsm == NULL) { 18968 /* 18969 * Not a retransmission of some 18970 * sort, new data is going out so 18971 * clear our TLP count and flag. 18972 */ 18973 rack->rc_tlp_in_progress = 0; 18974 rack->r_ctl.rc_tlp_cnt_out = 0; 18975 } 18976 } else { 18977 /* 18978 * We have just sent a TLP, mark that it is true 18979 * and make sure our in progress is set so we 18980 * continue to check the count. 18981 */ 18982 rack->rc_tlp_in_progress = 1; 18983 rack->r_ctl.rc_tlp_cnt_out++; 18984 } 18985 if (flags & (TH_SYN | TH_FIN)) { 18986 if (flags & TH_SYN) 18987 tp->snd_nxt++; 18988 if (flags & TH_FIN) { 18989 tp->snd_nxt++; 18990 tp->t_flags |= TF_SENTFIN; 18991 } 18992 } 18993 /* In the ENOBUFS case we do *not* update snd_max */ 18994 if (sack_rxmit) 18995 goto nomore; 18996 18997 tp->snd_nxt += len; 18998 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18999 if (tp->snd_una == tp->snd_max) { 19000 /* 19001 * Update the time we just added data since 19002 * none was outstanding. 
19003 */ 19004 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 19005 tp->t_acktime = ticks; 19006 } 19007 tp->snd_max = tp->snd_nxt; 19008 /* 19009 * Time this transmission if not a retransmission and 19010 * not currently timing anything. 19011 * This is only relevant in case of switching back to 19012 * the base stack. 19013 */ 19014 if (tp->t_rtttime == 0) { 19015 tp->t_rtttime = ticks; 19016 tp->t_rtseq = startseq; 19017 KMOD_TCPSTAT_INC(tcps_segstimed); 19018 } 19019 if (len && 19020 ((tp->t_flags & TF_GPUTINPROG) == 0)) 19021 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 19022 } 19023 /* 19024 * If we are doing FO we need to update the mbuf position and subtract 19025 * this happens when the peer sends us duplicate information and 19026 * we thus want to send a DSACK. 19027 * 19028 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 19029 * turned off? If not then we are going to echo multiple DSACK blocks 19030 * out (with the TSO), which we should not be doing. 19031 */ 19032 if (rack->r_fast_output && len) { 19033 if (rack->r_ctl.fsb.left_to_send > len) 19034 rack->r_ctl.fsb.left_to_send -= len; 19035 else 19036 rack->r_ctl.fsb.left_to_send = 0; 19037 if (rack->r_ctl.fsb.left_to_send < segsiz) 19038 rack->r_fast_output = 0; 19039 if (rack->r_fast_output) { 19040 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19041 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19042 } 19043 } 19044 } 19045 nomore: 19046 if (error) { 19047 rack->r_ctl.rc_agg_delayed = 0; 19048 rack->r_early = 0; 19049 rack->r_late = 0; 19050 rack->r_ctl.rc_agg_early = 0; 19051 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 19052 /* 19053 * Failures do not advance the seq counter above. For the 19054 * case of ENOBUFS we will fall out and retry in 1ms with 19055 * the hpts. Everything else will just have to retransmit 19056 * with the timer. 19057 * 19058 * In any case, we do not want to loop around for another 19059 * send without a good reason. 19060 */ 19061 sendalot = 0; 19062 switch (error) { 19063 case EPERM: 19064 tp->t_softerror = error; 19065 #ifdef TCP_ACCOUNTING 19066 crtsc = get_cyclecount(); 19067 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19068 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19069 } 19070 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19071 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19072 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19073 } 19074 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19075 sched_unpin(); 19076 #endif 19077 return (error); 19078 case ENOBUFS: 19079 /* 19080 * Pace us right away to retry in some 19081 * time 19082 */ 19083 if (rack->r_ctl.crte != NULL) { 19084 rack_trace_point(rack, RACK_TP_HWENOBUF); 19085 } else 19086 rack_trace_point(rack, RACK_TP_ENOBUF); 19087 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 19088 if (rack->rc_enobuf < 0x7f) 19089 rack->rc_enobuf++; 19090 if (slot < (10 * HPTS_USEC_IN_MSEC)) 19091 slot = 10 * HPTS_USEC_IN_MSEC; 19092 if (rack->r_ctl.crte != NULL) { 19093 counter_u64_add(rack_saw_enobuf_hw, 1); 19094 tcp_rl_log_enobuf(rack->r_ctl.crte); 19095 } 19096 counter_u64_add(rack_saw_enobuf, 1); 19097 goto enobufs; 19098 case EMSGSIZE: 19099 /* 19100 * For some reason the interface we used initially 19101 * to send segments changed to another or lowered 19102 * its MTU. If TSO was active we either got an 19103 * interface without TSO capabilities or TSO was 19104 * turned off. 
If we obtained mtu from ip_output() 19105 * then update it and try again. 19106 */ 19107 if (tso) 19108 tp->t_flags &= ~TF_TSO; 19109 if (mtu != 0) { 19110 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19111 goto again; 19112 } 19113 slot = 10 * HPTS_USEC_IN_MSEC; 19114 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19115 #ifdef TCP_ACCOUNTING 19116 crtsc = get_cyclecount(); 19117 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19118 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19119 } 19120 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19121 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19122 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19123 } 19124 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19125 sched_unpin(); 19126 #endif 19127 return (error); 19128 case ENETUNREACH: 19129 counter_u64_add(rack_saw_enetunreach, 1); 19130 case EHOSTDOWN: 19131 case EHOSTUNREACH: 19132 case ENETDOWN: 19133 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19134 tp->t_softerror = error; 19135 } 19136 /* FALLTHROUGH */ 19137 default: 19138 slot = 10 * HPTS_USEC_IN_MSEC; 19139 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19140 #ifdef TCP_ACCOUNTING 19141 crtsc = get_cyclecount(); 19142 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19143 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19144 } 19145 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19146 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19147 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19148 } 19149 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19150 sched_unpin(); 19151 #endif 19152 return (error); 19153 } 19154 } else { 19155 rack->rc_enobuf = 0; 19156 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19157 rack->r_ctl.retran_during_recovery += len; 19158 } 19159 KMOD_TCPSTAT_INC(tcps_sndtotal); 19160 19161 /* 19162 * Data sent (as far as we can tell). If this advertises a larger 19163 * window than any other segment, then remember the size of the 19164 * advertised window. Any pending ACK has now been sent. 19165 */ 19166 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19167 tp->rcv_adv = tp->rcv_nxt + recwin; 19168 19169 tp->last_ack_sent = tp->rcv_nxt; 19170 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19171 enobufs: 19172 if (sendalot) { 19173 /* Do we need to turn off sendalot? */ 19174 if (rack->r_ctl.rc_pace_max_segs && 19175 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19176 /* We hit our max. */ 19177 sendalot = 0; 19178 } else if ((rack->rc_user_set_max_segs) && 19179 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19180 /* We hit the user defined max */ 19181 sendalot = 0; 19182 } 19183 } 19184 if ((error == 0) && (flags & TH_FIN)) 19185 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19186 if (flags & TH_RST) { 19187 /* 19188 * We don't send again after sending a RST. 19189 */ 19190 slot = 0; 19191 sendalot = 0; 19192 if (error == 0) 19193 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19194 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19195 /* 19196 * Get our pacing rate, if an error 19197 * occurred in sending (ENOBUF) we would 19198 * hit the else if with slot preset. Other 19199 * errors return. 19200 */ 19201 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19202 } 19203 if (rsm && 19204 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19205 rack->use_rack_rr) { 19206 /* Its a retransmit and we use the rack cheat? 
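(Here the "rack cheat" means rack-rr mode: when the checks just below apply, we fall back to the fixed rc_min_to slot instead of a computed pacing delay.)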
*/ 19207 if ((slot == 0) || 19208 (rack->rc_always_pace == 0) || 19209 (rack->r_rr_config == 1)) { 19210 /* 19211 * We have no pacing set or we 19212 * are using old-style rack or 19213 * we are overridden to use the old 1ms pacing. 19214 */ 19215 slot = rack->r_ctl.rc_min_to; 19216 } 19217 } 19218 /* We have sent clear the flag */ 19219 rack->r_ent_rec_ns = 0; 19220 if (rack->r_must_retran) { 19221 if (rsm) { 19222 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19223 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19224 /* 19225 * We have retransmitted all. 19226 */ 19227 rack->r_must_retran = 0; 19228 rack->r_ctl.rc_out_at_rto = 0; 19229 } 19230 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19231 /* 19232 * Sending new data will also kill 19233 * the loop. 19234 */ 19235 rack->r_must_retran = 0; 19236 rack->r_ctl.rc_out_at_rto = 0; 19237 } 19238 } 19239 rack->r_ctl.fsb.recwin = recwin; 19240 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19241 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19242 /* 19243 * We hit an RTO and now have past snd_max at the RTO 19244 * clear all the WAS flags. 19245 */ 19246 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19247 } 19248 if (slot) { 19249 /* set the rack tcb into the slot N */ 19250 if ((error == 0) && 19251 rack_use_rfo && 19252 ((flags & (TH_SYN|TH_FIN)) == 0) && 19253 (rsm == NULL) && 19254 (tp->snd_nxt == tp->snd_max) && 19255 (ipoptlen == 0) && 19256 (tp->rcv_numsacks == 0) && 19257 rack->r_fsb_inited && 19258 TCPS_HAVEESTABLISHED(tp->t_state) && 19259 (rack->r_must_retran == 0) && 19260 ((tp->t_flags & TF_NEEDFIN) == 0) && 19261 (len > 0) && (orig_len > 0) && 19262 (orig_len > len) && 19263 ((orig_len - len) >= segsiz) && 19264 ((optlen == 0) || 19265 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19266 /* We can send at least one more MSS using our fsb */ 19267 19268 rack->r_fast_output = 1; 19269 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19270 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19271 rack->r_ctl.fsb.tcp_flags = flags; 19272 rack->r_ctl.fsb.left_to_send = orig_len - len; 19273 if (hw_tls) 19274 rack->r_ctl.fsb.hw_tls = 1; 19275 else 19276 rack->r_ctl.fsb.hw_tls = 0; 19277 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19278 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19279 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19280 (tp->snd_max - tp->snd_una))); 19281 if (rack->r_ctl.fsb.left_to_send < segsiz) 19282 rack->r_fast_output = 0; 19283 else { 19284 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19285 rack->r_ctl.fsb.rfo_apply_push = 1; 19286 else 19287 rack->r_ctl.fsb.rfo_apply_push = 0; 19288 } 19289 } else 19290 rack->r_fast_output = 0; 19291 rack_log_fsb(rack, tp, so, flags, 19292 ipoptlen, orig_len, len, error, 19293 (rsm == NULL), optlen, __LINE__, 2); 19294 } else if (sendalot) { 19295 int ret; 19296 19297 sack_rxmit = 0; 19298 if ((error == 0) && 19299 rack_use_rfo && 19300 ((flags & (TH_SYN|TH_FIN)) == 0) && 19301 (rsm == NULL) && 19302 (ipoptlen == 0) && 19303 (tp->rcv_numsacks == 0) && 19304 (tp->snd_nxt == tp->snd_max) && 19305 (rack->r_must_retran == 0) && 19306 rack->r_fsb_inited && 19307 TCPS_HAVEESTABLISHED(tp->t_state) && 19308 ((tp->t_flags & TF_NEEDFIN) == 0) && 19309 (len > 0) && (orig_len > 0) && 19310 (orig_len > len) && 19311 ((orig_len - len) >= segsiz) && 19312 ((optlen == 0) || 19313 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19314 /* we can use fast_output for more */ 19315 19316 rack->r_fast_output = 1; 19317 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19318 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19319 rack->r_ctl.fsb.tcp_flags = flags; 19320 rack->r_ctl.fsb.left_to_send = orig_len - len; 19321 if (hw_tls) 19322 rack->r_ctl.fsb.hw_tls = 1; 19323 else 19324 rack->r_ctl.fsb.hw_tls = 0; 19325 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19326 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19327 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19328 (tp->snd_max - tp->snd_una))); 19329 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19330 rack->r_fast_output = 0; 19331 } 19332 if (rack->r_fast_output) { 19333 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19334 rack->r_ctl.fsb.rfo_apply_push = 1; 19335 else 19336 rack->r_ctl.fsb.rfo_apply_push = 0; 19337 rack_log_fsb(rack, tp, so, flags, 19338 ipoptlen, orig_len, len, error, 19339 (rsm == NULL), optlen, __LINE__, 3); 19340 error = 0; 19341 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19342 if (ret >= 0) 19343 return (ret); 19344 else if (error) 19345 goto nomore; 19346 19347 } 19348 } 19349 goto again; 19350 } 19351 /* Assure when we leave that snd_nxt will point to top */ 19352 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19353 tp->snd_nxt = tp->snd_max; 19354 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19355 #ifdef TCP_ACCOUNTING 19356 crtsc = get_cyclecount() - ts_val; 19357 if (tot_len_this_send) { 19358 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19359 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19360 } 19361 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19362 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19363 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19364 } 19365 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19366 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19367 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19368 } 19369 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19370 } else { 19371 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19372 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19373 } 19374 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19375 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19376 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19377 } 19378 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19379 } 19380 sched_unpin(); 19381 #endif 19382 if (error == ENOBUFS) 19383 error = 0; 19384 return (error); 19385 } 19386 19387 static void 19388 rack_update_seg(struct tcp_rack *rack) 19389 { 19390 uint32_t orig_val; 19391 19392 orig_val = rack->r_ctl.rc_pace_max_segs; 19393 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19394 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19395 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19396 } 19397 19398 static void 19399 rack_mtu_change(struct tcpcb *tp) 19400 { 19401 /* 19402 * The MSS may have changed 19403 */ 19404 struct tcp_rack *rack; 19405 struct rack_sendmap *rsm; 19406 19407 rack = (struct tcp_rack *)tp->t_fb_ptr; 19408 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19409 /* 19410 * The MTU has changed we need to resend everything 19411 * since all we have sent is lost. We first fix 19412 * up the mtu though. 
19413 */ 19414 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19415 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19416 rack_remxt_tmr(tp); 19417 rack->r_fast_output = 0; 19418 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19419 rack->r_ctl.rc_sacked); 19420 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19421 rack->r_must_retran = 1; 19422 /* Mark all inflight to needing to be rxt'd */ 19423 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19424 rsm->r_flags |= RACK_MUST_RXT; 19425 } 19426 } 19427 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19428 /* We don't use snd_nxt to retransmit */ 19429 tp->snd_nxt = tp->snd_max; 19430 } 19431 19432 static int 19433 rack_set_profile(struct tcp_rack *rack, int prof) 19434 { 19435 int err = EINVAL; 19436 if (prof == 1) { 19437 /* pace_always=1 */ 19438 if (rack->rc_always_pace == 0) { 19439 if (tcp_can_enable_pacing() == 0) 19440 return (EBUSY); 19441 } 19442 rack->rc_always_pace = 1; 19443 if (rack->use_fixed_rate || rack->gp_ready) 19444 rack_set_cc_pacing(rack); 19445 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19446 rack->rack_attempt_hdwr_pace = 0; 19447 /* cmpack=1 */ 19448 if (rack_use_cmp_acks) 19449 rack->r_use_cmp_ack = 1; 19450 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19451 rack->r_use_cmp_ack) 19452 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19453 /* scwnd=1 */ 19454 rack->rack_enable_scwnd = 1; 19455 /* dynamic=100 */ 19456 rack->rc_gp_dyn_mul = 1; 19457 /* gp_inc_ca */ 19458 rack->r_ctl.rack_per_of_gp_ca = 100; 19459 /* rrr_conf=3 */ 19460 rack->r_rr_config = 3; 19461 /* npush=2 */ 19462 rack->r_ctl.rc_no_push_at_mrtt = 2; 19463 /* fillcw=1 */ 19464 rack->rc_pace_to_cwnd = 1; 19465 rack->rc_pace_fill_if_rttin_range = 0; 19466 rack->rtt_limit_mul = 0; 19467 /* noprr=1 */ 19468 rack->rack_no_prr = 1; 19469 /* lscwnd=1 */ 19470 rack->r_limit_scw = 1; 19471 /* gp_inc_rec */ 19472 rack->r_ctl.rack_per_of_gp_rec = 90; 19473 err = 0; 19474 19475 } else if (prof == 3) { 19476 /* Same as profile one except fill_cw becomes 2 (less aggressive set) */ 19477 /* pace_always=1 */ 19478 if (rack->rc_always_pace == 0) { 19479 if (tcp_can_enable_pacing() == 0) 19480 return (EBUSY); 19481 } 19482 rack->rc_always_pace = 1; 19483 if (rack->use_fixed_rate || rack->gp_ready) 19484 rack_set_cc_pacing(rack); 19485 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19486 rack->rack_attempt_hdwr_pace = 0; 19487 /* cmpack=1 */ 19488 if (rack_use_cmp_acks) 19489 rack->r_use_cmp_ack = 1; 19490 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19491 rack->r_use_cmp_ack) 19492 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19493 /* scwnd=1 */ 19494 rack->rack_enable_scwnd = 1; 19495 /* dynamic=100 */ 19496 rack->rc_gp_dyn_mul = 1; 19497 /* gp_inc_ca */ 19498 rack->r_ctl.rack_per_of_gp_ca = 100; 19499 /* rrr_conf=3 */ 19500 rack->r_rr_config = 3; 19501 /* npush=2 */ 19502 rack->r_ctl.rc_no_push_at_mrtt = 2; 19503 /* fillcw=2 */ 19504 rack->rc_pace_to_cwnd = 1; 19505 rack->r_fill_less_agg = 1; 19506 rack->rc_pace_fill_if_rttin_range = 0; 19507 rack->rtt_limit_mul = 0; 19508 /* noprr=1 */ 19509 rack->rack_no_prr = 1; 19510 /* lscwnd=1 */ 19511 rack->r_limit_scw = 1; 19512 /* gp_inc_rec */ 19513 rack->r_ctl.rack_per_of_gp_rec = 90; 19514 err = 0; 19515 19516 19517 } else if (prof == 2) { 19518 /* cmpack=1 */ 19519 if (rack->rc_always_pace == 0) { 19520 if (tcp_can_enable_pacing() == 0) 19521 return (EBUSY); 19522 } 19523 rack->rc_always_pace = 1; 19524 if (rack->use_fixed_rate || rack->gp_ready) 19525 
rack_set_cc_pacing(rack); 19526 rack->r_use_cmp_ack = 1; 19527 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19528 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19529 /* pace_always=1 */ 19530 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19531 /* scwnd=1 */ 19532 rack->rack_enable_scwnd = 1; 19533 /* dynamic=100 */ 19534 rack->rc_gp_dyn_mul = 1; 19535 rack->r_ctl.rack_per_of_gp_ca = 100; 19536 /* rrr_conf=3 */ 19537 rack->r_rr_config = 3; 19538 /* npush=2 */ 19539 rack->r_ctl.rc_no_push_at_mrtt = 2; 19540 /* fillcw=1 */ 19541 rack->rc_pace_to_cwnd = 1; 19542 rack->rc_pace_fill_if_rttin_range = 0; 19543 rack->rtt_limit_mul = 0; 19544 /* noprr=1 */ 19545 rack->rack_no_prr = 1; 19546 /* lscwnd=0 */ 19547 rack->r_limit_scw = 0; 19548 err = 0; 19549 } else if (prof == 0) { 19550 /* This changes things back to the default settings */ 19551 err = 0; 19552 if (rack->rc_always_pace) { 19553 tcp_decrement_paced_conn(); 19554 rack_undo_cc_pacing(rack); 19555 rack->rc_always_pace = 0; 19556 } 19557 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19558 rack->rc_always_pace = 1; 19559 if (rack->use_fixed_rate || rack->gp_ready) 19560 rack_set_cc_pacing(rack); 19561 } else 19562 rack->rc_always_pace = 0; 19563 if (rack_dsack_std_based & 0x1) { 19564 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19565 rack->rc_rack_tmr_std_based = 1; 19566 } 19567 if (rack_dsack_std_based & 0x2) { 19568 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19569 rack->rc_rack_use_dsack = 1; 19570 } 19571 if (rack_use_cmp_acks) 19572 rack->r_use_cmp_ack = 1; 19573 else 19574 rack->r_use_cmp_ack = 0; 19575 if (rack_disable_prr) 19576 rack->rack_no_prr = 1; 19577 else 19578 rack->rack_no_prr = 0; 19579 if (rack_gp_no_rec_chg) 19580 rack->rc_gp_no_rec_chg = 1; 19581 else 19582 rack->rc_gp_no_rec_chg = 0; 19583 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19584 rack->r_mbuf_queue = 1; 19585 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19586 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19587 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19588 } else { 19589 rack->r_mbuf_queue = 0; 19590 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19591 } 19592 if (rack_enable_shared_cwnd) 19593 rack->rack_enable_scwnd = 1; 19594 else 19595 rack->rack_enable_scwnd = 0; 19596 if (rack_do_dyn_mul) { 19597 /* When dynamic adjustment is on CA needs to start at 100% */ 19598 rack->rc_gp_dyn_mul = 1; 19599 if (rack_do_dyn_mul >= 100) 19600 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19601 } else { 19602 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19603 rack->rc_gp_dyn_mul = 0; 19604 } 19605 rack->r_rr_config = 0; 19606 rack->r_ctl.rc_no_push_at_mrtt = 0; 19607 rack->rc_pace_to_cwnd = 0; 19608 rack->rc_pace_fill_if_rttin_range = 0; 19609 rack->rtt_limit_mul = 0; 19610 19611 if (rack_enable_hw_pacing) 19612 rack->rack_hdw_pace_ena = 1; 19613 else 19614 rack->rack_hdw_pace_ena = 0; 19615 if (rack_disable_prr) 19616 rack->rack_no_prr = 1; 19617 else 19618 rack->rack_no_prr = 0; 19619 if (rack_limits_scwnd) 19620 rack->r_limit_scw = 1; 19621 else 19622 rack->r_limit_scw = 0; 19623 err = 0; 19624 } 19625 return (err); 19626 } 19627 19628 static int 19629 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19630 { 19631 struct deferred_opt_list *dol; 19632 19633 dol = malloc(sizeof(struct deferred_opt_list), 19634 M_TCPFSB, M_NOWAIT|M_ZERO); 19635 if (dol == NULL) { 19636 /* 19637 * No space yikes -- fail out.. 
19638 */ 19639 return (0); 19640 } 19641 dol->optname = sopt_name; 19642 dol->optval = loptval; 19643 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19644 return (1); 19645 } 19646 19647 static int 19648 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19649 uint32_t optval, uint64_t loptval) 19650 { 19651 struct epoch_tracker et; 19652 struct sockopt sopt; 19653 struct cc_newreno_opts opt; 19654 uint64_t val; 19655 int error = 0; 19656 uint16_t ca, ss; 19657 19658 switch (sopt_name) { 19659 19660 case TCP_RACK_DSACK_OPT: 19661 RACK_OPTS_INC(tcp_rack_dsack_opt); 19662 if (optval & 0x1) { 19663 rack->rc_rack_tmr_std_based = 1; 19664 } else { 19665 rack->rc_rack_tmr_std_based = 0; 19666 } 19667 if (optval & 0x2) { 19668 rack->rc_rack_use_dsack = 1; 19669 } else { 19670 rack->rc_rack_use_dsack = 0; 19671 } 19672 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19673 break; 19674 case TCP_RACK_PACING_BETA: 19675 RACK_OPTS_INC(tcp_rack_beta); 19676 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19677 /* This only works for newreno. */ 19678 error = EINVAL; 19679 break; 19680 } 19681 if (rack->rc_pacing_cc_set) { 19682 /* 19683 * Set them into the real CC module; 19684 * what's in the rack pcb are the old values 19685 * to be used on restore. 19686 */ 19687 sopt.sopt_dir = SOPT_SET; 19688 opt.name = CC_NEWRENO_BETA; 19689 opt.val = optval; 19690 if (CC_ALGO(tp)->ctl_output != NULL) 19691 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19692 else { 19693 error = ENOENT; 19694 break; 19695 } 19696 } else { 19697 /* 19698 * Not pacing yet so set it into our local 19699 * rack pcb storage. 19700 */ 19701 rack->r_ctl.rc_saved_beta.beta = optval; 19702 } 19703 break; 19704 case TCP_RACK_TIMER_SLOP: 19705 RACK_OPTS_INC(tcp_rack_timer_slop); 19706 rack->r_ctl.timer_slop = optval; 19707 if (rack->rc_tp->t_srtt) { 19708 /* 19709 * If we have an SRTT let's update t_rxtcur 19710 * to have the new slop. 19711 */ 19712 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19713 rack_rto_min, rack_rto_max, 19714 rack->r_ctl.timer_slop); 19715 } 19716 break; 19717 case TCP_RACK_PACING_BETA_ECN: 19718 RACK_OPTS_INC(tcp_rack_beta_ecn); 19719 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) { 19720 /* This only works for newreno. */ 19721 error = EINVAL; 19722 break; 19723 } 19724 if (rack->rc_pacing_cc_set) { 19725 /* 19726 * Set them into the real CC module; 19727 * what's in the rack pcb are the old values 19728 * to be used on restore. 19729 */ 19730 sopt.sopt_dir = SOPT_SET; 19731 opt.name = CC_NEWRENO_BETA_ECN; 19732 opt.val = optval; 19733 if (CC_ALGO(tp)->ctl_output != NULL) 19734 error = CC_ALGO(tp)->ctl_output(tp->ccv, &sopt, &opt); 19735 else 19736 error = ENOENT; 19737 } else { 19738 /* 19739 * Not pacing yet so set it into our local 19740 * rack pcb storage. 
19741 */ 19742 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19743 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19744 } 19745 break; 19746 case TCP_DEFER_OPTIONS: 19747 RACK_OPTS_INC(tcp_defer_opt); 19748 if (optval) { 19749 if (rack->gp_ready) { 19750 /* Too late */ 19751 error = EINVAL; 19752 break; 19753 } 19754 rack->defer_options = 1; 19755 } else 19756 rack->defer_options = 0; 19757 break; 19758 case TCP_RACK_MEASURE_CNT: 19759 RACK_OPTS_INC(tcp_rack_measure_cnt); 19760 if (optval && (optval <= 0xff)) { 19761 rack->r_ctl.req_measurements = optval; 19762 } else 19763 error = EINVAL; 19764 break; 19765 case TCP_REC_ABC_VAL: 19766 RACK_OPTS_INC(tcp_rec_abc_val); 19767 if (optval > 0) 19768 rack->r_use_labc_for_rec = 1; 19769 else 19770 rack->r_use_labc_for_rec = 0; 19771 break; 19772 case TCP_RACK_ABC_VAL: 19773 RACK_OPTS_INC(tcp_rack_abc_val); 19774 if ((optval > 0) && (optval < 255)) 19775 rack->rc_labc = optval; 19776 else 19777 error = EINVAL; 19778 break; 19779 case TCP_HDWR_UP_ONLY: 19780 RACK_OPTS_INC(tcp_pacing_up_only); 19781 if (optval) 19782 rack->r_up_only = 1; 19783 else 19784 rack->r_up_only = 0; 19785 break; 19786 case TCP_PACING_RATE_CAP: 19787 RACK_OPTS_INC(tcp_pacing_rate_cap); 19788 rack->r_ctl.bw_rate_cap = loptval; 19789 break; 19790 case TCP_RACK_PROFILE: 19791 RACK_OPTS_INC(tcp_profile); 19792 error = rack_set_profile(rack, optval); 19793 break; 19794 case TCP_USE_CMP_ACKS: 19795 RACK_OPTS_INC(tcp_use_cmp_acks); 19796 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19797 /* You can't turn it off once its on! */ 19798 error = EINVAL; 19799 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19800 rack->r_use_cmp_ack = 1; 19801 rack->r_mbuf_queue = 1; 19802 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19803 } 19804 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19805 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19806 break; 19807 case TCP_SHARED_CWND_TIME_LIMIT: 19808 RACK_OPTS_INC(tcp_lscwnd); 19809 if (optval) 19810 rack->r_limit_scw = 1; 19811 else 19812 rack->r_limit_scw = 0; 19813 break; 19814 case TCP_RACK_PACE_TO_FILL: 19815 RACK_OPTS_INC(tcp_fillcw); 19816 if (optval == 0) 19817 rack->rc_pace_to_cwnd = 0; 19818 else { 19819 rack->rc_pace_to_cwnd = 1; 19820 if (optval > 1) 19821 rack->r_fill_less_agg = 1; 19822 } 19823 if ((optval >= rack_gp_rtt_maxmul) && 19824 rack_gp_rtt_maxmul && 19825 (optval < 0xf)) { 19826 rack->rc_pace_fill_if_rttin_range = 1; 19827 rack->rtt_limit_mul = optval; 19828 } else { 19829 rack->rc_pace_fill_if_rttin_range = 0; 19830 rack->rtt_limit_mul = 0; 19831 } 19832 break; 19833 case TCP_RACK_NO_PUSH_AT_MAX: 19834 RACK_OPTS_INC(tcp_npush); 19835 if (optval == 0) 19836 rack->r_ctl.rc_no_push_at_mrtt = 0; 19837 else if (optval < 0xff) 19838 rack->r_ctl.rc_no_push_at_mrtt = optval; 19839 else 19840 error = EINVAL; 19841 break; 19842 case TCP_SHARED_CWND_ENABLE: 19843 RACK_OPTS_INC(tcp_rack_scwnd); 19844 if (optval == 0) 19845 rack->rack_enable_scwnd = 0; 19846 else 19847 rack->rack_enable_scwnd = 1; 19848 break; 19849 case TCP_RACK_MBUF_QUEUE: 19850 /* Now do we use the LRO mbuf-queue feature */ 19851 RACK_OPTS_INC(tcp_rack_mbufq); 19852 if (optval || rack->r_use_cmp_ack) 19853 rack->r_mbuf_queue = 1; 19854 else 19855 rack->r_mbuf_queue = 0; 19856 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19857 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19858 else 19859 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19860 break; 19861 case 
TCP_RACK_NONRXT_CFG_RATE: 19862 RACK_OPTS_INC(tcp_rack_cfg_rate); 19863 if (optval == 0) 19864 rack->rack_rec_nonrxt_use_cr = 0; 19865 else 19866 rack->rack_rec_nonrxt_use_cr = 1; 19867 break; 19868 case TCP_NO_PRR: 19869 RACK_OPTS_INC(tcp_rack_noprr); 19870 if (optval == 0) 19871 rack->rack_no_prr = 0; 19872 else if (optval == 1) 19873 rack->rack_no_prr = 1; 19874 else if (optval == 2) 19875 rack->no_prr_addback = 1; 19876 else 19877 error = EINVAL; 19878 break; 19879 case TCP_TIMELY_DYN_ADJ: 19880 RACK_OPTS_INC(tcp_timely_dyn); 19881 if (optval == 0) 19882 rack->rc_gp_dyn_mul = 0; 19883 else { 19884 rack->rc_gp_dyn_mul = 1; 19885 if (optval >= 100) { 19886 /* 19887 * If the user sets something 100 or more 19888 * its the gp_ca value. 19889 */ 19890 rack->r_ctl.rack_per_of_gp_ca = optval; 19891 } 19892 } 19893 break; 19894 case TCP_RACK_DO_DETECTION: 19895 RACK_OPTS_INC(tcp_rack_do_detection); 19896 if (optval == 0) 19897 rack->do_detection = 0; 19898 else 19899 rack->do_detection = 1; 19900 break; 19901 case TCP_RACK_TLP_USE: 19902 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19903 error = EINVAL; 19904 break; 19905 } 19906 RACK_OPTS_INC(tcp_tlp_use); 19907 rack->rack_tlp_threshold_use = optval; 19908 break; 19909 case TCP_RACK_TLP_REDUCE: 19910 /* RACK TLP cwnd reduction (bool) */ 19911 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19912 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19913 break; 19914 /* Pacing related ones */ 19915 case TCP_RACK_PACE_ALWAYS: 19916 /* 19917 * zero is old rack method, 1 is new 19918 * method using a pacing rate. 19919 */ 19920 RACK_OPTS_INC(tcp_rack_pace_always); 19921 if (optval > 0) { 19922 if (rack->rc_always_pace) { 19923 error = EALREADY; 19924 break; 19925 } else if (tcp_can_enable_pacing()) { 19926 rack->rc_always_pace = 1; 19927 if (rack->use_fixed_rate || rack->gp_ready) 19928 rack_set_cc_pacing(rack); 19929 } 19930 else { 19931 error = ENOSPC; 19932 break; 19933 } 19934 } else { 19935 if (rack->rc_always_pace) { 19936 tcp_decrement_paced_conn(); 19937 rack->rc_always_pace = 0; 19938 rack_undo_cc_pacing(rack); 19939 } 19940 } 19941 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19942 tp->t_inpcb->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19943 else 19944 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19945 /* A rate may be set irate or other, if so set seg size */ 19946 rack_update_seg(rack); 19947 break; 19948 case TCP_BBR_RACK_INIT_RATE: 19949 RACK_OPTS_INC(tcp_initial_rate); 19950 val = optval; 19951 /* Change from kbits per second to bytes per second */ 19952 val *= 1000; 19953 val /= 8; 19954 rack->r_ctl.init_rate = val; 19955 if (rack->rc_init_win != rack_default_init_window) { 19956 uint32_t win, snt; 19957 19958 /* 19959 * Options don't always get applied 19960 * in the order you think. So in order 19961 * to assure we update a cwnd we need 19962 * to check and see if we are still 19963 * where we should raise the cwnd. 
19964 */ 19965 win = rc_init_window(rack); 19966 if (SEQ_GT(tp->snd_max, tp->iss)) 19967 snt = tp->snd_max - tp->iss; 19968 else 19969 snt = 0; 19970 if ((snt < win) && 19971 (tp->snd_cwnd < win)) 19972 tp->snd_cwnd = win; 19973 } 19974 if (rack->rc_always_pace) 19975 rack_update_seg(rack); 19976 break; 19977 case TCP_BBR_IWINTSO: 19978 RACK_OPTS_INC(tcp_initial_win); 19979 if (optval && (optval <= 0xff)) { 19980 uint32_t win, snt; 19981 19982 rack->rc_init_win = optval; 19983 win = rc_init_window(rack); 19984 if (SEQ_GT(tp->snd_max, tp->iss)) 19985 snt = tp->snd_max - tp->iss; 19986 else 19987 snt = 0; 19988 if ((snt < win) && 19989 (tp->t_srtt | 19990 #ifdef NETFLIX_PEAKRATE 19991 tp->t_maxpeakrate | 19992 #endif 19993 rack->r_ctl.init_rate)) { 19994 /* 19995 * We are not past the initial window 19996 * and we have some bases for pacing, 19997 * so we need to possibly adjust up 19998 * the cwnd. Note even if we don't set 19999 * the cwnd, its still ok to raise the rc_init_win 20000 * which can be used coming out of idle when we 20001 * would have a rate. 20002 */ 20003 if (tp->snd_cwnd < win) 20004 tp->snd_cwnd = win; 20005 } 20006 if (rack->rc_always_pace) 20007 rack_update_seg(rack); 20008 } else 20009 error = EINVAL; 20010 break; 20011 case TCP_RACK_FORCE_MSEG: 20012 RACK_OPTS_INC(tcp_rack_force_max_seg); 20013 if (optval) 20014 rack->rc_force_max_seg = 1; 20015 else 20016 rack->rc_force_max_seg = 0; 20017 break; 20018 case TCP_RACK_PACE_MAX_SEG: 20019 /* Max segments size in a pace in bytes */ 20020 RACK_OPTS_INC(tcp_rack_max_seg); 20021 rack->rc_user_set_max_segs = optval; 20022 rack_set_pace_segments(tp, rack, __LINE__, NULL); 20023 break; 20024 case TCP_RACK_PACE_RATE_REC: 20025 /* Set the fixed pacing rate in Bytes per second ca */ 20026 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 20027 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20028 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20029 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20030 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20031 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20032 rack->use_fixed_rate = 1; 20033 if (rack->rc_always_pace) 20034 rack_set_cc_pacing(rack); 20035 rack_log_pacing_delay_calc(rack, 20036 rack->r_ctl.rc_fixed_pacing_rate_ss, 20037 rack->r_ctl.rc_fixed_pacing_rate_ca, 20038 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20039 __LINE__, NULL,0); 20040 break; 20041 20042 case TCP_RACK_PACE_RATE_SS: 20043 /* Set the fixed pacing rate in Bytes per second ca */ 20044 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 20045 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20046 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 20047 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20048 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20049 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20050 rack->use_fixed_rate = 1; 20051 if (rack->rc_always_pace) 20052 rack_set_cc_pacing(rack); 20053 rack_log_pacing_delay_calc(rack, 20054 rack->r_ctl.rc_fixed_pacing_rate_ss, 20055 rack->r_ctl.rc_fixed_pacing_rate_ca, 20056 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20057 __LINE__, NULL, 0); 20058 break; 20059 20060 case TCP_RACK_PACE_RATE_CA: 20061 /* Set the fixed pacing rate in Bytes per second ca */ 20062 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 20063 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 20064 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 20065 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 20066 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 20067 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 20068 rack->use_fixed_rate = 1; 20069 
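/* Note: as in the REC and SS cases above, the CA rate (in bytes per second) also seeds any still-unset SS/REC fixed rates, so a single setsockopt() yields a uniform fixed pacing rate. */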
if (rack->rc_always_pace) 20070 rack_set_cc_pacing(rack); 20071 rack_log_pacing_delay_calc(rack, 20072 rack->r_ctl.rc_fixed_pacing_rate_ss, 20073 rack->r_ctl.rc_fixed_pacing_rate_ca, 20074 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 20075 __LINE__, NULL, 0); 20076 break; 20077 case TCP_RACK_GP_INCREASE_REC: 20078 RACK_OPTS_INC(tcp_gp_inc_rec); 20079 rack->r_ctl.rack_per_of_gp_rec = optval; 20080 rack_log_pacing_delay_calc(rack, 20081 rack->r_ctl.rack_per_of_gp_ss, 20082 rack->r_ctl.rack_per_of_gp_ca, 20083 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20084 __LINE__, NULL, 0); 20085 break; 20086 case TCP_RACK_GP_INCREASE_CA: 20087 RACK_OPTS_INC(tcp_gp_inc_ca); 20088 ca = optval; 20089 if (ca < 100) { 20090 /* 20091 * We don't allow any reduction 20092 * over the GP b/w. 20093 */ 20094 error = EINVAL; 20095 break; 20096 } 20097 rack->r_ctl.rack_per_of_gp_ca = ca; 20098 rack_log_pacing_delay_calc(rack, 20099 rack->r_ctl.rack_per_of_gp_ss, 20100 rack->r_ctl.rack_per_of_gp_ca, 20101 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20102 __LINE__, NULL, 0); 20103 break; 20104 case TCP_RACK_GP_INCREASE_SS: 20105 RACK_OPTS_INC(tcp_gp_inc_ss); 20106 ss = optval; 20107 if (ss < 100) { 20108 /* 20109 * We don't allow any reduction 20110 * over the GP b/w. 20111 */ 20112 error = EINVAL; 20113 break; 20114 } 20115 rack->r_ctl.rack_per_of_gp_ss = ss; 20116 rack_log_pacing_delay_calc(rack, 20117 rack->r_ctl.rack_per_of_gp_ss, 20118 rack->r_ctl.rack_per_of_gp_ca, 20119 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20120 __LINE__, NULL, 0); 20121 break; 20122 case TCP_RACK_RR_CONF: 20123 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20124 if (optval && optval <= 3) 20125 rack->r_rr_config = optval; 20126 else 20127 rack->r_rr_config = 0; 20128 break; 20129 case TCP_HDWR_RATE_CAP: 20130 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20131 if (optval) { 20132 if (rack->r_rack_hw_rate_caps == 0) 20133 rack->r_rack_hw_rate_caps = 1; 20134 else 20135 error = EALREADY; 20136 } else { 20137 rack->r_rack_hw_rate_caps = 0; 20138 } 20139 break; 20140 case TCP_BBR_HDWR_PACE: 20141 RACK_OPTS_INC(tcp_hdwr_pacing); 20142 if (optval){ 20143 if (rack->rack_hdrw_pacing == 0) { 20144 rack->rack_hdw_pace_ena = 1; 20145 rack->rack_attempt_hdwr_pace = 0; 20146 } else 20147 error = EALREADY; 20148 } else { 20149 rack->rack_hdw_pace_ena = 0; 20150 #ifdef RATELIMIT 20151 if (rack->r_ctl.crte != NULL) { 20152 rack->rack_hdrw_pacing = 0; 20153 rack->rack_attempt_hdwr_pace = 0; 20154 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20155 rack->r_ctl.crte = NULL; 20156 } 20157 #endif 20158 } 20159 break; 20160 /* End Pacing related ones */ 20161 case TCP_RACK_PRR_SENDALOT: 20162 /* Allow PRR to send more than one seg */ 20163 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20164 rack->r_ctl.rc_prr_sendalot = optval; 20165 break; 20166 case TCP_RACK_MIN_TO: 20167 /* Minimum time between rack t-o's in ms */ 20168 RACK_OPTS_INC(tcp_rack_min_to); 20169 rack->r_ctl.rc_min_to = optval; 20170 break; 20171 case TCP_RACK_EARLY_SEG: 20172 /* If early recovery max segments */ 20173 RACK_OPTS_INC(tcp_rack_early_seg); 20174 rack->r_ctl.rc_early_recovery_segs = optval; 20175 break; 20176 case TCP_RACK_ENABLE_HYSTART: 20177 { 20178 if (optval) { 20179 tp->ccv->flags |= CCF_HYSTART_ALLOWED; 20180 if (rack_do_hystart > RACK_HYSTART_ON) 20181 tp->ccv->flags |= CCF_HYSTART_CAN_SH_CWND; 20182 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 20183 tp->ccv->flags |= CCF_HYSTART_CONS_SSTH; 20184 } else { 20185 tp->ccv->flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 20186 } 
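/* Note: the hystart enable above is tiered by rack_do_hystart: any non-zero optval allows hystart (CCF_HYSTART_ALLOWED), settings above RACK_HYSTART_ON also allow shrinking the cwnd (CCF_HYSTART_CAN_SH_CWND), and settings above RACK_HYSTART_ON_W_SC additionally request a conservative ssthresh (CCF_HYSTART_CONS_SSTH). */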
20187 } 20188 break; 20189 case TCP_RACK_REORD_THRESH: 20190 /* RACK reorder threshold (shift amount) */ 20191 RACK_OPTS_INC(tcp_rack_reord_thresh); 20192 if ((optval > 0) && (optval < 31)) 20193 rack->r_ctl.rc_reorder_shift = optval; 20194 else 20195 error = EINVAL; 20196 break; 20197 case TCP_RACK_REORD_FADE: 20198 /* Does reordering fade after ms time */ 20199 RACK_OPTS_INC(tcp_rack_reord_fade); 20200 rack->r_ctl.rc_reorder_fade = optval; 20201 break; 20202 case TCP_RACK_TLP_THRESH: 20203 /* RACK TLP threshold i.e. srtt+(srtt/N) */ 20204 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20205 if (optval) 20206 rack->r_ctl.rc_tlp_threshold = optval; 20207 else 20208 error = EINVAL; 20209 break; 20210 case TCP_BBR_USE_RACK_RR: 20211 RACK_OPTS_INC(tcp_rack_rr); 20212 if (optval) 20213 rack->use_rack_rr = 1; 20214 else 20215 rack->use_rack_rr = 0; 20216 break; 20217 case TCP_FAST_RSM_HACK: 20218 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20219 if (optval) 20220 rack->fast_rsm_hack = 1; 20221 else 20222 rack->fast_rsm_hack = 0; 20223 break; 20224 case TCP_RACK_PKT_DELAY: 20225 /* RACK added ms i.e. rack-rtt + reord + N */ 20226 RACK_OPTS_INC(tcp_rack_pkt_delay); 20227 rack->r_ctl.rc_pkt_delay = optval; 20228 break; 20229 case TCP_DELACK: 20230 RACK_OPTS_INC(tcp_rack_delayed_ack); 20231 if (optval == 0) 20232 tp->t_delayed_ack = 0; 20233 else 20234 tp->t_delayed_ack = 1; 20235 if (tp->t_flags & TF_DELACK) { 20236 tp->t_flags &= ~TF_DELACK; 20237 tp->t_flags |= TF_ACKNOW; 20238 NET_EPOCH_ENTER(et); 20239 rack_output(tp); 20240 NET_EPOCH_EXIT(et); 20241 } 20242 break; 20243 20244 case TCP_BBR_RACK_RTT_USE: 20245 RACK_OPTS_INC(tcp_rack_rtt_use); 20246 if ((optval != USE_RTT_HIGH) && 20247 (optval != USE_RTT_LOW) && 20248 (optval != USE_RTT_AVG)) 20249 error = EINVAL; 20250 else 20251 rack->r_ctl.rc_rate_sample_method = optval; 20252 break; 20253 case TCP_DATA_AFTER_CLOSE: 20254 RACK_OPTS_INC(tcp_data_after_close); 20255 if (optval) 20256 rack->rc_allow_data_af_clo = 1; 20257 else 20258 rack->rc_allow_data_af_clo = 0; 20259 break; 20260 default: 20261 break; 20262 } 20263 #ifdef NETFLIX_STATS 20264 tcp_log_socket_option(tp, sopt_name, optval, error); 20265 #endif 20266 return (error); 20267 } 20268 20269 20270 static void 20271 rack_apply_deferred_options(struct tcp_rack *rack) 20272 { 20273 struct deferred_opt_list *dol, *sdol; 20274 uint32_t s_optval; 20275 20276 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20277 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20278 /* Disadvantage of deferral is you lose the error return */ 20279 s_optval = (uint32_t)dol->optval; 20280 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20281 free(dol, M_TCPDO); 20282 } 20283 } 20284 20285 static void 20286 rack_hw_tls_change(struct tcpcb *tp, int chg) 20287 { 20288 /* 20289 * HW TLS state has changed; fix all 20290 * rsm's in flight. 
20291 */ 20292 struct tcp_rack *rack; 20293 struct rack_sendmap *rsm; 20294 20295 rack = (struct tcp_rack *)tp->t_fb_ptr; 20296 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 20297 if (chg) 20298 rsm->r_hw_tls = 1; 20299 else 20300 rsm->r_hw_tls = 0; 20301 } 20302 if (chg) 20303 rack->r_ctl.fsb.hw_tls = 1; 20304 else 20305 rack->r_ctl.fsb.hw_tls = 0; 20306 } 20307 20308 static int 20309 rack_pru_options(struct tcpcb *tp, int flags) 20310 { 20311 if (flags & PRUS_OOB) 20312 return (EOPNOTSUPP); 20313 return (0); 20314 } 20315 20316 static struct tcp_function_block __tcp_rack = { 20317 .tfb_tcp_block_name = __XSTRING(STACKNAME), 20318 .tfb_tcp_output = rack_output, 20319 .tfb_do_queued_segments = ctf_do_queued_segments, 20320 .tfb_do_segment_nounlock = rack_do_segment_nounlock, 20321 .tfb_tcp_do_segment = rack_do_segment, 20322 .tfb_tcp_ctloutput = rack_ctloutput, 20323 .tfb_tcp_fb_init = rack_init, 20324 .tfb_tcp_fb_fini = rack_fini, 20325 .tfb_tcp_timer_stop_all = rack_stopall, 20326 .tfb_tcp_timer_activate = rack_timer_activate, 20327 .tfb_tcp_timer_active = rack_timer_active, 20328 .tfb_tcp_timer_stop = rack_timer_stop, 20329 .tfb_tcp_rexmit_tmr = rack_remxt_tmr, 20330 .tfb_tcp_handoff_ok = rack_handoff_ok, 20331 .tfb_tcp_mtu_chg = rack_mtu_change, 20332 .tfb_pru_options = rack_pru_options, 20333 .tfb_hwtls_change = rack_hw_tls_change, 20334 .tfb_compute_pipe = rack_compute_pipe, 20335 .tfb_flags = TCP_FUNC_OUTPUT_CANDROP, 20336 }; 20337 20338 /* 20339 * rack_ctloutput() must drop the inpcb lock before performing copyin on 20340 * socket option arguments. When it re-acquires the lock after the copy, it 20341 * has to revalidate that the connection is still valid for the socket 20342 * option. 20343 */ 20344 static int 20345 rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt) 20346 { 20347 #ifdef INET6 20348 struct ip6_hdr *ip6; 20349 #endif 20350 #ifdef INET 20351 struct ip *ip; 20352 #endif 20353 struct tcpcb *tp; 20354 struct tcp_rack *rack; 20355 uint64_t loptval; 20356 int32_t error = 0, optval; 20357 20358 tp = intotcpcb(inp); 20359 rack = (struct tcp_rack *)tp->t_fb_ptr; 20360 if (rack == NULL) { 20361 INP_WUNLOCK(inp); 20362 return (EINVAL); 20363 } 20364 #ifdef INET6 20365 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 20366 #endif 20367 #ifdef INET 20368 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 20369 #endif 20370 20371 switch (sopt->sopt_level) { 20372 #ifdef INET6 20373 case IPPROTO_IPV6: 20374 MPASS(inp->inp_vflag & INP_IPV6PROTO); 20375 switch (sopt->sopt_name) { 20376 case IPV6_USE_MIN_MTU: 20377 tcp6_use_min_mtu(tp); 20378 break; 20379 case IPV6_TCLASS: 20380 /* 20381 * The DSCP codepoint has changed, update the fsb. 20382 */ 20383 ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | 20384 (rack->rc_inp->inp_flow & IPV6_FLOWINFO_MASK); 20385 break; 20386 } 20387 INP_WUNLOCK(inp); 20388 return (0); 20389 #endif 20390 #ifdef INET 20391 case IPPROTO_IP: 20392 switch (sopt->sopt_name) { 20393 case IP_TOS: 20394 /* 20395 * The DSCP codepoint has changed, update the fsb. 20396 */ 20397 ip->ip_tos = rack->rc_inp->inp_ip_tos; 20398 break; 20399 case IP_TTL: 20400 /* 20401 * The TTL has changed, update the fsb. 
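	 * (Illustrative only: this path is reached after the base stack has
	 * already handled something like
	 *
	 *	int ttl = 64;
	 *	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
	 *
	 * so all we do here is mirror the new inp_ip_ttl into the pre-built
	 * header kept in the fast send block.)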
20402 */ 20403 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20404 break; 20405 } 20406 INP_WUNLOCK(inp); 20407 return (0); 20408 #endif 20409 } 20410 20411 switch (sopt->sopt_name) { 20412 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20413 /* Pacing related ones */ 20414 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20415 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20416 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20417 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20418 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20419 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20420 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20421 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20422 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20423 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20424 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20425 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20426 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20427 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20428 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20429 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20430 /* End pacing related */ 20431 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20432 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20433 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20434 case TCP_RACK_MIN_TO: /* URL:min_to */ 20435 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20436 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20437 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20438 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20439 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20440 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20441 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20442 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20443 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20444 case TCP_NO_PRR: /* URL:noprr */ 20445 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20446 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20447 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20448 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20449 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20450 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20451 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20452 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20453 case TCP_RACK_PROFILE: /* URL:profile */ 20454 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20455 case TCP_RACK_ABC_VAL: /* URL:labc */ 20456 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20457 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20458 case TCP_DEFER_OPTIONS: /* URL:defer */ 20459 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20460 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20461 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20462 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20463 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20464 break; 20465 default: 20466 /* Filter off all unknown options to the base stack */ 20467 return (tcp_default_ctloutput(inp, sopt)); 20468 break; 20469 } 20470 INP_WUNLOCK(inp); 20471 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20472 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20473 /* 20474 * We truncate it down to 32 bits for the socket-option trace this 20475 * means rates > 34Gbps won't show right, but thats probably ok. 
20476 */ 20477 optval = (uint32_t)loptval; 20478 } else { 20479 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20480 /* Save it in 64 bit form too */ 20481 loptval = optval; 20482 } 20483 if (error) 20484 return (error); 20485 INP_WLOCK(inp); 20486 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 20487 INP_WUNLOCK(inp); 20488 return (ECONNRESET); 20489 } 20490 if (tp->t_fb != &__tcp_rack) { 20491 INP_WUNLOCK(inp); 20492 return (ENOPROTOOPT); 20493 } 20494 if (rack->defer_options && (rack->gp_ready == 0) && 20495 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20496 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20497 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20498 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20499 /* Options are beind deferred */ 20500 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20501 INP_WUNLOCK(inp); 20502 return (0); 20503 } else { 20504 /* No memory to defer, fail */ 20505 INP_WUNLOCK(inp); 20506 return (ENOMEM); 20507 } 20508 } 20509 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20510 INP_WUNLOCK(inp); 20511 return (error); 20512 } 20513 20514 static void 20515 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20516 { 20517 20518 INP_WLOCK_ASSERT(tp->t_inpcb); 20519 bzero(ti, sizeof(*ti)); 20520 20521 ti->tcpi_state = tp->t_state; 20522 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20523 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20524 if (tp->t_flags & TF_SACK_PERMIT) 20525 ti->tcpi_options |= TCPI_OPT_SACK; 20526 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20527 ti->tcpi_options |= TCPI_OPT_WSCALE; 20528 ti->tcpi_snd_wscale = tp->snd_scale; 20529 ti->tcpi_rcv_wscale = tp->rcv_scale; 20530 } 20531 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 20532 ti->tcpi_options |= TCPI_OPT_ECN; 20533 if (tp->t_flags & TF_FASTOPEN) 20534 ti->tcpi_options |= TCPI_OPT_TFO; 20535 /* still kept in ticks is t_rcvtime */ 20536 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20537 /* Since we hold everything in precise useconds this is easy */ 20538 ti->tcpi_rtt = tp->t_srtt; 20539 ti->tcpi_rttvar = tp->t_rttvar; 20540 ti->tcpi_rto = tp->t_rxtcur; 20541 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20542 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20543 /* 20544 * FreeBSD-specific extension fields for tcp_info. 20545 */ 20546 ti->tcpi_rcv_space = tp->rcv_wnd; 20547 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20548 ti->tcpi_snd_wnd = tp->snd_wnd; 20549 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
*/ 20550 ti->tcpi_snd_nxt = tp->snd_nxt; 20551 ti->tcpi_snd_mss = tp->t_maxseg; 20552 ti->tcpi_rcv_mss = tp->t_maxseg; 20553 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; 20554 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; 20555 ti->tcpi_snd_zerowin = tp->t_sndzerowin; 20556 #ifdef NETFLIX_STATS 20557 ti->tcpi_total_tlp = tp->t_sndtlppack; 20558 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; 20559 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); 20560 #endif 20561 #ifdef TCP_OFFLOAD 20562 if (tp->t_flags & TF_TOE) { 20563 ti->tcpi_options |= TCPI_OPT_TOE; 20564 tcp_offload_tcp_info(tp, ti); 20565 } 20566 #endif 20567 } 20568 20569 static int 20570 rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt) 20571 { 20572 struct tcpcb *tp; 20573 struct tcp_rack *rack; 20574 int32_t error, optval; 20575 uint64_t val, loptval; 20576 struct tcp_info ti; 20577 /* 20578 * Because all our options are either boolean or an int, we can just 20579 * pull everything into optval and then unlock and copy. If we ever 20580 * add a option that is not a int, then this will have quite an 20581 * impact to this routine. 20582 */ 20583 error = 0; 20584 tp = intotcpcb(inp); 20585 rack = (struct tcp_rack *)tp->t_fb_ptr; 20586 if (rack == NULL) { 20587 INP_WUNLOCK(inp); 20588 return (EINVAL); 20589 } 20590 switch (sopt->sopt_name) { 20591 case TCP_INFO: 20592 /* First get the info filled */ 20593 rack_fill_info(tp, &ti); 20594 /* Fix up the rtt related fields if needed */ 20595 INP_WUNLOCK(inp); 20596 error = sooptcopyout(sopt, &ti, sizeof ti); 20597 return (error); 20598 /* 20599 * Beta is the congestion control value for NewReno that influences how 20600 * much of a backoff happens when loss is detected. It is normally set 20601 * to 50 for 50% i.e. the cwnd is reduced to 50% of its previous value 20602 * when you exit recovery. 20603 */ 20604 case TCP_RACK_PACING_BETA: 20605 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 20606 error = EINVAL; 20607 else if (rack->rc_pacing_cc_set == 0) 20608 optval = rack->r_ctl.rc_saved_beta.beta; 20609 else { 20610 /* 20611 * Reach out into the CC data and report back what 20612 * I have previously set. Yeah it looks hackish but 20613 * we don't want to report the saved values. 20614 */ 20615 if (tp->ccv->cc_data) 20616 optval = ((struct newreno *)tp->ccv->cc_data)->beta; 20617 else 20618 error = EINVAL; 20619 } 20620 break; 20621 /* 20622 * Beta_ecn is the congestion control value for NewReno that influences how 20623 * much of a backoff happens when a ECN mark is detected. It is normally set 20624 * to 80 for 80% i.e. the cwnd is reduced by 20% of its previous value when 20625 * you exit recovery. Note that classic ECN has a beta of 50, it is only 20626 * ABE Ecn that uses this "less" value, but we do too with pacing :) 20627 */ 20628 20629 case TCP_RACK_PACING_BETA_ECN: 20630 if (strcmp(tp->cc_algo->name, CCALGONAME_NEWRENO) != 0) 20631 error = EINVAL; 20632 else if (rack->rc_pacing_cc_set == 0) 20633 optval = rack->r_ctl.rc_saved_beta.beta_ecn; 20634 else { 20635 /* 20636 * Reach out into the CC data and report back what 20637 * I have previously set. Yeah it looks hackish but 20638 * we don't want to report the saved values. 
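	 * (For example, with beta_ecn left at the 80 mentioned above, an ECN
	 * mark cuts the congestion window to 80% of its prior value, versus
	 * the 50% cut that the loss-triggered beta applies.)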
20639 */ 20640 if (tp->ccv->cc_data) 20641 optval = ((struct newreno *)tp->ccv->cc_data)->beta_ecn; 20642 else 20643 error = EINVAL; 20644 } 20645 break; 20646 case TCP_RACK_DSACK_OPT: 20647 optval = 0; 20648 if (rack->rc_rack_tmr_std_based) { 20649 optval |= 1; 20650 } 20651 if (rack->rc_rack_use_dsack) { 20652 optval |= 2; 20653 } 20654 break; 20655 case TCP_RACK_ENABLE_HYSTART: 20656 { 20657 if (tp->ccv->flags & CCF_HYSTART_ALLOWED) { 20658 optval = RACK_HYSTART_ON; 20659 if (tp->ccv->flags & CCF_HYSTART_CAN_SH_CWND) 20660 optval = RACK_HYSTART_ON_W_SC; 20661 if (tp->ccv->flags & CCF_HYSTART_CONS_SSTH) 20662 optval = RACK_HYSTART_ON_W_SC_C; 20663 } else { 20664 optval = RACK_HYSTART_OFF; 20665 } 20666 } 20667 break; 20668 case TCP_FAST_RSM_HACK: 20669 optval = rack->fast_rsm_hack; 20670 break; 20671 case TCP_DEFER_OPTIONS: 20672 optval = rack->defer_options; 20673 break; 20674 case TCP_RACK_MEASURE_CNT: 20675 optval = rack->r_ctl.req_measurements; 20676 break; 20677 case TCP_REC_ABC_VAL: 20678 optval = rack->r_use_labc_for_rec; 20679 break; 20680 case TCP_RACK_ABC_VAL: 20681 optval = rack->rc_labc; 20682 break; 20683 case TCP_HDWR_UP_ONLY: 20684 optval= rack->r_up_only; 20685 break; 20686 case TCP_PACING_RATE_CAP: 20687 loptval = rack->r_ctl.bw_rate_cap; 20688 break; 20689 case TCP_RACK_PROFILE: 20690 /* You cannot retrieve a profile, its write only */ 20691 error = EINVAL; 20692 break; 20693 case TCP_USE_CMP_ACKS: 20694 optval = rack->r_use_cmp_ack; 20695 break; 20696 case TCP_RACK_PACE_TO_FILL: 20697 optval = rack->rc_pace_to_cwnd; 20698 if (optval && rack->r_fill_less_agg) 20699 optval++; 20700 break; 20701 case TCP_RACK_NO_PUSH_AT_MAX: 20702 optval = rack->r_ctl.rc_no_push_at_mrtt; 20703 break; 20704 case TCP_SHARED_CWND_ENABLE: 20705 optval = rack->rack_enable_scwnd; 20706 break; 20707 case TCP_RACK_NONRXT_CFG_RATE: 20708 optval = rack->rack_rec_nonrxt_use_cr; 20709 break; 20710 case TCP_NO_PRR: 20711 if (rack->rack_no_prr == 1) 20712 optval = 1; 20713 else if (rack->no_prr_addback == 1) 20714 optval = 2; 20715 else 20716 optval = 0; 20717 break; 20718 case TCP_RACK_DO_DETECTION: 20719 optval = rack->do_detection; 20720 break; 20721 case TCP_RACK_MBUF_QUEUE: 20722 /* Now do we use the LRO mbuf-queue feature */ 20723 optval = rack->r_mbuf_queue; 20724 break; 20725 case TCP_TIMELY_DYN_ADJ: 20726 optval = rack->rc_gp_dyn_mul; 20727 break; 20728 case TCP_BBR_IWINTSO: 20729 optval = rack->rc_init_win; 20730 break; 20731 case TCP_RACK_TLP_REDUCE: 20732 /* RACK TLP cwnd reduction (bool) */ 20733 optval = rack->r_ctl.rc_tlp_cwnd_reduce; 20734 break; 20735 case TCP_BBR_RACK_INIT_RATE: 20736 val = rack->r_ctl.init_rate; 20737 /* convert to kbits per sec */ 20738 val *= 8; 20739 val /= 1000; 20740 optval = (uint32_t)val; 20741 break; 20742 case TCP_RACK_FORCE_MSEG: 20743 optval = rack->rc_force_max_seg; 20744 break; 20745 case TCP_RACK_PACE_MAX_SEG: 20746 /* Max segments in a pace */ 20747 optval = rack->rc_user_set_max_segs; 20748 break; 20749 case TCP_RACK_PACE_ALWAYS: 20750 /* Use the always pace method */ 20751 optval = rack->rc_always_pace; 20752 break; 20753 case TCP_RACK_PRR_SENDALOT: 20754 /* Allow PRR to send more than one seg */ 20755 optval = rack->r_ctl.rc_prr_sendalot; 20756 break; 20757 case TCP_RACK_MIN_TO: 20758 /* Minimum time between rack t-o's in ms */ 20759 optval = rack->r_ctl.rc_min_to; 20760 break; 20761 case TCP_RACK_EARLY_SEG: 20762 /* If early recovery max segments */ 20763 optval = rack->r_ctl.rc_early_recovery_segs; 20764 break; 20765 case TCP_RACK_REORD_THRESH: 
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;

static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

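		/*
		 * (rack_zone backs the per-segment rack_sendmap entries and
		 * rack_pcb_zone the per-connection struct tcp_rack; elsewhere
		 * in this file they are carved up with, e.g.,
		 *
		 *	rsm = uma_zalloc(rack_zone, M_NOWAIT);
		 *
		 * and handed back with uma_zfree() when the map entry or the
		 * connection goes away.)
		 */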
		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);
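/*
 * Usage sketch (illustrative, not part of the module itself): once the
 * module is loaded with kldload(8), an application can move a socket onto
 * the stack registered above through the base stack's TCP_FUNCTION_BLK
 * socket option, e.g.:
 *
 *	struct tcp_function_set tfs;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *
 * or the stack can be made the system-wide default via the
 * net.inet.tcp.functions_default sysctl.
 */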