/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_ratelimit.h"
#include "opt_kern_tls.h"
#if defined(INET) || defined(INET6)
#include <sys/param.h>
#include <sys/arb.h>
#include <sys/module.h>
#include <sys/kernel.h>
#ifdef TCP_HHOOK
#include <sys/hhook.h>
#endif
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#ifdef STATS
#include <sys/qmath.h>
#include <sys/tree.h>
#include <sys/stats.h>		/* Must come after qmath.h and tree.h */
#else
#include <sys/tree.h>
#endif
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/tim_filter.h>
#include <sys/smp.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <sys/protosw.h>
#ifdef TCP_ACCOUNTING
#include <sys/sched.h>
#include <machine/cpu.h>
#endif
#include <vm/uma.h>

#include <net/route.h>
#include <net/route/nhop.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#define TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_syncache.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_ratelimit.h>
#include <netinet/tcp_accounting.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_newreno.h>
#include <netinet/tcp_fastopen.h>
#include <netinet/tcp_lro.h>
#ifdef NETFLIX_SHARED_CWND
#include <netinet/tcp_shared_cwnd.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcp_ecn.h>

#include <netipsec/ipsec_support.h>

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif				/* IPSEC */

#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>

#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
#include "sack_filter.h"
#include "tcp_rack.h"
#include "rack_bbr_common.h"

uma_zone_t rack_zone;
uma_zone_t rack_pcb_zone;

#ifndef TICKS2SBT
#define TICKS2SBT(__t)	(tick_sbt * ((sbintime_t)(__t)))
#endif

VNET_DECLARE(uint32_t, newreno_beta);
VNET_DECLARE(uint32_t, newreno_beta_ecn);
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)


MALLOC_DEFINE(M_TCPFSB, "tcp_fsb", "TCP fast send block");
MALLOC_DEFINE(M_TCPDO, "tcp_do", "TCP deferred options");

struct sysctl_ctx_list rack_sysctl_ctx;
struct sysctl_oid *rack_sysctl_root;

#define CUM_ACKED 1
#define SACKED 2

/*
 * The RACK module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Matt Mathis's Rate Halving which slowly drops
 *   the congestion window so that the ack clock can
 *   be maintained during a recovery.
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Each
 * state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also overwritten with a new version since it
 * must maintain the new rack scoreboard.
 *
 */
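
/*
 * Illustrative sketch only (the identifiers here are hypothetical, not the
 * dispatch code used later in this file): once SACK support has been
 * verified, the generic do_segment path conceptually reduces to a single
 * per-state indirect call, e.g.:
 *
 *	static int (*state_handler[TCP_NSTATES])(struct mbuf *, struct tcphdr *, ...);
 *	...
 *	retval = (*state_handler[tp->t_state])(m, th, so, tp, ...);
 */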
static int32_t rack_tlp_thresh = 1;
static int32_t rack_tlp_limit = 2;	/* No more than 2 TLPs w-out new data */
static int32_t rack_tlp_use_greater = 1;
static int32_t rack_reorder_thresh = 2;
static int32_t rack_reorder_fade = 60000000;	/* 0 - never fade, def 60,000,000 - 60 seconds */
static uint8_t rack_req_measurements = 1;
/* Attack threshold detections */
static uint32_t rack_highest_sack_thresh_seen = 0;
static uint32_t rack_highest_move_thresh_seen = 0;
static int32_t rack_enable_hw_pacing = 0;	/* Due to CCSP keep it off by default */
static int32_t rack_hw_pace_extra_slots = 2;	/* 2 extra MSS time betweens */
static int32_t rack_hw_rate_caps = 1;		/* 1; */
static int32_t rack_hw_rate_min = 0;		/* 1500000; */
static int32_t rack_hw_rate_to_low = 0;		/* 1200000; */
static int32_t rack_hw_up_only = 1;
static int32_t rack_stats_gets_ms_rtt = 1;
static int32_t rack_prr_addbackmax = 2;
static int32_t rack_do_hystart = 0;
static int32_t rack_apply_rtt_with_reduced_conf = 0;

static int32_t rack_pkt_delay = 1000;
static int32_t rack_send_a_lot_in_prr = 1;
static int32_t rack_min_to = 1000;	/* Number of microsecond min timeout */
static int32_t rack_verbose_logging = 0;
static int32_t rack_ignore_data_after_close = 1;
static int32_t rack_enable_shared_cwnd = 1;
static int32_t rack_use_cmp_acks = 1;
static int32_t rack_use_fsb = 1;
static int32_t rack_use_rfo = 1;
static int32_t rack_use_rsm_rfo = 1;
static int32_t rack_max_abc_post_recovery = 2;
static int32_t rack_client_low_buf = 0;
static int32_t rack_dsack_std_based = 0x3;	/* Bit field: bit 1 sets rc_rack_tmr_std_based and bit 2 sets
						 * rc_rack_use_dsack, so the default 0x3 enables both. */
#ifdef TCP_ACCOUNTING
static int32_t rack_tcp_accounting = 0;
#endif
static int32_t rack_limits_scwnd = 1;
static int32_t rack_enable_mqueue_for_nonpaced = 0;
static int32_t rack_disable_prr = 0;
static int32_t use_rack_rr = 1;
static int32_t rack_non_rxt_use_cr = 0;		/* does a non-rxt in recovery use the configured rate (ss/ca)? */
static int32_t rack_persist_min = 250000;	/* 250ms in usecs */
static int32_t rack_persist_max = 2000000;	/* 2 Second in usec's */
static int32_t rack_sack_not_required = 1;	/* set to one to allow non-sack to use rack */
static int32_t rack_default_init_window = 0;	/* Use system default */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20;	/* In percentage form */
static int32_t rack_enobuf_hw_boost_mult = 2;	/* How many times the hw rate we boost slot using time_between */
static int32_t rack_enobuf_hw_max = 12000;	/* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000;	/* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2;		/* How many max_segs the rwnd must be before we hold off sending */

/*
 * Currently regular tcp has a rto_min of 30ms
 * the backoff goes 12 times so that ends up
 * being a total of 122.850 seconds before a
 * connection is killed.
 */
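/*
 * Worked out (assuming a straight doubling of the 30ms base RTO on each
 * of the 12 backoffs): 30ms * (1 + 2 + 4 + ... + 2048) = 30ms * 4095 =
 * 122,850ms, i.e. the 122.850 seconds noted above.
 */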
static uint32_t rack_def_data_window = 20;
static uint32_t rack_goal_bdp = 2;
static uint32_t rack_min_srtts = 1;
static uint32_t rack_min_measure_usec = 0;
static int32_t rack_tlp_min = 10000;	/* 10ms */
static int32_t rack_rto_min = 30000;	/* 30,000 usec same as main freebsd */
static int32_t rack_rto_max = 4000000;	/* 4 seconds in usec's */
static const int32_t rack_free_cache = 2;
static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000;	/* 40ms in usecs */
static int32_t rack_slot_reduction = 4;
static int32_t rack_wma_divisor = 8;		/* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
static int32_t rack_def_profile = 0;

static int32_t rack_lower_cwnd_at_tlp = 0;
static int32_t rack_limited_retran = 0;
static int32_t rack_always_send_oldest = 0;
static int32_t rack_tlp_threshold_use = TLP_USE_TWO_ONE;

static uint16_t rack_per_of_gp_ss = 250;	/* 250 % slow-start */
static uint16_t rack_per_of_gp_ca = 200;	/* 200 % congestion-avoidance */
static uint16_t rack_per_of_gp_rec = 200;	/* 200 % of bw */

/* Probertt */
static uint16_t rack_per_of_gp_probertt = 60;	/* 60% of bw */
static uint16_t rack_per_of_gp_lowthresh = 40;	/* 40% is bottom */
static uint16_t rack_per_of_gp_probertt_reduce = 10;	/* 10% reduction */
static uint16_t rack_atexit_prtt_hbp = 130;	/* Clamp to 130% on exit prtt if highly buffered path */
static uint16_t rack_atexit_prtt = 130;		/* Clamp to 100% on exit prtt if non highly buffered path */

static uint32_t rack_max_drain_wait = 2;	/* How many gp srtt's before we give up draining */
static uint32_t rack_must_drain = 1;		/* How many GP srtt's we *must* wait */
static uint32_t rack_probertt_use_min_rtt_entry = 1;	/* Use the min to calculate the goal else gp_srtt */
static uint32_t rack_probertt_use_min_rtt_exit = 0;
static uint32_t rack_probe_rtt_sets_cwnd = 0;
static uint32_t rack_probe_rtt_safety_val = 2000000;	/* No more than 2 sec in probe-rtt */
static uint32_t rack_time_between_probertt = 9600000;	/* 9.6 sec in usecs */
static uint32_t rack_probertt_gpsrtt_cnt_mul = 0;	/* How many srtt periods does probe-rtt last top fraction */
static uint32_t rack_probertt_gpsrtt_cnt_div = 0;	/* How many srtt periods does probe-rtt last bottom fraction */
static uint32_t rack_min_probertt_hold = 40000;	/* Equal to delayed ack time */
static uint32_t rack_probertt_filter_life = 10000000;
static uint32_t rack_probertt_lower_within = 10;
static uint32_t rack_min_rtt_movement = 250000;	/* Must move at least 250ms (in microseconds) to count as a lowering */
static int32_t rack_pace_one_seg = 0;		/* Shall we pace for less than 1.4Meg 1MSS at a time */
static int32_t rack_probertt_clear_is = 1;
static int32_t rack_max_drain_hbp = 1;		/* Extra drain times gpsrtt for highly buffered paths */
static int32_t rack_hbp_thresh = 3;		/* what is the divisor max_rtt/min_rtt to decide a hbp */

/* Part of pacing */
static int32_t rack_max_per_above = 30;		/* When we go to increment stop if above 100+this% */

/* Timely information */
/* Combine these two gives the range of 'no change' to bw */
/* ie the up/down provide the upper and lower bound */
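/*
 * For example (a reading of the bounds above using the defaults below,
 * not a statement of the exact comparison code): rack_gp_per_bw_mul_up = 2
 * and rack_gp_per_bw_mul_down = 4 would treat a new b/w measurement that
 * falls within roughly -4% to +2% of the current estimate as "no change".
 */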
static int32_t rack_gp_per_bw_mul_up = 2;	/* 2% */
static int32_t rack_gp_per_bw_mul_down = 4;	/* 4% */
static int32_t rack_gp_rtt_maxmul = 3;		/* 3 x maxmin */
static int32_t rack_gp_rtt_minmul = 1;		/* minrtt + (minrtt/mindiv) is lower rtt */
static int32_t rack_gp_rtt_mindiv = 4;		/* minrtt + (minrtt * minmul/mindiv) is lower rtt */
static int32_t rack_gp_decrease_per = 20;	/* 20% decrease in multiplier */
static int32_t rack_gp_increase_per = 2;	/* 2% increase in multiplier */
static int32_t rack_per_lower_bound = 50;	/* Don't allow to drop below this multiplier */
static int32_t rack_per_upper_bound_ss = 0;	/* Don't allow SS to grow above this */
static int32_t rack_per_upper_bound_ca = 0;	/* Don't allow CA to grow above this */
static int32_t rack_do_dyn_mul = 0;		/* Are the rack gp multipliers dynamic */
static int32_t rack_gp_no_rec_chg = 1;		/* Prohibit recovery from reducing its multiplier */
static int32_t rack_timely_dec_clear = 6;	/* Do we clear decrement count at a value (6)? */
static int32_t rack_timely_max_push_rise = 3;	/* One round of pushing */
static int32_t rack_timely_max_push_drop = 3;	/* Three rounds of pushing */
static int32_t rack_timely_min_segs = 4;	/* 4 segment minimum */
static int32_t rack_use_max_for_nobackoff = 0;
static int32_t rack_timely_int_timely_only = 0;	/* do interim timely's only use the timely algo (no b/w changes)? */
static int32_t rack_timely_no_stopping = 0;
static int32_t rack_down_raise_thresh = 100;
static int32_t rack_req_segs = 1;
static uint64_t rack_bw_rate_cap = 0;
static uint32_t rack_trace_point_config = 0;
static uint32_t rack_trace_point_bb_mode = 4;
static int32_t rack_trace_point_count = 0;


/* Weird delayed ack mode */
static int32_t rack_use_imac_dack = 0;
/* Rack specific counters */
counter_u64_t rack_saw_enobuf;
counter_u64_t rack_saw_enobuf_hw;
counter_u64_t rack_saw_enetunreach;
counter_u64_t rack_persists_sends;
counter_u64_t rack_persists_acks;
counter_u64_t rack_persists_loss;
counter_u64_t rack_persists_lost_ends;
#ifdef INVARIANTS
counter_u64_t rack_adjust_map_bw;
#endif
/* Tail loss probe counters */
counter_u64_t rack_tlp_tot;
counter_u64_t rack_tlp_newdata;
counter_u64_t rack_tlp_retran;
counter_u64_t rack_tlp_retran_bytes;
counter_u64_t rack_to_tot;
counter_u64_t rack_hot_alloc;
counter_u64_t rack_to_alloc;
counter_u64_t rack_to_alloc_hard;
counter_u64_t rack_to_alloc_emerg;
counter_u64_t rack_to_alloc_limited;
counter_u64_t rack_alloc_limited_conns;
counter_u64_t rack_split_limited;

counter_u64_t rack_multi_single_eq;
counter_u64_t rack_proc_non_comp_ack;

counter_u64_t rack_fto_send;
counter_u64_t rack_fto_rsm_send;
counter_u64_t rack_nfto_resend;
counter_u64_t rack_non_fto_send;
counter_u64_t rack_extended_rfo;

counter_u64_t rack_sack_proc_all;
counter_u64_t rack_sack_proc_short;
counter_u64_t rack_sack_proc_restart;
counter_u64_t rack_sack_attacks_detected;
counter_u64_t rack_sack_attacks_reversed;
counter_u64_t rack_sack_used_next_merge;
counter_u64_t rack_sack_splits;
counter_u64_t rack_sack_used_prev_merge;
counter_u64_t rack_sack_skipped_acked;
counter_u64_t rack_ack_total;
counter_u64_t rack_express_sack;
counter_u64_t rack_sack_total;
counter_u64_t rack_move_none;
counter_u64_t rack_move_some;
counter_u64_t rack_input_idle_reduces;
counter_u64_t rack_collapsed_win;
counter_u64_t rack_collapsed_win_seen;
counter_u64_t rack_collapsed_win_rxt;
counter_u64_t rack_collapsed_win_rxt_bytes;
counter_u64_t rack_try_scwnd;
counter_u64_t rack_hw_pace_init_fail;
counter_u64_t rack_hw_pace_lost;

counter_u64_t rack_out_size[TCP_MSS_ACCT_SIZE];
counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];


/* Retransmit value: srtt plus four times the rtt variance, floored at rack_rto_min. */
#define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))

/* Clamp (value + slop) into the range [tvmin, tvmax]. */
#define RACK_TCPT_RANGESET(tv, value, tvmin, tvmax, slop) do {	\
	(tv) = (value) + slop;					\
	if ((u_long)(tv) < (u_long)(tvmin))			\
		(tv) = (tvmin);					\
	if ((u_long)(tv) > (u_long)(tvmax))			\
		(tv) = (tvmax);					\
} while (0)

static void
rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line);

static int
rack_process_ack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen, int32_t * ofia, int32_t thflags, int32_t * ret_val);
static int
rack_process_data(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt);
static void
rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t th_ack, uint16_t nsegs, uint16_t type, int32_t recovery);
static struct rack_sendmap *rack_alloc(struct tcp_rack *rack);
static struct rack_sendmap *rack_alloc_limit(struct tcp_rack *rack,
    uint8_t limit_type);
static struct rack_sendmap *
rack_check_recovery_mode(struct tcpcb *tp,
    uint32_t tsused);
static void
rack_cong_signal(struct tcpcb *tp,
    uint32_t type, uint32_t ack, int);
static void rack_counter_destroy(void);
static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt);
static int32_t rack_ctor(void *mem, int32_t size, void *arg, int32_t how);
static void
rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override);
static void
rack_do_segment(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen,
    uint8_t iptos);
static void rack_dtor(void *mem, int32_t size, void *arg);
static void
rack_log_alt_to_to_cancel(struct tcp_rack *rack,
    uint32_t flex1, uint32_t flex2,
    uint32_t flex3, uint32_t flex4,
    uint32_t flex5, uint32_t flex6,
    uint16_t flex7, uint8_t mod);

static void
rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
    uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
    struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
rack_find_high_nonack(struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static struct rack_sendmap *rack_find_lowest_rsm(struct tcp_rack *rack);
static void rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm);
static void rack_fini(struct tcpcb *tp, int32_t tcb_is_purged);
static int rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void
rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack,
    tcp_seq th_ack, int line, uint8_t quality);
static uint32_t
rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss);
static int32_t rack_handoff_ok(struct tcpcb *tp);
static int32_t rack_init(struct tcpcb *tp);
static void rack_init_sysctls(void);
static void
rack_log_ack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, int entered_rec, int dup_ack_struck);
static void
rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len,
    uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t ts,
    struct rack_sendmap *hintrsm, uint16_t add_flags, struct mbuf *s_mb, uint32_t s_moff, int hw_tls);

static void
rack_log_sack_passed(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm);
static void rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm);
static int32_t rack_output(struct tcpcb *tp);

static uint32_t
rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack,
    struct sackblk *sack, struct tcpopt *to, struct rack_sendmap **prsm,
    uint32_t cts, int *moved_two);
static void rack_post_recovery(struct tcpcb *tp, uint32_t th_seq);
static void rack_remxt_tmr(struct tcpcb *tp);
static int rack_set_sockopt(struct inpcb *inp, struct sockopt *sopt);
static void rack_set_state(struct tcpcb *tp, struct tcp_rack *rack);
static int32_t rack_stopall(struct tcpcb *tp);
static void rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line);
static uint32_t
rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, int32_t * lenp, uint16_t add_flag);
static void
rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag);
static int
rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
    struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack);
static int32_t tcp_addrack(module_t mod, int32_t type, void *data);
static int
rack_do_close_wait(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_closing(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_established(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_lastack(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_recv(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
static int
rack_do_syn_sent(struct mbuf *m, struct tcphdr *th,
    struct socket *so, struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen,
    int32_t tlen, uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos);
struct rack_sendmap *
tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack,
    uint32_t tsused);
static void tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt,
    uint32_t len, uint32_t us_tim, int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt);
static void
tcp_rack_partialack(struct tcpcb *tp);
static int
rack_set_profile(struct tcp_rack *rack, int prof);
static void
rack_apply_deferred_options(struct tcp_rack *rack);

int32_t rack_clear_counter = 0;

static inline void
rack_trace_point(struct tcp_rack *rack, int num)
{
    if (((rack_trace_point_config == num) ||
         (rack_trace_point_config == 0xffffffff)) &&
        (rack_trace_point_bb_mode != 0) &&
        (rack_trace_point_count > 0) &&
        (rack->rc_tp->t_logstate == 0)) {
        int res;

        res = atomic_fetchadd_int(&rack_trace_point_count, -1);
        if (res > 0) {
            rack->rc_tp->t_logstate = rack_trace_point_bb_mode;
        } else {
            /* Lost a race, assure it is zero now */
            rack_trace_point_count = 0;
        }
    }
}
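
/*
 * Usage note (an assumption based on the sysctl descriptions registered in
 * rack_init_sysctls() below, not a statement of the exact OID path): the
 * tracepoint facility is armed by setting the "tp.number" sysctl to a
 * tracepoint id (or 0xffffffff for all), "tp.bbmode" to the desired black
 * box logging mode, and "tp.count" to how many connections may trigger it;
 * each connection that calls rack_trace_point() with a matching id then
 * turns on BB logging and decrements the remaining count.
 */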

static void
rack_swap_beta_values(struct tcp_rack *rack, uint8_t flex8)
{
    struct sockopt sopt;
    struct cc_newreno_opts opt;
    struct newreno old;
    struct tcpcb *tp;
    int error, failed = 0;

    tp = rack->rc_tp;
    if (tp->t_cc == NULL) {
        /* Tcb is leaving */
        return;
    }
    rack->rc_pacing_cc_set = 1;
    if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
        /* Not new-reno, we can't play games with beta! */
        failed = 1;
        goto out;
    }
    if (CC_ALGO(tp)->ctl_output == NULL) {
        /* Huh, not using new-reno so no swaps? */
        failed = 2;
        goto out;
    }
    /* Get the current values out */
    sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
    sopt.sopt_dir = SOPT_GET;
    opt.name = CC_NEWRENO_BETA;
    error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
    if (error) {
        failed = 3;
        goto out;
    }
    old.beta = opt.val;
    opt.name = CC_NEWRENO_BETA_ECN;
    error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
    if (error) {
        failed = 4;
        goto out;
    }
    old.beta_ecn = opt.val;

    /* Now lets set in the values we have stored */
    sopt.sopt_dir = SOPT_SET;
    opt.name = CC_NEWRENO_BETA;
    opt.val = rack->r_ctl.rc_saved_beta.beta;
    error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
    if (error) {
        failed = 5;
        goto out;
    }
    opt.name = CC_NEWRENO_BETA_ECN;
    opt.val = rack->r_ctl.rc_saved_beta.beta_ecn;
    error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
    if (error) {
        failed = 6;
        goto out;
    }
    /* Save off the values for restoral */
    memcpy(&rack->r_ctl.rc_saved_beta, &old, sizeof(struct newreno));
out:
    if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
        union tcp_log_stackspecific log;
        struct timeval tv;
        struct newreno *ptr;

        ptr = ((struct newreno *)tp->t_ccv.cc_data);
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.timeStamp = tcp_get_usecs(&tv);
        log.u_bbr.flex1 = ptr->beta;
        log.u_bbr.flex2 = ptr->beta_ecn;
        log.u_bbr.flex3 = ptr->newreno_flags;
        log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta.beta;
        log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta.beta_ecn;
        log.u_bbr.flex6 = failed;
        log.u_bbr.flex7 = rack->gp_ready;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->use_fixed_rate;
        log.u_bbr.flex7 <<= 1;
        log.u_bbr.flex7 |= rack->rc_pacing_cc_set;
        log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt;
        log.u_bbr.flex8 = flex8;
        tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, error,
            0, &log, false, NULL, NULL, 0, &tv);
    }
}

static void
rack_set_cc_pacing(struct tcp_rack *rack)
{
    if (rack->rc_pacing_cc_set)
        return;
    /*
     * Use the swap utility placing in 3 for flex8 to id a
     * set of new values.
     */
    rack->rc_pacing_cc_set = 1;
    rack_swap_beta_values(rack, 3);
}

static void
rack_undo_cc_pacing(struct tcp_rack *rack)
{
    if (rack->rc_pacing_cc_set == 0)
        return;
    /*
     * Use the swap utility placing in 4 for flex8 to id a
     * restoral of the old values.
     */
    rack->rc_pacing_cc_set = 0;
    rack_swap_beta_values(rack, 4);
}

#ifdef NETFLIX_PEAKRATE
static inline void
rack_update_peakrate_thr(struct tcpcb *tp)
{
    /* Keep in mind that t_maxpeakrate is in B/s. */
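    /*
     * Restating the calculation below (assuming t_srtt here carries a
     * microsecond value): the threshold is the larger of two full
     * segments and the number of bytes the byte-per-second cap allows
     * during one smoothed RTT,
     *   peak = max(2 * t_maxseg, t_maxpeakrate * t_srtt / HPTS_USEC_IN_SEC),
     * clamped to UINT32_MAX.
     */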
    uint64_t peak;

    peak = uqmax((tp->t_maxseg * 2),
        (((uint64_t)tp->t_maxpeakrate * (uint64_t)(tp->t_srtt)) / (uint64_t)HPTS_USEC_IN_SEC));
    tp->t_peakrate_thr = (uint32_t)uqmin(peak, UINT32_MAX);
}
#endif

static int
sysctl_rack_clear(SYSCTL_HANDLER_ARGS)
{
    uint32_t stat;
    int32_t error;

    error = SYSCTL_OUT(req, &rack_clear_counter, sizeof(uint32_t));
    if (error || req->newptr == NULL)
        return error;

    error = SYSCTL_IN(req, &stat, sizeof(uint32_t));
    if (error)
        return (error);
    if (stat == 1) {
#ifdef INVARIANTS
        printf("Clearing RACK counters\n");
#endif
        counter_u64_zero(rack_tlp_tot);
        counter_u64_zero(rack_tlp_newdata);
        counter_u64_zero(rack_tlp_retran);
        counter_u64_zero(rack_tlp_retran_bytes);
        counter_u64_zero(rack_to_tot);
        counter_u64_zero(rack_saw_enobuf);
        counter_u64_zero(rack_saw_enobuf_hw);
        counter_u64_zero(rack_saw_enetunreach);
        counter_u64_zero(rack_persists_sends);
        counter_u64_zero(rack_persists_acks);
        counter_u64_zero(rack_persists_loss);
        counter_u64_zero(rack_persists_lost_ends);
#ifdef INVARIANTS
        counter_u64_zero(rack_adjust_map_bw);
#endif
        counter_u64_zero(rack_to_alloc_hard);
        counter_u64_zero(rack_to_alloc_emerg);
        counter_u64_zero(rack_sack_proc_all);
        counter_u64_zero(rack_fto_send);
        counter_u64_zero(rack_fto_rsm_send);
        counter_u64_zero(rack_extended_rfo);
        counter_u64_zero(rack_hw_pace_init_fail);
        counter_u64_zero(rack_hw_pace_lost);
        counter_u64_zero(rack_non_fto_send);
        counter_u64_zero(rack_nfto_resend);
        counter_u64_zero(rack_sack_proc_short);
        counter_u64_zero(rack_sack_proc_restart);
        counter_u64_zero(rack_to_alloc);
        counter_u64_zero(rack_to_alloc_limited);
        counter_u64_zero(rack_alloc_limited_conns);
        counter_u64_zero(rack_split_limited);
        counter_u64_zero(rack_multi_single_eq);
        counter_u64_zero(rack_proc_non_comp_ack);
        counter_u64_zero(rack_sack_attacks_detected);
        counter_u64_zero(rack_sack_attacks_reversed);
        counter_u64_zero(rack_sack_used_next_merge);
        counter_u64_zero(rack_sack_used_prev_merge);
        counter_u64_zero(rack_sack_splits);
        counter_u64_zero(rack_sack_skipped_acked);
        counter_u64_zero(rack_ack_total);
        counter_u64_zero(rack_express_sack);
        counter_u64_zero(rack_sack_total);
        counter_u64_zero(rack_move_none);
        counter_u64_zero(rack_move_some);
        counter_u64_zero(rack_try_scwnd);
        counter_u64_zero(rack_collapsed_win);
        counter_u64_zero(rack_collapsed_win_rxt);
        counter_u64_zero(rack_collapsed_win_seen);
        counter_u64_zero(rack_collapsed_win_rxt_bytes);
    }
    rack_clear_counter = 0;
    return (0);
}

static void
rack_init_sysctls(void)
{
    struct sysctl_oid *rack_counters;
    struct sysctl_oid *rack_attack;
    struct sysctl_oid *rack_pacing;
    struct sysctl_oid *rack_timely;
    struct sysctl_oid *rack_timers;
    struct sysctl_oid *rack_tlp;
    struct sysctl_oid *rack_misc;
    struct sysctl_oid *rack_features;
    struct sysctl_oid *rack_measure;
    struct sysctl_oid *rack_probertt;
    struct sysctl_oid *rack_hw_pacing;
    struct sysctl_oid *rack_tracepoint;

    rack_attack = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "sack_attack",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Sack Attack Counters and Controls");
    rack_counters = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "stats",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Counters");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "rate_sample_method", CTLFLAG_RW,
        &rack_rate_sample_method, USE_RTT_LOW,
        "What method should we use for rate sampling 0=high, 1=low ");
    /* Probe rtt related controls */
    rack_probertt = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "probertt",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "ProbeRTT related Controls");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_hpb", CTLFLAG_RW,
        &rack_atexit_prtt_hbp, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "exit_per_nonhpb", CTLFLAG_RW,
        &rack_atexit_prtt, 130,
        "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_mul", CTLFLAG_RW,
        &rack_per_of_gp_probertt, 60,
        "What percentage of goodput do we pace at in probertt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_reduce", CTLFLAG_RW,
        &rack_per_of_gp_probertt_reduce, 10,
        "What percentage of goodput do we reduce every gp_srtt");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "gp_per_low", CTLFLAG_RW,
        &rack_per_of_gp_lowthresh, 40,
        "What percentage of goodput do we allow the multiplier to fall to");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "time_between", CTLFLAG_RW,
        &rack_time_between_probertt, 96000000,
        "How many useconds between the lowest rtt falling must pass before we enter probertt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "safety", CTLFLAG_RW,
        &rack_probe_rtt_safety_val, 2000000,
        "If not zero, provides a maximum usecond that you can stay in probertt (2sec = 2000000)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "sets_cwnd", CTLFLAG_RW,
        &rack_probe_rtt_sets_cwnd, 0,
        "Do we set the cwnd too (if always_lower is on)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "maxdrainsrtts", CTLFLAG_RW,
        &rack_max_drain_wait, 2,
        "Maximum number of gp_srtt's to hold in drain waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "mustdrainsrtts", CTLFLAG_RW,
        &rack_must_drain, 1,
        "We must drain this many gp_srtt's waiting for flight to reach goal");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_entry", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_entry, 1,
        "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "goal_use_min_exit", CTLFLAG_RW,
        &rack_probertt_use_min_rtt_exit, 0,
        "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_div", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_div, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (bottom of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "length_mul", CTLFLAG_RW,
        &rack_probertt_gpsrtt_cnt_mul, 0,
        "How many recent goodput srtt periods plus hold time does probertt last (top of fraction)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "holdtim_at_target", CTLFLAG_RW,
        &rack_min_probertt_hold, 200000,
        "What is the minimum time we hold probertt at target");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "filter_life", CTLFLAG_RW,
        &rack_probertt_filter_life, 10000000,
        "What is the time for the filters life in useconds");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "lower_within", CTLFLAG_RW,
        &rack_probertt_lower_within, 10,
        "If the rtt goes lower within this percentage of the time, go into probe-rtt");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "must_move", CTLFLAG_RW,
        &rack_min_rtt_movement, 250,
        "How much is the minimum movement in rtt to count as a drop for probertt purposes");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "clear_is_cnts", CTLFLAG_RW,
        &rack_probertt_clear_is, 1,
        "Do we clear I/S counts on exiting probe-rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_extra_drain", CTLFLAG_RW,
        &rack_max_drain_hbp, 1,
        "How many extra drain gpsrtt's do we get in highly buffered paths");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_probertt),
        OID_AUTO, "hbp_threshold", CTLFLAG_RW,
        &rack_hbp_thresh, 3,
        "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold");

    rack_tracepoint = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "tp",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack tracepoint facility");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tracepoint),
        OID_AUTO, "number", CTLFLAG_RW,
        &rack_trace_point_config, 0,
        "What is the trace point number to activate (0=none, 0xffffffff = all)?");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tracepoint),
        OID_AUTO, "bbmode", CTLFLAG_RW,
        &rack_trace_point_bb_mode, 4,
        "What is BB logging mode that is activated?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tracepoint),
        OID_AUTO, "count", CTLFLAG_RW,
        &rack_trace_point_count, 0,
        "How many connections will have BB logging turned on that hit the tracepoint?");
    /* Pacing related sysctls */
    rack_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "max_pace_over", CTLFLAG_RW,
        &rack_max_per_above, 30,
        "What is the maximum allowable percentage that we can pace above (so 30 = 130% of our goal)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_to_one", CTLFLAG_RW,
        &rack_pace_one_seg, 0,
        "Do we allow low b/w pacing of 1MSS instead of two");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "limit_wsrtt", CTLFLAG_RW,
        &rack_limit_time_with_srtt, 0,
        "Do we limit pacing time based on srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "init_win", CTLFLAG_RW,
        &rack_default_init_window, 0,
        "Do we have a rack initial window 0 = system default");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ss", CTLFLAG_RW,
        &rack_per_of_gp_ss, 250,
        "If non zero, what percentage of goodput to pace at in slow start");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_ca", CTLFLAG_RW,
        &rack_per_of_gp_ca, 150,
        "If non zero, what percentage of goodput to pace at in congestion avoidance");
    SYSCTL_ADD_U16(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "gp_per_rec", CTLFLAG_RW,
        &rack_per_of_gp_rec, 200,
        "If non zero, what percentage of goodput to pace at in recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "pace_max_seg", CTLFLAG_RW,
        &rack_hptsi_segments, 40,
        "What size is the max for TSO segments in pacing and burst mitigation");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "burst_reduces", CTLFLAG_RW,
        &rack_slot_reduction, 4,
        "When doing only burst mitigation what is the reduce divisor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "use_pacing", CTLFLAG_RW,
        &rack_pace_every_seg, 0,
        "If set we use pacing, if clear we use only the original burst mitigation");
    SYSCTL_ADD_U64(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_bw_rate_cap, 0,
        "If set we apply this value to the absolute rate cap used by pacing");
    SYSCTL_ADD_U8(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO, "req_measure_cnt", CTLFLAG_RW,
        &rack_req_measurements, 1,
        "If doing dynamic pacing, how many measurements must be in before we start pacing?");
    /* Hardware pacing */
    rack_hw_pacing = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "hdwr_pacing",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Pacing related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rwnd_factor", CTLFLAG_RW,
        &rack_hw_rwnd_factor, 2,
        "How many times does snd_wnd need to be bigger than pace_max_seg so we will hold off and get more acks?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_mult", CTLFLAG_RW,
        &rack_enobuf_hw_boost_mult, 2,
        "By how many time_betweens should we boost the pacing time if we see a ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_max", CTLFLAG_RW,
        &rack_enobuf_hw_max, 2,
        "What is the max boost the pacing time if we see a ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "pace_enobuf_min", CTLFLAG_RW,
        &rack_enobuf_hw_min, 2,
        "What is the min boost the pacing time if we see a ENOBUFS?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "enable", CTLFLAG_RW,
        &rack_enable_hw_pacing, 0,
        "Should RACK attempt to use hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_cap", CTLFLAG_RW,
        &rack_hw_rate_caps, 1,
        "Does the highest hardware pacing rate cap the rate we will send at?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_min", CTLFLAG_RW,
        &rack_hw_rate_min, 0,
        "Do we need a minimum estimate of this many bytes per second in order to engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "rate_to_low", CTLFLAG_RW,
        &rack_hw_rate_to_low, 0,
        "If we fall below this rate, dis-engage hw pacing?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "up_only", CTLFLAG_RW,
        &rack_hw_up_only, 1,
        "Do we allow hw pacing to lower the rate selected?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_hw_pacing),
        OID_AUTO, "extra_mss_precise", CTLFLAG_RW,
        &rack_hw_pace_extra_slots, 2,
        "If the rates between software and hardware match precisely how many extra time_betweens do we get?");
    rack_timely = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timely",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Rack Timely RTT Controls");
    /* Timely based GP dynamics */
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upper", CTLFLAG_RW,
        &rack_gp_per_bw_mul_up, 2,
        "Rack timely upper range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lower", CTLFLAG_RW,
        &rack_gp_per_bw_mul_down, 4,
        "Rack timely lower range for equal b/w (in percentage)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_max_mul", CTLFLAG_RW,
        &rack_gp_rtt_maxmul, 3,
        "Rack timely multiplier of lowest rtt for rtt_max");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_div", CTLFLAG_RW,
        &rack_gp_rtt_mindiv, 4,
        "Rack timely divisor used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "rtt_min_mul", CTLFLAG_RW,
        &rack_gp_rtt_minmul, 1,
        "Rack timely multiplier used for rtt + (rtt * mul/divisor) for check for lower rtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "decrease", CTLFLAG_RW,
        &rack_gp_decrease_per, 20,
        "Rack timely decrease percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "increase", CTLFLAG_RW,
        &rack_gp_increase_per, 2,
        "Rack timely increase percentage of our GP multiplication factor");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "lowerbound", CTLFLAG_RW,
        &rack_per_lower_bound, 50,
        "Rack timely lowest percentage we allow GP multiplier to fall to");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundss", CTLFLAG_RW,
        &rack_per_upper_bound_ss, 0,
        "Rack timely highest percentage we allow GP multiplier in SS to raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "upperboundca", CTLFLAG_RW,
        &rack_per_upper_bound_ca, 0,
        "Rack timely highest percentage we allow GP multiplier to CA raise to (0 is no upperbound)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dynamicgp", CTLFLAG_RW,
        &rack_do_dyn_mul, 0,
        "Rack timely do we enable dynamic timely goodput by default");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "no_rec_red", CTLFLAG_RW,
        &rack_gp_no_rec_chg, 1,
        "Rack timely do we prohibit the recovery multiplier from being lowered");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "red_clear_cnt", CTLFLAG_RW,
        &rack_timely_dec_clear, 6,
        "Rack timely what threshold do we count to before another boost during b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_rise", CTLFLAG_RW,
        &rack_timely_max_push_rise, 3,
        "Rack timely how many times do we push up with b/w increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "max_push_drop", CTLFLAG_RW,
        &rack_timely_max_push_drop, 3,
        "Rack timely how many times do we push back on b/w descent");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "min_segs", CTLFLAG_RW,
        &rack_timely_min_segs, 4,
        "Rack timely when setting the cwnd what is the min num segments");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "noback_max", CTLFLAG_RW,
        &rack_use_max_for_nobackoff, 0,
        "Rack timely when deciding if to backoff on a loss, do we use under max rtt else min");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "interim_timely_only", CTLFLAG_RW,
        &rack_timely_int_timely_only, 0,
        "Rack timely when doing interim timely's do we only do timely (no b/w consideration)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "nonstop", CTLFLAG_RW,
        &rack_timely_no_stopping, 0,
        "Rack timely don't stop increase");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "dec_raise_thresh", CTLFLAG_RW,
        &rack_down_raise_thresh, 100,
        "If the CA or SS is below this threshold raise on the first 3 b/w lowers (0=always)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timely),
        OID_AUTO, "bottom_drag_segs", CTLFLAG_RW,
        &rack_req_segs, 1,
        "Bottom dragging if not these many segments outstanding and room");

    /* TLP and Rack related parameters */
    rack_tlp = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "tlp",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "TLP and Rack related Controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_rrr", CTLFLAG_RW,
        &use_rack_rr, 1,
        "Do we use Rack Rapid Recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "post_rec_labc", CTLFLAG_RW,
        &rack_max_abc_post_recovery, 2,
        "Since we do early recovery, do we override the l_abc to a value, if so what?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "nonrxt_use_cr", CTLFLAG_RW,
        &rack_non_rxt_use_cr, 0,
        "Do we use ss/ca rate if in recovery we are transmitting a new data chunk");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpmethod", CTLFLAG_RW,
        &rack_tlp_threshold_use, TLP_USE_TWO_ONE,
        "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "limit", CTLFLAG_RW,
        &rack_tlp_limit, 2,
        "How many TLP's can be sent without sending new data");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "use_greater", CTLFLAG_RW,
        &rack_tlp_use_greater, 1,
        "Should we use the rack_rtt time if it's greater than srtt");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlpminto", CTLFLAG_RW,
        &rack_tlp_min, 10000,
        "TLP minimum timeout per the specification (in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "send_oldest", CTLFLAG_RW,
        &rack_always_send_oldest, 0,
        "Should we always send the oldest TLP and RACK-TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rack_tlimit", CTLFLAG_RW,
        &rack_limited_retran, 0,
        "How many times can a rack timeout drive out sends");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "tlp_cwnd_flag", CTLFLAG_RW,
        &rack_lower_cwnd_at_tlp, 0,
        "When a TLP completes a retran should we enter recovery");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_thresh", CTLFLAG_RW,
        &rack_reorder_thresh, 2,
        "What factor for rack will be added when seeing reordering (shift right)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW,
        &rack_tlp_thresh, 1,
        "What divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "reorder_fade", CTLFLAG_RW,
        &rack_reorder_fade, 60000000,
        "Does reorder detection fade, if so how many microseconds (0 means never)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_tlp),
        OID_AUTO, "pktdelay", CTLFLAG_RW,
        &rack_pkt_delay, 1000,
        "Extra RACK time (in microseconds) besides reordering thresh");

    /* Timer related controls */
    rack_timers = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "timers",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Timer related controls");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmin", CTLFLAG_RW,
        &rack_persist_min, 250000,
        "What is the minimum time in microseconds between persists");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "persmax", CTLFLAG_RW,
        &rack_persist_max, 2000000,
        "What is the largest delay in microseconds between persists");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "delayed_ack", CTLFLAG_RW,
        &rack_delayed_ack_time, 40000,
        "Delayed ack time (40ms in microseconds)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minrto", CTLFLAG_RW,
        &rack_rto_min, 30000,
        "Minimum RTO in microseconds -- set with caution below 1000 due to TLP");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "maxrto", CTLFLAG_RW,
        &rack_rto_max, 4000000,
        "Maximum RTO in microseconds -- should be at least as large as min_rto");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_timers),
        OID_AUTO, "minto", CTLFLAG_RW,
        &rack_min_to, 1000,
        "Minimum rack timeout in microseconds");
    /* Measure controls */
    rack_measure = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "measure",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Measure related controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "wma_divisor", CTLFLAG_RW,
        &rack_wma_divisor, 8,
        "When doing b/w calculation what is the divisor for the WMA");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_cwnd", CTLFLAG_RW,
        &rack_cwnd_block_ends_measure, 0,
        "Does a cwnd just-return end the measurement window (app limited)");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "end_rwnd", CTLFLAG_RW,
        &rack_rwnd_block_ends_measure, 0,
        "Does an rwnd just-return end the measurement window (app limited -- not persists)");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_target", CTLFLAG_RW,
        &rack_def_data_window, 20,
        "What is the minimum target window (in mss) for GP measurements");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "goal_bdp", CTLFLAG_RW,
        &rack_goal_bdp, 2,
        "What is the goal BDP to measure");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_srtts", CTLFLAG_RW,
        &rack_min_srtts, 1,
        "What is the minimum number of srtt's that must pass for a GP measurement");
    SYSCTL_ADD_U32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_measure),
        OID_AUTO, "min_measure_tim", CTLFLAG_RW,
        &rack_min_measure_usec, 0,
        "What is the minimum time for a measurement, if 0 this is off");
    /* Features */
    rack_features = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "features",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Feature controls");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "cmpack", CTLFLAG_RW,
        &rack_use_cmp_acks, 1,
        "Should RACK have LRO send compressed acks");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "fsb", CTLFLAG_RW,
        &rack_use_fsb, 1,
        "Should RACK use the fast send block?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "rfo", CTLFLAG_RW,
        &rack_use_rfo, 1,
        "Should RACK use rack_fast_output()?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "rsmrfo", CTLFLAG_RW,
        &rack_use_rsm_rfo, 1,
        "Should RACK use rack_fast_rsm_output()?");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "non_paced_lro_queue", CTLFLAG_RW,
        &rack_enable_mqueue_for_nonpaced, 0,
        "Should RACK use mbuf queuing for non-paced connections");
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_features),
        OID_AUTO, "hystartplusplus", CTLFLAG_RW,
        &rack_do_hystart, 0,
        "Should RACK enable HyStart++ on connections?");
    /* Misc rack controls */
    rack_misc = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_sysctl_root),
        OID_AUTO,
        "misc",
        CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
        "Misc related controls");
#ifdef TCP_ACCOUNTING
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "tcp_acct", CTLFLAG_RW,
        &rack_tcp_accounting, 0,
        "Should we turn on TCP accounting for all rack sessions?");
#endif
    SYSCTL_ADD_S32(&rack_sysctl_ctx,
        SYSCTL_CHILDREN(rack_misc),
        OID_AUTO, "apply_rtt_with_low_conf", CTLFLAG_RW,
        &rack_apply_rtt_with_reduced_conf,
0, 1383 "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); 1384 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1385 SYSCTL_CHILDREN(rack_misc), 1386 OID_AUTO, "rack_dsack_ctl", CTLFLAG_RW, 1387 &rack_dsack_std_based, 3, 1388 "How do we process dsack with respect to rack timers, bit field, 3 is standards based?"); 1389 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1390 SYSCTL_CHILDREN(rack_misc), 1391 OID_AUTO, "prr_addback_max", CTLFLAG_RW, 1392 &rack_prr_addbackmax, 2, 1393 "What is the maximum number of MSS we allow to be added back if prr can't send all its data?"); 1394 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1395 SYSCTL_CHILDREN(rack_misc), 1396 OID_AUTO, "stats_gets_ms", CTLFLAG_RW, 1397 &rack_stats_gets_ms_rtt, 1, 1398 "What do we feed the stats framework (1 = ms_rtt, 0 = us_rtt, 2 = ms_rtt from hdwr, > 2 usec rtt from hdwr)?"); 1399 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1400 SYSCTL_CHILDREN(rack_misc), 1401 OID_AUTO, "clientlowbuf", CTLFLAG_RW, 1402 &rack_client_low_buf, 0, 1403 "Client low buffer level (below this we are more aggressive in DGP exiting recovery (0 = off)?"); 1404 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1405 SYSCTL_CHILDREN(rack_misc), 1406 OID_AUTO, "defprofile", CTLFLAG_RW, 1407 &rack_def_profile, 0, 1408 "Should RACK use a default profile (0=no, num == profile num)?"); 1409 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1410 SYSCTL_CHILDREN(rack_misc), 1411 OID_AUTO, "shared_cwnd", CTLFLAG_RW, 1412 &rack_enable_shared_cwnd, 1, 1413 "Should RACK try to use the shared cwnd on connections where allowed"); 1414 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1415 SYSCTL_CHILDREN(rack_misc), 1416 OID_AUTO, "limits_on_scwnd", CTLFLAG_RW, 1417 &rack_limits_scwnd, 1, 1418 "Should RACK place low end time limits on the shared cwnd feature"); 1419 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1420 SYSCTL_CHILDREN(rack_misc), 1421 OID_AUTO, "iMac_dack", CTLFLAG_RW, 1422 &rack_use_imac_dack, 0, 1423 "Should RACK try to emulate iMac delayed ack"); 1424 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1425 SYSCTL_CHILDREN(rack_misc), 1426 OID_AUTO, "no_prr", CTLFLAG_RW, 1427 &rack_disable_prr, 0, 1428 "Should RACK not use prr and only pace (must have pacing on)"); 1429 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1430 SYSCTL_CHILDREN(rack_misc), 1431 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1432 &rack_verbose_logging, 0, 1433 "Should RACK black box logging be verbose"); 1434 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1435 SYSCTL_CHILDREN(rack_misc), 1436 OID_AUTO, "data_after_close", CTLFLAG_RW, 1437 &rack_ignore_data_after_close, 1, 1438 "Do we hold off sending a RST until all pending data is ack'd"); 1439 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1440 SYSCTL_CHILDREN(rack_misc), 1441 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1442 &rack_sack_not_required, 1, 1443 "Do we allow rack to run on connections not supporting SACK"); 1444 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1445 SYSCTL_CHILDREN(rack_misc), 1446 OID_AUTO, "prr_sendalot", CTLFLAG_RW, 1447 &rack_send_a_lot_in_prr, 1, 1448 "Send a lot in prr"); 1449 SYSCTL_ADD_S32(&rack_sysctl_ctx, 1450 SYSCTL_CHILDREN(rack_misc), 1451 OID_AUTO, "autoscale", CTLFLAG_RW, 1452 &rack_autosndbuf_inc, 20, 1453 "What percentage should rack scale up its snd buffer by?"); 1454 /* Sack Attacker detection stuff */ 1455 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1456 SYSCTL_CHILDREN(rack_attack), 1457 OID_AUTO, "detect_highsackratio", CTLFLAG_RW, 1458 &rack_highest_sack_thresh_seen, 0, 1459 "Highest sack to ack ratio seen"); 1460 SYSCTL_ADD_U32(&rack_sysctl_ctx, 1461 SYSCTL_CHILDREN(rack_attack), 1462 OID_AUTO, "detect_highmoveratio", CTLFLAG_RW, 1463 
&rack_highest_move_thresh_seen, 0, 1464 "Highest move to non-move ratio seen"); 1465 rack_ack_total = counter_u64_alloc(M_WAITOK); 1466 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1467 SYSCTL_CHILDREN(rack_attack), 1468 OID_AUTO, "acktotal", CTLFLAG_RD, 1469 &rack_ack_total, 1470 "Total number of ACKs"); 1471 rack_express_sack = counter_u64_alloc(M_WAITOK); 1472 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1473 SYSCTL_CHILDREN(rack_attack), 1474 OID_AUTO, "exp_sacktotal", CTLFLAG_RD, 1475 &rack_express_sack, 1476 "Total number of express SACKs"); 1477 rack_sack_total = counter_u64_alloc(M_WAITOK); 1478 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1479 SYSCTL_CHILDREN(rack_attack), 1480 OID_AUTO, "sacktotal", CTLFLAG_RD, 1481 &rack_sack_total, 1482 "Total number of SACKs"); 1483 rack_move_none = counter_u64_alloc(M_WAITOK); 1484 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1485 SYSCTL_CHILDREN(rack_attack), 1486 OID_AUTO, "move_none", CTLFLAG_RD, 1487 &rack_move_none, 1488 "Total number of SACK index reuse of positions under threshold"); 1489 rack_move_some = counter_u64_alloc(M_WAITOK); 1490 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1491 SYSCTL_CHILDREN(rack_attack), 1492 OID_AUTO, "move_some", CTLFLAG_RD, 1493 &rack_move_some, 1494 "Total number of SACK index reuse of positions over threshold"); 1495 rack_sack_attacks_detected = counter_u64_alloc(M_WAITOK); 1496 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1497 SYSCTL_CHILDREN(rack_attack), 1498 OID_AUTO, "attacks", CTLFLAG_RD, 1499 &rack_sack_attacks_detected, 1500 "Total number of SACK attackers that had sack disabled"); 1501 rack_sack_attacks_reversed = counter_u64_alloc(M_WAITOK); 1502 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1503 SYSCTL_CHILDREN(rack_attack), 1504 OID_AUTO, "reversed", CTLFLAG_RD, 1505 &rack_sack_attacks_reversed, 1506 "Total number of SACK attackers that were later determined false positive"); 1507 rack_sack_used_next_merge = counter_u64_alloc(M_WAITOK); 1508 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1509 SYSCTL_CHILDREN(rack_attack), 1510 OID_AUTO, "nextmerge", CTLFLAG_RD, 1511 &rack_sack_used_next_merge, 1512 "Total number of times we used the next merge"); 1513 rack_sack_used_prev_merge = counter_u64_alloc(M_WAITOK); 1514 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1515 SYSCTL_CHILDREN(rack_attack), 1516 OID_AUTO, "prevmerge", CTLFLAG_RD, 1517 &rack_sack_used_prev_merge, 1518 "Total number of times we used the prev merge"); 1519 /* Counters */ 1520 rack_fto_send = counter_u64_alloc(M_WAITOK); 1521 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1522 SYSCTL_CHILDREN(rack_counters), 1523 OID_AUTO, "fto_send", CTLFLAG_RD, 1524 &rack_fto_send, "Total number of rack_fast_output sends"); 1525 rack_fto_rsm_send = counter_u64_alloc(M_WAITOK); 1526 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1527 SYSCTL_CHILDREN(rack_counters), 1528 OID_AUTO, "fto_rsm_send", CTLFLAG_RD, 1529 &rack_fto_rsm_send, "Total number of rack_fast_rsm_output sends"); 1530 rack_nfto_resend = counter_u64_alloc(M_WAITOK); 1531 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1532 SYSCTL_CHILDREN(rack_counters), 1533 OID_AUTO, "nfto_resend", CTLFLAG_RD, 1534 &rack_nfto_resend, "Total number of rack_output retransmissions"); 1535 rack_non_fto_send = counter_u64_alloc(M_WAITOK); 1536 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1537 SYSCTL_CHILDREN(rack_counters), 1538 OID_AUTO, "nfto_send", CTLFLAG_RD, 1539 &rack_non_fto_send, "Total number of rack_output first sends"); 1540 rack_extended_rfo = counter_u64_alloc(M_WAITOK); 1541 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1542
SYSCTL_CHILDREN(rack_counters), 1543 OID_AUTO, "rfo_extended", CTLFLAG_RD, 1544 &rack_extended_rfo, "Total number of times we extended rfo"); 1545 1546 rack_hw_pace_init_fail = counter_u64_alloc(M_WAITOK); 1547 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1548 SYSCTL_CHILDREN(rack_counters), 1549 OID_AUTO, "hwpace_init_fail", CTLFLAG_RD, 1550 &rack_hw_pace_init_fail, "Total number of times we failed to initialize hw pacing"); 1551 rack_hw_pace_lost = counter_u64_alloc(M_WAITOK); 1552 1553 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1554 SYSCTL_CHILDREN(rack_counters), 1555 OID_AUTO, "hwpace_lost", CTLFLAG_RD, 1556 &rack_hw_pace_lost, "Total number of times we lost a hw pacing rate that had been initialized"); 1557 rack_tlp_tot = counter_u64_alloc(M_WAITOK); 1558 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1559 SYSCTL_CHILDREN(rack_counters), 1560 OID_AUTO, "tlp_to_total", CTLFLAG_RD, 1561 &rack_tlp_tot, 1562 "Total number of tail loss probe expirations"); 1563 rack_tlp_newdata = counter_u64_alloc(M_WAITOK); 1564 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1565 SYSCTL_CHILDREN(rack_counters), 1566 OID_AUTO, "tlp_new", CTLFLAG_RD, 1567 &rack_tlp_newdata, 1568 "Total number of tail loss probe sending new data"); 1569 rack_tlp_retran = counter_u64_alloc(M_WAITOK); 1570 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1571 SYSCTL_CHILDREN(rack_counters), 1572 OID_AUTO, "tlp_retran", CTLFLAG_RD, 1573 &rack_tlp_retran, 1574 "Total number of tail loss probe sending retransmitted data"); 1575 rack_tlp_retran_bytes = counter_u64_alloc(M_WAITOK); 1576 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1577 SYSCTL_CHILDREN(rack_counters), 1578 OID_AUTO, "tlp_retran_bytes", CTLFLAG_RD, 1579 &rack_tlp_retran_bytes, 1580 "Total bytes of tail loss probe sending retransmitted data"); 1581 rack_to_tot = counter_u64_alloc(M_WAITOK); 1582 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1583 SYSCTL_CHILDREN(rack_counters), 1584 OID_AUTO, "rack_to_tot", CTLFLAG_RD, 1585 &rack_to_tot, 1586 "Total number of times the rack timeout expired"); 1587 rack_saw_enobuf = counter_u64_alloc(M_WAITOK); 1588 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1589 SYSCTL_CHILDREN(rack_counters), 1590 OID_AUTO, "saw_enobufs", CTLFLAG_RD, 1591 &rack_saw_enobuf, 1592 "Total number of times a send returned enobuf for non-hdwr paced connections"); 1593 rack_saw_enobuf_hw = counter_u64_alloc(M_WAITOK); 1594 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1595 SYSCTL_CHILDREN(rack_counters), 1596 OID_AUTO, "saw_enobufs_hw", CTLFLAG_RD, 1597 &rack_saw_enobuf_hw, 1598 "Total number of times a send returned enobuf for hdwr paced connections"); 1599 rack_saw_enetunreach = counter_u64_alloc(M_WAITOK); 1600 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1601 SYSCTL_CHILDREN(rack_counters), 1602 OID_AUTO, "saw_enetunreach", CTLFLAG_RD, 1603 &rack_saw_enetunreach, 1604 "Total number of times a send received an enetunreachable"); 1605 rack_hot_alloc = counter_u64_alloc(M_WAITOK); 1606 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1607 SYSCTL_CHILDREN(rack_counters), 1608 OID_AUTO, "alloc_hot", CTLFLAG_RD, 1609 &rack_hot_alloc, 1610 "Total allocations from the top of our list"); 1611 rack_to_alloc = counter_u64_alloc(M_WAITOK); 1612 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1613 SYSCTL_CHILDREN(rack_counters), 1614 OID_AUTO, "allocs", CTLFLAG_RD, 1615 &rack_to_alloc, 1616 "Total allocations of tracking structures"); 1617 rack_to_alloc_hard = counter_u64_alloc(M_WAITOK); 1618 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1619 SYSCTL_CHILDREN(rack_counters), 1620 OID_AUTO, "allochard", CTLFLAG_RD, 1621 &rack_to_alloc_hard, 1622 "Total
allocations done with sleeping the hard way"); 1623 rack_to_alloc_emerg = counter_u64_alloc(M_WAITOK); 1624 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1625 SYSCTL_CHILDREN(rack_counters), 1626 OID_AUTO, "allocemerg", CTLFLAG_RD, 1627 &rack_to_alloc_emerg, 1628 "Total allocations done from emergency cache"); 1629 rack_to_alloc_limited = counter_u64_alloc(M_WAITOK); 1630 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1631 SYSCTL_CHILDREN(rack_counters), 1632 OID_AUTO, "alloc_limited", CTLFLAG_RD, 1633 &rack_to_alloc_limited, 1634 "Total allocations dropped due to limit"); 1635 rack_alloc_limited_conns = counter_u64_alloc(M_WAITOK); 1636 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1637 SYSCTL_CHILDREN(rack_counters), 1638 OID_AUTO, "alloc_limited_conns", CTLFLAG_RD, 1639 &rack_alloc_limited_conns, 1640 "Connections with allocations dropped due to limit"); 1641 rack_split_limited = counter_u64_alloc(M_WAITOK); 1642 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1643 SYSCTL_CHILDREN(rack_counters), 1644 OID_AUTO, "split_limited", CTLFLAG_RD, 1645 &rack_split_limited, 1646 "Split allocations dropped due to limit"); 1647 rack_persists_sends = counter_u64_alloc(M_WAITOK); 1648 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1649 SYSCTL_CHILDREN(rack_counters), 1650 OID_AUTO, "persist_sends", CTLFLAG_RD, 1651 &rack_persists_sends, 1652 "Number of times we sent a persist probe"); 1653 rack_persists_acks = counter_u64_alloc(M_WAITOK); 1654 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1655 SYSCTL_CHILDREN(rack_counters), 1656 OID_AUTO, "persist_acks", CTLFLAG_RD, 1657 &rack_persists_acks, 1658 "Number of times a persist probe was acked"); 1659 rack_persists_loss = counter_u64_alloc(M_WAITOK); 1660 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1661 SYSCTL_CHILDREN(rack_counters), 1662 OID_AUTO, "persist_loss", CTLFLAG_RD, 1663 &rack_persists_loss, 1664 "Number of times we detected a lost persist probe (no ack)"); 1665 rack_persists_lost_ends = counter_u64_alloc(M_WAITOK); 1666 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1667 SYSCTL_CHILDREN(rack_counters), 1668 OID_AUTO, "persist_loss_ends", CTLFLAG_RD, 1669 &rack_persists_lost_ends, 1670 "Number of lost persist probes (no ack) where the run ended with a PERSIST abort"); 1671 #ifdef INVARIANTS 1672 rack_adjust_map_bw = counter_u64_alloc(M_WAITOK); 1673 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1674 SYSCTL_CHILDREN(rack_counters), 1675 OID_AUTO, "map_adjust_req", CTLFLAG_RD, 1676 &rack_adjust_map_bw, 1677 "Number of times we hit the case where the sb went up and down on a sendmap entry"); 1678 #endif 1679 rack_multi_single_eq = counter_u64_alloc(M_WAITOK); 1680 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1681 SYSCTL_CHILDREN(rack_counters), 1682 OID_AUTO, "cmp_ack_equiv", CTLFLAG_RD, 1683 &rack_multi_single_eq, 1684 "Total number of acks represented by compressed acks"); 1685 rack_proc_non_comp_ack = counter_u64_alloc(M_WAITOK); 1686 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1687 SYSCTL_CHILDREN(rack_counters), 1688 OID_AUTO, "cmp_ack_not", CTLFLAG_RD, 1689 &rack_proc_non_comp_ack, 1690 "Number of non-compressed acks that we processed"); 1691 1692 1693 rack_sack_proc_all = counter_u64_alloc(M_WAITOK); 1694 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1695 SYSCTL_CHILDREN(rack_counters), 1696 OID_AUTO, "sack_long", CTLFLAG_RD, 1697 &rack_sack_proc_all, 1698 "Total times we had to walk the whole list for sack processing"); 1699 rack_sack_proc_restart = counter_u64_alloc(M_WAITOK); 1700 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1701 SYSCTL_CHILDREN(rack_counters), 1702 OID_AUTO, "sack_restart", CTLFLAG_RD, 1703
&rack_sack_proc_restart, 1704 "Total times we had to walk whole list due to a restart"); 1705 rack_sack_proc_short = counter_u64_alloc(M_WAITOK); 1706 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1707 SYSCTL_CHILDREN(rack_counters), 1708 OID_AUTO, "sack_short", CTLFLAG_RD, 1709 &rack_sack_proc_short, 1710 "Total times we took shortcut for sack processing"); 1711 rack_sack_skipped_acked = counter_u64_alloc(M_WAITOK); 1712 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1713 SYSCTL_CHILDREN(rack_attack), 1714 OID_AUTO, "skipacked", CTLFLAG_RD, 1715 &rack_sack_skipped_acked, 1716 "Total number of times we skipped previously sacked"); 1717 rack_sack_splits = counter_u64_alloc(M_WAITOK); 1718 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1719 SYSCTL_CHILDREN(rack_attack), 1720 OID_AUTO, "ofsplit", CTLFLAG_RD, 1721 &rack_sack_splits, 1722 "Total number of times we did the old fashion tree split"); 1723 rack_input_idle_reduces = counter_u64_alloc(M_WAITOK); 1724 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1725 SYSCTL_CHILDREN(rack_counters), 1726 OID_AUTO, "idle_reduce_oninput", CTLFLAG_RD, 1727 &rack_input_idle_reduces, 1728 "Total number of idle reductions on input"); 1729 rack_collapsed_win_seen = counter_u64_alloc(M_WAITOK); 1730 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1731 SYSCTL_CHILDREN(rack_counters), 1732 OID_AUTO, "collapsed_win_seen", CTLFLAG_RD, 1733 &rack_collapsed_win_seen, 1734 "Total number of collapsed window events seen (where our window shrinks)"); 1735 1736 rack_collapsed_win = counter_u64_alloc(M_WAITOK); 1737 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1738 SYSCTL_CHILDREN(rack_counters), 1739 OID_AUTO, "collapsed_win", CTLFLAG_RD, 1740 &rack_collapsed_win, 1741 "Total number of collapsed window events where we mark packets"); 1742 rack_collapsed_win_rxt = counter_u64_alloc(M_WAITOK); 1743 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1744 SYSCTL_CHILDREN(rack_counters), 1745 OID_AUTO, "collapsed_win_rxt", CTLFLAG_RD, 1746 &rack_collapsed_win_rxt, 1747 "Total number of packets that were retransmitted"); 1748 rack_collapsed_win_rxt_bytes = counter_u64_alloc(M_WAITOK); 1749 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1750 SYSCTL_CHILDREN(rack_counters), 1751 OID_AUTO, "collapsed_win_bytes", CTLFLAG_RD, 1752 &rack_collapsed_win_rxt_bytes, 1753 "Total number of bytes that were retransmitted"); 1754 rack_try_scwnd = counter_u64_alloc(M_WAITOK); 1755 SYSCTL_ADD_COUNTER_U64(&rack_sysctl_ctx, 1756 SYSCTL_CHILDREN(rack_counters), 1757 OID_AUTO, "tried_scwnd", CTLFLAG_RD, 1758 &rack_try_scwnd, 1759 "Total number of scwnd attempts"); 1760 COUNTER_ARRAY_ALLOC(rack_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1761 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1762 OID_AUTO, "outsize", CTLFLAG_RD, 1763 rack_out_size, TCP_MSS_ACCT_SIZE, "MSS send sizes"); 1764 COUNTER_ARRAY_ALLOC(rack_opts_arry, RACK_OPTS_SIZE, M_WAITOK); 1765 SYSCTL_ADD_COUNTER_U64_ARRAY(&rack_sysctl_ctx, SYSCTL_CHILDREN(rack_sysctl_root), 1766 OID_AUTO, "opts", CTLFLAG_RD, 1767 rack_opts_arry, RACK_OPTS_SIZE, "RACK Option Stats"); 1768 SYSCTL_ADD_PROC(&rack_sysctl_ctx, 1769 SYSCTL_CHILDREN(rack_sysctl_root), 1770 OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1771 &rack_clear_counter, 0, sysctl_rack_clear, "IU", "Clear counters"); 1772 } 1773 1774 static __inline int 1775 rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a) 1776 { 1777 if (SEQ_GEQ(b->r_start, a->r_start) && 1778 SEQ_LT(b->r_start, a->r_end)) { 1779 /* 1780 * The entry b is within the 1781 * block a. 
i.e.: 1782 * a -- |-------------| 1783 * b -- |----| 1784 * <or> 1785 * b -- |------| 1786 * <or> 1787 * b -- |-----------| 1788 */ 1789 return (0); 1790 } else if (SEQ_GEQ(b->r_start, a->r_end)) { 1791 /* 1792 * b falls at or beyond the end 1793 * of a, so a is said 1794 * to be smaller than b. 1795 * i.e.: 1796 * a -- |------| 1797 * b -- |--------| 1798 * or 1799 * b -- |-----| 1800 */ 1801 return (1); 1802 } 1803 /* 1804 * What's left is where a is 1805 * larger than b. i.e.: 1806 * a -- |-------| 1807 * b -- |---| 1808 * or even possibly 1809 * b -- |--------------| 1810 */ 1811 return (-1); 1812 } 1813 1814 RB_PROTOTYPE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1815 RB_GENERATE(rack_rb_tree_head, rack_sendmap, r_next, rb_map_cmp); 1816 1817 static uint32_t 1818 rc_init_window(struct tcp_rack *rack) 1819 { 1820 uint32_t win; 1821 1822 if (rack->rc_init_win == 0) { 1823 /* 1824 * Nothing set by the user, use the system stack 1825 * default. 1826 */ 1827 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); 1828 } 1829 win = ctf_fixed_maxseg(rack->rc_tp) * rack->rc_init_win; 1830 return (win); 1831 } 1832 1833 static uint64_t 1834 rack_get_fixed_pacing_bw(struct tcp_rack *rack) 1835 { 1836 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) 1837 return (rack->r_ctl.rc_fixed_pacing_rate_rec); 1838 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1839 return (rack->r_ctl.rc_fixed_pacing_rate_ss); 1840 else 1841 return (rack->r_ctl.rc_fixed_pacing_rate_ca); 1842 } 1843 1844 static uint64_t 1845 rack_get_bw(struct tcp_rack *rack) 1846 { 1847 if (rack->use_fixed_rate) { 1848 /* Return the fixed pacing rate */ 1849 return (rack_get_fixed_pacing_bw(rack)); 1850 } 1851 if (rack->r_ctl.gp_bw == 0) { 1852 /* 1853 * We have no b/w measurement yet; 1854 * if we have a user set initial bw 1855 * return it. If we don't have that and 1856 * we have an srtt, use the tcp IW (10) to 1857 * calculate a fictional b/w over the SRTT, 1858 * which is more or less a guess. Note 1859 * we don't use our IW from rack on purpose, 1860 * so that a larger IW (say 30) does not 1861 * produce a "huge" b/w. 1862 */ 1863 uint64_t bw, srtt; 1864 if (rack->r_ctl.init_rate) 1865 return (rack->r_ctl.init_rate); 1866 1867 /* Has the user set a max peak rate? */ 1868 #ifdef NETFLIX_PEAKRATE 1869 if (rack->rc_tp->t_maxpeakrate) 1870 return (rack->rc_tp->t_maxpeakrate); 1871 #endif 1872 /* Ok, let's come up with the IW guess, if we have an srtt */ 1873 if (rack->rc_tp->t_srtt == 0) { 1874 /* 1875 * Go with the old pacing method, 1876 * i.e. burst mitigation only. 1877 */ 1878 return (0); 1879 } 1880 /* Ok, let's get the initial TCP win (not rack's) */ 1881 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); 1882 srtt = (uint64_t)rack->rc_tp->t_srtt; 1883 bw *= (uint64_t)USECS_IN_SECOND; 1884 bw /= srtt; 1885 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1886 bw = rack->r_ctl.bw_rate_cap; 1887 return (bw); 1888 } else { 1889 uint64_t bw; 1890 1891 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 1892 /* Averaging is done, we can return the value */ 1893 bw = rack->r_ctl.gp_bw; 1894 } else { 1895 /* Still doing the initial average, must calculate */ 1896 bw = rack->r_ctl.gp_bw / rack->r_ctl.num_measurements; 1897 } 1898 #ifdef NETFLIX_PEAKRATE 1899 if ((rack->rc_tp->t_maxpeakrate) && 1900 (bw > rack->rc_tp->t_maxpeakrate)) { 1901 /* The user has set a peak rate to pace at; 1902 * don't allow us to pace faster than that.
1903 */ 1904 return (rack->rc_tp->t_maxpeakrate); 1905 } 1906 #endif 1907 if (rack->r_ctl.bw_rate_cap && (bw > rack->r_ctl.bw_rate_cap)) 1908 bw = rack->r_ctl.bw_rate_cap; 1909 return (bw); 1910 } 1911 } 1912 1913 static uint16_t 1914 rack_get_output_gain(struct tcp_rack *rack, struct rack_sendmap *rsm) 1915 { 1916 if (rack->use_fixed_rate) { 1917 return (100); 1918 } else if (rack->in_probe_rtt && (rsm == NULL)) 1919 return (rack->r_ctl.rack_per_of_gp_probertt); 1920 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && 1921 rack->r_ctl.rack_per_of_gp_rec)) { 1922 if (rsm) { 1923 /* a retransmission always use the recovery rate */ 1924 return (rack->r_ctl.rack_per_of_gp_rec); 1925 } else if (rack->rack_rec_nonrxt_use_cr) { 1926 /* Directed to use the configured rate */ 1927 goto configured_rate; 1928 } else if (rack->rack_no_prr && 1929 (rack->r_ctl.rack_per_of_gp_rec > 100)) { 1930 /* No PRR, lets just use the b/w estimate only */ 1931 return (100); 1932 } else { 1933 /* 1934 * Here we may have a non-retransmit but we 1935 * have no overrides, so just use the recovery 1936 * rate (prr is in effect). 1937 */ 1938 return (rack->r_ctl.rack_per_of_gp_rec); 1939 } 1940 } 1941 configured_rate: 1942 /* For the configured rate we look at our cwnd vs the ssthresh */ 1943 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) 1944 return (rack->r_ctl.rack_per_of_gp_ss); 1945 else 1946 return (rack->r_ctl.rack_per_of_gp_ca); 1947 } 1948 1949 static void 1950 rack_log_dsack_event(struct tcp_rack *rack, uint8_t mod, uint32_t flex4, uint32_t flex5, uint32_t flex6) 1951 { 1952 /* 1953 * Types of logs (mod value) 1954 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. 1955 * 2 = a dsack round begins, persist is reset to 16. 1956 * 3 = a dsack round ends 1957 * 4 = Dsack option increases rack rtt flex5 is the srtt input, flex6 is thresh 1958 * 5 = Socket option set changing the control flags rc_rack_tmr_std_based, rc_rack_use_dsack 1959 * 6 = Final rack rtt, flex4 is srtt and flex6 is final limited thresh. 
1960 */ 1961 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1962 union tcp_log_stackspecific log; 1963 struct timeval tv; 1964 1965 memset(&log, 0, sizeof(log)); 1966 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; 1967 log.u_bbr.flex1 <<= 1; 1968 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; 1969 log.u_bbr.flex1 <<= 1; 1970 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; 1971 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; 1972 log.u_bbr.flex3 = rack->r_ctl.num_dsack; 1973 log.u_bbr.flex4 = flex4; 1974 log.u_bbr.flex5 = flex5; 1975 log.u_bbr.flex6 = flex6; 1976 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; 1977 log.u_bbr.flex8 = mod; 1978 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 1979 TCP_LOG_EVENTP(rack->rc_tp, NULL, 1980 &rack->rc_inp->inp_socket->so_rcv, 1981 &rack->rc_inp->inp_socket->so_snd, 1982 RACK_DSACK_HANDLING, 0, 1983 0, &log, false, &tv); 1984 } 1985 } 1986 1987 static void 1988 rack_log_hdwr_pacing(struct tcp_rack *rack, 1989 uint64_t rate, uint64_t hw_rate, int line, 1990 int error, uint16_t mod) 1991 { 1992 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1993 union tcp_log_stackspecific log; 1994 struct timeval tv; 1995 const struct ifnet *ifp; 1996 1997 memset(&log, 0, sizeof(log)); 1998 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 1999 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2000 if (rack->r_ctl.crte) { 2001 ifp = rack->r_ctl.crte->ptbl->rs_ifp; 2002 } else if (rack->rc_inp->inp_route.ro_nh && 2003 rack->rc_inp->inp_route.ro_nh->nh_ifp) { 2004 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; 2005 } else 2006 ifp = NULL; 2007 if (ifp) { 2008 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2009 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2010 } 2011 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2012 log.u_bbr.bw_inuse = rate; 2013 log.u_bbr.flex5 = line; 2014 log.u_bbr.flex6 = error; 2015 log.u_bbr.flex7 = mod; 2016 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; 2017 log.u_bbr.flex8 = rack->use_fixed_rate; 2018 log.u_bbr.flex8 <<= 1; 2019 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; 2020 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; 2021 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; 2022 if (rack->r_ctl.crte) 2023 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; 2024 else 2025 log.u_bbr.cur_del_rate = 0; 2026 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; 2027 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2028 &rack->rc_inp->inp_socket->so_rcv, 2029 &rack->rc_inp->inp_socket->so_snd, 2030 BBR_LOG_HDWR_PACE, 0, 2031 0, &log, false, &tv); 2032 } 2033 } 2034 2035 static uint64_t 2036 rack_get_output_bw(struct tcp_rack *rack, uint64_t bw, struct rack_sendmap *rsm, int *capped) 2037 { 2038 /* 2039 * We allow rack_per_of_gp_xx to dictate our bw rate we want. 
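 * For illustration (hypothetical numbers): with a measured goodput b/w of 1,000,000 bytes/sec and a CA gain of 125 percent, bw_est below becomes 1,000,000 * 125 / 100 = 1,250,000 bytes/sec, still subject to the RACK_MIN_BW floor and any hardware rate cap applied further down.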
2040 */ 2041 uint64_t bw_est, high_rate; 2042 uint64_t gain; 2043 2044 gain = (uint64_t)rack_get_output_gain(rack, rsm); 2045 bw_est = bw * gain; 2046 bw_est /= (uint64_t)100; 2047 /* Never fall below the minimum (def 64kbps) */ 2048 if (bw_est < RACK_MIN_BW) 2049 bw_est = RACK_MIN_BW; 2050 if (rack->r_rack_hw_rate_caps) { 2051 /* Rate caps are in place */ 2052 if (rack->r_ctl.crte != NULL) { 2053 /* We have a hdwr rate already */ 2054 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 2055 if (bw_est >= high_rate) { 2056 /* We are capping bw at the highest rate table entry */ 2057 rack_log_hdwr_pacing(rack, 2058 bw_est, high_rate, __LINE__, 2059 0, 3); 2060 bw_est = high_rate; 2061 if (capped) 2062 *capped = 1; 2063 } 2064 } else if ((rack->rack_hdrw_pacing == 0) && 2065 (rack->rack_hdw_pace_ena) && 2066 (rack->rack_attempt_hdwr_pace == 0) && 2067 (rack->rc_inp->inp_route.ro_nh != NULL) && 2068 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 2069 /* 2070 * Special case, we have not yet attempted hardware 2071 * pacing, and yet we may, when we do, find out if we are 2072 * above the highest rate. We need to know the maxbw for the interface 2073 * in question (if it supports ratelimiting). We get back 2074 * a 0, if the interface is not found in the RL lists. 2075 */ 2076 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 2077 if (high_rate) { 2078 /* Yep, we have a rate is it above this rate? */ 2079 if (bw_est > high_rate) { 2080 bw_est = high_rate; 2081 if (capped) 2082 *capped = 1; 2083 } 2084 } 2085 } 2086 } 2087 return (bw_est); 2088 } 2089 2090 static void 2091 rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t tsused, uint32_t thresh, int mod) 2092 { 2093 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2094 union tcp_log_stackspecific log; 2095 struct timeval tv; 2096 2097 if ((mod != 1) && (rack_verbose_logging == 0)) { 2098 /* 2099 * We get 3 values currently for mod 2100 * 1 - We are retransmitting and this tells the reason. 2101 * 2 - We are clearing a dup-ack count. 2102 * 3 - We are incrementing a dup-ack count. 2103 * 2104 * The clear/increment are only logged 2105 * if you have BBverbose on. 
2106 */ 2107 return; 2108 } 2109 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2110 log.u_bbr.flex1 = tsused; 2111 log.u_bbr.flex2 = thresh; 2112 log.u_bbr.flex3 = rsm->r_flags; 2113 log.u_bbr.flex4 = rsm->r_dupack; 2114 log.u_bbr.flex5 = rsm->r_start; 2115 log.u_bbr.flex6 = rsm->r_end; 2116 log.u_bbr.flex8 = mod; 2117 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2118 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2119 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2120 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2121 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2122 log.u_bbr.pacing_gain = rack->r_must_retran; 2123 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2124 &rack->rc_inp->inp_socket->so_rcv, 2125 &rack->rc_inp->inp_socket->so_snd, 2126 BBR_LOG_SETTINGS_CHG, 0, 2127 0, &log, false, &tv); 2128 } 2129 } 2130 2131 static void 2132 rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2133 { 2134 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2135 union tcp_log_stackspecific log; 2136 struct timeval tv; 2137 2138 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2139 log.u_bbr.flex1 = rack->rc_tp->t_srtt; 2140 log.u_bbr.flex2 = to; 2141 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; 2142 log.u_bbr.flex4 = slot; 2143 log.u_bbr.flex5 = rack->rc_inp->inp_hptsslot; 2144 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2145 log.u_bbr.flex7 = rack->rc_in_persist; 2146 log.u_bbr.flex8 = which; 2147 if (rack->rack_no_prr) 2148 log.u_bbr.pkts_out = 0; 2149 else 2150 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 2151 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2152 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2153 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2154 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2155 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2156 log.u_bbr.pacing_gain = rack->r_must_retran; 2157 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2158 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; 2159 log.u_bbr.lost = rack_rto_min; 2160 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2161 &rack->rc_inp->inp_socket->so_rcv, 2162 &rack->rc_inp->inp_socket->so_snd, 2163 BBR_LOG_TIMERSTAR, 0, 2164 0, &log, false, &tv); 2165 } 2166 } 2167 2168 static void 2169 rack_log_to_event(struct tcp_rack *rack, int32_t to_num, struct rack_sendmap *rsm) 2170 { 2171 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2172 union tcp_log_stackspecific log; 2173 struct timeval tv; 2174 2175 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2176 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2177 log.u_bbr.flex8 = to_num; 2178 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; 2179 log.u_bbr.flex2 = rack->rc_rack_rtt; 2180 if (rsm == NULL) 2181 log.u_bbr.flex3 = 0; 2182 else 2183 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; 2184 if (rack->rack_no_prr) 2185 log.u_bbr.flex5 = 0; 2186 else 2187 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2188 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2189 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2190 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2191 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2192 log.u_bbr.pacing_gain = rack->r_must_retran; 2193 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2194 &rack->rc_inp->inp_socket->so_rcv, 2195 &rack->rc_inp->inp_socket->so_snd, 2196 BBR_LOG_RTO, 0, 2197 0, &log, false, &tv); 2198 } 2199 } 2200 2201 static void 2202 rack_log_map_chg(struct tcpcb *tp, struct tcp_rack *rack, 2203 struct rack_sendmap *prev, 2204 struct rack_sendmap *rsm, 2205 
struct rack_sendmap *next, 2206 int flag, uint32_t th_ack, int line) 2207 { 2208 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2209 union tcp_log_stackspecific log; 2210 struct timeval tv; 2211 2212 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2213 log.u_bbr.flex8 = flag; 2214 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2215 log.u_bbr.cur_del_rate = (uint64_t)prev; 2216 log.u_bbr.delRate = (uint64_t)rsm; 2217 log.u_bbr.rttProp = (uint64_t)next; 2218 log.u_bbr.flex7 = 0; 2219 if (prev) { 2220 log.u_bbr.flex1 = prev->r_start; 2221 log.u_bbr.flex2 = prev->r_end; 2222 log.u_bbr.flex7 |= 0x4; 2223 } 2224 if (rsm) { 2225 log.u_bbr.flex3 = rsm->r_start; 2226 log.u_bbr.flex4 = rsm->r_end; 2227 log.u_bbr.flex7 |= 0x2; 2228 } 2229 if (next) { 2230 log.u_bbr.flex5 = next->r_start; 2231 log.u_bbr.flex6 = next->r_end; 2232 log.u_bbr.flex7 |= 0x1; 2233 } 2234 log.u_bbr.applimited = line; 2235 log.u_bbr.pkts_out = th_ack; 2236 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2237 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2238 if (rack->rack_no_prr) 2239 log.u_bbr.lost = 0; 2240 else 2241 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; 2242 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2243 &rack->rc_inp->inp_socket->so_rcv, 2244 &rack->rc_inp->inp_socket->so_snd, 2245 TCP_LOG_MAPCHG, 0, 2246 0, &log, false, &tv); 2247 } 2248 } 2249 2250 static void 2251 rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, uint32_t t, uint32_t len, 2252 struct rack_sendmap *rsm, int conf) 2253 { 2254 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2255 union tcp_log_stackspecific log; 2256 struct timeval tv; 2257 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2258 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2259 log.u_bbr.flex1 = t; 2260 log.u_bbr.flex2 = len; 2261 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; 2262 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; 2263 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; 2264 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2265 log.u_bbr.flex7 = conf; 2266 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; 2267 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; 2268 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2269 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; 2270 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; 2271 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2272 if (rsm) { 2273 log.u_bbr.pkt_epoch = rsm->r_start; 2274 log.u_bbr.lost = rsm->r_end; 2275 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; 2276 /* We loose any upper of the 24 bits */ 2277 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; 2278 } else { 2279 /* Its a SYN */ 2280 log.u_bbr.pkt_epoch = rack->rc_tp->iss; 2281 log.u_bbr.lost = 0; 2282 log.u_bbr.cwnd_gain = 0; 2283 log.u_bbr.pacing_gain = 0; 2284 } 2285 /* Write out general bits of interest rrs here */ 2286 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; 2287 log.u_bbr.use_lt_bw <<= 1; 2288 log.u_bbr.use_lt_bw |= rack->forced_ack; 2289 log.u_bbr.use_lt_bw <<= 1; 2290 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; 2291 log.u_bbr.use_lt_bw <<= 1; 2292 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 2293 log.u_bbr.use_lt_bw <<= 1; 2294 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 2295 log.u_bbr.use_lt_bw <<= 1; 2296 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 2297 log.u_bbr.use_lt_bw <<= 1; 2298 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 2299 log.u_bbr.use_lt_bw <<= 1; 2300 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; 2301 log.u_bbr.applimited = 
rack->r_ctl.rc_target_probertt_flight; 2302 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; 2303 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; 2304 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; 2305 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; 2306 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 2307 log.u_bbr.bw_inuse <<= 32; 2308 if (rsm) 2309 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 2310 TCP_LOG_EVENTP(tp, NULL, 2311 &rack->rc_inp->inp_socket->so_rcv, 2312 &rack->rc_inp->inp_socket->so_snd, 2313 BBR_LOG_BBRRTT, 0, 2314 0, &log, false, &tv); 2315 2316 2317 } 2318 } 2319 2320 static void 2321 rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt) 2322 { 2323 /* 2324 * Log the rtt sample we are 2325 * applying to the srtt algorithm in 2326 * useconds. 2327 */ 2328 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2329 union tcp_log_stackspecific log; 2330 struct timeval tv; 2331 2332 /* Convert our ms to a microsecond */ 2333 memset(&log, 0, sizeof(log)); 2334 log.u_bbr.flex1 = rtt; 2335 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2336 log.u_bbr.flex3 = rack->r_ctl.sack_count; 2337 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2338 log.u_bbr.flex5 = rack->r_ctl.sack_moved_extra; 2339 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2340 log.u_bbr.flex7 = 1; 2341 log.u_bbr.flex8 = rack->sack_attack_disable; 2342 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2343 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2344 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2345 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2346 log.u_bbr.pacing_gain = rack->r_must_retran; 2347 /* 2348 * We capture in delRate the upper 32 bits as 2349 * the confidence level we had declared, and the 2350 * lower 32 bits as the actual RTT using the arrival 2351 * timestamp. 
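 * For example, a confidence of 3 with an rs_us_rtt of 25000 usec is stored as (3ULL << 32) | 25000.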
2352 */ 2353 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; 2354 log.u_bbr.delRate <<= 32; 2355 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; 2356 /* Lets capture all the things that make up t_rtxcur */ 2357 log.u_bbr.applimited = rack_rto_min; 2358 log.u_bbr.epoch = rack_rto_max; 2359 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; 2360 log.u_bbr.lost = rack_rto_min; 2361 log.u_bbr.pkt_epoch = TICKS_2_USEC(tcp_rexmit_slop); 2362 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); 2363 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; 2364 log.u_bbr.bw_inuse *= HPTS_USEC_IN_SEC; 2365 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; 2366 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2367 &rack->rc_inp->inp_socket->so_rcv, 2368 &rack->rc_inp->inp_socket->so_snd, 2369 TCP_LOG_RTT, 0, 2370 0, &log, false, &tv); 2371 } 2372 } 2373 2374 static void 2375 rack_log_rtt_sample_calc(struct tcp_rack *rack, uint32_t rtt, uint32_t send_time, uint32_t ack_time, int where) 2376 { 2377 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2378 union tcp_log_stackspecific log; 2379 struct timeval tv; 2380 2381 /* Convert our ms to a microsecond */ 2382 memset(&log, 0, sizeof(log)); 2383 log.u_bbr.flex1 = rtt; 2384 log.u_bbr.flex2 = send_time; 2385 log.u_bbr.flex3 = ack_time; 2386 log.u_bbr.flex4 = where; 2387 log.u_bbr.flex7 = 2; 2388 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2389 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2390 &rack->rc_inp->inp_socket->so_rcv, 2391 &rack->rc_inp->inp_socket->so_snd, 2392 TCP_LOG_RTT, 0, 2393 0, &log, false, &tv); 2394 } 2395 } 2396 2397 2398 2399 static inline void 2400 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int line) 2401 { 2402 if (rack_verbose_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) { 2403 union tcp_log_stackspecific log; 2404 struct timeval tv; 2405 2406 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2407 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2408 log.u_bbr.flex1 = line; 2409 log.u_bbr.flex2 = tick; 2410 log.u_bbr.flex3 = tp->t_maxunacktime; 2411 log.u_bbr.flex4 = tp->t_acktime; 2412 log.u_bbr.flex8 = event; 2413 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2414 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2415 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2416 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2417 log.u_bbr.pacing_gain = rack->r_must_retran; 2418 TCP_LOG_EVENTP(tp, NULL, 2419 &rack->rc_inp->inp_socket->so_rcv, 2420 &rack->rc_inp->inp_socket->so_snd, 2421 BBR_LOG_PROGRESS, 0, 2422 0, &log, false, &tv); 2423 } 2424 } 2425 2426 static void 2427 rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv) 2428 { 2429 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2430 union tcp_log_stackspecific log; 2431 2432 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2433 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2434 log.u_bbr.flex1 = slot; 2435 if (rack->rack_no_prr) 2436 log.u_bbr.flex2 = 0; 2437 else 2438 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; 2439 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); 2440 log.u_bbr.flex8 = rack->rc_in_persist; 2441 log.u_bbr.timeStamp = cts; 2442 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2443 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2444 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2445 log.u_bbr.pacing_gain = rack->r_must_retran; 2446 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2447 &rack->rc_inp->inp_socket->so_rcv, 
2448 &rack->rc_inp->inp_socket->so_snd, 2449 BBR_LOG_BBRSND, 0, 2450 0, &log, false, tv); 2451 } 2452 } 2453 2454 static void 2455 rack_log_doseg_done(struct tcp_rack *rack, uint32_t cts, int32_t nxt_pkt, int32_t did_out, int way_out, int nsegs) 2456 { 2457 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2458 union tcp_log_stackspecific log; 2459 struct timeval tv; 2460 2461 memset(&log, 0, sizeof(log)); 2462 log.u_bbr.flex1 = did_out; 2463 log.u_bbr.flex2 = nxt_pkt; 2464 log.u_bbr.flex3 = way_out; 2465 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2466 if (rack->rack_no_prr) 2467 log.u_bbr.flex5 = 0; 2468 else 2469 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2470 log.u_bbr.flex6 = nsegs; 2471 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; 2472 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ 2473 log.u_bbr.flex7 <<= 1; 2474 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ 2475 log.u_bbr.flex7 <<= 1; 2476 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ 2477 log.u_bbr.flex8 = rack->rc_in_persist; 2478 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2479 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2480 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2481 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2482 log.u_bbr.use_lt_bw <<= 1; 2483 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2484 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2485 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2486 log.u_bbr.pacing_gain = rack->r_must_retran; 2487 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2488 &rack->rc_inp->inp_socket->so_rcv, 2489 &rack->rc_inp->inp_socket->so_snd, 2490 BBR_LOG_DOSEG_DONE, 0, 2491 0, &log, false, &tv); 2492 } 2493 } 2494 2495 static void 2496 rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint8_t frm) 2497 { 2498 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 2499 union tcp_log_stackspecific log; 2500 struct timeval tv; 2501 2502 memset(&log, 0, sizeof(log)); 2503 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; 2504 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 2505 log.u_bbr.flex4 = arg1; 2506 log.u_bbr.flex5 = arg2; 2507 log.u_bbr.flex6 = arg3; 2508 log.u_bbr.flex8 = frm; 2509 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2510 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2511 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2512 log.u_bbr.applimited = rack->r_ctl.rc_sacked; 2513 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2514 log.u_bbr.pacing_gain = rack->r_must_retran; 2515 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, 2516 &tptosocket(tp)->so_snd, 2517 TCP_HDWR_PACE_SIZE, 0, 0, &log, false, &tv); 2518 } 2519 } 2520 2521 static void 2522 rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, 2523 uint8_t hpts_calling, int reason, uint32_t cwnd_to_use) 2524 { 2525 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2526 union tcp_log_stackspecific log; 2527 struct timeval tv; 2528 2529 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2530 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2531 log.u_bbr.flex1 = slot; 2532 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; 2533 log.u_bbr.flex4 = reason; 2534 if (rack->rack_no_prr) 2535 log.u_bbr.flex5 = 0; 2536 else 2537 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2538 log.u_bbr.flex7 = hpts_calling; 2539 log.u_bbr.flex8 = rack->rc_in_persist; 2540 log.u_bbr.lt_epoch = cwnd_to_use; 2541 log.u_bbr.timeStamp = 
tcp_get_usecs(&tv); 2542 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2543 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2544 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2545 log.u_bbr.pacing_gain = rack->r_must_retran; 2546 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; 2547 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2548 &rack->rc_inp->inp_socket->so_rcv, 2549 &rack->rc_inp->inp_socket->so_snd, 2550 BBR_LOG_JUSTRET, 0, 2551 tlen, &log, false, &tv); 2552 } 2553 } 2554 2555 static void 2556 rack_log_to_cancel(struct tcp_rack *rack, int32_t hpts_removed, int line, uint32_t us_cts, 2557 struct timeval *tv, uint32_t flags_on_entry) 2558 { 2559 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2560 union tcp_log_stackspecific log; 2561 2562 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2563 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 2564 log.u_bbr.flex1 = line; 2565 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; 2566 log.u_bbr.flex3 = flags_on_entry; 2567 log.u_bbr.flex4 = us_cts; 2568 if (rack->rack_no_prr) 2569 log.u_bbr.flex5 = 0; 2570 else 2571 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; 2572 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; 2573 log.u_bbr.flex7 = hpts_removed; 2574 log.u_bbr.flex8 = 1; 2575 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; 2576 log.u_bbr.timeStamp = us_cts; 2577 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2578 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2579 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2580 log.u_bbr.pacing_gain = rack->r_must_retran; 2581 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2582 &rack->rc_inp->inp_socket->so_rcv, 2583 &rack->rc_inp->inp_socket->so_snd, 2584 BBR_LOG_TIMERCANC, 0, 2585 0, &log, false, tv); 2586 } 2587 } 2588 2589 static void 2590 rack_log_alt_to_to_cancel(struct tcp_rack *rack, 2591 uint32_t flex1, uint32_t flex2, 2592 uint32_t flex3, uint32_t flex4, 2593 uint32_t flex5, uint32_t flex6, 2594 uint16_t flex7, uint8_t mod) 2595 { 2596 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2597 union tcp_log_stackspecific log; 2598 struct timeval tv; 2599 2600 if (mod == 1) { 2601 /* No you can't use 1, its for the real to cancel */ 2602 return; 2603 } 2604 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2605 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2606 log.u_bbr.flex1 = flex1; 2607 log.u_bbr.flex2 = flex2; 2608 log.u_bbr.flex3 = flex3; 2609 log.u_bbr.flex4 = flex4; 2610 log.u_bbr.flex5 = flex5; 2611 log.u_bbr.flex6 = flex6; 2612 log.u_bbr.flex7 = flex7; 2613 log.u_bbr.flex8 = mod; 2614 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2615 &rack->rc_inp->inp_socket->so_rcv, 2616 &rack->rc_inp->inp_socket->so_snd, 2617 BBR_LOG_TIMERCANC, 0, 2618 0, &log, false, &tv); 2619 } 2620 } 2621 2622 static void 2623 rack_log_to_processing(struct tcp_rack *rack, uint32_t cts, int32_t ret, int32_t timers) 2624 { 2625 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2626 union tcp_log_stackspecific log; 2627 struct timeval tv; 2628 2629 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2630 log.u_bbr.flex1 = timers; 2631 log.u_bbr.flex2 = ret; 2632 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; 2633 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; 2634 log.u_bbr.flex5 = cts; 2635 if (rack->rack_no_prr) 2636 log.u_bbr.flex6 = 0; 2637 else 2638 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; 2639 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; 2640 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; 2641 log.u_bbr.pacing_gain = rack->r_must_retran; 2642 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2643 
log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2644 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2645 &rack->rc_inp->inp_socket->so_rcv, 2646 &rack->rc_inp->inp_socket->so_snd, 2647 BBR_LOG_TO_PROCESS, 0, 2648 0, &log, false, &tv); 2649 } 2650 } 2651 2652 static void 2653 rack_log_to_prr(struct tcp_rack *rack, int frm, int orig_cwnd, int line) 2654 { 2655 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2656 union tcp_log_stackspecific log; 2657 struct timeval tv; 2658 2659 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2660 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; 2661 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; 2662 if (rack->rack_no_prr) 2663 log.u_bbr.flex3 = 0; 2664 else 2665 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; 2666 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; 2667 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; 2668 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; 2669 log.u_bbr.flex7 = line; 2670 log.u_bbr.flex8 = frm; 2671 log.u_bbr.pkts_out = orig_cwnd; 2672 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2673 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2674 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; 2675 log.u_bbr.use_lt_bw <<= 1; 2676 log.u_bbr.use_lt_bw |= rack->r_might_revert; 2677 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2678 &rack->rc_inp->inp_socket->so_rcv, 2679 &rack->rc_inp->inp_socket->so_snd, 2680 BBR_LOG_BBRUPD, 0, 2681 0, &log, false, &tv); 2682 } 2683 } 2684 2685 #ifdef NETFLIX_EXP_DETECTION 2686 static void 2687 rack_log_sad(struct tcp_rack *rack, int event) 2688 { 2689 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2690 union tcp_log_stackspecific log; 2691 struct timeval tv; 2692 2693 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 2694 log.u_bbr.flex1 = rack->r_ctl.sack_count; 2695 log.u_bbr.flex2 = rack->r_ctl.ack_count; 2696 log.u_bbr.flex3 = rack->r_ctl.sack_moved_extra; 2697 log.u_bbr.flex4 = rack->r_ctl.sack_noextra_move; 2698 log.u_bbr.flex5 = rack->r_ctl.rc_num_maps_alloced; 2699 log.u_bbr.flex6 = tcp_sack_to_ack_thresh; 2700 log.u_bbr.pkts_out = tcp_sack_to_move_thresh; 2701 log.u_bbr.lt_epoch = (tcp_force_detection << 8); 2702 log.u_bbr.lt_epoch |= rack->do_detection; 2703 log.u_bbr.applimited = tcp_map_minimum; 2704 log.u_bbr.flex7 = rack->sack_attack_disable; 2705 log.u_bbr.flex8 = event; 2706 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 2707 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 2708 log.u_bbr.delivered = tcp_sad_decay_val; 2709 TCP_LOG_EVENTP(rack->rc_tp, NULL, 2710 &rack->rc_inp->inp_socket->so_rcv, 2711 &rack->rc_inp->inp_socket->so_snd, 2712 TCP_SAD_DETECTION, 0, 2713 0, &log, false, &tv); 2714 } 2715 } 2716 #endif 2717 2718 static void 2719 rack_counter_destroy(void) 2720 { 2721 counter_u64_free(rack_fto_send); 2722 counter_u64_free(rack_fto_rsm_send); 2723 counter_u64_free(rack_nfto_resend); 2724 counter_u64_free(rack_hw_pace_init_fail); 2725 counter_u64_free(rack_hw_pace_lost); 2726 counter_u64_free(rack_non_fto_send); 2727 counter_u64_free(rack_extended_rfo); 2728 counter_u64_free(rack_ack_total); 2729 counter_u64_free(rack_express_sack); 2730 counter_u64_free(rack_sack_total); 2731 counter_u64_free(rack_move_none); 2732 counter_u64_free(rack_move_some); 2733 counter_u64_free(rack_sack_attacks_detected); 2734 counter_u64_free(rack_sack_attacks_reversed); 2735 counter_u64_free(rack_sack_used_next_merge); 2736 counter_u64_free(rack_sack_used_prev_merge); 2737 counter_u64_free(rack_tlp_tot); 2738 counter_u64_free(rack_tlp_newdata); 2739 counter_u64_free(rack_tlp_retran); 2740 
counter_u64_free(rack_tlp_retran_bytes); 2741 counter_u64_free(rack_to_tot); 2742 counter_u64_free(rack_saw_enobuf); 2743 counter_u64_free(rack_saw_enobuf_hw); 2744 counter_u64_free(rack_saw_enetunreach); 2745 counter_u64_free(rack_hot_alloc); 2746 counter_u64_free(rack_to_alloc); 2747 counter_u64_free(rack_to_alloc_hard); 2748 counter_u64_free(rack_to_alloc_emerg); 2749 counter_u64_free(rack_to_alloc_limited); 2750 counter_u64_free(rack_alloc_limited_conns); 2751 counter_u64_free(rack_split_limited); 2752 counter_u64_free(rack_multi_single_eq); 2753 counter_u64_free(rack_proc_non_comp_ack); 2754 counter_u64_free(rack_sack_proc_all); 2755 counter_u64_free(rack_sack_proc_restart); 2756 counter_u64_free(rack_sack_proc_short); 2757 counter_u64_free(rack_sack_skipped_acked); 2758 counter_u64_free(rack_sack_splits); 2759 counter_u64_free(rack_input_idle_reduces); 2760 counter_u64_free(rack_collapsed_win); 2761 counter_u64_free(rack_collapsed_win_rxt); 2762 counter_u64_free(rack_collapsed_win_rxt_bytes); 2763 counter_u64_free(rack_collapsed_win_seen); 2764 counter_u64_free(rack_try_scwnd); 2765 counter_u64_free(rack_persists_sends); 2766 counter_u64_free(rack_persists_acks); 2767 counter_u64_free(rack_persists_loss); 2768 counter_u64_free(rack_persists_lost_ends); 2769 #ifdef INVARIANTS 2770 counter_u64_free(rack_adjust_map_bw); 2771 #endif 2772 COUNTER_ARRAY_FREE(rack_out_size, TCP_MSS_ACCT_SIZE); 2773 COUNTER_ARRAY_FREE(rack_opts_arry, RACK_OPTS_SIZE); 2774 } 2775 2776 static struct rack_sendmap * 2777 rack_alloc(struct tcp_rack *rack) 2778 { 2779 struct rack_sendmap *rsm; 2780 2781 /* 2782 * First get the top of the list; in 2783 * theory it is the "hottest" rsm we have, 2784 * possibly just freed by ack processing. 2785 */ 2786 if (rack->rc_free_cnt > rack_free_cache) { 2787 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2788 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2789 counter_u64_add(rack_hot_alloc, 1); 2790 rack->rc_free_cnt--; 2791 return (rsm); 2792 } 2793 /* 2794 * Once we get under our free cache we probably 2795 * no longer have a "hot" one available. Let's 2796 * get one from UMA. 2797 */ 2798 rsm = uma_zalloc(rack_zone, M_NOWAIT); 2799 if (rsm) { 2800 rack->r_ctl.rc_num_maps_alloced++; 2801 counter_u64_add(rack_to_alloc, 1); 2802 return (rsm); 2803 } 2804 /* 2805 * Dig into our aux rsm's (the last two) since 2806 * UMA failed to get us one.
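 * These are the entries that the rack_free_cache check above left on the free list, kept so this emergency path still has something to hand out.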
2807 */ 2808 if (rack->rc_free_cnt) { 2809 counter_u64_add(rack_to_alloc_emerg, 1); 2810 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 2811 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2812 rack->rc_free_cnt--; 2813 return (rsm); 2814 } 2815 return (NULL); 2816 } 2817 2818 static struct rack_sendmap * 2819 rack_alloc_full_limit(struct tcp_rack *rack) 2820 { 2821 if ((V_tcp_map_entries_limit > 0) && 2822 (rack->do_detection == 0) && 2823 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 2824 counter_u64_add(rack_to_alloc_limited, 1); 2825 if (!rack->alloc_limit_reported) { 2826 rack->alloc_limit_reported = 1; 2827 counter_u64_add(rack_alloc_limited_conns, 1); 2828 } 2829 return (NULL); 2830 } 2831 return (rack_alloc(rack)); 2832 } 2833 2834 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 2835 static struct rack_sendmap * 2836 rack_alloc_limit(struct tcp_rack *rack, uint8_t limit_type) 2837 { 2838 struct rack_sendmap *rsm; 2839 2840 if (limit_type) { 2841 /* currently there is only one limit type */ 2842 if (V_tcp_map_split_limit > 0 && 2843 (rack->do_detection == 0) && 2844 rack->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 2845 counter_u64_add(rack_split_limited, 1); 2846 if (!rack->alloc_limit_reported) { 2847 rack->alloc_limit_reported = 1; 2848 counter_u64_add(rack_alloc_limited_conns, 1); 2849 } 2850 return (NULL); 2851 } 2852 } 2853 2854 /* allocate and mark in the limit type, if set */ 2855 rsm = rack_alloc(rack); 2856 if (rsm != NULL && limit_type) { 2857 rsm->r_limit_type = limit_type; 2858 rack->r_ctl.rc_num_split_allocs++; 2859 } 2860 return (rsm); 2861 } 2862 2863 static void 2864 rack_free(struct tcp_rack *rack, struct rack_sendmap *rsm) 2865 { 2866 if (rsm->r_flags & RACK_APP_LIMITED) { 2867 if (rack->r_ctl.rc_app_limited_cnt > 0) { 2868 rack->r_ctl.rc_app_limited_cnt--; 2869 } 2870 } 2871 if (rsm->r_limit_type) { 2872 /* currently there is only one limit type */ 2873 rack->r_ctl.rc_num_split_allocs--; 2874 } 2875 if (rsm == rack->r_ctl.rc_first_appl) { 2876 if (rack->r_ctl.rc_app_limited_cnt == 0) 2877 rack->r_ctl.rc_first_appl = NULL; 2878 else { 2879 /* Follow the next one out */ 2880 struct rack_sendmap fe; 2881 2882 fe.r_start = rsm->r_nseq_appl; 2883 rack->r_ctl.rc_first_appl = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 2884 } 2885 } 2886 if (rsm == rack->r_ctl.rc_resend) 2887 rack->r_ctl.rc_resend = NULL; 2888 if (rsm == rack->r_ctl.rc_end_appl) 2889 rack->r_ctl.rc_end_appl = NULL; 2890 if (rack->r_ctl.rc_tlpsend == rsm) 2891 rack->r_ctl.rc_tlpsend = NULL; 2892 if (rack->r_ctl.rc_sacklast == rsm) 2893 rack->r_ctl.rc_sacklast = NULL; 2894 memset(rsm, 0, sizeof(struct rack_sendmap)); 2895 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); 2896 rack->rc_free_cnt++; 2897 } 2898 2899 static void 2900 rack_free_trim(struct tcp_rack *rack) 2901 { 2902 struct rack_sendmap *rsm; 2903 2904 /* 2905 * Free up all the tail entries until 2906 * we get our list down to the limit. 
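 * Entries are trimmed from the tail, so the most recently freed (cache-warm) entries at the head remain available to rack_alloc().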
2907 */ 2908 while (rack->rc_free_cnt > rack_free_cache) { 2909 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); 2910 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 2911 rack->rc_free_cnt--; 2912 uma_zfree(rack_zone, rsm); 2913 } 2914 } 2915 2916 2917 static uint32_t 2918 rack_get_measure_window(struct tcpcb *tp, struct tcp_rack *rack) 2919 { 2920 uint64_t srtt, bw, len, tim; 2921 uint32_t segsiz, def_len, minl; 2922 2923 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 2924 def_len = rack_def_data_window * segsiz; 2925 if (rack->rc_gp_filled == 0) { 2926 /* 2927 * We have no measurement (IW is in flight?) so 2928 * we can only guess using our data_window sysctl 2929 * value (usually 20MSS). 2930 */ 2931 return (def_len); 2932 } 2933 /* 2934 * Now we have a number of factors to consider. 2935 * 2936 * 1) We have a desired BDP which is usually 2937 * at least 2. 2938 * 2) We have a minimum number of rtt's, usually 1 SRTT, 2939 * but we allow it to be more. 2940 * 3) We want to make sure a measurement lasts N useconds (if 2941 * we have set rack_min_measure_usec). 2942 * 2943 * We handle the first concern here by trying to create a data 2944 * window of max(rack_def_data_window, DesiredBDP). The 2945 * second concern we handle by not letting the measurement 2946 * window end normally until at least the required SRTT's 2947 * have gone by, which is done further below in 2948 * rack_enough_for_measurement(). Finally the third concern 2949 * we also handle here by calculating how long that time 2950 * would take at the current BW and then return the 2951 * max of our first calculation and that length. Note 2952 * that if rack_min_measure_usec is 0, we don't deal 2953 * with concern 3. Also for both Concern 1 and 3 an 2954 * application limited period could end the measurement 2955 * earlier. 2956 * 2957 * So let's calculate the BDP with the "known" b/w, using 2958 * the SRTT as our rtt, and then multiply it by the 2959 * goal (e.g. 1,000,000 bytes/sec over a 50000 usec SRTT with a goal of 2 gives ~100000 bytes, before rounding). 2960 */ 2961 bw = rack_get_bw(rack); 2962 srtt = (uint64_t)tp->t_srtt; 2963 len = bw * srtt; 2964 len /= (uint64_t)HPTS_USEC_IN_SEC; 2965 len *= max(1, rack_goal_bdp); 2966 /* Now we need to round up to the nearest MSS */ 2967 len = roundup(len, segsiz); 2968 if (rack_min_measure_usec) { 2969 /* Now calculate our min length for this b/w */ 2970 tim = rack_min_measure_usec; 2971 minl = (tim * bw) / (uint64_t)HPTS_USEC_IN_SEC; 2972 if (minl == 0) 2973 minl = 1; 2974 minl = roundup(minl, segsiz); 2975 if (len < minl) 2976 len = minl; 2977 } 2978 /* 2979 * Now if we have a very small window we want 2980 * to attempt to get the window that is 2981 * as small as possible. This happens on 2982 * low b/w connections and we don't want to 2983 * span huge numbers of rtt's between measurements. 2984 * 2985 * We basically include 2 over our "MIN window" so 2986 * that the measurement can be shortened (possibly) by 2987 * an ack'ed packet. 2988 */ 2989 if (len < def_len) 2990 return (max((uint32_t)len, ((MIN_GP_WIN+2) * segsiz))); 2991 else 2992 return (max((uint32_t)len, def_len)); 2993 2994 } 2995 2996 static int 2997 rack_enough_for_measurement(struct tcpcb *tp, struct tcp_rack *rack, tcp_seq th_ack, uint8_t *quality) 2998 { 2999 uint32_t tim, srtts, segsiz; 3000 3001 /* 3002 * Has enough time passed for the GP measurement to be valid?
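 * Illustrative example (hypothetical numbers): with rc_gp_srtt at 40000 usec and rack_min_srtts at 1, the time check below requires at least 40000 usec to have elapsed since gput_ts before the measurement is accepted on time alone.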
3003 */ 3004 if ((tp->snd_max == tp->snd_una) || 3005 (th_ack == tp->snd_max)){ 3006 /* All is acked */ 3007 *quality = RACK_QUALITY_ALLACKED; 3008 return (1); 3009 } 3010 if (SEQ_LT(th_ack, tp->gput_seq)) { 3011 /* Not enough bytes yet */ 3012 return (0); 3013 } 3014 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 3015 if (SEQ_LT(th_ack, tp->gput_ack) && 3016 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 3017 /* Not enough bytes yet */ 3018 return (0); 3019 } 3020 if (rack->r_ctl.rc_first_appl && 3021 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { 3022 /* 3023 * We are up to the app limited send point 3024 * we have to measure irrespective of the time.. 3025 */ 3026 *quality = RACK_QUALITY_APPLIMITED; 3027 return (1); 3028 } 3029 /* Now what about time? */ 3030 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); 3031 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; 3032 if (tim >= srtts) { 3033 *quality = RACK_QUALITY_HIGH; 3034 return (1); 3035 } 3036 /* Nope not even a full SRTT has passed */ 3037 return (0); 3038 } 3039 3040 static void 3041 rack_log_timely(struct tcp_rack *rack, 3042 uint32_t logged, uint64_t cur_bw, uint64_t low_bnd, 3043 uint64_t up_bnd, int line, uint8_t method) 3044 { 3045 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3046 union tcp_log_stackspecific log; 3047 struct timeval tv; 3048 3049 memset(&log, 0, sizeof(log)); 3050 log.u_bbr.flex1 = logged; 3051 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; 3052 log.u_bbr.flex2 <<= 4; 3053 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; 3054 log.u_bbr.flex2 <<= 4; 3055 log.u_bbr.flex2 |= rack->rc_gp_incr; 3056 log.u_bbr.flex2 <<= 4; 3057 log.u_bbr.flex2 |= rack->rc_gp_bwred; 3058 log.u_bbr.flex3 = rack->rc_gp_incr; 3059 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3060 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; 3061 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; 3062 log.u_bbr.flex7 = rack->rc_gp_bwred; 3063 log.u_bbr.flex8 = method; 3064 log.u_bbr.cur_del_rate = cur_bw; 3065 log.u_bbr.delRate = low_bnd; 3066 log.u_bbr.bw_inuse = up_bnd; 3067 log.u_bbr.rttProp = rack_get_bw(rack); 3068 log.u_bbr.pkt_epoch = line; 3069 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3070 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3071 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3072 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3073 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3074 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; 3075 log.u_bbr.cwnd_gain <<= 1; 3076 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; 3077 log.u_bbr.cwnd_gain <<= 1; 3078 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 3079 log.u_bbr.cwnd_gain <<= 1; 3080 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 3081 log.u_bbr.lost = rack->r_ctl.rc_loss_count; 3082 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3083 &rack->rc_inp->inp_socket->so_rcv, 3084 &rack->rc_inp->inp_socket->so_snd, 3085 TCP_TIMELY_WORK, 0, 3086 0, &log, false, &tv); 3087 } 3088 } 3089 3090 static int 3091 rack_bw_can_be_raised(struct tcp_rack *rack, uint64_t cur_bw, uint64_t last_bw_est, uint16_t mult) 3092 { 3093 /* 3094 * Before we increase we need to know if 3095 * the estimate just made was less than 3096 * our pacing goal (i.e. (cur_bw * mult) > last_bw_est) 3097 * 3098 * If we already are pacing at a fast enough 3099 * rate to push us faster there is no sense of 3100 * increasing. 3101 * 3102 * We first caculate our actual pacing rate (ss or ca multiplier 3103 * times our cur_bw). 
	 *
	 * Then we take the last measured rate and multiply by our
	 * maximum pacing overage to give us a max allowable rate.
	 *
	 * If our act_rate is smaller than our max_allowable rate
	 * then we should increase. Else we should hold steady.
	 *
	 */
	uint64_t act_rate, max_allow_rate;

	if (rack_timely_no_stopping)
		return (1);

	if ((cur_bw == 0) || (last_bw_est == 0)) {
		/*
		 * Initial startup case or
		 * everything is acked case.
		 */
		rack_log_timely(rack, mult, cur_bw, 0, 0,
				__LINE__, 9);
		return (1);
	}
	if (mult <= 100) {
		/*
		 * We can always pace at or slightly above our rate.
		 */
		rack_log_timely(rack, mult, cur_bw, 0, 0,
				__LINE__, 9);
		return (1);
	}
	act_rate = cur_bw * (uint64_t)mult;
	act_rate /= 100;
	max_allow_rate = last_bw_est * ((uint64_t)rack_max_per_above + (uint64_t)100);
	max_allow_rate /= 100;
	if (act_rate < max_allow_rate) {
		/*
		 * Here the rate we are actually pacing at
		 * is smaller than 10% above our last measurement.
		 * This means we are pacing below what we would
		 * like to try to achieve (plus some wiggle room).
		 */
		rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
				__LINE__, 9);
		return (1);
	} else {
		/*
		 * Here we are already pacing at least rack_max_per_above (10%)
		 * above what we are getting back. This most likely indicates
		 * that we are being limited (cwnd/rwnd/app) and can't
		 * get any more b/w. There is no sense in trying to
		 * raise the pacing rate; it's not speeding us up
		 * and we already are pacing faster than we are getting.
		 */
		rack_log_timely(rack, mult, cur_bw, act_rate, max_allow_rate,
				__LINE__, 8);
		return (0);
	}
}

static void
rack_validate_multipliers_at_or_above100(struct tcp_rack *rack)
{
	/*
	 * When we drag bottom, we want to assure
	 * that no multiplier is below 1.0; if one is,
	 * we want to restore it to at least that.
	 */
	if (rack->r_ctl.rack_per_of_gp_rec < 100) {
		/* This is unlikely; we usually do not touch recovery */
		rack->r_ctl.rack_per_of_gp_rec = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ca < 100) {
		rack->r_ctl.rack_per_of_gp_ca = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ss < 100) {
		rack->r_ctl.rack_per_of_gp_ss = 100;
	}
}

static void
rack_validate_multipliers_at_or_below_100(struct tcp_rack *rack)
{
	if (rack->r_ctl.rack_per_of_gp_ca > 100) {
		rack->r_ctl.rack_per_of_gp_ca = 100;
	}
	if (rack->r_ctl.rack_per_of_gp_ss > 100) {
		rack->r_ctl.rack_per_of_gp_ss = 100;
	}
}

static void
rack_increase_bw_mul(struct tcp_rack *rack, int timely_says, uint64_t cur_bw, uint64_t last_bw_est, int override)
{
	int32_t calc, logged, plus;

	logged = 0;

	if (override) {
		/*
		 * override is passed when we are
		 * losing b/w and making one last
		 * gasp at trying to not lose out
		 * to a new-reno flow.
		 */
		goto extra_boost;
	}
	/* In classic timely we boost by 5x if we have 5 increases in a row; let's not */
	if (rack->rc_gp_incr &&
	    ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) {
		/*
		 * Reset and get 5 strokes more before the boost. Note
		 * that the count is 0 based so we have to add one.
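		 *
		 * (Added, illustrative numbers only.)  For example, if
		 * rack_gp_increase_per were set to 2, a normal pass below
		 * adds 2 to whichever multiplier applies, while the fifth
		 * consecutive increase (or an override) takes the
		 * extra_boost path and adds 2 * RACK_TIMELY_CNT_BOOST = 10,
		 * after which the consecutive-increase count starts over.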
3216 */ 3217 extra_boost: 3218 plus = (uint32_t)rack_gp_increase_per * RACK_TIMELY_CNT_BOOST; 3219 rack->rc_gp_timely_inc_cnt = 0; 3220 } else 3221 plus = (uint32_t)rack_gp_increase_per; 3222 /* Must be at least 1% increase for true timely increases */ 3223 if ((plus < 1) && 3224 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) 3225 plus = 1; 3226 if (rack->rc_gp_saw_rec && 3227 (rack->rc_gp_no_rec_chg == 0) && 3228 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3229 rack->r_ctl.rack_per_of_gp_rec)) { 3230 /* We have been in recovery ding it too */ 3231 calc = rack->r_ctl.rack_per_of_gp_rec + plus; 3232 if (calc > 0xffff) 3233 calc = 0xffff; 3234 logged |= 1; 3235 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; 3236 if (rack_per_upper_bound_ss && 3237 (rack->rc_dragged_bottom == 0) && 3238 (rack->r_ctl.rack_per_of_gp_rec > rack_per_upper_bound_ss)) 3239 rack->r_ctl.rack_per_of_gp_rec = rack_per_upper_bound_ss; 3240 } 3241 if (rack->rc_gp_saw_ca && 3242 (rack->rc_gp_saw_ss == 0) && 3243 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3244 rack->r_ctl.rack_per_of_gp_ca)) { 3245 /* In CA */ 3246 calc = rack->r_ctl.rack_per_of_gp_ca + plus; 3247 if (calc > 0xffff) 3248 calc = 0xffff; 3249 logged |= 2; 3250 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; 3251 if (rack_per_upper_bound_ca && 3252 (rack->rc_dragged_bottom == 0) && 3253 (rack->r_ctl.rack_per_of_gp_ca > rack_per_upper_bound_ca)) 3254 rack->r_ctl.rack_per_of_gp_ca = rack_per_upper_bound_ca; 3255 } 3256 if (rack->rc_gp_saw_ss && 3257 rack_bw_can_be_raised(rack, cur_bw, last_bw_est, 3258 rack->r_ctl.rack_per_of_gp_ss)) { 3259 /* In SS */ 3260 calc = rack->r_ctl.rack_per_of_gp_ss + plus; 3261 if (calc > 0xffff) 3262 calc = 0xffff; 3263 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; 3264 if (rack_per_upper_bound_ss && 3265 (rack->rc_dragged_bottom == 0) && 3266 (rack->r_ctl.rack_per_of_gp_ss > rack_per_upper_bound_ss)) 3267 rack->r_ctl.rack_per_of_gp_ss = rack_per_upper_bound_ss; 3268 logged |= 4; 3269 } 3270 if (logged && 3271 (rack->rc_gp_incr == 0)){ 3272 /* Go into increment mode */ 3273 rack->rc_gp_incr = 1; 3274 rack->rc_gp_timely_inc_cnt = 0; 3275 } 3276 if (rack->rc_gp_incr && 3277 logged && 3278 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { 3279 rack->rc_gp_timely_inc_cnt++; 3280 } 3281 rack_log_timely(rack, logged, plus, 0, 0, 3282 __LINE__, 1); 3283 } 3284 3285 static uint32_t 3286 rack_get_decrease(struct tcp_rack *rack, uint32_t curper, int32_t rtt_diff) 3287 { 3288 /* 3289 * norm_grad = rtt_diff / minrtt; 3290 * new_per = curper * (1 - B * norm_grad) 3291 * 3292 * B = rack_gp_decrease_per (default 10%) 3293 * rtt_dif = input var current rtt-diff 3294 * curper = input var current percentage 3295 * minrtt = from rack filter 3296 * 3297 */ 3298 uint64_t perf; 3299 3300 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3301 ((uint64_t)rack_gp_decrease_per * (uint64_t)10000 * 3302 (((uint64_t)rtt_diff * (uint64_t)1000000)/ 3303 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ 3304 (uint64_t)1000000)) / 3305 (uint64_t)1000000); 3306 if (perf > curper) { 3307 /* TSNH */ 3308 perf = curper - 1; 3309 } 3310 return ((uint32_t)perf); 3311 } 3312 3313 static uint32_t 3314 rack_decrease_highrtt(struct tcp_rack *rack, uint32_t curper, uint32_t rtt) 3315 { 3316 /* 3317 * highrttthresh 3318 * result = curper * (1 - (B * ( 1 - ------ )) 3319 * gp_srtt 3320 * 3321 * B = rack_gp_decrease_per (default 10%) 3322 * highrttthresh = filter_min * rack_gp_rtt_maxmul 3323 */ 3324 uint64_t perf; 3325 uint32_t 
highrttthresh; 3326 3327 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; 3328 3329 perf = (((uint64_t)curper * ((uint64_t)1000000 - 3330 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - 3331 ((uint64_t)highrttthresh * (uint64_t)1000000) / 3332 (uint64_t)rtt)) / 100)) /(uint64_t)1000000); 3333 return (perf); 3334 } 3335 3336 static void 3337 rack_decrease_bw_mul(struct tcp_rack *rack, int timely_says, uint32_t rtt, int32_t rtt_diff) 3338 { 3339 uint64_t logvar, logvar2, logvar3; 3340 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; 3341 3342 if (rack->rc_gp_incr) { 3343 /* Turn off increment counting */ 3344 rack->rc_gp_incr = 0; 3345 rack->rc_gp_timely_inc_cnt = 0; 3346 } 3347 ss_red = ca_red = rec_red = 0; 3348 logged = 0; 3349 /* Calculate the reduction value */ 3350 if (rtt_diff < 0) { 3351 rtt_diff *= -1; 3352 } 3353 /* Must be at least 1% reduction */ 3354 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { 3355 /* We have been in recovery ding it too */ 3356 if (timely_says == 2) { 3357 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); 3358 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3359 if (alt < new_per) 3360 val = alt; 3361 else 3362 val = new_per; 3363 } else 3364 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3365 if (rack->r_ctl.rack_per_of_gp_rec > val) { 3366 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); 3367 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; 3368 } else { 3369 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3370 rec_red = 0; 3371 } 3372 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) 3373 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; 3374 logged |= 1; 3375 } 3376 if (rack->rc_gp_saw_ss) { 3377 /* Sent in SS */ 3378 if (timely_says == 2) { 3379 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); 3380 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3381 if (alt < new_per) 3382 val = alt; 3383 else 3384 val = new_per; 3385 } else 3386 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); 3387 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { 3388 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; 3389 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; 3390 } else { 3391 ss_red = new_per; 3392 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3393 logvar = new_per; 3394 logvar <<= 32; 3395 logvar |= alt; 3396 logvar2 = (uint32_t)rtt; 3397 logvar2 <<= 32; 3398 logvar2 |= (uint32_t)rtt_diff; 3399 logvar3 = rack_gp_rtt_maxmul; 3400 logvar3 <<= 32; 3401 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3402 rack_log_timely(rack, timely_says, 3403 logvar2, logvar3, 3404 logvar, __LINE__, 10); 3405 } 3406 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) 3407 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; 3408 logged |= 4; 3409 } else if (rack->rc_gp_saw_ca) { 3410 /* Sent in CA */ 3411 if (timely_says == 2) { 3412 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); 3413 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); 3414 if (alt < new_per) 3415 val = alt; 3416 else 3417 val = new_per; 3418 } else 3419 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); 3420 if (rack->r_ctl.rack_per_of_gp_ca > val) { 3421 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; 3422 rack->r_ctl.rack_per_of_gp_ca = 
(uint16_t)val; 3423 } else { 3424 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3425 ca_red = 0; 3426 logvar = new_per; 3427 logvar <<= 32; 3428 logvar |= alt; 3429 logvar2 = (uint32_t)rtt; 3430 logvar2 <<= 32; 3431 logvar2 |= (uint32_t)rtt_diff; 3432 logvar3 = rack_gp_rtt_maxmul; 3433 logvar3 <<= 32; 3434 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3435 rack_log_timely(rack, timely_says, 3436 logvar2, logvar3, 3437 logvar, __LINE__, 10); 3438 } 3439 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) 3440 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; 3441 logged |= 2; 3442 } 3443 if (rack->rc_gp_timely_dec_cnt < 0x7) { 3444 rack->rc_gp_timely_dec_cnt++; 3445 if (rack_timely_dec_clear && 3446 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) 3447 rack->rc_gp_timely_dec_cnt = 0; 3448 } 3449 logvar = ss_red; 3450 logvar <<= 32; 3451 logvar |= ca_red; 3452 rack_log_timely(rack, logged, rec_red, rack_per_lower_bound, logvar, 3453 __LINE__, 2); 3454 } 3455 3456 static void 3457 rack_log_rtt_shrinks(struct tcp_rack *rack, uint32_t us_cts, 3458 uint32_t rtt, uint32_t line, uint8_t reas) 3459 { 3460 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 3461 union tcp_log_stackspecific log; 3462 struct timeval tv; 3463 3464 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 3465 log.u_bbr.flex1 = line; 3466 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; 3467 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; 3468 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; 3469 log.u_bbr.flex5 = rtt; 3470 log.u_bbr.flex6 = rack->rc_highly_buffered; 3471 log.u_bbr.flex6 <<= 1; 3472 log.u_bbr.flex6 |= rack->forced_ack; 3473 log.u_bbr.flex6 <<= 1; 3474 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; 3475 log.u_bbr.flex6 <<= 1; 3476 log.u_bbr.flex6 |= rack->in_probe_rtt; 3477 log.u_bbr.flex6 <<= 1; 3478 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; 3479 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; 3480 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; 3481 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; 3482 log.u_bbr.flex8 = reas; 3483 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 3484 log.u_bbr.delRate = rack_get_bw(rack); 3485 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; 3486 log.u_bbr.cur_del_rate <<= 32; 3487 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; 3488 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; 3489 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; 3490 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 3491 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; 3492 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; 3493 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; 3494 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; 3495 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3496 log.u_bbr.rttProp = us_cts; 3497 log.u_bbr.rttProp <<= 32; 3498 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; 3499 TCP_LOG_EVENTP(rack->rc_tp, NULL, 3500 &rack->rc_inp->inp_socket->so_rcv, 3501 &rack->rc_inp->inp_socket->so_snd, 3502 BBR_LOG_RTT_SHRINKS, 0, 3503 0, &log, false, &rack->r_ctl.act_rcv_time); 3504 } 3505 } 3506 3507 static void 3508 rack_set_prtt_target(struct tcp_rack *rack, uint32_t segsiz, uint32_t rtt) 3509 { 3510 uint64_t bwdp; 3511 3512 bwdp = rack_get_bw(rack); 3513 bwdp *= (uint64_t)rtt; 3514 bwdp /= (uint64_t)HPTS_USEC_IN_SEC; 3515 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); 3516 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * 
rack_timely_min_segs)) { 3517 /* 3518 * A window protocol must be able to have 4 packets 3519 * outstanding as the floor in order to function 3520 * (especially considering delayed ack :D). 3521 */ 3522 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); 3523 } 3524 } 3525 3526 static void 3527 rack_enter_probertt(struct tcp_rack *rack, uint32_t us_cts) 3528 { 3529 /** 3530 * ProbeRTT is a bit different in rack_pacing than in 3531 * BBR. It is like BBR in that it uses the lowering of 3532 * the RTT as a signal that we saw something new and 3533 * counts from there for how long between. But it is 3534 * different in that its quite simple. It does not 3535 * play with the cwnd and wait until we get down 3536 * to N segments outstanding and hold that for 3537 * 200ms. Instead it just sets the pacing reduction 3538 * rate to a set percentage (70 by default) and hold 3539 * that for a number of recent GP Srtt's. 3540 */ 3541 uint32_t segsiz; 3542 3543 if (rack->rc_gp_dyn_mul == 0) 3544 return; 3545 3546 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { 3547 /* We are idle */ 3548 return; 3549 } 3550 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3551 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3552 /* 3553 * Stop the goodput now, the idea here is 3554 * that future measurements with in_probe_rtt 3555 * won't register if they are not greater so 3556 * we want to get what info (if any) is available 3557 * now. 3558 */ 3559 rack_do_goodput_measurement(rack->rc_tp, rack, 3560 rack->rc_tp->snd_una, __LINE__, 3561 RACK_QUALITY_PROBERTT); 3562 } 3563 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3564 rack->r_ctl.rc_time_probertt_entered = us_cts; 3565 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3566 rack->r_ctl.rc_pace_min_segs); 3567 rack->in_probe_rtt = 1; 3568 rack->measure_saw_probe_rtt = 1; 3569 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3570 rack->r_ctl.rc_time_probertt_starts = 0; 3571 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; 3572 if (rack_probertt_use_min_rtt_entry) 3573 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3574 else 3575 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); 3576 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3577 __LINE__, RACK_RTTS_ENTERPROBE); 3578 } 3579 3580 static void 3581 rack_exit_probertt(struct tcp_rack *rack, uint32_t us_cts) 3582 { 3583 struct rack_sendmap *rsm; 3584 uint32_t segsiz; 3585 3586 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), 3587 rack->r_ctl.rc_pace_min_segs); 3588 rack->in_probe_rtt = 0; 3589 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && 3590 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { 3591 /* 3592 * Stop the goodput now, the idea here is 3593 * that future measurements with in_probe_rtt 3594 * won't register if they are not greater so 3595 * we want to get what info (if any) is available 3596 * now. 3597 */ 3598 rack_do_goodput_measurement(rack->rc_tp, rack, 3599 rack->rc_tp->snd_una, __LINE__, 3600 RACK_QUALITY_PROBERTT); 3601 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 3602 /* 3603 * We don't have enough data to make a measurement. 3604 * So lets just stop and start here after exiting 3605 * probe-rtt. We probably are not interested in 3606 * the results anyway. 3607 */ 3608 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; 3609 } 3610 /* 3611 * Measurements through the current snd_max are going 3612 * to be limited by the slower pacing rate. 
3613 * 3614 * We need to mark these as app-limited so we 3615 * don't collapse the b/w. 3616 */ 3617 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 3618 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 3619 if (rack->r_ctl.rc_app_limited_cnt == 0) 3620 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 3621 else { 3622 /* 3623 * Go out to the end app limited and mark 3624 * this new one as next and move the end_appl up 3625 * to this guy. 3626 */ 3627 if (rack->r_ctl.rc_end_appl) 3628 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 3629 rack->r_ctl.rc_end_appl = rsm; 3630 } 3631 rsm->r_flags |= RACK_APP_LIMITED; 3632 rack->r_ctl.rc_app_limited_cnt++; 3633 } 3634 /* 3635 * Now, we need to examine our pacing rate multipliers. 3636 * If its under 100%, we need to kick it back up to 3637 * 100%. We also don't let it be over our "max" above 3638 * the actual rate i.e. 100% + rack_clamp_atexit_prtt. 3639 * Note setting clamp_atexit_prtt to 0 has the effect 3640 * of setting CA/SS to 100% always at exit (which is 3641 * the default behavior). 3642 */ 3643 if (rack_probertt_clear_is) { 3644 rack->rc_gp_incr = 0; 3645 rack->rc_gp_bwred = 0; 3646 rack->rc_gp_timely_inc_cnt = 0; 3647 rack->rc_gp_timely_dec_cnt = 0; 3648 } 3649 /* Do we do any clamping at exit? */ 3650 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { 3651 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; 3652 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; 3653 } 3654 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { 3655 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; 3656 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; 3657 } 3658 /* 3659 * Lets set rtt_diff to 0, so that we will get a "boost" 3660 * after exiting. 3661 */ 3662 rack->r_ctl.rc_rtt_diff = 0; 3663 3664 /* Clear all flags so we start fresh */ 3665 rack->rc_tp->t_bytes_acked = 0; 3666 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 3667 /* 3668 * If configured to, set the cwnd and ssthresh to 3669 * our targets. 3670 */ 3671 if (rack_probe_rtt_sets_cwnd) { 3672 uint64_t ebdp; 3673 uint32_t setto; 3674 3675 /* Set ssthresh so we get into CA once we hit our target */ 3676 if (rack_probertt_use_min_rtt_exit == 1) { 3677 /* Set to min rtt */ 3678 rack_set_prtt_target(rack, segsiz, 3679 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); 3680 } else if (rack_probertt_use_min_rtt_exit == 2) { 3681 /* Set to current gp rtt */ 3682 rack_set_prtt_target(rack, segsiz, 3683 rack->r_ctl.rc_gp_srtt); 3684 } else if (rack_probertt_use_min_rtt_exit == 3) { 3685 /* Set to entry gp rtt */ 3686 rack_set_prtt_target(rack, segsiz, 3687 rack->r_ctl.rc_entry_gp_rtt); 3688 } else { 3689 uint64_t sum; 3690 uint32_t setval; 3691 3692 sum = rack->r_ctl.rc_entry_gp_rtt; 3693 sum *= 10; 3694 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); 3695 if (sum >= 20) { 3696 /* 3697 * A highly buffered path needs 3698 * cwnd space for timely to work. 3699 * Lets set things up as if 3700 * we are heading back here again. 3701 */ 3702 setval = rack->r_ctl.rc_entry_gp_rtt; 3703 } else if (sum >= 15) { 3704 /* 3705 * Lets take the smaller of the 3706 * two since we are just somewhat 3707 * buffered. 3708 */ 3709 setval = rack->r_ctl.rc_gp_srtt; 3710 if (setval > rack->r_ctl.rc_entry_gp_rtt) 3711 setval = rack->r_ctl.rc_entry_gp_rtt; 3712 } else { 3713 /* 3714 * Here we are not highly buffered 3715 * and should pick the min we can to 3716 * keep from causing loss. 
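				 *
				 * (Added, hypothetical numbers.)  The ratio above is
				 * sum = (entry_gp_rtt * 10) / gp_srtt, i.e. ten times
				 * the entry-RTT-to-current-SRTT ratio.  If we entered
				 * probe-rtt at a 60 ms gp_srtt and drained down to
				 * 20 ms, sum = 30 (>= 20) and the target is sized from
				 * the entry RTT; at 35 ms vs 20 ms, sum = 17 and we
				 * take the smaller of the two; below a 1.5x ratio we
				 * fall back to the filtered minimum RTT here.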
3717 */ 3718 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 3719 } 3720 rack_set_prtt_target(rack, segsiz, 3721 setval); 3722 } 3723 if (rack_probe_rtt_sets_cwnd > 1) { 3724 /* There is a percentage here to boost */ 3725 ebdp = rack->r_ctl.rc_target_probertt_flight; 3726 ebdp *= rack_probe_rtt_sets_cwnd; 3727 ebdp /= 100; 3728 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; 3729 } else 3730 setto = rack->r_ctl.rc_target_probertt_flight; 3731 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); 3732 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { 3733 /* Enforce a min */ 3734 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; 3735 } 3736 /* If we set in the cwnd also set the ssthresh point so we are in CA */ 3737 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); 3738 } 3739 rack_log_rtt_shrinks(rack, us_cts, 3740 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3741 __LINE__, RACK_RTTS_EXITPROBE); 3742 /* Clear times last so log has all the info */ 3743 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; 3744 rack->r_ctl.rc_time_probertt_entered = us_cts; 3745 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 3746 rack->r_ctl.rc_time_of_last_probertt = us_cts; 3747 } 3748 3749 static void 3750 rack_check_probe_rtt(struct tcp_rack *rack, uint32_t us_cts) 3751 { 3752 /* Check in on probe-rtt */ 3753 if (rack->rc_gp_filled == 0) { 3754 /* We do not do p-rtt unless we have gp measurements */ 3755 return; 3756 } 3757 if (rack->in_probe_rtt) { 3758 uint64_t no_overflow; 3759 uint32_t endtime, must_stay; 3760 3761 if (rack->r_ctl.rc_went_idle_time && 3762 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { 3763 /* 3764 * We went idle during prtt, just exit now. 3765 */ 3766 rack_exit_probertt(rack, us_cts); 3767 } else if (rack_probe_rtt_safety_val && 3768 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && 3769 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { 3770 /* 3771 * Probe RTT safety value triggered! 3772 */ 3773 rack_log_rtt_shrinks(rack, us_cts, 3774 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3775 __LINE__, RACK_RTTS_SAFETY); 3776 rack_exit_probertt(rack, us_cts); 3777 } 3778 /* Calculate the max we will wait */ 3779 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); 3780 if (rack->rc_highly_buffered) 3781 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); 3782 /* Calculate the min we must wait */ 3783 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); 3784 if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) && 3785 TSTMP_LT(us_cts, endtime)) { 3786 uint32_t calc; 3787 /* Do we lower more? 
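			 *
			 * (Added explanation.)  While flight is still above the
			 * probe-rtt target and we are before the drain deadline,
			 * the pacing percentage keeps stepping down: calc counts
			 * whole gp_srtt's spent in probe-rtt so far, and each one
			 * subtracts rack_per_of_gp_probertt_reduce from the
			 * starting rack_per_of_gp_probertt (70 by default, per the
			 * comment in rack_enter_probertt), floored at
			 * rack_per_of_gp_lowthresh.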
*/ 3788 no_exit: 3789 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) 3790 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; 3791 else 3792 calc = 0; 3793 calc /= max(rack->r_ctl.rc_gp_srtt, 1); 3794 if (calc) { 3795 /* Maybe */ 3796 calc *= rack_per_of_gp_probertt_reduce; 3797 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; 3798 /* Limit it too */ 3799 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) 3800 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; 3801 } 3802 /* We must reach target or the time set */ 3803 return; 3804 } 3805 if (rack->r_ctl.rc_time_probertt_starts == 0) { 3806 if ((TSTMP_LT(us_cts, must_stay) && 3807 rack->rc_highly_buffered) || 3808 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > 3809 rack->r_ctl.rc_target_probertt_flight)) { 3810 /* We are not past the must_stay time */ 3811 goto no_exit; 3812 } 3813 rack_log_rtt_shrinks(rack, us_cts, 3814 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), 3815 __LINE__, RACK_RTTS_REACHTARGET); 3816 rack->r_ctl.rc_time_probertt_starts = us_cts; 3817 if (rack->r_ctl.rc_time_probertt_starts == 0) 3818 rack->r_ctl.rc_time_probertt_starts = 1; 3819 /* Restore back to our rate we want to pace at in prtt */ 3820 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 3821 } 3822 /* 3823 * Setup our end time, some number of gp_srtts plus 200ms. 3824 */ 3825 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * 3826 (uint64_t)rack_probertt_gpsrtt_cnt_mul); 3827 if (rack_probertt_gpsrtt_cnt_div) 3828 endtime = (uint32_t)(no_overflow / (uint64_t)rack_probertt_gpsrtt_cnt_div); 3829 else 3830 endtime = 0; 3831 endtime += rack_min_probertt_hold; 3832 endtime += rack->r_ctl.rc_time_probertt_starts; 3833 if (TSTMP_GEQ(us_cts, endtime)) { 3834 /* yes, exit probertt */ 3835 rack_exit_probertt(rack, us_cts); 3836 } 3837 3838 } else if ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt) { 3839 /* Go into probertt, its been too long since we went lower */ 3840 rack_enter_probertt(rack, us_cts); 3841 } 3842 } 3843 3844 static void 3845 rack_update_multiplier(struct tcp_rack *rack, int32_t timely_says, uint64_t last_bw_est, 3846 uint32_t rtt, int32_t rtt_diff) 3847 { 3848 uint64_t cur_bw, up_bnd, low_bnd, subfr; 3849 uint32_t losses; 3850 3851 if ((rack->rc_gp_dyn_mul == 0) || 3852 (rack->use_fixed_rate) || 3853 (rack->in_probe_rtt) || 3854 (rack->rc_always_pace == 0)) { 3855 /* No dynamic GP multiplier in play */ 3856 return; 3857 } 3858 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; 3859 cur_bw = rack_get_bw(rack); 3860 /* Calculate our up and down range */ 3861 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; 3862 up_bnd /= 100; 3863 up_bnd += rack->r_ctl.last_gp_comp_bw; 3864 3865 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; 3866 subfr /= 100; 3867 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; 3868 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { 3869 /* 3870 * This is the case where our RTT is above 3871 * the max target and we have been configured 3872 * to just do timely no bonus up stuff in that case. 3873 * 3874 * There are two configurations, set to 1, and we 3875 * just do timely if we are over our max. If its 3876 * set above 1 then we slam the multipliers down 3877 * to 100 and then decrement per timely. 
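		 *
		 * (Added, hypothetical numbers for reference.)  The bounds
		 * computed just above are
		 *   up_bnd  = last_gp_comp_bw * (100 + rack_gp_per_bw_mul_up) / 100
		 *   low_bnd = last_gp_comp_bw * (100 - rack_gp_per_bw_mul_down) / 100
		 * so with a 100 Mbps comparison b/w and, say, 2 up / 4 down,
		 * a new estimate above 102 Mbps counts as gaining ground and
		 * one below 96 Mbps as losing it; the branches below act on
		 * those two cases, and this branch short-circuits them when
		 * the RTT is already past the max target.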
		 */
		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
				__LINE__, 3);
		if (rack->r_ctl.rc_no_push_at_mrtt > 1)
			rack_validate_multipliers_at_or_below_100(rack);
		rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
	} else if ((last_bw_est < low_bnd) && !losses) {
		/*
		 * We are decreasing; this is a bit complicated. It
		 * means we are losing ground. This could be
		 * because another flow entered and we are competing
		 * for b/w with it. This will push the RTT up which
		 * makes timely unusable unless we want to get shoved
		 * into a corner and just be backed off (the age
		 * old problem with delay based CC).
		 *
		 * On the other hand if it was a route change we
		 * would like to stay somewhat contained and not
		 * blow out the buffers.
		 */
		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
				__LINE__, 3);
		rack->r_ctl.last_gp_comp_bw = cur_bw;
		if (rack->rc_gp_bwred == 0) {
			/* Go into reduction counting */
			rack->rc_gp_bwred = 1;
			rack->rc_gp_timely_dec_cnt = 0;
		}
		if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) ||
		    (timely_says == 0)) {
			/*
			 * Push another time with faster pacing
			 * to try to gain back (we include override to
			 * get a full raise factor).
			 */
			if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) ||
			    (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) ||
			    (timely_says == 0) ||
			    (rack_down_raise_thresh == 0)) {
				/*
				 * Do an override up in b/w if we were
				 * below the threshold, or if the threshold
				 * is zero, in which case we always do the raise.
				 */
				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 1);
			} else {
				/* Log it stays the same */
				rack_log_timely(rack, 0, last_bw_est, low_bnd, 0,
						__LINE__, 11);
			}
			rack->rc_gp_timely_dec_cnt++;
			/* We are not incrementing, so don't count this */
			rack->rc_gp_incr = 0;
			rack->rc_gp_timely_inc_cnt = 0;
		} else {
			/*
			 * Let's just use the RTT
			 * information and give up
			 * pushing.
			 */
			goto use_timely;
		}
	} else if ((timely_says != 2) &&
	    !losses &&
	    (last_bw_est > up_bnd)) {
		/*
		 * We are increasing b/w, let's keep going, updating
		 * our b/w and ignoring any timely input, unless
		 * of course we are at our max raise (if there is one).
		 */

		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
				__LINE__, 3);
		rack->r_ctl.last_gp_comp_bw = cur_bw;
		if (rack->rc_gp_saw_ss &&
		    rack_per_upper_bound_ss &&
		    (rack->r_ctl.rack_per_of_gp_ss == rack_per_upper_bound_ss)) {
			/*
			 * In cases where we can't go higher
			 * we should just use timely.
			 */
			goto use_timely;
		}
		if (rack->rc_gp_saw_ca &&
		    rack_per_upper_bound_ca &&
		    (rack->r_ctl.rack_per_of_gp_ca == rack_per_upper_bound_ca)) {
			/*
			 * In cases where we can't go higher
			 * we should just use timely.
			 */
			goto use_timely;
		}
		rack->rc_gp_bwred = 0;
		rack->rc_gp_timely_dec_cnt = 0;
		/* You get a set number of pushes if timely is trying to reduce */
		if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) {
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		} else {
			/* Log it stays the same */
			rack_log_timely(rack, 0, last_bw_est, up_bnd, 0,
					__LINE__, 12);
		}
		return;
	} else {
		/*
		 * We are staying between the lower and upper range bounds
		 * so use timely to decide.
		 */
		rack_log_timely(rack, timely_says, cur_bw, low_bnd, up_bnd,
				__LINE__, 3);
use_timely:
		if (timely_says) {
			rack->rc_gp_incr = 0;
			rack->rc_gp_timely_inc_cnt = 0;
			if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) &&
			    !losses &&
			    (last_bw_est < low_bnd)) {
				/* We are losing ground */
				rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
				rack->rc_gp_timely_dec_cnt++;
				/* We are not incrementing, so don't count this */
				rack->rc_gp_incr = 0;
				rack->rc_gp_timely_inc_cnt = 0;
			} else
				rack_decrease_bw_mul(rack, timely_says, rtt, rtt_diff);
		} else {
			rack->rc_gp_bwred = 0;
			rack->rc_gp_timely_dec_cnt = 0;
			rack_increase_bw_mul(rack, timely_says, cur_bw, last_bw_est, 0);
		}
	}
}

static int32_t
rack_make_timely_judgement(struct tcp_rack *rack, uint32_t rtt, int32_t rtt_diff, uint32_t prev_rtt)
{
	int32_t timely_says;
	uint64_t log_mult, log_rtt_a_diff;

	log_rtt_a_diff = rtt;
	log_rtt_a_diff <<= 32;
	log_rtt_a_diff |= (uint32_t)rtt_diff;
	if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) *
		    rack_gp_rtt_maxmul)) {
		/* Reduce the b/w multiplier */
		timely_says = 2;
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul;
		log_mult <<= 32;
		log_mult |= prev_rtt;
		rack_log_timely(rack, timely_says, log_mult,
				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
				log_rtt_a_diff, __LINE__, 4);
	} else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
			   ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
			    max(rack_gp_rtt_mindiv, 1)))) {
		/* Increase the b/w multiplier */
		log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) +
			((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) /
			 max(rack_gp_rtt_mindiv, 1));
		log_mult <<= 32;
		log_mult |= prev_rtt;
		timely_says = 0;
		rack_log_timely(rack, timely_says, log_mult,
				get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt),
				log_rtt_a_diff, __LINE__, 5);
	} else {
		/*
		 * We use a gradient to decide; the timely gradient
		 * is:
		 * grad = rc_rtt_diff / min_rtt;
		 *
		 * Anything below or equal to 0 is
		 * an increase indication. Anything above
		 * zero is a decrease. Note we take care
		 * of the actual gradient calculation
		 * in the reduction (it's not needed for
		 * the increase).
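		 *
		 * (Added, hypothetical numbers.)  With a filtered minimum RTT
		 * of 10000 usec, a rack_gp_rtt_maxmul of, say, 3 puts the
		 * "RTT too high" cut above at 30000 usec (timely_says = 2),
		 * and a rack_gp_rtt_minmul/rack_gp_rtt_mindiv of, say, 1/4
		 * puts the "low enough" cut at 12500 usec (timely_says = 0);
		 * only RTTs between those two land here and are judged by the
		 * sign of rc_rtt_diff.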
4055 */ 4056 log_mult = prev_rtt; 4057 if (rtt_diff <= 0) { 4058 /* 4059 * Rttdiff is less than zero, increase the 4060 * b/w multiplier (its 0 or negative) 4061 */ 4062 timely_says = 0; 4063 rack_log_timely(rack, timely_says, log_mult, 4064 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); 4065 } else { 4066 /* Reduce the b/w multiplier */ 4067 timely_says = 1; 4068 rack_log_timely(rack, timely_says, log_mult, 4069 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); 4070 } 4071 } 4072 return (timely_says); 4073 } 4074 4075 static void 4076 rack_do_goodput_measurement(struct tcpcb *tp, struct tcp_rack *rack, 4077 tcp_seq th_ack, int line, uint8_t quality) 4078 { 4079 uint64_t tim, bytes_ps, ltim, stim, utim; 4080 uint32_t segsiz, bytes, reqbytes, us_cts; 4081 int32_t gput, new_rtt_diff, timely_says; 4082 uint64_t resid_bw, subpart = 0, addpart = 0, srtt; 4083 int did_add = 0; 4084 4085 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4086 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 4087 if (TSTMP_GEQ(us_cts, tp->gput_ts)) 4088 tim = us_cts - tp->gput_ts; 4089 else 4090 tim = 0; 4091 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) 4092 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; 4093 else 4094 stim = 0; 4095 /* 4096 * Use the larger of the send time or ack time. This prevents us 4097 * from being influenced by ack artifacts to come up with too 4098 * high of measurement. Note that since we are spanning over many more 4099 * bytes in most of our measurements hopefully that is less likely to 4100 * occur. 4101 */ 4102 if (tim > stim) 4103 utim = max(tim, 1); 4104 else 4105 utim = max(stim, 1); 4106 /* Lets get a msec time ltim too for the old stuff */ 4107 ltim = max(1, (utim / HPTS_USEC_IN_MSEC)); 4108 gput = (((uint64_t) (th_ack - tp->gput_seq)) << 3) / ltim; 4109 reqbytes = min(rc_init_window(rack), (MIN_GP_WIN * segsiz)); 4110 if ((tim == 0) && (stim == 0)) { 4111 /* 4112 * Invalid measurement time, maybe 4113 * all on one ack/one send? 4114 */ 4115 bytes = 0; 4116 bytes_ps = 0; 4117 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4118 0, 0, 0, 10, __LINE__, NULL, quality); 4119 goto skip_measurement; 4120 } 4121 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { 4122 /* We never made a us_rtt measurement? */ 4123 bytes = 0; 4124 bytes_ps = 0; 4125 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4126 0, 0, 0, 10, __LINE__, NULL, quality); 4127 goto skip_measurement; 4128 } 4129 /* 4130 * Calculate the maximum possible b/w this connection 4131 * could have. We base our calculation on the lowest 4132 * rtt we have seen during the measurement and the 4133 * largest rwnd the client has given us in that time. This 4134 * forms a BDP that is the maximum that we could ever 4135 * get to the client. Anything larger is not valid. 4136 * 4137 * I originally had code here that rejected measurements 4138 * where the time was less than 1/2 the latest us_rtt. 4139 * But after thinking on that I realized its wrong since 4140 * say you had a 150Mbps or even 1Gbps link, and you 4141 * were a long way away.. example I am in Europe (100ms rtt) 4142 * talking to my 1Gbps link in S.C. Now measuring say 150,000 4143 * bytes my time would be 1.2ms, and yet my rtt would say 4144 * the measurement was invalid the time was < 50ms. The 4145 * same thing is true for 150Mb (8ms of time). 
4146 * 4147 * A better way I realized is to look at what the maximum 4148 * the connection could possibly do. This is gated on 4149 * the lowest RTT we have seen and the highest rwnd. 4150 * We should in theory never exceed that, if we are 4151 * then something on the path is storing up packets 4152 * and then feeding them all at once to our endpoint 4153 * messing up our measurement. 4154 */ 4155 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; 4156 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; 4157 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; 4158 if (SEQ_LT(th_ack, tp->gput_seq)) { 4159 /* No measurement can be made */ 4160 bytes = 0; 4161 bytes_ps = 0; 4162 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4163 0, 0, 0, 10, __LINE__, NULL, quality); 4164 goto skip_measurement; 4165 } else 4166 bytes = (th_ack - tp->gput_seq); 4167 bytes_ps = (uint64_t)bytes; 4168 /* 4169 * Don't measure a b/w for pacing unless we have gotten at least 4170 * an initial windows worth of data in this measurement interval. 4171 * 4172 * Small numbers of bytes get badly influenced by delayed ack and 4173 * other artifacts. Note we take the initial window or our 4174 * defined minimum GP (defaulting to 10 which hopefully is the 4175 * IW). 4176 */ 4177 if (rack->rc_gp_filled == 0) { 4178 /* 4179 * The initial estimate is special. We 4180 * have blasted out an IW worth of packets 4181 * without a real valid ack ts results. We 4182 * then setup the app_limited_needs_set flag, 4183 * this should get the first ack in (probably 2 4184 * MSS worth) to be recorded as the timestamp. 4185 * We thus allow a smaller number of bytes i.e. 4186 * IW - 2MSS. 4187 */ 4188 reqbytes -= (2 * segsiz); 4189 /* Also lets fill previous for our first measurement to be neutral */ 4190 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4191 } 4192 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { 4193 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4194 rack->r_ctl.rc_app_limited_cnt, 4195 0, 0, 10, __LINE__, NULL, quality); 4196 goto skip_measurement; 4197 } 4198 /* 4199 * We now need to calculate the Timely like status so 4200 * we can update (possibly) the b/w multipliers. 4201 */ 4202 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; 4203 if (rack->rc_gp_filled == 0) { 4204 /* No previous reading */ 4205 rack->r_ctl.rc_rtt_diff = new_rtt_diff; 4206 } else { 4207 if (rack->measure_saw_probe_rtt == 0) { 4208 /* 4209 * We don't want a probertt to be counted 4210 * since it will be negative incorrectly. We 4211 * expect to be reducing the RTT when we 4212 * pace at a slower rate. 4213 */ 4214 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); 4215 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); 4216 } 4217 } 4218 timely_says = rack_make_timely_judgement(rack, 4219 rack->r_ctl.rc_gp_srtt, 4220 rack->r_ctl.rc_rtt_diff, 4221 rack->r_ctl.rc_prev_gp_srtt 4222 ); 4223 bytes_ps *= HPTS_USEC_IN_SEC; 4224 bytes_ps /= utim; 4225 if (bytes_ps > rack->r_ctl.last_max_bw) { 4226 /* 4227 * Something is on path playing 4228 * since this b/w is not possible based 4229 * on our BDP (highest rwnd and lowest rtt 4230 * we saw in the measurement window). 4231 * 4232 * Another option here would be to 4233 * instead skip the measurement. 
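		 *
		 * (Added, hypothetical numbers.)  last_max_bw was set above to
		 * rc_gp_high_rwnd * HPTS_USEC_IN_SEC / rc_gp_lowrtt, a
		 * bytes-per-second ceiling from the best-case BDP.  For
		 * instance, a 1 MB peak rwnd over a 10000 usec lowest RTT
		 * caps any believable estimate at roughly 100 MB/s, and a
		 * larger bytes_ps is clamped to that rather than fed into
		 * the average.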
4234 */ 4235 rack_log_pacing_delay_calc(rack, bytes, reqbytes, 4236 bytes_ps, rack->r_ctl.last_max_bw, 0, 4237 11, __LINE__, NULL, quality); 4238 bytes_ps = rack->r_ctl.last_max_bw; 4239 } 4240 /* We store gp for b/w in bytes per second */ 4241 if (rack->rc_gp_filled == 0) { 4242 /* Initial measurement */ 4243 if (bytes_ps) { 4244 rack->r_ctl.gp_bw = bytes_ps; 4245 rack->rc_gp_filled = 1; 4246 rack->r_ctl.num_measurements = 1; 4247 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 4248 } else { 4249 rack_log_pacing_delay_calc(rack, bytes_ps, reqbytes, 4250 rack->r_ctl.rc_app_limited_cnt, 4251 0, 0, 10, __LINE__, NULL, quality); 4252 } 4253 if (tcp_in_hpts(rack->rc_inp) && 4254 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 4255 /* 4256 * Ok we can't trust the pacer in this case 4257 * where we transition from un-paced to paced. 4258 * Or for that matter when the burst mitigation 4259 * was making a wild guess and got it wrong. 4260 * Stop the pacer and clear up all the aggregate 4261 * delays etc. 4262 */ 4263 tcp_hpts_remove(rack->rc_inp); 4264 rack->r_ctl.rc_hpts_flags = 0; 4265 rack->r_ctl.rc_last_output_to = 0; 4266 } 4267 did_add = 2; 4268 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { 4269 /* Still a small number run an average */ 4270 rack->r_ctl.gp_bw += bytes_ps; 4271 addpart = rack->r_ctl.num_measurements; 4272 rack->r_ctl.num_measurements++; 4273 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { 4274 /* We have collected enough to move forward */ 4275 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; 4276 } 4277 did_add = 3; 4278 } else { 4279 /* 4280 * We want to take 1/wma of the goodput and add in to 7/8th 4281 * of the old value weighted by the srtt. So if your measurement 4282 * period is say 2 SRTT's long you would get 1/4 as the 4283 * value, if it was like 1/2 SRTT then you would get 1/16th. 4284 * 4285 * But we must be careful not to take too much i.e. if the 4286 * srtt is say 20ms and the measurement is taken over 4287 * 400ms our weight would be 400/20 i.e. 20. On the 4288 * other hand if we get a measurement over 1ms with a 4289 * 10ms rtt we only want to take a much smaller portion. 4290 */ 4291 if (rack->r_ctl.num_measurements < 0xff) { 4292 rack->r_ctl.num_measurements++; 4293 } 4294 srtt = (uint64_t)tp->t_srtt; 4295 if (srtt == 0) { 4296 /* 4297 * Strange why did t_srtt go back to zero? 4298 */ 4299 if (rack->r_ctl.rc_rack_min_rtt) 4300 srtt = rack->r_ctl.rc_rack_min_rtt; 4301 else 4302 srtt = HPTS_USEC_IN_MSEC; 4303 } 4304 /* 4305 * XXXrrs: Note for reviewers, in playing with 4306 * dynamic pacing I discovered this GP calculation 4307 * as done originally leads to some undesired results. 4308 * Basically you can get longer measurements contributing 4309 * too much to the WMA. Thus I changed it if you are doing 4310 * dynamic adjustments to only do the aportioned adjustment 4311 * if we have a very small (time wise) measurement. Longer 4312 * measurements just get there weight (defaulting to 1/8) 4313 * add to the WMA. We may want to think about changing 4314 * this to always do that for both sides i.e. dynamic 4315 * and non-dynamic... but considering lots of folks 4316 * were playing with this I did not want to change the 4317 * calculation per.se. without your thoughts.. Lawerence? 4318 * Peter?? 
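		 *
		 * (Added, hypothetical numbers.)  In the dynamic branch below,
		 * a measurement spanning less than one SRTT contributes only
		 * utim / (srtt * rack_wma_divisor) of its value, while a
		 * longer one contributes a flat 1/rack_wma_divisor (1/8 by
		 * the default mentioned above).  E.g. with gp_bw at 80 MB/s
		 * and a full-length 96 MB/s sample, the new average is
		 * 80 - 80/8 + 96/8 = 82 MB/s.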
4319 */ 4320 if (rack->rc_gp_dyn_mul == 0) { 4321 subpart = rack->r_ctl.gp_bw * utim; 4322 subpart /= (srtt * 8); 4323 if (subpart < (rack->r_ctl.gp_bw / 2)) { 4324 /* 4325 * The b/w update takes no more 4326 * away then 1/2 our running total 4327 * so factor it in. 4328 */ 4329 addpart = bytes_ps * utim; 4330 addpart /= (srtt * 8); 4331 } else { 4332 /* 4333 * Don't allow a single measurement 4334 * to account for more than 1/2 of the 4335 * WMA. This could happen on a retransmission 4336 * where utim becomes huge compared to 4337 * srtt (multiple retransmissions when using 4338 * the sending rate which factors in all the 4339 * transmissions from the first one). 4340 */ 4341 subpart = rack->r_ctl.gp_bw / 2; 4342 addpart = bytes_ps / 2; 4343 } 4344 resid_bw = rack->r_ctl.gp_bw - subpart; 4345 rack->r_ctl.gp_bw = resid_bw + addpart; 4346 did_add = 1; 4347 } else { 4348 if ((utim / srtt) <= 1) { 4349 /* 4350 * The b/w update was over a small period 4351 * of time. The idea here is to prevent a small 4352 * measurement time period from counting 4353 * too much. So we scale it based on the 4354 * time so it attributes less than 1/rack_wma_divisor 4355 * of its measurement. 4356 */ 4357 subpart = rack->r_ctl.gp_bw * utim; 4358 subpart /= (srtt * rack_wma_divisor); 4359 addpart = bytes_ps * utim; 4360 addpart /= (srtt * rack_wma_divisor); 4361 } else { 4362 /* 4363 * The scaled measurement was long 4364 * enough so lets just add in the 4365 * portion of the measurement i.e. 1/rack_wma_divisor 4366 */ 4367 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; 4368 addpart = bytes_ps / rack_wma_divisor; 4369 } 4370 if ((rack->measure_saw_probe_rtt == 0) || 4371 (bytes_ps > rack->r_ctl.gp_bw)) { 4372 /* 4373 * For probe-rtt we only add it in 4374 * if its larger, all others we just 4375 * add in. 4376 */ 4377 did_add = 1; 4378 resid_bw = rack->r_ctl.gp_bw - subpart; 4379 rack->r_ctl.gp_bw = resid_bw + addpart; 4380 } 4381 } 4382 } 4383 if ((rack->gp_ready == 0) && 4384 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 4385 /* We have enough measurements now */ 4386 rack->gp_ready = 1; 4387 rack_set_cc_pacing(rack); 4388 if (rack->defer_options) 4389 rack_apply_deferred_options(rack); 4390 } 4391 rack_log_pacing_delay_calc(rack, subpart, addpart, bytes_ps, stim, 4392 rack_get_bw(rack), 22, did_add, NULL, quality); 4393 /* We do not update any multipliers if we are in or have seen a probe-rtt */ 4394 if ((rack->measure_saw_probe_rtt == 0) && rack->rc_gp_rtt_set) 4395 rack_update_multiplier(rack, timely_says, bytes_ps, 4396 rack->r_ctl.rc_gp_srtt, 4397 rack->r_ctl.rc_rtt_diff); 4398 rack_log_pacing_delay_calc(rack, bytes, tim, bytes_ps, stim, 4399 rack_get_bw(rack), 3, line, NULL, quality); 4400 /* reset the gp srtt and setup the new prev */ 4401 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; 4402 /* Record the lost count for the next measurement */ 4403 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; 4404 /* 4405 * We restart our diffs based on the gpsrtt in the 4406 * measurement window. 4407 */ 4408 rack->rc_gp_rtt_set = 0; 4409 rack->rc_gp_saw_rec = 0; 4410 rack->rc_gp_saw_ca = 0; 4411 rack->rc_gp_saw_ss = 0; 4412 rack->rc_dragged_bottom = 0; 4413 skip_measurement: 4414 4415 #ifdef STATS 4416 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 4417 gput); 4418 /* 4419 * XXXLAS: This is a temporary hack, and should be 4420 * chained off VOI_TCP_GPUT when stats(9) grows an 4421 * API to deal with chained VOIs. 
4422 */ 4423 if (tp->t_stats_gput_prev > 0) 4424 stats_voi_update_abs_s32(tp->t_stats, 4425 VOI_TCP_GPUT_ND, 4426 ((gput - tp->t_stats_gput_prev) * 100) / 4427 tp->t_stats_gput_prev); 4428 #endif 4429 tp->t_flags &= ~TF_GPUTINPROG; 4430 tp->t_stats_gput_prev = gput; 4431 /* 4432 * Now are we app limited now and there is space from where we 4433 * were to where we want to go? 4434 * 4435 * We don't do the other case i.e. non-applimited here since 4436 * the next send will trigger us picking up the missing data. 4437 */ 4438 if (rack->r_ctl.rc_first_appl && 4439 TCPS_HAVEESTABLISHED(tp->t_state) && 4440 rack->r_ctl.rc_app_limited_cnt && 4441 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && 4442 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > 4443 max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { 4444 /* 4445 * Yep there is enough outstanding to make a measurement here. 4446 */ 4447 struct rack_sendmap *rsm, fe; 4448 4449 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 4450 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 4451 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 4452 rack->app_limited_needs_set = 0; 4453 tp->gput_seq = th_ack; 4454 if (rack->in_probe_rtt) 4455 rack->measure_saw_probe_rtt = 1; 4456 else if ((rack->measure_saw_probe_rtt) && 4457 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 4458 rack->measure_saw_probe_rtt = 0; 4459 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { 4460 /* There is a full window to gain info from */ 4461 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 4462 } else { 4463 /* We can only measure up to the applimited point */ 4464 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); 4465 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 4466 /* 4467 * We don't have enough to make a measurement. 4468 */ 4469 tp->t_flags &= ~TF_GPUTINPROG; 4470 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 4471 0, 0, 0, 6, __LINE__, NULL, quality); 4472 return; 4473 } 4474 } 4475 if (tp->t_state >= TCPS_FIN_WAIT_1) { 4476 /* 4477 * We will get no more data into the SB 4478 * this means we need to have the data available 4479 * before we start a measurement. 4480 */ 4481 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { 4482 /* Nope not enough data. */ 4483 return; 4484 } 4485 } 4486 tp->t_flags |= TF_GPUTINPROG; 4487 /* 4488 * Now we need to find the timestamp of the send at tp->gput_seq 4489 * for the send based measurement. 4490 */ 4491 fe.r_start = tp->gput_seq; 4492 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 4493 if (rsm) { 4494 /* Ok send-based limit is set */ 4495 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { 4496 /* 4497 * Move back to include the earlier part 4498 * so our ack time lines up right (this may 4499 * make an overlapping measurement but thats 4500 * ok). 4501 */ 4502 tp->gput_seq = rsm->r_start; 4503 } 4504 if (rsm->r_flags & RACK_ACKED) 4505 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; 4506 else 4507 rack->app_limited_needs_set = 1; 4508 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 4509 } else { 4510 /* 4511 * If we don't find the rsm due to some 4512 * send-limit set the current time, which 4513 * basically disables the send-limit. 
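			 *
			 * (Added explanation.)  Either way, the window just set
			 * up runs from gput_seq = th_ack to gput_ack, which is
			 * either a full rack_get_measure_window() worth of data
			 * or, when the app-limited point comes first, only up to
			 * that point; anything shorter than MIN_GP_WIN segments
			 * was rejected above by clearing TF_GPUTINPROG.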
4514 */ 4515 struct timeval tv; 4516 4517 microuptime(&tv); 4518 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 4519 } 4520 rack_log_pacing_delay_calc(rack, 4521 tp->gput_seq, 4522 tp->gput_ack, 4523 (uint64_t)rsm, 4524 tp->gput_ts, 4525 rack->r_ctl.rc_app_limited_cnt, 4526 9, 4527 __LINE__, NULL, quality); 4528 } 4529 } 4530 4531 /* 4532 * CC wrapper hook functions 4533 */ 4534 static void 4535 rack_ack_received(struct tcpcb *tp, struct tcp_rack *rack, uint32_t th_ack, uint16_t nsegs, 4536 uint16_t type, int32_t recovery) 4537 { 4538 uint32_t prior_cwnd, acked; 4539 struct tcp_log_buffer *lgb = NULL; 4540 uint8_t labc_to_use, quality; 4541 4542 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4543 tp->t_ccv.nsegs = nsegs; 4544 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); 4545 if ((recovery) && (rack->r_ctl.rc_early_recovery_segs)) { 4546 uint32_t max; 4547 4548 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); 4549 if (tp->t_ccv.bytes_this_ack > max) { 4550 tp->t_ccv.bytes_this_ack = max; 4551 } 4552 } 4553 #ifdef STATS 4554 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, 4555 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); 4556 #endif 4557 quality = RACK_QUALITY_NONE; 4558 if ((tp->t_flags & TF_GPUTINPROG) && 4559 rack_enough_for_measurement(tp, rack, th_ack, &quality)) { 4560 /* Measure the Goodput */ 4561 rack_do_goodput_measurement(tp, rack, th_ack, __LINE__, quality); 4562 #ifdef NETFLIX_PEAKRATE 4563 if ((type == CC_ACK) && 4564 (tp->t_maxpeakrate)) { 4565 /* 4566 * We update t_peakrate_thr. This gives us roughly 4567 * one update per round trip time. Note 4568 * it will only be used if pace_always is off i.e 4569 * we don't do this for paced flows. 4570 */ 4571 rack_update_peakrate_thr(tp); 4572 } 4573 #endif 4574 } 4575 /* Which way our we limited, if not cwnd limited no advance in CA */ 4576 if (tp->snd_cwnd <= tp->snd_wnd) 4577 tp->t_ccv.flags |= CCF_CWND_LIMITED; 4578 else 4579 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; 4580 if (tp->snd_cwnd > tp->snd_ssthresh) { 4581 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, 4582 nsegs * V_tcp_abc_l_var * ctf_fixed_maxseg(tp)); 4583 /* For the setting of a window past use the actual scwnd we are using */ 4584 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { 4585 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; 4586 tp->t_ccv.flags |= CCF_ABC_SENTAWND; 4587 } 4588 } else { 4589 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; 4590 tp->t_bytes_acked = 0; 4591 } 4592 prior_cwnd = tp->snd_cwnd; 4593 if ((recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || 4594 (rack_client_low_buf && (rack->client_bufferlvl < rack_client_low_buf))) 4595 labc_to_use = rack->rc_labc; 4596 else 4597 labc_to_use = rack_max_abc_post_recovery; 4598 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4599 union tcp_log_stackspecific log; 4600 struct timeval tv; 4601 4602 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4603 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4604 log.u_bbr.flex1 = th_ack; 4605 log.u_bbr.flex2 = tp->t_ccv.flags; 4606 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4607 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4608 log.u_bbr.flex5 = labc_to_use; 4609 log.u_bbr.flex6 = prior_cwnd; 4610 log.u_bbr.flex7 = V_tcp_do_newsack; 4611 log.u_bbr.flex8 = 1; 4612 lgb = tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4613 0, &log, false, NULL, NULL, 0, &tv); 4614 } 4615 if (CC_ALGO(tp)->ack_received != NULL) { 4616 /* XXXLAS: Find a way to live without this */ 4617 tp->t_ccv.curack = th_ack; 
4618 tp->t_ccv.labc = labc_to_use; 4619 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; 4620 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); 4621 } 4622 if (lgb) { 4623 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; 4624 } 4625 if (rack->r_must_retran) { 4626 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { 4627 /* 4628 * We now are beyond the rxt point so lets disable 4629 * the flag. 4630 */ 4631 rack->r_ctl.rc_out_at_rto = 0; 4632 rack->r_must_retran = 0; 4633 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { 4634 /* 4635 * Only decrement the rc_out_at_rto if the cwnd advances 4636 * at least a whole segment. Otherwise next time the peer 4637 * acks, we won't be able to send this generaly happens 4638 * when we are in Congestion Avoidance. 4639 */ 4640 if (acked <= rack->r_ctl.rc_out_at_rto){ 4641 rack->r_ctl.rc_out_at_rto -= acked; 4642 } else { 4643 rack->r_ctl.rc_out_at_rto = 0; 4644 } 4645 } 4646 } 4647 #ifdef STATS 4648 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); 4649 #endif 4650 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { 4651 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; 4652 } 4653 #ifdef NETFLIX_PEAKRATE 4654 /* we enforce max peak rate if it is set and we are not pacing */ 4655 if ((rack->rc_always_pace == 0) && 4656 tp->t_peakrate_thr && 4657 (tp->snd_cwnd > tp->t_peakrate_thr)) { 4658 tp->snd_cwnd = tp->t_peakrate_thr; 4659 } 4660 #endif 4661 } 4662 4663 static void 4664 tcp_rack_partialack(struct tcpcb *tp) 4665 { 4666 struct tcp_rack *rack; 4667 4668 rack = (struct tcp_rack *)tp->t_fb_ptr; 4669 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4670 /* 4671 * If we are doing PRR and have enough 4672 * room to send <or> we are pacing and prr 4673 * is disabled we will want to see if we 4674 * can send data (by setting r_wanted_output to 4675 * true). 4676 */ 4677 if ((rack->r_ctl.rc_prr_sndcnt > 0) || 4678 rack->rack_no_prr) 4679 rack->r_wanted_output = 1; 4680 } 4681 4682 static void 4683 rack_post_recovery(struct tcpcb *tp, uint32_t th_ack) 4684 { 4685 struct tcp_rack *rack; 4686 uint32_t orig_cwnd; 4687 4688 orig_cwnd = tp->snd_cwnd; 4689 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4690 rack = (struct tcp_rack *)tp->t_fb_ptr; 4691 /* only alert CC if we alerted when we entered */ 4692 if (CC_ALGO(tp)->post_recovery != NULL) { 4693 tp->t_ccv.curack = th_ack; 4694 CC_ALGO(tp)->post_recovery(&tp->t_ccv); 4695 if (tp->snd_cwnd < tp->snd_ssthresh) { 4696 /* 4697 * Rack has burst control and pacing 4698 * so lets not set this any lower than 4699 * snd_ssthresh per RFC-6582 (option 2). 
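 * As an illustration with made-up numbers: if ssthresh is ten
 * segments and the CC module's post_recovery handler would leave
 * cwnd at seven, we put cwnd back to ten; the pacer and burst
 * limiter keep that from turning into an immediate burst.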
4700 */ 4701 tp->snd_cwnd = tp->snd_ssthresh; 4702 } 4703 } 4704 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 4705 union tcp_log_stackspecific log; 4706 struct timeval tv; 4707 4708 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 4709 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 4710 log.u_bbr.flex1 = th_ack; 4711 log.u_bbr.flex2 = tp->t_ccv.flags; 4712 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; 4713 log.u_bbr.flex4 = tp->t_ccv.nsegs; 4714 log.u_bbr.flex5 = V_tcp_abc_l_var; 4715 log.u_bbr.flex6 = orig_cwnd; 4716 log.u_bbr.flex7 = V_tcp_do_newsack; 4717 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; 4718 log.u_bbr.flex8 = 2; 4719 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 4720 0, &log, false, NULL, NULL, 0, &tv); 4721 } 4722 if ((rack->rack_no_prr == 0) && 4723 (rack->no_prr_addback == 0) && 4724 (rack->r_ctl.rc_prr_sndcnt > 0)) { 4725 /* 4726 * Suck the next prr cnt back into cwnd, but 4727 * only do that if we are not application limited. 4728 */ 4729 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { 4730 /* 4731 * We are allowed to add back to the cwnd the amount we did 4732 * not get out if: 4733 * a) no_prr_addback is off. 4734 * b) we are not app limited 4735 * c) we are doing prr 4736 * <and> 4737 * d) it is bounded by rack_prr_addbackmax (if addback is 0, then none). 4738 */ 4739 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), 4740 rack->r_ctl.rc_prr_sndcnt); 4741 } 4742 rack->r_ctl.rc_prr_sndcnt = 0; 4743 rack_log_to_prr(rack, 1, 0, __LINE__); 4744 } 4745 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 4746 tp->snd_recover = tp->snd_una; 4747 if (rack->r_ctl.dsack_persist) { 4748 rack->r_ctl.dsack_persist--; 4749 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 4750 rack->r_ctl.num_dsack = 0; 4751 } 4752 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 4753 } 4754 EXIT_RECOVERY(tp->t_flags); 4755 } 4756 4757 static void 4758 rack_cong_signal(struct tcpcb *tp, uint32_t type, uint32_t ack, int line) 4759 { 4760 struct tcp_rack *rack; 4761 uint32_t ssthresh_enter, cwnd_enter, in_rec_at_entry, orig_cwnd; 4762 4763 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4764 #ifdef STATS 4765 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); 4766 #endif 4767 if (IN_RECOVERY(tp->t_flags) == 0) { 4768 in_rec_at_entry = 0; 4769 ssthresh_enter = tp->snd_ssthresh; 4770 cwnd_enter = tp->snd_cwnd; 4771 } else 4772 in_rec_at_entry = 1; 4773 rack = (struct tcp_rack *)tp->t_fb_ptr; 4774 switch (type) { 4775 case CC_NDUPACK: 4776 tp->t_flags &= ~TF_WASFRECOVERY; 4777 tp->t_flags &= ~TF_WASCRECOVERY; 4778 if (!IN_FASTRECOVERY(tp->t_flags)) { 4779 rack->r_ctl.rc_prr_delivered = 0; 4780 rack->r_ctl.rc_prr_out = 0; 4781 if (rack->rack_no_prr == 0) { 4782 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 4783 rack_log_to_prr(rack, 2, in_rec_at_entry, line); 4784 } 4785 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; 4786 tp->snd_recover = tp->snd_max; 4787 if (tp->t_flags2 & TF2_ECN_PERMIT) 4788 tp->t_flags2 |= TF2_ECN_SND_CWR; 4789 } 4790 break; 4791 case CC_ECN: 4792 if (!IN_CONGRECOVERY(tp->t_flags) || 4793 /* 4794 * Allow ECN reaction on ACK to CWR, if 4795 * that data segment was also CE marked. 
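 * (That is, once the ACK has reached snd_recover the earlier CWR
 * episode is complete, so a new CE mark may start a fresh
 * congestion-recovery cycle instead of being absorbed by the old
 * one.)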
4796 */ 4797 SEQ_GEQ(ack, tp->snd_recover)) { 4798 EXIT_CONGRECOVERY(tp->t_flags); 4799 KMOD_TCPSTAT_INC(tcps_ecn_rcwnd); 4800 tp->snd_recover = tp->snd_max + 1; 4801 if (tp->t_flags2 & TF2_ECN_PERMIT) 4802 tp->t_flags2 |= TF2_ECN_SND_CWR; 4803 } 4804 break; 4805 case CC_RTO: 4806 tp->t_dupacks = 0; 4807 tp->t_bytes_acked = 0; 4808 EXIT_RECOVERY(tp->t_flags); 4809 tp->snd_ssthresh = max(2, min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / 4810 ctf_fixed_maxseg(tp)) * ctf_fixed_maxseg(tp); 4811 orig_cwnd = tp->snd_cwnd; 4812 tp->snd_cwnd = ctf_fixed_maxseg(tp); 4813 rack_log_to_prr(rack, 16, orig_cwnd, line); 4814 if (tp->t_flags2 & TF2_ECN_PERMIT) 4815 tp->t_flags2 |= TF2_ECN_SND_CWR; 4816 break; 4817 case CC_RTO_ERR: 4818 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4819 /* RTO was unnecessary, so reset everything. */ 4820 tp->snd_cwnd = tp->snd_cwnd_prev; 4821 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4822 tp->snd_recover = tp->snd_recover_prev; 4823 if (tp->t_flags & TF_WASFRECOVERY) { 4824 ENTER_FASTRECOVERY(tp->t_flags); 4825 tp->t_flags &= ~TF_WASFRECOVERY; 4826 } 4827 if (tp->t_flags & TF_WASCRECOVERY) { 4828 ENTER_CONGRECOVERY(tp->t_flags); 4829 tp->t_flags &= ~TF_WASCRECOVERY; 4830 } 4831 tp->snd_nxt = tp->snd_max; 4832 tp->t_badrxtwin = 0; 4833 break; 4834 } 4835 if ((CC_ALGO(tp)->cong_signal != NULL) && 4836 (type != CC_RTO)){ 4837 tp->t_ccv.curack = ack; 4838 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); 4839 } 4840 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { 4841 rack_log_to_prr(rack, 15, cwnd_enter, line); 4842 rack->r_ctl.dsack_byte_cnt = 0; 4843 rack->r_ctl.retran_during_recovery = 0; 4844 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; 4845 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; 4846 rack->r_ent_rec_ns = 1; 4847 } 4848 } 4849 4850 static inline void 4851 rack_cc_after_idle(struct tcp_rack *rack, struct tcpcb *tp) 4852 { 4853 uint32_t i_cwnd; 4854 4855 INP_WLOCK_ASSERT(tptoinpcb(tp)); 4856 4857 #ifdef NETFLIX_STATS 4858 KMOD_TCPSTAT_INC(tcps_idle_restarts); 4859 if (tp->t_state == TCPS_ESTABLISHED) 4860 KMOD_TCPSTAT_INC(tcps_idle_estrestarts); 4861 #endif 4862 if (CC_ALGO(tp)->after_idle != NULL) 4863 CC_ALGO(tp)->after_idle(&tp->t_ccv); 4864 4865 if (tp->snd_cwnd == 1) 4866 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 4867 else 4868 i_cwnd = rc_init_window(rack); 4869 4870 /* 4871 * Being idle is no different than the initial window. If the cc 4872 * clamps it down below the initial window raise it to the initial 4873 * window. 4874 */ 4875 if (tp->snd_cwnd < i_cwnd) { 4876 tp->snd_cwnd = i_cwnd; 4877 } 4878 } 4879 4880 /* 4881 * Indicate whether this ack should be delayed. We can delay the ack if 4882 * following conditions are met: 4883 * - There is no delayed ack timer in progress. 4884 * - Our last ack wasn't a 0-sized window. We never want to delay 4885 * the ack that opens up a 0-sized window. 4886 * - LRO wasn't used for this segment. We make sure by checking that the 4887 * segment size is not larger than the MSS. 4888 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4889 * connection. 4890 */ 4891 #define DELAY_ACK(tp, tlen) \ 4892 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4893 ((tp->t_flags & TF_DELACK) == 0) && \ 4894 (tlen <= tp->t_maxseg) && \ 4895 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4896 4897 static struct rack_sendmap * 4898 rack_find_lowest_rsm(struct tcp_rack *rack) 4899 { 4900 struct rack_sendmap *rsm; 4901 4902 /* 4903 * Walk the time-order transmitted list looking for an rsm that is 4904 * not acked. 
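 * (SACKed blocks carry RACK_ACKED and are skipped.)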
This will be the one that was sent the longest time 4905 * ago that is still outstanding. 4906 */ 4907 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 4908 if (rsm->r_flags & RACK_ACKED) { 4909 continue; 4910 } 4911 goto finish; 4912 } 4913 finish: 4914 return (rsm); 4915 } 4916 4917 static struct rack_sendmap * 4918 rack_find_high_nonack(struct tcp_rack *rack, struct rack_sendmap *rsm) 4919 { 4920 struct rack_sendmap *prsm; 4921 4922 /* 4923 * Walk the sequence order list backward until we hit and arrive at 4924 * the highest seq not acked. In theory when this is called it 4925 * should be the last segment (which it was not). 4926 */ 4927 prsm = rsm; 4928 RB_FOREACH_REVERSE_FROM(prsm, rack_rb_tree_head, rsm) { 4929 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { 4930 continue; 4931 } 4932 return (prsm); 4933 } 4934 return (NULL); 4935 } 4936 4937 static uint32_t 4938 rack_calc_thresh_rack(struct tcp_rack *rack, uint32_t srtt, uint32_t cts) 4939 { 4940 int32_t lro; 4941 uint32_t thresh; 4942 4943 /* 4944 * lro is the flag we use to determine if we have seen reordering. 4945 * If it gets set we have seen reordering. The reorder logic either 4946 * works in one of two ways: 4947 * 4948 * If reorder-fade is configured, then we track the last time we saw 4949 * re-ordering occur. If we reach the point where enough time as 4950 * passed we no longer consider reordering has occuring. 4951 * 4952 * Or if reorder-face is 0, then once we see reordering we consider 4953 * the connection to alway be subject to reordering and just set lro 4954 * to 1. 4955 * 4956 * In the end if lro is non-zero we add the extra time for 4957 * reordering in. 4958 */ 4959 if (srtt == 0) 4960 srtt = 1; 4961 if (rack->r_ctl.rc_reorder_ts) { 4962 if (rack->r_ctl.rc_reorder_fade) { 4963 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { 4964 lro = cts - rack->r_ctl.rc_reorder_ts; 4965 if (lro == 0) { 4966 /* 4967 * No time as passed since the last 4968 * reorder, mark it as reordering. 4969 */ 4970 lro = 1; 4971 } 4972 } else { 4973 /* Negative time? */ 4974 lro = 0; 4975 } 4976 if (lro > rack->r_ctl.rc_reorder_fade) { 4977 /* Turn off reordering seen too */ 4978 rack->r_ctl.rc_reorder_ts = 0; 4979 lro = 0; 4980 } 4981 } else { 4982 /* Reodering does not fade */ 4983 lro = 1; 4984 } 4985 } else { 4986 lro = 0; 4987 } 4988 if (rack->rc_rack_tmr_std_based == 0) { 4989 thresh = srtt + rack->r_ctl.rc_pkt_delay; 4990 } else { 4991 /* Standards based pkt-delay is 1/4 srtt */ 4992 thresh = srtt + (srtt >> 2); 4993 } 4994 if (lro && (rack->rc_rack_tmr_std_based == 0)) { 4995 /* It must be set, if not you get 1/4 rtt */ 4996 if (rack->r_ctl.rc_reorder_shift) 4997 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); 4998 else 4999 thresh += (srtt >> 2); 5000 } 5001 if (rack->rc_rack_use_dsack && 5002 lro && 5003 (rack->r_ctl.num_dsack > 0)) { 5004 /* 5005 * We only increase the reordering window if we 5006 * have seen reordering <and> we have a DSACK count. 
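 * As a worked example with made-up values (non standards-based
 * timer mode): srtt = 40000 usecs, rc_pkt_delay = 1000,
 * rc_reorder_shift = 2 and num_dsack = 1 gives
 * thresh = 40000 + 1000 + 10000 + 10000 = 61000 usecs, which is
 * still below the 2 * srtt ceiling applied below.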
5007 */ 5008 thresh += rack->r_ctl.num_dsack * (srtt >> 2); 5009 rack_log_dsack_event(rack, 4, __LINE__, srtt, thresh); 5010 } 5011 /* SRTT * 2 is the ceiling */ 5012 if (thresh > (srtt * 2)) { 5013 thresh = srtt * 2; 5014 } 5015 /* And we don't want it above the RTO max either */ 5016 if (thresh > rack_rto_max) { 5017 thresh = rack_rto_max; 5018 } 5019 rack_log_dsack_event(rack, 6, __LINE__, srtt, thresh); 5020 return (thresh); 5021 } 5022 5023 static uint32_t 5024 rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack, 5025 struct rack_sendmap *rsm, uint32_t srtt) 5026 { 5027 struct rack_sendmap *prsm; 5028 uint32_t thresh, len; 5029 int segsiz; 5030 5031 if (srtt == 0) 5032 srtt = 1; 5033 if (rack->r_ctl.rc_tlp_threshold) 5034 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); 5035 else 5036 thresh = (srtt * 2); 5037 5038 /* Get the previous sent packet, if any */ 5039 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 5040 len = rsm->r_end - rsm->r_start; 5041 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { 5042 /* Exactly like the ID */ 5043 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { 5044 uint32_t alt_thresh; 5045 /* 5046 * Compensate for delayed-ack with the d-ack time. 5047 */ 5048 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5049 if (alt_thresh > thresh) 5050 thresh = alt_thresh; 5051 } 5052 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { 5053 /* 2.1 behavior */ 5054 prsm = TAILQ_PREV(rsm, rack_head, r_tnext); 5055 if (prsm && (len <= segsiz)) { 5056 /* 5057 * Two packets outstanding, thresh should be (2*srtt) + 5058 * possible inter-packet delay (if any). 5059 */ 5060 uint32_t inter_gap = 0; 5061 int idx, nidx; 5062 5063 idx = rsm->r_rtr_cnt - 1; 5064 nidx = prsm->r_rtr_cnt - 1; 5065 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { 5066 /* Yes it was sent later (or at the same time) */ 5067 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 5068 } 5069 thresh += inter_gap; 5070 } else if (len <= segsiz) { 5071 /* 5072 * Possibly compensate for delayed-ack. 5073 */ 5074 uint32_t alt_thresh; 5075 5076 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5077 if (alt_thresh > thresh) 5078 thresh = alt_thresh; 5079 } 5080 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { 5081 /* 2.2 behavior */ 5082 if (len <= segsiz) { 5083 uint32_t alt_thresh; 5084 /* 5085 * Compensate for delayed-ack with the d-ack time. 5086 */ 5087 alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time; 5088 if (alt_thresh > thresh) 5089 thresh = alt_thresh; 5090 } 5091 } 5092 /* Not above an RTO */ 5093 if (thresh > tp->t_rxtcur) { 5094 thresh = tp->t_rxtcur; 5095 } 5096 /* Not above a RTO max */ 5097 if (thresh > rack_rto_max) { 5098 thresh = rack_rto_max; 5099 } 5100 /* Apply user supplied min TLP */ 5101 if (thresh < rack_tlp_min) { 5102 thresh = rack_tlp_min; 5103 } 5104 return (thresh); 5105 } 5106 5107 static uint32_t 5108 rack_grab_rtt(struct tcpcb *tp, struct tcp_rack *rack) 5109 { 5110 /* 5111 * We want the rack_rtt which is the 5112 * last rtt we measured. However if that 5113 * does not exist we fallback to the srtt (which 5114 * we probably will never do) and then as a last 5115 * resort we use RACK_INITIAL_RTO if no srtt is 5116 * yet set. 
5117 */ 5118 if (rack->rc_rack_rtt) 5119 return (rack->rc_rack_rtt); 5120 else if (tp->t_srtt == 0) 5121 return (RACK_INITIAL_RTO); 5122 return (tp->t_srtt); 5123 } 5124 5125 static struct rack_sendmap * 5126 rack_check_recovery_mode(struct tcpcb *tp, uint32_t tsused) 5127 { 5128 /* 5129 * Check to see that we don't need to fall into recovery. We will 5130 * need to do so if our oldest transmit is past the time we should 5131 * have had an ack. 5132 */ 5133 struct tcp_rack *rack; 5134 struct rack_sendmap *rsm; 5135 int32_t idx; 5136 uint32_t srtt, thresh; 5137 5138 rack = (struct tcp_rack *)tp->t_fb_ptr; 5139 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 5140 return (NULL); 5141 } 5142 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5143 if (rsm == NULL) 5144 return (NULL); 5145 5146 5147 if (rsm->r_flags & RACK_ACKED) { 5148 rsm = rack_find_lowest_rsm(rack); 5149 if (rsm == NULL) 5150 return (NULL); 5151 } 5152 idx = rsm->r_rtr_cnt - 1; 5153 srtt = rack_grab_rtt(tp, rack); 5154 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 5155 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { 5156 return (NULL); 5157 } 5158 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { 5159 return (NULL); 5160 } 5161 /* Ok if we reach here we are over-due and this guy can be sent */ 5162 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 5163 return (rsm); 5164 } 5165 5166 static uint32_t 5167 rack_get_persists_timer_val(struct tcpcb *tp, struct tcp_rack *rack) 5168 { 5169 int32_t t; 5170 int32_t tt; 5171 uint32_t ret_val; 5172 5173 t = (tp->t_srtt + (tp->t_rttvar << 2)); 5174 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], 5175 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); 5176 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 5177 ret_val = (uint32_t)tt; 5178 return (ret_val); 5179 } 5180 5181 static uint32_t 5182 rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_rack) 5183 { 5184 /* 5185 * Start the FR timer, we do this based on getting the first one in 5186 * the rc_tmap. Note that if its NULL we must stop the timer. in all 5187 * events we need to stop the running timer (if its running) before 5188 * starting the new one. 5189 */ 5190 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 5191 uint32_t srtt_cur; 5192 int32_t idx; 5193 int32_t is_tlp_timer = 0; 5194 struct rack_sendmap *rsm; 5195 5196 if (rack->t_timers_stopped) { 5197 /* All timers have been stopped none are to run */ 5198 return (0); 5199 } 5200 if (rack->rc_in_persist) { 5201 /* We can't start any timer in persists */ 5202 return (rack_get_persists_timer_val(tp, rack)); 5203 } 5204 rack->rc_on_min_to = 0; 5205 if ((tp->t_state < TCPS_ESTABLISHED) || 5206 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 5207 goto activate_rxt; 5208 } 5209 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5210 if ((rsm == NULL) || sup_rack) { 5211 /* Nothing on the send map or no rack */ 5212 activate_rxt: 5213 time_since_sent = 0; 5214 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 5215 if (rsm) { 5216 /* 5217 * Should we discount the RTX timer any? 5218 * 5219 * We want to discount it the smallest amount. 5220 * If a timer (Rack/TLP or RXT) has gone off more 5221 * recently thats the discount we want to use (now - timer time). 5222 * If the retransmit of the oldest packet was more recent then 5223 * we want to use that (now - oldest-packet-last_transmit_time). 
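 * As a sketch with made-up numbers: if t_rxtcur is 400000 usecs
 * and the more recent of those two events happened 150000 usecs
 * ago, the RXT timer below is armed for 250000 usecs; if the
 * elapsed time already exceeds t_rxtcur we fall back to
 * rc_min_to instead.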
5224 * 5225 */ 5226 idx = rsm->r_rtr_cnt - 1; 5227 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) 5228 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5229 else 5230 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5231 if (TSTMP_GT(cts, tstmp_touse)) 5232 time_since_sent = cts - tstmp_touse; 5233 } 5234 if (SEQ_LT(tp->snd_una, tp->snd_max) || 5235 sbavail(&tptosocket(tp)->so_snd)) { 5236 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 5237 to = tp->t_rxtcur; 5238 if (to > time_since_sent) 5239 to -= time_since_sent; 5240 else 5241 to = rack->r_ctl.rc_min_to; 5242 if (to == 0) 5243 to = 1; 5244 /* Special case for KEEPINIT */ 5245 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 5246 (TP_KEEPINIT(tp) != 0) && 5247 rsm) { 5248 /* 5249 * We have to put a ceiling on the rxt timer 5250 * of the keep-init timeout. 5251 */ 5252 uint32_t max_time, red; 5253 5254 max_time = TICKS_2_USEC(TP_KEEPINIT(tp)); 5255 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { 5256 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); 5257 if (red < max_time) 5258 max_time -= red; 5259 else 5260 max_time = 1; 5261 } 5262 /* Reduce timeout to the keep value if needed */ 5263 if (max_time < to) 5264 to = max_time; 5265 } 5266 return (to); 5267 } 5268 return (0); 5269 } 5270 if (rsm->r_flags & RACK_ACKED) { 5271 rsm = rack_find_lowest_rsm(rack); 5272 if (rsm == NULL) { 5273 /* No lowest? */ 5274 goto activate_rxt; 5275 } 5276 } 5277 if (rack->sack_attack_disable) { 5278 /* 5279 * We don't want to do 5280 * any TLP's if you are an attacker. 5281 * Though if you are doing what 5282 * is expected you may still have 5283 * SACK-PASSED marks. 5284 */ 5285 goto activate_rxt; 5286 } 5287 /* Convert from ms to usecs */ 5288 if ((rsm->r_flags & RACK_SACK_PASSED) || 5289 (rsm->r_flags & RACK_RWND_COLLAPSED) || 5290 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 5291 if ((tp->t_flags & TF_SENTFIN) && 5292 ((tp->snd_max - tp->snd_una) == 1) && 5293 (rsm->r_flags & RACK_HAS_FIN)) { 5294 /* 5295 * We don't start a rack timer if all we have is a 5296 * FIN outstanding. 5297 */ 5298 goto activate_rxt; 5299 } 5300 if ((rack->use_rack_rr == 0) && 5301 (IN_FASTRECOVERY(tp->t_flags)) && 5302 (rack->rack_no_prr == 0) && 5303 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { 5304 /* 5305 * We are not cheating, in recovery and 5306 * not enough ack's to yet get our next 5307 * retransmission out. 5308 * 5309 * Note that classified attackers do not 5310 * get to use the rack-cheat. 5311 */ 5312 goto activate_tlp; 5313 } 5314 srtt = rack_grab_rtt(tp, rack); 5315 thresh = rack_calc_thresh_rack(rack, srtt, cts); 5316 idx = rsm->r_rtr_cnt - 1; 5317 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; 5318 if (SEQ_GEQ(exp, cts)) { 5319 to = exp - cts; 5320 if (to < rack->r_ctl.rc_min_to) { 5321 to = rack->r_ctl.rc_min_to; 5322 if (rack->r_rr_config == 3) 5323 rack->rc_on_min_to = 1; 5324 } 5325 } else { 5326 to = rack->r_ctl.rc_min_to; 5327 if (rack->r_rr_config == 3) 5328 rack->rc_on_min_to = 1; 5329 } 5330 } else { 5331 /* Ok we need to do a TLP not RACK */ 5332 activate_tlp: 5333 if ((rack->rc_tlp_in_progress != 0) && 5334 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { 5335 /* 5336 * The previous send was a TLP and we have sent 5337 * N TLP's without sending new data. 5338 */ 5339 goto activate_rxt; 5340 } 5341 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 5342 if (rsm == NULL) { 5343 /* We found no rsm to TLP with. 
*/ 5344 goto activate_rxt; 5345 } 5346 if (rsm->r_flags & RACK_HAS_FIN) { 5347 /* If its a FIN we dont do TLP */ 5348 rsm = NULL; 5349 goto activate_rxt; 5350 } 5351 idx = rsm->r_rtr_cnt - 1; 5352 time_since_sent = 0; 5353 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) 5354 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; 5355 else 5356 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; 5357 if (TSTMP_GT(cts, tstmp_touse)) 5358 time_since_sent = cts - tstmp_touse; 5359 is_tlp_timer = 1; 5360 if (tp->t_srtt) { 5361 if ((rack->rc_srtt_measure_made == 0) && 5362 (tp->t_srtt == 1)) { 5363 /* 5364 * If another stack as run and set srtt to 1, 5365 * then the srtt was 0, so lets use the initial. 5366 */ 5367 srtt = RACK_INITIAL_RTO; 5368 } else { 5369 srtt_cur = tp->t_srtt; 5370 srtt = srtt_cur; 5371 } 5372 } else 5373 srtt = RACK_INITIAL_RTO; 5374 /* 5375 * If the SRTT is not keeping up and the 5376 * rack RTT has spiked we want to use 5377 * the last RTT not the smoothed one. 5378 */ 5379 if (rack_tlp_use_greater && 5380 tp->t_srtt && 5381 (srtt < rack_grab_rtt(tp, rack))) { 5382 srtt = rack_grab_rtt(tp, rack); 5383 } 5384 thresh = rack_calc_thresh_tlp(tp, rack, rsm, srtt); 5385 if (thresh > time_since_sent) { 5386 to = thresh - time_since_sent; 5387 } else { 5388 to = rack->r_ctl.rc_min_to; 5389 rack_log_alt_to_to_cancel(rack, 5390 thresh, /* flex1 */ 5391 time_since_sent, /* flex2 */ 5392 tstmp_touse, /* flex3 */ 5393 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ 5394 (uint32_t)rsm->r_tim_lastsent[idx], 5395 srtt, 5396 idx, 99); 5397 } 5398 if (to < rack_tlp_min) { 5399 to = rack_tlp_min; 5400 } 5401 if (to > TICKS_2_USEC(TCPTV_REXMTMAX)) { 5402 /* 5403 * If the TLP time works out to larger than the max 5404 * RTO lets not do TLP.. just RTO. 5405 */ 5406 goto activate_rxt; 5407 } 5408 } 5409 if (is_tlp_timer == 0) { 5410 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 5411 } else { 5412 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 5413 } 5414 if (to == 0) 5415 to = 1; 5416 return (to); 5417 } 5418 5419 static void 5420 rack_enter_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5421 { 5422 if (rack->rc_in_persist == 0) { 5423 if (tp->t_flags & TF_GPUTINPROG) { 5424 /* 5425 * Stop the goodput now, the calling of the 5426 * measurement function clears the flag. 
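 * A sample that spanned a zero-window persist period would mostly
 * measure the peer's closed window rather than the path, so the
 * measurement is completed now and tagged RACK_QUALITY_PERSIST.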
5427 */ 5428 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, 5429 RACK_QUALITY_PERSIST); 5430 } 5431 #ifdef NETFLIX_SHARED_CWND 5432 if (rack->r_ctl.rc_scw) { 5433 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5434 rack->rack_scwnd_is_idle = 1; 5435 } 5436 #endif 5437 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 5438 if (rack->r_ctl.rc_went_idle_time == 0) 5439 rack->r_ctl.rc_went_idle_time = 1; 5440 rack_timer_cancel(tp, rack, cts, __LINE__); 5441 rack->r_ctl.persist_lost_ends = 0; 5442 rack->probe_not_answered = 0; 5443 rack->forced_ack = 0; 5444 tp->t_rxtshift = 0; 5445 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5446 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5447 rack->rc_in_persist = 1; 5448 } 5449 } 5450 5451 static void 5452 rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5453 { 5454 if (tcp_in_hpts(rack->rc_inp)) { 5455 tcp_hpts_remove(rack->rc_inp); 5456 rack->r_ctl.rc_hpts_flags = 0; 5457 } 5458 #ifdef NETFLIX_SHARED_CWND 5459 if (rack->r_ctl.rc_scw) { 5460 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 5461 rack->rack_scwnd_is_idle = 0; 5462 } 5463 #endif 5464 if (rack->rc_gp_dyn_mul && 5465 (rack->use_fixed_rate == 0) && 5466 (rack->rc_always_pace)) { 5467 /* 5468 * Do we count this as if a probe-rtt just 5469 * finished? 5470 */ 5471 uint32_t time_idle, idle_min; 5472 5473 time_idle = tcp_get_usecs(NULL) - rack->r_ctl.rc_went_idle_time; 5474 idle_min = rack_min_probertt_hold; 5475 if (rack_probertt_gpsrtt_cnt_div) { 5476 uint64_t extra; 5477 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * 5478 (uint64_t)rack_probertt_gpsrtt_cnt_mul; 5479 extra /= (uint64_t)rack_probertt_gpsrtt_cnt_div; 5480 idle_min += (uint32_t)extra; 5481 } 5482 if (time_idle >= idle_min) { 5483 /* Yes, we count it as a probe-rtt. 
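 * To qualify, the idle time must be at least rack_min_probertt_hold
 * plus (rc_gp_srtt * rack_probertt_gpsrtt_cnt_mul /
 * rack_probertt_gpsrtt_cnt_div); crediting the idle period this way
 * refreshes the probe-rtt bookkeeping so another probe-rtt is not
 * scheduled right after we wake up.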
*/ 5484 uint32_t us_cts; 5485 5486 us_cts = tcp_get_usecs(NULL); 5487 if (rack->in_probe_rtt == 0) { 5488 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 5489 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 5490 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 5491 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 5492 } else { 5493 rack_exit_probertt(rack, us_cts); 5494 } 5495 } 5496 } 5497 rack->rc_in_persist = 0; 5498 rack->r_ctl.rc_went_idle_time = 0; 5499 tp->t_rxtshift = 0; 5500 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 5501 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 5502 rack->r_ctl.rc_agg_delayed = 0; 5503 rack->r_early = 0; 5504 rack->r_late = 0; 5505 rack->r_ctl.rc_agg_early = 0; 5506 } 5507 5508 static void 5509 rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts, 5510 struct hpts_diag *diag, struct timeval *tv) 5511 { 5512 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5513 union tcp_log_stackspecific log; 5514 5515 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5516 log.u_bbr.flex1 = diag->p_nxt_slot; 5517 log.u_bbr.flex2 = diag->p_cur_slot; 5518 log.u_bbr.flex3 = diag->slot_req; 5519 log.u_bbr.flex4 = diag->inp_hptsslot; 5520 log.u_bbr.flex5 = diag->slot_remaining; 5521 log.u_bbr.flex6 = diag->need_new_to; 5522 log.u_bbr.flex7 = diag->p_hpts_active; 5523 log.u_bbr.flex8 = diag->p_on_min_sleep; 5524 /* Hijack other fields as needed */ 5525 log.u_bbr.epoch = diag->have_slept; 5526 log.u_bbr.lt_epoch = diag->yet_to_sleep; 5527 log.u_bbr.pkts_out = diag->co_ret; 5528 log.u_bbr.applimited = diag->hpts_sleep_time; 5529 log.u_bbr.delivered = diag->p_prev_slot; 5530 log.u_bbr.inflight = diag->p_runningslot; 5531 log.u_bbr.bw_inuse = diag->wheel_slot; 5532 log.u_bbr.rttProp = diag->wheel_cts; 5533 log.u_bbr.timeStamp = cts; 5534 log.u_bbr.delRate = diag->maxslots; 5535 log.u_bbr.cur_del_rate = diag->p_curtick; 5536 log.u_bbr.cur_del_rate <<= 32; 5537 log.u_bbr.cur_del_rate |= diag->p_lasttick; 5538 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5539 &rack->rc_inp->inp_socket->so_rcv, 5540 &rack->rc_inp->inp_socket->so_snd, 5541 BBR_LOG_HPTSDIAG, 0, 5542 0, &log, false, tv); 5543 } 5544 5545 } 5546 5547 static void 5548 rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uint32_t len, int type) 5549 { 5550 if (rack_verbose_logging && rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 5551 union tcp_log_stackspecific log; 5552 struct timeval tv; 5553 5554 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 5555 log.u_bbr.flex1 = sb->sb_flags; 5556 log.u_bbr.flex2 = len; 5557 log.u_bbr.flex3 = sb->sb_state; 5558 log.u_bbr.flex8 = type; 5559 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 5560 TCP_LOG_EVENTP(rack->rc_tp, NULL, 5561 &rack->rc_inp->inp_socket->so_rcv, 5562 &rack->rc_inp->inp_socket->so_snd, 5563 TCP_LOG_SB_WAKE, 0, 5564 len, &log, false, &tv); 5565 } 5566 } 5567 5568 static void 5569 rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts, 5570 int32_t slot, uint32_t tot_len_this_send, int sup_rack) 5571 { 5572 struct hpts_diag diag; 5573 struct inpcb *inp = tptoinpcb(tp); 5574 struct timeval tv; 5575 uint32_t delayed_ack = 0; 5576 uint32_t hpts_timeout; 5577 uint32_t entry_slot = slot; 5578 uint8_t stopped; 5579 uint32_t left = 0; 5580 uint32_t us_cts; 5581 5582 if ((tp->t_state == TCPS_CLOSED) || 5583 (tp->t_state == TCPS_LISTEN)) { 5584 return; 5585 } 5586 if (tcp_in_hpts(inp)) { 5587 /* Already on the pacer */ 5588 return; 5589 } 5590 stopped = 
rack->rc_tmr_stopped; 5591 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { 5592 left = rack->r_ctl.rc_timer_exp - cts; 5593 } 5594 rack->r_ctl.rc_timer_exp = 0; 5595 rack->r_ctl.rc_hpts_flags = 0; 5596 us_cts = tcp_get_usecs(&tv); 5597 /* Now early/late accounting */ 5598 rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0); 5599 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { 5600 /* 5601 * We have a early carry over set, 5602 * we can always add more time so we 5603 * can always make this compensation. 5604 * 5605 * Note if ack's are allowed to wake us do not 5606 * penalize the next timer for being awoke 5607 * by an ack aka the rc_agg_early (non-paced mode). 5608 */ 5609 slot += rack->r_ctl.rc_agg_early; 5610 rack->r_early = 0; 5611 rack->r_ctl.rc_agg_early = 0; 5612 } 5613 if (rack->r_late) { 5614 /* 5615 * This is harder, we can 5616 * compensate some but it 5617 * really depends on what 5618 * the current pacing time is. 5619 */ 5620 if (rack->r_ctl.rc_agg_delayed >= slot) { 5621 /* 5622 * We can't compensate for it all. 5623 * And we have to have some time 5624 * on the clock. We always have a min 5625 * 10 slots (10 x 10 i.e. 100 usecs). 5626 */ 5627 if (slot <= HPTS_TICKS_PER_SLOT) { 5628 /* We gain delay */ 5629 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); 5630 slot = HPTS_TICKS_PER_SLOT; 5631 } else { 5632 /* We take off some */ 5633 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); 5634 slot = HPTS_TICKS_PER_SLOT; 5635 } 5636 } else { 5637 slot -= rack->r_ctl.rc_agg_delayed; 5638 rack->r_ctl.rc_agg_delayed = 0; 5639 /* Make sure we have 100 useconds at minimum */ 5640 if (slot < HPTS_TICKS_PER_SLOT) { 5641 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; 5642 slot = HPTS_TICKS_PER_SLOT; 5643 } 5644 if (rack->r_ctl.rc_agg_delayed == 0) 5645 rack->r_late = 0; 5646 } 5647 } 5648 if (slot) { 5649 /* We are pacing too */ 5650 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 5651 } 5652 hpts_timeout = rack_timer_start(tp, rack, cts, sup_rack); 5653 #ifdef NETFLIX_EXP_DETECTION 5654 if (rack->sack_attack_disable && 5655 (slot < tcp_sad_pacing_interval)) { 5656 /* 5657 * We have a potential attacker on 5658 * the line. We have possibly some 5659 * (or now) pacing time set. We want to 5660 * slow down the processing of sacks by some 5661 * amount (if it is an attacker). Set the default 5662 * slot for attackers in place (unless the orginal 5663 * interval is longer). Its stored in 5664 * micro-seconds, so lets convert to msecs. 5665 */ 5666 slot = tcp_sad_pacing_interval; 5667 } 5668 #endif 5669 if (tp->t_flags & TF_DELACK) { 5670 delayed_ack = TICKS_2_USEC(tcp_delacktime); 5671 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; 5672 } 5673 if (delayed_ack && ((hpts_timeout == 0) || 5674 (delayed_ack < hpts_timeout))) 5675 hpts_timeout = delayed_ack; 5676 else 5677 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 5678 /* 5679 * If no timers are going to run and we will fall off the hptsi 5680 * wheel, we resort to a keep-alive timer if its configured. 5681 */ 5682 if ((hpts_timeout == 0) && 5683 (slot == 0)) { 5684 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 5685 (tp->t_state <= TCPS_CLOSING)) { 5686 /* 5687 * Ok we have no timer (persists, rack, tlp, rxt or 5688 * del-ack), we don't have segments being paced. So 5689 * all that is left is the keepalive timer. 
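 * (To recap the selection so far: rack_timer_start() supplied any
 * RACK/TLP/RXT/persist timeout, a pending delayed ACK may have
 * lowered it, and the pacing slot is tracked separately; only when
 * both work out to zero do we consider keep-alive here.)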
5690 */ 5691 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 5692 /* Get the established keep-alive time */ 5693 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 5694 } else { 5695 /* 5696 * Get the initial setup keep-alive time, 5697 * note that this is probably not going to 5698 * happen, since rack will be running a rxt timer 5699 * if a SYN of some sort is outstanding. It is 5700 * actually handled in rack_timeout_rxt(). 5701 */ 5702 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 5703 } 5704 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 5705 if (rack->in_probe_rtt) { 5706 /* 5707 * We want to instead not wake up a long time from 5708 * now but to wake up about the time we would 5709 * exit probe-rtt and initiate a keep-alive ack. 5710 * This will get us out of probe-rtt and update 5711 * our min-rtt. 5712 */ 5713 hpts_timeout = rack_min_probertt_hold; 5714 } 5715 } 5716 } 5717 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 5718 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 5719 /* 5720 * RACK, TLP, persists and RXT timers all are restartable 5721 * based on actions input .. i.e we received a packet (ack 5722 * or sack) and that changes things (rw, or snd_una etc). 5723 * Thus we can restart them with a new value. For 5724 * keep-alive, delayed_ack we keep track of what was left 5725 * and restart the timer with a smaller value. 5726 */ 5727 if (left < hpts_timeout) 5728 hpts_timeout = left; 5729 } 5730 if (hpts_timeout) { 5731 /* 5732 * Hack alert for now we can't time-out over 2,147,483 5733 * seconds (a bit more than 596 hours), which is probably ok 5734 * :). 5735 */ 5736 if (hpts_timeout > 0x7ffffffe) 5737 hpts_timeout = 0x7ffffffe; 5738 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; 5739 } 5740 rack_log_pacing_delay_calc(rack, entry_slot, slot, hpts_timeout, 0, 0, 27, __LINE__, NULL, 0); 5741 if ((rack->gp_ready == 0) && 5742 (rack->use_fixed_rate == 0) && 5743 (hpts_timeout < slot) && 5744 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 5745 /* 5746 * We have no good estimate yet for the 5747 * old clunky burst mitigation or the 5748 * real pacing. And the tlp or rxt is smaller 5749 * than the pacing calculation. Lets not 5750 * pace that long since we know the calculation 5751 * so far is not accurate. 5752 */ 5753 slot = hpts_timeout; 5754 } 5755 /** 5756 * Turn off all the flags for queuing by default. The 5757 * flags have important meanings to what happens when 5758 * LRO interacts with the transport. Most likely (by default now) 5759 * mbuf_queueing and ack compression are on. So the transport 5760 * has a couple of flags that control what happens (if those 5761 * are not on then these flags won't have any effect since it 5762 * won't go through the queuing LRO path). 5763 * 5764 * INP_MBUF_QUEUE_READY - This flags says that I am busy 5765 * pacing output, so don't disturb. But 5766 * it also means LRO can wake me if there 5767 * is a SACK arrival. 5768 * 5769 * INP_DONT_SACK_QUEUE - This flag is used in conjunction 5770 * with the above flag (QUEUE_READY) and 5771 * when present it says don't even wake me 5772 * if a SACK arrives. 5773 * 5774 * The idea behind these flags is that if we are pacing we 5775 * set the MBUF_QUEUE_READY and only get woken up if 5776 * a SACK arrives (which could change things) or if 5777 * our pacing timer expires. If, however, we have a rack 5778 * timer running, then we don't even want a sack to wake 5779 * us since the rack timer has to expire before we can send. 
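 * In short, the cases handled below work out to:
 *   pacing slot set, no RACK timer      -> INP_MBUF_QUEUE_READY
 *   pacing slot set, RACK timer armed   -> INP_MBUF_QUEUE_READY and
 *                                          INP_DONT_SACK_QUEUE
 *                                          (unless r_rr_config == 3)
 *   rc_ack_can_sendout_data             -> both flags cleared again
 *   timer only, or nothing at all       -> no flags set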
5780 * 5781 * Other cases should usually have none of the flags set 5782 * so LRO can call into us. 5783 */ 5784 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5785 if (slot) { 5786 rack->r_ctl.rc_last_output_to = us_cts + slot; 5787 /* 5788 * A pacing timer (slot) is being set, in 5789 * such a case we cannot send (we are blocked by 5790 * the timer). So lets tell LRO that it should not 5791 * wake us unless there is a SACK. Note this only 5792 * will be effective if mbuf queueing is on or 5793 * compressed acks are being processed. 5794 */ 5795 inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 5796 /* 5797 * But wait if we have a Rack timer running 5798 * even a SACK should not disturb us (with 5799 * the exception of r_rr_config 3). 5800 */ 5801 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 5802 (rack->r_rr_config != 3)) 5803 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 5804 if (rack->rc_ack_can_sendout_data) { 5805 /* 5806 * Ahh but wait, this is that special case 5807 * where the pacing timer can be disturbed 5808 * backout the changes (used for non-paced 5809 * burst limiting). 5810 */ 5811 inp->inp_flags2 &= ~(INP_DONT_SACK_QUEUE|INP_MBUF_QUEUE_READY); 5812 } 5813 if ((rack->use_rack_rr) && 5814 (rack->r_rr_config < 2) && 5815 ((hpts_timeout) && (hpts_timeout < slot))) { 5816 /* 5817 * Arrange for the hpts to kick back in after the 5818 * t-o if the t-o does not cause a send. 5819 */ 5820 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5821 __LINE__, &diag); 5822 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5823 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5824 } else { 5825 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(slot), 5826 __LINE__, &diag); 5827 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5828 rack_log_to_start(rack, cts, hpts_timeout, slot, 1); 5829 } 5830 } else if (hpts_timeout) { 5831 /* 5832 * With respect to inp_flags2 here, lets let any new acks wake 5833 * us up here. Since we are not pacing (no pacing timer), output 5834 * can happen so we should let it. If its a Rack timer, then any inbound 5835 * packet probably won't change the sending (we will be blocked) 5836 * but it may change the prr stats so letting it in (the set defaults 5837 * at the start of this block) are good enough. 5838 */ 5839 (void)tcp_hpts_insert_diag(inp, HPTS_USEC_TO_SLOTS(hpts_timeout), 5840 __LINE__, &diag); 5841 rack_log_hpts_diag(rack, us_cts, &diag, &tv); 5842 rack_log_to_start(rack, cts, hpts_timeout, slot, 0); 5843 } else { 5844 /* No timer starting */ 5845 #ifdef INVARIANTS 5846 if (SEQ_GT(tp->snd_max, tp->snd_una)) { 5847 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", 5848 tp, rack, tot_len_this_send, cts, slot, hpts_timeout); 5849 } 5850 #endif 5851 } 5852 rack->rc_tmr_stopped = 0; 5853 if (slot) 5854 rack_log_type_bbrsnd(rack, tot_len_this_send, slot, us_cts, &tv); 5855 } 5856 5857 /* 5858 * RACK Timer, here we simply do logging and house keeping. 5859 * the normal rack_output() function will call the 5860 * appropriate thing to check if we need to do a RACK retransmit. 5861 * We return 1, saying don't proceed with rack_output only 5862 * when all timers have been stopped (destroyed PCB?). 5863 */ 5864 static int 5865 rack_timeout_rack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 5866 { 5867 /* 5868 * This timer simply provides an internal trigger to send out data. 5869 * The check_recovery_mode call will see if there are needed 5870 * retransmissions, if so we will enter fast-recovery. 
The output 5871 * call may or may not do the same thing depending on sysctl 5872 * settings. 5873 */ 5874 struct rack_sendmap *rsm; 5875 5876 counter_u64_add(rack_to_tot, 1); 5877 if (rack->r_state && (rack->r_state != tp->t_state)) 5878 rack_set_state(tp, rack); 5879 rack->rc_on_min_to = 0; 5880 rsm = rack_check_recovery_mode(tp, cts); 5881 rack_log_to_event(rack, RACK_TO_FRM_RACK, rsm); 5882 if (rsm) { 5883 rack->r_ctl.rc_resend = rsm; 5884 rack->r_timer_override = 1; 5885 if (rack->use_rack_rr) { 5886 /* 5887 * Don't accumulate extra pacing delay 5888 * we are allowing the rack timer to 5889 * over-ride pacing i.e. rrr takes precedence 5890 * if the pacing interval is longer than the rrr 5891 * time (in other words we get the min pacing 5892 * time versus rrr pacing time). 5893 */ 5894 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 5895 } 5896 } 5897 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 5898 if (rsm == NULL) { 5899 /* restart a timer and return 1 */ 5900 rack_start_hpts_timer(rack, tp, cts, 5901 0, 0, 0); 5902 return (1); 5903 } 5904 return (0); 5905 } 5906 5907 static void 5908 rack_adjust_orig_mlen(struct rack_sendmap *rsm) 5909 { 5910 if (rsm->m->m_len > rsm->orig_m_len) { 5911 /* 5912 * Mbuf grew, caused by sbcompress, our offset does 5913 * not change. 5914 */ 5915 rsm->orig_m_len = rsm->m->m_len; 5916 } else if (rsm->m->m_len < rsm->orig_m_len) { 5917 /* 5918 * Mbuf shrank, trimmed off the top by an ack, our 5919 * offset changes. 5920 */ 5921 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); 5922 rsm->orig_m_len = rsm->m->m_len; 5923 } 5924 } 5925 5926 static void 5927 rack_setup_offset_for_rsm(struct rack_sendmap *src_rsm, struct rack_sendmap *rsm) 5928 { 5929 struct mbuf *m; 5930 uint32_t soff; 5931 5932 if (src_rsm->m && (src_rsm->orig_m_len != src_rsm->m->m_len)) { 5933 /* Fix up the orig_m_len and possibly the mbuf offset */ 5934 rack_adjust_orig_mlen(src_rsm); 5935 } 5936 m = src_rsm->m; 5937 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); 5938 while (soff >= m->m_len) { 5939 /* Move out past this mbuf */ 5940 soff -= m->m_len; 5941 m = m->m_next; 5942 KASSERT((m != NULL), 5943 ("rsm:%p nrsm:%p hit at soff:%u null m", 5944 src_rsm, rsm, soff)); 5945 } 5946 rsm->m = m; 5947 rsm->soff = soff; 5948 rsm->orig_m_len = m->m_len; 5949 } 5950 5951 static __inline void 5952 rack_clone_rsm(struct tcp_rack *rack, struct rack_sendmap *nrsm, 5953 struct rack_sendmap *rsm, uint32_t start) 5954 { 5955 int idx; 5956 5957 nrsm->r_start = start; 5958 nrsm->r_end = rsm->r_end; 5959 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 5960 nrsm->r_flags = rsm->r_flags; 5961 nrsm->r_dupack = rsm->r_dupack; 5962 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; 5963 nrsm->r_rtr_bytes = 0; 5964 nrsm->r_fas = rsm->r_fas; 5965 rsm->r_end = nrsm->r_start; 5966 nrsm->r_just_ret = rsm->r_just_ret; 5967 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 5968 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 5969 } 5970 /* Now if we have SYN flag we keep it on the left edge */ 5971 if (nrsm->r_flags & RACK_HAS_SYN) 5972 nrsm->r_flags &= ~RACK_HAS_SYN; 5973 /* Now if we have a FIN flag we keep it on the right edge */ 5974 if (rsm->r_flags & RACK_HAS_FIN) 5975 rsm->r_flags &= ~RACK_HAS_FIN; 5976 /* Push bit must go to the right edge as well */ 5977 if (rsm->r_flags & RACK_HAD_PUSH) 5978 rsm->r_flags &= ~RACK_HAD_PUSH; 5979 /* Clone over the state of the hw_tls flag */ 5980 nrsm->r_hw_tls = rsm->r_hw_tls; 5981 /* 5982 * Now we need to find nrsm's new location in the mbuf chain 5983 * we basically calculate a 
new offset, which is soff + 5984 * how much is left in original rsm. Then we walk out the mbuf 5985 * chain to find the righ position, it may be the same mbuf 5986 * or maybe not. 5987 */ 5988 KASSERT(((rsm->m != NULL) || 5989 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), 5990 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); 5991 if (rsm->m) 5992 rack_setup_offset_for_rsm(rsm, nrsm); 5993 } 5994 5995 static struct rack_sendmap * 5996 rack_merge_rsm(struct tcp_rack *rack, 5997 struct rack_sendmap *l_rsm, 5998 struct rack_sendmap *r_rsm) 5999 { 6000 /* 6001 * We are merging two ack'd RSM's, 6002 * the l_rsm is on the left (lower seq 6003 * values) and the r_rsm is on the right 6004 * (higher seq value). The simplest way 6005 * to merge these is to move the right 6006 * one into the left. I don't think there 6007 * is any reason we need to try to find 6008 * the oldest (or last oldest retransmitted). 6009 */ 6010 #ifdef INVARIANTS 6011 struct rack_sendmap *rm; 6012 #endif 6013 rack_log_map_chg(rack->rc_tp, rack, NULL, 6014 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); 6015 l_rsm->r_end = r_rsm->r_end; 6016 if (l_rsm->r_dupack < r_rsm->r_dupack) 6017 l_rsm->r_dupack = r_rsm->r_dupack; 6018 if (r_rsm->r_rtr_bytes) 6019 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 6020 if (r_rsm->r_in_tmap) { 6021 /* This really should not happen */ 6022 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); 6023 r_rsm->r_in_tmap = 0; 6024 } 6025 6026 /* Now the flags */ 6027 if (r_rsm->r_flags & RACK_HAS_FIN) 6028 l_rsm->r_flags |= RACK_HAS_FIN; 6029 if (r_rsm->r_flags & RACK_TLP) 6030 l_rsm->r_flags |= RACK_TLP; 6031 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) 6032 l_rsm->r_flags |= RACK_RWND_COLLAPSED; 6033 if ((r_rsm->r_flags & RACK_APP_LIMITED) && 6034 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { 6035 /* 6036 * If both are app-limited then let the 6037 * free lower the count. If right is app 6038 * limited and left is not, transfer. 6039 */ 6040 l_rsm->r_flags |= RACK_APP_LIMITED; 6041 r_rsm->r_flags &= ~RACK_APP_LIMITED; 6042 if (r_rsm == rack->r_ctl.rc_first_appl) 6043 rack->r_ctl.rc_first_appl = l_rsm; 6044 } 6045 #ifndef INVARIANTS 6046 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6047 #else 6048 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, r_rsm); 6049 if (rm != r_rsm) { 6050 panic("removing head in rack:%p rsm:%p rm:%p", 6051 rack, r_rsm, rm); 6052 } 6053 #endif 6054 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 6055 /* Transfer the split limit to the map we free */ 6056 r_rsm->r_limit_type = l_rsm->r_limit_type; 6057 l_rsm->r_limit_type = 0; 6058 } 6059 rack_free(rack, r_rsm); 6060 return (l_rsm); 6061 } 6062 6063 /* 6064 * TLP Timer, here we simply setup what segment we want to 6065 * have the TLP expire on, the normal rack_output() will then 6066 * send it out. 6067 * 6068 * We return 1, saying don't proceed with rack_output only 6069 * when all timers have been stopped (destroyed PCB?). 6070 */ 6071 static int 6072 rack_timeout_tlp(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t *doing_tlp) 6073 { 6074 /* 6075 * Tail Loss Probe. 
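 * In outline: provided the timer really expired and the connection
 * is still making progress, prefer to send one new MSS if data is
 * available and fits in the receive window; otherwise pick the
 * highest un-acked segment to retransmit (splitting it first if it
 * is larger than one MSS), and punt to the RXT timer when no
 * suitable segment, or no memory to split one, can be found.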
6076 */ 6077 struct rack_sendmap *rsm = NULL; 6078 #ifdef INVARIANTS 6079 struct rack_sendmap *insret; 6080 #endif 6081 struct socket *so = tptosocket(tp); 6082 uint32_t amm; 6083 uint32_t out, avail; 6084 int collapsed_win = 0; 6085 6086 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6087 /* Its not time yet */ 6088 return (0); 6089 } 6090 if (ctf_progress_timeout_check(tp, true)) { 6091 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6092 return (-ETIMEDOUT); /* tcp_drop() */ 6093 } 6094 /* 6095 * A TLP timer has expired. We have been idle for 2 rtts. So we now 6096 * need to figure out how to force a full MSS segment out. 6097 */ 6098 rack_log_to_event(rack, RACK_TO_FRM_TLP, NULL); 6099 rack->r_ctl.retran_during_recovery = 0; 6100 rack->r_ctl.dsack_byte_cnt = 0; 6101 counter_u64_add(rack_tlp_tot, 1); 6102 if (rack->r_state && (rack->r_state != tp->t_state)) 6103 rack_set_state(tp, rack); 6104 avail = sbavail(&so->so_snd); 6105 out = tp->snd_max - tp->snd_una; 6106 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { 6107 /* special case, we need a retransmission */ 6108 collapsed_win = 1; 6109 goto need_retran; 6110 } 6111 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { 6112 rack->r_ctl.dsack_persist--; 6113 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6114 rack->r_ctl.num_dsack = 0; 6115 } 6116 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6117 } 6118 if ((tp->t_flags & TF_GPUTINPROG) && 6119 (rack->r_ctl.rc_tlp_cnt_out == 1)) { 6120 /* 6121 * If this is the second in a row 6122 * TLP and we are doing a measurement 6123 * its time to abandon the measurement. 6124 * Something is likely broken on 6125 * the clients network and measuring a 6126 * broken network does us no good. 6127 */ 6128 tp->t_flags &= ~TF_GPUTINPROG; 6129 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6130 rack->r_ctl.rc_gp_srtt /*flex1*/, 6131 tp->gput_seq, 6132 0, 0, 18, __LINE__, NULL, 0); 6133 } 6134 /* 6135 * Check our send oldest always settings, and if 6136 * there is an oldest to send jump to the need_retran. 6137 */ 6138 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) 6139 goto need_retran; 6140 6141 if (avail > out) { 6142 /* New data is available */ 6143 amm = avail - out; 6144 if (amm > ctf_fixed_maxseg(tp)) { 6145 amm = ctf_fixed_maxseg(tp); 6146 if ((amm + out) > tp->snd_wnd) { 6147 /* We are rwnd limited */ 6148 goto need_retran; 6149 } 6150 } else if (amm < ctf_fixed_maxseg(tp)) { 6151 /* not enough to fill a MTU */ 6152 goto need_retran; 6153 } 6154 if (IN_FASTRECOVERY(tp->t_flags)) { 6155 /* Unlikely */ 6156 if (rack->rack_no_prr == 0) { 6157 if (out + amm <= tp->snd_wnd) { 6158 rack->r_ctl.rc_prr_sndcnt = amm; 6159 rack->r_ctl.rc_tlp_new_data = amm; 6160 rack_log_to_prr(rack, 4, 0, __LINE__); 6161 } 6162 } else 6163 goto need_retran; 6164 } else { 6165 /* Set the send-new override */ 6166 if (out + amm <= tp->snd_wnd) 6167 rack->r_ctl.rc_tlp_new_data = amm; 6168 else 6169 goto need_retran; 6170 } 6171 rack->r_ctl.rc_tlpsend = NULL; 6172 counter_u64_add(rack_tlp_newdata, 1); 6173 goto send; 6174 } 6175 need_retran: 6176 /* 6177 * Ok we need to arrange the last un-acked segment to be re-sent, or 6178 * optionally the first un-acked segment. 
6179 */ 6180 if (collapsed_win == 0) { 6181 if (rack_always_send_oldest) 6182 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 6183 else { 6184 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6185 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { 6186 rsm = rack_find_high_nonack(rack, rsm); 6187 } 6188 } 6189 if (rsm == NULL) { 6190 #ifdef TCP_BLACKBOX 6191 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6192 #endif 6193 goto out; 6194 } 6195 } else { 6196 /* 6197 * We must find the last segment 6198 * that was acceptable by the client. 6199 */ 6200 RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6201 if ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0) { 6202 /* Found one */ 6203 break; 6204 } 6205 } 6206 if (rsm == NULL) { 6207 /* None? if so send the first */ 6208 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6209 if (rsm == NULL) { 6210 #ifdef TCP_BLACKBOX 6211 tcp_log_dump_tp_logbuf(tp, "nada counter trips", M_NOWAIT, true); 6212 #endif 6213 goto out; 6214 } 6215 } 6216 } 6217 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { 6218 /* 6219 * We need to split this the last segment in two. 6220 */ 6221 struct rack_sendmap *nrsm; 6222 6223 nrsm = rack_alloc_full_limit(rack); 6224 if (nrsm == NULL) { 6225 /* 6226 * No memory to split, we will just exit and punt 6227 * off to the RXT timer. 6228 */ 6229 goto out; 6230 } 6231 rack_clone_rsm(rack, nrsm, rsm, 6232 (rsm->r_end - ctf_fixed_maxseg(tp))); 6233 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 6234 #ifndef INVARIANTS 6235 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6236 #else 6237 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 6238 if (insret != NULL) { 6239 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 6240 nrsm, insret, rack, rsm); 6241 } 6242 #endif 6243 if (rsm->r_in_tmap) { 6244 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6245 nrsm->r_in_tmap = 1; 6246 } 6247 rsm = nrsm; 6248 } 6249 rack->r_ctl.rc_tlpsend = rsm; 6250 send: 6251 /* Make sure output path knows we are doing a TLP */ 6252 *doing_tlp = 1; 6253 rack->r_timer_override = 1; 6254 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6255 return (0); 6256 out: 6257 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 6258 return (0); 6259 } 6260 6261 /* 6262 * Delayed ack Timer, here we simply need to setup the 6263 * ACK_NOW flag and remove the DELACK flag. From there 6264 * the output routine will send the ack out. 6265 * 6266 * We only return 1, saying don't proceed, if all timers 6267 * are stopped (destroyed PCB?). 6268 */ 6269 static int 6270 rack_timeout_delack(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6271 { 6272 6273 rack_log_to_event(rack, RACK_TO_FRM_DELACK, NULL); 6274 tp->t_flags &= ~TF_DELACK; 6275 tp->t_flags |= TF_ACKNOW; 6276 KMOD_TCPSTAT_INC(tcps_delack); 6277 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 6278 return (0); 6279 } 6280 6281 /* 6282 * Persists timer, here we simply send the 6283 * same thing as a keepalive will. 6284 * the one byte send. 6285 * 6286 * We only return 1, saying don't proceed, if all timers 6287 * are stopped (destroyed PCB?). 
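 * The probe built below (tcpip_maketemplate() plus tcp_respond())
 * uses sequence number snd_una - 1, which lies outside the peer's
 * receive window and therefore forces an ACK reporting the current
 * window.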
6288 */ 6289 static int 6290 rack_timeout_persist(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6291 { 6292 struct tcptemp *t_template; 6293 int32_t retval = 1; 6294 6295 if (rack->rc_in_persist == 0) 6296 return (0); 6297 if (ctf_progress_timeout_check(tp, false)) { 6298 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6299 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6300 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6301 return (-ETIMEDOUT); /* tcp_drop() */ 6302 } 6303 /* 6304 * Persistence timer into zero window. Force a byte to be output, if 6305 * possible. 6306 */ 6307 KMOD_TCPSTAT_INC(tcps_persisttimeo); 6308 /* 6309 * Hack: if the peer is dead/unreachable, we do not time out if the 6310 * window is closed. After a full backoff, drop the connection if 6311 * the idle time (no responses to probes) reaches the maximum 6312 * backoff that we would use if retransmitting. 6313 */ 6314 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 6315 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 6316 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { 6317 KMOD_TCPSTAT_INC(tcps_persistdrop); 6318 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6319 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6320 retval = -ETIMEDOUT; /* tcp_drop() */ 6321 goto out; 6322 } 6323 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && 6324 tp->snd_una == tp->snd_max) 6325 rack_exit_persist(tp, rack, cts); 6326 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 6327 /* 6328 * If the user has closed the socket then drop a persisting 6329 * connection after a much reduced timeout. 6330 */ 6331 if (tp->t_state > TCPS_CLOSE_WAIT && 6332 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 6333 KMOD_TCPSTAT_INC(tcps_persistdrop); 6334 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 6335 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); 6336 retval = -ETIMEDOUT; /* tcp_drop() */ 6337 goto out; 6338 } 6339 t_template = tcpip_maketemplate(rack->rc_inp); 6340 if (t_template) { 6341 /* only set it if we were answered */ 6342 if (rack->forced_ack == 0) { 6343 rack->forced_ack = 1; 6344 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6345 } else { 6346 rack->probe_not_answered = 1; 6347 counter_u64_add(rack_persists_loss, 1); 6348 rack->r_ctl.persist_lost_ends++; 6349 } 6350 counter_u64_add(rack_persists_sends, 1); 6351 tcp_respond(tp, t_template->tt_ipgen, 6352 &t_template->tt_t, (struct mbuf *)NULL, 6353 tp->rcv_nxt, tp->snd_una - 1, 0); 6354 /* This sends an ack */ 6355 if (tp->t_flags & TF_DELACK) 6356 tp->t_flags &= ~TF_DELACK; 6357 free(t_template, M_TEMP); 6358 } 6359 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 6360 tp->t_rxtshift++; 6361 out: 6362 rack_log_to_event(rack, RACK_TO_FRM_PERSIST, NULL); 6363 rack_start_hpts_timer(rack, tp, cts, 6364 0, 0, 0); 6365 return (retval); 6366 } 6367 6368 /* 6369 * If a keepalive goes off, we had no other timers 6370 * happening. We always return 1 here since this 6371 * routine either drops the connection or sends 6372 * out a segment with respond. 6373 */ 6374 static int 6375 rack_timeout_keepalive(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6376 { 6377 struct tcptemp *t_template; 6378 struct inpcb *inp = tptoinpcb(tp); 6379 6380 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 6381 rack_log_to_event(rack, RACK_TO_FRM_KEEP, NULL); 6382 /* 6383 * Keep-alive timer went off; send something or drop connection if 6384 * idle for too long. 
6385 */ 6386 KMOD_TCPSTAT_INC(tcps_keeptimeo); 6387 if (tp->t_state < TCPS_ESTABLISHED) 6388 goto dropit; 6389 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 6390 tp->t_state <= TCPS_CLOSING) { 6391 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 6392 goto dropit; 6393 /* 6394 * Send a packet designed to force a response if the peer is 6395 * up and reachable: either an ACK if the connection is 6396 * still alive, or an RST if the peer has closed the 6397 * connection due to timeout or reboot. Using sequence 6398 * number tp->snd_una-1 causes the transmitted zero-length 6399 * segment to lie outside the receive window; by the 6400 * protocol spec, this requires the correspondent TCP to 6401 * respond. 6402 */ 6403 KMOD_TCPSTAT_INC(tcps_keepprobe); 6404 t_template = tcpip_maketemplate(inp); 6405 if (t_template) { 6406 if (rack->forced_ack == 0) { 6407 rack->forced_ack = 1; 6408 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); 6409 } else { 6410 rack->probe_not_answered = 1; 6411 } 6412 tcp_respond(tp, t_template->tt_ipgen, 6413 &t_template->tt_t, (struct mbuf *)NULL, 6414 tp->rcv_nxt, tp->snd_una - 1, 0); 6415 free(t_template, M_TEMP); 6416 } 6417 } 6418 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 6419 return (1); 6420 dropit: 6421 KMOD_TCPSTAT_INC(tcps_keepdrops); 6422 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6423 return (-ETIMEDOUT); /* tcp_drop() */ 6424 } 6425 6426 /* 6427 * Retransmit helper function, clear up all the ack 6428 * flags and take care of important book keeping. 6429 */ 6430 static void 6431 rack_remxt_tmr(struct tcpcb *tp) 6432 { 6433 /* 6434 * The retransmit timer went off, all sack'd blocks must be 6435 * un-acked. 6436 */ 6437 struct rack_sendmap *rsm, *trsm = NULL; 6438 struct tcp_rack *rack; 6439 6440 rack = (struct tcp_rack *)tp->t_fb_ptr; 6441 rack_timer_cancel(tp, rack, tcp_get_usecs(NULL), __LINE__); 6442 rack_log_to_event(rack, RACK_TO_FRM_TMR, NULL); 6443 if (rack->r_state && (rack->r_state != tp->t_state)) 6444 rack_set_state(tp, rack); 6445 /* 6446 * Ideally we would like to be able to 6447 * mark SACK-PASS on anything not acked here. 6448 * 6449 * However, if we do that we would burst out 6450 * all that data 1ms apart. This would be unwise, 6451 * so for now we will just let the normal rxt timer 6452 * and tlp timer take care of it. 6453 * 6454 * Also we really need to stick them back in sequence 6455 * order. This way we send in the proper order and any 6456 * sacks that come floating in will "re-ack" the data. 6457 * To do this we zap the tmap with an INIT and then 6458 * walk through and place every rsm in the RB tree 6459 * back in its seq ordered place. 
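 * In short, after the loop below the tmap mirrors the RB tree in
 * sequence order, every rsm carries RACK_MUST_RXT, and any prior
 * ACKED/SACK state is cleared, so retransmission restarts from a
 * clean, in-order view of the outstanding data.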
6460 */ 6461 TAILQ_INIT(&rack->r_ctl.rc_tmap); 6462 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 6463 rsm->r_dupack = 0; 6464 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 6465 /* We must re-add it back to the tlist */ 6466 if (trsm == NULL) { 6467 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 6468 } else { 6469 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); 6470 } 6471 rsm->r_in_tmap = 1; 6472 trsm = rsm; 6473 if (rsm->r_flags & RACK_ACKED) 6474 rsm->r_flags |= RACK_WAS_ACKED; 6475 rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED); 6476 rsm->r_flags |= RACK_MUST_RXT; 6477 } 6478 /* Clear the count (we just un-acked them) */ 6479 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; 6480 rack->r_ctl.rc_sacked = 0; 6481 rack->r_ctl.rc_sacklast = NULL; 6482 rack->r_ctl.rc_agg_delayed = 0; 6483 rack->r_early = 0; 6484 rack->r_ctl.rc_agg_early = 0; 6485 rack->r_late = 0; 6486 /* Clear the tlp rtx mark */ 6487 rack->r_ctl.rc_resend = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6488 if (rack->r_ctl.rc_resend != NULL) 6489 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; 6490 rack->r_ctl.rc_prr_sndcnt = 0; 6491 rack_log_to_prr(rack, 6, 0, __LINE__); 6492 rack->r_timer_override = 1; 6493 if ((((tp->t_flags & TF_SACK_PERMIT) == 0) 6494 #ifdef NETFLIX_EXP_DETECTION 6495 || (rack->sack_attack_disable != 0) 6496 #endif 6497 ) && ((tp->t_flags & TF_SENTFIN) == 0)) { 6498 /* 6499 * For non-sack customers new data 6500 * needs to go out as retransmits until 6501 * we retransmit up to snd_max. 6502 */ 6503 rack->r_must_retran = 1; 6504 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, 6505 rack->r_ctl.rc_sacked); 6506 } 6507 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 6508 } 6509 6510 static void 6511 rack_convert_rtts(struct tcpcb *tp) 6512 { 6513 if (tp->t_srtt > 1) { 6514 uint32_t val, frac; 6515 6516 val = tp->t_srtt >> TCP_RTT_SHIFT; 6517 frac = tp->t_srtt & 0x1f; 6518 tp->t_srtt = TICKS_2_USEC(val); 6519 /* 6520 * frac is the fractional part of the srtt (if any) 6521 * but its in ticks and every bit represents 6522 * 1/32nd of a hz. 6523 */ 6524 if (frac) { 6525 if (hz == 1000) { 6526 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6527 } else { 6528 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6529 } 6530 tp->t_srtt += frac; 6531 } 6532 } 6533 if (tp->t_rttvar) { 6534 uint32_t val, frac; 6535 6536 val = tp->t_rttvar >> TCP_RTTVAR_SHIFT; 6537 frac = tp->t_rttvar & 0x1f; 6538 tp->t_rttvar = TICKS_2_USEC(val); 6539 /* 6540 * frac is the fractional part of the srtt (if any) 6541 * but its in ticks and every bit represents 6542 * 1/32nd of a hz. 
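 * As a rough worked example (illustrative numbers): with hz = 1000,
 * a whole part of 5 ticks becomes TICKS_2_USEC(5) = 5000 usec, and a
 * fractional part of 5/32 of a tick adds (5 * 1000) / 32 = 156 usec,
 * for a converted value of 5156 usec.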
6543 */ 6544 if (frac) { 6545 if (hz == 1000) { 6546 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_MSEC) / (uint64_t)TCP_RTT_SCALE); 6547 } else { 6548 frac = (((uint64_t)frac * (uint64_t)HPTS_USEC_IN_SEC) / ((uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE)); 6549 } 6550 tp->t_rttvar += frac; 6551 } 6552 } 6553 tp->t_rxtcur = RACK_REXMTVAL(tp); 6554 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 6555 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); 6556 } 6557 if (tp->t_rxtcur > rack_rto_max) { 6558 tp->t_rxtcur = rack_rto_max; 6559 } 6560 } 6561 6562 static void 6563 rack_cc_conn_init(struct tcpcb *tp) 6564 { 6565 struct tcp_rack *rack; 6566 uint32_t srtt; 6567 6568 rack = (struct tcp_rack *)tp->t_fb_ptr; 6569 srtt = tp->t_srtt; 6570 cc_conn_init(tp); 6571 /* 6572 * Now convert to rack's internal format, 6573 * if required. 6574 */ 6575 if ((srtt == 0) && (tp->t_srtt != 0)) 6576 rack_convert_rtts(tp); 6577 /* 6578 * We want a chance to stay in slowstart as 6579 * we create a connection. TCP spec says that 6580 * initially ssthresh is infinite. For our 6581 * purposes that is the snd_wnd. 6582 */ 6583 if (tp->snd_ssthresh < tp->snd_wnd) { 6584 tp->snd_ssthresh = tp->snd_wnd; 6585 } 6586 /* 6587 * We also want to assure a IW worth of 6588 * data can get inflight. 6589 */ 6590 if (rc_init_window(rack) < tp->snd_cwnd) 6591 tp->snd_cwnd = rc_init_window(rack); 6592 } 6593 6594 /* 6595 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 6596 * we will setup to retransmit the lowest seq number outstanding. 6597 */ 6598 static int 6599 rack_timeout_rxt(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts) 6600 { 6601 struct inpcb *inp = tptoinpcb(tp); 6602 int32_t rexmt; 6603 int32_t retval = 0; 6604 bool isipv6; 6605 6606 if ((tp->t_flags & TF_GPUTINPROG) && 6607 (tp->t_rxtshift)) { 6608 /* 6609 * We have had a second timeout 6610 * measurements on successive rxt's are not profitable. 6611 * It is unlikely to be of any use (the network is 6612 * broken or the client went away). 6613 */ 6614 tp->t_flags &= ~TF_GPUTINPROG; 6615 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6616 rack->r_ctl.rc_gp_srtt /*flex1*/, 6617 tp->gput_seq, 6618 0, 0, 18, __LINE__, NULL, 0); 6619 } 6620 if (ctf_progress_timeout_check(tp, false)) { 6621 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6622 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 6623 return (-ETIMEDOUT); /* tcp_drop() */ 6624 } 6625 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 6626 rack->r_ctl.retran_during_recovery = 0; 6627 rack->rc_ack_required = 1; 6628 rack->r_ctl.dsack_byte_cnt = 0; 6629 if (IN_FASTRECOVERY(tp->t_flags)) 6630 tp->t_flags |= TF_WASFRECOVERY; 6631 else 6632 tp->t_flags &= ~TF_WASFRECOVERY; 6633 if (IN_CONGRECOVERY(tp->t_flags)) 6634 tp->t_flags |= TF_WASCRECOVERY; 6635 else 6636 tp->t_flags &= ~TF_WASCRECOVERY; 6637 if (TCPS_HAVEESTABLISHED(tp->t_state) && 6638 (tp->snd_una == tp->snd_max)) { 6639 /* Nothing outstanding .. nothing to do */ 6640 return (0); 6641 } 6642 if (rack->r_ctl.dsack_persist) { 6643 rack->r_ctl.dsack_persist--; 6644 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { 6645 rack->r_ctl.num_dsack = 0; 6646 } 6647 rack_log_dsack_event(rack, 1, __LINE__, 0, 0); 6648 } 6649 /* 6650 * Rack can only run one timer at a time, so we cannot 6651 * run a KEEPINIT (gating SYN sending) and a retransmit 6652 * timer for the SYN. 
So if we are in a front state and 6653 * have a KEEPINIT timer we need to check the first transmit 6654 * against now to see if we have exceeded the KEEPINIT time 6655 * (if one is set). 6656 */ 6657 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && 6658 (TP_KEEPINIT(tp) != 0)) { 6659 struct rack_sendmap *rsm; 6660 6661 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 6662 if (rsm) { 6663 /* Ok we have something outstanding to test keepinit with */ 6664 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && 6665 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { 6666 /* We have exceeded the KEEPINIT time */ 6667 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 6668 goto drop_it; 6669 } 6670 } 6671 } 6672 /* 6673 * Retransmission timer went off. Message has not been acked within 6674 * retransmit interval. Back off to a longer retransmit interval 6675 * and retransmit one segment. 6676 */ 6677 rack_remxt_tmr(tp); 6678 if ((rack->r_ctl.rc_resend == NULL) || 6679 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { 6680 /* 6681 * If the rwnd collapsed on 6682 * the one we are retransmitting 6683 * it does not count against the 6684 * rxt count. 6685 */ 6686 tp->t_rxtshift++; 6687 } 6688 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 6689 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 6690 drop_it: 6691 tp->t_rxtshift = TCP_MAXRXTSHIFT; 6692 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 6693 /* XXXGL: previously t_softerror was casted to uint16_t */ 6694 MPASS(tp->t_softerror >= 0); 6695 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; 6696 goto out; /* tcp_drop() */ 6697 } 6698 if (tp->t_state == TCPS_SYN_SENT) { 6699 /* 6700 * If the SYN was retransmitted, indicate CWND to be limited 6701 * to 1 segment in cc_conn_init(). 6702 */ 6703 tp->snd_cwnd = 1; 6704 } else if (tp->t_rxtshift == 1) { 6705 /* 6706 * first retransmit; record ssthresh and cwnd so they can be 6707 * recovered if this turns out to be a "bad" retransmit. A 6708 * retransmit is considered "bad" if an ACK for this segment 6709 * is received within RTT/2 interval; the assumption here is 6710 * that the ACK was already in flight. See "On Estimating 6711 * End-to-End Network Path Properties" by Allman and Paxson 6712 * for more details. 6713 */ 6714 tp->snd_cwnd_prev = tp->snd_cwnd; 6715 tp->snd_ssthresh_prev = tp->snd_ssthresh; 6716 tp->snd_recover_prev = tp->snd_recover; 6717 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); 6718 tp->t_flags |= TF_PREVVALID; 6719 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) 6720 tp->t_flags &= ~TF_PREVVALID; 6721 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 6722 if ((tp->t_state == TCPS_SYN_SENT) || 6723 (tp->t_state == TCPS_SYN_RECEIVED)) 6724 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; 6725 else 6726 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; 6727 6728 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, 6729 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); 6730 /* 6731 * We enter the path for PLMTUD if connection is established or, if 6732 * connection is FIN_WAIT_1 status, reason for the last is that if 6733 * amount of data we send is very small, we could send it in couple 6734 * of packets and process straight to FIN. In that case we won't 6735 * catch ESTABLISHED state. 6736 */ 6737 #ifdef INET6 6738 isipv6 = (inp->inp_vflag & INP_IPV6) ? 
true : false; 6739 #else 6740 isipv6 = false; 6741 #endif 6742 if (((V_tcp_pmtud_blackhole_detect == 1) || 6743 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 6744 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 6745 ((tp->t_state == TCPS_ESTABLISHED) || 6746 (tp->t_state == TCPS_FIN_WAIT_1))) { 6747 /* 6748 * Idea here is that at each stage of mtu probe (usually, 6749 * 1448 -> 1188 -> 524) should be given 2 chances to recover 6750 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 6751 * should take care of that. 6752 */ 6753 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 6754 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 6755 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 6756 tp->t_rxtshift % 2 == 0)) { 6757 /* 6758 * Enter Path MTU Black-hole Detection mechanism: - 6759 * Disable Path MTU Discovery (IP "DF" bit). - 6760 * Reduce MTU to lower value than what we negotiated 6761 * with peer. 6762 */ 6763 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 6764 /* Record that we may have found a black hole. */ 6765 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 6766 /* Keep track of previous MSS. */ 6767 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 6768 } 6769 6770 /* 6771 * Reduce the MSS to blackhole value or to the 6772 * default in an attempt to retransmit. 6773 */ 6774 #ifdef INET6 6775 if (isipv6 && 6776 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 6777 /* Use the sysctl tuneable blackhole MSS. */ 6778 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 6779 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6780 } else if (isipv6) { 6781 /* Use the default MSS. */ 6782 tp->t_maxseg = V_tcp_v6mssdflt; 6783 /* 6784 * Disable Path MTU Discovery when we switch 6785 * to minmss. 6786 */ 6787 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6788 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6789 } 6790 #endif 6791 #if defined(INET6) && defined(INET) 6792 else 6793 #endif 6794 #ifdef INET 6795 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 6796 /* Use the sysctl tuneable blackhole MSS. */ 6797 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 6798 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 6799 } else { 6800 /* Use the default MSS. */ 6801 tp->t_maxseg = V_tcp_mssdflt; 6802 /* 6803 * Disable Path MTU Discovery when we switch 6804 * to minmss. 6805 */ 6806 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 6807 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 6808 } 6809 #endif 6810 } else { 6811 /* 6812 * If further retransmissions are still unsuccessful 6813 * with a lowered MTU, maybe this isn't a blackhole 6814 * and we restore the previous MSS and blackhole 6815 * detection flags. The limit '6' is determined by 6816 * giving each probe stage (1448, 1188, 524) 2 6817 * chances to recover. 6818 */ 6819 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 6820 (tp->t_rxtshift >= 6)) { 6821 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 6822 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 6823 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 6824 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 6825 } 6826 } 6827 } 6828 /* 6829 * Disable RFC1323 and SACK if we haven't got any response to 6830 * our third SYN to work-around some broken terminal servers 6831 * (most of which have hopefully been retired) that have bad VJ 6832 * header compression code which trashes TCP segments containing 6833 * unknown-to-them TCP options. 
6834 */ 6835 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 6836 (tp->t_rxtshift == 3)) 6837 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); 6838 /* 6839 * If we backed off this far, our srtt estimate is probably bogus. 6840 * Clobber it so we'll take the next rtt measurement as our srtt; 6841 * move the current srtt into rttvar to keep the current retransmit 6842 * times until then. 6843 */ 6844 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 6845 #ifdef INET6 6846 if ((inp->inp_vflag & INP_IPV6) != 0) 6847 in6_losing(inp); 6848 else 6849 #endif 6850 in_losing(inp); 6851 tp->t_rttvar += tp->t_srtt; 6852 tp->t_srtt = 0; 6853 } 6854 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 6855 tp->snd_recover = tp->snd_max; 6856 tp->t_flags |= TF_ACKNOW; 6857 tp->t_rtttime = 0; 6858 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); 6859 out: 6860 return (retval); 6861 } 6862 6863 static int 6864 rack_process_timers(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, uint8_t hpts_calling, uint8_t *doing_tlp) 6865 { 6866 int32_t ret = 0; 6867 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 6868 6869 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 6870 (tp->t_flags & TF_GPUTINPROG)) { 6871 /* 6872 * We have a goodput in progress 6873 * and we have entered a late state. 6874 * Do we have enough data in the sb 6875 * to handle the GPUT request? 6876 */ 6877 uint32_t bytes; 6878 6879 bytes = tp->gput_ack - tp->gput_seq; 6880 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 6881 bytes += tp->gput_seq - tp->snd_una; 6882 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 6883 /* 6884 * There are not enough bytes in the socket 6885 * buffer that have been sent to cover this 6886 * measurement. Cancel it. 6887 */ 6888 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 6889 rack->r_ctl.rc_gp_srtt /*flex1*/, 6890 tp->gput_seq, 6891 0, 0, 18, __LINE__, NULL, 0); 6892 tp->t_flags &= ~TF_GPUTINPROG; 6893 } 6894 } 6895 if (timers == 0) { 6896 return (0); 6897 } 6898 if (tp->t_state == TCPS_LISTEN) { 6899 /* no timers on listen sockets */ 6900 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 6901 return (0); 6902 return (1); 6903 } 6904 if ((timers & PACE_TMR_RACK) && 6905 rack->rc_on_min_to) { 6906 /* 6907 * For the rack timer when we 6908 * are on a min-timeout (which means rrr_conf = 3) 6909 * we don't want to check the timer. It may 6910 * be going off for a pace and thats ok we 6911 * want to send the retransmit (if its ready). 6912 * 6913 * If its on a normal rack timer (non-min) then 6914 * we will check if its expired. 6915 */ 6916 goto skip_time_check; 6917 } 6918 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { 6919 uint32_t left; 6920 6921 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 6922 ret = -1; 6923 rack_log_to_processing(rack, cts, ret, 0); 6924 return (0); 6925 } 6926 if (hpts_calling == 0) { 6927 /* 6928 * A user send or queued mbuf (sack) has called us? We 6929 * return 0 and let the pacing guards 6930 * deal with it if they should or 6931 * should not cause a send. 6932 */ 6933 ret = -2; 6934 rack_log_to_processing(rack, cts, ret, 0); 6935 return (0); 6936 } 6937 /* 6938 * Ok our timer went off early and we are not paced false 6939 * alarm, go back to sleep. 
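 * The code below computes how much waiting is left
 * (rc_timer_exp - cts) and simply re-inserts us into the hpts
 * wheel for that remainder.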
6940 */ 6941 ret = -3; 6942 left = rack->r_ctl.rc_timer_exp - cts; 6943 tcp_hpts_insert(tptoinpcb(tp), HPTS_MS_TO_SLOTS(left)); 6944 rack_log_to_processing(rack, cts, ret, left); 6945 return (1); 6946 } 6947 skip_time_check: 6948 rack->rc_tmr_stopped = 0; 6949 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 6950 if (timers & PACE_TMR_DELACK) { 6951 ret = rack_timeout_delack(tp, rack, cts); 6952 } else if (timers & PACE_TMR_RACK) { 6953 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6954 rack->r_fast_output = 0; 6955 ret = rack_timeout_rack(tp, rack, cts); 6956 } else if (timers & PACE_TMR_TLP) { 6957 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6958 ret = rack_timeout_tlp(tp, rack, cts, doing_tlp); 6959 } else if (timers & PACE_TMR_RXT) { 6960 rack->r_ctl.rc_tlp_rxt_last_time = cts; 6961 rack->r_fast_output = 0; 6962 ret = rack_timeout_rxt(tp, rack, cts); 6963 } else if (timers & PACE_TMR_PERSIT) { 6964 ret = rack_timeout_persist(tp, rack, cts); 6965 } else if (timers & PACE_TMR_KEEP) { 6966 ret = rack_timeout_keepalive(tp, rack, cts); 6967 } 6968 rack_log_to_processing(rack, cts, ret, timers); 6969 return (ret); 6970 } 6971 6972 static void 6973 rack_timer_cancel(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int line) 6974 { 6975 struct timeval tv; 6976 uint32_t us_cts, flags_on_entry; 6977 uint8_t hpts_removed = 0; 6978 6979 flags_on_entry = rack->r_ctl.rc_hpts_flags; 6980 us_cts = tcp_get_usecs(&tv); 6981 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 6982 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || 6983 ((tp->snd_max - tp->snd_una) == 0))) { 6984 tcp_hpts_remove(rack->rc_inp); 6985 hpts_removed = 1; 6986 /* If we were not delayed cancel out the flag. */ 6987 if ((tp->snd_max - tp->snd_una) == 0) 6988 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 6989 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 6990 } 6991 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 6992 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 6993 if (tcp_in_hpts(rack->rc_inp) && 6994 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { 6995 /* 6996 * Canceling timer's when we have no output being 6997 * paced. We also must remove ourselves from the 6998 * hpts. 6999 */ 7000 tcp_hpts_remove(rack->rc_inp); 7001 hpts_removed = 1; 7002 } 7003 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 7004 } 7005 if (hpts_removed == 0) 7006 rack_log_to_cancel(rack, hpts_removed, line, us_cts, &tv, flags_on_entry); 7007 } 7008 7009 static int 7010 rack_stopall(struct tcpcb *tp) 7011 { 7012 struct tcp_rack *rack; 7013 rack = (struct tcp_rack *)tp->t_fb_ptr; 7014 rack->t_timers_stopped = 1; 7015 return (0); 7016 } 7017 7018 static void 7019 rack_stop_all_timers(struct tcpcb *tp) 7020 { 7021 struct tcp_rack *rack; 7022 7023 /* 7024 * Assure no timers are running. 
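 * Rack drives its own timers through hpts rather than the legacy
 * callouts, so the only state we record here is whether the
 * connection was already sitting in persist when we took over.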
7025 */ 7026 if (tcp_timer_active(tp, TT_PERSIST)) { 7027 /* We enter in persists, set the flag appropriately */ 7028 rack = (struct tcp_rack *)tp->t_fb_ptr; 7029 rack->rc_in_persist = 1; 7030 } 7031 } 7032 7033 static void 7034 rack_update_rsm(struct tcpcb *tp, struct tcp_rack *rack, 7035 struct rack_sendmap *rsm, uint64_t ts, uint16_t add_flag) 7036 { 7037 int32_t idx; 7038 7039 rsm->r_rtr_cnt++; 7040 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7041 rsm->r_dupack = 0; 7042 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { 7043 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; 7044 rsm->r_flags |= RACK_OVERMAX; 7045 } 7046 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { 7047 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 7048 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 7049 } 7050 idx = rsm->r_rtr_cnt - 1; 7051 rsm->r_tim_lastsent[idx] = ts; 7052 /* 7053 * Here we don't add in the len of send, since its already 7054 * in snduna <->snd_max. 7055 */ 7056 rsm->r_fas = ctf_flight_size(rack->rc_tp, 7057 rack->r_ctl.rc_sacked); 7058 if (rsm->r_flags & RACK_ACKED) { 7059 /* Problably MTU discovery messing with us */ 7060 rsm->r_flags &= ~RACK_ACKED; 7061 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7062 } 7063 if (rsm->r_in_tmap) { 7064 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7065 rsm->r_in_tmap = 0; 7066 } 7067 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7068 rsm->r_in_tmap = 1; 7069 /* Take off the must retransmit flag, if its on */ 7070 if (rsm->r_flags & RACK_MUST_RXT) { 7071 if (rack->r_must_retran) 7072 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 7073 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 7074 /* 7075 * We have retransmitted all we need. Clear 7076 * any must retransmit flags. 7077 */ 7078 rack->r_must_retran = 0; 7079 rack->r_ctl.rc_out_at_rto = 0; 7080 } 7081 rsm->r_flags &= ~RACK_MUST_RXT; 7082 } 7083 if (rsm->r_flags & RACK_SACK_PASSED) { 7084 /* We have retransmitted due to the SACK pass */ 7085 rsm->r_flags &= ~RACK_SACK_PASSED; 7086 rsm->r_flags |= RACK_WAS_SACKPASS; 7087 } 7088 } 7089 7090 static uint32_t 7091 rack_update_entry(struct tcpcb *tp, struct tcp_rack *rack, 7092 struct rack_sendmap *rsm, uint64_t ts, int32_t *lenp, uint16_t add_flag) 7093 { 7094 /* 7095 * We (re-)transmitted starting at rsm->r_start for some length 7096 * (possibly less than r_end. 7097 */ 7098 struct rack_sendmap *nrsm; 7099 #ifdef INVARIANTS 7100 struct rack_sendmap *insret; 7101 #endif 7102 uint32_t c_end; 7103 int32_t len; 7104 7105 len = *lenp; 7106 c_end = rsm->r_start + len; 7107 if (SEQ_GEQ(c_end, rsm->r_end)) { 7108 /* 7109 * We retransmitted the whole piece or more than the whole 7110 * slopping into the next rsm. 7111 */ 7112 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7113 if (c_end == rsm->r_end) { 7114 *lenp = 0; 7115 return (0); 7116 } else { 7117 int32_t act_len; 7118 7119 /* Hangs over the end return whats left */ 7120 act_len = rsm->r_end - rsm->r_start; 7121 *lenp = (len - act_len); 7122 return (rsm->r_end); 7123 } 7124 /* We don't get out of this block. */ 7125 } 7126 /* 7127 * Here we retransmitted less than the whole thing which means we 7128 * have to split this into what was transmitted and what was not. 7129 */ 7130 nrsm = rack_alloc_full_limit(rack); 7131 if (nrsm == NULL) { 7132 /* 7133 * We can't get memory, so lets not proceed. 7134 */ 7135 *lenp = 0; 7136 return (0); 7137 } 7138 /* 7139 * So here we are going to take the original rsm and make it what we 7140 * retransmitted. 
nrsm will be the tail portion we did not 7141 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 7142 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 7143 * 1, 6 and the new piece will be 6, 11. 7144 */ 7145 rack_clone_rsm(rack, nrsm, rsm, c_end); 7146 nrsm->r_dupack = 0; 7147 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 7148 #ifndef INVARIANTS 7149 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7150 #else 7151 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7152 if (insret != NULL) { 7153 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7154 nrsm, insret, rack, rsm); 7155 } 7156 #endif 7157 if (rsm->r_in_tmap) { 7158 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7159 nrsm->r_in_tmap = 1; 7160 } 7161 rsm->r_flags &= (~RACK_HAS_FIN); 7162 rack_update_rsm(tp, rack, rsm, ts, add_flag); 7163 /* Log a split of rsm into rsm and nrsm */ 7164 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7165 *lenp = 0; 7166 return (0); 7167 } 7168 7169 static void 7170 rack_log_output(struct tcpcb *tp, struct tcpopt *to, int32_t len, 7171 uint32_t seq_out, uint16_t th_flags, int32_t err, uint64_t cts, 7172 struct rack_sendmap *hintrsm, uint16_t add_flag, struct mbuf *s_mb, uint32_t s_moff, int hw_tls) 7173 { 7174 struct tcp_rack *rack; 7175 struct rack_sendmap *rsm, *nrsm, fe; 7176 #ifdef INVARIANTS 7177 struct rack_sendmap *insret; 7178 #endif 7179 register uint32_t snd_max, snd_una; 7180 7181 /* 7182 * Add to the RACK log of packets in flight or retransmitted. If 7183 * there is a TS option we will use the TS echoed, if not we will 7184 * grab a TS. 7185 * 7186 * Retransmissions will increment the count and move the ts to its 7187 * proper place. Note that if options do not include TS's then we 7188 * won't be able to effectively use the ACK for an RTT on a retran. 7189 * 7190 * Notes about r_start and r_end. Lets consider a send starting at 7191 * sequence 1 for 10 bytes. In such an example the r_start would be 7192 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 7193 * This means that r_end is actually the first sequence for the next 7194 * slot (11). 7195 * 7196 */ 7197 /* 7198 * If err is set what do we do XXXrrs? should we not add the thing? 7199 * -- i.e. return if err != 0 or should we pretend we sent it? -- 7200 * i.e. proceed with add ** do this for now. 7201 */ 7202 INP_WLOCK_ASSERT(tptoinpcb(tp)); 7203 if (err) 7204 /* 7205 * We don't log errors -- we could but snd_max does not 7206 * advance in this case either. 7207 */ 7208 return; 7209 7210 if (th_flags & TH_RST) { 7211 /* 7212 * We don't log resets and we return immediately from 7213 * sending 7214 */ 7215 return; 7216 } 7217 rack = (struct tcp_rack *)tp->t_fb_ptr; 7218 snd_una = tp->snd_una; 7219 snd_max = tp->snd_max; 7220 if (th_flags & (TH_SYN | TH_FIN)) { 7221 /* 7222 * The call to rack_log_output is made before bumping 7223 * snd_max. This means we can record one extra byte on a SYN 7224 * or FIN if seq_out is adding more on and a FIN is present 7225 * (and we are not resending). 7226 */ 7227 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) 7228 len++; 7229 if (th_flags & TH_FIN) 7230 len++; 7231 if (SEQ_LT(snd_max, tp->snd_nxt)) { 7232 /* 7233 * The add/update as not been done for the FIN/SYN 7234 * yet. 7235 */ 7236 snd_max = tp->snd_nxt; 7237 } 7238 } 7239 if (SEQ_LEQ((seq_out + len), snd_una)) { 7240 /* Are sending an old segment to induce an ack (keep-alive)? 
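 * Everything at or below snd_una is already accounted for, so a
 * send that ends there carries no new data for the map and we
 * skip logging it.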
*/ 7241 return; 7242 } 7243 if (SEQ_LT(seq_out, snd_una)) { 7244 /* huh? should we panic? */ 7245 uint32_t end; 7246 7247 end = seq_out + len; 7248 seq_out = snd_una; 7249 if (SEQ_GEQ(end, seq_out)) 7250 len = end - seq_out; 7251 else 7252 len = 0; 7253 } 7254 if (len == 0) { 7255 /* We don't log zero window probes */ 7256 return; 7257 } 7258 if (IN_FASTRECOVERY(tp->t_flags)) { 7259 rack->r_ctl.rc_prr_out += len; 7260 } 7261 /* First question is it a retransmission or new? */ 7262 if (seq_out == snd_max) { 7263 /* Its new */ 7264 again: 7265 rsm = rack_alloc(rack); 7266 if (rsm == NULL) { 7267 /* 7268 * Hmm out of memory and the tcb got destroyed while 7269 * we tried to wait. 7270 */ 7271 return; 7272 } 7273 if (th_flags & TH_FIN) { 7274 rsm->r_flags = RACK_HAS_FIN|add_flag; 7275 } else { 7276 rsm->r_flags = add_flag; 7277 } 7278 if (hw_tls) 7279 rsm->r_hw_tls = 1; 7280 rsm->r_tim_lastsent[0] = cts; 7281 rsm->r_rtr_cnt = 1; 7282 rsm->r_rtr_bytes = 0; 7283 if (th_flags & TH_SYN) { 7284 /* The data space is one beyond snd_una */ 7285 rsm->r_flags |= RACK_HAS_SYN; 7286 } 7287 rsm->r_start = seq_out; 7288 rsm->r_end = rsm->r_start + len; 7289 rsm->r_dupack = 0; 7290 /* 7291 * save off the mbuf location that 7292 * sndmbuf_noadv returned (which is 7293 * where we started copying from).. 7294 */ 7295 rsm->m = s_mb; 7296 rsm->soff = s_moff; 7297 /* 7298 * Here we do add in the len of send, since its not yet 7299 * reflected in in snduna <->snd_max 7300 */ 7301 rsm->r_fas = (ctf_flight_size(rack->rc_tp, 7302 rack->r_ctl.rc_sacked) + 7303 (rsm->r_end - rsm->r_start)); 7304 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ 7305 if (rsm->m) { 7306 if (rsm->m->m_len <= rsm->soff) { 7307 /* 7308 * XXXrrs Question, will this happen? 7309 * 7310 * If sbsndptr is set at the correct place 7311 * then s_moff should always be somewhere 7312 * within rsm->m. But if the sbsndptr was 7313 * off then that won't be true. If it occurs 7314 * we need to walkout to the correct location. 7315 */ 7316 struct mbuf *lm; 7317 7318 lm = rsm->m; 7319 while (lm->m_len <= rsm->soff) { 7320 rsm->soff -= lm->m_len; 7321 lm = lm->m_next; 7322 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", 7323 __func__, rack, s_moff, s_mb, rsm->soff)); 7324 } 7325 rsm->m = lm; 7326 } 7327 rsm->orig_m_len = rsm->m->m_len; 7328 } else 7329 rsm->orig_m_len = 0; 7330 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 7331 /* Log a new rsm */ 7332 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_NEW, 0, __LINE__); 7333 #ifndef INVARIANTS 7334 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7335 #else 7336 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7337 if (insret != NULL) { 7338 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7339 nrsm, insret, rack, rsm); 7340 } 7341 #endif 7342 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 7343 rsm->r_in_tmap = 1; 7344 /* 7345 * Special case detection, is there just a single 7346 * packet outstanding when we are not in recovery? 7347 * 7348 * If this is true mark it so. 7349 */ 7350 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 7351 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { 7352 struct rack_sendmap *prsm; 7353 7354 prsm = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 7355 if (prsm) 7356 prsm->r_one_out_nr = 1; 7357 } 7358 return; 7359 } 7360 /* 7361 * If we reach here its a retransmission and we need to find it. 
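 * The search order below is: try the caller's hint first, then an
 * RB_FIND keyed on r_start, and if seq_out lands in the middle of
 * an rsm we split off the front piece before updating it.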
7362 */ 7363 memset(&fe, 0, sizeof(fe)); 7364 more: 7365 if (hintrsm && (hintrsm->r_start == seq_out)) { 7366 rsm = hintrsm; 7367 hintrsm = NULL; 7368 } else { 7369 /* No hints sorry */ 7370 rsm = NULL; 7371 } 7372 if ((rsm) && (rsm->r_start == seq_out)) { 7373 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7374 if (len == 0) { 7375 return; 7376 } else { 7377 goto more; 7378 } 7379 } 7380 /* Ok it was not the last pointer go through it the hard way. */ 7381 refind: 7382 fe.r_start = seq_out; 7383 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 7384 if (rsm) { 7385 if (rsm->r_start == seq_out) { 7386 seq_out = rack_update_entry(tp, rack, rsm, cts, &len, add_flag); 7387 if (len == 0) { 7388 return; 7389 } else { 7390 goto refind; 7391 } 7392 } 7393 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 7394 /* Transmitted within this piece */ 7395 /* 7396 * Ok we must split off the front and then let the 7397 * update do the rest 7398 */ 7399 nrsm = rack_alloc_full_limit(rack); 7400 if (nrsm == NULL) { 7401 rack_update_rsm(tp, rack, rsm, cts, add_flag); 7402 return; 7403 } 7404 /* 7405 * copy rsm to nrsm and then trim the front of rsm 7406 * to not include this part. 7407 */ 7408 rack_clone_rsm(rack, nrsm, rsm, seq_out); 7409 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 0, __LINE__); 7410 #ifndef INVARIANTS 7411 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7412 #else 7413 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 7414 if (insret != NULL) { 7415 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 7416 nrsm, insret, rack, rsm); 7417 } 7418 #endif 7419 if (rsm->r_in_tmap) { 7420 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7421 nrsm->r_in_tmap = 1; 7422 } 7423 rsm->r_flags &= (~RACK_HAS_FIN); 7424 seq_out = rack_update_entry(tp, rack, nrsm, cts, &len, add_flag); 7425 if (len == 0) { 7426 return; 7427 } else if (len > 0) 7428 goto refind; 7429 } 7430 } 7431 /* 7432 * Hmm not found in map did they retransmit both old and on into the 7433 * new? 7434 */ 7435 if (seq_out == tp->snd_max) { 7436 goto again; 7437 } else if (SEQ_LT(seq_out, tp->snd_max)) { 7438 #ifdef INVARIANTS 7439 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 7440 seq_out, len, tp->snd_una, tp->snd_max); 7441 printf("Starting Dump of all rack entries\n"); 7442 RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) { 7443 printf("rsm:%p start:%u end:%u\n", 7444 rsm, rsm->r_start, rsm->r_end); 7445 } 7446 printf("Dump complete\n"); 7447 panic("seq_out not found rack:%p tp:%p", 7448 rack, tp); 7449 #endif 7450 } else { 7451 #ifdef INVARIANTS 7452 /* 7453 * Hmm beyond sndmax? (only if we are using the new rtt-pack 7454 * flag) 7455 */ 7456 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 7457 seq_out, len, tp->snd_max, tp); 7458 #endif 7459 } 7460 } 7461 7462 /* 7463 * Record one of the RTT updates from an ack into 7464 * our sample structure. 
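 * The confidence argument is roughly: 2 for a SACK'd sample, 1 for
 * a cum-ack we believe was not app-limited, and 0 when the sample
 * is suspect (app-limited tail, just-returned rsm, etc.).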
7465 */ 7466 7467 static void 7468 tcp_rack_xmit_timer(struct tcp_rack *rack, int32_t rtt, uint32_t len, uint32_t us_rtt, 7469 int confidence, struct rack_sendmap *rsm, uint16_t rtrcnt) 7470 { 7471 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7472 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { 7473 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; 7474 } 7475 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7476 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { 7477 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; 7478 } 7479 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { 7480 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) 7481 rack->r_ctl.rc_gp_lowrtt = us_rtt; 7482 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) 7483 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 7484 } 7485 if ((confidence == 1) && 7486 ((rsm == NULL) || 7487 (rsm->r_just_ret) || 7488 (rsm->r_one_out_nr && 7489 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { 7490 /* 7491 * If the rsm had a just return 7492 * hit it then we can't trust the 7493 * rtt measurement for buffer deterimination 7494 * Note that a confidence of 2, indicates 7495 * SACK'd which overrides the r_just_ret or 7496 * the r_one_out_nr. If it was a CUM-ACK and 7497 * we had only two outstanding, but get an 7498 * ack for only 1. Then that also lowers our 7499 * confidence. 7500 */ 7501 confidence = 0; 7502 } 7503 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || 7504 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { 7505 if (rack->r_ctl.rack_rs.confidence == 0) { 7506 /* 7507 * We take anything with no current confidence 7508 * saved. 7509 */ 7510 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7511 rack->r_ctl.rack_rs.confidence = confidence; 7512 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7513 } else if (confidence || rack->r_ctl.rack_rs.confidence) { 7514 /* 7515 * Once we have a confident number, 7516 * we can update it with a smaller 7517 * value since this confident number 7518 * may include the DSACK time until 7519 * the next segment (the second one) arrived. 7520 */ 7521 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; 7522 rack->r_ctl.rack_rs.confidence = confidence; 7523 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; 7524 } 7525 } 7526 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); 7527 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; 7528 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; 7529 rack->r_ctl.rack_rs.rs_rtt_cnt++; 7530 } 7531 7532 /* 7533 * Collect new round-trip time estimate 7534 * and update averages and current timeout. 
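 * Which sample is used is chosen by rc_rate_sample_method: the
 * lowest, the highest, or the average rtt observed within the ack.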
7535 */ 7536 static void 7537 tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp) 7538 { 7539 int32_t delta; 7540 int32_t rtt; 7541 7542 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) 7543 /* No valid sample */ 7544 return; 7545 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { 7546 /* We are to use the lowest RTT seen in a single ack */ 7547 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 7548 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { 7549 /* We are to use the highest RTT seen in a single ack */ 7550 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; 7551 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { 7552 /* We are to use the average RTT seen in a single ack */ 7553 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / 7554 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); 7555 } else { 7556 #ifdef INVARIANTS 7557 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); 7558 #endif 7559 return; 7560 } 7561 if (rtt == 0) 7562 rtt = 1; 7563 if (rack->rc_gp_rtt_set == 0) { 7564 /* 7565 * With no RTT we have to accept 7566 * even one we are not confident of. 7567 */ 7568 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; 7569 rack->rc_gp_rtt_set = 1; 7570 } else if (rack->r_ctl.rack_rs.confidence) { 7571 /* update the running gp srtt */ 7572 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); 7573 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; 7574 } 7575 if (rack->r_ctl.rack_rs.confidence) { 7576 /* 7577 * record the low and high for highly buffered path computation, 7578 * we only do this if we are confident (not a retransmission). 7579 */ 7580 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { 7581 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7582 } 7583 if (rack->rc_highly_buffered == 0) { 7584 /* 7585 * Currently once we declare a path has 7586 * highly buffered there is no going 7587 * back, which may be a problem... 7588 */ 7589 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { 7590 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, 7591 rack->r_ctl.rc_highest_us_rtt, 7592 rack->r_ctl.rc_lowest_us_rtt, 7593 RACK_RTTS_SEEHBP); 7594 rack->rc_highly_buffered = 1; 7595 } 7596 } 7597 } 7598 if ((rack->r_ctl.rack_rs.confidence) || 7599 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { 7600 /* 7601 * If we are highly confident of it <or> it was 7602 * never retransmitted we accept it as the last us_rtt. 7603 */ 7604 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7605 /* The lowest rtt can be set if its was not retransmited */ 7606 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { 7607 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; 7608 if (rack->r_ctl.rc_lowest_us_rtt == 0) 7609 rack->r_ctl.rc_lowest_us_rtt = 1; 7610 } 7611 } 7612 rack = (struct tcp_rack *)tp->t_fb_ptr; 7613 if (tp->t_srtt != 0) { 7614 /* 7615 * We keep a simple srtt in microseconds, like our rtt 7616 * measurement. We don't need to do any tricks with shifting 7617 * etc. Instead we just add in 1/8th of the new measurement 7618 * and subtract out 1/8 of the old srtt. We do the same with 7619 * the variance after finding the absolute value of the 7620 * difference between this sample and the current srtt. 
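 * As a rough worked example (numbers illustrative): with a current
 * srtt of 8000 usec and a new measurement of 9600 usec, the update
 * below gives srtt = 8000 - (8000 >> 3) + (9600 >> 3) = 8200 usec;
 * delta = 8000 - 9600 = -1600, so after shedding an eighth of its
 * old value the variance gains |delta| >> 3 = 200 usec.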
7621 */ 7622 delta = tp->t_srtt - rtt; 7623 /* Take off 1/8th of the current sRTT */ 7624 tp->t_srtt -= (tp->t_srtt >> 3); 7625 /* Add in 1/8th of the new RTT just measured */ 7626 tp->t_srtt += (rtt >> 3); 7627 if (tp->t_srtt <= 0) 7628 tp->t_srtt = 1; 7629 /* Now lets make the absolute value of the variance */ 7630 if (delta < 0) 7631 delta = -delta; 7632 /* Subtract out 1/8th */ 7633 tp->t_rttvar -= (tp->t_rttvar >> 3); 7634 /* Add in 1/8th of the new variance we just saw */ 7635 tp->t_rttvar += (delta >> 3); 7636 if (tp->t_rttvar <= 0) 7637 tp->t_rttvar = 1; 7638 } else { 7639 /* 7640 * No rtt measurement yet - use the unsmoothed rtt. Set the 7641 * variance to half the rtt (so our first retransmit happens 7642 * at 3*rtt). 7643 */ 7644 tp->t_srtt = rtt; 7645 tp->t_rttvar = rtt >> 1; 7646 } 7647 rack->rc_srtt_measure_made = 1; 7648 KMOD_TCPSTAT_INC(tcps_rttupdated); 7649 tp->t_rttupdated++; 7650 #ifdef STATS 7651 if (rack_stats_gets_ms_rtt == 0) { 7652 /* Send in the microsecond rtt used for rxt timeout purposes */ 7653 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); 7654 } else if (rack_stats_gets_ms_rtt == 1) { 7655 /* Send in the millisecond rtt used for rxt timeout purposes */ 7656 int32_t ms_rtt; 7657 7658 /* Round up */ 7659 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7660 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7661 } else if (rack_stats_gets_ms_rtt == 2) { 7662 /* Send in the millisecond rtt has close to the path RTT as we can get */ 7663 int32_t ms_rtt; 7664 7665 /* Round up */ 7666 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; 7667 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); 7668 } else { 7669 /* Send in the microsecond rtt has close to the path RTT as we can get */ 7670 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); 7671 } 7672 7673 #endif 7674 /* 7675 * the retransmit should happen at rtt + 4 * rttvar. Because of the 7676 * way we do the smoothing, srtt and rttvar will each average +1/2 7677 * tick of bias. When we compute the retransmit timer, we want 1/2 7678 * tick of rounding and 1 extra tick because of +-1/2 tick 7679 * uncertainty in the firing of the timer. The bias will give us 7680 * exactly the 1.5 tick we need. But, because the bias is 7681 * statistical, we have to test that we don't drop below the minimum 7682 * feasible timer (which is 2 ticks). 7683 */ 7684 tp->t_rxtshift = 0; 7685 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7686 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); 7687 rack_log_rtt_sample(rack, rtt); 7688 tp->t_softerror = 0; 7689 } 7690 7691 7692 static void 7693 rack_apply_updated_usrtt(struct tcp_rack *rack, uint32_t us_rtt, uint32_t us_cts) 7694 { 7695 /* 7696 * Apply to filter the inbound us-rtt at us_cts. 7697 */ 7698 uint32_t old_rtt; 7699 7700 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); 7701 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, 7702 us_rtt, us_cts); 7703 if (old_rtt > us_rtt) { 7704 /* We just hit a new lower rtt time */ 7705 rack_log_rtt_shrinks(rack, us_cts, old_rtt, 7706 __LINE__, RACK_RTTS_NEWRTT); 7707 /* 7708 * Only count it if its lower than what we saw within our 7709 * calculated range. 
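 * For example, with rack_probertt_lower_within set to 10 percent,
 * seeing a meaningfully lower rtt inside the last 10 percent of the
 * normal rack_time_between_probertt gap is taken as a hint that a
 * peer flow is probing, and we enter probe-rtt early as well.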
7710 */ 7711 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { 7712 if (rack_probertt_lower_within && 7713 rack->rc_gp_dyn_mul && 7714 (rack->use_fixed_rate == 0) && 7715 (rack->rc_always_pace)) { 7716 /* 7717 * We are seeing a new lower rtt very close 7718 * to the time that we would have entered probe-rtt. 7719 * This is probably due to the fact that a peer flow 7720 * has entered probe-rtt. Lets go in now too. 7721 */ 7722 uint32_t val; 7723 7724 val = rack_probertt_lower_within * rack_time_between_probertt; 7725 val /= 100; 7726 if ((rack->in_probe_rtt == 0) && 7727 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { 7728 rack_enter_probertt(rack, us_cts); 7729 } 7730 } 7731 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 7732 } 7733 } 7734 } 7735 7736 static int 7737 rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack, 7738 struct rack_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, tcp_seq th_ack) 7739 { 7740 uint32_t us_rtt; 7741 int32_t i, all; 7742 uint32_t t, len_acked; 7743 7744 if ((rsm->r_flags & RACK_ACKED) || 7745 (rsm->r_flags & RACK_WAS_ACKED)) 7746 /* Already done */ 7747 return (0); 7748 if (rsm->r_no_rtt_allowed) { 7749 /* Not allowed */ 7750 return (0); 7751 } 7752 if (ack_type == CUM_ACKED) { 7753 if (SEQ_GT(th_ack, rsm->r_end)) { 7754 len_acked = rsm->r_end - rsm->r_start; 7755 all = 1; 7756 } else { 7757 len_acked = th_ack - rsm->r_start; 7758 all = 0; 7759 } 7760 } else { 7761 len_acked = rsm->r_end - rsm->r_start; 7762 all = 0; 7763 } 7764 if (rsm->r_rtr_cnt == 1) { 7765 7766 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7767 if ((int)t <= 0) 7768 t = 1; 7769 if (!tp->t_rttlow || tp->t_rttlow > t) 7770 tp->t_rttlow = t; 7771 if (!rack->r_ctl.rc_rack_min_rtt || 7772 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7773 rack->r_ctl.rc_rack_min_rtt = t; 7774 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7775 rack->r_ctl.rc_rack_min_rtt = 1; 7776 } 7777 } 7778 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) 7779 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7780 else 7781 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 7782 if (us_rtt == 0) 7783 us_rtt = 1; 7784 if (CC_ALGO(tp)->rttsample != NULL) { 7785 /* Kick the RTT to the CC */ 7786 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7787 } 7788 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 7789 if (ack_type == SACKED) { 7790 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); 7791 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); 7792 } else { 7793 /* 7794 * We need to setup what our confidence 7795 * is in this ack. 7796 * 7797 * If the rsm was app limited and it is 7798 * less than a mss in length (the end 7799 * of the send) then we have a gap. If we 7800 * were app limited but say we were sending 7801 * multiple MSS's then we are more confident 7802 * int it. 7803 * 7804 * When we are not app-limited then we see if 7805 * the rsm is being included in the current 7806 * measurement, we tell this by the app_limited_needs_set 7807 * flag. 7808 * 7809 * Note that being cwnd blocked is not applimited 7810 * as well as the pacing delay between packets which 7811 * are sending only 1 or 2 MSS's also will show up 7812 * in the RTT. 
We probably need to examine this algorithm 7813 * a bit more and enhance it to account for the delay 7814 * between rsm's. We could do that by saving off the 7815 * pacing delay of each rsm (in an rsm) and then 7816 * factoring that in somehow though for now I am 7817 * not sure how :) 7818 */ 7819 int calc_conf = 0; 7820 7821 if (rsm->r_flags & RACK_APP_LIMITED) { 7822 if (all && (len_acked <= ctf_fixed_maxseg(tp))) 7823 calc_conf = 0; 7824 else 7825 calc_conf = 1; 7826 } else if (rack->app_limited_needs_set == 0) { 7827 calc_conf = 1; 7828 } else { 7829 calc_conf = 0; 7830 } 7831 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); 7832 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 7833 calc_conf, rsm, rsm->r_rtr_cnt); 7834 } 7835 if ((rsm->r_flags & RACK_TLP) && 7836 (!IN_FASTRECOVERY(tp->t_flags))) { 7837 /* Segment was a TLP and our retrans matched */ 7838 if (rack->r_ctl.rc_tlp_cwnd_reduce) { 7839 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 7840 } 7841 } 7842 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7843 /* New more recent rack_tmit_time */ 7844 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7845 rack->rc_rack_rtt = t; 7846 } 7847 return (1); 7848 } 7849 /* 7850 * We clear the soft/rxtshift since we got an ack. 7851 * There is no assurance we will call the commit() function 7852 * so we need to clear these to avoid incorrect handling. 7853 */ 7854 tp->t_rxtshift = 0; 7855 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 7856 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 7857 tp->t_softerror = 0; 7858 if (to && (to->to_flags & TOF_TS) && 7859 (ack_type == CUM_ACKED) && 7860 (to->to_tsecr) && 7861 ((rsm->r_flags & RACK_OVERMAX) == 0)) { 7862 /* 7863 * Now which timestamp does it match? In this block the ACK 7864 * must be coming from a previous transmission. 7865 */ 7866 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7867 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { 7868 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7869 if ((int)t <= 0) 7870 t = 1; 7871 if (CC_ALGO(tp)->rttsample != NULL) { 7872 /* 7873 * Kick the RTT to the CC, here 7874 * we lie a bit in that we know the 7875 * retransmission is correct even though 7876 * we retransmitted. This is because 7877 * we match the timestamps. 7878 */ 7879 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) 7880 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; 7881 else 7882 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; 7883 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); 7884 } 7885 if ((i + 1) < rsm->r_rtr_cnt) { 7886 /* 7887 * The peer ack'd from our previous 7888 * transmission. We have a spurious 7889 * retransmission and thus we dont 7890 * want to update our rack_rtt. 7891 * 7892 * Hmm should there be a CC revert here? 
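 * (The matched timestamp shows the peer acked an earlier
 * transmission, i.e. the retransmit was spurious; a revert of the
 * congestion state, along the lines of the bad-retransmit handling
 * above, might be reasonable but is not attempted.)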
7893 * 7894 */ 7895 return (0); 7896 } 7897 if (!tp->t_rttlow || tp->t_rttlow > t) 7898 tp->t_rttlow = t; 7899 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7900 rack->r_ctl.rc_rack_min_rtt = t; 7901 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7902 rack->r_ctl.rc_rack_min_rtt = 1; 7903 } 7904 } 7905 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, 7906 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) { 7907 /* New more recent rack_tmit_time */ 7908 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 7909 rack->rc_rack_rtt = t; 7910 } 7911 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); 7912 tcp_rack_xmit_timer(rack, t + 1, len_acked, t, 0, rsm, 7913 rsm->r_rtr_cnt); 7914 return (1); 7915 } 7916 } 7917 goto ts_not_found; 7918 } else { 7919 /* 7920 * Ok its a SACK block that we retransmitted. or a windows 7921 * machine without timestamps. We can tell nothing from the 7922 * time-stamp since its not there or the time the peer last 7923 * recieved a segment that moved forward its cum-ack point. 7924 */ 7925 ts_not_found: 7926 i = rsm->r_rtr_cnt - 1; 7927 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; 7928 if ((int)t <= 0) 7929 t = 1; 7930 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7931 /* 7932 * We retransmitted and the ack came back in less 7933 * than the smallest rtt we have observed. We most 7934 * likely did an improper retransmit as outlined in 7935 * 6.2 Step 2 point 2 in the rack-draft so we 7936 * don't want to update our rack_rtt. We in 7937 * theory (in future) might want to think about reverting our 7938 * cwnd state but we won't for now. 7939 */ 7940 return (0); 7941 } else if (rack->r_ctl.rc_rack_min_rtt) { 7942 /* 7943 * We retransmitted it and the retransmit did the 7944 * job. 7945 */ 7946 if (!rack->r_ctl.rc_rack_min_rtt || 7947 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { 7948 rack->r_ctl.rc_rack_min_rtt = t; 7949 if (rack->r_ctl.rc_rack_min_rtt == 0) { 7950 rack->r_ctl.rc_rack_min_rtt = 1; 7951 } 7952 } 7953 if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, (uint32_t)rsm->r_tim_lastsent[i])) { 7954 /* New more recent rack_tmit_time */ 7955 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; 7956 rack->rc_rack_rtt = t; 7957 } 7958 return (1); 7959 } 7960 } 7961 return (0); 7962 } 7963 7964 /* 7965 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 7966 */ 7967 static void 7968 rack_log_sack_passed(struct tcpcb *tp, 7969 struct tcp_rack *rack, struct rack_sendmap *rsm) 7970 { 7971 struct rack_sendmap *nrsm; 7972 7973 nrsm = rsm; 7974 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, 7975 rack_head, r_tnext) { 7976 if (nrsm == rsm) { 7977 /* Skip orginal segment he is acked */ 7978 continue; 7979 } 7980 if (nrsm->r_flags & RACK_ACKED) { 7981 /* 7982 * Skip ack'd segments, though we 7983 * should not see these, since tmap 7984 * should not have ack'd segments. 7985 */ 7986 continue; 7987 } 7988 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { 7989 /* 7990 * If the peer dropped the rwnd on 7991 * these then we don't worry about them. 7992 */ 7993 continue; 7994 } 7995 if (nrsm->r_flags & RACK_SACK_PASSED) { 7996 /* 7997 * We found one that is already marked 7998 * passed, we have been here before and 7999 * so all others below this are marked. 
8000 */ 8001 break; 8002 } 8003 nrsm->r_flags |= RACK_SACK_PASSED; 8004 nrsm->r_flags &= ~RACK_WAS_SACKPASS; 8005 } 8006 } 8007 8008 static void 8009 rack_need_set_test(struct tcpcb *tp, 8010 struct tcp_rack *rack, 8011 struct rack_sendmap *rsm, 8012 tcp_seq th_ack, 8013 int line, 8014 int use_which) 8015 { 8016 8017 if ((tp->t_flags & TF_GPUTINPROG) && 8018 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8019 /* 8020 * We were app limited, and this ack 8021 * butts up or goes beyond the point where we want 8022 * to start our next measurement. We need 8023 * to record the new gput_ts as here and 8024 * possibly update the start sequence. 8025 */ 8026 uint32_t seq, ts; 8027 8028 if (rsm->r_rtr_cnt > 1) { 8029 /* 8030 * This is a retransmit, can we 8031 * really make any assessment at this 8032 * point? We are not really sure of 8033 * the timestamp, is it this or the 8034 * previous transmission? 8035 * 8036 * Lets wait for something better that 8037 * is not retransmitted. 8038 */ 8039 return; 8040 } 8041 seq = tp->gput_seq; 8042 ts = tp->gput_ts; 8043 rack->app_limited_needs_set = 0; 8044 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 8045 /* Do we start at a new end? */ 8046 if ((use_which == RACK_USE_BEG) && 8047 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { 8048 /* 8049 * When we get an ACK that just eats 8050 * up some of the rsm, we set RACK_USE_BEG 8051 * since whats at r_start (i.e. th_ack) 8052 * is left unacked and thats where the 8053 * measurement not starts. 8054 */ 8055 tp->gput_seq = rsm->r_start; 8056 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8057 } 8058 if ((use_which == RACK_USE_END) && 8059 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { 8060 /* 8061 * We use the end when the cumack 8062 * is moving forward and completely 8063 * deleting the rsm passed so basically 8064 * r_end holds th_ack. 8065 * 8066 * For SACK's we also want to use the end 8067 * since this piece just got sacked and 8068 * we want to target anything after that 8069 * in our measurement. 8070 */ 8071 tp->gput_seq = rsm->r_end; 8072 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8073 } 8074 if (use_which == RACK_USE_END_OR_THACK) { 8075 /* 8076 * special case for ack moving forward, 8077 * not a sack, we need to move all the 8078 * way up to where this ack cum-ack moves 8079 * to. 8080 */ 8081 if (SEQ_GT(th_ack, rsm->r_end)) 8082 tp->gput_seq = th_ack; 8083 else 8084 tp->gput_seq = rsm->r_end; 8085 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8086 } 8087 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { 8088 /* 8089 * We moved beyond this guy's range, re-calculate 8090 * the new end point. 8091 */ 8092 if (rack->rc_gp_filled == 0) { 8093 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 8094 } else { 8095 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 8096 } 8097 } 8098 /* 8099 * We are moving the goal post, we may be able to clear the 8100 * measure_saw_probe_rtt flag. 
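 * Once gput_seq has advanced past the snd_max recorded when
 * probe-rtt exited, the measurement no longer overlaps the
 * probe-rtt episode and the flag can safely be dropped.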
8101 */ 8102 if ((rack->in_probe_rtt == 0) && 8103 (rack->measure_saw_probe_rtt) && 8104 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 8105 rack->measure_saw_probe_rtt = 0; 8106 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, 8107 seq, tp->gput_seq, 0, 5, line, NULL, 0); 8108 if (rack->rc_gp_filled && 8109 ((tp->gput_ack - tp->gput_seq) < 8110 max(rc_init_window(rack), (MIN_GP_WIN * 8111 ctf_fixed_maxseg(tp))))) { 8112 uint32_t ideal_amount; 8113 8114 ideal_amount = rack_get_measure_window(tp, rack); 8115 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { 8116 /* 8117 * There is no sense of continuing this measurement 8118 * because its too small to gain us anything we 8119 * trust. Skip it and that way we can start a new 8120 * measurement quicker. 8121 */ 8122 tp->t_flags &= ~TF_GPUTINPROG; 8123 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, 8124 0, 0, 0, 6, __LINE__, NULL, 0); 8125 } else { 8126 /* 8127 * Reset the window further out. 8128 */ 8129 tp->gput_ack = tp->gput_seq + ideal_amount; 8130 } 8131 } 8132 } 8133 } 8134 8135 static inline int 8136 is_rsm_inside_declared_tlp_block(struct tcp_rack *rack, struct rack_sendmap *rsm) 8137 { 8138 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { 8139 /* Behind our TLP definition or right at */ 8140 return (0); 8141 } 8142 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { 8143 /* The start is beyond or right at our end of TLP definition */ 8144 return (0); 8145 } 8146 /* It has to be a sub-part of the original TLP recorded */ 8147 return (1); 8148 } 8149 8150 8151 static uint32_t 8152 rack_proc_sack_blk(struct tcpcb *tp, struct tcp_rack *rack, struct sackblk *sack, 8153 struct tcpopt *to, struct rack_sendmap **prsm, uint32_t cts, int *moved_two) 8154 { 8155 uint32_t start, end, changed = 0; 8156 struct rack_sendmap stack_map; 8157 struct rack_sendmap *rsm, *nrsm, fe, *prev, *next; 8158 #ifdef INVARIANTS 8159 struct rack_sendmap *insret; 8160 #endif 8161 int32_t used_ref = 1; 8162 int moved = 0; 8163 8164 start = sack->start; 8165 end = sack->end; 8166 rsm = *prsm; 8167 memset(&fe, 0, sizeof(fe)); 8168 do_rest_ofb: 8169 if ((rsm == NULL) || 8170 (SEQ_LT(end, rsm->r_start)) || 8171 (SEQ_GEQ(start, rsm->r_end)) || 8172 (SEQ_LT(start, rsm->r_start))) { 8173 /* 8174 * We are not in the right spot, 8175 * find the correct spot in the tree. 8176 */ 8177 used_ref = 0; 8178 fe.r_start = start; 8179 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 8180 moved++; 8181 } 8182 if (rsm == NULL) { 8183 /* TSNH */ 8184 goto out; 8185 } 8186 /* Ok we have an ACK for some piece of this rsm */ 8187 if (rsm->r_start != start) { 8188 if ((rsm->r_flags & RACK_ACKED) == 0) { 8189 /* 8190 * Before any splitting or hookery is 8191 * done is it a TLP of interest i.e. rxt? 8192 */ 8193 if ((rsm->r_flags & RACK_TLP) && 8194 (rsm->r_rtr_cnt > 1)) { 8195 /* 8196 * We are splitting a rxt TLP, check 8197 * if we need to save off the start/end 8198 */ 8199 if (rack->rc_last_tlp_acked_set && 8200 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8201 /* 8202 * We already turned this on since we are inside 8203 * the previous one was a partially sack now we 8204 * are getting another one (maybe all of it). 8205 * 8206 */ 8207 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8208 /* 8209 * Lets make sure we have all of it though. 
8210 */ 8211 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8212 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8213 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8214 rack->r_ctl.last_tlp_acked_end); 8215 } 8216 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8217 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8218 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8219 rack->r_ctl.last_tlp_acked_end); 8220 } 8221 } else { 8222 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8223 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8224 rack->rc_last_tlp_past_cumack = 0; 8225 rack->rc_last_tlp_acked_set = 1; 8226 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8227 } 8228 } 8229 /** 8230 * Need to split this in two pieces the before and after, 8231 * the before remains in the map, the after must be 8232 * added. In other words we have: 8233 * rsm |--------------| 8234 * sackblk |-------> 8235 * rsm will become 8236 * rsm |---| 8237 * and nrsm will be the sacked piece 8238 * nrsm |----------| 8239 * 8240 * But before we start down that path lets 8241 * see if the sack spans over on top of 8242 * the next guy and it is already sacked. 8243 * 8244 */ 8245 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8246 if (next && (next->r_flags & RACK_ACKED) && 8247 SEQ_GEQ(end, next->r_start)) { 8248 /** 8249 * So the next one is already acked, and 8250 * we can thus by hookery use our stack_map 8251 * to reflect the piece being sacked and 8252 * then adjust the two tree entries moving 8253 * the start and ends around. So we start like: 8254 * rsm |------------| (not-acked) 8255 * next |-----------| (acked) 8256 * sackblk |--------> 8257 * We want to end like so: 8258 * rsm |------| (not-acked) 8259 * next |-----------------| (acked) 8260 * nrsm |-----| 8261 * Where nrsm is a temporary stack piece we 8262 * use to update all the gizmos. 8263 */ 8264 /* Copy up our fudge block */ 8265 nrsm = &stack_map; 8266 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8267 /* Now adjust our tree blocks */ 8268 rsm->r_end = start; 8269 next->r_start = start; 8270 /* Now we must adjust back where next->m is */ 8271 rack_setup_offset_for_rsm(rsm, next); 8272 8273 /* We don't need to adjust rsm, it did not change */ 8274 /* Clear out the dup ack count of the remainder */ 8275 rsm->r_dupack = 0; 8276 rsm->r_just_ret = 0; 8277 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8278 /* Now lets make sure our fudge block is right */ 8279 nrsm->r_start = start; 8280 /* Now lets update all the stats and such */ 8281 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8282 if (rack->app_limited_needs_set) 8283 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8284 changed += (nrsm->r_end - nrsm->r_start); 8285 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8286 if (nrsm->r_flags & RACK_SACK_PASSED) { 8287 rack->r_ctl.rc_reorder_ts = cts; 8288 } 8289 /* 8290 * Now we want to go up from rsm (the 8291 * one left un-acked) to the next one 8292 * in the tmap. We do this so when 8293 * we walk backwards we include marking 8294 * sack-passed on rsm (The one passed in 8295 * is skipped since it is generally called 8296 * on something sacked before removing it 8297 * from the tmap). 8298 */ 8299 if (rsm->r_in_tmap) { 8300 nrsm = TAILQ_NEXT(rsm, r_tnext); 8301 /* 8302 * Now that we have the next 8303 * one walk backwards from there. 
8304 */ 8305 if (nrsm && nrsm->r_in_tmap) 8306 rack_log_sack_passed(tp, rack, nrsm); 8307 } 8308 /* Now are we done? */ 8309 if (SEQ_LT(end, next->r_end) || 8310 (end == next->r_end)) { 8311 /* Done with block */ 8312 goto out; 8313 } 8314 rack_log_map_chg(tp, rack, &stack_map, rsm, next, MAP_SACK_M1, end, __LINE__); 8315 counter_u64_add(rack_sack_used_next_merge, 1); 8316 /* Postion for the next block */ 8317 start = next->r_end; 8318 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, next); 8319 if (rsm == NULL) 8320 goto out; 8321 } else { 8322 /** 8323 * We can't use any hookery here, so we 8324 * need to split the map. We enter like 8325 * so: 8326 * rsm |--------| 8327 * sackblk |-----> 8328 * We will add the new block nrsm and 8329 * that will be the new portion, and then 8330 * fall through after reseting rsm. So we 8331 * split and look like this: 8332 * rsm |----| 8333 * sackblk |-----> 8334 * nrsm |---| 8335 * We then fall through reseting 8336 * rsm to nrsm, so the next block 8337 * picks it up. 8338 */ 8339 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8340 if (nrsm == NULL) { 8341 /* 8342 * failed XXXrrs what can we do but loose the sack 8343 * info? 8344 */ 8345 goto out; 8346 } 8347 counter_u64_add(rack_sack_splits, 1); 8348 rack_clone_rsm(rack, nrsm, rsm, start); 8349 rsm->r_just_ret = 0; 8350 #ifndef INVARIANTS 8351 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8352 #else 8353 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8354 if (insret != NULL) { 8355 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8356 nrsm, insret, rack, rsm); 8357 } 8358 #endif 8359 if (rsm->r_in_tmap) { 8360 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8361 nrsm->r_in_tmap = 1; 8362 } 8363 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M2, end, __LINE__); 8364 rsm->r_flags &= (~RACK_HAS_FIN); 8365 /* Position us to point to the new nrsm that starts the sack blk */ 8366 rsm = nrsm; 8367 } 8368 } else { 8369 /* Already sacked this piece */ 8370 counter_u64_add(rack_sack_skipped_acked, 1); 8371 moved++; 8372 if (end == rsm->r_end) { 8373 /* Done with block */ 8374 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8375 goto out; 8376 } else if (SEQ_LT(end, rsm->r_end)) { 8377 /* A partial sack to a already sacked block */ 8378 moved++; 8379 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8380 goto out; 8381 } else { 8382 /* 8383 * The end goes beyond this guy 8384 * reposition the start to the 8385 * next block. 8386 */ 8387 start = rsm->r_end; 8388 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8389 if (rsm == NULL) 8390 goto out; 8391 } 8392 } 8393 } 8394 if (SEQ_GEQ(end, rsm->r_end)) { 8395 /** 8396 * The end of this block is either beyond this guy or right 8397 * at this guy. I.e.: 8398 * rsm --- |-----| 8399 * end |-----| 8400 * <or> 8401 * end |---------| 8402 */ 8403 if ((rsm->r_flags & RACK_ACKED) == 0) { 8404 /* 8405 * Is it a TLP of interest? 8406 */ 8407 if ((rsm->r_flags & RACK_TLP) && 8408 (rsm->r_rtr_cnt > 1)) { 8409 /* 8410 * We are splitting a rxt TLP, check 8411 * if we need to save off the start/end 8412 */ 8413 if (rack->rc_last_tlp_acked_set && 8414 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8415 /* 8416 * We already turned this on since we are inside 8417 * the previous one was a partially sack now we 8418 * are getting another one (maybe all of it). 
8419 */ 8420 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8421 /* 8422 * Lets make sure we have all of it though. 8423 */ 8424 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8425 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8426 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8427 rack->r_ctl.last_tlp_acked_end); 8428 } 8429 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8430 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8431 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8432 rack->r_ctl.last_tlp_acked_end); 8433 } 8434 } else { 8435 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8436 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8437 rack->rc_last_tlp_past_cumack = 0; 8438 rack->rc_last_tlp_acked_set = 1; 8439 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8440 } 8441 } 8442 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8443 changed += (rsm->r_end - rsm->r_start); 8444 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8445 if (rsm->r_in_tmap) /* should be true */ 8446 rack_log_sack_passed(tp, rack, rsm); 8447 /* Is Reordering occuring? */ 8448 if (rsm->r_flags & RACK_SACK_PASSED) { 8449 rsm->r_flags &= ~RACK_SACK_PASSED; 8450 rack->r_ctl.rc_reorder_ts = cts; 8451 } 8452 if (rack->app_limited_needs_set) 8453 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8454 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8455 rsm->r_flags |= RACK_ACKED; 8456 if (rsm->r_in_tmap) { 8457 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8458 rsm->r_in_tmap = 0; 8459 } 8460 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_SACK_M3, end, __LINE__); 8461 } else { 8462 counter_u64_add(rack_sack_skipped_acked, 1); 8463 moved++; 8464 } 8465 if (end == rsm->r_end) { 8466 /* This block only - done, setup for next */ 8467 goto out; 8468 } 8469 /* 8470 * There is more not coverend by this rsm move on 8471 * to the next block in the RB tree. 8472 */ 8473 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8474 start = rsm->r_end; 8475 rsm = nrsm; 8476 if (rsm == NULL) 8477 goto out; 8478 goto do_rest_ofb; 8479 } 8480 /** 8481 * The end of this sack block is smaller than 8482 * our rsm i.e.: 8483 * rsm --- |-----| 8484 * end |--| 8485 */ 8486 if ((rsm->r_flags & RACK_ACKED) == 0) { 8487 /* 8488 * Is it a TLP of interest? 8489 */ 8490 if ((rsm->r_flags & RACK_TLP) && 8491 (rsm->r_rtr_cnt > 1)) { 8492 /* 8493 * We are splitting a rxt TLP, check 8494 * if we need to save off the start/end 8495 */ 8496 if (rack->rc_last_tlp_acked_set && 8497 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8498 /* 8499 * We already turned this on since we are inside 8500 * the previous one was a partially sack now we 8501 * are getting another one (maybe all of it). 8502 */ 8503 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8504 /* 8505 * Lets make sure we have all of it though. 
8506 */ 8507 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8508 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8509 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8510 rack->r_ctl.last_tlp_acked_end); 8511 } 8512 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8513 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8514 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8515 rack->r_ctl.last_tlp_acked_end); 8516 } 8517 } else { 8518 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8519 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8520 rack->rc_last_tlp_past_cumack = 0; 8521 rack->rc_last_tlp_acked_set = 1; 8522 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8523 } 8524 } 8525 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8526 if (prev && 8527 (prev->r_flags & RACK_ACKED)) { 8528 /** 8529 * Goal, we want the right remainder of rsm to shrink 8530 * in place and span from (rsm->r_start = end) to rsm->r_end. 8531 * We want to expand prev to go all the way 8532 * to prev->r_end <- end. 8533 * so in the tree we have before: 8534 * prev |--------| (acked) 8535 * rsm |-------| (non-acked) 8536 * sackblk |-| 8537 * We churn it so we end up with 8538 * prev |----------| (acked) 8539 * rsm |-----| (non-acked) 8540 * nrsm |-| (temporary) 8541 * 8542 * Note if either prev/rsm is a TLP we don't 8543 * do this. 8544 */ 8545 nrsm = &stack_map; 8546 memcpy(nrsm, rsm, sizeof(struct rack_sendmap)); 8547 prev->r_end = end; 8548 rsm->r_start = end; 8549 /* Now adjust nrsm (stack copy) to be 8550 * the one that is the small 8551 * piece that was "sacked". 8552 */ 8553 nrsm->r_end = end; 8554 rsm->r_dupack = 0; 8555 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 8556 /* 8557 * Now that the rsm has had its start moved forward 8558 * lets go ahead and get its new place in the world. 8559 */ 8560 rack_setup_offset_for_rsm(prev, rsm); 8561 /* 8562 * Now nrsm is our new little piece 8563 * that is acked (which was merged 8564 * to prev). Update the rtt and changed 8565 * based on that. Also check for reordering. 8566 */ 8567 rack_update_rtt(tp, rack, nrsm, to, cts, SACKED, 0); 8568 if (rack->app_limited_needs_set) 8569 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); 8570 changed += (nrsm->r_end - nrsm->r_start); 8571 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); 8572 if (nrsm->r_flags & RACK_SACK_PASSED) { 8573 rack->r_ctl.rc_reorder_ts = cts; 8574 } 8575 rack_log_map_chg(tp, rack, prev, &stack_map, rsm, MAP_SACK_M4, end, __LINE__); 8576 rsm = prev; 8577 counter_u64_add(rack_sack_used_prev_merge, 1); 8578 } else { 8579 /** 8580 * This is the case where our previous 8581 * block is not acked either, so we must 8582 * split the block in two. 8583 */ 8584 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 8585 if (nrsm == NULL) { 8586 /* failed rrs what can we do but loose the sack info? */ 8587 goto out; 8588 } 8589 if ((rsm->r_flags & RACK_TLP) && 8590 (rsm->r_rtr_cnt > 1)) { 8591 /* 8592 * We are splitting a rxt TLP, check 8593 * if we need to save off the start/end 8594 */ 8595 if (rack->rc_last_tlp_acked_set && 8596 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8597 /* 8598 * We already turned this on since this block is inside 8599 * the previous one was a partially sack now we 8600 * are getting another one (maybe all of it). 8601 */ 8602 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8603 /* 8604 * Lets make sure we have all of it though. 
8605 */ 8606 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8607 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8608 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8609 rack->r_ctl.last_tlp_acked_end); 8610 } 8611 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8612 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8613 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8614 rack->r_ctl.last_tlp_acked_end); 8615 } 8616 } else { 8617 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8618 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8619 rack->rc_last_tlp_acked_set = 1; 8620 rack->rc_last_tlp_past_cumack = 0; 8621 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8622 } 8623 } 8624 /** 8625 * In this case nrsm becomes 8626 * nrsm->r_start = end; 8627 * nrsm->r_end = rsm->r_end; 8628 * which is un-acked. 8629 * <and> 8630 * rsm->r_end = nrsm->r_start; 8631 * i.e. the remaining un-acked 8632 * piece is left on the left 8633 * hand side. 8634 * 8635 * So we start like this 8636 * rsm |----------| (not acked) 8637 * sackblk |---| 8638 * build it so we have 8639 * rsm |---| (acked) 8640 * nrsm |------| (not acked) 8641 */ 8642 counter_u64_add(rack_sack_splits, 1); 8643 rack_clone_rsm(rack, nrsm, rsm, end); 8644 rsm->r_flags &= (~RACK_HAS_FIN); 8645 rsm->r_just_ret = 0; 8646 #ifndef INVARIANTS 8647 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8648 #else 8649 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 8650 if (insret != NULL) { 8651 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 8652 nrsm, insret, rack, rsm); 8653 } 8654 #endif 8655 if (rsm->r_in_tmap) { 8656 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8657 nrsm->r_in_tmap = 1; 8658 } 8659 nrsm->r_dupack = 0; 8660 rack_log_retran_reason(rack, nrsm, __LINE__, 0, 2); 8661 rack_update_rtt(tp, rack, rsm, to, cts, SACKED, 0); 8662 changed += (rsm->r_end - rsm->r_start); 8663 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 8664 if (rsm->r_in_tmap) /* should be true */ 8665 rack_log_sack_passed(tp, rack, rsm); 8666 /* Is Reordering occuring? */ 8667 if (rsm->r_flags & RACK_SACK_PASSED) { 8668 rsm->r_flags &= ~RACK_SACK_PASSED; 8669 rack->r_ctl.rc_reorder_ts = cts; 8670 } 8671 if (rack->app_limited_needs_set) 8672 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); 8673 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 8674 rsm->r_flags |= RACK_ACKED; 8675 rack_log_map_chg(tp, rack, NULL, rsm, nrsm, MAP_SACK_M5, end, __LINE__); 8676 if (rsm->r_in_tmap) { 8677 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8678 rsm->r_in_tmap = 0; 8679 } 8680 } 8681 } else if (start != end){ 8682 /* 8683 * The block was already acked. 8684 */ 8685 counter_u64_add(rack_sack_skipped_acked, 1); 8686 moved++; 8687 } 8688 out: 8689 if (rsm && 8690 ((rsm->r_flags & RACK_TLP) == 0) && 8691 (rsm->r_flags & RACK_ACKED)) { 8692 /* 8693 * Now can we merge where we worked 8694 * with either the previous or 8695 * next block? 8696 */ 8697 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8698 while (next) { 8699 if (next->r_flags & RACK_TLP) 8700 break; 8701 if (next->r_flags & RACK_ACKED) { 8702 /* yep this and next can be merged */ 8703 rsm = rack_merge_rsm(rack, rsm, next); 8704 next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8705 } else 8706 break; 8707 } 8708 /* Now what about the previous? 
*/ 8709 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8710 while (prev) { 8711 if (prev->r_flags & RACK_TLP) 8712 break; 8713 if (prev->r_flags & RACK_ACKED) { 8714 /* yep the previous and this can be merged */ 8715 rsm = rack_merge_rsm(rack, prev, rsm); 8716 prev = RB_PREV(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8717 } else 8718 break; 8719 } 8720 } 8721 if (used_ref == 0) { 8722 counter_u64_add(rack_sack_proc_all, 1); 8723 } else { 8724 counter_u64_add(rack_sack_proc_short, 1); 8725 } 8726 /* Save off the next one for quick reference. */ 8727 if (rsm) 8728 nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8729 else 8730 nrsm = NULL; 8731 *prsm = rack->r_ctl.rc_sacklast = nrsm; 8732 /* Pass back the moved. */ 8733 *moved_two = moved; 8734 return (changed); 8735 } 8736 8737 static void inline 8738 rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack) 8739 { 8740 struct rack_sendmap *tmap; 8741 8742 tmap = NULL; 8743 while (rsm && (rsm->r_flags & RACK_ACKED)) { 8744 /* Its no longer sacked, mark it so */ 8745 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 8746 #ifdef INVARIANTS 8747 if (rsm->r_in_tmap) { 8748 panic("rack:%p rsm:%p flags:0x%x in tmap?", 8749 rack, rsm, rsm->r_flags); 8750 } 8751 #endif 8752 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); 8753 /* Rebuild it into our tmap */ 8754 if (tmap == NULL) { 8755 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8756 tmap = rsm; 8757 } else { 8758 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); 8759 tmap = rsm; 8760 } 8761 tmap->r_in_tmap = 1; 8762 rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8763 } 8764 /* 8765 * Now lets possibly clear the sack filter so we start 8766 * recognizing sacks that cover this area. 8767 */ 8768 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); 8769 8770 } 8771 8772 static void 8773 rack_do_decay(struct tcp_rack *rack) 8774 { 8775 struct timeval res; 8776 8777 #define timersub(tvp, uvp, vvp) \ 8778 do { \ 8779 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ 8780 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ 8781 if ((vvp)->tv_usec < 0) { \ 8782 (vvp)->tv_sec--; \ 8783 (vvp)->tv_usec += 1000000; \ 8784 } \ 8785 } while (0) 8786 8787 timersub(&rack->r_ctl.act_rcv_time, &rack->r_ctl.rc_last_time_decay, &res); 8788 #undef timersub 8789 8790 rack->r_ctl.input_pkt++; 8791 if ((rack->rc_in_persist) || 8792 (res.tv_sec >= 1) || 8793 (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) { 8794 /* 8795 * Check for decay of non-SAD, 8796 * we want all SAD detection metrics to 8797 * decay 1/4 per second (or more) passed. 8798 */ 8799 #ifdef NETFLIX_EXP_DETECTION 8800 uint32_t pkt_delta; 8801 8802 pkt_delta = rack->r_ctl.input_pkt - rack->r_ctl.saved_input_pkt; 8803 #endif 8804 /* Update our saved tracking values */ 8805 rack->r_ctl.saved_input_pkt = rack->r_ctl.input_pkt; 8806 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 8807 /* Now do we escape without decay? */ 8808 #ifdef NETFLIX_EXP_DETECTION 8809 if (rack->rc_in_persist || 8810 (rack->rc_tp->snd_max == rack->rc_tp->snd_una) || 8811 (pkt_delta < tcp_sad_low_pps)){ 8812 /* 8813 * We don't decay idle connections 8814 * or ones that have a low input pps. 
8815 */ 8816 return; 8817 } 8818 /* Decay the counters */ 8819 rack->r_ctl.ack_count = ctf_decay_count(rack->r_ctl.ack_count, 8820 tcp_sad_decay_val); 8821 rack->r_ctl.sack_count = ctf_decay_count(rack->r_ctl.sack_count, 8822 tcp_sad_decay_val); 8823 rack->r_ctl.sack_moved_extra = ctf_decay_count(rack->r_ctl.sack_moved_extra, 8824 tcp_sad_decay_val); 8825 rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move, 8826 tcp_sad_decay_val); 8827 #endif 8828 } 8829 } 8830 8831 static void 8832 rack_process_to_cumack(struct tcpcb *tp, struct tcp_rack *rack, register uint32_t th_ack, uint32_t cts, struct tcpopt *to) 8833 { 8834 struct rack_sendmap *rsm; 8835 #ifdef INVARIANTS 8836 struct rack_sendmap *rm; 8837 #endif 8838 8839 /* 8840 * The ACK point is advancing to th_ack, we must drop off 8841 * the packets in the rack log and calculate any eligible 8842 * RTTs. 8843 */ 8844 rack->r_wanted_output = 1; 8845 8846 /* Tend any TLP that has been marked for 1/2 the seq space (it's old) */ 8847 if ((rack->rc_last_tlp_acked_set == 1) && 8848 (rack->rc_last_tlp_past_cumack == 1) && 8849 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { 8850 /* 8851 * We have reached the point where our last rack 8852 * tlp retransmit sequence is ahead of the cum-ack. 8853 * This can only happen when the cum-ack moves all 8854 * the way around (it's been a full 2^31+1 bytes 8855 * or more since we sent a retransmitted TLP). Let's 8856 * turn off the valid flag since it's not really valid. 8857 * 8858 * Note that since SACKs also turn on this event we have 8859 * a complication: we have to wait to age it out until 8860 * the cum-ack is past the TLP before checking, which is 8861 * what the next else clause does. 8862 */ 8863 rack_log_dsack_event(rack, 9, __LINE__, 8864 rack->r_ctl.last_tlp_acked_start, 8865 rack->r_ctl.last_tlp_acked_end); 8866 rack->rc_last_tlp_acked_set = 0; 8867 rack->rc_last_tlp_past_cumack = 0; 8868 } else if ((rack->rc_last_tlp_acked_set == 1) && 8869 (rack->rc_last_tlp_past_cumack == 0) && 8870 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { 8871 /* 8872 * It is safe to start aging TLPs out. 8873 */ 8874 rack->rc_last_tlp_past_cumack = 1; 8875 } 8876 /* We do the same for the tlp send seq as well */ 8877 if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8878 (rack->rc_last_sent_tlp_past_cumack == 1) && 8879 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { 8880 rack_log_dsack_event(rack, 9, __LINE__, 8881 rack->r_ctl.last_sent_tlp_seq, 8882 (rack->r_ctl.last_sent_tlp_seq + 8883 rack->r_ctl.last_sent_tlp_len)); 8884 rack->rc_last_sent_tlp_seq_valid = 0; 8885 rack->rc_last_sent_tlp_past_cumack = 0; 8886 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && 8887 (rack->rc_last_sent_tlp_past_cumack == 0) && 8888 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { 8889 /* 8890 * It is safe to start aging the TLP send seq out. 8891 */ 8892 rack->rc_last_sent_tlp_past_cumack = 1; 8893 } 8894 more: 8895 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 8896 if (rsm == NULL) { 8897 if ((th_ack - 1) == tp->iss) { 8898 /* 8899 * For the SYN incoming case we will not 8900 * have called tcp_output for the sending of 8901 * the SYN, so there will be no map. All 8902 * other cases should probably be a panic.
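 * (Under INVARIANTS we do panic just below when neither the SYN nor the sent-FIN case applies.)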
8903 */ 8904 return; 8905 } 8906 if (tp->t_flags & TF_SENTFIN) { 8907 /* if we sent a FIN we often will not have a map */ 8908 return; 8909 } 8910 #ifdef INVARIANTS 8911 panic("No rack map tp:%p for state:%d ack:%u rack:%p snd_una:%u snd_max:%u snd_nxt:%u\n", 8912 tp, 8913 tp->t_state, th_ack, rack, 8914 tp->snd_una, tp->snd_max, tp->snd_nxt); 8915 #endif 8916 return; 8917 } 8918 if (SEQ_LT(th_ack, rsm->r_start)) { 8919 /* Huh, the map is missing this */ 8920 #ifdef INVARIANTS 8921 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d\n", 8922 rsm->r_start, 8923 th_ack, tp->t_state, rack->r_state); 8924 #endif 8925 return; 8926 } 8927 rack_update_rtt(tp, rack, rsm, to, cts, CUM_ACKED, th_ack); 8928 8929 /* Now was it a retransmitted TLP? */ 8930 if ((rsm->r_flags & RACK_TLP) && 8931 (rsm->r_rtr_cnt > 1)) { 8932 /* 8933 * Yes, this rsm was a TLP and retransmitted, remember that 8934 * since if a DSACK comes back on this we don't want 8935 * to think of it as a reordered segment. This may 8936 * get updated again with possibly even other TLPs 8937 * in flight, but that's ok. Only when we don't send 8938 * a retransmitted TLP for 1/2 the sequence space 8939 * will it get turned off (above). 8940 */ 8941 if (rack->rc_last_tlp_acked_set && 8942 (is_rsm_inside_declared_tlp_block(rack, rsm))) { 8943 /* 8944 * We already turned this on since this rsm lies inside 8945 * the declared block; the previous one was a partial ack 8946 * and now we are getting another one (maybe all of it). 8947 */ 8948 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); 8949 /* 8950 * Let's make sure we have all of it though. 8951 */ 8952 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { 8953 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8954 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8955 rack->r_ctl.last_tlp_acked_end); 8956 } 8957 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { 8958 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8959 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, 8960 rack->r_ctl.last_tlp_acked_end); 8961 } 8962 } else { 8963 rack->rc_last_tlp_past_cumack = 1; 8964 rack->r_ctl.last_tlp_acked_start = rsm->r_start; 8965 rack->r_ctl.last_tlp_acked_end = rsm->r_end; 8966 rack->rc_last_tlp_acked_set = 1; 8967 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); 8968 } 8969 } 8970 /* Now do we consume the whole thing? */ 8971 if (SEQ_GEQ(th_ack, rsm->r_end)) { 8972 /* It's all consumed.
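 * Below we pull the rsm from the tree and tmap, settle the sacked and retransmitted byte accounting, and free it back to the zone.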
*/ 8973 uint32_t left; 8974 uint8_t newly_acked; 8975 8976 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); 8977 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 8978 rsm->r_rtr_bytes = 0; 8979 /* Record the time of highest cumack sent */ 8980 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 8981 #ifndef INVARIANTS 8982 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8983 #else 8984 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 8985 if (rm != rsm) { 8986 panic("removing head in rack:%p rsm:%p rm:%p", 8987 rack, rsm, rm); 8988 } 8989 #endif 8990 if (rsm->r_in_tmap) { 8991 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); 8992 rsm->r_in_tmap = 0; 8993 } 8994 newly_acked = 1; 8995 if (rsm->r_flags & RACK_ACKED) { 8996 /* 8997 * It was acked on the scoreboard -- remove 8998 * it from total 8999 */ 9000 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 9001 newly_acked = 0; 9002 } else if (rsm->r_flags & RACK_SACK_PASSED) { 9003 /* 9004 * There are segments ACKED on the 9005 * scoreboard further up. We are seeing 9006 * reordering. 9007 */ 9008 rsm->r_flags &= ~RACK_SACK_PASSED; 9009 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 9010 rsm->r_flags |= RACK_ACKED; 9011 rack->r_ctl.rc_reorder_ts = cts; 9012 if (rack->r_ent_rec_ns) { 9013 /* 9014 * We have sent no more, and we saw an sack 9015 * then ack arrive. 9016 */ 9017 rack->r_might_revert = 1; 9018 } 9019 } 9020 if ((rsm->r_flags & RACK_TO_REXT) && 9021 (tp->t_flags & TF_RCVD_TSTMP) && 9022 (to->to_flags & TOF_TS) && 9023 (to->to_tsecr != 0) && 9024 (tp->t_flags & TF_PREVVALID)) { 9025 /* 9026 * We can use the timestamp to see 9027 * if this retransmission was from the 9028 * first transmit. If so we made a mistake. 9029 */ 9030 tp->t_flags &= ~TF_PREVVALID; 9031 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { 9032 /* The first transmit is what this ack is for */ 9033 rack_cong_signal(tp, CC_RTO_ERR, th_ack, __LINE__); 9034 } 9035 } 9036 left = th_ack - rsm->r_end; 9037 if (rack->app_limited_needs_set && newly_acked) 9038 rack_need_set_test(tp, rack, rsm, th_ack, __LINE__, RACK_USE_END_OR_THACK); 9039 /* Free back to zone */ 9040 rack_free(rack, rsm); 9041 if (left) { 9042 goto more; 9043 } 9044 /* Check for reneging */ 9045 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9046 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { 9047 /* 9048 * The peer has moved snd_una up to 9049 * the edge of this send, i.e. one 9050 * that it had previously acked. The only 9051 * way that can be true if the peer threw 9052 * away data (space issues) that it had 9053 * previously sacked (else it would have 9054 * given us snd_una up to (rsm->r_end). 9055 * We need to undo the acked markings here. 9056 * 9057 * Note we have to look to make sure th_ack is 9058 * our rsm->r_start in case we get an old ack 9059 * where th_ack is behind snd_una. 9060 */ 9061 rack_peer_reneges(rack, rsm, th_ack); 9062 } 9063 return; 9064 } 9065 if (rsm->r_flags & RACK_ACKED) { 9066 /* 9067 * It was acked on the scoreboard -- remove it from 9068 * total for the part being cum-acked. 9069 */ 9070 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 9071 } 9072 /* 9073 * Clear the dup ack count for 9074 * the piece that remains. 9075 */ 9076 rsm->r_dupack = 0; 9077 rack_log_retran_reason(rack, rsm, __LINE__, 0, 2); 9078 if (rsm->r_rtr_bytes) { 9079 /* 9080 * It was retransmitted adjust the 9081 * sack holes for what was acked. 
9082 */ 9083 int ack_am; 9084 9085 ack_am = (th_ack - rsm->r_start); 9086 if (ack_am >= rsm->r_rtr_bytes) { 9087 rack->r_ctl.rc_holes_rxt -= ack_am; 9088 rsm->r_rtr_bytes -= ack_am; 9089 } 9090 } 9091 /* 9092 * Update where the piece starts and record 9093 * the time of send of highest cumack sent. 9094 */ 9095 rack->r_ctl.rc_gp_cumack_ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; 9096 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_TRIM_HEAD, th_ack, __LINE__); 9097 /* Now we need to move our offset forward too */ 9098 if (rsm->m && (rsm->orig_m_len != rsm->m->m_len)) { 9099 /* Fix up the orig_m_len and possibly the mbuf offset */ 9100 rack_adjust_orig_mlen(rsm); 9101 } 9102 rsm->soff += (th_ack - rsm->r_start); 9103 rsm->r_start = th_ack; 9104 /* Now do we need to move the mbuf fwd too? */ 9105 if (rsm->m) { 9106 while (rsm->soff >= rsm->m->m_len) { 9107 rsm->soff -= rsm->m->m_len; 9108 rsm->m = rsm->m->m_next; 9109 KASSERT((rsm->m != NULL), 9110 (" nrsm:%p hit at soff:%u null m", 9111 rsm, rsm->soff)); 9112 } 9113 rsm->orig_m_len = rsm->m->m_len; 9114 } 9115 if (rack->app_limited_needs_set) 9116 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); 9117 } 9118 9119 static void 9120 rack_handle_might_revert(struct tcpcb *tp, struct tcp_rack *rack) 9121 { 9122 struct rack_sendmap *rsm; 9123 int sack_pass_fnd = 0; 9124 9125 if (rack->r_might_revert) { 9126 /* 9127 * Ok we have reordering, have not sent anything, we 9128 * might want to revert the congestion state if nothing 9129 * further has SACK_PASSED on it. Lets check. 9130 * 9131 * We also get here when we have DSACKs come in for 9132 * all the data that we FR'd. Note that a rxt or tlp 9133 * timer clears this from happening. 9134 */ 9135 9136 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 9137 if (rsm->r_flags & RACK_SACK_PASSED) { 9138 sack_pass_fnd = 1; 9139 break; 9140 } 9141 } 9142 if (sack_pass_fnd == 0) { 9143 /* 9144 * We went into recovery 9145 * incorrectly due to reordering! 9146 */ 9147 int orig_cwnd; 9148 9149 rack->r_ent_rec_ns = 0; 9150 orig_cwnd = tp->snd_cwnd; 9151 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; 9152 tp->snd_recover = tp->snd_una; 9153 rack_log_to_prr(rack, 14, orig_cwnd, __LINE__); 9154 EXIT_RECOVERY(tp->t_flags); 9155 } 9156 rack->r_might_revert = 0; 9157 } 9158 } 9159 9160 #ifdef NETFLIX_EXP_DETECTION 9161 static void 9162 rack_do_detection(struct tcpcb *tp, struct tcp_rack *rack, uint32_t bytes_this_ack, uint32_t segsiz) 9163 { 9164 if ((rack->do_detection || tcp_force_detection) && 9165 tcp_sack_to_ack_thresh && 9166 tcp_sack_to_move_thresh && 9167 ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) { 9168 /* 9169 * We have thresholds set to find 9170 * possible attackers and disable sack. 9171 * Check them. 
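 * Both the sack-to-ack and extra-move ratios are scaled by 1000 before being compared against the sysctl thresholds.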
9172 */ 9173 uint64_t ackratio, moveratio, movetotal; 9174 9175 /* Log detecting */ 9176 rack_log_sad(rack, 1); 9177 ackratio = (uint64_t)(rack->r_ctl.sack_count); 9178 ackratio *= (uint64_t)(1000); 9179 if (rack->r_ctl.ack_count) 9180 ackratio /= (uint64_t)(rack->r_ctl.ack_count); 9181 else { 9182 /* We really should not hit here */ 9183 ackratio = 1000; 9184 } 9185 if ((rack->sack_attack_disable == 0) && 9186 (ackratio > rack_highest_sack_thresh_seen)) 9187 rack_highest_sack_thresh_seen = (uint32_t)ackratio; 9188 movetotal = rack->r_ctl.sack_moved_extra; 9189 movetotal += rack->r_ctl.sack_noextra_move; 9190 moveratio = rack->r_ctl.sack_moved_extra; 9191 moveratio *= (uint64_t)1000; 9192 if (movetotal) 9193 moveratio /= movetotal; 9194 else { 9195 /* No moves, thats pretty good */ 9196 moveratio = 0; 9197 } 9198 if ((rack->sack_attack_disable == 0) && 9199 (moveratio > rack_highest_move_thresh_seen)) 9200 rack_highest_move_thresh_seen = (uint32_t)moveratio; 9201 if (rack->sack_attack_disable == 0) { 9202 if ((ackratio > tcp_sack_to_ack_thresh) && 9203 (moveratio > tcp_sack_to_move_thresh)) { 9204 /* Disable sack processing */ 9205 rack->sack_attack_disable = 1; 9206 if (rack->r_rep_attack == 0) { 9207 rack->r_rep_attack = 1; 9208 counter_u64_add(rack_sack_attacks_detected, 1); 9209 } 9210 if (tcp_attack_on_turns_on_logging) { 9211 /* 9212 * Turn on logging, used for debugging 9213 * false positives. 9214 */ 9215 rack->rc_tp->t_logstate = tcp_attack_on_turns_on_logging; 9216 } 9217 /* Clamp the cwnd at flight size */ 9218 rack->r_ctl.rc_saved_cwnd = rack->rc_tp->snd_cwnd; 9219 rack->rc_tp->snd_cwnd = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 9220 rack_log_sad(rack, 2); 9221 } 9222 } else { 9223 /* We are sack-disabled check for false positives */ 9224 if ((ackratio <= tcp_restoral_thresh) || 9225 (rack->r_ctl.rc_num_maps_alloced < tcp_map_minimum)) { 9226 rack->sack_attack_disable = 0; 9227 rack_log_sad(rack, 3); 9228 /* Restart counting */ 9229 rack->r_ctl.sack_count = 0; 9230 rack->r_ctl.sack_moved_extra = 0; 9231 rack->r_ctl.sack_noextra_move = 1; 9232 rack->r_ctl.ack_count = max(1, 9233 (bytes_this_ack / segsiz)); 9234 9235 if (rack->r_rep_reverse == 0) { 9236 rack->r_rep_reverse = 1; 9237 counter_u64_add(rack_sack_attacks_reversed, 1); 9238 } 9239 /* Restore the cwnd */ 9240 if (rack->r_ctl.rc_saved_cwnd > rack->rc_tp->snd_cwnd) 9241 rack->rc_tp->snd_cwnd = rack->r_ctl.rc_saved_cwnd; 9242 } 9243 } 9244 } 9245 } 9246 #endif 9247 9248 static int 9249 rack_note_dsack(struct tcp_rack *rack, tcp_seq start, tcp_seq end) 9250 { 9251 9252 uint32_t am, l_end; 9253 int was_tlp = 0; 9254 9255 if (SEQ_GT(end, start)) 9256 am = end - start; 9257 else 9258 am = 0; 9259 if ((rack->rc_last_tlp_acked_set ) && 9260 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && 9261 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { 9262 /* 9263 * The DSACK is because of a TLP which we don't 9264 * do anything with the reordering window over since 9265 * it was not reordering that caused the DSACK but 9266 * our previous retransmit TLP. 9267 */ 9268 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9269 was_tlp = 1; 9270 goto skip_dsack_round; 9271 } 9272 if (rack->rc_last_sent_tlp_seq_valid) { 9273 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; 9274 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && 9275 (SEQ_LEQ(end, l_end))) { 9276 /* 9277 * This dsack is from the last sent TLP, ignore it 9278 * for reordering purposes. 
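 * (We still count its bytes in dsack_byte_cnt below; we just do not start a new DSACK round for it.)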
9279 */ 9280 rack_log_dsack_event(rack, 7, __LINE__, start, end); 9281 was_tlp = 1; 9282 goto skip_dsack_round; 9283 } 9284 } 9285 if (rack->rc_dsack_round_seen == 0) { 9286 rack->rc_dsack_round_seen = 1; 9287 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; 9288 rack->r_ctl.num_dsack++; 9289 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ 9290 rack_log_dsack_event(rack, 2, __LINE__, 0, 0); 9291 } 9292 skip_dsack_round: 9293 /* 9294 * We keep track of how many DSACK blocks we get 9295 * after a recovery incident. 9296 */ 9297 rack->r_ctl.dsack_byte_cnt += am; 9298 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && 9299 rack->r_ctl.retran_during_recovery && 9300 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { 9301 /* 9302 * False recovery most likely culprit is reordering. If 9303 * nothing else is missing we need to revert. 9304 */ 9305 rack->r_might_revert = 1; 9306 rack_handle_might_revert(rack->rc_tp, rack); 9307 rack->r_might_revert = 0; 9308 rack->r_ctl.retran_during_recovery = 0; 9309 rack->r_ctl.dsack_byte_cnt = 0; 9310 } 9311 return (was_tlp); 9312 } 9313 9314 static uint32_t 9315 do_rack_compute_pipe(struct tcpcb *tp, struct tcp_rack *rack, uint32_t snd_una) 9316 { 9317 return (((tp->snd_max - snd_una) - rack->r_ctl.rc_sacked) + rack->r_ctl.rc_holes_rxt); 9318 } 9319 9320 static int32_t 9321 rack_compute_pipe(struct tcpcb *tp) 9322 { 9323 return ((int32_t)do_rack_compute_pipe(tp, 9324 (struct tcp_rack *)tp->t_fb_ptr, 9325 tp->snd_una)); 9326 } 9327 9328 static void 9329 rack_update_prr(struct tcpcb *tp, struct tcp_rack *rack, uint32_t changed, tcp_seq th_ack) 9330 { 9331 /* Deal with changed and PRR here (in recovery only) */ 9332 uint32_t pipe, snd_una; 9333 9334 rack->r_ctl.rc_prr_delivered += changed; 9335 9336 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { 9337 /* 9338 * It is all outstanding, we are application limited 9339 * and thus we don't need more room to send anything. 9340 * Note we use tp->snd_una here and not th_ack because 9341 * the data as yet not been cut from the sb. 
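 * When we are not application limited the code below applies proportional rate reduction (RFC 6937): while pipe is above ssthresh, sndcnt works out to roughly prr_delivered * ssthresh / rc_prr_recovery_fs less what has already been sent (rc_prr_out); once pipe drops to ssthresh or below we fall back to a bound built from newly delivered data plus one segment, capped at ssthresh - pipe.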
9342 */ 9343 rack->r_ctl.rc_prr_sndcnt = 0; 9344 return; 9345 } 9346 /* Compute prr_sndcnt */ 9347 if (SEQ_GT(tp->snd_una, th_ack)) { 9348 snd_una = tp->snd_una; 9349 } else { 9350 snd_una = th_ack; 9351 } 9352 pipe = do_rack_compute_pipe(tp, rack, snd_una); 9353 if (pipe > tp->snd_ssthresh) { 9354 long sndcnt; 9355 9356 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; 9357 if (rack->r_ctl.rc_prr_recovery_fs > 0) 9358 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; 9359 else { 9360 rack->r_ctl.rc_prr_sndcnt = 0; 9361 rack_log_to_prr(rack, 9, 0, __LINE__); 9362 sndcnt = 0; 9363 } 9364 sndcnt++; 9365 if (sndcnt > (long)rack->r_ctl.rc_prr_out) 9366 sndcnt -= rack->r_ctl.rc_prr_out; 9367 else 9368 sndcnt = 0; 9369 rack->r_ctl.rc_prr_sndcnt = sndcnt; 9370 rack_log_to_prr(rack, 10, 0, __LINE__); 9371 } else { 9372 uint32_t limit; 9373 9374 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) 9375 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); 9376 else 9377 limit = 0; 9378 if (changed > limit) 9379 limit = changed; 9380 limit += ctf_fixed_maxseg(tp); 9381 if (tp->snd_ssthresh > pipe) { 9382 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); 9383 rack_log_to_prr(rack, 11, 0, __LINE__); 9384 } else { 9385 rack->r_ctl.rc_prr_sndcnt = min(0, limit); 9386 rack_log_to_prr(rack, 12, 0, __LINE__); 9387 } 9388 } 9389 } 9390 9391 static void 9392 rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, int entered_recovery, int dup_ack_struck) 9393 { 9394 uint32_t changed; 9395 struct tcp_rack *rack; 9396 struct rack_sendmap *rsm; 9397 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 9398 register uint32_t th_ack; 9399 int32_t i, j, k, num_sack_blks = 0; 9400 uint32_t cts, acked, ack_point; 9401 int loop_start = 0, moved_two = 0; 9402 uint32_t tsused; 9403 9404 9405 INP_WLOCK_ASSERT(tptoinpcb(tp)); 9406 if (tcp_get_flags(th) & TH_RST) { 9407 /* We don't log resets */ 9408 return; 9409 } 9410 rack = (struct tcp_rack *)tp->t_fb_ptr; 9411 cts = tcp_get_usecs(NULL); 9412 rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 9413 changed = 0; 9414 th_ack = th->th_ack; 9415 if (rack->sack_attack_disable == 0) 9416 rack_do_decay(rack); 9417 if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) { 9418 /* 9419 * You only get credit for 9420 * MSS and greater (and you get extra 9421 * credit for larger cum-ack moves). 9422 */ 9423 int ac; 9424 9425 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); 9426 rack->r_ctl.ack_count += ac; 9427 counter_u64_add(rack_ack_total, ac); 9428 } 9429 if (rack->r_ctl.ack_count > 0xfff00000) { 9430 /* 9431 * reduce the number to keep us under 9432 * a uint32_t. 9433 */ 9434 rack->r_ctl.ack_count /= 2; 9435 rack->r_ctl.sack_count /= 2; 9436 } 9437 if (SEQ_GT(th_ack, tp->snd_una)) { 9438 rack_log_progress_event(rack, tp, ticks, PROGRESS_UPDATE, __LINE__); 9439 tp->t_acktime = ticks; 9440 } 9441 if (rsm && SEQ_GT(th_ack, rsm->r_start)) 9442 changed = th_ack - rsm->r_start; 9443 if (changed) { 9444 rack_process_to_cumack(tp, rack, th_ack, cts, to); 9445 } 9446 if ((to->to_flags & TOF_SACK) == 0) { 9447 /* We are done nothing left and no sack. */ 9448 rack_handle_might_revert(tp, rack); 9449 /* 9450 * For cases where we struck a dup-ack 9451 * with no SACK, add to the changes so 9452 * PRR will work right. 
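 * (Crediting one MSS of delivered data gives PRR something to work with even though the cum-ack did not move.)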
9453 */ 9454 if (dup_ack_struck && (changed == 0)) { 9455 changed += ctf_fixed_maxseg(rack->rc_tp); 9456 } 9457 goto out; 9458 } 9459 /* Sack block processing */ 9460 if (SEQ_GT(th_ack, tp->snd_una)) 9461 ack_point = th_ack; 9462 else 9463 ack_point = tp->snd_una; 9464 for (i = 0; i < to->to_nsacks; i++) { 9465 bcopy((to->to_sacks + i * TCPOLEN_SACK), 9466 &sack, sizeof(sack)); 9467 sack.start = ntohl(sack.start); 9468 sack.end = ntohl(sack.end); 9469 if (SEQ_GT(sack.end, sack.start) && 9470 SEQ_GT(sack.start, ack_point) && 9471 SEQ_LT(sack.start, tp->snd_max) && 9472 SEQ_GT(sack.end, ack_point) && 9473 SEQ_LEQ(sack.end, tp->snd_max)) { 9474 sack_blocks[num_sack_blks] = sack; 9475 num_sack_blks++; 9476 } else if (SEQ_LEQ(sack.start, th_ack) && 9477 SEQ_LEQ(sack.end, th_ack)) { 9478 int was_tlp; 9479 9480 was_tlp = rack_note_dsack(rack, sack.start, sack.end); 9481 /* 9482 * Its a D-SACK block. 9483 */ 9484 tcp_record_dsack(tp, sack.start, sack.end, was_tlp); 9485 } 9486 } 9487 if (rack->rc_dsack_round_seen) { 9488 /* Is the dsack roound over? */ 9489 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { 9490 /* Yes it is */ 9491 rack->rc_dsack_round_seen = 0; 9492 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 9493 } 9494 } 9495 /* 9496 * Sort the SACK blocks so we can update the rack scoreboard with 9497 * just one pass. 9498 */ 9499 num_sack_blks = sack_filter_blks(&rack->r_ctl.rack_sf, sack_blocks, 9500 num_sack_blks, th->th_ack); 9501 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); 9502 if (num_sack_blks == 0) { 9503 /* Nothing to sack (DSACKs?) */ 9504 goto out_with_totals; 9505 } 9506 if (num_sack_blks < 2) { 9507 /* Only one, we don't need to sort */ 9508 goto do_sack_work; 9509 } 9510 /* Sort the sacks */ 9511 for (i = 0; i < num_sack_blks; i++) { 9512 for (j = i + 1; j < num_sack_blks; j++) { 9513 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 9514 sack = sack_blocks[i]; 9515 sack_blocks[i] = sack_blocks[j]; 9516 sack_blocks[j] = sack; 9517 } 9518 } 9519 } 9520 /* 9521 * Now are any of the sack block ends the same (yes some 9522 * implementations send these)? 9523 */ 9524 again: 9525 if (num_sack_blks == 0) 9526 goto out_with_totals; 9527 if (num_sack_blks > 1) { 9528 for (i = 0; i < num_sack_blks; i++) { 9529 for (j = i + 1; j < num_sack_blks; j++) { 9530 if (sack_blocks[i].end == sack_blocks[j].end) { 9531 /* 9532 * Ok these two have the same end we 9533 * want the smallest end and then 9534 * throw away the larger and start 9535 * again. 9536 */ 9537 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 9538 /* 9539 * The second block covers 9540 * more area use that 9541 */ 9542 sack_blocks[i].start = sack_blocks[j].start; 9543 } 9544 /* 9545 * Now collapse out the dup-sack and 9546 * lower the count 9547 */ 9548 for (k = (j + 1); k < num_sack_blks; k++) { 9549 sack_blocks[j].start = sack_blocks[k].start; 9550 sack_blocks[j].end = sack_blocks[k].end; 9551 j++; 9552 } 9553 num_sack_blks--; 9554 goto again; 9555 } 9556 } 9557 } 9558 } 9559 do_sack_work: 9560 /* 9561 * First lets look to see if 9562 * we have retransmitted and 9563 * can use the transmit next? 9564 */ 9565 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9566 if (rsm && 9567 SEQ_GT(sack_blocks[0].end, rsm->r_start) && 9568 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { 9569 /* 9570 * We probably did the FR and the next 9571 * SACK in continues as we would expect. 
9572 */ 9573 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[0], to, &rsm, cts, &moved_two); 9574 if (acked) { 9575 rack->r_wanted_output = 1; 9576 changed += acked; 9577 } 9578 if (num_sack_blks == 1) { 9579 /* 9580 * This is what we would expect from 9581 * a normal implementation to happen 9582 * after we have retransmitted the FR, 9583 * i.e the sack-filter pushes down 9584 * to 1 block and the next to be retransmitted 9585 * is the sequence in the sack block (has more 9586 * are acked). Count this as ACK'd data to boost 9587 * up the chances of recovering any false positives. 9588 */ 9589 rack->r_ctl.ack_count += (acked / ctf_fixed_maxseg(rack->rc_tp)); 9590 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); 9591 counter_u64_add(rack_express_sack, 1); 9592 if (rack->r_ctl.ack_count > 0xfff00000) { 9593 /* 9594 * reduce the number to keep us under 9595 * a uint32_t. 9596 */ 9597 rack->r_ctl.ack_count /= 2; 9598 rack->r_ctl.sack_count /= 2; 9599 } 9600 goto out_with_totals; 9601 } else { 9602 /* 9603 * Start the loop through the 9604 * rest of blocks, past the first block. 9605 */ 9606 moved_two = 0; 9607 loop_start = 1; 9608 } 9609 } 9610 /* Its a sack of some sort */ 9611 rack->r_ctl.sack_count++; 9612 if (rack->r_ctl.sack_count > 0xfff00000) { 9613 /* 9614 * reduce the number to keep us under 9615 * a uint32_t. 9616 */ 9617 rack->r_ctl.ack_count /= 2; 9618 rack->r_ctl.sack_count /= 2; 9619 } 9620 counter_u64_add(rack_sack_total, 1); 9621 if (rack->sack_attack_disable) { 9622 /* An attacker disablement is in place */ 9623 if (num_sack_blks > 1) { 9624 rack->r_ctl.sack_count += (num_sack_blks - 1); 9625 rack->r_ctl.sack_moved_extra++; 9626 counter_u64_add(rack_move_some, 1); 9627 if (rack->r_ctl.sack_moved_extra > 0xfff00000) { 9628 rack->r_ctl.sack_moved_extra /= 2; 9629 rack->r_ctl.sack_noextra_move /= 2; 9630 } 9631 } 9632 goto out; 9633 } 9634 rsm = rack->r_ctl.rc_sacklast; 9635 for (i = loop_start; i < num_sack_blks; i++) { 9636 acked = rack_proc_sack_blk(tp, rack, &sack_blocks[i], to, &rsm, cts, &moved_two); 9637 if (acked) { 9638 rack->r_wanted_output = 1; 9639 changed += acked; 9640 } 9641 if (moved_two) { 9642 /* 9643 * If we did not get a SACK for at least a MSS and 9644 * had to move at all, or if we moved more than our 9645 * threshold, it counts against the "extra" move. 9646 */ 9647 rack->r_ctl.sack_moved_extra += moved_two; 9648 counter_u64_add(rack_move_some, 1); 9649 } else { 9650 /* 9651 * else we did not have to move 9652 * any more than we would expect. 9653 */ 9654 rack->r_ctl.sack_noextra_move++; 9655 counter_u64_add(rack_move_none, 1); 9656 } 9657 if (moved_two && (acked < ctf_fixed_maxseg(rack->rc_tp))) { 9658 /* 9659 * If the SACK was not a full MSS then 9660 * we add to sack_count the number of 9661 * MSS's (or possibly more than 9662 * a MSS if its a TSO send) we had to skip by. 9663 */ 9664 rack->r_ctl.sack_count += moved_two; 9665 counter_u64_add(rack_sack_total, moved_two); 9666 } 9667 /* 9668 * Now we need to setup for the next 9669 * round. First we make sure we won't 9670 * exceed the size of our uint32_t on 9671 * the various counts, and then clear out 9672 * moved_two. 
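 * (The paired counters are halved together so the ratios the SAD detection logic compares stay meaningful.)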
9673 */ 9674 if ((rack->r_ctl.sack_moved_extra > 0xfff00000) || 9675 (rack->r_ctl.sack_noextra_move > 0xfff00000)) { 9676 rack->r_ctl.sack_moved_extra /= 2; 9677 rack->r_ctl.sack_noextra_move /= 2; 9678 } 9679 if (rack->r_ctl.sack_count > 0xfff00000) { 9680 rack->r_ctl.ack_count /= 2; 9681 rack->r_ctl.sack_count /= 2; 9682 } 9683 moved_two = 0; 9684 } 9685 out_with_totals: 9686 if (num_sack_blks > 1) { 9687 /* 9688 * You get an extra stroke if 9689 * you have more than one sack-blk, this 9690 * could be where we are skipping forward 9691 * and the sack-filter is still working, or 9692 * it could be an attacker constantly 9693 * moving us. 9694 */ 9695 rack->r_ctl.sack_moved_extra++; 9696 counter_u64_add(rack_move_some, 1); 9697 } 9698 out: 9699 #ifdef NETFLIX_EXP_DETECTION 9700 rack_do_detection(tp, rack, BYTES_THIS_ACK(tp, th), ctf_fixed_maxseg(rack->rc_tp)); 9701 #endif 9702 if (changed) { 9703 /* Something changed cancel the rack timer */ 9704 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 9705 } 9706 tsused = tcp_get_usecs(NULL); 9707 rsm = tcp_rack_output(tp, rack, tsused); 9708 if ((!IN_FASTRECOVERY(tp->t_flags)) && 9709 rsm && 9710 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { 9711 /* Enter recovery */ 9712 entered_recovery = 1; 9713 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 9714 /* 9715 * When we enter recovery we need to assure we send 9716 * one packet. 9717 */ 9718 if (rack->rack_no_prr == 0) { 9719 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); 9720 rack_log_to_prr(rack, 8, 0, __LINE__); 9721 } 9722 rack->r_timer_override = 1; 9723 rack->r_early = 0; 9724 rack->r_ctl.rc_agg_early = 0; 9725 } else if (IN_FASTRECOVERY(tp->t_flags) && 9726 rsm && 9727 (rack->r_rr_config == 3)) { 9728 /* 9729 * Assure we can output and we get no 9730 * remembered pace time except the retransmit. 9731 */ 9732 rack->r_timer_override = 1; 9733 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 9734 rack->r_ctl.rc_resend = rsm; 9735 } 9736 if (IN_FASTRECOVERY(tp->t_flags) && 9737 (rack->rack_no_prr == 0) && 9738 (entered_recovery == 0)) { 9739 rack_update_prr(tp, rack, changed, th_ack); 9740 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && 9741 ((tcp_in_hpts(rack->rc_inp) == 0) && 9742 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { 9743 /* 9744 * If you are pacing output you don't want 9745 * to override. 9746 */ 9747 rack->r_early = 0; 9748 rack->r_ctl.rc_agg_early = 0; 9749 rack->r_timer_override = 1; 9750 } 9751 } 9752 } 9753 9754 static void 9755 rack_strike_dupack(struct tcp_rack *rack) 9756 { 9757 struct rack_sendmap *rsm; 9758 9759 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 9760 while (rsm && (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 9761 rsm = TAILQ_NEXT(rsm, r_tnext); 9762 if (rsm->r_flags & RACK_MUST_RXT) { 9763 /* Sendmap entries that are marked to 9764 * be retransmitted do not need dupack's 9765 * struck. We get these marks for a number 9766 * of reasons (rxt timeout with no sack, 9767 * mtu change, or rwnd collapses). When 9768 * these events occur, we know we must retransmit 9769 * them and mark the sendmap entries. Dupack counting 9770 * is not needed since we are already set to retransmit 9771 * it as soon as we can. 9772 */ 9773 continue; 9774 } 9775 } 9776 if (rsm && (rsm->r_dupack < 0xff)) { 9777 rsm->r_dupack++; 9778 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { 9779 struct timeval tv; 9780 uint32_t cts; 9781 /* 9782 * Here we see if we need to retransmit. 
For 9783 * a SACK type connection if enough time has passed 9784 * we will get a return of the rsm. For a non-sack 9785 * connection we will get the rsm returned if the 9786 * dupack value is 3 or more. 9787 */ 9788 cts = tcp_get_usecs(&tv); 9789 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); 9790 if (rack->r_ctl.rc_resend != NULL) { 9791 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { 9792 rack_cong_signal(rack->rc_tp, CC_NDUPACK, 9793 rack->rc_tp->snd_una, __LINE__); 9794 } 9795 rack->r_wanted_output = 1; 9796 rack->r_timer_override = 1; 9797 rack_log_retran_reason(rack, rsm, __LINE__, 1, 3); 9798 } 9799 } else { 9800 rack_log_retran_reason(rack, rsm, __LINE__, 0, 3); 9801 } 9802 } 9803 } 9804 9805 static void 9806 rack_check_bottom_drag(struct tcpcb *tp, 9807 struct tcp_rack *rack, 9808 struct socket *so, int32_t acked) 9809 { 9810 uint32_t segsiz, minseg; 9811 9812 segsiz = ctf_fixed_maxseg(tp); 9813 minseg = segsiz; 9814 9815 if (tp->snd_max == tp->snd_una) { 9816 /* 9817 * We are doing dynamic pacing and we are way 9818 * under. Basically everything got acked while 9819 * we were still waiting on the pacer to expire. 9820 * 9821 * This means we need to boost the b/w in 9822 * addition to any earlier boosting of 9823 * the multiplier. 9824 */ 9825 rack->rc_dragged_bottom = 1; 9826 rack_validate_multipliers_at_or_above100(rack); 9827 /* 9828 * Lets use the segment bytes acked plus 9829 * the lowest RTT seen as the basis to 9830 * form a b/w estimate. This will be off 9831 * due to the fact that the true estimate 9832 * should be around 1/2 the time of the RTT 9833 * but we can settle for that. 9834 */ 9835 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && 9836 acked) { 9837 uint64_t bw, calc_bw, rtt; 9838 9839 rtt = rack->r_ctl.rack_rs.rs_us_rtt; 9840 if (rtt == 0) { 9841 /* no us sample is there a ms one? */ 9842 if (rack->r_ctl.rack_rs.rs_rtt_lowest) { 9843 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; 9844 } else { 9845 goto no_measurement; 9846 } 9847 } 9848 bw = acked; 9849 calc_bw = bw * 1000000; 9850 calc_bw /= rtt; 9851 if (rack->r_ctl.last_max_bw && 9852 (rack->r_ctl.last_max_bw < calc_bw)) { 9853 /* 9854 * If we have a last calculated max bw 9855 * enforce it. 9856 */ 9857 calc_bw = rack->r_ctl.last_max_bw; 9858 } 9859 /* now plop it in */ 9860 if (rack->rc_gp_filled == 0) { 9861 if (calc_bw > ONE_POINT_TWO_MEG) { 9862 /* 9863 * If we have no measurement 9864 * don't let us set in more than 9865 * 1.2Mbps. If we are still too 9866 * low after pacing with this we 9867 * will hopefully have a max b/w 9868 * available to sanity check things. 
9869 */ 9870 calc_bw = ONE_POINT_TWO_MEG; 9871 } 9872 rack->r_ctl.rc_rtt_diff = 0; 9873 rack->r_ctl.gp_bw = calc_bw; 9874 rack->rc_gp_filled = 1; 9875 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9876 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9877 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9878 } else if (calc_bw > rack->r_ctl.gp_bw) { 9879 rack->r_ctl.rc_rtt_diff = 0; 9880 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) 9881 rack->r_ctl.num_measurements = RACK_REQ_AVG; 9882 rack->r_ctl.gp_bw = calc_bw; 9883 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 9884 } else 9885 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9886 if ((rack->gp_ready == 0) && 9887 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { 9888 /* We have enough measurements now */ 9889 rack->gp_ready = 1; 9890 rack_set_cc_pacing(rack); 9891 if (rack->defer_options) 9892 rack_apply_deferred_options(rack); 9893 } 9894 /* 9895 * For acks over 1mss we do a extra boost to simulate 9896 * where we would get 2 acks (we want 110 for the mul). 9897 */ 9898 if (acked > segsiz) 9899 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9900 } else { 9901 /* 9902 * zero rtt possibly?, settle for just an old increase. 9903 */ 9904 no_measurement: 9905 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9906 } 9907 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && 9908 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), 9909 minseg)) && 9910 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && 9911 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && 9912 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= 9913 (segsiz * rack_req_segs))) { 9914 /* 9915 * We are doing dynamic GP pacing and 9916 * we have everything except 1MSS or less 9917 * bytes left out. We are still pacing away. 9918 * And there is data that could be sent, This 9919 * means we are inserting delayed ack time in 9920 * our measurements because we are pacing too slow. 9921 */ 9922 rack_validate_multipliers_at_or_above100(rack); 9923 rack->rc_dragged_bottom = 1; 9924 rack_increase_bw_mul(rack, -1, 0, 0, 1); 9925 } 9926 } 9927 9928 9929 9930 static void 9931 rack_gain_for_fastoutput(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t acked_amount) 9932 { 9933 /* 9934 * The fast output path is enabled and we 9935 * have moved the cumack forward. Lets see if 9936 * we can expand forward the fast path length by 9937 * that amount. What we would ideally like to 9938 * do is increase the number of bytes in the 9939 * fast path block (left_to_send) by the 9940 * acked amount. However we have to gate that 9941 * by two factors: 9942 * 1) The amount outstanding and the rwnd of the peer 9943 * (i.e. we don't want to exceed the rwnd of the peer). 9944 * <and> 9945 * 2) The amount of data left in the socket buffer (i.e. 9946 * we can't send beyond what is in the buffer). 9947 * 9948 * Note that this does not take into account any increase 9949 * in the cwnd. We will only extend the fast path by 9950 * what was acked. 
	 */
	uint32_t new_total, gating_val;

	new_total = acked_amount + rack->r_ctl.fsb.left_to_send;
	gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)),
	    (tp->snd_wnd - (tp->snd_max - tp->snd_una)));
	if (new_total <= gating_val) {
		/* We can increase left_to_send by the acked amount */
		counter_u64_add(rack_extended_rfo, 1);
		rack->r_ctl.fsb.left_to_send = new_total;
		KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_max - tp->snd_una))),
		    ("rack:%p left_to_send:%u sbavail:%u out:%u",
		    rack, rack->r_ctl.fsb.left_to_send,
		    sbavail(&rack->rc_inp->inp_socket->so_snd),
		    (tp->snd_max - tp->snd_una)));
	}
}

static void
rack_adjust_sendmap(struct tcp_rack *rack, struct sockbuf *sb, tcp_seq snd_una)
{
	/*
	 * Here any sendmap entry that points to the
	 * beginning mbuf must be adjusted to the correct
	 * offset. This must be called with:
	 * 1) The socket buffer locked.
	 * 2) snd_una adjusted to its new position.
	 *
	 * Note that (2) implies rack_ack_received has also
	 * been called.
	 *
	 * We grab the first mbuf in the socket buffer and
	 * then go through the front of the sendmap, recalculating
	 * the stored offset for any sendmap entry that has
	 * that mbuf. We must use the sb functions to do this
	 * since it is possible an add was done as well as
	 * the subtraction we may have just completed. This should
	 * not be a penalty though, since we just referenced the sb
	 * to go in and trim off the mbufs that we freed (of course
	 * there will be a penalty for the sendmap references though).
	 */
	struct mbuf *m;
	struct rack_sendmap *rsm;

	SOCKBUF_LOCK_ASSERT(sb);
	m = sb->sb_mb;
	rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree);
	if ((rsm == NULL) || (m == NULL)) {
		/* Nothing outstanding */
		return;
	}
	while (rsm->m && (rsm->m == m)) {
		/* one to adjust */
#ifdef INVARIANTS
		struct mbuf *tm;
		uint32_t soff;

		tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
		if (rsm->orig_m_len != m->m_len) {
			rack_adjust_orig_mlen(rsm);
		}
		if (rsm->soff != soff) {
			/*
			 * This is not a fatal error, we anticipate it
			 * might happen (the else code), so we count it here
			 * so that under INVARIANTS we can see that it really
			 * does happen.
			 */
			counter_u64_add(rack_adjust_map_bw, 1);
		}
		rsm->m = tm;
		rsm->soff = soff;
		if (tm)
			rsm->orig_m_len = rsm->m->m_len;
		else
			rsm->orig_m_len = 0;
#else
		rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
		if (rsm->m)
			rsm->orig_m_len = rsm->m->m_len;
		else
			rsm->orig_m_len = 0;
#endif
		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree,
		    rsm);
		if (rsm == NULL)
			break;
	}
}

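/*
 * A worked example of the gating in rack_gain_for_fastoutput() above (the
 * numbers are purely illustrative): suppose 4380 bytes were just
 * cumulatively acked, the fast-send block still has 8760 bytes queued
 * (left_to_send), the socket buffer holds 65535 bytes of which 20440 are
 * already outstanding (snd_max - snd_una), and the peer advertises a
 * 32120 byte window. Then:
 *
 *	new_total  = 4380 + 8760                       = 13140
 *	gating_val = min(65535 - 20440, 32120 - 20440) = 11680
 *
 * Since new_total exceeds gating_val, left_to_send is left alone; had the
 * peer advertised 36500 bytes instead, gating_val would be 16060 and the
 * fast path would be extended to 13140 bytes.
 */
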
/*
 * Return value of 1, we do not need to call rack_process_data().
 * Return value of 0, rack_process_data can be called.
 * For ret_val, if it is 0 the TCP is locked; if it is non-zero
 * it is unlocked and probably unsafe to touch the TCB.
 */
static int
rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, struct tcpopt *to,
    uint32_t tiwin, int32_t tlen,
    int32_t *ofia, int32_t thflags, int32_t *ret_val)
{
	int32_t ourfinisacked = 0;
	int32_t nsegs, acked_amount;
	int32_t acked;
	struct mbuf *mfree;
	struct tcp_rack *rack;
	int32_t under_pacing = 0;
	int32_t recovery = 0;

	INP_WLOCK_ASSERT(tptoinpcb(tp));

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (SEQ_GT(th->th_ack, tp->snd_max)) {
		__ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val,
		    &rack->r_ctl.challenge_ack_ts,
		    &rack->r_ctl.challenge_ack_cnt);
		rack->r_wanted_output = 1;
		return (1);
	}
	if (rack->gp_ready &&
	    (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
		under_pacing = 1;
	}
	if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
		int in_rec, dup_ack_struck = 0;

		in_rec = IN_FASTRECOVERY(tp->t_flags);
		if (rack->rc_in_persist) {
			tp->t_rxtshift = 0;
			RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
			    rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
		}
		if ((th->th_ack == tp->snd_una) &&
		    (tiwin == tp->snd_wnd) &&
		    ((to->to_flags & TOF_SACK) == 0)) {
			rack_strike_dupack(rack);
			dup_ack_struck = 1;
		}
		rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), dup_ack_struck);
	}
	if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
		/*
		 * Old ack, behind (or duplicate to) the last one rcv'd.
		 * Note: We mark reordering as occurring if the ack is
		 * below snd_una and we have not closed our window.
		 */
		if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
			rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
		}
		return (0);
	}
	/*
	 * If we reach this point, ACK is not a duplicate, i.e., it ACKs
	 * something we sent.
	 */
	if (tp->t_flags & TF_NEEDSYN) {
		/*
		 * T/TCP: Connection was half-synchronized, and our SYN has
		 * been ACK'd (so connection is now fully synchronized). Go
		 * to non-starred state, increment snd_una for ACK of SYN,
		 * and check if we can do window scaling.
		 */
		tp->t_flags &= ~TF_NEEDSYN;
		tp->snd_una++;
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			/* Send window already scaled. */
		}
	}
	nsegs = max(1, m->m_pkthdr.lro_nsegs);

	acked = BYTES_THIS_ACK(tp, th);
	if (acked) {
		/*
		 * Any time we move the cum-ack forward clear
		 * keep-alive tied probe-not-answered. The
		 * persists clears its own on entry.
		 */
		rack->probe_not_answered = 0;
	}
	KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs);
	KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked);
	/*
	 * If we just performed our first retransmit, and the ACK arrives
	 * within our recovery window, then it was a mistake to do the
	 * retransmit in the first place. Recover our original cwnd and
	 * ssthresh, and proceed to transmit where we left off.
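	 * (TF_PREVVALID is only set when the prior cwnd/ssthresh were saved
	 * at retransmit time, and t_badrxtwin bounds how long the undo via
	 * CC_RTO_ERR below remains valid.)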
10142 */ 10143 if ((tp->t_flags & TF_PREVVALID) && 10144 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10145 tp->t_flags &= ~TF_PREVVALID; 10146 if (tp->t_rxtshift == 1 && 10147 (int)(ticks - tp->t_badrxtwin) < 0) 10148 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10149 } 10150 if (acked) { 10151 /* assure we are not backed off */ 10152 tp->t_rxtshift = 0; 10153 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 10154 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 10155 rack->rc_tlp_in_progress = 0; 10156 rack->r_ctl.rc_tlp_cnt_out = 0; 10157 /* 10158 * If it is the RXT timer we want to 10159 * stop it, so we can restart a TLP. 10160 */ 10161 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 10162 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10163 #ifdef NETFLIX_HTTP_LOGGING 10164 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 10165 #endif 10166 } 10167 /* 10168 * If we have a timestamp reply, update smoothed round trip time. If 10169 * no timestamp is present but transmit timer is running and timed 10170 * sequence number was acked, update smoothed round trip time. Since 10171 * we now have an rtt measurement, cancel the timer backoff (cf., 10172 * Phil Karn's retransmit alg.). Recompute the initial retransmit 10173 * timer. 10174 * 10175 * Some boxes send broken timestamp replies during the SYN+ACK 10176 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10177 * and blow up the retransmit timer. 10178 */ 10179 /* 10180 * If all outstanding data is acked, stop retransmit timer and 10181 * remember to restart (more output or persist). If there is more 10182 * data to be acked, restart retransmit timer, using current 10183 * (possibly backed-off) value. 10184 */ 10185 if (acked == 0) { 10186 if (ofia) 10187 *ofia = ourfinisacked; 10188 return (0); 10189 } 10190 if (IN_RECOVERY(tp->t_flags)) { 10191 if (SEQ_LT(th->th_ack, tp->snd_recover) && 10192 (SEQ_LT(th->th_ack, tp->snd_max))) { 10193 tcp_rack_partialack(tp); 10194 } else { 10195 rack_post_recovery(tp, th->th_ack); 10196 recovery = 1; 10197 } 10198 } 10199 /* 10200 * Let the congestion control algorithm update congestion control 10201 * related information. This typically means increasing the 10202 * congestion window. 10203 */ 10204 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, recovery); 10205 SOCKBUF_LOCK(&so->so_snd); 10206 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 10207 tp->snd_wnd -= acked_amount; 10208 mfree = sbcut_locked(&so->so_snd, acked_amount); 10209 if ((sbused(&so->so_snd) == 0) && 10210 (acked > acked_amount) && 10211 (tp->t_state >= TCPS_FIN_WAIT_1) && 10212 (tp->t_flags & TF_SENTFIN)) { 10213 /* 10214 * We must be sure our fin 10215 * was sent and acked (we can be 10216 * in FIN_WAIT_1 without having 10217 * sent the fin). 10218 */ 10219 ourfinisacked = 1; 10220 } 10221 tp->snd_una = th->th_ack; 10222 if (acked_amount && sbavail(&so->so_snd)) 10223 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 10224 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 10225 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 10226 sowwakeup_locked(so); 10227 m_freem(mfree); 10228 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 10229 tp->snd_recover = tp->snd_una; 10230 10231 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { 10232 tp->snd_nxt = tp->snd_una; 10233 } 10234 if (under_pacing && 10235 (rack->use_fixed_rate == 0) && 10236 (rack->in_probe_rtt == 0) && 10237 rack->rc_gp_dyn_mul && 10238 rack->rc_always_pace) { 10239 /* Check if we are dragging bottom */ 10240 rack_check_bottom_drag(tp, rack, so, acked); 10241 } 10242 if (tp->snd_una == tp->snd_max) { 10243 /* Nothing left outstanding */ 10244 tp->t_flags &= ~TF_PREVVALID; 10245 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 10246 rack->r_ctl.retran_during_recovery = 0; 10247 rack->r_ctl.dsack_byte_cnt = 0; 10248 if (rack->r_ctl.rc_went_idle_time == 0) 10249 rack->r_ctl.rc_went_idle_time = 1; 10250 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 10251 if (sbavail(&tptosocket(tp)->so_snd) == 0) 10252 tp->t_acktime = 0; 10253 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 10254 /* Set need output so persist might get set */ 10255 rack->r_wanted_output = 1; 10256 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10257 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 10258 (sbavail(&so->so_snd) == 0) && 10259 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 10260 /* 10261 * The socket was gone and the 10262 * peer sent data (now or in the past), time to 10263 * reset him. 10264 */ 10265 *ret_val = 1; 10266 /* tcp_close will kill the inp pre-log the Reset */ 10267 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 10268 tp = tcp_close(tp); 10269 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 10270 return (1); 10271 } 10272 } 10273 if (ofia) 10274 *ofia = ourfinisacked; 10275 return (0); 10276 } 10277 10278 10279 static void 10280 rack_log_collapse(struct tcp_rack *rack, uint32_t cnt, uint32_t split, uint32_t out, int line, 10281 int dir, uint32_t flags, struct rack_sendmap *rsm) 10282 { 10283 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 10284 union tcp_log_stackspecific log; 10285 struct timeval tv; 10286 10287 memset(&log, 0, sizeof(log)); 10288 log.u_bbr.flex1 = cnt; 10289 log.u_bbr.flex2 = split; 10290 log.u_bbr.flex3 = out; 10291 log.u_bbr.flex4 = line; 10292 log.u_bbr.flex5 = rack->r_must_retran; 10293 log.u_bbr.flex6 = flags; 10294 log.u_bbr.flex7 = rack->rc_has_collapsed; 10295 log.u_bbr.flex8 = dir; /* 10296 * 1 is collapsed, 0 is uncollapsed, 10297 * 2 is log of a rsm being marked, 3 is a split. 10298 */ 10299 if (rsm == NULL) 10300 log.u_bbr.rttProp = 0; 10301 else 10302 log.u_bbr.rttProp = (uint64_t)rsm; 10303 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 10304 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 10305 TCP_LOG_EVENTP(rack->rc_tp, NULL, 10306 &rack->rc_inp->inp_socket->so_rcv, 10307 &rack->rc_inp->inp_socket->so_snd, 10308 TCP_RACK_LOG_COLLAPSE, 0, 10309 0, &log, false, &tv); 10310 } 10311 } 10312 10313 static void 10314 rack_collapsed_window(struct tcp_rack *rack, uint32_t out, int line) 10315 { 10316 /* 10317 * Here all we do is mark the collapsed point and set the flag. 10318 * This may happen again and again, but there is no 10319 * sense splitting our map until we know where the 10320 * peer finally lands in the collapse. 
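	 * All we record here is the edge the peer advertised (snd_una +
	 * snd_wnd) as last_collapse_point and the current snd_max as
	 * high_collapse_point; the sendmap is only split and marked once
	 * the window re-opens in rack_un_collapse_window().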
10321 */ 10322 rack_trace_point(rack, RACK_TP_COLLAPSED_WND); 10323 if ((rack->rc_has_collapsed == 0) || 10324 (rack->r_ctl.last_collapse_point != (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd))) 10325 counter_u64_add(rack_collapsed_win_seen, 1); 10326 rack->r_ctl.last_collapse_point = rack->rc_tp->snd_una + rack->rc_tp->snd_wnd; 10327 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; 10328 rack->rc_has_collapsed = 1; 10329 rack->r_collapse_point_valid = 1; 10330 rack_log_collapse(rack, 0, 0, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); 10331 } 10332 10333 static void 10334 rack_un_collapse_window(struct tcp_rack *rack, int line) 10335 { 10336 struct rack_sendmap *nrsm, *rsm, fe; 10337 int cnt = 0, split = 0; 10338 #ifdef INVARIANTS 10339 struct rack_sendmap *insret; 10340 #endif 10341 10342 memset(&fe, 0, sizeof(fe)); 10343 rack->rc_has_collapsed = 0; 10344 fe.r_start = rack->r_ctl.last_collapse_point; 10345 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 10346 if (rsm == NULL) { 10347 /* Nothing to do maybe the peer ack'ed it all */ 10348 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10349 return; 10350 } 10351 /* Now do we need to split this one? */ 10352 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { 10353 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 10354 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); 10355 nrsm = rack_alloc_limit(rack, RACK_LIMIT_TYPE_SPLIT); 10356 if (nrsm == NULL) { 10357 /* We can't get a rsm, mark all? */ 10358 nrsm = rsm; 10359 goto no_split; 10360 } 10361 /* Clone it */ 10362 split = 1; 10363 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); 10364 #ifndef INVARIANTS 10365 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10366 #else 10367 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm); 10368 if (insret != NULL) { 10369 panic("Insert in rb tree of %p fails ret:%p rack:%p rsm:%p", 10370 nrsm, insret, rack, rsm); 10371 } 10372 #endif 10373 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, 10374 rack->r_ctl.last_collapse_point, __LINE__); 10375 if (rsm->r_in_tmap) { 10376 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 10377 nrsm->r_in_tmap = 1; 10378 } 10379 /* 10380 * Set in the new RSM as the 10381 * collapsed starting point 10382 */ 10383 rsm = nrsm; 10384 } 10385 no_split: 10386 RB_FOREACH_FROM(nrsm, rack_rb_tree_head, rsm) { 10387 nrsm->r_flags |= RACK_RWND_COLLAPSED; 10388 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); 10389 cnt++; 10390 } 10391 if (cnt) { 10392 counter_u64_add(rack_collapsed_win, 1); 10393 } 10394 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); 10395 } 10396 10397 static void 10398 rack_handle_delayed_ack(struct tcpcb *tp, struct tcp_rack *rack, 10399 int32_t tlen, int32_t tfo_syn) 10400 { 10401 if (DELAY_ACK(tp, tlen) || tfo_syn) { 10402 if (rack->rc_dack_mode && 10403 (tlen > 500) && 10404 (rack->rc_dack_toggle == 1)) { 10405 goto no_delayed_ack; 10406 } 10407 rack_timer_cancel(tp, rack, 10408 rack->r_ctl.rc_rcvtime, __LINE__); 10409 tp->t_flags |= TF_DELACK; 10410 } else { 10411 no_delayed_ack: 10412 rack->r_wanted_output = 1; 10413 tp->t_flags |= TF_ACKNOW; 10414 if (rack->rc_dack_mode) { 10415 if (tp->t_flags & TF_DELACK) 10416 rack->rc_dack_toggle = 1; 10417 else 10418 rack->rc_dack_toggle = 0; 10419 } 10420 } 10421 } 10422 10423 static void 10424 rack_validate_fo_sendwin_up(struct tcpcb 
*tp, struct tcp_rack *rack) 10425 { 10426 /* 10427 * If fast output is in progress, lets validate that 10428 * the new window did not shrink on us and make it 10429 * so fast output should end. 10430 */ 10431 if (rack->r_fast_output) { 10432 uint32_t out; 10433 10434 /* 10435 * Calculate what we will send if left as is 10436 * and compare that to our send window. 10437 */ 10438 out = ctf_outstanding(tp); 10439 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { 10440 /* ok we have an issue */ 10441 if (out >= tp->snd_wnd) { 10442 /* Turn off fast output the window is met or collapsed */ 10443 rack->r_fast_output = 0; 10444 } else { 10445 /* we have some room left */ 10446 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; 10447 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { 10448 /* If not at least 1 full segment never mind */ 10449 rack->r_fast_output = 0; 10450 } 10451 } 10452 } 10453 } 10454 } 10455 10456 10457 /* 10458 * Return value of 1, the TCB is unlocked and most 10459 * likely gone, return value of 0, the TCP is still 10460 * locked. 10461 */ 10462 static int 10463 rack_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 10464 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 10465 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 10466 { 10467 /* 10468 * Update window information. Don't look at window if no ACK: TAC's 10469 * send garbage on first SYN. 10470 */ 10471 int32_t nsegs; 10472 int32_t tfo_syn; 10473 struct tcp_rack *rack; 10474 10475 INP_WLOCK_ASSERT(tptoinpcb(tp)); 10476 10477 rack = (struct tcp_rack *)tp->t_fb_ptr; 10478 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10479 if ((thflags & TH_ACK) && 10480 (SEQ_LT(tp->snd_wl1, th->th_seq) || 10481 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 10482 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 10483 /* keep track of pure window updates */ 10484 if (tlen == 0 && 10485 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 10486 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 10487 tp->snd_wnd = tiwin; 10488 rack_validate_fo_sendwin_up(tp, rack); 10489 tp->snd_wl1 = th->th_seq; 10490 tp->snd_wl2 = th->th_ack; 10491 if (tp->snd_wnd > tp->max_sndwnd) 10492 tp->max_sndwnd = tp->snd_wnd; 10493 rack->r_wanted_output = 1; 10494 } else if (thflags & TH_ACK) { 10495 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 10496 tp->snd_wnd = tiwin; 10497 rack_validate_fo_sendwin_up(tp, rack); 10498 tp->snd_wl1 = th->th_seq; 10499 tp->snd_wl2 = th->th_ack; 10500 } 10501 } 10502 if (tp->snd_wnd < ctf_outstanding(tp)) 10503 /* The peer collapsed the window */ 10504 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 10505 else if (rack->rc_has_collapsed) 10506 rack_un_collapse_window(rack, __LINE__); 10507 if ((rack->r_collapse_point_valid) && 10508 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) 10509 rack->r_collapse_point_valid = 0; 10510 /* Was persist timer active and now we have window space? */ 10511 if ((rack->rc_in_persist != 0) && 10512 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10513 rack->r_ctl.rc_pace_min_segs))) { 10514 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10515 tp->snd_nxt = tp->snd_max; 10516 /* Make sure we output to start the timer */ 10517 rack->r_wanted_output = 1; 10518 } 10519 /* Do we enter persists? 
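	 * We do if we are not already in persist, the peer's window has
	 * dropped below min(rc_high_rwnd/2, rc_pace_min_segs), the
	 * connection is established, nothing is outstanding (or the window
	 * collapsed on us), and the socket buffer holds more data than the
	 * window will take.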
*/ 10520 if ((rack->rc_in_persist == 0) && 10521 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10522 TCPS_HAVEESTABLISHED(tp->t_state) && 10523 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10524 sbavail(&tptosocket(tp)->so_snd) && 10525 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10526 /* 10527 * Here the rwnd is less than 10528 * the pacing size, we are established, 10529 * nothing is outstanding, and there is 10530 * data to send. Enter persists. 10531 */ 10532 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10533 } 10534 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 10535 m_freem(m); 10536 return (0); 10537 } 10538 /* 10539 * don't process the URG bit, ignore them drag 10540 * along the up. 10541 */ 10542 tp->rcv_up = tp->rcv_nxt; 10543 10544 /* 10545 * Process the segment text, merging it into the TCP sequencing 10546 * queue, and arranging for acknowledgment of receipt if necessary. 10547 * This process logically involves adjusting tp->rcv_wnd as data is 10548 * presented to the user (this happens in tcp_usrreq.c, case 10549 * PRU_RCVD). If a FIN has already been received on this connection 10550 * then we just ignore the text. 10551 */ 10552 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 10553 IS_FASTOPEN(tp->t_flags)); 10554 if ((tlen || (thflags & TH_FIN) || (tfo_syn && tlen > 0)) && 10555 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10556 tcp_seq save_start = th->th_seq; 10557 tcp_seq save_rnxt = tp->rcv_nxt; 10558 int save_tlen = tlen; 10559 10560 m_adj(m, drop_hdrlen); /* delayed header drop */ 10561 /* 10562 * Insert segment which includes th into TCP reassembly 10563 * queue with control block tp. Set thflags to whether 10564 * reassembly now includes a segment with FIN. This handles 10565 * the common case inline (segment is the next to be 10566 * received on an established connection, and the queue is 10567 * empty), avoiding linkage into and removal from the queue 10568 * and repetition of various conversions. Set DELACK for 10569 * segments received in order, but ack immediately when 10570 * segments are out of order (so fast retransmit can work). 10571 */ 10572 if (th->th_seq == tp->rcv_nxt && 10573 SEGQ_EMPTY(tp) && 10574 (TCPS_HAVEESTABLISHED(tp->t_state) || 10575 tfo_syn)) { 10576 #ifdef NETFLIX_SB_LIMITS 10577 u_int mcnt, appended; 10578 10579 if (so->so_rcv.sb_shlim) { 10580 mcnt = m_memcnt(m); 10581 appended = 0; 10582 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10583 CFO_NOSLEEP, NULL) == false) { 10584 counter_u64_add(tcp_sb_shlim_fails, 1); 10585 m_freem(m); 10586 return (0); 10587 } 10588 } 10589 #endif 10590 rack_handle_delayed_ack(tp, rack, tlen, tfo_syn); 10591 tp->rcv_nxt += tlen; 10592 if (tlen && 10593 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10594 (tp->t_fbyte_in == 0)) { 10595 tp->t_fbyte_in = ticks; 10596 if (tp->t_fbyte_in == 0) 10597 tp->t_fbyte_in = 1; 10598 if (tp->t_fbyte_out && tp->t_fbyte_in) 10599 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10600 } 10601 thflags = tcp_get_flags(th) & TH_FIN; 10602 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10603 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10604 SOCKBUF_LOCK(&so->so_rcv); 10605 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10606 m_freem(m); 10607 } else 10608 #ifdef NETFLIX_SB_LIMITS 10609 appended = 10610 #endif 10611 sbappendstream_locked(&so->so_rcv, m, 0); 10612 10613 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10614 /* NB: sorwakeup_locked() does an implicit unlock. 
*/ 10615 sorwakeup_locked(so); 10616 #ifdef NETFLIX_SB_LIMITS 10617 if (so->so_rcv.sb_shlim && appended != mcnt) 10618 counter_fo_release(so->so_rcv.sb_shlim, 10619 mcnt - appended); 10620 #endif 10621 } else { 10622 /* 10623 * XXX: Due to the header drop above "th" is 10624 * theoretically invalid by now. Fortunately 10625 * m_adj() doesn't actually frees any mbufs when 10626 * trimming from the head. 10627 */ 10628 tcp_seq temp = save_start; 10629 10630 thflags = tcp_reass(tp, th, &temp, &tlen, m); 10631 tp->t_flags |= TF_ACKNOW; 10632 if (tp->t_flags & TF_WAKESOR) { 10633 tp->t_flags &= ~TF_WAKESOR; 10634 /* NB: sorwakeup_locked() does an implicit unlock. */ 10635 sorwakeup_locked(so); 10636 } 10637 } 10638 if ((tp->t_flags & TF_SACK_PERMIT) && 10639 (save_tlen > 0) && 10640 TCPS_HAVEESTABLISHED(tp->t_state)) { 10641 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 10642 /* 10643 * DSACK actually handled in the fastpath 10644 * above. 10645 */ 10646 RACK_OPTS_INC(tcp_sack_path_1); 10647 tcp_update_sack_list(tp, save_start, 10648 save_start + save_tlen); 10649 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 10650 if ((tp->rcv_numsacks >= 1) && 10651 (tp->sackblks[0].end == save_start)) { 10652 /* 10653 * Partial overlap, recorded at todrop 10654 * above. 10655 */ 10656 RACK_OPTS_INC(tcp_sack_path_2a); 10657 tcp_update_sack_list(tp, 10658 tp->sackblks[0].start, 10659 tp->sackblks[0].end); 10660 } else { 10661 RACK_OPTS_INC(tcp_sack_path_2b); 10662 tcp_update_dsack_list(tp, save_start, 10663 save_start + save_tlen); 10664 } 10665 } else if (tlen >= save_tlen) { 10666 /* Update of sackblks. */ 10667 RACK_OPTS_INC(tcp_sack_path_3); 10668 tcp_update_dsack_list(tp, save_start, 10669 save_start + save_tlen); 10670 } else if (tlen > 0) { 10671 RACK_OPTS_INC(tcp_sack_path_4); 10672 tcp_update_dsack_list(tp, save_start, 10673 save_start + tlen); 10674 } 10675 } 10676 } else { 10677 m_freem(m); 10678 thflags &= ~TH_FIN; 10679 } 10680 10681 /* 10682 * If FIN is received ACK the FIN and let the user know that the 10683 * connection is closing. 10684 */ 10685 if (thflags & TH_FIN) { 10686 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 10687 /* The socket upcall is handled by socantrcvmore. */ 10688 socantrcvmore(so); 10689 /* 10690 * If connection is half-synchronized (ie NEEDSYN 10691 * flag on) then delay ACK, so it may be piggybacked 10692 * when SYN is sent. Otherwise, since we received a 10693 * FIN then no more input can be expected, send ACK 10694 * now. 10695 */ 10696 if (tp->t_flags & TF_NEEDSYN) { 10697 rack_timer_cancel(tp, rack, 10698 rack->r_ctl.rc_rcvtime, __LINE__); 10699 tp->t_flags |= TF_DELACK; 10700 } else { 10701 tp->t_flags |= TF_ACKNOW; 10702 } 10703 tp->rcv_nxt++; 10704 } 10705 switch (tp->t_state) { 10706 /* 10707 * In SYN_RECEIVED and ESTABLISHED STATES enter the 10708 * CLOSE_WAIT state. 10709 */ 10710 case TCPS_SYN_RECEIVED: 10711 tp->t_starttime = ticks; 10712 /* FALLTHROUGH */ 10713 case TCPS_ESTABLISHED: 10714 rack_timer_cancel(tp, rack, 10715 rack->r_ctl.rc_rcvtime, __LINE__); 10716 tcp_state_change(tp, TCPS_CLOSE_WAIT); 10717 break; 10718 10719 /* 10720 * If still in FIN_WAIT_1 STATE FIN has not been 10721 * acked so enter the CLOSING state. 10722 */ 10723 case TCPS_FIN_WAIT_1: 10724 rack_timer_cancel(tp, rack, 10725 rack->r_ctl.rc_rcvtime, __LINE__); 10726 tcp_state_change(tp, TCPS_CLOSING); 10727 break; 10728 10729 /* 10730 * In FIN_WAIT_2 state enter the TIME_WAIT state, 10731 * starting the time-wait timer, turning off the 10732 * other standard timers. 
10733 */ 10734 case TCPS_FIN_WAIT_2: 10735 rack_timer_cancel(tp, rack, 10736 rack->r_ctl.rc_rcvtime, __LINE__); 10737 tcp_twstart(tp); 10738 return (1); 10739 } 10740 } 10741 /* 10742 * Return any desired output. 10743 */ 10744 if ((tp->t_flags & TF_ACKNOW) || 10745 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { 10746 rack->r_wanted_output = 1; 10747 } 10748 return (0); 10749 } 10750 10751 /* 10752 * Here nothing is really faster, its just that we 10753 * have broken out the fast-data path also just like 10754 * the fast-ack. 10755 */ 10756 static int 10757 rack_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 10758 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10759 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 10760 { 10761 int32_t nsegs; 10762 int32_t newsize = 0; /* automatic sockbuf scaling */ 10763 struct tcp_rack *rack; 10764 #ifdef NETFLIX_SB_LIMITS 10765 u_int mcnt, appended; 10766 #endif 10767 10768 /* 10769 * If last ACK falls within this segment's sequence numbers, record 10770 * the timestamp. NOTE that the test is modified according to the 10771 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10772 */ 10773 if (__predict_false(th->th_seq != tp->rcv_nxt)) { 10774 return (0); 10775 } 10776 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10777 return (0); 10778 } 10779 if (tiwin && tiwin != tp->snd_wnd) { 10780 return (0); 10781 } 10782 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 10783 return (0); 10784 } 10785 if (__predict_false((to->to_flags & TOF_TS) && 10786 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 10787 return (0); 10788 } 10789 if (__predict_false((th->th_ack != tp->snd_una))) { 10790 return (0); 10791 } 10792 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 10793 return (0); 10794 } 10795 if ((to->to_flags & TOF_TS) != 0 && 10796 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10797 tp->ts_recent_age = tcp_ts_getticks(); 10798 tp->ts_recent = to->to_tsval; 10799 } 10800 rack = (struct tcp_rack *)tp->t_fb_ptr; 10801 /* 10802 * This is a pure, in-sequence data packet with nothing on the 10803 * reassembly queue and we have enough buffer space to take it. 10804 */ 10805 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10806 10807 #ifdef NETFLIX_SB_LIMITS 10808 if (so->so_rcv.sb_shlim) { 10809 mcnt = m_memcnt(m); 10810 appended = 0; 10811 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 10812 CFO_NOSLEEP, NULL) == false) { 10813 counter_u64_add(tcp_sb_shlim_fails, 1); 10814 m_freem(m); 10815 return (1); 10816 } 10817 } 10818 #endif 10819 /* Clean receiver SACK report if present */ 10820 if (tp->rcv_numsacks) 10821 tcp_clean_sackreport(tp); 10822 KMOD_TCPSTAT_INC(tcps_preddat); 10823 tp->rcv_nxt += tlen; 10824 if (tlen && 10825 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && 10826 (tp->t_fbyte_in == 0)) { 10827 tp->t_fbyte_in = ticks; 10828 if (tp->t_fbyte_in == 0) 10829 tp->t_fbyte_in = 1; 10830 if (tp->t_fbyte_out && tp->t_fbyte_in) 10831 tp->t_flags2 |= TF2_FBYTES_COMPLETE; 10832 } 10833 /* 10834 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 10835 */ 10836 tp->snd_wl1 = th->th_seq; 10837 /* 10838 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 10839 */ 10840 tp->rcv_up = tp->rcv_nxt; 10841 KMOD_TCPSTAT_ADD(tcps_rcvpack, nsegs); 10842 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 10843 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 10844 10845 /* Add data to socket buffer. 
*/ 10846 SOCKBUF_LOCK(&so->so_rcv); 10847 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 10848 m_freem(m); 10849 } else { 10850 /* 10851 * Set new socket buffer size. Give up when limit is 10852 * reached. 10853 */ 10854 if (newsize) 10855 if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 10856 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 10857 m_adj(m, drop_hdrlen); /* delayed header drop */ 10858 #ifdef NETFLIX_SB_LIMITS 10859 appended = 10860 #endif 10861 sbappendstream_locked(&so->so_rcv, m, 0); 10862 ctf_calc_rwin(so, tp); 10863 } 10864 rack_log_wakeup(tp,rack, &so->so_rcv, tlen, 1); 10865 /* NB: sorwakeup_locked() does an implicit unlock. */ 10866 sorwakeup_locked(so); 10867 #ifdef NETFLIX_SB_LIMITS 10868 if (so->so_rcv.sb_shlim && mcnt != appended) 10869 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 10870 #endif 10871 rack_handle_delayed_ack(tp, rack, tlen, 0); 10872 if (tp->snd_una == tp->snd_max) 10873 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 10874 return (1); 10875 } 10876 10877 /* 10878 * This subfunction is used to try to highly optimize the 10879 * fast path. We again allow window updates that are 10880 * in sequence to remain in the fast-path. We also add 10881 * in the __predict's to attempt to help the compiler. 10882 * Note that if we return a 0, then we can *not* process 10883 * it and the caller should push the packet into the 10884 * slow-path. 10885 */ 10886 static int 10887 rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 10888 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 10889 uint32_t tiwin, int32_t nxt_pkt, uint32_t cts) 10890 { 10891 int32_t acked; 10892 int32_t nsegs; 10893 int32_t under_pacing = 0; 10894 struct tcp_rack *rack; 10895 10896 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 10897 /* Old ack, behind (or duplicate to) the last one rcv'd */ 10898 return (0); 10899 } 10900 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 10901 /* Above what we have sent? */ 10902 return (0); 10903 } 10904 if (__predict_false(tp->snd_nxt != tp->snd_max)) { 10905 /* We are retransmitting */ 10906 return (0); 10907 } 10908 if (__predict_false(tiwin == 0)) { 10909 /* zero window */ 10910 return (0); 10911 } 10912 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 10913 /* We need a SYN or a FIN, unlikely.. */ 10914 return (0); 10915 } 10916 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 10917 /* Timestamp is behind .. old ack with seq wrap? */ 10918 return (0); 10919 } 10920 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 10921 /* Still recovering */ 10922 return (0); 10923 } 10924 rack = (struct tcp_rack *)tp->t_fb_ptr; 10925 if (rack->r_ctl.rc_sacked) { 10926 /* We have sack holes on our scoreboard */ 10927 return (0); 10928 } 10929 /* Ok if we reach here, we can process a fast-ack */ 10930 if (rack->gp_ready && 10931 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 10932 under_pacing = 1; 10933 } 10934 nsegs = max(1, m->m_pkthdr.lro_nsegs); 10935 rack_log_ack(tp, to, th, 0, 0); 10936 /* Did the window get updated? */ 10937 if (tiwin != tp->snd_wnd) { 10938 tp->snd_wnd = tiwin; 10939 rack_validate_fo_sendwin_up(tp, rack); 10940 tp->snd_wl1 = th->th_seq; 10941 if (tp->snd_wnd > tp->max_sndwnd) 10942 tp->max_sndwnd = tp->snd_wnd; 10943 } 10944 /* Do we exit persists? 
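	 * We do once the peer's window has grown back to at least
	 * min(rc_high_rwnd/2, rc_pace_min_segs) bytes.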
*/ 10945 if ((rack->rc_in_persist != 0) && 10946 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 10947 rack->r_ctl.rc_pace_min_segs))) { 10948 rack_exit_persist(tp, rack, cts); 10949 } 10950 /* Do we enter persists? */ 10951 if ((rack->rc_in_persist == 0) && 10952 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 10953 TCPS_HAVEESTABLISHED(tp->t_state) && 10954 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 10955 sbavail(&tptosocket(tp)->so_snd) && 10956 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 10957 /* 10958 * Here the rwnd is less than 10959 * the pacing size, we are established, 10960 * nothing is outstanding, and there is 10961 * data to send. Enter persists. 10962 */ 10963 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 10964 } 10965 /* 10966 * If last ACK falls within this segment's sequence numbers, record 10967 * the timestamp. NOTE that the test is modified according to the 10968 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 10969 */ 10970 if ((to->to_flags & TOF_TS) != 0 && 10971 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 10972 tp->ts_recent_age = tcp_ts_getticks(); 10973 tp->ts_recent = to->to_tsval; 10974 } 10975 /* 10976 * This is a pure ack for outstanding data. 10977 */ 10978 KMOD_TCPSTAT_INC(tcps_predack); 10979 10980 /* 10981 * "bad retransmit" recovery. 10982 */ 10983 if ((tp->t_flags & TF_PREVVALID) && 10984 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 10985 tp->t_flags &= ~TF_PREVVALID; 10986 if (tp->t_rxtshift == 1 && 10987 (int)(ticks - tp->t_badrxtwin) < 0) 10988 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); 10989 } 10990 /* 10991 * Recalculate the transmit timer / rtt. 10992 * 10993 * Some boxes send broken timestamp replies during the SYN+ACK 10994 * phase, ignore timestamps of 0 or we could calculate a huge RTT 10995 * and blow up the retransmit timer. 10996 */ 10997 acked = BYTES_THIS_ACK(tp, th); 10998 10999 #ifdef TCP_HHOOK 11000 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 11001 hhook_run_tcp_est_in(tp, th, to); 11002 #endif 11003 KMOD_TCPSTAT_ADD(tcps_rcvackpack, nsegs); 11004 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 11005 if (acked) { 11006 struct mbuf *mfree; 11007 11008 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); 11009 SOCKBUF_LOCK(&so->so_snd); 11010 mfree = sbcut_locked(&so->so_snd, acked); 11011 tp->snd_una = th->th_ack; 11012 /* Note we want to hold the sb lock through the sendmap adjust */ 11013 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 11014 /* Wake up the socket if we have room to write more */ 11015 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 11016 sowwakeup_locked(so); 11017 m_freem(mfree); 11018 tp->t_rxtshift = 0; 11019 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 11020 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 11021 rack->rc_tlp_in_progress = 0; 11022 rack->r_ctl.rc_tlp_cnt_out = 0; 11023 /* 11024 * If it is the RXT timer we want to 11025 * stop it, so we can restart a TLP. 11026 */ 11027 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 11028 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11029 #ifdef NETFLIX_HTTP_LOGGING 11030 tcp_http_check_for_comp(rack->rc_tp, th->th_ack); 11031 #endif 11032 } 11033 /* 11034 * Let the congestion control algorithm update congestion control 11035 * related information. This typically means increasing the 11036 * congestion window. 
11037 */ 11038 if (tp->snd_wnd < ctf_outstanding(tp)) { 11039 /* The peer collapsed the window */ 11040 rack_collapsed_window(rack, ctf_outstanding(tp), __LINE__); 11041 } else if (rack->rc_has_collapsed) 11042 rack_un_collapse_window(rack, __LINE__); 11043 if ((rack->r_collapse_point_valid) && 11044 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) 11045 rack->r_collapse_point_valid = 0; 11046 /* 11047 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 11048 */ 11049 tp->snd_wl2 = th->th_ack; 11050 tp->t_dupacks = 0; 11051 m_freem(m); 11052 /* ND6_HINT(tp); *//* Some progress has been made. */ 11053 11054 /* 11055 * If all outstanding data are acked, stop retransmit timer, 11056 * otherwise restart timer using current (possibly backed-off) 11057 * value. If process is waiting for space, wakeup/selwakeup/signal. 11058 * If data are ready to send, let tcp_output decide between more 11059 * output or persist. 11060 */ 11061 if (under_pacing && 11062 (rack->use_fixed_rate == 0) && 11063 (rack->in_probe_rtt == 0) && 11064 rack->rc_gp_dyn_mul && 11065 rack->rc_always_pace) { 11066 /* Check if we are dragging bottom */ 11067 rack_check_bottom_drag(tp, rack, so, acked); 11068 } 11069 if (tp->snd_una == tp->snd_max) { 11070 tp->t_flags &= ~TF_PREVVALID; 11071 rack->r_ctl.retran_during_recovery = 0; 11072 rack->r_ctl.dsack_byte_cnt = 0; 11073 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 11074 if (rack->r_ctl.rc_went_idle_time == 0) 11075 rack->r_ctl.rc_went_idle_time = 1; 11076 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 11077 if (sbavail(&tptosocket(tp)->so_snd) == 0) 11078 tp->t_acktime = 0; 11079 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 11080 } 11081 if (acked && rack->r_fast_output) 11082 rack_gain_for_fastoutput(rack, tp, so, (uint32_t)acked); 11083 if (sbavail(&so->so_snd)) { 11084 rack->r_wanted_output = 1; 11085 } 11086 return (1); 11087 } 11088 11089 /* 11090 * Return value of 1, the TCB is unlocked and most 11091 * likely gone, return value of 0, the TCP is still 11092 * locked. 11093 */ 11094 static int 11095 rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 11096 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11097 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11098 { 11099 int32_t ret_val = 0; 11100 int32_t todrop; 11101 int32_t ourfinisacked = 0; 11102 struct tcp_rack *rack; 11103 11104 INP_WLOCK_ASSERT(tptoinpcb(tp)); 11105 11106 ctf_calc_rwin(so, tp); 11107 /* 11108 * If the state is SYN_SENT: if seg contains an ACK, but not for our 11109 * SYN, drop the input. if seg contains a RST, then drop the 11110 * connection. if seg does not contain SYN, then drop it. Otherwise 11111 * this is an acceptable SYN segment initialize tp->rcv_nxt and 11112 * tp->irs if seg contains ack then advance tp->snd_una if seg 11113 * contains an ECE and ECN support is enabled, the stream is ECN 11114 * capable. if SYN has been acked change to ESTABLISHED else 11115 * SYN_RCVD state arrange for segment to be acked (eventually) 11116 * continue processing rest of data/controls. 
11117 */ 11118 if ((thflags & TH_ACK) && 11119 (SEQ_LEQ(th->th_ack, tp->iss) || 11120 SEQ_GT(th->th_ack, tp->snd_max))) { 11121 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11122 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11123 return (1); 11124 } 11125 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 11126 TCP_PROBE5(connect__refused, NULL, tp, 11127 mtod(m, const char *), tp, th); 11128 tp = tcp_drop(tp, ECONNREFUSED); 11129 ctf_do_drop(m, tp); 11130 return (1); 11131 } 11132 if (thflags & TH_RST) { 11133 ctf_do_drop(m, tp); 11134 return (1); 11135 } 11136 if (!(thflags & TH_SYN)) { 11137 ctf_do_drop(m, tp); 11138 return (1); 11139 } 11140 tp->irs = th->th_seq; 11141 tcp_rcvseqinit(tp); 11142 rack = (struct tcp_rack *)tp->t_fb_ptr; 11143 if (thflags & TH_ACK) { 11144 int tfo_partial = 0; 11145 11146 KMOD_TCPSTAT_INC(tcps_connects); 11147 soisconnected(so); 11148 #ifdef MAC 11149 mac_socketpeer_set_from_mbuf(m, so); 11150 #endif 11151 /* Do window scaling on this connection? */ 11152 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11153 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11154 tp->rcv_scale = tp->request_r_scale; 11155 } 11156 tp->rcv_adv += min(tp->rcv_wnd, 11157 TCP_MAXWIN << tp->rcv_scale); 11158 /* 11159 * If not all the data that was sent in the TFO SYN 11160 * has been acked, resend the remainder right away. 11161 */ 11162 if (IS_FASTOPEN(tp->t_flags) && 11163 (tp->snd_una != tp->snd_max)) { 11164 tp->snd_nxt = th->th_ack; 11165 tfo_partial = 1; 11166 } 11167 /* 11168 * If there's data, delay ACK; if there's also a FIN ACKNOW 11169 * will be turned on later. 11170 */ 11171 if (DELAY_ACK(tp, tlen) && tlen != 0 && !tfo_partial) { 11172 rack_timer_cancel(tp, rack, 11173 rack->r_ctl.rc_rcvtime, __LINE__); 11174 tp->t_flags |= TF_DELACK; 11175 } else { 11176 rack->r_wanted_output = 1; 11177 tp->t_flags |= TF_ACKNOW; 11178 rack->rc_dack_toggle = 0; 11179 } 11180 11181 tcp_ecn_input_syn_sent(tp, thflags, iptos); 11182 11183 if (SEQ_GT(th->th_ack, tp->snd_una)) { 11184 /* 11185 * We advance snd_una for the 11186 * fast open case. If th_ack is 11187 * acknowledging data beyond 11188 * snd_una we can't just call 11189 * ack-processing since the 11190 * data stream in our send-map 11191 * will start at snd_una + 1 (one 11192 * beyond the SYN). If its just 11193 * equal we don't need to do that 11194 * and there is no send_map. 11195 */ 11196 tp->snd_una++; 11197 } 11198 /* 11199 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 11200 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 11201 */ 11202 tp->t_starttime = ticks; 11203 if (tp->t_flags & TF_NEEDFIN) { 11204 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11205 tp->t_flags &= ~TF_NEEDFIN; 11206 thflags &= ~TH_SYN; 11207 } else { 11208 tcp_state_change(tp, TCPS_ESTABLISHED); 11209 TCP_PROBE5(connect__established, NULL, tp, 11210 mtod(m, const char *), tp, th); 11211 rack_cc_conn_init(tp); 11212 } 11213 } else { 11214 /* 11215 * Received initial SYN in SYN-SENT[*] state => simultaneous 11216 * open. If segment contains CC option and there is a 11217 * cached CC, apply TAO test. If it succeeds, connection is * 11218 * half-synchronized. Otherwise, do 3-way handshake: 11219 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 11220 * there was no CC option, clear cached CC value. 11221 */ 11222 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); 11223 tcp_state_change(tp, TCPS_SYN_RECEIVED); 11224 } 11225 /* 11226 * Advance th->th_seq to correspond to first data byte. 
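	 * (The SYN itself consumes one sequence number, which is why
	 * th_seq is advanced by one below.)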
If data, 11227 * trim to stay within window, dropping FIN if necessary. 11228 */ 11229 th->th_seq++; 11230 if (tlen > tp->rcv_wnd) { 11231 todrop = tlen - tp->rcv_wnd; 11232 m_adj(m, -todrop); 11233 tlen = tp->rcv_wnd; 11234 thflags &= ~TH_FIN; 11235 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 11236 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 11237 } 11238 tp->snd_wl1 = th->th_seq - 1; 11239 tp->rcv_up = th->th_seq; 11240 /* 11241 * Client side of transaction: already sent SYN and data. If the 11242 * remote host used T/TCP to validate the SYN, our data will be 11243 * ACK'd; if so, enter normal data segment processing in the middle 11244 * of step 5, ack processing. Otherwise, goto step 6. 11245 */ 11246 if (thflags & TH_ACK) { 11247 /* For syn-sent we need to possibly update the rtt */ 11248 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11249 uint32_t t, mcts; 11250 11251 mcts = tcp_ts_getticks(); 11252 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11253 if (!tp->t_rttlow || tp->t_rttlow > t) 11254 tp->t_rttlow = t; 11255 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); 11256 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11257 tcp_rack_xmit_timer_commit(rack, tp); 11258 } 11259 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 11260 return (ret_val); 11261 /* We may have changed to FIN_WAIT_1 above */ 11262 if (tp->t_state == TCPS_FIN_WAIT_1) { 11263 /* 11264 * In FIN_WAIT_1 STATE in addition to the processing 11265 * for the ESTABLISHED state if our FIN is now 11266 * acknowledged then enter FIN_WAIT_2. 11267 */ 11268 if (ourfinisacked) { 11269 /* 11270 * If we can't receive any more data, then 11271 * closing user can proceed. Starting the 11272 * timer is contrary to the specification, 11273 * but if we don't get a FIN we'll hang 11274 * forever. 11275 * 11276 * XXXjl: we should release the tp also, and 11277 * use a compressed state. 11278 */ 11279 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11280 soisdisconnected(so); 11281 tcp_timer_activate(tp, TT_2MSL, 11282 (tcp_fast_finwait2_recycle ? 11283 tcp_finwait2_timeout : 11284 TP_MAXIDLE(tp))); 11285 } 11286 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11287 } 11288 } 11289 } 11290 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11291 tiwin, thflags, nxt_pkt)); 11292 } 11293 11294 /* 11295 * Return value of 1, the TCB is unlocked and most 11296 * likely gone, return value of 0, the TCP is still 11297 * locked. 11298 */ 11299 static int 11300 rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 11301 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11302 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11303 { 11304 struct tcp_rack *rack; 11305 int32_t ret_val = 0; 11306 int32_t ourfinisacked = 0; 11307 11308 ctf_calc_rwin(so, tp); 11309 if ((thflags & TH_ACK) && 11310 (SEQ_LEQ(th->th_ack, tp->snd_una) || 11311 SEQ_GT(th->th_ack, tp->snd_max))) { 11312 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11313 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11314 return (1); 11315 } 11316 rack = (struct tcp_rack *)tp->t_fb_ptr; 11317 if (IS_FASTOPEN(tp->t_flags)) { 11318 /* 11319 * When a TFO connection is in SYN_RECEIVED, the 11320 * only valid packets are the initial SYN, a 11321 * retransmit/copy of the initial SYN (possibly with 11322 * a subset of the original data), a valid ACK, a 11323 * FIN, or a RST. 
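		 * A segment carrying both SYN and ACK here is treated as
		 * bogus and answered with a reset, while a bare
		 * retransmitted SYN is simply dropped as long as one of our
		 * retransmit/TLP/RACK timers is still pending.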
11324 */ 11325 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 11326 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11327 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11328 return (1); 11329 } else if (thflags & TH_SYN) { 11330 /* non-initial SYN is ignored */ 11331 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 11332 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 11333 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 11334 ctf_do_drop(m, NULL); 11335 return (0); 11336 } 11337 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 11338 ctf_do_drop(m, NULL); 11339 return (0); 11340 } 11341 } 11342 11343 if ((thflags & TH_RST) || 11344 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11345 return (__ctf_process_rst(m, th, so, tp, 11346 &rack->r_ctl.challenge_ack_ts, 11347 &rack->r_ctl.challenge_ack_cnt)); 11348 /* 11349 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11350 * it's less than ts_recent, drop it. 11351 */ 11352 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11353 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11354 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11355 return (ret_val); 11356 } 11357 /* 11358 * In the SYN-RECEIVED state, validate that the packet belongs to 11359 * this connection before trimming the data to fit the receive 11360 * window. Check the sequence number versus IRS since we know the 11361 * sequence numbers haven't wrapped. This is a partial fix for the 11362 * "LAND" DoS attack. 11363 */ 11364 if (SEQ_LT(th->th_seq, tp->irs)) { 11365 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11366 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11367 return (1); 11368 } 11369 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11370 &rack->r_ctl.challenge_ack_ts, 11371 &rack->r_ctl.challenge_ack_cnt)) { 11372 return (ret_val); 11373 } 11374 /* 11375 * If last ACK falls within this segment's sequence numbers, record 11376 * its timestamp. NOTE: 1) That the test incorporates suggestions 11377 * from the latest proposal of the tcplw@cray.com list (Braden 11378 * 1993/04/26). 2) That updating only on newer timestamps interferes 11379 * with our earlier PAWS tests, so this check should be solely 11380 * predicated on the sequence space of this segment. 3) That we 11381 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11382 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11383 * SEG.Len, This modified check allows us to overcome RFC1323's 11384 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11385 * p.869. In such cases, we can still calculate the RTT correctly 11386 * when RCV.NXT == Last.ACK.Sent. 11387 */ 11388 if ((to->to_flags & TOF_TS) != 0 && 11389 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11390 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11391 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11392 tp->ts_recent_age = tcp_ts_getticks(); 11393 tp->ts_recent = to->to_tsval; 11394 } 11395 tp->snd_wnd = tiwin; 11396 rack_validate_fo_sendwin_up(tp, rack); 11397 /* 11398 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11399 * is on (half-synchronized state), then queue data for later 11400 * processing; else drop segment and return. 
11401 */ 11402 if ((thflags & TH_ACK) == 0) { 11403 if (IS_FASTOPEN(tp->t_flags)) { 11404 rack_cc_conn_init(tp); 11405 } 11406 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11407 tiwin, thflags, nxt_pkt)); 11408 } 11409 KMOD_TCPSTAT_INC(tcps_connects); 11410 if (tp->t_flags & TF_SONOTCONN) { 11411 tp->t_flags &= ~TF_SONOTCONN; 11412 soisconnected(so); 11413 } 11414 /* Do window scaling? */ 11415 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 11416 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 11417 tp->rcv_scale = tp->request_r_scale; 11418 } 11419 /* 11420 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 11421 * FIN-WAIT-1 11422 */ 11423 tp->t_starttime = ticks; 11424 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 11425 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 11426 tp->t_tfo_pending = NULL; 11427 } 11428 if (tp->t_flags & TF_NEEDFIN) { 11429 tcp_state_change(tp, TCPS_FIN_WAIT_1); 11430 tp->t_flags &= ~TF_NEEDFIN; 11431 } else { 11432 tcp_state_change(tp, TCPS_ESTABLISHED); 11433 TCP_PROBE5(accept__established, NULL, tp, 11434 mtod(m, const char *), tp, th); 11435 /* 11436 * TFO connections call cc_conn_init() during SYN 11437 * processing. Calling it again here for such connections 11438 * is not harmless as it would undo the snd_cwnd reduction 11439 * that occurs when a TFO SYN|ACK is retransmitted. 11440 */ 11441 if (!IS_FASTOPEN(tp->t_flags)) 11442 rack_cc_conn_init(tp); 11443 } 11444 /* 11445 * Account for the ACK of our SYN prior to 11446 * regular ACK processing below, except for 11447 * simultaneous SYN, which is handled later. 11448 */ 11449 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 11450 tp->snd_una++; 11451 /* 11452 * If segment contains data or ACK, will call tcp_reass() later; if 11453 * not, do so now to pass queued data to user. 11454 */ 11455 if (tlen == 0 && (thflags & TH_FIN) == 0) { 11456 (void) tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 11457 (struct mbuf *)0); 11458 if (tp->t_flags & TF_WAKESOR) { 11459 tp->t_flags &= ~TF_WAKESOR; 11460 /* NB: sorwakeup_locked() does an implicit unlock. */ 11461 sorwakeup_locked(so); 11462 } 11463 } 11464 tp->snd_wl1 = th->th_seq - 1; 11465 /* For syn-recv we need to possibly update the rtt */ 11466 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { 11467 uint32_t t, mcts; 11468 11469 mcts = tcp_ts_getticks(); 11470 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; 11471 if (!tp->t_rttlow || tp->t_rttlow > t) 11472 tp->t_rttlow = t; 11473 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); 11474 tcp_rack_xmit_timer(rack, t + 1, 1, t, 0, NULL, 2); 11475 tcp_rack_xmit_timer_commit(rack, tp); 11476 } 11477 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11478 return (ret_val); 11479 } 11480 if (tp->t_state == TCPS_FIN_WAIT_1) { 11481 /* We could have went to FIN_WAIT_1 (or EST) above */ 11482 /* 11483 * In FIN_WAIT_1 STATE in addition to the processing for the 11484 * ESTABLISHED state if our FIN is now acknowledged then 11485 * enter FIN_WAIT_2. 11486 */ 11487 if (ourfinisacked) { 11488 /* 11489 * If we can't receive any more data, then closing 11490 * user can proceed. Starting the timer is contrary 11491 * to the specification, but if we don't get a FIN 11492 * we'll hang forever. 11493 * 11494 * XXXjl: we should release the tp also, and use a 11495 * compressed state. 
			 */
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				soisdisconnected(so);
				tcp_timer_activate(tp, TT_2MSL,
				    (tcp_fast_finwait2_recycle ?
				    tcp_finwait2_timeout :
				    TP_MAXIDLE(tp)));
			}
			tcp_state_change(tp, TCPS_FIN_WAIT_2);
		}
	}
	return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen,
	    tiwin, thflags, nxt_pkt));
}

/*
 * Return value of 1, the TCB is unlocked and most
 * likely gone, return value of 0, the TCP is still
 * locked.
 */
static int
rack_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen,
    uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos)
{
	int32_t ret_val = 0;
	struct tcp_rack *rack;

	/*
	 * Header prediction: check for the two common cases of a
	 * uni-directional data xfer. If the packet has no control flags,
	 * is in-sequence, the window didn't change and we're not
	 * retransmitting, it's a candidate. If the length is zero and the
	 * ack moved forward, we're the sender side of the xfer. Just free
	 * the data acked & wake any higher level process that was blocked
	 * waiting for space. If the length is non-zero and the ack didn't
	 * move, we're the receiver side. If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to the socket
	 * buffer and note that we need a delayed ack. Make sure that the
	 * hidden state-flags are also off. Since we check for
	 * TCPS_ESTABLISHED first, it can only be TF_NEEDSYN.
	 */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
	    __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_ACK)) == TH_ACK) &&
	    __predict_true(SEGQ_EMPTY(tp)) &&
	    __predict_true(th->th_seq == tp->rcv_nxt)) {
		if (tlen == 0) {
			if (rack_fastack(m, th, so, tp, to, drop_hdrlen, tlen,
			    tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
				return (0);
			}
		} else {
			if (rack_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen,
			    tiwin, nxt_pkt, iptos)) {
				return (0);
			}
		}
	}
	ctf_calc_rwin(so, tp);

	if ((thflags & TH_RST) ||
	    (tp->t_fin_is_rst && (thflags & TH_FIN)))
		return (__ctf_process_rst(m, th, so, tp,
		    &rack->r_ctl.challenge_ack_ts,
		    &rack->r_ctl.challenge_ack_cnt));

	/*
	 * RFC5961 Section 4.2 Send challenge ACK for any SYN in
	 * synchronized state.
	 */
	if (thflags & TH_SYN) {
		ctf_challenge_ack(m, th, tp, iptos, &ret_val);
		return (ret_val);
	}
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment and
	 * it's less than ts_recent, drop it.
	 */
	if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to->to_tsval, tp->ts_recent)) {
		if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val))
			return (ret_val);
	}
	if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val,
	    &rack->r_ctl.challenge_ack_ts,
	    &rack->r_ctl.challenge_ack_cnt)) {
		return (ret_val);
	}
	/*
	 * If last ACK falls within this segment's sequence numbers, record
	 * its timestamp. NOTE: 1) That the test incorporates suggestions
	 * from the latest proposal of the tcplw@cray.com list (Braden
	 * 1993/04/26).
2) That updating only on newer timestamps interferes 11590 * with our earlier PAWS tests, so this check should be solely 11591 * predicated on the sequence space of this segment. 3) That we 11592 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11593 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11594 * SEG.Len, This modified check allows us to overcome RFC1323's 11595 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11596 * p.869. In such cases, we can still calculate the RTT correctly 11597 * when RCV.NXT == Last.ACK.Sent. 11598 */ 11599 if ((to->to_flags & TOF_TS) != 0 && 11600 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11601 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11602 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11603 tp->ts_recent_age = tcp_ts_getticks(); 11604 tp->ts_recent = to->to_tsval; 11605 } 11606 /* 11607 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11608 * is on (half-synchronized state), then queue data for later 11609 * processing; else drop segment and return. 11610 */ 11611 if ((thflags & TH_ACK) == 0) { 11612 if (tp->t_flags & TF_NEEDSYN) { 11613 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11614 tiwin, thflags, nxt_pkt)); 11615 11616 } else if (tp->t_flags & TF_ACKNOW) { 11617 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11618 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11619 return (ret_val); 11620 } else { 11621 ctf_do_drop(m, NULL); 11622 return (0); 11623 } 11624 } 11625 /* 11626 * Ack processing. 11627 */ 11628 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11629 return (ret_val); 11630 } 11631 if (sbavail(&so->so_snd)) { 11632 if (ctf_progress_timeout_check(tp, true)) { 11633 rack_log_progress_event(rack, tp, tick, PROGRESS_DROP, __LINE__); 11634 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11635 return (1); 11636 } 11637 } 11638 /* State changes only happen in rack_process_data() */ 11639 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11640 tiwin, thflags, nxt_pkt)); 11641 } 11642 11643 /* 11644 * Return value of 1, the TCB is unlocked and most 11645 * likely gone, return value of 0, the TCP is still 11646 * locked. 11647 */ 11648 static int 11649 rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 11650 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11651 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11652 { 11653 int32_t ret_val = 0; 11654 struct tcp_rack *rack; 11655 11656 rack = (struct tcp_rack *)tp->t_fb_ptr; 11657 ctf_calc_rwin(so, tp); 11658 if ((thflags & TH_RST) || 11659 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11660 return (__ctf_process_rst(m, th, so, tp, 11661 &rack->r_ctl.challenge_ack_ts, 11662 &rack->r_ctl.challenge_ack_cnt)); 11663 /* 11664 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11665 * synchronized state. 11666 */ 11667 if (thflags & TH_SYN) { 11668 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11669 return (ret_val); 11670 } 11671 /* 11672 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11673 * it's less than ts_recent, drop it. 
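	 *
	 * PAWS treats the timestamp as an extension of the sequence space:
	 * a tsval older than ts_recent (e.g. tsval 900 against a ts_recent
	 * of 1000) marks the segment as a stale duplicate. ctf_ts_check()
	 * makes the final call, following the usual PAWS rule that a
	 * ts_recent which has gone unrefreshed for longer than the PAWS
	 * idle window is invalidated rather than used to drop live traffic.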
11674 */ 11675 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11676 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11677 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11678 return (ret_val); 11679 } 11680 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11681 &rack->r_ctl.challenge_ack_ts, 11682 &rack->r_ctl.challenge_ack_cnt)) { 11683 return (ret_val); 11684 } 11685 /* 11686 * If last ACK falls within this segment's sequence numbers, record 11687 * its timestamp. NOTE: 1) That the test incorporates suggestions 11688 * from the latest proposal of the tcplw@cray.com list (Braden 11689 * 1993/04/26). 2) That updating only on newer timestamps interferes 11690 * with our earlier PAWS tests, so this check should be solely 11691 * predicated on the sequence space of this segment. 3) That we 11692 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11693 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11694 * SEG.Len, This modified check allows us to overcome RFC1323's 11695 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11696 * p.869. In such cases, we can still calculate the RTT correctly 11697 * when RCV.NXT == Last.ACK.Sent. 11698 */ 11699 if ((to->to_flags & TOF_TS) != 0 && 11700 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11701 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11702 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11703 tp->ts_recent_age = tcp_ts_getticks(); 11704 tp->ts_recent = to->to_tsval; 11705 } 11706 /* 11707 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11708 * is on (half-synchronized state), then queue data for later 11709 * processing; else drop segment and return. 11710 */ 11711 if ((thflags & TH_ACK) == 0) { 11712 if (tp->t_flags & TF_NEEDSYN) { 11713 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11714 tiwin, thflags, nxt_pkt)); 11715 11716 } else if (tp->t_flags & TF_ACKNOW) { 11717 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11718 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11719 return (ret_val); 11720 } else { 11721 ctf_do_drop(m, NULL); 11722 return (0); 11723 } 11724 } 11725 /* 11726 * Ack processing. 
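	 *
	 * rack_process_ack() does the cum-ack/SACK work; a non-zero return
	 * means the segment has already been consumed and ret_val carries
	 * the usual 1 (TCB unlocked/gone) or 0 (still locked) answer back
	 * to the caller. After that, if data is still queued in the send
	 * buffer, the progress timer is checked and a stalled connection
	 * is reset via ctf_do_dropwithreset_conn().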
11727 */ 11728 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 11729 return (ret_val); 11730 } 11731 if (sbavail(&so->so_snd)) { 11732 if (ctf_progress_timeout_check(tp, true)) { 11733 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11734 tp, tick, PROGRESS_DROP, __LINE__); 11735 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11736 return (1); 11737 } 11738 } 11739 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11740 tiwin, thflags, nxt_pkt)); 11741 } 11742 11743 static int 11744 rack_check_data_after_close(struct mbuf *m, 11745 struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so) 11746 { 11747 struct tcp_rack *rack; 11748 11749 rack = (struct tcp_rack *)tp->t_fb_ptr; 11750 if (rack->rc_allow_data_af_clo == 0) { 11751 close_now: 11752 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11753 /* tcp_close will kill the inp pre-log the Reset */ 11754 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 11755 tp = tcp_close(tp); 11756 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 11757 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 11758 return (1); 11759 } 11760 if (sbavail(&so->so_snd) == 0) 11761 goto close_now; 11762 /* Ok we allow data that is ignored and a followup reset */ 11763 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 11764 tp->rcv_nxt = th->th_seq + *tlen; 11765 tp->t_flags2 |= TF2_DROP_AF_DATA; 11766 rack->r_wanted_output = 1; 11767 *tlen = 0; 11768 return (0); 11769 } 11770 11771 /* 11772 * Return value of 1, the TCB is unlocked and most 11773 * likely gone, return value of 0, the TCP is still 11774 * locked. 11775 */ 11776 static int 11777 rack_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 11778 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11779 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11780 { 11781 int32_t ret_val = 0; 11782 int32_t ourfinisacked = 0; 11783 struct tcp_rack *rack; 11784 11785 rack = (struct tcp_rack *)tp->t_fb_ptr; 11786 ctf_calc_rwin(so, tp); 11787 11788 if ((thflags & TH_RST) || 11789 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11790 return (__ctf_process_rst(m, th, so, tp, 11791 &rack->r_ctl.challenge_ack_ts, 11792 &rack->r_ctl.challenge_ack_cnt)); 11793 /* 11794 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11795 * synchronized state. 11796 */ 11797 if (thflags & TH_SYN) { 11798 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11799 return (ret_val); 11800 } 11801 /* 11802 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11803 * it's less than ts_recent, drop it. 11804 */ 11805 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11806 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11807 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11808 return (ret_val); 11809 } 11810 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11811 &rack->r_ctl.challenge_ack_ts, 11812 &rack->r_ctl.challenge_ack_cnt)) { 11813 return (ret_val); 11814 } 11815 /* 11816 * If new data are received on a connection after the user processes 11817 * are gone, then RST the other end. 11818 */ 11819 if ((tp->t_flags & TF_CLOSED) && tlen && 11820 rack_check_data_after_close(m, tp, &tlen, th, so)) 11821 return (1); 11822 /* 11823 * If last ACK falls within this segment's sequence numbers, record 11824 * its timestamp. NOTE: 1) That the test incorporates suggestions 11825 * from the latest proposal of the tcplw@cray.com list (Braden 11826 * 1993/04/26). 
2) That updating only on newer timestamps interferes 11827 * with our earlier PAWS tests, so this check should be solely 11828 * predicated on the sequence space of this segment. 3) That we 11829 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11830 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11831 * SEG.Len, This modified check allows us to overcome RFC1323's 11832 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11833 * p.869. In such cases, we can still calculate the RTT correctly 11834 * when RCV.NXT == Last.ACK.Sent. 11835 */ 11836 if ((to->to_flags & TOF_TS) != 0 && 11837 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11838 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11839 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11840 tp->ts_recent_age = tcp_ts_getticks(); 11841 tp->ts_recent = to->to_tsval; 11842 } 11843 /* 11844 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11845 * is on (half-synchronized state), then queue data for later 11846 * processing; else drop segment and return. 11847 */ 11848 if ((thflags & TH_ACK) == 0) { 11849 if (tp->t_flags & TF_NEEDSYN) { 11850 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11851 tiwin, thflags, nxt_pkt)); 11852 } else if (tp->t_flags & TF_ACKNOW) { 11853 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11854 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11855 return (ret_val); 11856 } else { 11857 ctf_do_drop(m, NULL); 11858 return (0); 11859 } 11860 } 11861 /* 11862 * Ack processing. 11863 */ 11864 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11865 return (ret_val); 11866 } 11867 if (ourfinisacked) { 11868 /* 11869 * If we can't receive any more data, then closing user can 11870 * proceed. Starting the timer is contrary to the 11871 * specification, but if we don't get a FIN we'll hang 11872 * forever. 11873 * 11874 * XXXjl: we should release the tp also, and use a 11875 * compressed state. 11876 */ 11877 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 11878 soisdisconnected(so); 11879 tcp_timer_activate(tp, TT_2MSL, 11880 (tcp_fast_finwait2_recycle ? 11881 tcp_finwait2_timeout : 11882 TP_MAXIDLE(tp))); 11883 } 11884 tcp_state_change(tp, TCPS_FIN_WAIT_2); 11885 } 11886 if (sbavail(&so->so_snd)) { 11887 if (ctf_progress_timeout_check(tp, true)) { 11888 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 11889 tp, tick, PROGRESS_DROP, __LINE__); 11890 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11891 return (1); 11892 } 11893 } 11894 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11895 tiwin, thflags, nxt_pkt)); 11896 } 11897 11898 /* 11899 * Return value of 1, the TCB is unlocked and most 11900 * likely gone, return value of 0, the TCP is still 11901 * locked. 
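 *
 * This is the CLOSING handler: both sides have sent a FIN and we are
 * waiting for the peer to acknowledge ours. Once rack_process_ack()
 * reports ourfinisacked the connection goes straight to TIME_WAIT via
 * tcp_twstart(), which disposes of the tcpcb, hence the return of 1.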
11902 */ 11903 static int 11904 rack_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 11905 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 11906 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 11907 { 11908 int32_t ret_val = 0; 11909 int32_t ourfinisacked = 0; 11910 struct tcp_rack *rack; 11911 11912 rack = (struct tcp_rack *)tp->t_fb_ptr; 11913 ctf_calc_rwin(so, tp); 11914 11915 if ((thflags & TH_RST) || 11916 (tp->t_fin_is_rst && (thflags & TH_FIN))) 11917 return (__ctf_process_rst(m, th, so, tp, 11918 &rack->r_ctl.challenge_ack_ts, 11919 &rack->r_ctl.challenge_ack_cnt)); 11920 /* 11921 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 11922 * synchronized state. 11923 */ 11924 if (thflags & TH_SYN) { 11925 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 11926 return (ret_val); 11927 } 11928 /* 11929 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 11930 * it's less than ts_recent, drop it. 11931 */ 11932 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 11933 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 11934 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 11935 return (ret_val); 11936 } 11937 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 11938 &rack->r_ctl.challenge_ack_ts, 11939 &rack->r_ctl.challenge_ack_cnt)) { 11940 return (ret_val); 11941 } 11942 /* 11943 * If new data are received on a connection after the user processes 11944 * are gone, then RST the other end. 11945 */ 11946 if ((tp->t_flags & TF_CLOSED) && tlen && 11947 rack_check_data_after_close(m, tp, &tlen, th, so)) 11948 return (1); 11949 /* 11950 * If last ACK falls within this segment's sequence numbers, record 11951 * its timestamp. NOTE: 1) That the test incorporates suggestions 11952 * from the latest proposal of the tcplw@cray.com list (Braden 11953 * 1993/04/26). 2) That updating only on newer timestamps interferes 11954 * with our earlier PAWS tests, so this check should be solely 11955 * predicated on the sequence space of this segment. 3) That we 11956 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 11957 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 11958 * SEG.Len, This modified check allows us to overcome RFC1323's 11959 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 11960 * p.869. In such cases, we can still calculate the RTT correctly 11961 * when RCV.NXT == Last.ACK.Sent. 11962 */ 11963 if ((to->to_flags & TOF_TS) != 0 && 11964 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 11965 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 11966 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 11967 tp->ts_recent_age = tcp_ts_getticks(); 11968 tp->ts_recent = to->to_tsval; 11969 } 11970 /* 11971 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 11972 * is on (half-synchronized state), then queue data for later 11973 * processing; else drop segment and return. 11974 */ 11975 if ((thflags & TH_ACK) == 0) { 11976 if (tp->t_flags & TF_NEEDSYN) { 11977 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 11978 tiwin, thflags, nxt_pkt)); 11979 } else if (tp->t_flags & TF_ACKNOW) { 11980 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 11981 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 11982 return (ret_val); 11983 } else { 11984 ctf_do_drop(m, NULL); 11985 return (0); 11986 } 11987 } 11988 /* 11989 * Ack processing. 
11990 */ 11991 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 11992 return (ret_val); 11993 } 11994 if (ourfinisacked) { 11995 tcp_twstart(tp); 11996 m_freem(m); 11997 return (1); 11998 } 11999 if (sbavail(&so->so_snd)) { 12000 if (ctf_progress_timeout_check(tp, true)) { 12001 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12002 tp, tick, PROGRESS_DROP, __LINE__); 12003 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12004 return (1); 12005 } 12006 } 12007 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12008 tiwin, thflags, nxt_pkt)); 12009 } 12010 12011 /* 12012 * Return value of 1, the TCB is unlocked and most 12013 * likely gone, return value of 0, the TCP is still 12014 * locked. 12015 */ 12016 static int 12017 rack_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 12018 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12019 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12020 { 12021 int32_t ret_val = 0; 12022 int32_t ourfinisacked = 0; 12023 struct tcp_rack *rack; 12024 12025 rack = (struct tcp_rack *)tp->t_fb_ptr; 12026 ctf_calc_rwin(so, tp); 12027 12028 if ((thflags & TH_RST) || 12029 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12030 return (__ctf_process_rst(m, th, so, tp, 12031 &rack->r_ctl.challenge_ack_ts, 12032 &rack->r_ctl.challenge_ack_cnt)); 12033 /* 12034 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12035 * synchronized state. 12036 */ 12037 if (thflags & TH_SYN) { 12038 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12039 return (ret_val); 12040 } 12041 /* 12042 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12043 * it's less than ts_recent, drop it. 12044 */ 12045 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12046 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12047 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12048 return (ret_val); 12049 } 12050 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12051 &rack->r_ctl.challenge_ack_ts, 12052 &rack->r_ctl.challenge_ack_cnt)) { 12053 return (ret_val); 12054 } 12055 /* 12056 * If new data are received on a connection after the user processes 12057 * are gone, then RST the other end. 12058 */ 12059 if ((tp->t_flags & TF_CLOSED) && tlen && 12060 rack_check_data_after_close(m, tp, &tlen, th, so)) 12061 return (1); 12062 /* 12063 * If last ACK falls within this segment's sequence numbers, record 12064 * its timestamp. NOTE: 1) That the test incorporates suggestions 12065 * from the latest proposal of the tcplw@cray.com list (Braden 12066 * 1993/04/26). 2) That updating only on newer timestamps interferes 12067 * with our earlier PAWS tests, so this check should be solely 12068 * predicated on the sequence space of this segment. 3) That we 12069 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12070 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12071 * SEG.Len, This modified check allows us to overcome RFC1323's 12072 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12073 * p.869. In such cases, we can still calculate the RTT correctly 12074 * when RCV.NXT == Last.ACK.Sent. 
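	 *
	 * Concretely: a zero-length ACK arriving with
	 * SEG.SEQ == Last.ACK.Sent fails RFC1323's strict "<" test but
	 * passes the "<=" used below, which is what keeps
	 * RCV.NXT == Last.ACK.Sent segments usable for RTT measurement;
	 * the (TH_SYN | TH_FIN) term accounts for the sequence slot those
	 * flags consume.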
12075 */ 12076 if ((to->to_flags & TOF_TS) != 0 && 12077 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12078 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12079 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12080 tp->ts_recent_age = tcp_ts_getticks(); 12081 tp->ts_recent = to->to_tsval; 12082 } 12083 /* 12084 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12085 * is on (half-synchronized state), then queue data for later 12086 * processing; else drop segment and return. 12087 */ 12088 if ((thflags & TH_ACK) == 0) { 12089 if (tp->t_flags & TF_NEEDSYN) { 12090 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12091 tiwin, thflags, nxt_pkt)); 12092 } else if (tp->t_flags & TF_ACKNOW) { 12093 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12094 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12095 return (ret_val); 12096 } else { 12097 ctf_do_drop(m, NULL); 12098 return (0); 12099 } 12100 } 12101 /* 12102 * case TCPS_LAST_ACK: Ack processing. 12103 */ 12104 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12105 return (ret_val); 12106 } 12107 if (ourfinisacked) { 12108 tp = tcp_close(tp); 12109 ctf_do_drop(m, tp); 12110 return (1); 12111 } 12112 if (sbavail(&so->so_snd)) { 12113 if (ctf_progress_timeout_check(tp, true)) { 12114 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12115 tp, tick, PROGRESS_DROP, __LINE__); 12116 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12117 return (1); 12118 } 12119 } 12120 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12121 tiwin, thflags, nxt_pkt)); 12122 } 12123 12124 /* 12125 * Return value of 1, the TCB is unlocked and most 12126 * likely gone, return value of 0, the TCP is still 12127 * locked. 12128 */ 12129 static int 12130 rack_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 12131 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 12132 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 12133 { 12134 int32_t ret_val = 0; 12135 int32_t ourfinisacked = 0; 12136 struct tcp_rack *rack; 12137 12138 rack = (struct tcp_rack *)tp->t_fb_ptr; 12139 ctf_calc_rwin(so, tp); 12140 12141 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 12142 if ((thflags & TH_RST) || 12143 (tp->t_fin_is_rst && (thflags & TH_FIN))) 12144 return (__ctf_process_rst(m, th, so, tp, 12145 &rack->r_ctl.challenge_ack_ts, 12146 &rack->r_ctl.challenge_ack_cnt)); 12147 /* 12148 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 12149 * synchronized state. 12150 */ 12151 if (thflags & TH_SYN) { 12152 ctf_challenge_ack(m, th, tp, iptos, &ret_val); 12153 return (ret_val); 12154 } 12155 /* 12156 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 12157 * it's less than ts_recent, drop it. 12158 */ 12159 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 12160 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 12161 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 12162 return (ret_val); 12163 } 12164 if (_ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val, 12165 &rack->r_ctl.challenge_ack_ts, 12166 &rack->r_ctl.challenge_ack_cnt)) { 12167 return (ret_val); 12168 } 12169 /* 12170 * If new data are received on a connection after the user processes 12171 * are gone, then RST the other end. 
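	 *
	 * rack_check_data_after_close() either tears the connection down
	 * with a RST (when rc_allow_data_af_clo is clear, or when nothing
	 * is left to send), or quietly consumes the data by advancing
	 * rcv_nxt, zeroing tlen and setting TF2_DROP_AF_DATA so that a
	 * follow-up reset can be sent later.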
12172 */ 12173 if ((tp->t_flags & TF_CLOSED) && tlen && 12174 rack_check_data_after_close(m, tp, &tlen, th, so)) 12175 return (1); 12176 /* 12177 * If last ACK falls within this segment's sequence numbers, record 12178 * its timestamp. NOTE: 1) That the test incorporates suggestions 12179 * from the latest proposal of the tcplw@cray.com list (Braden 12180 * 1993/04/26). 2) That updating only on newer timestamps interferes 12181 * with our earlier PAWS tests, so this check should be solely 12182 * predicated on the sequence space of this segment. 3) That we 12183 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 12184 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 12185 * SEG.Len, This modified check allows us to overcome RFC1323's 12186 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 12187 * p.869. In such cases, we can still calculate the RTT correctly 12188 * when RCV.NXT == Last.ACK.Sent. 12189 */ 12190 if ((to->to_flags & TOF_TS) != 0 && 12191 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 12192 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 12193 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 12194 tp->ts_recent_age = tcp_ts_getticks(); 12195 tp->ts_recent = to->to_tsval; 12196 } 12197 /* 12198 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 12199 * is on (half-synchronized state), then queue data for later 12200 * processing; else drop segment and return. 12201 */ 12202 if ((thflags & TH_ACK) == 0) { 12203 if (tp->t_flags & TF_NEEDSYN) { 12204 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12205 tiwin, thflags, nxt_pkt)); 12206 } else if (tp->t_flags & TF_ACKNOW) { 12207 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 12208 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; 12209 return (ret_val); 12210 } else { 12211 ctf_do_drop(m, NULL); 12212 return (0); 12213 } 12214 } 12215 /* 12216 * Ack processing. 
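	 *
	 * In FIN_WAIT_2 our FIN has already been acknowledged, so although
	 * rack_process_ack() still fills in ourfinisacked nothing here acts
	 * on it; the state is only left when the peer's FIN arrives (seen
	 * by rack_process_data()) or the FIN_WAIT_2 timer fires.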
12217 */ 12218 if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 12219 return (ret_val); 12220 } 12221 if (sbavail(&so->so_snd)) { 12222 if (ctf_progress_timeout_check(tp, true)) { 12223 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 12224 tp, tick, PROGRESS_DROP, __LINE__); 12225 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 12226 return (1); 12227 } 12228 } 12229 return (rack_process_data(m, th, so, tp, drop_hdrlen, tlen, 12230 tiwin, thflags, nxt_pkt)); 12231 } 12232 12233 static void inline 12234 rack_clear_rate_sample(struct tcp_rack *rack) 12235 { 12236 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; 12237 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; 12238 rack->r_ctl.rack_rs.rs_rtt_tot = 0; 12239 } 12240 12241 static void 12242 rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack, uint32_t line, uint64_t *fill_override) 12243 { 12244 uint64_t bw_est, rate_wanted; 12245 int chged = 0; 12246 uint32_t user_max, orig_min, orig_max; 12247 12248 orig_min = rack->r_ctl.rc_pace_min_segs; 12249 orig_max = rack->r_ctl.rc_pace_max_segs; 12250 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; 12251 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) 12252 chged = 1; 12253 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); 12254 if (rack->use_fixed_rate || rack->rc_force_max_seg) { 12255 if (user_max != rack->r_ctl.rc_pace_max_segs) 12256 chged = 1; 12257 } 12258 if (rack->rc_force_max_seg) { 12259 rack->r_ctl.rc_pace_max_segs = user_max; 12260 } else if (rack->use_fixed_rate) { 12261 bw_est = rack_get_bw(rack); 12262 if ((rack->r_ctl.crte == NULL) || 12263 (bw_est != rack->r_ctl.crte->rate)) { 12264 rack->r_ctl.rc_pace_max_segs = user_max; 12265 } else { 12266 /* We are pacing right at the hardware rate */ 12267 uint32_t segsiz; 12268 12269 segsiz = min(ctf_fixed_maxseg(tp), 12270 rack->r_ctl.rc_pace_min_segs); 12271 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size( 12272 tp, bw_est, segsiz, 0, 12273 rack->r_ctl.crte, NULL); 12274 } 12275 } else if (rack->rc_always_pace) { 12276 if (rack->r_ctl.gp_bw || 12277 #ifdef NETFLIX_PEAKRATE 12278 rack->rc_tp->t_maxpeakrate || 12279 #endif 12280 rack->r_ctl.init_rate) { 12281 /* We have a rate of some sort set */ 12282 uint32_t orig; 12283 12284 bw_est = rack_get_bw(rack); 12285 orig = rack->r_ctl.rc_pace_max_segs; 12286 if (fill_override) 12287 rate_wanted = *fill_override; 12288 else 12289 rate_wanted = rack_get_output_bw(rack, bw_est, NULL, NULL); 12290 if (rate_wanted) { 12291 /* We have something */ 12292 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, 12293 rate_wanted, 12294 ctf_fixed_maxseg(rack->rc_tp)); 12295 } else 12296 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; 12297 if (orig != rack->r_ctl.rc_pace_max_segs) 12298 chged = 1; 12299 } else if ((rack->r_ctl.gp_bw == 0) && 12300 (rack->r_ctl.rc_pace_max_segs == 0)) { 12301 /* 12302 * If we have nothing limit us to bursting 12303 * out IW sized pieces. 
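			 *
			 * rc_init_window() yields the initial window in
			 * bytes, so until a goodput estimate (gp_bw) or an
			 * explicit init_rate exists an always-paced
			 * connection is limited to an IW-sized burst; once a
			 * rate is known the branch above recomputes the
			 * maximum from the wanted pacing rate.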
12304 */ 12305 chged = 1; 12306 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); 12307 } 12308 } 12309 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { 12310 chged = 1; 12311 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; 12312 } 12313 if (chged) 12314 rack_log_type_pacing_sizes(tp, rack, orig_min, orig_max, line, 2); 12315 } 12316 12317 12318 static void 12319 rack_init_fsb_block(struct tcpcb *tp, struct tcp_rack *rack) 12320 { 12321 #ifdef INET6 12322 struct ip6_hdr *ip6 = NULL; 12323 #endif 12324 #ifdef INET 12325 struct ip *ip = NULL; 12326 #endif 12327 struct udphdr *udp = NULL; 12328 12329 /* Ok lets fill in the fast block, it can only be used with no IP options! */ 12330 #ifdef INET6 12331 if (rack->r_is_v6) { 12332 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 12333 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 12334 if (tp->t_port) { 12335 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12336 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 12337 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12338 udp->uh_dport = tp->t_port; 12339 rack->r_ctl.fsb.udp = udp; 12340 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12341 } else 12342 { 12343 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); 12344 rack->r_ctl.fsb.udp = NULL; 12345 } 12346 tcpip_fillheaders(rack->rc_inp, 12347 tp->t_port, 12348 ip6, rack->r_ctl.fsb.th); 12349 } else 12350 #endif /* INET6 */ 12351 #ifdef INET 12352 { 12353 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); 12354 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 12355 if (tp->t_port) { 12356 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); 12357 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 12358 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 12359 udp->uh_dport = tp->t_port; 12360 rack->r_ctl.fsb.udp = udp; 12361 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); 12362 } else 12363 { 12364 rack->r_ctl.fsb.udp = NULL; 12365 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); 12366 } 12367 tcpip_fillheaders(rack->rc_inp, 12368 tp->t_port, 12369 ip, rack->r_ctl.fsb.th); 12370 } 12371 #endif 12372 rack->r_fsb_inited = 1; 12373 } 12374 12375 static int 12376 rack_init_fsb(struct tcpcb *tp, struct tcp_rack *rack) 12377 { 12378 /* 12379 * Allocate the larger of spaces V6 if available else just 12380 * V4 and include udphdr (overbook) 12381 */ 12382 #ifdef INET6 12383 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr); 12384 #else 12385 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); 12386 #endif 12387 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, 12388 M_TCPFSB, M_NOWAIT|M_ZERO); 12389 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { 12390 return (ENOMEM); 12391 } 12392 rack->r_fsb_inited = 0; 12393 return (0); 12394 } 12395 12396 static int 12397 rack_init(struct tcpcb *tp) 12398 { 12399 struct inpcb *inp = tptoinpcb(tp); 12400 struct tcp_rack *rack = NULL; 12401 #ifdef INVARIANTS 12402 struct rack_sendmap *insret; 12403 #endif 12404 uint32_t iwin, snt, us_cts; 12405 int err; 12406 12407 tp->t_fb_ptr = uma_zalloc(rack_pcb_zone, M_NOWAIT); 12408 if (tp->t_fb_ptr == NULL) { 12409 /* 12410 * We need to allocate memory but cant. The INP and INP_INFO 12411 * locks and they are recursive (happens during setup. 
So a 12412 * scheme to drop the locks fails :( 12413 * 12414 */ 12415 return (ENOMEM); 12416 } 12417 memset(tp->t_fb_ptr, 0, sizeof(struct tcp_rack)); 12418 12419 rack = (struct tcp_rack *)tp->t_fb_ptr; 12420 RB_INIT(&rack->r_ctl.rc_mtree); 12421 TAILQ_INIT(&rack->r_ctl.rc_free); 12422 TAILQ_INIT(&rack->r_ctl.rc_tmap); 12423 rack->rc_tp = tp; 12424 rack->rc_inp = inp; 12425 /* Set the flag */ 12426 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; 12427 /* Probably not needed but lets be sure */ 12428 rack_clear_rate_sample(rack); 12429 /* 12430 * Save off the default values, socket options will poke 12431 * at these if pacing is not on or we have not yet 12432 * reached where pacing is on (gp_ready/fixed enabled). 12433 * When they get set into the CC module (when gp_ready 12434 * is enabled or we enable fixed) then we will set these 12435 * values into the CC and place in here the old values 12436 * so we have a restoral. Then we will set the flag 12437 * rc_pacing_cc_set. That way whenever we turn off pacing 12438 * or switch off this stack, we will know to go restore 12439 * the saved values. 12440 */ 12441 rack->r_ctl.rc_saved_beta.beta = V_newreno_beta_ecn; 12442 rack->r_ctl.rc_saved_beta.beta_ecn = V_newreno_beta_ecn; 12443 /* We want abe like behavior as well */ 12444 rack->r_ctl.rc_saved_beta.newreno_flags |= CC_NEWRENO_BETA_ECN_ENABLED; 12445 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; 12446 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; 12447 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; 12448 rack->r_ctl.roundends = tp->snd_max; 12449 if (use_rack_rr) 12450 rack->use_rack_rr = 1; 12451 if (V_tcp_delack_enabled) 12452 tp->t_delayed_ack = 1; 12453 else 12454 tp->t_delayed_ack = 0; 12455 #ifdef TCP_ACCOUNTING 12456 if (rack_tcp_accounting) { 12457 tp->t_flags2 |= TF2_TCP_ACCOUNTING; 12458 } 12459 #endif 12460 if (rack_enable_shared_cwnd) 12461 rack->rack_enable_scwnd = 1; 12462 rack->rc_user_set_max_segs = rack_hptsi_segments; 12463 rack->rc_force_max_seg = 0; 12464 if (rack_use_imac_dack) 12465 rack->rc_dack_mode = 1; 12466 TAILQ_INIT(&rack->r_ctl.opt_list); 12467 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; 12468 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; 12469 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; 12470 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; 12471 rack->r_ctl.rc_highest_us_rtt = 0; 12472 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; 12473 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); 12474 if (rack_use_cmp_acks) 12475 rack->r_use_cmp_ack = 1; 12476 if (rack_disable_prr) 12477 rack->rack_no_prr = 1; 12478 if (rack_gp_no_rec_chg) 12479 rack->rc_gp_no_rec_chg = 1; 12480 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 12481 rack->rc_always_pace = 1; 12482 if (rack->use_fixed_rate || rack->gp_ready) 12483 rack_set_cc_pacing(rack); 12484 } else 12485 rack->rc_always_pace = 0; 12486 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) 12487 rack->r_mbuf_queue = 1; 12488 else 12489 rack->r_mbuf_queue = 0; 12490 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 12491 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 12492 else 12493 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12494 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12495 if (rack_limits_scwnd) 12496 rack->r_limit_scw = 1; 12497 else 12498 rack->r_limit_scw = 0; 12499 rack->rc_labc = V_tcp_abc_l_var; 12500 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; 12501 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 12502 rack->r_ctl.rc_rate_sample_method = 
rack_rate_sample_method; 12503 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; 12504 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; 12505 rack->r_ctl.rc_min_to = rack_min_to; 12506 microuptime(&rack->r_ctl.act_rcv_time); 12507 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; 12508 rack->rc_init_win = rack_default_init_window; 12509 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; 12510 if (rack_hw_up_only) 12511 rack->r_up_only = 1; 12512 if (rack_do_dyn_mul) { 12513 /* When dynamic adjustment is on CA needs to start at 100% */ 12514 rack->rc_gp_dyn_mul = 1; 12515 if (rack_do_dyn_mul >= 100) 12516 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 12517 } else 12518 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 12519 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; 12520 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; 12521 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); 12522 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, 12523 rack_probertt_filter_life); 12524 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 12525 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; 12526 rack->r_ctl.rc_time_of_last_probertt = us_cts; 12527 rack->r_ctl.challenge_ack_ts = tcp_ts_getticks(); 12528 rack->r_ctl.rc_time_probertt_starts = 0; 12529 if (rack_dsack_std_based & 0x1) { 12530 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 12531 rack->rc_rack_tmr_std_based = 1; 12532 } 12533 if (rack_dsack_std_based & 0x2) { 12534 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 12535 rack->rc_rack_use_dsack = 1; 12536 } 12537 /* We require at least one measurement, even if the sysctl is 0 */ 12538 if (rack_req_measurements) 12539 rack->r_ctl.req_measurements = rack_req_measurements; 12540 else 12541 rack->r_ctl.req_measurements = 1; 12542 if (rack_enable_hw_pacing) 12543 rack->rack_hdw_pace_ena = 1; 12544 if (rack_hw_rate_caps) 12545 rack->r_rack_hw_rate_caps = 1; 12546 /* Do we force on detection? */ 12547 #ifdef NETFLIX_EXP_DETECTION 12548 if (tcp_force_detection) 12549 rack->do_detection = 1; 12550 else 12551 #endif 12552 rack->do_detection = 0; 12553 if (rack_non_rxt_use_cr) 12554 rack->rack_rec_nonrxt_use_cr = 1; 12555 err = rack_init_fsb(tp, rack); 12556 if (err) { 12557 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12558 tp->t_fb_ptr = NULL; 12559 return (err); 12560 } 12561 if (tp->snd_una != tp->snd_max) { 12562 /* Create a send map for the current outstanding data */ 12563 struct rack_sendmap *rsm; 12564 12565 rsm = rack_alloc(rack); 12566 if (rsm == NULL) { 12567 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12568 tp->t_fb_ptr = NULL; 12569 return (ENOMEM); 12570 } 12571 rsm->r_no_rtt_allowed = 1; 12572 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); 12573 rsm->r_rtr_cnt = 1; 12574 rsm->r_rtr_bytes = 0; 12575 if (tp->t_flags & TF_SENTFIN) 12576 rsm->r_flags |= RACK_HAS_FIN; 12577 if ((tp->snd_una == tp->iss) && 12578 !TCPS_HAVEESTABLISHED(tp->t_state)) 12579 rsm->r_flags |= RACK_HAS_SYN; 12580 rsm->r_start = tp->snd_una; 12581 rsm->r_end = tp->snd_max; 12582 rsm->r_dupack = 0; 12583 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { 12584 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); 12585 if (rsm->m) 12586 rsm->orig_m_len = rsm->m->m_len; 12587 else 12588 rsm->orig_m_len = 0; 12589 } else { 12590 /* 12591 * This can happen if we have a stand-alone FIN or 12592 * SYN. 
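			 *
			 * Either way, the rsm built here gives the new stack
			 * a scoreboard entry covering whatever the previous
			 * stack left outstanding (snd_una..snd_max) so RACK
			 * can time and retransmit it; r_no_rtt_allowed is set
			 * since this stack never timed the original send.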
12593 */ 12594 rsm->m = NULL; 12595 rsm->orig_m_len = 0; 12596 rsm->soff = 0; 12597 } 12598 #ifndef INVARIANTS 12599 (void)RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12600 #else 12601 insret = RB_INSERT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12602 if (insret != NULL) { 12603 panic("Insert in rb tree fails ret:%p rack:%p rsm:%p", 12604 insret, rack, rsm); 12605 } 12606 #endif 12607 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); 12608 rsm->r_in_tmap = 1; 12609 } 12610 /* 12611 * Timers in Rack are kept in microseconds so lets 12612 * convert any initial incoming variables 12613 * from ticks into usecs. Note that we 12614 * also change the values of t_srtt and t_rttvar, if 12615 * they are non-zero. They are kept with a 5 12616 * bit decimal so we have to carefully convert 12617 * these to get the full precision. 12618 */ 12619 rack_convert_rtts(tp); 12620 tp->t_rttlow = TICKS_2_USEC(tp->t_rttlow); 12621 if (rack_do_hystart) { 12622 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 12623 if (rack_do_hystart > 1) 12624 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 12625 if (rack_do_hystart > 2) 12626 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 12627 } 12628 if (rack_def_profile) 12629 rack_set_profile(rack, rack_def_profile); 12630 /* Cancel the GP measurement in progress */ 12631 tp->t_flags &= ~TF_GPUTINPROG; 12632 if (SEQ_GT(tp->snd_max, tp->iss)) 12633 snt = tp->snd_max - tp->iss; 12634 else 12635 snt = 0; 12636 iwin = rc_init_window(rack); 12637 if (snt < iwin) { 12638 /* We are not past the initial window 12639 * so we need to make sure cwnd is 12640 * correct. 12641 */ 12642 if (tp->snd_cwnd < iwin) 12643 tp->snd_cwnd = iwin; 12644 /* 12645 * If we are within the initial window 12646 * we want ssthresh to be unlimited. Setting 12647 * it to the rwnd (which the default stack does 12648 * and older racks) is not really a good idea 12649 * since we want to be in SS and grow both the 12650 * cwnd and the rwnd (via dynamic rwnd growth). If 12651 * we set it to the rwnd then as the peer grows its 12652 * rwnd we will be stuck in CA and never hit SS. 12653 * 12654 * Its far better to raise it up high (this takes the 12655 * risk that there as been a loss already, probably 12656 * we should have an indicator in all stacks of loss 12657 * but we don't), but considering the normal use this 12658 * is a risk worth taking. The consequences of not 12659 * hitting SS are far worse than going one more time 12660 * into it early on (before we have sent even a IW). 12661 * It is highly unlikely that we will have had a loss 12662 * before getting the IW out. 12663 */ 12664 tp->snd_ssthresh = 0xffffffff; 12665 } 12666 rack_stop_all_timers(tp); 12667 /* Lets setup the fsb block */ 12668 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 12669 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, 12670 __LINE__, RACK_RTTS_INIT); 12671 return (0); 12672 } 12673 12674 static int 12675 rack_handoff_ok(struct tcpcb *tp) 12676 { 12677 if ((tp->t_state == TCPS_CLOSED) || 12678 (tp->t_state == TCPS_LISTEN)) { 12679 /* Sure no problem though it may not stick */ 12680 return (0); 12681 } 12682 if ((tp->t_state == TCPS_SYN_SENT) || 12683 (tp->t_state == TCPS_SYN_RECEIVED)) { 12684 /* 12685 * We really don't know if you support sack, 12686 * you have to get to ESTAB or beyond to tell. 12687 */ 12688 return (EAGAIN); 12689 } 12690 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { 12691 /* 12692 * Rack will only send a FIN after all data is acknowledged. 
12693 * So in this case we have more data outstanding. We can't 12694 * switch stacks until either all data and only the FIN 12695 * is left (in which case rack_init() now knows how 12696 * to deal with that) <or> all is acknowledged and we 12697 * are only left with incoming data, though why you 12698 * would want to switch to rack after all data is acknowledged 12699 * I have no idea (rrs)! 12700 */ 12701 return (EAGAIN); 12702 } 12703 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ 12704 return (0); 12705 } 12706 /* 12707 * If we reach here we don't do SACK on this connection so we can 12708 * never do rack. 12709 */ 12710 return (EINVAL); 12711 } 12712 12713 12714 static void 12715 rack_fini(struct tcpcb *tp, int32_t tcb_is_purged) 12716 { 12717 struct inpcb *inp = tptoinpcb(tp); 12718 12719 if (tp->t_fb_ptr) { 12720 struct tcp_rack *rack; 12721 struct rack_sendmap *rsm, *nrsm; 12722 #ifdef INVARIANTS 12723 struct rack_sendmap *rm; 12724 #endif 12725 12726 rack = (struct tcp_rack *)tp->t_fb_ptr; 12727 if (tp->t_in_pkt) { 12728 /* 12729 * It is unsafe to process the packets since a 12730 * reset may be lurking in them (its rare but it 12731 * can occur). If we were to find a RST, then we 12732 * would end up dropping the connection and the 12733 * INP lock, so when we return the caller (tcp_usrreq) 12734 * will blow up when it trys to unlock the inp. 12735 */ 12736 struct mbuf *save, *m; 12737 12738 m = tp->t_in_pkt; 12739 tp->t_in_pkt = NULL; 12740 tp->t_tail_pkt = NULL; 12741 while (m) { 12742 save = m->m_nextpkt; 12743 m->m_nextpkt = NULL; 12744 m_freem(m); 12745 m = save; 12746 } 12747 } 12748 tp->t_flags &= ~TF_FORCEDATA; 12749 #ifdef NETFLIX_SHARED_CWND 12750 if (rack->r_ctl.rc_scw) { 12751 uint32_t limit; 12752 12753 if (rack->r_limit_scw) 12754 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); 12755 else 12756 limit = 0; 12757 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, 12758 rack->r_ctl.rc_scw_index, 12759 limit); 12760 rack->r_ctl.rc_scw = NULL; 12761 } 12762 #endif 12763 if (rack->r_ctl.fsb.tcp_ip_hdr) { 12764 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); 12765 rack->r_ctl.fsb.tcp_ip_hdr = NULL; 12766 rack->r_ctl.fsb.th = NULL; 12767 } 12768 /* Convert back to ticks, with */ 12769 if (tp->t_srtt > 1) { 12770 uint32_t val, frac; 12771 12772 val = USEC_2_TICKS(tp->t_srtt); 12773 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12774 tp->t_srtt = val << TCP_RTT_SHIFT; 12775 /* 12776 * frac is the fractional part here is left 12777 * over from converting to hz and shifting. 12778 * We need to convert this to the 5 bit 12779 * remainder. 12780 */ 12781 if (frac) { 12782 if (hz == 1000) { 12783 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12784 } else { 12785 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12786 } 12787 tp->t_srtt += frac; 12788 } 12789 } 12790 if (tp->t_rttvar) { 12791 uint32_t val, frac; 12792 12793 val = USEC_2_TICKS(tp->t_rttvar); 12794 frac = tp->t_srtt % (HPTS_USEC_IN_SEC / hz); 12795 tp->t_rttvar = val << TCP_RTTVAR_SHIFT; 12796 /* 12797 * frac is the fractional part here is left 12798 * over from converting to hz and shifting. 12799 * We need to convert this to the 5 bit 12800 * remainder. 
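			 *
			 * For example, with hz = 1000 a value of 30500 usec
			 * gives val = 30 ticks and frac = 500 usec, and
			 * 500 * TCP_RTT_SCALE / 1000 = 16 is the half tick
			 * that lands in the fixed-point fraction. (Note that
			 * frac below is taken from t_srtt, which was already
			 * rescaled above; if that is unintentional the error
			 * added here is still under one tick.)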
12801 */ 12802 if (frac) { 12803 if (hz == 1000) { 12804 frac = (((uint64_t)frac * (uint64_t)TCP_RTT_SCALE) / (uint64_t)HPTS_USEC_IN_MSEC); 12805 } else { 12806 frac = (((uint64_t)frac * (uint64_t)(hz) * (uint64_t)TCP_RTT_SCALE) /(uint64_t)HPTS_USEC_IN_SEC); 12807 } 12808 tp->t_rttvar += frac; 12809 } 12810 } 12811 tp->t_rxtcur = USEC_2_TICKS(tp->t_rxtcur); 12812 tp->t_rttlow = USEC_2_TICKS(tp->t_rttlow); 12813 if (rack->rc_always_pace) { 12814 tcp_decrement_paced_conn(); 12815 rack_undo_cc_pacing(rack); 12816 rack->rc_always_pace = 0; 12817 } 12818 /* Clean up any options if they were not applied */ 12819 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { 12820 struct deferred_opt_list *dol; 12821 12822 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); 12823 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 12824 free(dol, M_TCPDO); 12825 } 12826 /* rack does not use force data but other stacks may clear it */ 12827 if (rack->r_ctl.crte != NULL) { 12828 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 12829 rack->rack_hdrw_pacing = 0; 12830 rack->r_ctl.crte = NULL; 12831 } 12832 #ifdef TCP_BLACKBOX 12833 tcp_log_flowend(tp); 12834 #endif 12835 RB_FOREACH_SAFE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree, nrsm) { 12836 #ifndef INVARIANTS 12837 (void)RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12838 #else 12839 rm = RB_REMOVE(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm); 12840 if (rm != rsm) { 12841 panic("At fini, rack:%p rsm:%p rm:%p", 12842 rack, rsm, rm); 12843 } 12844 #endif 12845 uma_zfree(rack_zone, rsm); 12846 } 12847 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12848 while (rsm) { 12849 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); 12850 uma_zfree(rack_zone, rsm); 12851 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); 12852 } 12853 rack->rc_free_cnt = 0; 12854 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); 12855 tp->t_fb_ptr = NULL; 12856 } 12857 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 12858 inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12859 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 12860 inp->inp_flags2 &= ~INP_MBUF_ACKCMP; 12861 /* Cancel the GP measurement in progress */ 12862 tp->t_flags &= ~TF_GPUTINPROG; 12863 inp->inp_flags2 &= ~INP_MBUF_L_ACKS; 12864 /* Make sure snd_nxt is correctly set */ 12865 tp->snd_nxt = tp->snd_max; 12866 } 12867 12868 static void 12869 rack_set_state(struct tcpcb *tp, struct tcp_rack *rack) 12870 { 12871 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { 12872 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; 12873 } 12874 switch (tp->t_state) { 12875 case TCPS_SYN_SENT: 12876 rack->r_state = TCPS_SYN_SENT; 12877 rack->r_substate = rack_do_syn_sent; 12878 break; 12879 case TCPS_SYN_RECEIVED: 12880 rack->r_state = TCPS_SYN_RECEIVED; 12881 rack->r_substate = rack_do_syn_recv; 12882 break; 12883 case TCPS_ESTABLISHED: 12884 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12885 rack->r_state = TCPS_ESTABLISHED; 12886 rack->r_substate = rack_do_established; 12887 break; 12888 case TCPS_CLOSE_WAIT: 12889 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12890 rack->r_state = TCPS_CLOSE_WAIT; 12891 rack->r_substate = rack_do_close_wait; 12892 break; 12893 case TCPS_FIN_WAIT_1: 12894 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12895 rack->r_state = TCPS_FIN_WAIT_1; 12896 rack->r_substate = rack_do_fin_wait_1; 12897 break; 12898 case TCPS_CLOSING: 12899 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12900 rack->r_state = TCPS_CLOSING; 12901 rack->r_substate = rack_do_closing; 12902 break; 12903 case TCPS_LAST_ACK: 12904 
rack_set_pace_segments(tp, rack, __LINE__, NULL); 12905 rack->r_state = TCPS_LAST_ACK; 12906 rack->r_substate = rack_do_lastack; 12907 break; 12908 case TCPS_FIN_WAIT_2: 12909 rack_set_pace_segments(tp, rack, __LINE__, NULL); 12910 rack->r_state = TCPS_FIN_WAIT_2; 12911 rack->r_substate = rack_do_fin_wait_2; 12912 break; 12913 case TCPS_LISTEN: 12914 case TCPS_CLOSED: 12915 case TCPS_TIME_WAIT: 12916 default: 12917 break; 12918 }; 12919 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 12920 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 12921 12922 } 12923 12924 static void 12925 rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb) 12926 { 12927 /* 12928 * We received an ack, and then did not 12929 * call send or were bounced out due to the 12930 * hpts was running. Now a timer is up as well, is 12931 * it the right timer? 12932 */ 12933 struct rack_sendmap *rsm; 12934 int tmr_up; 12935 12936 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 12937 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 12938 return; 12939 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 12940 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 12941 (tmr_up == PACE_TMR_RXT)) { 12942 /* Should be an RXT */ 12943 return; 12944 } 12945 if (rsm == NULL) { 12946 /* Nothing outstanding? */ 12947 if (tp->t_flags & TF_DELACK) { 12948 if (tmr_up == PACE_TMR_DELACK) 12949 /* We are supposed to have delayed ack up and we do */ 12950 return; 12951 } else if (sbavail(&tptosocket(tp)->so_snd) && (tmr_up == PACE_TMR_RXT)) { 12952 /* 12953 * if we hit enobufs then we would expect the possibility 12954 * of nothing outstanding and the RXT up (and the hptsi timer). 12955 */ 12956 return; 12957 } else if (((V_tcp_always_keepalive || 12958 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 12959 (tp->t_state <= TCPS_CLOSING)) && 12960 (tmr_up == PACE_TMR_KEEP) && 12961 (tp->snd_max == tp->snd_una)) { 12962 /* We should have keep alive up and we do */ 12963 return; 12964 } 12965 } 12966 if (SEQ_GT(tp->snd_max, tp->snd_una) && 12967 ((tmr_up == PACE_TMR_TLP) || 12968 (tmr_up == PACE_TMR_RACK) || 12969 (tmr_up == PACE_TMR_RXT))) { 12970 /* 12971 * Either a Rack, TLP or RXT is fine if we 12972 * have outstanding data. 12973 */ 12974 return; 12975 } else if (tmr_up == PACE_TMR_DELACK) { 12976 /* 12977 * If the delayed ack was going to go off 12978 * before the rtx/tlp/rack timer were going to 12979 * expire, then that would be the timer in control. 12980 * Note we don't check the time here trusting the 12981 * code is correct. 12982 */ 12983 return; 12984 } 12985 /* 12986 * Ok the timer originally started is not what we want now. 12987 * We will force the hpts to be stopped if any, and restart 12988 * with the slot set to what was in the saved slot. 
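	 *
	 * If a pacing send (PACE_PKT_OUTPUT) was scheduled and has not yet
	 * come due, the remaining time is credited to rc_agg_early and
	 * r_early is set before the hpts entry is removed, so the pacer
	 * can later account for being pulled off the wheel early.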
12989 */ 12990 if (tcp_in_hpts(rack->rc_inp)) { 12991 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 12992 uint32_t us_cts; 12993 12994 us_cts = tcp_get_usecs(NULL); 12995 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 12996 rack->r_early = 1; 12997 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 12998 } 12999 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 13000 } 13001 tcp_hpts_remove(rack->rc_inp); 13002 } 13003 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13004 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 13005 } 13006 13007 13008 static void 13009 rack_do_win_updates(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tiwin, uint32_t seq, uint32_t ack, uint32_t cts, uint32_t high_seq) 13010 { 13011 if ((SEQ_LT(tp->snd_wl1, seq) || 13012 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || 13013 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { 13014 /* keep track of pure window updates */ 13015 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) 13016 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 13017 tp->snd_wnd = tiwin; 13018 rack_validate_fo_sendwin_up(tp, rack); 13019 tp->snd_wl1 = seq; 13020 tp->snd_wl2 = ack; 13021 if (tp->snd_wnd > tp->max_sndwnd) 13022 tp->max_sndwnd = tp->snd_wnd; 13023 rack->r_wanted_output = 1; 13024 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { 13025 tp->snd_wnd = tiwin; 13026 rack_validate_fo_sendwin_up(tp, rack); 13027 tp->snd_wl1 = seq; 13028 tp->snd_wl2 = ack; 13029 } else { 13030 /* Not a valid win update */ 13031 return; 13032 } 13033 /* Do we exit persists? */ 13034 if ((rack->rc_in_persist != 0) && 13035 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), 13036 rack->r_ctl.rc_pace_min_segs))) { 13037 rack_exit_persist(tp, rack, cts); 13038 } 13039 /* Do we enter persists? */ 13040 if ((rack->rc_in_persist == 0) && 13041 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && 13042 TCPS_HAVEESTABLISHED(tp->t_state) && 13043 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && 13044 sbavail(&tptosocket(tp)->so_snd) && 13045 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { 13046 /* 13047 * Here the rwnd is less than 13048 * the pacing size, we are established, 13049 * nothing is outstanding, and there is 13050 * data to send. Enter persists. 
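		 *
		 * Both the exit test and this entry test use the same
		 * threshold, min(rc_high_rwnd / 2, rc_pace_min_segs):
		 * persists are entered once the offered window drops below
		 * it (with data queued and nothing outstanding) and left as
		 * soon as the window recovers past it.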
		 */
		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime);
	}
}

static void
rack_log_input_packet(struct tcpcb *tp, struct tcp_rack *rack, struct tcp_ackent *ae, int ackval, uint32_t high_seq)
{

	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		struct inpcb *inp = tptoinpcb(tp);
		union tcp_log_stackspecific log;
		struct timeval ltv;
		char tcp_hdr_buf[60];
		struct tcphdr *th;
		struct timespec ts;
		uint32_t orig_snd_una;
		uint8_t xx = 0;

#ifdef NETFLIX_HTTP_LOGGING
		struct http_sendfile_track *http_req;

		if (SEQ_GT(ae->ack, tp->snd_una)) {
			http_req = tcp_http_find_req_for_seq(tp, (ae->ack-1));
		} else {
			http_req = tcp_http_find_req_for_seq(tp, ae->ack);
		}
#endif
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
		if (rack->rack_no_prr == 0)
			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		else
			log.u_bbr.flex1 = 0;
		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
		log.u_bbr.use_lt_bw <<= 1;
		log.u_bbr.use_lt_bw |= rack->r_might_revert;
		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
		log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked);
		log.u_bbr.pkts_out = tp->t_maxseg;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.flex7 = 1;
		log.u_bbr.lost = ae->flags;
		log.u_bbr.cwnd_gain = ackval;
		log.u_bbr.pacing_gain = 0x2;
		if (ae->flags & TSTMP_HDWR) {
			/* Record the hardware timestamp if present */
			log.u_bbr.flex3 = M_TSTMP;
			ts.tv_sec = ae->timestamp / 1000000000;
			ts.tv_nsec = ae->timestamp % 1000000000;
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
		} else if (ae->flags & TSTMP_LRO) {
			/* Record the LRO arrival timestamp */
			log.u_bbr.flex3 = M_TSTMP_LRO;
			ts.tv_sec = ae->timestamp / 1000000000;
			ts.tv_nsec = ae->timestamp % 1000000000;
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
		/* Log the rcv time */
		log.u_bbr.delRate = ae->timestamp;
#ifdef NETFLIX_HTTP_LOGGING
		log.u_bbr.applimited = tp->t_http_closed;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_open;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_req;
		if (http_req) {
			/* Copy out any client req info */
			/* seconds */
			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
			/* useconds */
			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
			log.u_bbr.rttProp = http_req->timestamp;
			log.u_bbr.cur_del_rate = http_req->start;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
				log.u_bbr.flex8 |= 1;
			} else {
				log.u_bbr.flex8 |= 2;
				log.u_bbr.bw_inuse = http_req->end;
			}
			log.u_bbr.flex6 = http_req->start_seq;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
				log.u_bbr.flex8 |= 4;
				log.u_bbr.epoch = http_req->end_seq;
			}
		}
#endif
		memset(tcp_hdr_buf, 0, sizeof(tcp_hdr_buf));
		th = (struct tcphdr *)tcp_hdr_buf;
		th->th_seq = ae->seq;
		th->th_ack = ae->ack;
		th->th_win = ae->win;
		/* Now fill in the ports */
		th->th_sport = inp->inp_fport;
		th->th_dport = inp->inp_lport;
		tcp_set_flags(th, ae->flags);
		/* Now do we have a timestamp option? */
		if (ae->flags & HAS_TSTMP) {
			u_char *cp;
			uint32_t val;

			th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2);
			cp = (u_char *)(th + 1);
			*cp = TCPOPT_NOP;
			cp++;
			*cp = TCPOPT_NOP;
			cp++;
			*cp = TCPOPT_TIMESTAMP;
			cp++;
			*cp = TCPOLEN_TIMESTAMP;
			cp++;
			val = htonl(ae->ts_value);
			bcopy((char *)&val,
			    (char *)cp, sizeof(uint32_t));
			val = htonl(ae->ts_echo);
			bcopy((char *)&val,
			    (char *)(cp + 4), sizeof(uint32_t));
		} else
			th->th_off = (sizeof(struct tcphdr) >> 2);

		/*
		 * For sane logging we need to play a little trick.
		 * If the ack were fully processed we would have moved
		 * snd_una to high_seq, but since compressed acks are
		 * processed in two phases, at this point (logging) snd_una
		 * won't be advanced. So we would see multiple acks showing
		 * the advancement. We can prevent that by "pretending" that
		 * snd_una was advanced and then un-advancing it so that the
		 * logging code has the right value for tlb_snd_una.
		 */
		if (tp->snd_una != high_seq) {
			orig_snd_una = tp->snd_una;
			tp->snd_una = high_seq;
			xx = 1;
		} else
			xx = 0;
		TCP_LOG_EVENTP(tp, th,
		    &tptosocket(tp)->so_rcv,
		    &tptosocket(tp)->so_snd, TCP_LOG_IN, 0,
		    0, &log, true, &ltv);
		if (xx) {
			tp->snd_una = orig_snd_una;
		}
	}

}

static void
rack_handle_probe_response(struct tcp_rack *rack, uint32_t tiwin, uint32_t us_cts)
{
	uint32_t us_rtt;
	/*
	 * A persist or keep-alive was forced out, update our
	 * min rtt time. Note we now worry about lost responses.
	 * When a subsequent keep-alive or persist times out
	 * and forced_ack is still on, then the last probe
	 * was not responded to. In such cases we have a
	 * sysctl that controls the behavior. Either we apply
	 * the rtt but with reduced confidence (0). Or we just
	 * plain don't apply the rtt estimate. Having data flow
	 * will clear the probe_not_answered flag i.e. cum-ack
	 * move forward <or> exiting and reentering persists.
	 */

	rack->forced_ack = 0;
	rack->rc_tp->t_rxtshift = 0;
	if ((rack->rc_in_persist &&
	     (tiwin == rack->rc_tp->snd_wnd)) ||
	    (rack->rc_in_persist == 0)) {
		/*
		 * In persists only apply the RTT update if this is
		 * a response to our window probe. And that
		 * means the rwnd sent must match the current
		 * snd_wnd. If it does not, then we got a
		 * window update ack instead. For keepalive
		 * we allow the answer no matter what the window.
		 *
		 * Note that if the probe_not_answered is set then
		 * the forced_ack_ts is the oldest one i.e. the first
		 * probe sent that might have been lost. This assures
		 * us that any RTT we do calculate errs on the long
		 * side rather than coming out too short.
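		 *
		 * A rough illustration (hypothetical timestamps): a window
		 * probe recorded forced_ack_ts = 1,000,000 us and the probe
		 * response arrives at us_cts = 1,040,000 us, so us_rtt is
		 * 40,000 us. If the probe was answered on the first try the
		 * sample is applied with confidence 3; if probe_not_answered
		 * was set it is either applied with confidence 0 or skipped,
		 * depending on rack_apply_rtt_with_reduced_conf.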
13238 */ 13239 if (rack->rc_in_persist) 13240 counter_u64_add(rack_persists_acks, 1); 13241 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; 13242 if (us_rtt == 0) 13243 us_rtt = 1; 13244 if (rack->probe_not_answered == 0) { 13245 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13246 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 3, NULL, 1); 13247 } else { 13248 /* We have a retransmitted probe here too */ 13249 if (rack_apply_rtt_with_reduced_conf) { 13250 rack_apply_updated_usrtt(rack, us_rtt, us_cts); 13251 tcp_rack_xmit_timer(rack, us_rtt, 0, us_rtt, 0, NULL, 1); 13252 } 13253 } 13254 } 13255 } 13256 13257 static int 13258 rack_do_compressed_ack_processing(struct tcpcb *tp, struct socket *so, struct mbuf *m, int nxt_pkt, struct timeval *tv) 13259 { 13260 /* 13261 * Handle a "special" compressed ack mbuf. Each incoming 13262 * ack has only four possible dispositions: 13263 * 13264 * A) It moves the cum-ack forward 13265 * B) It is behind the cum-ack. 13266 * C) It is a window-update ack. 13267 * D) It is a dup-ack. 13268 * 13269 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES 13270 * in the incoming mbuf. We also need to still pay attention 13271 * to nxt_pkt since there may be another packet after this 13272 * one. 13273 */ 13274 #ifdef TCP_ACCOUNTING 13275 uint64_t ts_val; 13276 uint64_t rdstc; 13277 #endif 13278 int segsiz; 13279 struct timespec ts; 13280 struct tcp_rack *rack; 13281 struct tcp_ackent *ae; 13282 uint32_t tiwin, ms_cts, cts, acked, acked_amount, high_seq, win_seq, the_win, win_upd_ack; 13283 int cnt, i, did_out, ourfinisacked = 0; 13284 struct tcpopt to_holder, *to = NULL; 13285 #ifdef TCP_ACCOUNTING 13286 int win_up_req = 0; 13287 #endif 13288 int nsegs = 0; 13289 int under_pacing = 1; 13290 int recovery = 0; 13291 #ifdef TCP_ACCOUNTING 13292 sched_pin(); 13293 #endif 13294 rack = (struct tcp_rack *)tp->t_fb_ptr; 13295 if (rack->gp_ready && 13296 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) 13297 under_pacing = 0; 13298 else 13299 under_pacing = 1; 13300 13301 if (rack->r_state != tp->t_state) 13302 rack_set_state(tp, rack); 13303 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13304 (tp->t_flags & TF_GPUTINPROG)) { 13305 /* 13306 * We have a goodput in progress 13307 * and we have entered a late state. 13308 * Do we have enough data in the sb 13309 * to handle the GPUT request? 13310 */ 13311 uint32_t bytes; 13312 13313 bytes = tp->gput_ack - tp->gput_seq; 13314 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 13315 bytes += tp->gput_seq - tp->snd_una; 13316 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 13317 /* 13318 * There are not enough bytes in the socket 13319 * buffer that have been sent to cover this 13320 * measurement. Cancel it. 
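			 *
			 * For example (made-up numbers): gput_seq = 1000,
			 * gput_ack = 51000 and snd_una = 1000 means 50000
			 * bytes must still be sent to finish the measurement;
			 * if only 30000 bytes remain in so_snd the goodput
			 * sample can never complete, so it is abandoned here.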
13321 */ 13322 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 13323 rack->r_ctl.rc_gp_srtt /*flex1*/, 13324 tp->gput_seq, 13325 0, 0, 18, __LINE__, NULL, 0); 13326 tp->t_flags &= ~TF_GPUTINPROG; 13327 } 13328 } 13329 to = &to_holder; 13330 to->to_flags = 0; 13331 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), 13332 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); 13333 cnt = m->m_len / sizeof(struct tcp_ackent); 13334 counter_u64_add(rack_multi_single_eq, cnt); 13335 high_seq = tp->snd_una; 13336 the_win = tp->snd_wnd; 13337 win_seq = tp->snd_wl1; 13338 win_upd_ack = tp->snd_wl2; 13339 cts = tcp_tv_to_usectick(tv); 13340 ms_cts = tcp_tv_to_mssectick(tv); 13341 rack->r_ctl.rc_rcvtime = cts; 13342 segsiz = ctf_fixed_maxseg(tp); 13343 if ((rack->rc_gp_dyn_mul) && 13344 (rack->use_fixed_rate == 0) && 13345 (rack->rc_always_pace)) { 13346 /* Check in on probertt */ 13347 rack_check_probe_rtt(rack, cts); 13348 } 13349 for (i = 0; i < cnt; i++) { 13350 #ifdef TCP_ACCOUNTING 13351 ts_val = get_cyclecount(); 13352 #endif 13353 rack_clear_rate_sample(rack); 13354 ae = ((mtod(m, struct tcp_ackent *)) + i); 13355 /* Setup the window */ 13356 tiwin = ae->win << tp->snd_scale; 13357 if (tiwin > rack->r_ctl.rc_high_rwnd) 13358 rack->r_ctl.rc_high_rwnd = tiwin; 13359 /* figure out the type of ack */ 13360 if (SEQ_LT(ae->ack, high_seq)) { 13361 /* Case B*/ 13362 ae->ack_val_set = ACK_BEHIND; 13363 } else if (SEQ_GT(ae->ack, high_seq)) { 13364 /* Case A */ 13365 ae->ack_val_set = ACK_CUMACK; 13366 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ 13367 /* Case D */ 13368 ae->ack_val_set = ACK_DUPACK; 13369 } else { 13370 /* Case C */ 13371 ae->ack_val_set = ACK_RWND; 13372 } 13373 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); 13374 /* Validate timestamp */ 13375 if (ae->flags & HAS_TSTMP) { 13376 /* Setup for a timestamp */ 13377 to->to_flags = TOF_TS; 13378 ae->ts_echo -= tp->ts_offset; 13379 to->to_tsecr = ae->ts_echo; 13380 to->to_tsval = ae->ts_value; 13381 /* 13382 * If echoed timestamp is later than the current time, fall back to 13383 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 13384 * were used when this connection was established. 13385 */ 13386 if (TSTMP_GT(ae->ts_echo, ms_cts)) 13387 to->to_tsecr = 0; 13388 if (tp->ts_recent && 13389 TSTMP_LT(ae->ts_value, tp->ts_recent)) { 13390 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { 13391 #ifdef TCP_ACCOUNTING 13392 rdstc = get_cyclecount(); 13393 if (rdstc > ts_val) { 13394 counter_u64_add(tcp_proc_time[ae->ack_val_set] , 13395 (rdstc - ts_val)); 13396 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13397 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13398 } 13399 } 13400 #endif 13401 continue; 13402 } 13403 } 13404 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && 13405 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { 13406 tp->ts_recent_age = tcp_ts_getticks(); 13407 tp->ts_recent = ae->ts_value; 13408 } 13409 } else { 13410 /* Setup for a no options */ 13411 to->to_flags = 0; 13412 } 13413 /* Update the rcv time and perform idle reduction possibly */ 13414 if (tp->t_idle_reduce && 13415 (tp->snd_max == tp->snd_una) && 13416 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 13417 counter_u64_add(rack_input_idle_reduces, 1); 13418 rack_cc_after_idle(rack, tp); 13419 } 13420 tp->t_rcvtime = ticks; 13421 /* Now what about ECN of a chain of pure ACKs? 
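		 *
		 * Each ack entry carries the IP ECN codepoint it arrived
		 * with; if tcp_ecn_input_segment() reports congestion for any
		 * ack in the chain we signal CC_ECN just as we would for a
		 * normal segment, so a CE mark on a pure ack still reduces
		 * cwnd.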
*/ 13422 if (tcp_ecn_input_segment(tp, ae->flags, 0, 13423 tcp_packets_this_ack(tp, ae->ack), 13424 ae->codepoint)) 13425 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); 13426 #ifdef TCP_ACCOUNTING 13427 /* Count for the specific type of ack in */ 13428 counter_u64_add(tcp_cnt_counters[ae->ack_val_set], 1); 13429 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13430 tp->tcp_cnt_counters[ae->ack_val_set]++; 13431 } 13432 #endif 13433 /* 13434 * Note how we could move up these in the determination 13435 * above, but we don't so that way the timestamp checks (and ECN) 13436 * is done first before we do any processing on the ACK. 13437 * The non-compressed path through the code has this 13438 * weakness (noted by @jtl) that it actually does some 13439 * processing before verifying the timestamp information. 13440 * We don't take that path here which is why we set 13441 * the ack_val_set first, do the timestamp and ecn 13442 * processing, and then look at what we have setup. 13443 */ 13444 if (ae->ack_val_set == ACK_BEHIND) { 13445 /* 13446 * Case B flag reordering, if window is not closed 13447 * or it could be a keep-alive or persists 13448 */ 13449 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { 13450 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 13451 } 13452 } else if (ae->ack_val_set == ACK_DUPACK) { 13453 /* Case D */ 13454 rack_strike_dupack(rack); 13455 } else if (ae->ack_val_set == ACK_RWND) { 13456 /* Case C */ 13457 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { 13458 ts.tv_sec = ae->timestamp / 1000000000; 13459 ts.tv_nsec = ae->timestamp % 1000000000; 13460 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 13461 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 13462 } else { 13463 rack->r_ctl.act_rcv_time = *tv; 13464 } 13465 if (rack->forced_ack) { 13466 rack_handle_probe_response(rack, tiwin, 13467 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); 13468 } 13469 #ifdef TCP_ACCOUNTING 13470 win_up_req = 1; 13471 #endif 13472 win_upd_ack = ae->ack; 13473 win_seq = ae->seq; 13474 the_win = tiwin; 13475 rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq); 13476 } else { 13477 /* Case A */ 13478 if (SEQ_GT(ae->ack, tp->snd_max)) { 13479 /* 13480 * We just send an ack since the incoming 13481 * ack is beyond the largest seq we sent. 
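				 *
				 * Hypothetical example: snd_max is 5000 and
				 * ae->ack is 9000, i.e. the peer acks data we
				 * never sent. high_seq is not advanced; we
				 * only schedule an ACK (rate limited by the
				 * ack-war check) to tell the peer where we
				 * really are.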
				 */
				if ((tp->t_flags & TF_ACKNOW) == 0) {
					ctf_ack_war_checks(tp, &rack->r_ctl.challenge_ack_ts, &rack->r_ctl.challenge_ack_cnt);
					if (tp->t_flags & TF_ACKNOW)
						rack->r_wanted_output = 1;
				}
			} else {
				nsegs++;
				/* If the window changed setup to update */
				if (tiwin != tp->snd_wnd) {
					win_upd_ack = ae->ack;
					win_seq = ae->seq;
					the_win = tiwin;
					rack_do_win_updates(tp, rack, the_win, win_seq, win_upd_ack, cts, high_seq);
				}
#ifdef TCP_ACCOUNTING
				/* Account for the acks */
				if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
					tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz);
				}
				counter_u64_add(tcp_cnt_counters[CNT_OF_ACKS_IN],
				    (((ae->ack - high_seq) + segsiz - 1) / segsiz));
#endif
				high_seq = ae->ack;
				if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) {
					union tcp_log_stackspecific log;
					struct timeval tv;

					memset(&log.u_bbr, 0, sizeof(log.u_bbr));
					log.u_bbr.timeStamp = tcp_get_usecs(&tv);
					log.u_bbr.flex1 = high_seq;
					log.u_bbr.flex2 = rack->r_ctl.roundends;
					log.u_bbr.flex3 = rack->r_ctl.current_round;
					log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround;
					log.u_bbr.flex8 = 8;
					tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0,
					    0, &log, false, NULL, NULL, 0, &tv);
				}
				/*
				 * The draft (v3) calls for us to use SEQ_GEQ, but that
				 * causes issues when we are just going app limited. Lets
				 * instead use SEQ_GT <or> where its equal but more data
				 * is outstanding.
				 */
				if ((SEQ_GT(high_seq, rack->r_ctl.roundends)) ||
				    ((high_seq == rack->r_ctl.roundends) &&
				     SEQ_GT(tp->snd_max, tp->snd_una))) {
					rack->r_ctl.current_round++;
					rack->r_ctl.roundends = tp->snd_max;
					if (CC_ALGO(tp)->newround != NULL) {
						CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round);
					}
				}
				/* Setup our act_rcv_time */
				if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) {
					ts.tv_sec = ae->timestamp / 1000000000;
					ts.tv_nsec = ae->timestamp % 1000000000;
					rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec;
					rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000;
				} else {
					rack->r_ctl.act_rcv_time = *tv;
				}
				rack_process_to_cumack(tp, rack, ae->ack, cts, to);
				if (rack->rc_dsack_round_seen) {
					/* Is the dsack round over?
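					 *
					 * The round is bounded by
					 * dsack_round_end (roughly the snd_max
					 * recorded when the first DSACK of the
					 * round arrived); once the cumulative
					 * ack reaches it we clear
					 * rc_dsack_round_seen and log the
					 * event so a new round can begin.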
*/ 13547 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { 13548 /* Yes it is */ 13549 rack->rc_dsack_round_seen = 0; 13550 rack_log_dsack_event(rack, 3, __LINE__, 0, 0); 13551 } 13552 } 13553 } 13554 } 13555 /* And lets be sure to commit the rtt measurements for this ack */ 13556 tcp_rack_xmit_timer_commit(rack, tp); 13557 #ifdef TCP_ACCOUNTING 13558 rdstc = get_cyclecount(); 13559 if (rdstc > ts_val) { 13560 counter_u64_add(tcp_proc_time[ae->ack_val_set] , (rdstc - ts_val)); 13561 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13562 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); 13563 if (ae->ack_val_set == ACK_CUMACK) 13564 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); 13565 } 13566 } 13567 #endif 13568 } 13569 #ifdef TCP_ACCOUNTING 13570 ts_val = get_cyclecount(); 13571 #endif 13572 /* Tend to any collapsed window */ 13573 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { 13574 /* The peer collapsed the window */ 13575 rack_collapsed_window(rack, (tp->snd_max - high_seq), __LINE__); 13576 } else if (rack->rc_has_collapsed) 13577 rack_un_collapse_window(rack, __LINE__); 13578 if ((rack->r_collapse_point_valid) && 13579 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) 13580 rack->r_collapse_point_valid = 0; 13581 acked_amount = acked = (high_seq - tp->snd_una); 13582 if (acked) { 13583 /* 13584 * Clear the probe not answered flag 13585 * since cum-ack moved forward. 13586 */ 13587 rack->probe_not_answered = 0; 13588 if (rack->sack_attack_disable == 0) 13589 rack_do_decay(rack); 13590 if (acked >= segsiz) { 13591 /* 13592 * You only get credit for 13593 * MSS and greater (and you get extra 13594 * credit for larger cum-ack moves). 13595 */ 13596 int ac; 13597 13598 ac = acked / segsiz; 13599 rack->r_ctl.ack_count += ac; 13600 counter_u64_add(rack_ack_total, ac); 13601 } 13602 if (rack->r_ctl.ack_count > 0xfff00000) { 13603 /* 13604 * reduce the number to keep us under 13605 * a uint32_t. 13606 */ 13607 rack->r_ctl.ack_count /= 2; 13608 rack->r_ctl.sack_count /= 2; 13609 } 13610 if (tp->t_flags & TF_NEEDSYN) { 13611 /* 13612 * T/TCP: Connection was half-synchronized, and our SYN has 13613 * been ACK'd (so connection is now fully synchronized). Go 13614 * to non-starred state, increment snd_una for ACK of SYN, 13615 * and check if we can do window scaling. 13616 */ 13617 tp->t_flags &= ~TF_NEEDSYN; 13618 tp->snd_una++; 13619 acked_amount = acked = (high_seq - tp->snd_una); 13620 } 13621 if (acked > sbavail(&so->so_snd)) 13622 acked_amount = sbavail(&so->so_snd); 13623 #ifdef NETFLIX_EXP_DETECTION 13624 /* 13625 * We only care on a cum-ack move if we are in a sack-disabled 13626 * state. We have already added in to the ack_count, and we never 13627 * would disable on a cum-ack move, so we only care to do the 13628 * detection if it may "undo" it, i.e. we were in disabled already. 
13629 */ 13630 if (rack->sack_attack_disable) 13631 rack_do_detection(tp, rack, acked_amount, segsiz); 13632 #endif 13633 if (IN_FASTRECOVERY(tp->t_flags) && 13634 (rack->rack_no_prr == 0)) 13635 rack_update_prr(tp, rack, acked_amount, high_seq); 13636 if (IN_RECOVERY(tp->t_flags)) { 13637 if (SEQ_LT(high_seq, tp->snd_recover) && 13638 (SEQ_LT(high_seq, tp->snd_max))) { 13639 tcp_rack_partialack(tp); 13640 } else { 13641 rack_post_recovery(tp, high_seq); 13642 recovery = 1; 13643 } 13644 } 13645 /* Handle the rack-log-ack part (sendmap) */ 13646 if ((sbused(&so->so_snd) == 0) && 13647 (acked > acked_amount) && 13648 (tp->t_state >= TCPS_FIN_WAIT_1) && 13649 (tp->t_flags & TF_SENTFIN)) { 13650 /* 13651 * We must be sure our fin 13652 * was sent and acked (we can be 13653 * in FIN_WAIT_1 without having 13654 * sent the fin). 13655 */ 13656 ourfinisacked = 1; 13657 /* 13658 * Lets make sure snd_una is updated 13659 * since most likely acked_amount = 0 (it 13660 * should be). 13661 */ 13662 tp->snd_una = high_seq; 13663 } 13664 /* Did we make a RTO error? */ 13665 if ((tp->t_flags & TF_PREVVALID) && 13666 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { 13667 tp->t_flags &= ~TF_PREVVALID; 13668 if (tp->t_rxtshift == 1 && 13669 (int)(ticks - tp->t_badrxtwin) < 0) 13670 rack_cong_signal(tp, CC_RTO_ERR, high_seq, __LINE__); 13671 } 13672 /* Handle the data in the socket buffer */ 13673 KMOD_TCPSTAT_ADD(tcps_rcvackpack, 1); 13674 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 13675 if (acked_amount > 0) { 13676 struct mbuf *mfree; 13677 13678 rack_ack_received(tp, rack, high_seq, nsegs, CC_ACK, recovery); 13679 SOCKBUF_LOCK(&so->so_snd); 13680 mfree = sbcut_locked(&so->so_snd, acked_amount); 13681 tp->snd_una = high_seq; 13682 /* Note we want to hold the sb lock through the sendmap adjust */ 13683 rack_adjust_sendmap(rack, &so->so_snd, tp->snd_una); 13684 /* Wake up the socket if we have room to write more */ 13685 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); 13686 sowwakeup_locked(so); 13687 m_freem(mfree); 13688 } 13689 /* update progress */ 13690 tp->t_acktime = ticks; 13691 rack_log_progress_event(rack, tp, tp->t_acktime, 13692 PROGRESS_UPDATE, __LINE__); 13693 /* Clear out shifts and such */ 13694 tp->t_rxtshift = 0; 13695 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 13696 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); 13697 rack->rc_tlp_in_progress = 0; 13698 rack->r_ctl.rc_tlp_cnt_out = 0; 13699 /* Send recover and snd_nxt must be dragged along */ 13700 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 13701 tp->snd_recover = tp->snd_una; 13702 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 13703 tp->snd_nxt = tp->snd_una; 13704 /* 13705 * If the RXT timer is running we want to 13706 * stop it, so we can restart a TLP (or new RXT). 
13707 */ 13708 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) 13709 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13710 #ifdef NETFLIX_HTTP_LOGGING 13711 tcp_http_check_for_comp(rack->rc_tp, high_seq); 13712 #endif 13713 tp->snd_wl2 = high_seq; 13714 tp->t_dupacks = 0; 13715 if (under_pacing && 13716 (rack->use_fixed_rate == 0) && 13717 (rack->in_probe_rtt == 0) && 13718 rack->rc_gp_dyn_mul && 13719 rack->rc_always_pace) { 13720 /* Check if we are dragging bottom */ 13721 rack_check_bottom_drag(tp, rack, so, acked); 13722 } 13723 if (tp->snd_una == tp->snd_max) { 13724 tp->t_flags &= ~TF_PREVVALID; 13725 rack->r_ctl.retran_during_recovery = 0; 13726 rack->r_ctl.dsack_byte_cnt = 0; 13727 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); 13728 if (rack->r_ctl.rc_went_idle_time == 0) 13729 rack->r_ctl.rc_went_idle_time = 1; 13730 rack_log_progress_event(rack, tp, 0, PROGRESS_CLEAR, __LINE__); 13731 if (sbavail(&tptosocket(tp)->so_snd) == 0) 13732 tp->t_acktime = 0; 13733 /* Set so we might enter persists... */ 13734 rack->r_wanted_output = 1; 13735 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13736 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 13737 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 13738 (sbavail(&so->so_snd) == 0) && 13739 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 13740 /* 13741 * The socket was gone and the 13742 * peer sent data (not now in the past), time to 13743 * reset him. 13744 */ 13745 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); 13746 /* tcp_close will kill the inp pre-log the Reset */ 13747 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 13748 #ifdef TCP_ACCOUNTING 13749 rdstc = get_cyclecount(); 13750 if (rdstc > ts_val) { 13751 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13752 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13753 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13754 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13755 } 13756 } 13757 #endif 13758 m_freem(m); 13759 tp = tcp_close(tp); 13760 if (tp == NULL) { 13761 #ifdef TCP_ACCOUNTING 13762 sched_unpin(); 13763 #endif 13764 return (1); 13765 } 13766 /* 13767 * We would normally do drop-with-reset which would 13768 * send back a reset. We can't since we don't have 13769 * all the needed bits. Instead lets arrange for 13770 * a call to tcp_output(). That way since we 13771 * are in the closed state we will generate a reset. 13772 * 13773 * Note if tcp_accounting is on we don't unpin since 13774 * we do that after the goto label. 13775 */ 13776 goto send_out_a_rst; 13777 } 13778 if ((sbused(&so->so_snd) == 0) && 13779 (tp->t_state >= TCPS_FIN_WAIT_1) && 13780 (tp->t_flags & TF_SENTFIN)) { 13781 /* 13782 * If we can't receive any more data, then closing user can 13783 * proceed. Starting the timer is contrary to the 13784 * specification, but if we don't get a FIN we'll hang 13785 * forever. 13786 * 13787 */ 13788 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13789 soisdisconnected(so); 13790 tcp_timer_activate(tp, TT_2MSL, 13791 (tcp_fast_finwait2_recycle ? 13792 tcp_finwait2_timeout : 13793 TP_MAXIDLE(tp))); 13794 } 13795 if (ourfinisacked == 0) { 13796 /* 13797 * We don't change to fin-wait-2 if we have our fin acked 13798 * which means we are probably in TCPS_CLOSING. 
13799 */ 13800 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13801 } 13802 } 13803 } 13804 /* Wake up the socket if we have room to write more */ 13805 if (sbavail(&so->so_snd)) { 13806 rack->r_wanted_output = 1; 13807 if (ctf_progress_timeout_check(tp, true)) { 13808 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, 13809 tp, tick, PROGRESS_DROP, __LINE__); 13810 /* 13811 * We cheat here and don't send a RST, we should send one 13812 * when the pacer drops the connection. 13813 */ 13814 #ifdef TCP_ACCOUNTING 13815 rdstc = get_cyclecount(); 13816 if (rdstc > ts_val) { 13817 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13818 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13819 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13820 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13821 } 13822 } 13823 sched_unpin(); 13824 #endif 13825 (void)tcp_drop(tp, ETIMEDOUT); 13826 m_freem(m); 13827 return (1); 13828 } 13829 } 13830 if (ourfinisacked) { 13831 switch(tp->t_state) { 13832 case TCPS_CLOSING: 13833 #ifdef TCP_ACCOUNTING 13834 rdstc = get_cyclecount(); 13835 if (rdstc > ts_val) { 13836 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13837 (rdstc - ts_val)); 13838 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13839 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13840 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13841 } 13842 } 13843 sched_unpin(); 13844 #endif 13845 tcp_twstart(tp); 13846 m_freem(m); 13847 return (1); 13848 break; 13849 case TCPS_LAST_ACK: 13850 #ifdef TCP_ACCOUNTING 13851 rdstc = get_cyclecount(); 13852 if (rdstc > ts_val) { 13853 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13854 (rdstc - ts_val)); 13855 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13856 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13857 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13858 } 13859 } 13860 sched_unpin(); 13861 #endif 13862 tp = tcp_close(tp); 13863 ctf_do_drop(m, tp); 13864 return (1); 13865 break; 13866 case TCPS_FIN_WAIT_1: 13867 #ifdef TCP_ACCOUNTING 13868 rdstc = get_cyclecount(); 13869 if (rdstc > ts_val) { 13870 counter_u64_add(tcp_proc_time[ACK_CUMACK] , 13871 (rdstc - ts_val)); 13872 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13873 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13874 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13875 } 13876 } 13877 #endif 13878 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 13879 soisdisconnected(so); 13880 tcp_timer_activate(tp, TT_2MSL, 13881 (tcp_fast_finwait2_recycle ? 13882 tcp_finwait2_timeout : 13883 TP_MAXIDLE(tp))); 13884 } 13885 tcp_state_change(tp, TCPS_FIN_WAIT_2); 13886 break; 13887 default: 13888 break; 13889 } 13890 } 13891 if (rack->r_fast_output) { 13892 /* 13893 * We re doing fast output.. can we expand that? 
13894 */ 13895 rack_gain_for_fastoutput(rack, tp, so, acked_amount); 13896 } 13897 #ifdef TCP_ACCOUNTING 13898 rdstc = get_cyclecount(); 13899 if (rdstc > ts_val) { 13900 counter_u64_add(tcp_proc_time[ACK_CUMACK] , (rdstc - ts_val)); 13901 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13902 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); 13903 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); 13904 } 13905 } 13906 13907 } else if (win_up_req) { 13908 rdstc = get_cyclecount(); 13909 if (rdstc > ts_val) { 13910 counter_u64_add(tcp_proc_time[ACK_RWND] , (rdstc - ts_val)); 13911 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 13912 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); 13913 } 13914 } 13915 #endif 13916 } 13917 /* Now is there a next packet, if so we are done */ 13918 m_freem(m); 13919 did_out = 0; 13920 if (nxt_pkt) { 13921 #ifdef TCP_ACCOUNTING 13922 sched_unpin(); 13923 #endif 13924 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 5, nsegs); 13925 return (0); 13926 } 13927 rack_handle_might_revert(tp, rack); 13928 ctf_calc_rwin(so, tp); 13929 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 13930 send_out_a_rst: 13931 if (tcp_output(tp) < 0) { 13932 #ifdef TCP_ACCOUNTING 13933 sched_unpin(); 13934 #endif 13935 return (1); 13936 } 13937 did_out = 1; 13938 } 13939 rack_free_trim(rack); 13940 #ifdef TCP_ACCOUNTING 13941 sched_unpin(); 13942 #endif 13943 rack_timer_audit(tp, rack, &so->so_snd); 13944 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, 6, nsegs); 13945 return (0); 13946 } 13947 13948 13949 static int 13950 rack_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 13951 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 13952 int32_t nxt_pkt, struct timeval *tv) 13953 { 13954 struct inpcb *inp = tptoinpcb(tp); 13955 #ifdef TCP_ACCOUNTING 13956 uint64_t ts_val; 13957 #endif 13958 int32_t thflags, retval, did_out = 0; 13959 int32_t way_out = 0; 13960 /* 13961 * cts - is the current time from tv (caller gets ts) in microseconds. 13962 * ms_cts - is the current time from tv in milliseconds. 13963 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. 13964 */ 13965 uint32_t cts, us_cts, ms_cts; 13966 uint32_t tiwin, high_seq; 13967 struct timespec ts; 13968 struct tcpopt to; 13969 struct tcp_rack *rack; 13970 struct rack_sendmap *rsm; 13971 int32_t prev_state = 0; 13972 #ifdef TCP_ACCOUNTING 13973 int ack_val_set = 0xf; 13974 #endif 13975 int nsegs; 13976 13977 NET_EPOCH_ASSERT(); 13978 INP_WLOCK_ASSERT(inp); 13979 13980 /* 13981 * tv passed from common code is from either M_TSTMP_LRO or 13982 * tcp_get_usecs() if no LRO m_pkthdr timestamp is present. 13983 */ 13984 rack = (struct tcp_rack *)tp->t_fb_ptr; 13985 if (m->m_flags & M_ACKCMP) { 13986 /* 13987 * All compressed ack's are ack's by definition so 13988 * remove any ack required flag and then do the processing. 13989 */ 13990 rack->rc_ack_required = 0; 13991 return (rack_do_compressed_ack_processing(tp, so, m, nxt_pkt, tv)); 13992 } 13993 if (m->m_flags & M_ACKCMP) { 13994 panic("Impossible reach m has ackcmp? 
m:%p tp:%p", m, tp); 13995 } 13996 cts = tcp_tv_to_usectick(tv); 13997 ms_cts = tcp_tv_to_mssectick(tv); 13998 nsegs = m->m_pkthdr.lro_nsegs; 13999 counter_u64_add(rack_proc_non_comp_ack, 1); 14000 thflags = tcp_get_flags(th); 14001 #ifdef TCP_ACCOUNTING 14002 sched_pin(); 14003 if (thflags & TH_ACK) 14004 ts_val = get_cyclecount(); 14005 #endif 14006 if ((m->m_flags & M_TSTMP) || 14007 (m->m_flags & M_TSTMP_LRO)) { 14008 mbuf_tstmp2timespec(m, &ts); 14009 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; 14010 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; 14011 } else 14012 rack->r_ctl.act_rcv_time = *tv; 14013 kern_prefetch(rack, &prev_state); 14014 prev_state = 0; 14015 /* 14016 * Unscale the window into a 32-bit value. For the SYN_SENT state 14017 * the scale is zero. 14018 */ 14019 tiwin = th->th_win << tp->snd_scale; 14020 #ifdef TCP_ACCOUNTING 14021 if (thflags & TH_ACK) { 14022 /* 14023 * We have a tradeoff here. We can either do what we are 14024 * doing i.e. pinning to this CPU and then doing the accounting 14025 * <or> we could do a critical enter, setup the rdtsc and cpu 14026 * as in below, and then validate we are on the same CPU on 14027 * exit. I have choosen to not do the critical enter since 14028 * that often will gain you a context switch, and instead lock 14029 * us (line above this if) to the same CPU with sched_pin(). This 14030 * means we may be context switched out for a higher priority 14031 * interupt but we won't be moved to another CPU. 14032 * 14033 * If this occurs (which it won't very often since we most likely 14034 * are running this code in interupt context and only a higher 14035 * priority will bump us ... clock?) we will falsely add in 14036 * to the time the interupt processing time plus the ack processing 14037 * time. This is ok since its a rare event. 14038 */ 14039 ack_val_set = tcp_do_ack_accounting(tp, th, &to, tiwin, 14040 ctf_fixed_maxseg(tp)); 14041 } 14042 #endif 14043 /* 14044 * Parse options on any incoming segment. 14045 */ 14046 memset(&to, 0, sizeof(to)); 14047 tcp_dooptions(&to, (u_char *)(th + 1), 14048 (th->th_off << 2) - sizeof(struct tcphdr), 14049 (thflags & TH_SYN) ? TO_SYN : 0); 14050 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 14051 __func__)); 14052 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 14053 __func__)); 14054 14055 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 14056 (tp->t_flags & TF_GPUTINPROG)) { 14057 /* 14058 * We have a goodput in progress 14059 * and we have entered a late state. 14060 * Do we have enough data in the sb 14061 * to handle the GPUT request? 14062 */ 14063 uint32_t bytes; 14064 14065 bytes = tp->gput_ack - tp->gput_seq; 14066 if (SEQ_GT(tp->gput_seq, tp->snd_una)) 14067 bytes += tp->gput_seq - tp->snd_una; 14068 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { 14069 /* 14070 * There are not enough bytes in the socket 14071 * buffer that have been sent to cover this 14072 * measurement. Cancel it. 
			 */
			rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
						   rack->r_ctl.rc_gp_srtt /*flex1*/,
						   tp->gput_seq,
						   0, 0, 18, __LINE__, NULL, 0);
			tp->t_flags &= ~TF_GPUTINPROG;
		}
	}
	high_seq = th->th_ack;
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval ltv;
#ifdef NETFLIX_HTTP_LOGGING
		struct http_sendfile_track *http_req;

		if (SEQ_GT(th->th_ack, tp->snd_una)) {
			http_req = tcp_http_find_req_for_seq(tp, (th->th_ack-1));
		} else {
			http_req = tcp_http_find_req_for_seq(tp, th->th_ack);
		}
#endif
		memset(&log.u_bbr, 0, sizeof(log.u_bbr));
		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp);
		if (rack->rack_no_prr == 0)
			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		else
			log.u_bbr.flex1 = 0;
		log.u_bbr.use_lt_bw = rack->r_ent_rec_ns;
		log.u_bbr.use_lt_bw <<= 1;
		log.u_bbr.use_lt_bw |= rack->r_might_revert;
		log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced;
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		log.u_bbr.pkts_out = rack->rc_tp->t_maxseg;
		log.u_bbr.flex3 = m->m_flags;
		log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags;
		log.u_bbr.lost = thflags;
		log.u_bbr.pacing_gain = 0x1;
#ifdef TCP_ACCOUNTING
		log.u_bbr.cwnd_gain = ack_val_set;
#endif
		log.u_bbr.flex7 = 2;
		if (m->m_flags & M_TSTMP) {
			/* Record the hardware timestamp if present */
			mbuf_tstmp2timespec(m, &ts);
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.lt_epoch = tcp_tv_to_usectick(&ltv);
		} else if (m->m_flags & M_TSTMP_LRO) {
			/* Record the LRO arrival timestamp */
			mbuf_tstmp2timespec(m, &ts);
			ltv.tv_sec = ts.tv_sec;
			ltv.tv_usec = ts.tv_nsec / 1000;
			log.u_bbr.flex5 = tcp_tv_to_usectick(&ltv);
		}
		log.u_bbr.timeStamp = tcp_get_usecs(&ltv);
		/* Log the rcv time */
		log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp;
#ifdef NETFLIX_HTTP_LOGGING
		log.u_bbr.applimited = tp->t_http_closed;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_open;
		log.u_bbr.applimited <<= 8;
		log.u_bbr.applimited |= tp->t_http_req;
		if (http_req) {
			/* Copy out any client req info */
			/* seconds */
			log.u_bbr.pkt_epoch = (http_req->localtime / HPTS_USEC_IN_SEC);
			/* useconds */
			log.u_bbr.delivered = (http_req->localtime % HPTS_USEC_IN_SEC);
			log.u_bbr.rttProp = http_req->timestamp;
			log.u_bbr.cur_del_rate = http_req->start;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_OPEN) {
				log.u_bbr.flex8 |= 1;
			} else {
				log.u_bbr.flex8 |= 2;
				log.u_bbr.bw_inuse = http_req->end;
			}
			log.u_bbr.flex6 = http_req->start_seq;
			if (http_req->flags & TCP_HTTP_TRACK_FLG_COMP) {
				log.u_bbr.flex8 |= 4;
				log.u_bbr.epoch = http_req->end_seq;
			}
		}
#endif
		TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0,
		    tlen, &log, true, &ltv);
	}
	/* Remove ack required flag if set, we have one */
	if (thflags & TH_ACK)
		rack->rc_ack_required = 0;
	if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) {
		way_out = 4;
		retval = 0;
		m_freem(m);
		goto done_with_input;
	}
	/*
	 * If a segment with the ACK-bit set arrives in the SYN-SENT state
	 * check SEQ.ACK first as
described on page 66 of RFC 793, section 3.9. 14172 */ 14173 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 14174 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 14175 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 14176 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 14177 #ifdef TCP_ACCOUNTING 14178 sched_unpin(); 14179 #endif 14180 return (1); 14181 } 14182 /* 14183 * If timestamps were negotiated during SYN/ACK and a 14184 * segment without a timestamp is received, silently drop 14185 * the segment, unless it is a RST segment or missing timestamps are 14186 * tolerated. 14187 * See section 3.2 of RFC 7323. 14188 */ 14189 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && 14190 ((thflags & TH_RST) == 0) && (V_tcp_tolerate_missing_ts == 0)) { 14191 way_out = 5; 14192 retval = 0; 14193 m_freem(m); 14194 goto done_with_input; 14195 } 14196 14197 /* 14198 * Segment received on connection. Reset idle time and keep-alive 14199 * timer. XXX: This should be done after segment validation to 14200 * ignore broken/spoofed segs. 14201 */ 14202 if (tp->t_idle_reduce && 14203 (tp->snd_max == tp->snd_una) && 14204 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { 14205 counter_u64_add(rack_input_idle_reduces, 1); 14206 rack_cc_after_idle(rack, tp); 14207 } 14208 tp->t_rcvtime = ticks; 14209 #ifdef STATS 14210 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 14211 #endif 14212 if (tiwin > rack->r_ctl.rc_high_rwnd) 14213 rack->r_ctl.rc_high_rwnd = tiwin; 14214 /* 14215 * TCP ECN processing. XXXJTL: If we ever use ECN, we need to move 14216 * this to occur after we've validated the segment. 14217 */ 14218 if (tcp_ecn_input_segment(tp, thflags, tlen, 14219 tcp_packets_this_ack(tp, th->th_ack), 14220 iptos)) 14221 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); 14222 14223 /* 14224 * If echoed timestamp is later than the current time, fall back to 14225 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 14226 * were used when this connection was established. 14227 */ 14228 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 14229 to.to_tsecr -= tp->ts_offset; 14230 if (TSTMP_GT(to.to_tsecr, ms_cts)) 14231 to.to_tsecr = 0; 14232 } 14233 14234 /* 14235 * If its the first time in we need to take care of options and 14236 * verify we can do SACK for rack! 14237 */ 14238 if (rack->r_state == 0) { 14239 /* Should be init'd by rack_init() */ 14240 KASSERT(rack->rc_inp != NULL, 14241 ("%s: rack->rc_inp unexpectedly NULL", __func__)); 14242 if (rack->rc_inp == NULL) { 14243 rack->rc_inp = inp; 14244 } 14245 14246 /* 14247 * Process options only when we get SYN/ACK back. The SYN 14248 * case for incoming connections is handled in tcp_syncache. 14249 * According to RFC1323 the window field in a SYN (i.e., a 14250 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 14251 * this is traditional behavior, may need to be cleaned up. 14252 */ 14253 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 14254 /* Handle parallel SYN for ECN */ 14255 tcp_ecn_input_parallel_syn(tp, thflags, iptos); 14256 if ((to.to_flags & TOF_SCALE) && 14257 (tp->t_flags & TF_REQ_SCALE)) { 14258 tp->t_flags |= TF_RCVD_SCALE; 14259 tp->snd_scale = to.to_wscale; 14260 } else 14261 tp->t_flags &= ~TF_REQ_SCALE; 14262 /* 14263 * Initial send window. It will be updated with the 14264 * next incoming segment to the scaled value. 
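			 *
			 * Example per RFC 7323 semantics: a SYN-ACK that
			 * advertises a 65535-byte window with wscale 7 still
			 * sets snd_wnd = 65535 here, since the window field of
			 * a SYN is never scaled; only subsequent segments are
			 * shifted left by snd_scale.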
14265 */ 14266 tp->snd_wnd = th->th_win; 14267 rack_validate_fo_sendwin_up(tp, rack); 14268 if ((to.to_flags & TOF_TS) && 14269 (tp->t_flags & TF_REQ_TSTMP)) { 14270 tp->t_flags |= TF_RCVD_TSTMP; 14271 tp->ts_recent = to.to_tsval; 14272 tp->ts_recent_age = cts; 14273 } else 14274 tp->t_flags &= ~TF_REQ_TSTMP; 14275 if (to.to_flags & TOF_MSS) { 14276 tcp_mss(tp, to.to_mss); 14277 } 14278 if ((tp->t_flags & TF_SACK_PERMIT) && 14279 (to.to_flags & TOF_SACKPERM) == 0) 14280 tp->t_flags &= ~TF_SACK_PERMIT; 14281 if (IS_FASTOPEN(tp->t_flags)) { 14282 if (to.to_flags & TOF_FASTOPEN) { 14283 uint16_t mss; 14284 14285 if (to.to_flags & TOF_MSS) 14286 mss = to.to_mss; 14287 else 14288 if ((inp->inp_vflag & INP_IPV6) != 0) 14289 mss = TCP6_MSS; 14290 else 14291 mss = TCP_MSS; 14292 tcp_fastopen_update_cache(tp, mss, 14293 to.to_tfo_len, to.to_tfo_cookie); 14294 } else 14295 tcp_fastopen_disable_path(tp); 14296 } 14297 } 14298 /* 14299 * At this point we are at the initial call. Here we decide 14300 * if we are doing RACK or not. We do this by seeing if 14301 * TF_SACK_PERMIT is set and the sack-not-required is clear. 14302 * The code now does do dup-ack counting so if you don't 14303 * switch back you won't get rack & TLP, but you will still 14304 * get this stack. 14305 */ 14306 14307 if ((rack_sack_not_required == 0) && 14308 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { 14309 tcp_switch_back_to_default(tp); 14310 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 14311 tlen, iptos); 14312 #ifdef TCP_ACCOUNTING 14313 sched_unpin(); 14314 #endif 14315 return (1); 14316 } 14317 tcp_set_hpts(inp); 14318 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); 14319 } 14320 if (thflags & TH_FIN) 14321 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 14322 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 14323 if ((rack->rc_gp_dyn_mul) && 14324 (rack->use_fixed_rate == 0) && 14325 (rack->rc_always_pace)) { 14326 /* Check in on probertt */ 14327 rack_check_probe_rtt(rack, us_cts); 14328 } 14329 rack_clear_rate_sample(rack); 14330 if ((rack->forced_ack) && 14331 ((tcp_get_flags(th) & TH_RST) == 0)) { 14332 rack_handle_probe_response(rack, tiwin, us_cts); 14333 } 14334 /* 14335 * This is the one exception case where we set the rack state 14336 * always. All other times (timers etc) we must have a rack-state 14337 * set (so we assure we have done the checks above for SACK). 14338 */ 14339 rack->r_ctl.rc_rcvtime = cts; 14340 if (rack->r_state != tp->t_state) 14341 rack_set_state(tp, rack); 14342 if (SEQ_GT(th->th_ack, tp->snd_una) && 14343 (rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree)) != NULL) 14344 kern_prefetch(rsm, &prev_state); 14345 prev_state = rack->r_state; 14346 retval = (*rack->r_substate) (m, th, so, 14347 tp, &to, drop_hdrlen, 14348 tlen, tiwin, thflags, nxt_pkt, iptos); 14349 if (retval == 0) { 14350 /* 14351 * If retval is 1 the tcb is unlocked and most likely the tp 14352 * is gone. 14353 */ 14354 INP_WLOCK_ASSERT(inp); 14355 if ((rack->rc_gp_dyn_mul) && 14356 (rack->rc_always_pace) && 14357 (rack->use_fixed_rate == 0) && 14358 rack->in_probe_rtt && 14359 (rack->r_ctl.rc_time_probertt_starts == 0)) { 14360 /* 14361 * If we are going for target, lets recheck before 14362 * we output. 14363 */ 14364 rack_check_probe_rtt(rack, us_cts); 14365 } 14366 if (rack->set_pacing_done_a_iw == 0) { 14367 /* How much has been acked? 
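			 *
			 * Illustrative only: with a 1448-byte fixed maxseg the
			 * test below waits for roughly 14,480 bytes past iss
			 * to be cumulatively acked before declaring the
			 * initial window done and re-sizing the pacing burst
			 * via rack_set_pace_segments().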
*/ 14368 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { 14369 /* We have enough to set in the pacing segment size */ 14370 rack->set_pacing_done_a_iw = 1; 14371 rack_set_pace_segments(tp, rack, __LINE__, NULL); 14372 } 14373 } 14374 tcp_rack_xmit_timer_commit(rack, tp); 14375 #ifdef TCP_ACCOUNTING 14376 /* 14377 * If we set the ack_val_se to what ack processing we are doing 14378 * we also want to track how many cycles we burned. Note 14379 * the bits after tcp_output we let be "free". This is because 14380 * we are also tracking the tcp_output times as well. Note the 14381 * use of 0xf here since we only have 11 counter (0 - 0xa) and 14382 * 0xf cannot be returned and is what we initialize it too to 14383 * indicate we are not doing the tabulations. 14384 */ 14385 if (ack_val_set != 0xf) { 14386 uint64_t crtsc; 14387 14388 crtsc = get_cyclecount(); 14389 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14390 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 14391 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); 14392 } 14393 } 14394 #endif 14395 if (nxt_pkt == 0) { 14396 if ((rack->r_wanted_output != 0) || (rack->r_fast_output != 0)) { 14397 do_output_now: 14398 if (tcp_output(tp) < 0) 14399 return (1); 14400 did_out = 1; 14401 } 14402 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 14403 rack_free_trim(rack); 14404 } 14405 /* Update any rounds needed */ 14406 if (rack_verbose_logging && (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 14407 union tcp_log_stackspecific log; 14408 struct timeval tv; 14409 14410 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 14411 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14412 log.u_bbr.flex1 = high_seq; 14413 log.u_bbr.flex2 = rack->r_ctl.roundends; 14414 log.u_bbr.flex3 = rack->r_ctl.current_round; 14415 log.u_bbr.rttProp = (uint64_t)CC_ALGO(tp)->newround; 14416 log.u_bbr.flex8 = 9; 14417 tcp_log_event_(tp, NULL, NULL, NULL, BBR_LOG_CWND, 0, 14418 0, &log, false, NULL, NULL, 0, &tv); 14419 } 14420 /* 14421 * The draft (v3) calls for us to use SEQ_GEQ, but that 14422 * causes issues when we are just going app limited. Lets 14423 * instead use SEQ_GT <or> where its equal but more data 14424 * is outstanding. 14425 */ 14426 if ((SEQ_GT(tp->snd_una, rack->r_ctl.roundends)) || 14427 ((tp->snd_una == rack->r_ctl.roundends) && SEQ_GT(tp->snd_max, tp->snd_una))) { 14428 rack->r_ctl.current_round++; 14429 rack->r_ctl.roundends = tp->snd_max; 14430 if (CC_ALGO(tp)->newround != NULL) { 14431 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); 14432 } 14433 } 14434 if ((nxt_pkt == 0) && 14435 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 14436 (SEQ_GT(tp->snd_max, tp->snd_una) || 14437 (tp->t_flags & TF_DELACK) || 14438 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 14439 (tp->t_state <= TCPS_CLOSING)))) { 14440 /* We could not send (probably in the hpts but stopped the timer earlier)? 
*/ 14441 if ((tp->snd_max == tp->snd_una) && 14442 ((tp->t_flags & TF_DELACK) == 0) && 14443 (tcp_in_hpts(rack->rc_inp)) && 14444 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 14445 /* keep alive not needed if we are hptsi output yet */ 14446 ; 14447 } else { 14448 int late = 0; 14449 if (tcp_in_hpts(inp)) { 14450 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 14451 us_cts = tcp_get_usecs(NULL); 14452 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { 14453 rack->r_early = 1; 14454 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); 14455 } else 14456 late = 1; 14457 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 14458 } 14459 tcp_hpts_remove(inp); 14460 } 14461 if (late && (did_out == 0)) { 14462 /* 14463 * We are late in the sending 14464 * and we did not call the output 14465 * (this probably should not happen). 14466 */ 14467 goto do_output_now; 14468 } 14469 rack_start_hpts_timer(rack, tp, tcp_get_usecs(NULL), 0, 0, 0); 14470 } 14471 way_out = 1; 14472 } else if (nxt_pkt == 0) { 14473 /* Do we have the correct timer running? */ 14474 rack_timer_audit(tp, rack, &so->so_snd); 14475 way_out = 2; 14476 } 14477 done_with_input: 14478 rack_log_doseg_done(rack, cts, nxt_pkt, did_out, way_out, max(1, nsegs)); 14479 if (did_out) 14480 rack->r_wanted_output = 0; 14481 #ifdef TCP_ACCOUNTING 14482 } else { 14483 /* 14484 * Track the time (see above). 14485 */ 14486 if (ack_val_set != 0xf) { 14487 uint64_t crtsc; 14488 14489 crtsc = get_cyclecount(); 14490 counter_u64_add(tcp_proc_time[ack_val_set] , (crtsc - ts_val)); 14491 /* 14492 * Note we *DO NOT* increment the per-tcb counters since 14493 * in the else the TP may be gone!! 14494 */ 14495 } 14496 #endif 14497 } 14498 #ifdef TCP_ACCOUNTING 14499 sched_unpin(); 14500 #endif 14501 return (retval); 14502 } 14503 14504 void 14505 rack_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 14506 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 14507 { 14508 struct timeval tv; 14509 14510 /* First lets see if we have old packets */ 14511 if (tp->t_in_pkt) { 14512 if (ctf_do_queued_segments(so, tp, 1)) { 14513 m_freem(m); 14514 return; 14515 } 14516 } 14517 if (m->m_flags & M_TSTMP_LRO) { 14518 mbuf_tstmp2timeval(m, &tv); 14519 } else { 14520 /* Should not be should we kassert instead? */ 14521 tcp_get_usecs(&tv); 14522 } 14523 if (rack_do_segment_nounlock(m, th, so, tp, 14524 drop_hdrlen, tlen, iptos, 0, &tv) == 0) { 14525 INP_WUNLOCK(tptoinpcb(tp)); 14526 } 14527 } 14528 14529 struct rack_sendmap * 14530 tcp_rack_output(struct tcpcb *tp, struct tcp_rack *rack, uint32_t tsused) 14531 { 14532 struct rack_sendmap *rsm = NULL; 14533 int32_t idx; 14534 uint32_t srtt = 0, thresh = 0, ts_low = 0; 14535 14536 /* Return the next guy to be re-transmitted */ 14537 if (RB_EMPTY(&rack->r_ctl.rc_mtree)) { 14538 return (NULL); 14539 } 14540 if (tp->t_flags & TF_SENTFIN) { 14541 /* retran the end FIN? 
*/ 14542 return (NULL); 14543 } 14544 /* ok lets look at this one */ 14545 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 14546 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { 14547 return (rsm); 14548 } 14549 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { 14550 goto check_it; 14551 } 14552 rsm = rack_find_lowest_rsm(rack); 14553 if (rsm == NULL) { 14554 return (NULL); 14555 } 14556 check_it: 14557 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && 14558 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { 14559 /* 14560 * No sack so we automatically do the 3 strikes and 14561 * retransmit (no rack timer would be started). 14562 */ 14563 14564 return (rsm); 14565 } 14566 if (rsm->r_flags & RACK_ACKED) { 14567 return (NULL); 14568 } 14569 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && 14570 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { 14571 /* Its not yet ready */ 14572 return (NULL); 14573 } 14574 srtt = rack_grab_rtt(tp, rack); 14575 idx = rsm->r_rtr_cnt - 1; 14576 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; 14577 thresh = rack_calc_thresh_rack(rack, srtt, tsused); 14578 if ((tsused == ts_low) || 14579 (TSTMP_LT(tsused, ts_low))) { 14580 /* No time since sending */ 14581 return (NULL); 14582 } 14583 if ((tsused - ts_low) < thresh) { 14584 /* It has not been long enough yet */ 14585 return (NULL); 14586 } 14587 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 14588 ((rsm->r_flags & RACK_SACK_PASSED) && 14589 (rack->sack_attack_disable == 0))) { 14590 /* 14591 * We have passed the dup-ack threshold <or> 14592 * a SACK has indicated this is missing. 14593 * Note that if you are a declared attacker 14594 * it is only the dup-ack threshold that 14595 * will cause retransmits. 14596 */ 14597 /* log retransmit reason */ 14598 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); 14599 rack->r_fast_output = 0; 14600 return (rsm); 14601 } 14602 return (NULL); 14603 } 14604 14605 static void 14606 rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot, 14607 uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, 14608 int line, struct rack_sendmap *rsm, uint8_t quality) 14609 { 14610 if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 14611 union tcp_log_stackspecific log; 14612 struct timeval tv; 14613 14614 memset(&log, 0, sizeof(log)); 14615 log.u_bbr.flex1 = slot; 14616 log.u_bbr.flex2 = len; 14617 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; 14618 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; 14619 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; 14620 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; 14621 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; 14622 log.u_bbr.use_lt_bw <<= 1; 14623 log.u_bbr.use_lt_bw |= rack->r_late; 14624 log.u_bbr.use_lt_bw <<= 1; 14625 log.u_bbr.use_lt_bw |= rack->r_early; 14626 log.u_bbr.use_lt_bw <<= 1; 14627 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; 14628 log.u_bbr.use_lt_bw <<= 1; 14629 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; 14630 log.u_bbr.use_lt_bw <<= 1; 14631 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; 14632 log.u_bbr.use_lt_bw <<= 1; 14633 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; 14634 log.u_bbr.use_lt_bw <<= 1; 14635 log.u_bbr.use_lt_bw |= rack->gp_ready; 14636 log.u_bbr.pkt_epoch = line; 14637 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; 14638 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; 14639 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; 14640 log.u_bbr.bw_inuse = bw_est; 14641 log.u_bbr.delRate = bw; 14642 if (rack->r_ctl.gp_bw == 0) 14643 log.u_bbr.cur_del_rate = 0; 
14644 else 14645 log.u_bbr.cur_del_rate = rack_get_bw(rack); 14646 log.u_bbr.rttProp = len_time; 14647 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; 14648 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; 14649 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 14650 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { 14651 /* We are in slow start */ 14652 log.u_bbr.flex7 = 1; 14653 } else { 14654 /* we are on congestion avoidance */ 14655 log.u_bbr.flex7 = 0; 14656 } 14657 log.u_bbr.flex8 = method; 14658 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 14659 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14660 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; 14661 log.u_bbr.cwnd_gain <<= 1; 14662 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; 14663 log.u_bbr.cwnd_gain <<= 1; 14664 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; 14665 log.u_bbr.bbr_substate = quality; 14666 TCP_LOG_EVENTP(rack->rc_tp, NULL, 14667 &rack->rc_inp->inp_socket->so_rcv, 14668 &rack->rc_inp->inp_socket->so_snd, 14669 BBR_LOG_HPTSI_CALC, 0, 14670 0, &log, false, &tv); 14671 } 14672 } 14673 14674 static uint32_t 14675 rack_get_pacing_len(struct tcp_rack *rack, uint64_t bw, uint32_t mss) 14676 { 14677 uint32_t new_tso, user_max; 14678 14679 user_max = rack->rc_user_set_max_segs * mss; 14680 if (rack->rc_force_max_seg) { 14681 return (user_max); 14682 } 14683 if (rack->use_fixed_rate && 14684 ((rack->r_ctl.crte == NULL) || 14685 (bw != rack->r_ctl.crte->rate))) { 14686 /* Use the user mss since we are not exactly matched */ 14687 return (user_max); 14688 } 14689 new_tso = tcp_get_pacing_burst_size(rack->rc_tp, bw, mss, rack_pace_one_seg, rack->r_ctl.crte, NULL); 14690 if (new_tso > user_max) 14691 new_tso = user_max; 14692 return (new_tso); 14693 } 14694 14695 static int32_t 14696 pace_to_fill_cwnd(struct tcp_rack *rack, int32_t slot, uint32_t len, uint32_t segsiz, int *capped, uint64_t *rate_wanted, uint8_t non_paced) 14697 { 14698 uint64_t lentim, fill_bw; 14699 14700 /* Lets first see if we are full, if so continue with normal rate */ 14701 rack->r_via_fill_cw = 0; 14702 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) 14703 return (slot); 14704 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) 14705 return (slot); 14706 if (rack->r_ctl.rc_last_us_rtt == 0) 14707 return (slot); 14708 if (rack->rc_pace_fill_if_rttin_range && 14709 (rack->r_ctl.rc_last_us_rtt >= 14710 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { 14711 /* The rtt is huge, N * smallest, lets not fill */ 14712 return (slot); 14713 } 14714 /* 14715 * first lets calculate the b/w based on the last us-rtt 14716 * and the sndwnd. 14717 */ 14718 fill_bw = rack->r_ctl.cwnd_to_use; 14719 /* Take the rwnd if its smaller */ 14720 if (fill_bw > rack->rc_tp->snd_wnd) 14721 fill_bw = rack->rc_tp->snd_wnd; 14722 if (rack->r_fill_less_agg) { 14723 /* 14724 * Now take away the inflight (this will reduce our 14725 * aggressiveness and yeah, if we get that much out in 1RTT 14726 * we will have had acks come back and still be behind). 
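		 *
		 * Rough numbers as an illustration (ignoring the inflight
		 * subtraction just described): with min(cwnd_to_use, snd_wnd)
		 * = 87380 bytes and a last_us_rtt of 20000 us, fill_bw becomes
		 * 87380 * 1000000 / 20000, about 4.37 MB/s, and a 14480 byte
		 * send would be slotted at roughly 14480 * 1000000 / 4369000 =
		 * ~3314 us if that beats the normal pacing slot.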
14727 */ 14728 fill_bw -= ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 14729 } 14730 /* Now lets make it into a b/w */ 14731 fill_bw *= (uint64_t)HPTS_USEC_IN_SEC; 14732 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; 14733 /* We are below the min b/w */ 14734 if (non_paced) 14735 *rate_wanted = fill_bw; 14736 if ((fill_bw < RACK_MIN_BW) || (fill_bw < *rate_wanted)) 14737 return (slot); 14738 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) 14739 fill_bw = rack->r_ctl.bw_rate_cap; 14740 rack->r_via_fill_cw = 1; 14741 if (rack->r_rack_hw_rate_caps && 14742 (rack->r_ctl.crte != NULL)) { 14743 uint64_t high_rate; 14744 14745 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); 14746 if (fill_bw > high_rate) { 14747 /* We are capping bw at the highest rate table entry */ 14748 if (*rate_wanted > high_rate) { 14749 /* The original rate was also capped */ 14750 rack->r_via_fill_cw = 0; 14751 } 14752 rack_log_hdwr_pacing(rack, 14753 fill_bw, high_rate, __LINE__, 14754 0, 3); 14755 fill_bw = high_rate; 14756 if (capped) 14757 *capped = 1; 14758 } 14759 } else if ((rack->r_ctl.crte == NULL) && 14760 (rack->rack_hdrw_pacing == 0) && 14761 (rack->rack_hdw_pace_ena) && 14762 rack->r_rack_hw_rate_caps && 14763 (rack->rack_attempt_hdwr_pace == 0) && 14764 (rack->rc_inp->inp_route.ro_nh != NULL) && 14765 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14766 /* 14767 * Ok we may have a first attempt that is greater than our top rate 14768 * lets check. 14769 */ 14770 uint64_t high_rate; 14771 14772 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); 14773 if (high_rate) { 14774 if (fill_bw > high_rate) { 14775 fill_bw = high_rate; 14776 if (capped) 14777 *capped = 1; 14778 } 14779 } 14780 } 14781 /* 14782 * Ok fill_bw holds our mythical b/w to fill the cwnd 14783 * in a rtt, what does that time wise equate too? 14784 */ 14785 lentim = (uint64_t)(len) * (uint64_t)HPTS_USEC_IN_SEC; 14786 lentim /= fill_bw; 14787 *rate_wanted = fill_bw; 14788 if (non_paced || (lentim < slot)) { 14789 rack_log_pacing_delay_calc(rack, len, slot, fill_bw, 14790 0, lentim, 12, __LINE__, NULL, 0); 14791 return ((int32_t)lentim); 14792 } else 14793 return (slot); 14794 } 14795 14796 static int32_t 14797 rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len, struct rack_sendmap *rsm, uint32_t segsiz) 14798 { 14799 uint64_t srtt; 14800 int32_t slot = 0; 14801 int can_start_hw_pacing = 1; 14802 int err; 14803 14804 if (rack->rc_always_pace == 0) { 14805 /* 14806 * We use the most optimistic possible cwnd/srtt for 14807 * sending calculations. This will make our 14808 * calculation anticipate getting more through 14809 * quicker then possible. But thats ok we don't want 14810 * the peer to have a gap in data sending. 14811 */ 14812 uint64_t cwnd, tr_perms = 0; 14813 int32_t reduce = 0; 14814 14815 old_method: 14816 /* 14817 * We keep no precise pacing with the old method 14818 * instead we use the pacer to mitigate bursts. 
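		 *
		 * As a sketch with made-up values: srtt = 40000 us and
		 * cwnd = 50000 bytes give tr_perms = 50000 * 1000 / 40000 =
		 * 1250 bytes per millisecond, so a 14480 byte send computes
		 * slot = 11 ms, which is then trimmed by slot /
		 * rack_slot_reduction and converted to microseconds with
		 * HPTS_USEC_IN_MSEC.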
14819 */ 14820 if (rack->r_ctl.rc_rack_min_rtt) 14821 srtt = rack->r_ctl.rc_rack_min_rtt; 14822 else 14823 srtt = max(tp->t_srtt, 1); 14824 if (rack->r_ctl.rc_rack_largest_cwnd) 14825 cwnd = rack->r_ctl.rc_rack_largest_cwnd; 14826 else 14827 cwnd = rack->r_ctl.cwnd_to_use; 14828 /* Inflate cwnd by 1000 so srtt of usecs is in ms */ 14829 tr_perms = (cwnd * 1000) / srtt; 14830 if (tr_perms == 0) { 14831 tr_perms = ctf_fixed_maxseg(tp); 14832 } 14833 /* 14834 * Calculate how long this will take to drain, if 14835 * the calculation comes out to zero, thats ok we 14836 * will use send_a_lot to possibly spin around for 14837 * more increasing tot_len_this_send to the point 14838 * that its going to require a pace, or we hit the 14839 * cwnd. Which in that case we are just waiting for 14840 * a ACK. 14841 */ 14842 slot = len / tr_perms; 14843 /* Now do we reduce the time so we don't run dry? */ 14844 if (slot && rack_slot_reduction) { 14845 reduce = (slot / rack_slot_reduction); 14846 if (reduce < slot) { 14847 slot -= reduce; 14848 } else 14849 slot = 0; 14850 } 14851 slot *= HPTS_USEC_IN_MSEC; 14852 if (rack->rc_pace_to_cwnd) { 14853 uint64_t rate_wanted = 0; 14854 14855 slot = pace_to_fill_cwnd(rack, slot, len, segsiz, NULL, &rate_wanted, 1); 14856 rack->rc_ack_can_sendout_data = 1; 14857 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, 0, 0, 14, __LINE__, NULL, 0); 14858 } else 14859 rack_log_pacing_delay_calc(rack, len, slot, tr_perms, reduce, 0, 7, __LINE__, NULL, 0); 14860 } else { 14861 uint64_t bw_est, res, lentim, rate_wanted; 14862 uint32_t orig_val, segs, oh; 14863 int capped = 0; 14864 int prev_fill; 14865 14866 if ((rack->r_rr_config == 1) && rsm) { 14867 return (rack->r_ctl.rc_min_to); 14868 } 14869 if (rack->use_fixed_rate) { 14870 rate_wanted = bw_est = rack_get_fixed_pacing_bw(rack); 14871 } else if ((rack->r_ctl.init_rate == 0) && 14872 #ifdef NETFLIX_PEAKRATE 14873 (rack->rc_tp->t_maxpeakrate == 0) && 14874 #endif 14875 (rack->r_ctl.gp_bw == 0)) { 14876 /* no way to yet do an estimate */ 14877 bw_est = rate_wanted = 0; 14878 } else { 14879 bw_est = rack_get_bw(rack); 14880 rate_wanted = rack_get_output_bw(rack, bw_est, rsm, &capped); 14881 } 14882 if ((bw_est == 0) || (rate_wanted == 0) || 14883 ((rack->gp_ready == 0) && (rack->use_fixed_rate == 0))) { 14884 /* 14885 * No way yet to make a b/w estimate or 14886 * our raise is set incorrectly. 14887 */ 14888 goto old_method; 14889 } 14890 /* We need to account for all the overheads */ 14891 segs = (len + segsiz - 1) / segsiz; 14892 /* 14893 * We need the diff between 1514 bytes (e-mtu with e-hdr) 14894 * and how much data we put in each packet. Yes this 14895 * means we may be off if we are larger than 1500 bytes 14896 * or smaller. But this just makes us more conservative. 14897 */ 14898 if (rack_hw_rate_min && 14899 (bw_est < rack_hw_rate_min)) 14900 can_start_hw_pacing = 0; 14901 if (ETHERNET_SEGMENT_SIZE > segsiz) 14902 oh = ETHERNET_SEGMENT_SIZE - segsiz; 14903 else 14904 oh = 0; 14905 segs *= oh; 14906 lentim = (uint64_t)(len + segs) * (uint64_t)HPTS_USEC_IN_SEC; 14907 res = lentim / rate_wanted; 14908 slot = (uint32_t)res; 14909 orig_val = rack->r_ctl.rc_pace_max_segs; 14910 if (rack->r_ctl.crte == NULL) { 14911 /* 14912 * Only do this if we are not hardware pacing 14913 * since if we are doing hw-pacing below we will 14914 * set make a call after setting up or changing 14915 * the rate. 
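 *
 * For reference, the overhead-adjusted delay computed a few lines above
 * works out roughly as follows (hypothetical numbers, and taking
 * ETHERNET_SEGMENT_SIZE as the 1514 bytes mentioned in the comment):
 *
 *	len = 43800 bytes (30 segments of 1460), segsiz = 1460
 *	oh  = 1514 - 1460 = 54 bytes of estimated per-packet overhead
 *	rate_wanted = 12500000 bytes/sec (100 Mbps)
 *
 *	slot = (len + segs * oh) * HPTS_USEC_IN_SEC / rate_wanted
 *	     = (43800 + 30 * 54) * 1000000 / 12500000 = 3633 usec
 *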
14916 */ 14917 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 14918 } else if (rack->rc_inp->inp_snd_tag == NULL) { 14919 /* 14920 * We lost our rate somehow, this can happen 14921 * if the interface changed underneath us. 14922 */ 14923 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 14924 rack->r_ctl.crte = NULL; 14925 /* Lets re-allow attempting to setup pacing */ 14926 rack->rack_hdrw_pacing = 0; 14927 rack->rack_attempt_hdwr_pace = 0; 14928 rack_log_hdwr_pacing(rack, 14929 rate_wanted, bw_est, __LINE__, 14930 0, 6); 14931 } 14932 /* Did we change the TSO size, if so log it */ 14933 if (rack->r_ctl.rc_pace_max_segs != orig_val) 14934 rack_log_pacing_delay_calc(rack, len, slot, orig_val, 0, 0, 15, __LINE__, NULL, 0); 14935 prev_fill = rack->r_via_fill_cw; 14936 if ((rack->rc_pace_to_cwnd) && 14937 (capped == 0) && 14938 (rack->use_fixed_rate == 0) && 14939 (rack->in_probe_rtt == 0) && 14940 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { 14941 /* 14942 * We want to pace at our rate *or* faster to 14943 * fill the cwnd to the max if its not full. 14944 */ 14945 slot = pace_to_fill_cwnd(rack, slot, (len+segs), segsiz, &capped, &rate_wanted, 0); 14946 } 14947 if ((rack->rc_inp->inp_route.ro_nh != NULL) && 14948 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { 14949 if ((rack->rack_hdw_pace_ena) && 14950 (can_start_hw_pacing > 0) && 14951 (rack->rack_hdrw_pacing == 0) && 14952 (rack->rack_attempt_hdwr_pace == 0)) { 14953 /* 14954 * Lets attempt to turn on hardware pacing 14955 * if we can. 14956 */ 14957 rack->rack_attempt_hdwr_pace = 1; 14958 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, 14959 rack->rc_inp->inp_route.ro_nh->nh_ifp, 14960 rate_wanted, 14961 RS_PACING_GEQ, 14962 &err, &rack->r_ctl.crte_prev_rate); 14963 if (rack->r_ctl.crte) { 14964 rack->rack_hdrw_pacing = 1; 14965 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, segsiz, 14966 0, rack->r_ctl.crte, 14967 NULL); 14968 rack_log_hdwr_pacing(rack, 14969 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 14970 err, 0); 14971 rack->r_ctl.last_hw_bw_req = rate_wanted; 14972 } else { 14973 counter_u64_add(rack_hw_pace_init_fail, 1); 14974 } 14975 } else if (rack->rack_hdrw_pacing && 14976 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { 14977 /* Do we need to adjust our rate? */ 14978 const struct tcp_hwrate_limit_table *nrte; 14979 14980 if (rack->r_up_only && 14981 (rate_wanted < rack->r_ctl.crte->rate)) { 14982 /** 14983 * We have four possible states here 14984 * having to do with the previous time 14985 * and this time. 14986 * previous | this-time 14987 * A) 0 | 0 -- fill_cw not in the picture 14988 * B) 1 | 0 -- we were doing a fill-cw but now are not 14989 * C) 1 | 1 -- all rates from fill_cw 14990 * D) 0 | 1 -- we were doing non-fill and now we are filling 14991 * 14992 * For case A, C and D we don't allow a drop. But for 14993 * case B where we now our on our steady rate we do 14994 * allow a drop. 14995 * 14996 */ 14997 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) 14998 goto done_w_hdwr; 14999 } 15000 if ((rate_wanted > rack->r_ctl.crte->rate) || 15001 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { 15002 if (rack_hw_rate_to_low && 15003 (bw_est < rack_hw_rate_to_low)) { 15004 /* 15005 * The pacing rate is too low for hardware, but 15006 * do allow hardware pacing to be restarted. 
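 *
 * Restating the r_up_only check from the state table above as a sketch:
 * when r_up_only is set and rate_wanted is below the current hardware
 * rate, the drop is only honoured in case B, i.e.
 *
 *	allow_drop = (prev_fill == 1) && (rack->r_via_fill_cw == 0);
 *	if (!allow_drop)
 *		goto done_w_hdwr;	/* cases A, C and D keep the rate */
 *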
15007 */ 15008 rack_log_hdwr_pacing(rack, 15009 bw_est, rack->r_ctl.crte->rate, __LINE__, 15010 0, 5); 15011 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); 15012 rack->r_ctl.crte = NULL; 15013 rack->rack_attempt_hdwr_pace = 0; 15014 rack->rack_hdrw_pacing = 0; 15015 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15016 goto done_w_hdwr; 15017 } 15018 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, 15019 rack->rc_tp, 15020 rack->rc_inp->inp_route.ro_nh->nh_ifp, 15021 rate_wanted, 15022 RS_PACING_GEQ, 15023 &err, &rack->r_ctl.crte_prev_rate); 15024 if (nrte == NULL) { 15025 /* Lost the rate */ 15026 rack->rack_hdrw_pacing = 0; 15027 rack->r_ctl.crte = NULL; 15028 rack_log_hdwr_pacing(rack, 15029 rate_wanted, 0, __LINE__, 15030 err, 1); 15031 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15032 counter_u64_add(rack_hw_pace_lost, 1); 15033 } else if (nrte != rack->r_ctl.crte) { 15034 rack->r_ctl.crte = nrte; 15035 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size(tp, rate_wanted, 15036 segsiz, 0, 15037 rack->r_ctl.crte, 15038 NULL); 15039 rack_log_hdwr_pacing(rack, 15040 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15041 err, 2); 15042 rack->r_ctl.last_hw_bw_req = rate_wanted; 15043 } 15044 } else { 15045 /* We just need to adjust the segment size */ 15046 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); 15047 rack_log_hdwr_pacing(rack, 15048 rate_wanted, rack->r_ctl.crte->rate, __LINE__, 15049 0, 4); 15050 rack->r_ctl.last_hw_bw_req = rate_wanted; 15051 } 15052 } 15053 } 15054 if ((rack->r_ctl.crte != NULL) && 15055 (rack->r_ctl.crte->rate == rate_wanted)) { 15056 /* 15057 * We need to add a extra if the rates 15058 * are exactly matched. The idea is 15059 * we want the software to make sure the 15060 * queue is empty before adding more, this 15061 * gives us N MSS extra pace times where 15062 * N is our sysctl 15063 */ 15064 slot += (rack->r_ctl.crte->time_between * rack_hw_pace_extra_slots); 15065 } 15066 done_w_hdwr: 15067 if (rack_limit_time_with_srtt && 15068 (rack->use_fixed_rate == 0) && 15069 #ifdef NETFLIX_PEAKRATE 15070 (rack->rc_tp->t_maxpeakrate == 0) && 15071 #endif 15072 (rack->rack_hdrw_pacing == 0)) { 15073 /* 15074 * Sanity check, we do not allow the pacing delay 15075 * to be longer than the SRTT of the path. If it is 15076 * a slow path, then adding a packet should increase 15077 * the RTT and compensate for this i.e. the srtt will 15078 * be greater so the allowed pacing time will be greater. 15079 * 15080 * Note this restriction is not for where a peak rate 15081 * is set, we are doing fixed pacing or hardware pacing. 15082 */ 15083 if (rack->rc_tp->t_srtt) 15084 srtt = rack->rc_tp->t_srtt; 15085 else 15086 srtt = RACK_INITIAL_RTO * HPTS_USEC_IN_MSEC; /* its in ms convert */ 15087 if (srtt < (uint64_t)slot) { 15088 rack_log_pacing_delay_calc(rack, srtt, slot, rate_wanted, bw_est, lentim, 99, __LINE__, NULL, 0); 15089 slot = srtt; 15090 } 15091 } 15092 rack_log_pacing_delay_calc(rack, len, slot, rate_wanted, bw_est, lentim, 2, __LINE__, rsm, 0); 15093 } 15094 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { 15095 /* 15096 * If this rate is seeing enobufs when it 15097 * goes to send then either the nic is out 15098 * of gas or we are mis-estimating the time 15099 * somehow and not letting the queue empty 15100 * completely. Lets add to the pacing time. 
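 *
 * The boost applied just below is the per-send gap of the hardware rate
 * entry, scaled and clamped by the enobuf sysctls. With hypothetical
 * values (not the defaults):
 *
 *	crte->time_between = 2000 usec, rack_enobuf_hw_boost_mult = 2
 *	hw_boost_delay = 2000 * 2 = 4000 usec
 *	(clamped into [rack_enobuf_hw_min, rack_enobuf_hw_max]), then
 *	slot += hw_boost_delay;
 *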
15101 */ 15102 int hw_boost_delay; 15103 15104 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; 15105 if (hw_boost_delay > rack_enobuf_hw_max) 15106 hw_boost_delay = rack_enobuf_hw_max; 15107 else if (hw_boost_delay < rack_enobuf_hw_min) 15108 hw_boost_delay = rack_enobuf_hw_min; 15109 slot += hw_boost_delay; 15110 } 15111 return (slot); 15112 } 15113 15114 static void 15115 rack_start_gp_measurement(struct tcpcb *tp, struct tcp_rack *rack, 15116 tcp_seq startseq, uint32_t sb_offset) 15117 { 15118 struct rack_sendmap *my_rsm = NULL; 15119 struct rack_sendmap fe; 15120 15121 if (tp->t_state < TCPS_ESTABLISHED) { 15122 /* 15123 * We don't start any measurements if we are 15124 * not at least established. 15125 */ 15126 return; 15127 } 15128 if (tp->t_state >= TCPS_FIN_WAIT_1) { 15129 /* 15130 * We will get no more data into the SB 15131 * this means we need to have the data available 15132 * before we start a measurement. 15133 */ 15134 15135 if (sbavail(&tptosocket(tp)->so_snd) < 15136 max(rc_init_window(rack), 15137 (MIN_GP_WIN * ctf_fixed_maxseg(tp)))) { 15138 /* Nope not enough data */ 15139 return; 15140 } 15141 } 15142 tp->t_flags |= TF_GPUTINPROG; 15143 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; 15144 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; 15145 tp->gput_seq = startseq; 15146 rack->app_limited_needs_set = 0; 15147 if (rack->in_probe_rtt) 15148 rack->measure_saw_probe_rtt = 1; 15149 else if ((rack->measure_saw_probe_rtt) && 15150 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) 15151 rack->measure_saw_probe_rtt = 0; 15152 if (rack->rc_gp_filled) 15153 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); 15154 else { 15155 /* Special case initial measurement */ 15156 struct timeval tv; 15157 15158 tp->gput_ts = tcp_get_usecs(&tv); 15159 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15160 } 15161 /* 15162 * We take a guess out into the future, 15163 * if we have no measurement and no 15164 * initial rate, we measure the first 15165 * initial-windows worth of data to 15166 * speed up getting some GP measurement and 15167 * thus start pacing. 15168 */ 15169 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { 15170 rack->app_limited_needs_set = 1; 15171 tp->gput_ack = startseq + max(rc_init_window(rack), 15172 (MIN_GP_WIN * ctf_fixed_maxseg(tp))); 15173 rack_log_pacing_delay_calc(rack, 15174 tp->gput_seq, 15175 tp->gput_ack, 15176 0, 15177 tp->gput_ts, 15178 rack->r_ctl.rc_app_limited_cnt, 15179 9, 15180 __LINE__, NULL, 0); 15181 return; 15182 } 15183 if (sb_offset) { 15184 /* 15185 * We are out somewhere in the sb 15186 * can we use the already outstanding data? 15187 */ 15188 if (rack->r_ctl.rc_app_limited_cnt == 0) { 15189 /* 15190 * Yes first one is good and in this case 15191 * the tp->gput_ts is correctly set based on 15192 * the last ack that arrived (no need to 15193 * set things up when an ack comes in). 15194 */ 15195 my_rsm = RB_MIN(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 15196 if ((my_rsm == NULL) || 15197 (my_rsm->r_rtr_cnt != 1)) { 15198 /* retransmission? */ 15199 goto use_latest; 15200 } 15201 } else { 15202 if (rack->r_ctl.rc_first_appl == NULL) { 15203 /* 15204 * If rc_first_appl is NULL 15205 * then the cnt should be 0. 15206 * This is probably an error, maybe 15207 * a KASSERT would be approprate. 
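 *
 * Such an assertion is not present today; a sketch of what it might
 * look like, if it were added here:
 *
 *	KASSERT(rack->r_ctl.rc_first_appl != NULL,
 *	    ("%s: app_limited_cnt %u but rc_first_appl is NULL",
 *	     __func__, rack->r_ctl.rc_app_limited_cnt));
 *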
15208 */ 15209 goto use_latest; 15210 } 15211 /* 15212 * If we have a marker pointer to the last one that is 15213 * app limited we can use that, but we need to set 15214 * things up so that when it gets ack'ed we record 15215 * the ack time (if its not already acked). 15216 */ 15217 rack->app_limited_needs_set = 1; 15218 /* 15219 * We want to get to the rsm that is either 15220 * next with space i.e. over 1 MSS or the one 15221 * after that (after the app-limited). 15222 */ 15223 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15224 rack->r_ctl.rc_first_appl); 15225 if (my_rsm) { 15226 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) 15227 /* Have to use the next one */ 15228 my_rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, 15229 my_rsm); 15230 else { 15231 /* Use after the first MSS of it is acked */ 15232 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); 15233 goto start_set; 15234 } 15235 } 15236 if ((my_rsm == NULL) || 15237 (my_rsm->r_rtr_cnt != 1)) { 15238 /* 15239 * Either its a retransmit or 15240 * the last is the app-limited one. 15241 */ 15242 goto use_latest; 15243 } 15244 } 15245 tp->gput_seq = my_rsm->r_start; 15246 start_set: 15247 if (my_rsm->r_flags & RACK_ACKED) { 15248 /* 15249 * This one has been acked use the arrival ack time 15250 */ 15251 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15252 rack->app_limited_needs_set = 0; 15253 } 15254 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15255 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); 15256 rack_log_pacing_delay_calc(rack, 15257 tp->gput_seq, 15258 tp->gput_ack, 15259 (uint64_t)my_rsm, 15260 tp->gput_ts, 15261 rack->r_ctl.rc_app_limited_cnt, 15262 9, 15263 __LINE__, NULL, 0); 15264 return; 15265 } 15266 15267 use_latest: 15268 /* 15269 * We don't know how long we may have been 15270 * idle or if this is the first-send. Lets 15271 * setup the flag so we will trim off 15272 * the first ack'd data so we get a true 15273 * measurement. 15274 */ 15275 rack->app_limited_needs_set = 1; 15276 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); 15277 /* Find this guy so we can pull the send time */ 15278 fe.r_start = startseq; 15279 my_rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 15280 if (my_rsm) { 15281 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[(my_rsm->r_rtr_cnt-1)]; 15282 if (my_rsm->r_flags & RACK_ACKED) { 15283 /* 15284 * Unlikely since its probably what was 15285 * just transmitted (but I am paranoid). 15286 */ 15287 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; 15288 rack->app_limited_needs_set = 0; 15289 } 15290 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { 15291 /* This also is unlikely */ 15292 tp->gput_seq = my_rsm->r_start; 15293 } 15294 } else { 15295 /* 15296 * TSNH unless we have some send-map limit, 15297 * and even at that it should not be hitting 15298 * that limit (we should have stopped sending). 
15299 */ 15300 struct timeval tv; 15301 15302 microuptime(&tv); 15303 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); 15304 } 15305 rack_log_pacing_delay_calc(rack, 15306 tp->gput_seq, 15307 tp->gput_ack, 15308 (uint64_t)my_rsm, 15309 tp->gput_ts, 15310 rack->r_ctl.rc_app_limited_cnt, 15311 9, __LINE__, NULL, 0); 15312 } 15313 15314 static inline uint32_t 15315 rack_what_can_we_send(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cwnd_to_use, 15316 uint32_t avail, int32_t sb_offset) 15317 { 15318 uint32_t len; 15319 uint32_t sendwin; 15320 15321 if (tp->snd_wnd > cwnd_to_use) 15322 sendwin = cwnd_to_use; 15323 else 15324 sendwin = tp->snd_wnd; 15325 if (ctf_outstanding(tp) >= tp->snd_wnd) { 15326 /* We never want to go over our peers rcv-window */ 15327 len = 0; 15328 } else { 15329 uint32_t flight; 15330 15331 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); 15332 if (flight >= sendwin) { 15333 /* 15334 * We have in flight what we are allowed by cwnd (if 15335 * it was rwnd blocking it would have hit above out 15336 * >= tp->snd_wnd). 15337 */ 15338 return (0); 15339 } 15340 len = sendwin - flight; 15341 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 15342 /* We would send too much (beyond the rwnd) */ 15343 len = tp->snd_wnd - ctf_outstanding(tp); 15344 } 15345 if ((len + sb_offset) > avail) { 15346 /* 15347 * We don't have that much in the SB, how much is 15348 * there? 15349 */ 15350 len = avail - sb_offset; 15351 } 15352 } 15353 return (len); 15354 } 15355 15356 static void 15357 rack_log_fsb(struct tcp_rack *rack, struct tcpcb *tp, struct socket *so, uint32_t flags, 15358 unsigned ipoptlen, int32_t orig_len, int32_t len, int error, 15359 int rsm_is_null, int optlen, int line, uint16_t mode) 15360 { 15361 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15362 union tcp_log_stackspecific log; 15363 struct timeval tv; 15364 15365 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15366 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15367 log.u_bbr.flex1 = error; 15368 log.u_bbr.flex2 = flags; 15369 log.u_bbr.flex3 = rsm_is_null; 15370 log.u_bbr.flex4 = ipoptlen; 15371 log.u_bbr.flex5 = tp->rcv_numsacks; 15372 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15373 log.u_bbr.flex7 = optlen; 15374 log.u_bbr.flex8 = rack->r_fsb_inited; 15375 log.u_bbr.applimited = rack->r_fast_output; 15376 log.u_bbr.bw_inuse = rack_get_bw(rack); 15377 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15378 log.u_bbr.cwnd_gain = mode; 15379 log.u_bbr.pkts_out = orig_len; 15380 log.u_bbr.lt_epoch = len; 15381 log.u_bbr.delivered = line; 15382 log.u_bbr.timeStamp = tcp_get_usecs(&tv); 15383 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15384 tcp_log_event_(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, 15385 len, &log, false, NULL, NULL, 0, &tv); 15386 } 15387 } 15388 15389 15390 static struct mbuf * 15391 rack_fo_base_copym(struct mbuf *the_m, uint32_t the_off, int32_t *plen, 15392 struct rack_fast_send_blk *fsb, 15393 int32_t seglimit, int32_t segsize, int hw_tls) 15394 { 15395 #ifdef KERN_TLS 15396 struct ktls_session *tls, *ntls; 15397 #ifdef INVARIANTS 15398 struct mbuf *start; 15399 #endif 15400 #endif 15401 struct mbuf *m, *n, **np, *smb; 15402 struct mbuf *top; 15403 int32_t off, soff; 15404 int32_t len = *plen; 15405 int32_t fragsize; 15406 int32_t len_cp = 0; 15407 uint32_t mlen, frags; 15408 15409 soff = off = the_off; 15410 smb = m = the_m; 15411 np = ⊤ 15412 top = NULL; 15413 #ifdef KERN_TLS 15414 if (hw_tls && (m->m_flags & M_EXTPG)) 15415 tls = m->m_epg_tls; 
15416 else 15417 tls = NULL; 15418 #ifdef INVARIANTS 15419 start = m; 15420 #endif 15421 #endif 15422 while (len > 0) { 15423 if (m == NULL) { 15424 *plen = len_cp; 15425 break; 15426 } 15427 #ifdef KERN_TLS 15428 if (hw_tls) { 15429 if (m->m_flags & M_EXTPG) 15430 ntls = m->m_epg_tls; 15431 else 15432 ntls = NULL; 15433 15434 /* 15435 * Avoid mixing TLS records with handshake 15436 * data or TLS records from different 15437 * sessions. 15438 */ 15439 if (tls != ntls) { 15440 MPASS(m != start); 15441 *plen = len_cp; 15442 break; 15443 } 15444 } 15445 #endif 15446 mlen = min(len, m->m_len - off); 15447 if (seglimit) { 15448 /* 15449 * For M_EXTPG mbufs, add 3 segments 15450 * + 1 in case we are crossing page boundaries 15451 * + 2 in case the TLS hdr/trailer are used 15452 * It is cheaper to just add the segments 15453 * than it is to take the cache miss to look 15454 * at the mbuf ext_pgs state in detail. 15455 */ 15456 if (m->m_flags & M_EXTPG) { 15457 fragsize = min(segsize, PAGE_SIZE); 15458 frags = 3; 15459 } else { 15460 fragsize = segsize; 15461 frags = 0; 15462 } 15463 15464 /* Break if we really can't fit anymore. */ 15465 if ((frags + 1) >= seglimit) { 15466 *plen = len_cp; 15467 break; 15468 } 15469 15470 /* 15471 * Reduce size if you can't copy the whole 15472 * mbuf. If we can't copy the whole mbuf, also 15473 * adjust len so the loop will end after this 15474 * mbuf. 15475 */ 15476 if ((frags + howmany(mlen, fragsize)) >= seglimit) { 15477 mlen = (seglimit - frags - 1) * fragsize; 15478 len = mlen; 15479 *plen = len_cp + len; 15480 } 15481 frags += howmany(mlen, fragsize); 15482 if (frags == 0) 15483 frags++; 15484 seglimit -= frags; 15485 KASSERT(seglimit > 0, 15486 ("%s: seglimit went too low", __func__)); 15487 } 15488 n = m_get(M_NOWAIT, m->m_type); 15489 *np = n; 15490 if (n == NULL) 15491 goto nospace; 15492 n->m_len = mlen; 15493 soff += mlen; 15494 len_cp += n->m_len; 15495 if (m->m_flags & (M_EXT|M_EXTPG)) { 15496 n->m_data = m->m_data + off; 15497 mb_dupcl(n, m); 15498 } else { 15499 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 15500 (u_int)n->m_len); 15501 } 15502 len -= n->m_len; 15503 off = 0; 15504 m = m->m_next; 15505 np = &n->m_next; 15506 if (len || (soff == smb->m_len)) { 15507 /* 15508 * We have more so we move forward or 15509 * we have consumed the entire mbuf and 15510 * len has fell to 0. 15511 */ 15512 soff = 0; 15513 smb = m; 15514 } 15515 15516 } 15517 if (fsb != NULL) { 15518 fsb->m = smb; 15519 fsb->off = soff; 15520 if (smb) { 15521 /* 15522 * Save off the size of the mbuf. We do 15523 * this so that we can recognize when it 15524 * has been trimmed by sbcut() as acks 15525 * come in. 15526 */ 15527 fsb->o_m_len = smb->m_len; 15528 } else { 15529 /* 15530 * This is the case where the next mbuf went to NULL. This 15531 * means with this copy we have sent everything in the sb. 15532 * In theory we could clear the fast_output flag, but lets 15533 * not since its possible that we could get more added 15534 * and acks that call the extend function which would let 15535 * us send more. 15536 */ 15537 fsb->o_m_len = 0; 15538 } 15539 } 15540 return (top); 15541 nospace: 15542 if (top) 15543 m_freem(top); 15544 return (NULL); 15545 15546 } 15547 15548 /* 15549 * This is a copy of m_copym(), taking the TSO segment size/limit 15550 * constraints into account, and advancing the sndptr as it goes. 
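 *
 * One detail worth noting: acks may have trimmed (sbcut) data off the
 * front of the saved fsb mbuf between sends, so the saved offset is
 * re-based before copying. With hypothetical numbers:
 *
 *	fsb.o_m_len = 4096 (mbuf length when the offset was recorded)
 *	m->m_len    = 2560 (length now, after acked data was trimmed)
 *	delta       = 4096 - 2560 = 1536
 *	soff        = 3000 - 1536 = 1464 (still within the mbuf, as the
 *	              KASSERTs below verify)
 *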
 */
static struct mbuf *
rack_fo_m_copym(struct tcp_rack *rack, int32_t *plen,
    int32_t seglimit, int32_t segsize, struct mbuf **s_mb, int *s_soff)
{
	struct mbuf *m, *n;
	int32_t soff;

	soff = rack->r_ctl.fsb.off;
	m = rack->r_ctl.fsb.m;
	if (rack->r_ctl.fsb.o_m_len > m->m_len) {
		/*
		 * The mbuf had the front of it chopped off by an ack;
		 * we need to adjust the soff/off by that difference.
		 */
		uint32_t delta;

		delta = rack->r_ctl.fsb.o_m_len - m->m_len;
		soff -= delta;
	} else if (rack->r_ctl.fsb.o_m_len < m->m_len) {
		/*
		 * The mbuf was expanded, probably by
		 * an m_compress. Just update o_m_len.
		 */
		rack->r_ctl.fsb.o_m_len = m->m_len;
	}
	KASSERT(soff >= 0, ("%s, negative off %d", __FUNCTION__, soff));
	KASSERT(*plen >= 0, ("%s, negative len %d", __FUNCTION__, *plen));
	KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?",
		__FUNCTION__,
		rack, *plen, m, m->m_len));
	/* Save off the right location before we copy and advance */
	*s_soff = soff;
	*s_mb = rack->r_ctl.fsb.m;
	n = rack_fo_base_copym(m, soff, plen,
	    &rack->r_ctl.fsb,
	    seglimit, segsize, rack->r_ctl.fsb.hw_tls);
	return (n);
}

static int
rack_fast_rsm_output(struct tcpcb *tp, struct tcp_rack *rack, struct rack_sendmap *rsm,
    uint64_t ts_val, uint32_t cts, uint32_t ms_cts, struct timeval *tv, int len, uint8_t doing_tlp)
{
	/*
	 * Enter the fast retransmit path. We are given that a sched_pin is
	 * in place (if accounting is compiled in) and the cycle count taken
	 * at the entry is in ts_val. The concept here is that the rsm
	 * now holds the mbuf offsets and such, so we can directly transmit
	 * without a lot of overhead; the len field is already set for
	 * us to prohibit us from sending too much (usually it is 1 MSS).
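 *
 * When TSO is usable, len is additionally clamped against the interface
 * limits a little further down; a rough sketch with hypothetical
 * interface values:
 *
 *	if_hw_tsomax = 65535, hdrlen = 52, max_linkhdr = 16
 *	max_len = 65535 - 52 - 16 = 65467 bytes
 *	len     = min(len, max_len)
 *
 * and TSO is turned back off if the resulting (or copied) len is no
 * more than one segment.
 *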
15602 */ 15603 struct ip *ip = NULL; 15604 struct udphdr *udp = NULL; 15605 struct tcphdr *th = NULL; 15606 struct mbuf *m = NULL; 15607 struct inpcb *inp; 15608 uint8_t *cpto; 15609 struct tcp_log_buffer *lgb; 15610 #ifdef TCP_ACCOUNTING 15611 uint64_t crtsc; 15612 int cnt_thru = 1; 15613 #endif 15614 struct tcpopt to; 15615 u_char opt[TCP_MAXOLEN]; 15616 uint32_t hdrlen, optlen; 15617 int32_t slot, segsiz, max_val, tso = 0, error = 0, ulen = 0; 15618 uint16_t flags; 15619 uint32_t if_hw_tsomaxsegcount = 0, startseq; 15620 uint32_t if_hw_tsomaxsegsize; 15621 15622 #ifdef INET6 15623 struct ip6_hdr *ip6 = NULL; 15624 15625 if (rack->r_is_v6) { 15626 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 15627 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 15628 } else 15629 #endif /* INET6 */ 15630 { 15631 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 15632 hdrlen = sizeof(struct tcpiphdr); 15633 } 15634 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 15635 goto failed; 15636 } 15637 if (doing_tlp) { 15638 /* Its a TLP add the flag, it may already be there but be sure */ 15639 rsm->r_flags |= RACK_TLP; 15640 } else { 15641 /* If it was a TLP it is not not on this retransmit */ 15642 rsm->r_flags &= ~RACK_TLP; 15643 } 15644 startseq = rsm->r_start; 15645 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 15646 inp = rack->rc_inp; 15647 to.to_flags = 0; 15648 flags = tcp_outflags[tp->t_state]; 15649 if (flags & (TH_SYN|TH_RST)) { 15650 goto failed; 15651 } 15652 if (rsm->r_flags & RACK_HAS_FIN) { 15653 /* We can't send a FIN here */ 15654 goto failed; 15655 } 15656 if (flags & TH_FIN) { 15657 /* We never send a FIN */ 15658 flags &= ~TH_FIN; 15659 } 15660 if (tp->t_flags & TF_RCVD_TSTMP) { 15661 to.to_tsval = ms_cts + tp->ts_offset; 15662 to.to_tsecr = tp->ts_recent; 15663 to.to_flags = TOF_TS; 15664 } 15665 optlen = tcp_addoptions(&to, opt); 15666 hdrlen += optlen; 15667 udp = rack->r_ctl.fsb.udp; 15668 if (udp) 15669 hdrlen += sizeof(struct udphdr); 15670 if (rack->r_ctl.rc_pace_max_segs) 15671 max_val = rack->r_ctl.rc_pace_max_segs; 15672 else if (rack->rc_user_set_max_segs) 15673 max_val = rack->rc_user_set_max_segs * segsiz; 15674 else 15675 max_val = len; 15676 if ((tp->t_flags & TF_TSO) && 15677 V_tcp_do_tso && 15678 (len > segsiz) && 15679 (tp->t_port == 0)) 15680 tso = 1; 15681 #ifdef INET6 15682 if (MHLEN < hdrlen + max_linkhdr) 15683 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 15684 else 15685 #endif 15686 m = m_gethdr(M_NOWAIT, MT_DATA); 15687 if (m == NULL) 15688 goto failed; 15689 m->m_data += max_linkhdr; 15690 m->m_len = hdrlen; 15691 th = rack->r_ctl.fsb.th; 15692 /* Establish the len to send */ 15693 if (len > max_val) 15694 len = max_val; 15695 if ((tso) && (len + optlen > tp->t_maxseg)) { 15696 uint32_t if_hw_tsomax; 15697 int32_t max_len; 15698 15699 /* extract TSO information */ 15700 if_hw_tsomax = tp->t_tsomax; 15701 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 15702 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 15703 /* 15704 * Check if we should limit by maximum payload 15705 * length: 15706 */ 15707 if (if_hw_tsomax != 0) { 15708 /* compute maximum TSO length */ 15709 max_len = (if_hw_tsomax - hdrlen - 15710 max_linkhdr); 15711 if (max_len <= 0) { 15712 goto failed; 15713 } else if (len > max_len) { 15714 len = max_len; 15715 } 15716 } 15717 if (len <= segsiz) { 15718 /* 15719 * In case there are too many small fragments don't 15720 * use TSO: 15721 */ 15722 tso = 0; 15723 } 15724 } else { 15725 tso = 0; 15726 } 15727 if ((tso == 0) && (len > 
segsiz)) 15728 len = segsiz; 15729 if ((len == 0) || 15730 (len <= MHLEN - hdrlen - max_linkhdr)) { 15731 goto failed; 15732 } 15733 th->th_seq = htonl(rsm->r_start); 15734 th->th_ack = htonl(tp->rcv_nxt); 15735 /* 15736 * The PUSH bit should only be applied 15737 * if the full retransmission is made. If 15738 * we are sending less than this is the 15739 * left hand edge and should not have 15740 * the PUSH bit. 15741 */ 15742 if ((rsm->r_flags & RACK_HAD_PUSH) && 15743 (len == (rsm->r_end - rsm->r_start))) 15744 flags |= TH_PUSH; 15745 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 15746 if (th->th_win == 0) { 15747 tp->t_sndzerowin++; 15748 tp->t_flags |= TF_RXWIN0SENT; 15749 } else 15750 tp->t_flags &= ~TF_RXWIN0SENT; 15751 if (rsm->r_flags & RACK_TLP) { 15752 /* 15753 * TLP should not count in retran count, but 15754 * in its own bin 15755 */ 15756 counter_u64_add(rack_tlp_retran, 1); 15757 counter_u64_add(rack_tlp_retran_bytes, len); 15758 } else { 15759 tp->t_sndrexmitpack++; 15760 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 15761 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 15762 } 15763 #ifdef STATS 15764 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 15765 len); 15766 #endif 15767 if (rsm->m == NULL) 15768 goto failed; 15769 if (rsm->orig_m_len != rsm->m->m_len) { 15770 /* Fix up the orig_m_len and possibly the mbuf offset */ 15771 rack_adjust_orig_mlen(rsm); 15772 } 15773 m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, rsm->r_hw_tls); 15774 if (len <= segsiz) { 15775 /* 15776 * Must have ran out of mbufs for the copy 15777 * shorten it to no longer need tso. Lets 15778 * not put on sendalot since we are low on 15779 * mbufs. 15780 */ 15781 tso = 0; 15782 } 15783 if ((m->m_next == NULL) || (len <= 0)){ 15784 goto failed; 15785 } 15786 if (udp) { 15787 if (rack->r_is_v6) 15788 ulen = hdrlen + len - sizeof(struct ip6_hdr); 15789 else 15790 ulen = hdrlen + len - sizeof(struct ip); 15791 udp->uh_ulen = htons(ulen); 15792 } 15793 m->m_pkthdr.rcvif = (struct ifnet *)0; 15794 if (TCPS_HAVERCVDSYN(tp->t_state) && 15795 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 15796 int ect = tcp_ecn_output_established(tp, &flags, len, true); 15797 if ((tp->t_state == TCPS_SYN_RECEIVED) && 15798 (tp->t_flags2 & TF2_ECN_SND_ECE)) 15799 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 15800 #ifdef INET6 15801 if (rack->r_is_v6) { 15802 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 15803 ip6->ip6_flow |= htonl(ect << 20); 15804 } 15805 else 15806 #endif 15807 { 15808 ip->ip_tos &= ~IPTOS_ECN_MASK; 15809 ip->ip_tos |= ect; 15810 } 15811 } 15812 tcp_set_flags(th, flags); 15813 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 15814 #ifdef INET6 15815 if (rack->r_is_v6) { 15816 if (tp->t_port) { 15817 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 15818 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15819 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 15820 th->th_sum = htons(0); 15821 UDPSTAT_INC(udps_opackets); 15822 } else { 15823 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 15824 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15825 th->th_sum = in6_cksum_pseudo(ip6, 15826 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 15827 0); 15828 } 15829 } 15830 #endif 15831 #if defined(INET6) && defined(INET) 15832 else 15833 #endif 15834 #ifdef INET 15835 { 15836 if (tp->t_port) { 15837 m->m_pkthdr.csum_flags = CSUM_UDP; 15838 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 15839 udp->uh_sum = 
in_pseudo(ip->ip_src.s_addr, 15840 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 15841 th->th_sum = htons(0); 15842 UDPSTAT_INC(udps_opackets); 15843 } else { 15844 m->m_pkthdr.csum_flags = CSUM_TCP; 15845 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 15846 th->th_sum = in_pseudo(ip->ip_src.s_addr, 15847 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 15848 IPPROTO_TCP + len + optlen)); 15849 } 15850 /* IP version must be set here for ipv4/ipv6 checking later */ 15851 KASSERT(ip->ip_v == IPVERSION, 15852 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 15853 } 15854 #endif 15855 if (tso) { 15856 KASSERT(len > tp->t_maxseg - optlen, 15857 ("%s: len <= tso_segsz tp:%p", __func__, tp)); 15858 m->m_pkthdr.csum_flags |= CSUM_TSO; 15859 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 15860 } 15861 #ifdef INET6 15862 if (rack->r_is_v6) { 15863 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 15864 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 15865 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 15866 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15867 else 15868 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15869 } 15870 #endif 15871 #if defined(INET) && defined(INET6) 15872 else 15873 #endif 15874 #ifdef INET 15875 { 15876 ip->ip_len = htons(m->m_pkthdr.len); 15877 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 15878 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 15879 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 15880 if (tp->t_port == 0 || len < V_tcp_minmss) { 15881 ip->ip_off |= htons(IP_DF); 15882 } 15883 } else { 15884 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 15885 } 15886 } 15887 #endif 15888 /* Time to copy in our header */ 15889 cpto = mtod(m, uint8_t *); 15890 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 15891 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 15892 if (optlen) { 15893 bcopy(opt, th + 1, optlen); 15894 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 15895 } else { 15896 th->th_off = sizeof(struct tcphdr) >> 2; 15897 } 15898 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 15899 union tcp_log_stackspecific log; 15900 15901 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 15902 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 15903 counter_u64_add(rack_collapsed_win_rxt, 1); 15904 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 15905 } 15906 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 15907 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 15908 if (rack->rack_no_prr) 15909 log.u_bbr.flex1 = 0; 15910 else 15911 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 15912 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 15913 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 15914 log.u_bbr.flex4 = max_val; 15915 log.u_bbr.flex5 = 0; 15916 /* Save off the early/late values */ 15917 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 15918 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 15919 log.u_bbr.bw_inuse = rack_get_bw(rack); 15920 if (doing_tlp == 0) 15921 log.u_bbr.flex8 = 1; 15922 else 15923 log.u_bbr.flex8 = 2; 15924 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 15925 log.u_bbr.flex7 = 55; 15926 log.u_bbr.pkts_out = tp->t_maxseg; 15927 log.u_bbr.timeStamp = cts; 15928 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 15929 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 15930 log.u_bbr.delivered = 0; 15931 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 15932 len, &log, false, NULL, NULL, 0, tv); 15933 } else 15934 
lgb = NULL; 15935 #ifdef INET6 15936 if (rack->r_is_v6) { 15937 error = ip6_output(m, NULL, 15938 &inp->inp_route6, 15939 0, NULL, NULL, inp); 15940 } 15941 else 15942 #endif 15943 #ifdef INET 15944 { 15945 error = ip_output(m, NULL, 15946 &inp->inp_route, 15947 0, 0, inp); 15948 } 15949 #endif 15950 m = NULL; 15951 if (lgb) { 15952 lgb->tlb_errno = error; 15953 lgb = NULL; 15954 } 15955 if (error) { 15956 goto failed; 15957 } 15958 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), 15959 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls); 15960 if (doing_tlp && (rack->fast_rsm_hack == 0)) { 15961 rack->rc_tlp_in_progress = 1; 15962 rack->r_ctl.rc_tlp_cnt_out++; 15963 } 15964 if (error == 0) { 15965 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); 15966 if (doing_tlp) { 15967 rack->rc_last_sent_tlp_past_cumack = 0; 15968 rack->rc_last_sent_tlp_seq_valid = 1; 15969 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 15970 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 15971 } 15972 } 15973 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 15974 rack->forced_ack = 0; /* If we send something zap the FA flag */ 15975 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 15976 rack->r_ctl.retran_during_recovery += len; 15977 { 15978 int idx; 15979 15980 idx = (len / segsiz) + 3; 15981 if (idx >= TCP_MSS_ACCT_ATIMER) 15982 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 15983 else 15984 counter_u64_add(rack_out_size[idx], 1); 15985 } 15986 if (tp->t_rtttime == 0) { 15987 tp->t_rtttime = ticks; 15988 tp->t_rtseq = startseq; 15989 KMOD_TCPSTAT_INC(tcps_segstimed); 15990 } 15991 counter_u64_add(rack_fto_rsm_send, 1); 15992 if (error && (error == ENOBUFS)) { 15993 if (rack->r_ctl.crte != NULL) { 15994 rack_trace_point(rack, RACK_TP_HWENOBUF); 15995 } else 15996 rack_trace_point(rack, RACK_TP_ENOBUF); 15997 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 15998 if (rack->rc_enobuf < 0x7f) 15999 rack->rc_enobuf++; 16000 if (slot < (10 * HPTS_USEC_IN_MSEC)) 16001 slot = 10 * HPTS_USEC_IN_MSEC; 16002 } else 16003 slot = rack_get_pacing_delay(rack, tp, len, NULL, segsiz); 16004 if ((slot == 0) || 16005 (rack->rc_always_pace == 0) || 16006 (rack->r_rr_config == 1)) { 16007 /* 16008 * We have no pacing set or we 16009 * are using old-style rack or 16010 * we are overridden to use the old 1ms pacing. 16011 */ 16012 slot = rack->r_ctl.rc_min_to; 16013 } 16014 rack_start_hpts_timer(rack, tp, cts, slot, len, 0); 16015 #ifdef TCP_ACCOUNTING 16016 crtsc = get_cyclecount(); 16017 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16018 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16019 } 16020 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16021 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16022 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16023 } 16024 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16025 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16026 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); 16027 } 16028 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((len + segsiz - 1) / segsiz)); 16029 sched_unpin(); 16030 #endif 16031 return (0); 16032 failed: 16033 if (m) 16034 m_free(m); 16035 return (-1); 16036 } 16037 16038 static void 16039 rack_sndbuf_autoscale(struct tcp_rack *rack) 16040 { 16041 /* 16042 * Automatic sizing of send socket buffer. Often the send buffer 16043 * size is not optimally adjusted to the actual network conditions 16044 * at hand (delay bandwidth product). 
	 * Setting the buffer size too small limits throughput on links
	 * with high bandwidth and high delay (e.g. trans-continental/
	 * oceanic links). Setting the buffer size too big consumes too
	 * much real kernel memory, especially with many connections on
	 * busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * Note that the rack version moves things much faster since
	 * we want to avoid hitting cache lines in the rack_fast_output()
	 * path, so this is called much less often and thus moves
	 * the SB forward by a percentage.
	 */
	struct socket *so;
	struct tcpcb *tp;
	uint32_t sendwin, scaleup;

	tp = rack->rc_tp;
	so = rack->rc_inp->inp_socket;
	sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd);
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
		    sbused(&so->so_snd) >=
		    (so->so_snd.sb_hiwat / 8 * 7) &&
		    sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
		    sendwin >= (sbused(&so->so_snd) -
		    (tp->snd_nxt - tp->snd_una))) {
			if (rack_autosndbuf_inc)
				scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100;
			else
				scaleup = V_tcp_autosndbuf_inc;
			if (scaleup < V_tcp_autosndbuf_inc)
				scaleup = V_tcp_autosndbuf_inc;
			scaleup += so->so_snd.sb_hiwat;
			if (scaleup > V_tcp_autosndbuf_max)
				scaleup = V_tcp_autosndbuf_max;
			if (!sbreserve_locked(so, SO_SND, scaleup, curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}
}

static int
rack_fast_output(struct tcpcb *tp, struct tcp_rack *rack, uint64_t ts_val,
    uint32_t cts, uint32_t ms_cts, struct timeval *tv, long tot_len, int *send_err)
{
	/*
	 * Enter to do fast output. We are given that the sched_pin is
	 * in place (if accounting is compiled in) and the cycle count taken
	 * at entry is in place in ts_val. The idea here is that
	 * we know how many more bytes need to be sent (presumably either
	 * during pacing or to fill the cwnd and that was greater than
	 * the max-burst). We have how much to send and all the info we
	 * need to just send.
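 *
 * For the percentage-based scale-up in rack_sndbuf_autoscale() just
 * above, a worked example with hypothetical values:
 *
 *	so_snd.sb_hiwat = 65536 bytes, rack_autosndbuf_inc = 20 (percent)
 *	scaleup = (20 * 65536) / 100 + 65536 = 78643 bytes
 *
 * which is then clamped to V_tcp_autosndbuf_max and applied with
 * sbreserve_locked().
 *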
16105 */ 16106 #ifdef INET 16107 struct ip *ip = NULL; 16108 #endif 16109 struct udphdr *udp = NULL; 16110 struct tcphdr *th = NULL; 16111 struct mbuf *m, *s_mb; 16112 struct inpcb *inp; 16113 uint8_t *cpto; 16114 struct tcp_log_buffer *lgb; 16115 #ifdef TCP_ACCOUNTING 16116 uint64_t crtsc; 16117 #endif 16118 struct tcpopt to; 16119 u_char opt[TCP_MAXOLEN]; 16120 uint32_t hdrlen, optlen; 16121 #ifdef TCP_ACCOUNTING 16122 int cnt_thru = 1; 16123 #endif 16124 int32_t slot, segsiz, len, max_val, tso = 0, sb_offset, error, ulen = 0; 16125 uint16_t flags; 16126 uint32_t s_soff; 16127 uint32_t if_hw_tsomaxsegcount = 0, startseq; 16128 uint32_t if_hw_tsomaxsegsize; 16129 uint16_t add_flag = RACK_SENT_FP; 16130 #ifdef INET6 16131 struct ip6_hdr *ip6 = NULL; 16132 16133 if (rack->r_is_v6) { 16134 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 16135 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 16136 } else 16137 #endif /* INET6 */ 16138 { 16139 #ifdef INET 16140 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 16141 hdrlen = sizeof(struct tcpiphdr); 16142 #endif 16143 } 16144 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { 16145 m = NULL; 16146 goto failed; 16147 } 16148 startseq = tp->snd_max; 16149 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16150 inp = rack->rc_inp; 16151 len = rack->r_ctl.fsb.left_to_send; 16152 to.to_flags = 0; 16153 flags = rack->r_ctl.fsb.tcp_flags; 16154 if (tp->t_flags & TF_RCVD_TSTMP) { 16155 to.to_tsval = ms_cts + tp->ts_offset; 16156 to.to_tsecr = tp->ts_recent; 16157 to.to_flags = TOF_TS; 16158 } 16159 optlen = tcp_addoptions(&to, opt); 16160 hdrlen += optlen; 16161 udp = rack->r_ctl.fsb.udp; 16162 if (udp) 16163 hdrlen += sizeof(struct udphdr); 16164 if (rack->r_ctl.rc_pace_max_segs) 16165 max_val = rack->r_ctl.rc_pace_max_segs; 16166 else if (rack->rc_user_set_max_segs) 16167 max_val = rack->rc_user_set_max_segs * segsiz; 16168 else 16169 max_val = len; 16170 if ((tp->t_flags & TF_TSO) && 16171 V_tcp_do_tso && 16172 (len > segsiz) && 16173 (tp->t_port == 0)) 16174 tso = 1; 16175 again: 16176 #ifdef INET6 16177 if (MHLEN < hdrlen + max_linkhdr) 16178 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 16179 else 16180 #endif 16181 m = m_gethdr(M_NOWAIT, MT_DATA); 16182 if (m == NULL) 16183 goto failed; 16184 m->m_data += max_linkhdr; 16185 m->m_len = hdrlen; 16186 th = rack->r_ctl.fsb.th; 16187 /* Establish the len to send */ 16188 if (len > max_val) 16189 len = max_val; 16190 if ((tso) && (len + optlen > tp->t_maxseg)) { 16191 uint32_t if_hw_tsomax; 16192 int32_t max_len; 16193 16194 /* extract TSO information */ 16195 if_hw_tsomax = tp->t_tsomax; 16196 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 16197 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 16198 /* 16199 * Check if we should limit by maximum payload 16200 * length: 16201 */ 16202 if (if_hw_tsomax != 0) { 16203 /* compute maximum TSO length */ 16204 max_len = (if_hw_tsomax - hdrlen - 16205 max_linkhdr); 16206 if (max_len <= 0) { 16207 goto failed; 16208 } else if (len > max_len) { 16209 len = max_len; 16210 } 16211 } 16212 if (len <= segsiz) { 16213 /* 16214 * In case there are too many small fragments don't 16215 * use TSO: 16216 */ 16217 tso = 0; 16218 } 16219 } else { 16220 tso = 0; 16221 } 16222 if ((tso == 0) && (len > segsiz)) 16223 len = segsiz; 16224 if ((len == 0) || 16225 (len <= MHLEN - hdrlen - max_linkhdr)) { 16226 goto failed; 16227 } 16228 sb_offset = tp->snd_max - tp->snd_una; 16229 th->th_seq = htonl(tp->snd_max); 16230 th->th_ack = htonl(tp->rcv_nxt); 16231 
th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); 16232 if (th->th_win == 0) { 16233 tp->t_sndzerowin++; 16234 tp->t_flags |= TF_RXWIN0SENT; 16235 } else 16236 tp->t_flags &= ~TF_RXWIN0SENT; 16237 tp->snd_up = tp->snd_una; /* drag it along, its deprecated */ 16238 KMOD_TCPSTAT_INC(tcps_sndpack); 16239 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 16240 #ifdef STATS 16241 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 16242 len); 16243 #endif 16244 if (rack->r_ctl.fsb.m == NULL) 16245 goto failed; 16246 16247 /* s_mb and s_soff are saved for rack_log_output */ 16248 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, 16249 &s_mb, &s_soff); 16250 if (len <= segsiz) { 16251 /* 16252 * Must have ran out of mbufs for the copy 16253 * shorten it to no longer need tso. Lets 16254 * not put on sendalot since we are low on 16255 * mbufs. 16256 */ 16257 tso = 0; 16258 } 16259 if (rack->r_ctl.fsb.rfo_apply_push && 16260 (len == rack->r_ctl.fsb.left_to_send)) { 16261 flags |= TH_PUSH; 16262 add_flag |= RACK_HAD_PUSH; 16263 } 16264 if ((m->m_next == NULL) || (len <= 0)){ 16265 goto failed; 16266 } 16267 if (udp) { 16268 if (rack->r_is_v6) 16269 ulen = hdrlen + len - sizeof(struct ip6_hdr); 16270 else 16271 ulen = hdrlen + len - sizeof(struct ip); 16272 udp->uh_ulen = htons(ulen); 16273 } 16274 m->m_pkthdr.rcvif = (struct ifnet *)0; 16275 if (TCPS_HAVERCVDSYN(tp->t_state) && 16276 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 16277 int ect = tcp_ecn_output_established(tp, &flags, len, false); 16278 if ((tp->t_state == TCPS_SYN_RECEIVED) && 16279 (tp->t_flags2 & TF2_ECN_SND_ECE)) 16280 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 16281 #ifdef INET6 16282 if (rack->r_is_v6) { 16283 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 16284 ip6->ip6_flow |= htonl(ect << 20); 16285 } 16286 else 16287 #endif 16288 { 16289 #ifdef INET 16290 ip->ip_tos &= ~IPTOS_ECN_MASK; 16291 ip->ip_tos |= ect; 16292 #endif 16293 } 16294 } 16295 tcp_set_flags(th, flags); 16296 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 16297 #ifdef INET6 16298 if (rack->r_is_v6) { 16299 if (tp->t_port) { 16300 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 16301 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16302 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 16303 th->th_sum = htons(0); 16304 UDPSTAT_INC(udps_opackets); 16305 } else { 16306 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 16307 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16308 th->th_sum = in6_cksum_pseudo(ip6, 16309 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 16310 0); 16311 } 16312 } 16313 #endif 16314 #if defined(INET6) && defined(INET) 16315 else 16316 #endif 16317 #ifdef INET 16318 { 16319 if (tp->t_port) { 16320 m->m_pkthdr.csum_flags = CSUM_UDP; 16321 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 16322 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 16323 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 16324 th->th_sum = htons(0); 16325 UDPSTAT_INC(udps_opackets); 16326 } else { 16327 m->m_pkthdr.csum_flags = CSUM_TCP; 16328 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 16329 th->th_sum = in_pseudo(ip->ip_src.s_addr, 16330 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 16331 IPPROTO_TCP + len + optlen)); 16332 } 16333 /* IP version must be set here for ipv4/ipv6 checking later */ 16334 KASSERT(ip->ip_v == IPVERSION, 16335 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 16336 } 16337 #endif 16338 if (tso) { 16339 KASSERT(len > tp->t_maxseg - optlen, 16340 ("%s: 
len <= tso_segsz tp:%p", __func__, tp)); 16341 m->m_pkthdr.csum_flags |= CSUM_TSO; 16342 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 16343 } 16344 #ifdef INET6 16345 if (rack->r_is_v6) { 16346 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; 16347 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 16348 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 16349 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16350 else 16351 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16352 } 16353 #endif 16354 #if defined(INET) && defined(INET6) 16355 else 16356 #endif 16357 #ifdef INET 16358 { 16359 ip->ip_len = htons(m->m_pkthdr.len); 16360 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; 16361 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 16362 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 16363 if (tp->t_port == 0 || len < V_tcp_minmss) { 16364 ip->ip_off |= htons(IP_DF); 16365 } 16366 } else { 16367 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 16368 } 16369 } 16370 #endif 16371 /* Time to copy in our header */ 16372 cpto = mtod(m, uint8_t *); 16373 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); 16374 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); 16375 if (optlen) { 16376 bcopy(opt, th + 1, optlen); 16377 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 16378 } else { 16379 th->th_off = sizeof(struct tcphdr) >> 2; 16380 } 16381 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 16382 union tcp_log_stackspecific log; 16383 16384 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 16385 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 16386 if (rack->rack_no_prr) 16387 log.u_bbr.flex1 = 0; 16388 else 16389 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 16390 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 16391 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 16392 log.u_bbr.flex4 = max_val; 16393 log.u_bbr.flex5 = 0; 16394 /* Save off the early/late values */ 16395 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 16396 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 16397 log.u_bbr.bw_inuse = rack_get_bw(rack); 16398 log.u_bbr.flex8 = 0; 16399 log.u_bbr.pacing_gain = rack_get_output_gain(rack, NULL); 16400 log.u_bbr.flex7 = 44; 16401 log.u_bbr.pkts_out = tp->t_maxseg; 16402 log.u_bbr.timeStamp = cts; 16403 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 16404 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; 16405 log.u_bbr.delivered = 0; 16406 lgb = tcp_log_event_(tp, th, NULL, NULL, TCP_LOG_OUT, ERRNO_UNK, 16407 len, &log, false, NULL, NULL, 0, tv); 16408 } else 16409 lgb = NULL; 16410 #ifdef INET6 16411 if (rack->r_is_v6) { 16412 error = ip6_output(m, NULL, 16413 &inp->inp_route6, 16414 0, NULL, NULL, inp); 16415 } 16416 #endif 16417 #if defined(INET) && defined(INET6) 16418 else 16419 #endif 16420 #ifdef INET 16421 { 16422 error = ip_output(m, NULL, 16423 &inp->inp_route, 16424 0, 0, inp); 16425 } 16426 #endif 16427 if (lgb) { 16428 lgb->tlb_errno = error; 16429 lgb = NULL; 16430 } 16431 if (error) { 16432 *send_err = error; 16433 m = NULL; 16434 goto failed; 16435 } 16436 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), 16437 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls); 16438 m = NULL; 16439 if (tp->snd_una == tp->snd_max) { 16440 rack->r_ctl.rc_tlp_rxt_last_time = cts; 16441 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 16442 tp->t_acktime = ticks; 16443 } 16444 if (error == 0) 16445 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); 16446 16447 rack->forced_ack = 0; /* If we send something 
zap the FA flag */ 16448 tot_len += len; 16449 if ((tp->t_flags & TF_GPUTINPROG) == 0) 16450 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); 16451 tp->snd_max += len; 16452 tp->snd_nxt = tp->snd_max; 16453 { 16454 int idx; 16455 16456 idx = (len / segsiz) + 3; 16457 if (idx >= TCP_MSS_ACCT_ATIMER) 16458 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 16459 else 16460 counter_u64_add(rack_out_size[idx], 1); 16461 } 16462 if (len <= rack->r_ctl.fsb.left_to_send) 16463 rack->r_ctl.fsb.left_to_send -= len; 16464 else 16465 rack->r_ctl.fsb.left_to_send = 0; 16466 if (rack->r_ctl.fsb.left_to_send < segsiz) { 16467 rack->r_fast_output = 0; 16468 rack->r_ctl.fsb.left_to_send = 0; 16469 /* At the end of fast_output scale up the sb */ 16470 SOCKBUF_LOCK(&rack->rc_inp->inp_socket->so_snd); 16471 rack_sndbuf_autoscale(rack); 16472 SOCKBUF_UNLOCK(&rack->rc_inp->inp_socket->so_snd); 16473 } 16474 if (tp->t_rtttime == 0) { 16475 tp->t_rtttime = ticks; 16476 tp->t_rtseq = startseq; 16477 KMOD_TCPSTAT_INC(tcps_segstimed); 16478 } 16479 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && 16480 (max_val > len) && 16481 (tso == 0)) { 16482 max_val -= len; 16483 len = segsiz; 16484 th = rack->r_ctl.fsb.th; 16485 #ifdef TCP_ACCOUNTING 16486 cnt_thru++; 16487 #endif 16488 goto again; 16489 } 16490 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 16491 counter_u64_add(rack_fto_send, 1); 16492 slot = rack_get_pacing_delay(rack, tp, tot_len, NULL, segsiz); 16493 rack_start_hpts_timer(rack, tp, cts, slot, tot_len, 0); 16494 #ifdef TCP_ACCOUNTING 16495 crtsc = get_cyclecount(); 16496 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16497 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; 16498 } 16499 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], cnt_thru); 16500 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16501 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 16502 } 16503 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 16504 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16505 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len + segsiz - 1) / segsiz); 16506 } 16507 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len + segsiz - 1) / segsiz)); 16508 sched_unpin(); 16509 #endif 16510 return (0); 16511 failed: 16512 if (m) 16513 m_free(m); 16514 rack->r_fast_output = 0; 16515 return (-1); 16516 } 16517 16518 static struct rack_sendmap * 16519 rack_check_collapsed(struct tcp_rack *rack, uint32_t cts) 16520 { 16521 struct rack_sendmap *rsm = NULL; 16522 struct rack_sendmap fe; 16523 int thresh; 16524 16525 restart: 16526 fe.r_start = rack->r_ctl.last_collapse_point; 16527 rsm = RB_FIND(rack_rb_tree_head, &rack->r_ctl.rc_mtree, &fe); 16528 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { 16529 /* Nothing, strange turn off validity */ 16530 rack->r_collapse_point_valid = 0; 16531 return (NULL); 16532 } 16533 /* Can we send it yet? */ 16534 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { 16535 /* 16536 * Receiver window has not grown enough for 16537 * the segment to be put on the wire. 16538 */ 16539 return (NULL); 16540 } 16541 if (rsm->r_flags & RACK_ACKED) { 16542 /* 16543 * It has been sacked, lets move to the 16544 * next one if possible. 16545 */ 16546 rack->r_ctl.last_collapse_point = rsm->r_end; 16547 /* Are we done? */ 16548 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16549 rack->r_ctl.high_collapse_point)) { 16550 rack->r_collapse_point_valid = 0; 16551 return (NULL); 16552 } 16553 goto restart; 16554 } 16555 /* Now has it been long enough ? 
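 *
 * I.e. a collapsed segment is only retransmitted once it has sat unsent
 * for at least the RACK reordering window; as a sketch of the check
 * that follows:
 *
 *	idle   = cts - (uint32_t)rsm->r_tim_lastsent[rsm->r_rtr_cnt - 1];
 *	thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts);
 *	return (idle > thresh ? rsm : NULL);
 *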
*/ 16556 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts); 16557 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { 16558 rack_log_collapse(rack, rsm->r_start, 16559 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16560 thresh, __LINE__, 6, rsm->r_flags, rsm); 16561 return (rsm); 16562 } 16563 /* Not enough time */ 16564 rack_log_collapse(rack, rsm->r_start, 16565 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), 16566 thresh, __LINE__, 7, rsm->r_flags, rsm); 16567 return (NULL); 16568 } 16569 16570 static int 16571 rack_output(struct tcpcb *tp) 16572 { 16573 struct socket *so; 16574 uint32_t recwin; 16575 uint32_t sb_offset, s_moff = 0; 16576 int32_t len, error = 0; 16577 uint16_t flags; 16578 struct mbuf *m, *s_mb = NULL; 16579 struct mbuf *mb; 16580 uint32_t if_hw_tsomaxsegcount = 0; 16581 uint32_t if_hw_tsomaxsegsize; 16582 int32_t segsiz, minseg; 16583 long tot_len_this_send = 0; 16584 #ifdef INET 16585 struct ip *ip = NULL; 16586 #endif 16587 struct udphdr *udp = NULL; 16588 struct tcp_rack *rack; 16589 struct tcphdr *th; 16590 uint8_t pass = 0; 16591 uint8_t mark = 0; 16592 uint8_t wanted_cookie = 0; 16593 u_char opt[TCP_MAXOLEN]; 16594 unsigned ipoptlen, optlen, hdrlen, ulen=0; 16595 uint32_t rack_seq; 16596 16597 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 16598 unsigned ipsec_optlen = 0; 16599 16600 #endif 16601 int32_t idle, sendalot; 16602 int32_t sub_from_prr = 0; 16603 volatile int32_t sack_rxmit; 16604 struct rack_sendmap *rsm = NULL; 16605 int32_t tso, mtu; 16606 struct tcpopt to; 16607 int32_t slot = 0; 16608 int32_t sup_rack = 0; 16609 uint32_t cts, ms_cts, delayed, early; 16610 uint16_t add_flag = RACK_SENT_SP; 16611 /* The doing_tlp flag will be set by the actual rack_timeout_tlp() */ 16612 uint8_t hpts_calling, doing_tlp = 0; 16613 uint32_t cwnd_to_use, pace_max_seg; 16614 int32_t do_a_prefetch = 0; 16615 int32_t prefetch_rsm = 0; 16616 int32_t orig_len = 0; 16617 struct timeval tv; 16618 int32_t prefetch_so_done = 0; 16619 struct tcp_log_buffer *lgb; 16620 struct inpcb *inp = tptoinpcb(tp); 16621 struct sockbuf *sb; 16622 uint64_t ts_val = 0; 16623 #ifdef TCP_ACCOUNTING 16624 uint64_t crtsc; 16625 #endif 16626 #ifdef INET6 16627 struct ip6_hdr *ip6 = NULL; 16628 int32_t isipv6; 16629 #endif 16630 bool hw_tls = false; 16631 16632 NET_EPOCH_ASSERT(); 16633 INP_WLOCK_ASSERT(inp); 16634 16635 /* setup and take the cache hits here */ 16636 rack = (struct tcp_rack *)tp->t_fb_ptr; 16637 #ifdef TCP_ACCOUNTING 16638 sched_pin(); 16639 ts_val = get_cyclecount(); 16640 #endif 16641 hpts_calling = inp->inp_hpts_calls; 16642 #ifdef TCP_OFFLOAD 16643 if (tp->t_flags & TF_TOE) { 16644 #ifdef TCP_ACCOUNTING 16645 sched_unpin(); 16646 #endif 16647 return (tcp_offload_output(tp)); 16648 } 16649 #endif 16650 /* 16651 * For TFO connections in SYN_RECEIVED, only allow the initial 16652 * SYN|ACK and those sent by the retransmit timer. 
16653 */ 16654 if (IS_FASTOPEN(tp->t_flags) && 16655 (tp->t_state == TCPS_SYN_RECEIVED) && 16656 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ 16657 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ 16658 #ifdef TCP_ACCOUNTING 16659 sched_unpin(); 16660 #endif 16661 return (0); 16662 } 16663 #ifdef INET6 16664 if (rack->r_state) { 16665 /* Use the cache line loaded if possible */ 16666 isipv6 = rack->r_is_v6; 16667 } else { 16668 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; 16669 } 16670 #endif 16671 early = 0; 16672 cts = tcp_get_usecs(&tv); 16673 ms_cts = tcp_tv_to_mssectick(&tv); 16674 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 16675 tcp_in_hpts(rack->rc_inp)) { 16676 /* 16677 * We are on the hpts for some timer but not hptsi output. 16678 * Remove from the hpts unconditionally. 16679 */ 16680 rack_timer_cancel(tp, rack, cts, __LINE__); 16681 } 16682 /* Are we pacing and late? */ 16683 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16684 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { 16685 /* We are delayed */ 16686 delayed = cts - rack->r_ctl.rc_last_output_to; 16687 } else { 16688 delayed = 0; 16689 } 16690 /* Do the timers, which may override the pacer */ 16691 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 16692 int retval; 16693 16694 retval = rack_process_timers(tp, rack, cts, hpts_calling, 16695 &doing_tlp); 16696 if (retval != 0) { 16697 counter_u64_add(rack_out_size[TCP_MSS_ACCT_ATIMER], 1); 16698 #ifdef TCP_ACCOUNTING 16699 sched_unpin(); 16700 #endif 16701 /* 16702 * If timers want tcp_drop(), then pass error out, 16703 * otherwise suppress it. 16704 */ 16705 return (retval < 0 ? retval : 0); 16706 } 16707 } 16708 if (rack->rc_in_persist) { 16709 if (tcp_in_hpts(rack->rc_inp) == 0) { 16710 /* Timer is not running */ 16711 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16712 } 16713 #ifdef TCP_ACCOUNTING 16714 sched_unpin(); 16715 #endif 16716 return (0); 16717 } 16718 if ((rack->rc_ack_required == 1) && 16719 (rack->r_timer_override == 0)){ 16720 /* A timeout occurred and no ack has arrived */ 16721 if (tcp_in_hpts(rack->rc_inp) == 0) { 16722 /* Timer is not running */ 16723 rack_start_hpts_timer(rack, tp, cts, 0, 0, 0); 16724 } 16725 #ifdef TCP_ACCOUNTING 16726 sched_unpin(); 16727 #endif 16728 return (0); 16729 } 16730 if ((rack->r_timer_override) || 16731 (rack->rc_ack_can_sendout_data) || 16732 (delayed) || 16733 (tp->t_state < TCPS_ESTABLISHED)) { 16734 rack->rc_ack_can_sendout_data = 0; 16735 if (tcp_in_hpts(rack->rc_inp)) 16736 tcp_hpts_remove(rack->rc_inp); 16737 } else if (tcp_in_hpts(rack->rc_inp)) { 16738 /* 16739 * On the hpts you can't pass even if ACKNOW is on, we will 16740 * when the hpts fires. 
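 * (The deferred output will go out when the hpts timer fires; here we
 * only account for the blocked attempt and return.)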
16741 */ 16742 #ifdef TCP_ACCOUNTING 16743 crtsc = get_cyclecount(); 16744 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16745 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); 16746 } 16747 counter_u64_add(tcp_proc_time[SND_BLOCKED], (crtsc - ts_val)); 16748 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 16749 tp->tcp_cnt_counters[SND_BLOCKED]++; 16750 } 16751 counter_u64_add(tcp_cnt_counters[SND_BLOCKED], 1); 16752 sched_unpin(); 16753 #endif 16754 counter_u64_add(rack_out_size[TCP_MSS_ACCT_INPACE], 1); 16755 return (0); 16756 } 16757 rack->rc_inp->inp_hpts_calls = 0; 16758 /* Finish out both pacing early and late accounting */ 16759 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 16760 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { 16761 early = rack->r_ctl.rc_last_output_to - cts; 16762 } else 16763 early = 0; 16764 if (delayed) { 16765 rack->r_ctl.rc_agg_delayed += delayed; 16766 rack->r_late = 1; 16767 } else if (early) { 16768 rack->r_ctl.rc_agg_early += early; 16769 rack->r_early = 1; 16770 } 16771 /* Now that early/late accounting is done turn off the flag */ 16772 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; 16773 rack->r_wanted_output = 0; 16774 rack->r_timer_override = 0; 16775 if ((tp->t_state != rack->r_state) && 16776 TCPS_HAVEESTABLISHED(tp->t_state)) { 16777 rack_set_state(tp, rack); 16778 } 16779 if ((rack->r_fast_output) && 16780 (doing_tlp == 0) && 16781 (tp->rcv_numsacks == 0)) { 16782 int ret; 16783 16784 error = 0; 16785 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 16786 if (ret >= 0) 16787 return(ret); 16788 else if (error) { 16789 inp = rack->rc_inp; 16790 so = inp->inp_socket; 16791 sb = &so->so_snd; 16792 goto nomore; 16793 } 16794 } 16795 inp = rack->rc_inp; 16796 /* 16797 * For TFO connections in SYN_SENT or SYN_RECEIVED, 16798 * only allow the initial SYN or SYN|ACK and those sent 16799 * by the retransmit timer. 16800 */ 16801 if (IS_FASTOPEN(tp->t_flags) && 16802 ((tp->t_state == TCPS_SYN_RECEIVED) || 16803 (tp->t_state == TCPS_SYN_SENT)) && 16804 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 16805 (tp->t_rxtshift == 0)) { /* not a retransmit */ 16806 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16807 so = inp->inp_socket; 16808 sb = &so->so_snd; 16809 goto just_return_nolock; 16810 } 16811 /* 16812 * Determine length of data that should be transmitted, and flags 16813 * that will be used. If there is some data or critical controls 16814 * (SYN, RST) to send, then transmit; otherwise, investigate 16815 * further. 
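 * The idle-restart handling below may reset the congestion window via
 * rack_cc_after_idle(), and a long enough idle period is counted as a
 * probe-rtt interval, before the actual length/flags decisions are
 * made.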
16816 */ 16817 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); 16818 if (tp->t_idle_reduce) { 16819 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) 16820 rack_cc_after_idle(rack, tp); 16821 } 16822 tp->t_flags &= ~TF_LASTIDLE; 16823 if (idle) { 16824 if (tp->t_flags & TF_MORETOCOME) { 16825 tp->t_flags |= TF_LASTIDLE; 16826 idle = 0; 16827 } 16828 } 16829 if ((tp->snd_una == tp->snd_max) && 16830 rack->r_ctl.rc_went_idle_time && 16831 TSTMP_GT(cts, rack->r_ctl.rc_went_idle_time)) { 16832 idle = cts - rack->r_ctl.rc_went_idle_time; 16833 if (idle > rack_min_probertt_hold) { 16834 /* Count as a probe rtt */ 16835 if (rack->in_probe_rtt == 0) { 16836 rack->r_ctl.rc_lower_rtt_us_cts = cts; 16837 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; 16838 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; 16839 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; 16840 } else { 16841 rack_exit_probertt(rack, cts); 16842 } 16843 } 16844 idle = 0; 16845 } 16846 if (rack_use_fsb && (rack->r_fsb_inited == 0) && (rack->r_state != TCPS_CLOSED)) 16847 rack_init_fsb_block(tp, rack); 16848 again: 16849 /* 16850 * If we've recently taken a timeout, snd_max will be greater than 16851 * snd_nxt. There may be SACK information that allows us to avoid 16852 * resending already delivered data. Adjust snd_nxt accordingly. 16853 */ 16854 sendalot = 0; 16855 cts = tcp_get_usecs(&tv); 16856 ms_cts = tcp_tv_to_mssectick(&tv); 16857 tso = 0; 16858 mtu = 0; 16859 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); 16860 minseg = segsiz; 16861 if (rack->r_ctl.rc_pace_max_segs == 0) 16862 pace_max_seg = rack->rc_user_set_max_segs * segsiz; 16863 else 16864 pace_max_seg = rack->r_ctl.rc_pace_max_segs; 16865 sb_offset = tp->snd_max - tp->snd_una; 16866 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; 16867 flags = tcp_outflags[tp->t_state]; 16868 while (rack->rc_free_cnt < rack_free_cache) { 16869 rsm = rack_alloc(rack); 16870 if (rsm == NULL) { 16871 if (inp->inp_hpts_calls) 16872 /* Retry in a ms */ 16873 slot = (1 * HPTS_USEC_IN_MSEC); 16874 so = inp->inp_socket; 16875 sb = &so->so_snd; 16876 goto just_return_nolock; 16877 } 16878 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); 16879 rack->rc_free_cnt++; 16880 rsm = NULL; 16881 } 16882 if (inp->inp_hpts_calls) 16883 inp->inp_hpts_calls = 0; 16884 sack_rxmit = 0; 16885 len = 0; 16886 rsm = NULL; 16887 if (flags & TH_RST) { 16888 SOCKBUF_LOCK(&inp->inp_socket->so_snd); 16889 so = inp->inp_socket; 16890 sb = &so->so_snd; 16891 goto send; 16892 } 16893 if (rack->r_ctl.rc_resend) { 16894 /* Retransmit timer */ 16895 rsm = rack->r_ctl.rc_resend; 16896 rack->r_ctl.rc_resend = NULL; 16897 len = rsm->r_end - rsm->r_start; 16898 sack_rxmit = 1; 16899 sendalot = 0; 16900 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16901 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16902 __func__, __LINE__, 16903 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16904 sb_offset = rsm->r_start - tp->snd_una; 16905 if (len >= segsiz) 16906 len = segsiz; 16907 } else if (rack->r_collapse_point_valid && 16908 ((rsm = rack_check_collapsed(rack, cts)) != NULL)) { 16909 /* 16910 * If an RSM is returned then enough time has passed 16911 * for us to retransmit it. Move up the collapse point, 16912 * since this rsm has its chance to retransmit now. 
16913 */ 16914 rack_trace_point(rack, RACK_TP_COLLAPSED_RXT); 16915 rack->r_ctl.last_collapse_point = rsm->r_end; 16916 /* Are we done? */ 16917 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, 16918 rack->r_ctl.high_collapse_point)) 16919 rack->r_collapse_point_valid = 0; 16920 sack_rxmit = 1; 16921 /* We are not doing a TLP */ 16922 doing_tlp = 0; 16923 len = rsm->r_end - rsm->r_start; 16924 sb_offset = rsm->r_start - tp->snd_una; 16925 sendalot = 0; 16926 if ((rack->full_size_rxt == 0) && 16927 (rack->shape_rxt_to_pacing_min == 0) && 16928 (len >= segsiz)) 16929 len = segsiz; 16930 } else if ((rsm = tcp_rack_output(tp, rack, cts)) != NULL) { 16931 /* We have a retransmit that takes precedence */ 16932 if ((!IN_FASTRECOVERY(tp->t_flags)) && 16933 ((rsm->r_flags & RACK_MUST_RXT) == 0) && 16934 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { 16935 /* Enter recovery if not induced by a time-out */ 16936 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); 16937 } 16938 #ifdef INVARIANTS 16939 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 16940 panic("Huh, tp:%p rack:%p rsm:%p start:%u < snd_una:%u\n", 16941 tp, rack, rsm, rsm->r_start, tp->snd_una); 16942 } 16943 #endif 16944 len = rsm->r_end - rsm->r_start; 16945 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16946 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16947 __func__, __LINE__, 16948 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16949 sb_offset = rsm->r_start - tp->snd_una; 16950 sendalot = 0; 16951 if (len >= segsiz) 16952 len = segsiz; 16953 if (len > 0) { 16954 sack_rxmit = 1; 16955 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 16956 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 16957 min(len, segsiz)); 16958 } 16959 } else if (rack->r_ctl.rc_tlpsend) { 16960 /* Tail loss probe */ 16961 long cwin; 16962 long tlen; 16963 16964 /* 16965 * Check if we can do a TLP with a RACK'd packet 16966 * this can happen if we are not doing the rack 16967 * cheat and we skipped to a TLP and it 16968 * went off. 16969 */ 16970 rsm = rack->r_ctl.rc_tlpsend; 16971 /* We are doing a TLP make sure the flag is preent */ 16972 rsm->r_flags |= RACK_TLP; 16973 rack->r_ctl.rc_tlpsend = NULL; 16974 sack_rxmit = 1; 16975 tlen = rsm->r_end - rsm->r_start; 16976 if (tlen > segsiz) 16977 tlen = segsiz; 16978 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), 16979 ("%s:%d: r.start:%u < SND.UNA:%u; tp:%p, rack:%p, rsm:%p", 16980 __func__, __LINE__, 16981 rsm->r_start, tp->snd_una, tp, rack, rsm)); 16982 sb_offset = rsm->r_start - tp->snd_una; 16983 cwin = min(tp->snd_wnd, tlen); 16984 len = cwin; 16985 } 16986 if (rack->r_must_retran && 16987 (doing_tlp == 0) && 16988 (SEQ_GT(tp->snd_max, tp->snd_una)) && 16989 (rsm == NULL)) { 16990 /* 16991 * There are two different ways that we 16992 * can get into this block: 16993 * a) This is a non-sack connection, we had a time-out 16994 * and thus r_must_retran was set and everything 16995 * left outstanding as been marked for retransmit. 16996 * b) The MTU of the path shrank, so that everything 16997 * was marked to be retransmitted with the smaller 16998 * mtu and r_must_retran was set. 16999 * 17000 * This means that we expect the sendmap (outstanding) 17001 * to all be marked must. We can use the tmap to 17002 * look at them. 17003 * 17004 */ 17005 int sendwin, flight; 17006 17007 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 17008 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); 17009 if (flight >= sendwin) { 17010 /* 17011 * We can't send yet. 
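 * Everything marked RACK_MUST_RXT still counts as being in flight, so
 * we hold off until the flight computed above drops below the send
 * window.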
17012 */ 17013 so = inp->inp_socket; 17014 sb = &so->so_snd; 17015 goto just_return_nolock; 17016 } 17017 /* 17018 * This is the case a/b mentioned above. All 17019 * outstanding/not-acked should be marked. 17020 * We can use the tmap to find them. 17021 */ 17022 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); 17023 if (rsm == NULL) { 17024 /* TSNH */ 17025 rack->r_must_retran = 0; 17026 rack->r_ctl.rc_out_at_rto = 0; 17027 so = inp->inp_socket; 17028 sb = &so->so_snd; 17029 goto just_return_nolock; 17030 } 17031 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { 17032 /* 17033 * The first one does not have the flag, did we collapse 17034 * further up in our list? 17035 */ 17036 rack->r_must_retran = 0; 17037 rack->r_ctl.rc_out_at_rto = 0; 17038 rsm = NULL; 17039 sack_rxmit = 0; 17040 } else { 17041 sack_rxmit = 1; 17042 len = rsm->r_end - rsm->r_start; 17043 sb_offset = rsm->r_start - tp->snd_una; 17044 sendalot = 0; 17045 if ((rack->full_size_rxt == 0) && 17046 (rack->shape_rxt_to_pacing_min == 0) && 17047 (len >= segsiz)) 17048 len = segsiz; 17049 /* 17050 * Delay removing the flag RACK_MUST_RXT so 17051 * that the fastpath for retransmit will 17052 * work with this rsm. 17053 */ 17054 } 17055 } 17056 /* 17057 * Enforce a connection sendmap count limit if set 17058 * as long as we are not retransmiting. 17059 */ 17060 if ((rsm == NULL) && 17061 (rack->do_detection == 0) && 17062 (V_tcp_map_entries_limit > 0) && 17063 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 17064 counter_u64_add(rack_to_alloc_limited, 1); 17065 if (!rack->alloc_limit_reported) { 17066 rack->alloc_limit_reported = 1; 17067 counter_u64_add(rack_alloc_limited_conns, 1); 17068 } 17069 so = inp->inp_socket; 17070 sb = &so->so_snd; 17071 goto just_return_nolock; 17072 } 17073 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { 17074 /* we are retransmitting the fin */ 17075 len--; 17076 if (len) { 17077 /* 17078 * When retransmitting data do *not* include the 17079 * FIN. This could happen from a TLP probe. 
17080 */ 17081 flags &= ~TH_FIN; 17082 } 17083 } 17084 if (rsm && rack->r_fsb_inited && rack_use_rsm_rfo && 17085 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { 17086 int ret; 17087 17088 ret = rack_fast_rsm_output(tp, rack, rsm, ts_val, cts, ms_cts, &tv, len, doing_tlp); 17089 if (ret == 0) 17090 return (0); 17091 } 17092 so = inp->inp_socket; 17093 sb = &so->so_snd; 17094 if (do_a_prefetch == 0) { 17095 kern_prefetch(sb, &do_a_prefetch); 17096 do_a_prefetch = 1; 17097 } 17098 #ifdef NETFLIX_SHARED_CWND 17099 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && 17100 rack->rack_enable_scwnd) { 17101 /* We are doing cwnd sharing */ 17102 if (rack->gp_ready && 17103 (rack->rack_attempted_scwnd == 0) && 17104 (rack->r_ctl.rc_scw == NULL) && 17105 tp->t_lib) { 17106 /* The pcbid is in, lets make an attempt */ 17107 counter_u64_add(rack_try_scwnd, 1); 17108 rack->rack_attempted_scwnd = 1; 17109 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, 17110 &rack->r_ctl.rc_scw_index, 17111 segsiz); 17112 } 17113 if (rack->r_ctl.rc_scw && 17114 (rack->rack_scwnd_is_idle == 1) && 17115 sbavail(&so->so_snd)) { 17116 /* we are no longer out of data */ 17117 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17118 rack->rack_scwnd_is_idle = 0; 17119 } 17120 if (rack->r_ctl.rc_scw) { 17121 /* First lets update and get the cwnd */ 17122 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, 17123 rack->r_ctl.rc_scw_index, 17124 tp->snd_cwnd, tp->snd_wnd, segsiz); 17125 } 17126 } 17127 #endif 17128 /* 17129 * Get standard flags, and add SYN or FIN if requested by 'hidden' 17130 * state flags. 17131 */ 17132 if (tp->t_flags & TF_NEEDFIN) 17133 flags |= TH_FIN; 17134 if (tp->t_flags & TF_NEEDSYN) 17135 flags |= TH_SYN; 17136 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 17137 void *end_rsm; 17138 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); 17139 if (end_rsm) 17140 kern_prefetch(end_rsm, &prefetch_rsm); 17141 prefetch_rsm = 1; 17142 } 17143 SOCKBUF_LOCK(sb); 17144 /* 17145 * If snd_nxt == snd_max and we have transmitted a FIN, the 17146 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 17147 * negative length. This can also occur when TCP opens up its 17148 * congestion window while receiving additional duplicate acks after 17149 * fast-retransmit because TCP will reset snd_nxt to snd_max after 17150 * the fast-retransmit. 17151 * 17152 * In the normal retransmit-FIN-only case, however, snd_nxt will be 17153 * set to snd_una, the sb_offset will be 0, and the length may wind 17154 * up 0. 17155 * 17156 * If sack_rxmit is true we are retransmitting from the scoreboard 17157 * in which case len is already set. 
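 * Otherwise len is computed below from what the socket buffer holds
 * beyond sb_offset, capped by the cwnd in use and, when we are in
 * recovery with PRR enabled, by rc_prr_sndcnt.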
17158 */ 17159 if ((sack_rxmit == 0) && 17160 (TCPS_HAVEESTABLISHED(tp->t_state) || IS_FASTOPEN(tp->t_flags))) { 17161 uint32_t avail; 17162 17163 avail = sbavail(sb); 17164 if (SEQ_GT(tp->snd_nxt, tp->snd_una) && avail) 17165 sb_offset = tp->snd_nxt - tp->snd_una; 17166 else 17167 sb_offset = 0; 17168 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { 17169 if (rack->r_ctl.rc_tlp_new_data) { 17170 /* TLP is forcing out new data */ 17171 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { 17172 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); 17173 } 17174 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { 17175 if (tp->snd_wnd > sb_offset) 17176 len = tp->snd_wnd - sb_offset; 17177 else 17178 len = 0; 17179 } else { 17180 len = rack->r_ctl.rc_tlp_new_data; 17181 } 17182 rack->r_ctl.rc_tlp_new_data = 0; 17183 } else { 17184 len = rack_what_can_we_send(tp, rack, cwnd_to_use, avail, sb_offset); 17185 } 17186 if ((rack->r_ctl.crte == NULL) && IN_FASTRECOVERY(tp->t_flags) && (len > segsiz)) { 17187 /* 17188 * For prr=off, we need to send only 1 MSS 17189 * at a time. We do this because another sack could 17190 * be arriving that causes us to send retransmits and 17191 * we don't want to be on a long pace due to a larger send 17192 * that keeps us from sending out the retransmit. 17193 */ 17194 len = segsiz; 17195 } 17196 } else { 17197 uint32_t outstanding; 17198 /* 17199 * We are inside of a Fast recovery episode, this 17200 * is caused by a SACK or 3 dup acks. At this point 17201 * we have sent all the retransmissions and we rely 17202 * on PRR to dictate what we will send in the form of 17203 * new data. 17204 */ 17205 17206 outstanding = tp->snd_max - tp->snd_una; 17207 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { 17208 if (tp->snd_wnd > outstanding) { 17209 len = tp->snd_wnd - outstanding; 17210 /* Check to see if we have the data */ 17211 if ((sb_offset + len) > avail) { 17212 /* It does not all fit */ 17213 if (avail > sb_offset) 17214 len = avail - sb_offset; 17215 else 17216 len = 0; 17217 } 17218 } else { 17219 len = 0; 17220 } 17221 } else if (avail > sb_offset) { 17222 len = avail - sb_offset; 17223 } else { 17224 len = 0; 17225 } 17226 if (len > 0) { 17227 if (len > rack->r_ctl.rc_prr_sndcnt) { 17228 len = rack->r_ctl.rc_prr_sndcnt; 17229 } 17230 if (len > 0) { 17231 sub_from_prr = 1; 17232 } 17233 } 17234 if (len > segsiz) { 17235 /* 17236 * We should never send more than a MSS when 17237 * retransmitting or sending new data in prr 17238 * mode unless the override flag is on. Most 17239 * likely the PRR algorithm is not going to 17240 * let us send a lot as well :-) 17241 */ 17242 if (rack->r_ctl.rc_prr_sendalot == 0) { 17243 len = segsiz; 17244 } 17245 } else if (len < segsiz) { 17246 /* 17247 * Do we send any? The idea here is if the 17248 * send empty's the socket buffer we want to 17249 * do it. However if not then lets just wait 17250 * for our prr_sndcnt to get bigger. 17251 */ 17252 long leftinsb; 17253 17254 leftinsb = sbavail(sb) - sb_offset; 17255 if (leftinsb > len) { 17256 /* This send does not empty the sb */ 17257 len = 0; 17258 } 17259 } 17260 } 17261 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { 17262 /* 17263 * If you have not established 17264 * and are not doing FAST OPEN 17265 * no data please. 
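 * (Fast-open is the exception because a TFO SYN is allowed to carry
 * data before the handshake completes.)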
17266 */ 17267 if ((sack_rxmit == 0) && 17268 (!IS_FASTOPEN(tp->t_flags))){ 17269 len = 0; 17270 sb_offset = 0; 17271 } 17272 } 17273 if (prefetch_so_done == 0) { 17274 kern_prefetch(so, &prefetch_so_done); 17275 prefetch_so_done = 1; 17276 } 17277 /* 17278 * Lop off SYN bit if it has already been sent. However, if this is 17279 * SYN-SENT state and if segment contains data and if we don't know 17280 * that foreign host supports TAO, suppress sending segment. 17281 */ 17282 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una) && 17283 ((sack_rxmit == 0) && (tp->t_rxtshift == 0))) { 17284 /* 17285 * When sending additional segments following a TFO SYN|ACK, 17286 * do not include the SYN bit. 17287 */ 17288 if (IS_FASTOPEN(tp->t_flags) && 17289 (tp->t_state == TCPS_SYN_RECEIVED)) 17290 flags &= ~TH_SYN; 17291 } 17292 /* 17293 * Be careful not to send data and/or FIN on SYN segments. This 17294 * measure is needed to prevent interoperability problems with not 17295 * fully conformant TCP implementations. 17296 */ 17297 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 17298 len = 0; 17299 flags &= ~TH_FIN; 17300 } 17301 /* 17302 * On TFO sockets, ensure no data is sent in the following cases: 17303 * 17304 * - When retransmitting SYN|ACK on a passively-created socket 17305 * 17306 * - When retransmitting SYN on an actively created socket 17307 * 17308 * - When sending a zero-length cookie (cookie request) on an 17309 * actively created socket 17310 * 17311 * - When the socket is in the CLOSED state (RST is being sent) 17312 */ 17313 if (IS_FASTOPEN(tp->t_flags) && 17314 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 17315 ((tp->t_state == TCPS_SYN_SENT) && 17316 (tp->t_tfo_client_cookie_len == 0)) || 17317 (flags & TH_RST))) { 17318 sack_rxmit = 0; 17319 len = 0; 17320 } 17321 /* Without fast-open there should never be data sent on a SYN */ 17322 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) { 17323 tp->snd_nxt = tp->iss; 17324 len = 0; 17325 } 17326 if ((len > segsiz) && (tcp_dsack_block_exists(tp))) { 17327 /* We only send 1 MSS if we have a DSACK block */ 17328 add_flag |= RACK_SENT_W_DSACK; 17329 len = segsiz; 17330 } 17331 orig_len = len; 17332 if (len <= 0) { 17333 /* 17334 * If FIN has been sent but not acked, but we haven't been 17335 * called to retransmit, len will be < 0. Otherwise, window 17336 * shrank after we sent into it. If window shrank to 0, 17337 * cancel pending retransmit, pull snd_nxt back to (closed) 17338 * window, and set the persist timer if it isn't already 17339 * going. If the window didn't close completely, just wait 17340 * for an ACK. 17341 * 17342 * We also do a general check here to ensure that we will 17343 * set the persist timer when we have data to send, but a 17344 * 0-byte window. This makes sure the persist timer is set 17345 * even if the packet hits one of the "goto send" lines 17346 * below. 17347 */ 17348 len = 0; 17349 if ((tp->snd_wnd == 0) && 17350 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17351 (tp->snd_una == tp->snd_max) && 17352 (sb_offset < (int)sbavail(sb))) { 17353 rack_enter_persist(tp, rack, cts); 17354 } 17355 } else if ((rsm == NULL) && 17356 (doing_tlp == 0) && 17357 (len < pace_max_seg)) { 17358 /* 17359 * We are not sending a maximum sized segment for 17360 * some reason. Should we not send anything (think 17361 * sws or persists)? 
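 * The branches below implement silly-window style avoidance for a
 * pacing stack: sub-MSS sends are suppressed when the receive window,
 * the cwnd or hardware pacing would leave us dribbling out small
 * segments, unless the send would drain the socket buffer.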
17362 */ 17363 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17364 (TCPS_HAVEESTABLISHED(tp->t_state)) && 17365 (len < minseg) && 17366 (len < (int)(sbavail(sb) - sb_offset))) { 17367 /* 17368 * Here the rwnd is less than 17369 * the minimum pacing size, this is not a retransmit, 17370 * we are established and 17371 * the send is not the last in the socket buffer 17372 * we send nothing, and we may enter persists 17373 * if nothing is outstanding. 17374 */ 17375 len = 0; 17376 if (tp->snd_max == tp->snd_una) { 17377 /* 17378 * Nothing out we can 17379 * go into persists. 17380 */ 17381 rack_enter_persist(tp, rack, cts); 17382 } 17383 } else if ((cwnd_to_use >= max(minseg, (segsiz * 4))) && 17384 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17385 (len < (int)(sbavail(sb) - sb_offset)) && 17386 (len < minseg)) { 17387 /* 17388 * Here we are not retransmitting, and 17389 * the cwnd is not so small that we could 17390 * not send at least a min size (rxt timer 17391 * not having gone off), We have 2 segments or 17392 * more already in flight, its not the tail end 17393 * of the socket buffer and the cwnd is blocking 17394 * us from sending out a minimum pacing segment size. 17395 * Lets not send anything. 17396 */ 17397 len = 0; 17398 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 17399 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && 17400 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && 17401 (len < (int)(sbavail(sb) - sb_offset)) && 17402 (TCPS_HAVEESTABLISHED(tp->t_state))) { 17403 /* 17404 * Here we have a send window but we have 17405 * filled it up and we can't send another pacing segment. 17406 * We also have in flight more than 2 segments 17407 * and we are not completing the sb i.e. we allow 17408 * the last bytes of the sb to go out even if 17409 * its not a full pacing segment. 17410 */ 17411 len = 0; 17412 } else if ((rack->r_ctl.crte != NULL) && 17413 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && 17414 (cwnd_to_use >= (pace_max_seg + (4 * segsiz))) && 17415 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && 17416 (len < (int)(sbavail(sb) - sb_offset))) { 17417 /* 17418 * Here we are doing hardware pacing, this is not a TLP, 17419 * we are not sending a pace max segment size, there is rwnd 17420 * room to send at least N pace_max_seg, the cwnd is greater 17421 * than or equal to a full pacing segments plus 4 mss and we have 2 or 17422 * more segments in flight and its not the tail of the socket buffer. 17423 * 17424 * We don't want to send instead we need to get more ack's in to 17425 * allow us to send a full pacing segment. Normally, if we are pacing 17426 * about the right speed, we should have finished our pacing 17427 * send as most of the acks have come back if we are at the 17428 * right rate. This is a bit fuzzy since return path delay 17429 * can delay the acks, which is why we want to make sure we 17430 * have cwnd space to have a bit more than a max pace segments in flight. 17431 * 17432 * If we have not gotten our acks back we are pacing at too high a 17433 * rate delaying will not hurt and will bring our GP estimate down by 17434 * injecting the delay. If we don't do this we will send 17435 * 2 MSS out in response to the acks being clocked in which 17436 * defeats the point of hw-pacing (i.e. to help us get 17437 * larger TSO's out). 17438 */ 17439 len = 0; 17440 17441 } 17442 17443 } 17444 /* len will be >= 0 after this point. 
 */
    KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));
    rack_sndbuf_autoscale(rack);
    /*
     * Decide if we can use TCP Segmentation Offloading (if supported by
     * hardware).
     *
     * TSO may only be used if we are in a pure bulk sending state. The
     * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP
     * options prevent using TSO. With TSO the TCP header is the same
     * (except for the sequence number) for all generated packets. This
     * makes it impossible to transmit any options which vary per
     * generated segment or packet.
     *
     * IPv4 handling has a clear separation of ip options and ip header
     * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() does
     * the right thing below to provide length of just ip options and thus
     * checking for ipoptlen is enough to decide if ip options are present.
     */
    ipoptlen = 0;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
    /*
     * Pre-calculate here as we save another lookup into the darknesses
     * of IPsec that way and can actually decide if TSO is ok.
     */
#ifdef INET6
    if (isipv6 && IPSEC_ENABLED(ipv6))
        ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp);
#ifdef INET
    else
#endif
#endif	/* INET6 */
#ifdef INET
    if (IPSEC_ENABLED(ipv4))
        ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp);
#endif	/* INET */
#endif

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
    ipoptlen += ipsec_optlen;
#endif
    if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
        (tp->t_port == 0) &&
        ((tp->t_flags & TF_SIGNATURE) == 0) &&
        tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
        ipoptlen == 0)
        tso = 1;
    {
        uint32_t outstanding __unused;

        outstanding = tp->snd_max - tp->snd_una;
        if (tp->t_flags & TF_SENTFIN) {
            /*
             * If we sent a fin, snd_max is 1 higher than
             * snd_una
             */
            outstanding--;
        }
        if (sack_rxmit) {
            if ((rsm->r_flags & RACK_HAS_FIN) == 0)
                flags &= ~TH_FIN;
        } else {
            if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
                sbused(sb)))
                flags &= ~TH_FIN;
        }
    }
    recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
        (long)TCP_MAXWIN << tp->rcv_scale);

    /*
     * Sender silly window avoidance. We transmit under the following
     * conditions when len is non-zero:
     *
     * - We have a full segment (or more with TSO)
     * - This is the last buffer in a write()/send() and we are either
     *   idle or running NODELAY
     * - we've timed out (e.g. persist timer)
     * - we have more than 1/2 the maximum send window's worth of data
     *   (receiver may be limited by the window size)
     * - we need to retransmit
     */
    if (len) {
        if (len >= segsiz) {
            goto send;
        }
        /*
         * NOTE! on localhost connections an 'ack' from the remote
         * end may occur synchronously with the output and cause us
         * to flush a buffer queued with moretocome.
         * XXX
         *
         */
        if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
            (idle || (tp->t_flags & TF_NODELAY)) &&
            ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) &&
            (tp->t_flags & TF_NOPUSH) == 0) {
            pass = 2;
            goto send;
        }
        if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
            pass = 22;
            goto send;
        }
        if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
            pass = 4;
            goto send;
        }
        if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {	/* retransmit case */
            pass = 5;
            goto send;
        }
        if (sack_rxmit) {
            pass = 6;
            goto send;
        }
        if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
            (ctf_outstanding(tp) < (segsiz * 2))) {
            /*
             * We have less than two MSS outstanding (delayed ack)
             * and our rwnd will not let us send a full sized
             * MSS. Let's go ahead and let this small segment
             * out because we want to try to have at least two
             * packets inflight to not be caught by delayed ack.
             */
            pass = 12;
            goto send;
        }
    }
    /*
     * Sending of standalone window updates.
     *
     * Window updates are important when we close our window due to a
     * full socket buffer and are opening it again after the application
     * reads data from it. Once the window has opened again and the
     * remote end starts to send again the ACK clock takes over and
     * provides the most current window information.
     *
     * We must avoid the silly window syndrome whereby every read from
     * the receive buffer, no matter how small, causes a window update
     * to be sent. We also should avoid sending a flurry of window
     * updates when the socket buffer had queued a lot of data and the
     * application is doing small reads.
     *
     * Prevent a flurry of pointless window updates by only sending an
     * update when we can increase the advertised window by more than
     * 1/4th of the socket buffer capacity. When the buffer is getting
     * full or is very small be more aggressive and send an update
     * whenever we can increase by two mss sized segments. In all other
     * situations the ACK's to new incoming data will carry further
     * window increases.
     *
     * Don't send an independent window update if a delayed ACK is
     * pending (it will get piggy-backed on it) or the remote side
     * already has done a half-close and won't send more data. Skip
     * this if the connection is in T/TCP half-open state.
     */
    if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
        !(tp->t_flags & TF_DELACK) &&
        !TCPS_HAVERCVDFIN(tp->t_state)) {
        /*
         * "adv" is the amount we could increase the window, taking
         * into account that we are limited by TCP_MAXWIN <<
         * tp->rcv_scale.
         */
        int32_t adv;
        int oldwin;

        adv = recwin;
        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
            oldwin = (tp->rcv_adv - tp->rcv_nxt);
            if (adv > oldwin)
                adv -= oldwin;
            else {
                /* We can't increase the window */
                adv = 0;
            }
        } else
            oldwin = 0;

        /*
         * If the new window size ends up being the same as or less
         * than the old size when it is scaled, then don't force
         * a window update.
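         * (After right-shifting by rcv_scale the peer would see no
         * increase, so sending the update would be pointless.)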
17625 */ 17626 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) 17627 goto dontupdate; 17628 17629 if (adv >= (int32_t)(2 * segsiz) && 17630 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || 17631 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || 17632 so->so_rcv.sb_hiwat <= 8 * segsiz)) { 17633 pass = 7; 17634 goto send; 17635 } 17636 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { 17637 pass = 23; 17638 goto send; 17639 } 17640 } 17641 dontupdate: 17642 17643 /* 17644 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW 17645 * is also a catch-all for the retransmit timer timeout case. 17646 */ 17647 if (tp->t_flags & TF_ACKNOW) { 17648 pass = 8; 17649 goto send; 17650 } 17651 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { 17652 pass = 9; 17653 goto send; 17654 } 17655 /* 17656 * If our state indicates that FIN should be sent and we have not 17657 * yet done so, then we need to send. 17658 */ 17659 if ((flags & TH_FIN) && 17660 (tp->snd_nxt == tp->snd_una)) { 17661 pass = 11; 17662 goto send; 17663 } 17664 /* 17665 * No reason to send a segment, just return. 17666 */ 17667 just_return: 17668 SOCKBUF_UNLOCK(sb); 17669 just_return_nolock: 17670 { 17671 int app_limited = CTF_JR_SENT_DATA; 17672 17673 if (tot_len_this_send > 0) { 17674 /* Make sure snd_nxt is up to max */ 17675 rack->r_ctl.fsb.recwin = recwin; 17676 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, NULL, segsiz); 17677 if ((error == 0) && 17678 rack_use_rfo && 17679 ((flags & (TH_SYN|TH_FIN)) == 0) && 17680 (ipoptlen == 0) && 17681 (tp->snd_nxt == tp->snd_max) && 17682 (tp->rcv_numsacks == 0) && 17683 rack->r_fsb_inited && 17684 TCPS_HAVEESTABLISHED(tp->t_state) && 17685 (rack->r_must_retran == 0) && 17686 ((tp->t_flags & TF_NEEDFIN) == 0) && 17687 (len > 0) && (orig_len > 0) && 17688 (orig_len > len) && 17689 ((orig_len - len) >= segsiz) && 17690 ((optlen == 0) || 17691 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 17692 /* We can send at least one more MSS using our fsb */ 17693 17694 rack->r_fast_output = 1; 17695 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 17696 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 17697 rack->r_ctl.fsb.tcp_flags = flags; 17698 rack->r_ctl.fsb.left_to_send = orig_len - len; 17699 if (hw_tls) 17700 rack->r_ctl.fsb.hw_tls = 1; 17701 else 17702 rack->r_ctl.fsb.hw_tls = 0; 17703 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 17704 ("rack:%p left_to_send:%u sbavail:%u out:%u", 17705 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 17706 (tp->snd_max - tp->snd_una))); 17707 if (rack->r_ctl.fsb.left_to_send < segsiz) 17708 rack->r_fast_output = 0; 17709 else { 17710 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 17711 rack->r_ctl.fsb.rfo_apply_push = 1; 17712 else 17713 rack->r_ctl.fsb.rfo_apply_push = 0; 17714 } 17715 } else 17716 rack->r_fast_output = 0; 17717 17718 17719 rack_log_fsb(rack, tp, so, flags, 17720 ipoptlen, orig_len, len, 0, 17721 1, optlen, __LINE__, 1); 17722 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 17723 tp->snd_nxt = tp->snd_max; 17724 } else { 17725 int end_window = 0; 17726 uint32_t seq = tp->gput_ack; 17727 17728 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17729 if (rsm) { 17730 /* 17731 * Mark the last sent that we just-returned (hinting 17732 * that delayed ack may play a role in any rtt measurement). 
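 * (The r_just_ret flag is meant to be consulted later when an RTT
 * sample is taken from this rsm, since an ack for the last data of a
 * burst is more likely to have been delayed.)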
17733 */ 17734 rsm->r_just_ret = 1; 17735 } 17736 counter_u64_add(rack_out_size[TCP_MSS_ACCT_JUSTRET], 1); 17737 rack->r_ctl.rc_agg_delayed = 0; 17738 rack->r_early = 0; 17739 rack->r_late = 0; 17740 rack->r_ctl.rc_agg_early = 0; 17741 if ((ctf_outstanding(tp) + 17742 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), 17743 minseg)) >= tp->snd_wnd) { 17744 /* We are limited by the rwnd */ 17745 app_limited = CTF_JR_RWND_LIMITED; 17746 if (IN_FASTRECOVERY(tp->t_flags)) 17747 rack->r_ctl.rc_prr_sndcnt = 0; 17748 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 17749 /* We are limited by whats available -- app limited */ 17750 app_limited = CTF_JR_APP_LIMITED; 17751 if (IN_FASTRECOVERY(tp->t_flags)) 17752 rack->r_ctl.rc_prr_sndcnt = 0; 17753 } else if ((idle == 0) && 17754 ((tp->t_flags & TF_NODELAY) == 0) && 17755 ((uint32_t)len + (uint32_t)sb_offset >= sbavail(sb)) && 17756 (len < segsiz)) { 17757 /* 17758 * No delay is not on and the 17759 * user is sending less than 1MSS. This 17760 * brings out SWS avoidance so we 17761 * don't send. Another app-limited case. 17762 */ 17763 app_limited = CTF_JR_APP_LIMITED; 17764 } else if (tp->t_flags & TF_NOPUSH) { 17765 /* 17766 * The user has requested no push of 17767 * the last segment and we are 17768 * at the last segment. Another app 17769 * limited case. 17770 */ 17771 app_limited = CTF_JR_APP_LIMITED; 17772 } else if ((ctf_outstanding(tp) + minseg) > cwnd_to_use) { 17773 /* Its the cwnd */ 17774 app_limited = CTF_JR_CWND_LIMITED; 17775 } else if (IN_FASTRECOVERY(tp->t_flags) && 17776 (rack->rack_no_prr == 0) && 17777 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { 17778 app_limited = CTF_JR_PRR; 17779 } else { 17780 /* Now why here are we not sending? */ 17781 #ifdef NOW 17782 #ifdef INVARIANTS 17783 panic("rack:%p hit JR_ASSESSING case cwnd_to_use:%u?", rack, cwnd_to_use); 17784 #endif 17785 #endif 17786 app_limited = CTF_JR_ASSESSING; 17787 } 17788 /* 17789 * App limited in some fashion, for our pacing GP 17790 * measurements we don't want any gap (even cwnd). 17791 * Close down the measurement window. 17792 */ 17793 if (rack_cwnd_block_ends_measure && 17794 ((app_limited == CTF_JR_CWND_LIMITED) || 17795 (app_limited == CTF_JR_PRR))) { 17796 /* 17797 * The reason we are not sending is 17798 * the cwnd (or prr). We have been configured 17799 * to end the measurement window in 17800 * this case. 17801 */ 17802 end_window = 1; 17803 } else if (rack_rwnd_block_ends_measure && 17804 (app_limited == CTF_JR_RWND_LIMITED)) { 17805 /* 17806 * We are rwnd limited and have been 17807 * configured to end the measurement 17808 * window in this case. 17809 */ 17810 end_window = 1; 17811 } else if (app_limited == CTF_JR_APP_LIMITED) { 17812 /* 17813 * A true application limited period, we have 17814 * ran out of data. 17815 */ 17816 end_window = 1; 17817 } else if (app_limited == CTF_JR_ASSESSING) { 17818 /* 17819 * In the assessing case we hit the end of 17820 * the if/else and had no known reason 17821 * This will panic us under invariants.. 17822 * 17823 * If we get this out in logs we need to 17824 * investagate which reason we missed. 17825 */ 17826 end_window = 1; 17827 } 17828 if (end_window) { 17829 uint8_t log = 0; 17830 17831 /* Adjust the Gput measurement */ 17832 if ((tp->t_flags & TF_GPUTINPROG) && 17833 SEQ_GT(tp->gput_ack, tp->snd_max)) { 17834 tp->gput_ack = tp->snd_max; 17835 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { 17836 /* 17837 * There is not enough to measure. 
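 * Fewer than MIN_GP_WIN segments would be covered, so the goodput
 * measurement in progress is abandoned and the event logged.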
17838 */ 17839 tp->t_flags &= ~TF_GPUTINPROG; 17840 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, 17841 rack->r_ctl.rc_gp_srtt /*flex1*/, 17842 tp->gput_seq, 17843 0, 0, 18, __LINE__, NULL, 0); 17844 } else 17845 log = 1; 17846 } 17847 /* Mark the last packet has app limited */ 17848 rsm = RB_MAX(rack_rb_tree_head, &rack->r_ctl.rc_mtree); 17849 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { 17850 if (rack->r_ctl.rc_app_limited_cnt == 0) 17851 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; 17852 else { 17853 /* 17854 * Go out to the end app limited and mark 17855 * this new one as next and move the end_appl up 17856 * to this guy. 17857 */ 17858 if (rack->r_ctl.rc_end_appl) 17859 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; 17860 rack->r_ctl.rc_end_appl = rsm; 17861 } 17862 rsm->r_flags |= RACK_APP_LIMITED; 17863 rack->r_ctl.rc_app_limited_cnt++; 17864 } 17865 if (log) 17866 rack_log_pacing_delay_calc(rack, 17867 rack->r_ctl.rc_app_limited_cnt, seq, 17868 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); 17869 } 17870 } 17871 /* Check if we need to go into persists or not */ 17872 if ((tp->snd_max == tp->snd_una) && 17873 TCPS_HAVEESTABLISHED(tp->t_state) && 17874 sbavail(sb) && 17875 (sbavail(sb) > tp->snd_wnd) && 17876 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { 17877 /* Yes lets make sure to move to persist before timer-start */ 17878 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime); 17879 } 17880 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, sup_rack); 17881 rack_log_type_just_return(rack, cts, tot_len_this_send, slot, hpts_calling, app_limited, cwnd_to_use); 17882 } 17883 #ifdef NETFLIX_SHARED_CWND 17884 if ((sbavail(sb) == 0) && 17885 rack->r_ctl.rc_scw) { 17886 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); 17887 rack->rack_scwnd_is_idle = 1; 17888 } 17889 #endif 17890 #ifdef TCP_ACCOUNTING 17891 if (tot_len_this_send > 0) { 17892 crtsc = get_cyclecount(); 17893 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17894 tp->tcp_cnt_counters[SND_OUT_DATA]++; 17895 } 17896 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 17897 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17898 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); 17899 } 17900 counter_u64_add(tcp_proc_time[SND_OUT_DATA], (crtsc - ts_val)); 17901 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17902 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); 17903 } 17904 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) / segsiz)); 17905 } else { 17906 crtsc = get_cyclecount(); 17907 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17908 tp->tcp_cnt_counters[SND_LIMITED]++; 17909 } 17910 counter_u64_add(tcp_cnt_counters[SND_LIMITED], 1); 17911 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 17912 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); 17913 } 17914 counter_u64_add(tcp_proc_time[SND_LIMITED], (crtsc - ts_val)); 17915 } 17916 sched_unpin(); 17917 #endif 17918 return (0); 17919 17920 send: 17921 if (rsm || sack_rxmit) 17922 counter_u64_add(rack_nfto_resend, 1); 17923 else 17924 counter_u64_add(rack_non_fto_send, 1); 17925 if ((flags & TH_FIN) && 17926 sbavail(sb)) { 17927 /* 17928 * We do not transmit a FIN 17929 * with data outstanding. We 17930 * need to make it so all data 17931 * is acked first. 
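 * (sbavail() is non-zero, meaning bytes are still unsent or
 * unacknowledged; the FIN is stripped here and will go out on a later
 * pass once the data has been acked.)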
17932 */ 17933 flags &= ~TH_FIN; 17934 } 17935 /* Enforce stack imposed max seg size if we have one */ 17936 if (rack->r_ctl.rc_pace_max_segs && 17937 (len > rack->r_ctl.rc_pace_max_segs)) { 17938 mark = 1; 17939 len = rack->r_ctl.rc_pace_max_segs; 17940 } 17941 SOCKBUF_LOCK_ASSERT(sb); 17942 if (len > 0) { 17943 if (len >= segsiz) 17944 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 17945 else 17946 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 17947 } 17948 /* 17949 * Before ESTABLISHED, force sending of initial options unless TCP 17950 * set not to do any options. NOTE: we assume that the IP/TCP header 17951 * plus TCP options always fit in a single mbuf, leaving room for a 17952 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 17953 * + optlen <= MCLBYTES 17954 */ 17955 optlen = 0; 17956 #ifdef INET6 17957 if (isipv6) 17958 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 17959 else 17960 #endif 17961 hdrlen = sizeof(struct tcpiphdr); 17962 17963 /* 17964 * Compute options for segment. We only have to care about SYN and 17965 * established connection segments. Options for SYN-ACK segments 17966 * are handled in TCP syncache. 17967 */ 17968 to.to_flags = 0; 17969 if ((tp->t_flags & TF_NOOPT) == 0) { 17970 /* Maximum segment size. */ 17971 if (flags & TH_SYN) { 17972 tp->snd_nxt = tp->iss; 17973 to.to_mss = tcp_mssopt(&inp->inp_inc); 17974 if (tp->t_port) 17975 to.to_mss -= V_tcp_udp_tunneling_overhead; 17976 to.to_flags |= TOF_MSS; 17977 17978 /* 17979 * On SYN or SYN|ACK transmits on TFO connections, 17980 * only include the TFO option if it is not a 17981 * retransmit, as the presence of the TFO option may 17982 * have caused the original SYN or SYN|ACK to have 17983 * been dropped by a middlebox. 17984 */ 17985 if (IS_FASTOPEN(tp->t_flags) && 17986 (tp->t_rxtshift == 0)) { 17987 if (tp->t_state == TCPS_SYN_RECEIVED) { 17988 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 17989 to.to_tfo_cookie = 17990 (u_int8_t *)&tp->t_tfo_cookie.server; 17991 to.to_flags |= TOF_FASTOPEN; 17992 wanted_cookie = 1; 17993 } else if (tp->t_state == TCPS_SYN_SENT) { 17994 to.to_tfo_len = 17995 tp->t_tfo_client_cookie_len; 17996 to.to_tfo_cookie = 17997 tp->t_tfo_cookie.client; 17998 to.to_flags |= TOF_FASTOPEN; 17999 wanted_cookie = 1; 18000 /* 18001 * If we wind up having more data to 18002 * send with the SYN than can fit in 18003 * one segment, don't send any more 18004 * until the SYN|ACK comes back from 18005 * the other end. 18006 */ 18007 sendalot = 0; 18008 } 18009 } 18010 } 18011 /* Window scaling. */ 18012 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 18013 to.to_wscale = tp->request_r_scale; 18014 to.to_flags |= TOF_SCALE; 18015 } 18016 /* Timestamps. */ 18017 if ((tp->t_flags & TF_RCVD_TSTMP) || 18018 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 18019 to.to_tsval = ms_cts + tp->ts_offset; 18020 to.to_tsecr = tp->ts_recent; 18021 to.to_flags |= TOF_TS; 18022 } 18023 /* Set receive buffer autosizing timestamp. */ 18024 if (tp->rfbuf_ts == 0 && 18025 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 18026 tp->rfbuf_ts = tcp_ts_getticks(); 18027 /* Selective ACK's. */ 18028 if (tp->t_flags & TF_SACK_PERMIT) { 18029 if (flags & TH_SYN) 18030 to.to_flags |= TOF_SACKPERM; 18031 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 18032 tp->rcv_numsacks > 0) { 18033 to.to_flags |= TOF_SACK; 18034 to.to_nsacks = tp->rcv_numsacks; 18035 to.to_sacks = (u_char *)tp->sackblks; 18036 } 18037 } 18038 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 18039 /* TCP-MD5 (RFC2385). 
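 * Setting TOF_SIGNATURE here only requests the option; the MD5 digest
 * itself is computed further down, once the header and options have
 * been copied into the mbuf.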
*/ 18040 if (tp->t_flags & TF_SIGNATURE) 18041 to.to_flags |= TOF_SIGNATURE; 18042 #endif /* TCP_SIGNATURE */ 18043 18044 /* Processing the options. */ 18045 hdrlen += optlen = tcp_addoptions(&to, opt); 18046 /* 18047 * If we wanted a TFO option to be added, but it was unable 18048 * to fit, ensure no data is sent. 18049 */ 18050 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 18051 !(to.to_flags & TOF_FASTOPEN)) 18052 len = 0; 18053 } 18054 if (tp->t_port) { 18055 if (V_tcp_udp_tunneling_port == 0) { 18056 /* The port was removed?? */ 18057 SOCKBUF_UNLOCK(&so->so_snd); 18058 #ifdef TCP_ACCOUNTING 18059 crtsc = get_cyclecount(); 18060 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18061 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18062 } 18063 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18064 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18065 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18066 } 18067 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18068 sched_unpin(); 18069 #endif 18070 return (EHOSTUNREACH); 18071 } 18072 hdrlen += sizeof(struct udphdr); 18073 } 18074 #ifdef INET6 18075 if (isipv6) 18076 ipoptlen = ip6_optlen(inp); 18077 else 18078 #endif 18079 if (inp->inp_options) 18080 ipoptlen = inp->inp_options->m_len - 18081 offsetof(struct ipoption, ipopt_list); 18082 else 18083 ipoptlen = 0; 18084 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18085 ipoptlen += ipsec_optlen; 18086 #endif 18087 18088 /* 18089 * Adjust data length if insertion of options will bump the packet 18090 * length beyond the t_maxseg length. Clear the FIN bit because we 18091 * cut off the tail of the segment. 18092 */ 18093 if (len + optlen + ipoptlen > tp->t_maxseg) { 18094 if (tso) { 18095 uint32_t if_hw_tsomax; 18096 uint32_t moff; 18097 int32_t max_len; 18098 18099 /* extract TSO information */ 18100 if_hw_tsomax = tp->t_tsomax; 18101 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 18102 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 18103 KASSERT(ipoptlen == 0, 18104 ("%s: TSO can't do IP options", __func__)); 18105 18106 /* 18107 * Check if we should limit by maximum payload 18108 * length: 18109 */ 18110 if (if_hw_tsomax != 0) { 18111 /* compute maximum TSO length */ 18112 max_len = (if_hw_tsomax - hdrlen - 18113 max_linkhdr); 18114 if (max_len <= 0) { 18115 len = 0; 18116 } else if (len > max_len) { 18117 sendalot = 1; 18118 len = max_len; 18119 mark = 2; 18120 } 18121 } 18122 /* 18123 * Prevent the last segment from being fractional 18124 * unless the send sockbuf can be emptied: 18125 */ 18126 max_len = (tp->t_maxseg - optlen); 18127 if ((sb_offset + len) < sbavail(sb)) { 18128 moff = len % (u_int)max_len; 18129 if (moff != 0) { 18130 mark = 3; 18131 len -= moff; 18132 } 18133 } 18134 /* 18135 * In case there are too many small fragments don't 18136 * use TSO: 18137 */ 18138 if (len <= segsiz) { 18139 mark = 4; 18140 tso = 0; 18141 } 18142 /* 18143 * Send the FIN in a separate segment after the bulk 18144 * sending is done. We don't trust the TSO 18145 * implementations to clear the FIN flag on all but 18146 * the last segment. 18147 */ 18148 if (tp->t_flags & TF_NEEDFIN) { 18149 sendalot = 4; 18150 } 18151 } else { 18152 mark = 5; 18153 if (optlen + ipoptlen >= tp->t_maxseg) { 18154 /* 18155 * Since we don't have enough space to put 18156 * the IP header chain and the TCP header in 18157 * one packet as required by RFC 7112, don't 18158 * send it. Also ensure that at least one 18159 * byte of the payload can be put into the 18160 * TCP segment. 
18161 */ 18162 SOCKBUF_UNLOCK(&so->so_snd); 18163 error = EMSGSIZE; 18164 sack_rxmit = 0; 18165 goto out; 18166 } 18167 len = tp->t_maxseg - optlen - ipoptlen; 18168 sendalot = 5; 18169 } 18170 } else { 18171 tso = 0; 18172 mark = 6; 18173 } 18174 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 18175 ("%s: len > IP_MAXPACKET", __func__)); 18176 #ifdef DIAGNOSTIC 18177 #ifdef INET6 18178 if (max_linkhdr + hdrlen > MCLBYTES) 18179 #else 18180 if (max_linkhdr + hdrlen > MHLEN) 18181 #endif 18182 panic("tcphdr too big"); 18183 #endif 18184 18185 /* 18186 * This KASSERT is here to catch edge cases at a well defined place. 18187 * Before, those had triggered (random) panic conditions further 18188 * down. 18189 */ 18190 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 18191 if ((len == 0) && 18192 (flags & TH_FIN) && 18193 (sbused(sb))) { 18194 /* 18195 * We have outstanding data, don't send a fin by itself!. 18196 */ 18197 goto just_return; 18198 } 18199 /* 18200 * Grab a header mbuf, attaching a copy of data to be transmitted, 18201 * and initialize the header from the template for sends on this 18202 * connection. 18203 */ 18204 hw_tls = (sb->sb_flags & SB_TLS_IFNET) != 0; 18205 if (len) { 18206 uint32_t max_val; 18207 uint32_t moff; 18208 18209 if (rack->r_ctl.rc_pace_max_segs) 18210 max_val = rack->r_ctl.rc_pace_max_segs; 18211 else if (rack->rc_user_set_max_segs) 18212 max_val = rack->rc_user_set_max_segs * segsiz; 18213 else 18214 max_val = len; 18215 /* 18216 * We allow a limit on sending with hptsi. 18217 */ 18218 if (len > max_val) { 18219 mark = 7; 18220 len = max_val; 18221 } 18222 #ifdef INET6 18223 if (MHLEN < hdrlen + max_linkhdr) 18224 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18225 else 18226 #endif 18227 m = m_gethdr(M_NOWAIT, MT_DATA); 18228 18229 if (m == NULL) { 18230 SOCKBUF_UNLOCK(sb); 18231 error = ENOBUFS; 18232 sack_rxmit = 0; 18233 goto out; 18234 } 18235 m->m_data += max_linkhdr; 18236 m->m_len = hdrlen; 18237 18238 /* 18239 * Start the m_copy functions from the closest mbuf to the 18240 * sb_offset in the socket buffer chain. 18241 */ 18242 mb = sbsndptr_noadv(sb, sb_offset, &moff); 18243 s_mb = mb; 18244 s_moff = moff; 18245 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 18246 m_copydata(mb, moff, (int)len, 18247 mtod(m, caddr_t)+hdrlen); 18248 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18249 sbsndptr_adv(sb, mb, len); 18250 m->m_len += len; 18251 } else { 18252 struct sockbuf *msb; 18253 18254 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) 18255 msb = NULL; 18256 else 18257 msb = sb; 18258 m->m_next = tcp_m_copym( 18259 mb, moff, &len, 18260 if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb, 18261 ((rsm == NULL) ? hw_tls : 0) 18262 #ifdef NETFLIX_COPY_ARGS 18263 , &s_mb, &s_moff 18264 #endif 18265 ); 18266 if (len <= (tp->t_maxseg - optlen)) { 18267 /* 18268 * Must have ran out of mbufs for the copy 18269 * shorten it to no longer need tso. Lets 18270 * not put on sendalot since we are low on 18271 * mbufs. 
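 * (tcp_m_copym() trimmed len down to what it could actually copy, so
 * the remaining payload fits without needing TSO.)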
18272 */ 18273 tso = 0; 18274 } 18275 if (m->m_next == NULL) { 18276 SOCKBUF_UNLOCK(sb); 18277 (void)m_free(m); 18278 error = ENOBUFS; 18279 sack_rxmit = 0; 18280 goto out; 18281 } 18282 } 18283 if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { 18284 if (rsm && (rsm->r_flags & RACK_TLP)) { 18285 /* 18286 * TLP should not count in retran count, but 18287 * in its own bin 18288 */ 18289 counter_u64_add(rack_tlp_retran, 1); 18290 counter_u64_add(rack_tlp_retran_bytes, len); 18291 } else { 18292 tp->t_sndrexmitpack++; 18293 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 18294 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 18295 } 18296 #ifdef STATS 18297 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 18298 len); 18299 #endif 18300 } else { 18301 KMOD_TCPSTAT_INC(tcps_sndpack); 18302 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 18303 #ifdef STATS 18304 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 18305 len); 18306 #endif 18307 } 18308 /* 18309 * If we're sending everything we've got, set PUSH. (This 18310 * will keep happy those implementations which only give 18311 * data to the user when a buffer fills or a PUSH comes in.) 18312 */ 18313 if (sb_offset + len == sbused(sb) && 18314 sbused(sb) && 18315 !(flags & TH_SYN)) { 18316 flags |= TH_PUSH; 18317 add_flag |= RACK_HAD_PUSH; 18318 } 18319 18320 SOCKBUF_UNLOCK(sb); 18321 } else { 18322 SOCKBUF_UNLOCK(sb); 18323 if (tp->t_flags & TF_ACKNOW) 18324 KMOD_TCPSTAT_INC(tcps_sndacks); 18325 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 18326 KMOD_TCPSTAT_INC(tcps_sndctrl); 18327 else 18328 KMOD_TCPSTAT_INC(tcps_sndwinup); 18329 18330 m = m_gethdr(M_NOWAIT, MT_DATA); 18331 if (m == NULL) { 18332 error = ENOBUFS; 18333 sack_rxmit = 0; 18334 goto out; 18335 } 18336 #ifdef INET6 18337 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 18338 MHLEN >= hdrlen) { 18339 M_ALIGN(m, hdrlen); 18340 } else 18341 #endif 18342 m->m_data += max_linkhdr; 18343 m->m_len = hdrlen; 18344 } 18345 SOCKBUF_UNLOCK_ASSERT(sb); 18346 m->m_pkthdr.rcvif = (struct ifnet *)0; 18347 #ifdef MAC 18348 mac_inpcb_create_mbuf(inp, m); 18349 #endif 18350 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { 18351 #ifdef INET6 18352 if (isipv6) 18353 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; 18354 else 18355 #endif /* INET6 */ 18356 #ifdef INET 18357 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; 18358 #endif 18359 th = rack->r_ctl.fsb.th; 18360 udp = rack->r_ctl.fsb.udp; 18361 if (udp) { 18362 #ifdef INET6 18363 if (isipv6) 18364 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18365 else 18366 #endif /* INET6 */ 18367 ulen = hdrlen + len - sizeof(struct ip); 18368 udp->uh_ulen = htons(ulen); 18369 } 18370 } else { 18371 #ifdef INET6 18372 if (isipv6) { 18373 ip6 = mtod(m, struct ip6_hdr *); 18374 if (tp->t_port) { 18375 udp = (struct udphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); 18376 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18377 udp->uh_dport = tp->t_port; 18378 ulen = hdrlen + len - sizeof(struct ip6_hdr); 18379 udp->uh_ulen = htons(ulen); 18380 th = (struct tcphdr *)(udp + 1); 18381 } else 18382 th = (struct tcphdr *)(ip6 + 1); 18383 tcpip_fillheaders(inp, tp->t_port, ip6, th); 18384 } else 18385 #endif /* INET6 */ 18386 { 18387 #ifdef INET 18388 ip = mtod(m, struct ip *); 18389 if (tp->t_port) { 18390 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 18391 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 18392 udp->uh_dport = tp->t_port; 18393 ulen = hdrlen + len - sizeof(struct ip); 18394 udp->uh_ulen = htons(ulen); 18395 th = (struct 
tcphdr *)(udp + 1); 18396 } else 18397 th = (struct tcphdr *)(ip + 1); 18398 tcpip_fillheaders(inp, tp->t_port, ip, th); 18399 #endif 18400 } 18401 } 18402 /* 18403 * Fill in fields, remembering maximum advertised window for use in 18404 * delaying messages about window sizes. If resending a FIN, be sure 18405 * not to use a new sequence number. 18406 */ 18407 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && 18408 tp->snd_nxt == tp->snd_max) 18409 tp->snd_nxt--; 18410 /* 18411 * If we are starting a connection, send ECN setup SYN packet. If we 18412 * are on a retransmit, we may resend those bits a number of times 18413 * as per RFC 3168. 18414 */ 18415 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { 18416 flags |= tcp_ecn_output_syn_sent(tp); 18417 } 18418 /* Also handle parallel SYN for ECN */ 18419 if (TCPS_HAVERCVDSYN(tp->t_state) && 18420 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { 18421 int ect = tcp_ecn_output_established(tp, &flags, len, sack_rxmit); 18422 if ((tp->t_state == TCPS_SYN_RECEIVED) && 18423 (tp->t_flags2 & TF2_ECN_SND_ECE)) 18424 tp->t_flags2 &= ~TF2_ECN_SND_ECE; 18425 #ifdef INET6 18426 if (isipv6) { 18427 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); 18428 ip6->ip6_flow |= htonl(ect << 20); 18429 } 18430 else 18431 #endif 18432 { 18433 #ifdef INET 18434 ip->ip_tos &= ~IPTOS_ECN_MASK; 18435 ip->ip_tos |= ect; 18436 #endif 18437 } 18438 } 18439 /* 18440 * If we are doing retransmissions, then snd_nxt will not reflect 18441 * the first unsent octet. For ACK only packets, we do not want the 18442 * sequence number of the retransmitted packet, we want the sequence 18443 * number of the next unsent octet. So, if there is no data (and no 18444 * SYN or FIN), use snd_max instead of snd_nxt when filling in 18445 * ti_seq. But if we are in persist state, snd_max might reflect 18446 * one byte beyond the right edge of the window, so use snd_nxt in 18447 * that case, since we know we aren't doing a retransmission. 18448 * (retransmit and persist are mutually exclusive...) 18449 */ 18450 if (sack_rxmit == 0) { 18451 if (len || (flags & (TH_SYN | TH_FIN))) { 18452 th->th_seq = htonl(tp->snd_nxt); 18453 rack_seq = tp->snd_nxt; 18454 } else { 18455 th->th_seq = htonl(tp->snd_max); 18456 rack_seq = tp->snd_max; 18457 } 18458 } else { 18459 th->th_seq = htonl(rsm->r_start); 18460 rack_seq = rsm->r_start; 18461 } 18462 th->th_ack = htonl(tp->rcv_nxt); 18463 tcp_set_flags(th, flags); 18464 /* 18465 * Calculate receive window. Don't shrink window, but avoid silly 18466 * window syndrome. 18467 * If a RST segment is sent, advertise a window of zero. 18468 */ 18469 if (flags & TH_RST) { 18470 recwin = 0; 18471 } else { 18472 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && 18473 recwin < (long)segsiz) { 18474 recwin = 0; 18475 } 18476 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 18477 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) 18478 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); 18479 } 18480 18481 /* 18482 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 18483 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 18484 * handled in syncache. 18485 */ 18486 if (flags & TH_SYN) 18487 th->th_win = htons((u_short) 18488 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 18489 else { 18490 /* Avoid shrinking window with window scaling. */ 18491 recwin = roundup2(recwin, 1 << tp->rcv_scale); 18492 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 18493 } 18494 /* 18495 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 18496 * window. 
This may cause the remote transmitter to stall.  This
     * flag tells soreceive() to disable delayed acknowledgements when
     * draining the buffer.  This can occur if the receiver is
     * attempting to read more data than can be buffered prior to
     * transmitting on the connection.
     */
    if (th->th_win == 0) {
        tp->t_sndzerowin++;
        tp->t_flags |= TF_RXWIN0SENT;
    } else
        tp->t_flags &= ~TF_RXWIN0SENT;
    tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
    /* Now, are we using the fsb? If so, copy the template data to the mbuf. */
    if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
        uint8_t *cpto;

        cpto = mtod(m, uint8_t *);
        memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
        /*
         * We have just copied in:
         * IP/IP6
         * <optional udphdr>
         * tcphdr (no options)
         *
         * We need to grab the correct pointers into the mbuf
         * for both the tcp header, and possibly the udp header (if tunneling).
         * We do this by using the offset in the copy buffer and adding it
         * to the mbuf base pointer (cpto).
         */
#ifdef INET6
        if (isipv6)
            ip6 = mtod(m, struct ip6_hdr *);
        else
#endif /* INET6 */
#ifdef INET
            ip = mtod(m, struct ip *);
#endif
        th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
        /* If we have a udp header, set it into the mbuf as well. */
        if (udp)
            udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
    }
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
    if (to.to_flags & TOF_SIGNATURE) {
        /*
         * Calculate MD5 signature and put it into the place
         * determined before.
         * NOTE: since TCP options buffer doesn't point into
         * mbuf's data, calculate offset and use it.
         */
        if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th,
            (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
            /*
             * Do not send segment if the calculation of MD5
             * digest has failed.
             */
            goto out;
        }
    }
#endif
    if (optlen) {
        bcopy(opt, th + 1, optlen);
        th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
    }
    /*
     * Put TCP length in extended header, and then checksum extended
     * header and data.
     */
    m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
#ifdef INET6
    if (isipv6) {
        /*
         * ip6_plen does not need to be filled now; it will be filled
         * in ip6_output.
18570 */ 18571 if (tp->t_port) { 18572 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 18573 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18574 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 18575 th->th_sum = htons(0); 18576 UDPSTAT_INC(udps_opackets); 18577 } else { 18578 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 18579 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18580 th->th_sum = in6_cksum_pseudo(ip6, 18581 sizeof(struct tcphdr) + optlen + len, IPPROTO_TCP, 18582 0); 18583 } 18584 } 18585 #endif 18586 #if defined(INET6) && defined(INET) 18587 else 18588 #endif 18589 #ifdef INET 18590 { 18591 if (tp->t_port) { 18592 m->m_pkthdr.csum_flags = CSUM_UDP; 18593 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 18594 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 18595 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 18596 th->th_sum = htons(0); 18597 UDPSTAT_INC(udps_opackets); 18598 } else { 18599 m->m_pkthdr.csum_flags = CSUM_TCP; 18600 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 18601 th->th_sum = in_pseudo(ip->ip_src.s_addr, 18602 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 18603 IPPROTO_TCP + len + optlen)); 18604 } 18605 /* IP version must be set here for ipv4/ipv6 checking later */ 18606 KASSERT(ip->ip_v == IPVERSION, 18607 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 18608 } 18609 #endif 18610 /* 18611 * Enable TSO and specify the size of the segments. The TCP pseudo 18612 * header checksum is always provided. XXX: Fixme: This is currently 18613 * not the case for IPv6. 18614 */ 18615 if (tso) { 18616 KASSERT(len > tp->t_maxseg - optlen, 18617 ("%s: len <= tso_segsz", __func__)); 18618 m->m_pkthdr.csum_flags |= CSUM_TSO; 18619 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; 18620 } 18621 KASSERT(len + hdrlen == m_length(m, NULL), 18622 ("%s: mbuf chain different than expected: %d + %u != %u", 18623 __func__, len, hdrlen, m_length(m, NULL))); 18624 18625 #ifdef TCP_HHOOK 18626 /* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */ 18627 hhook_run_tcp_est_out(tp, th, &to, len, tso); 18628 #endif 18629 /* We're getting ready to send; log now. 
*/ 18630 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 18631 union tcp_log_stackspecific log; 18632 18633 memset(&log.u_bbr, 0, sizeof(log.u_bbr)); 18634 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_inp); 18635 if (rack->rack_no_prr) 18636 log.u_bbr.flex1 = 0; 18637 else 18638 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; 18639 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; 18640 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; 18641 log.u_bbr.flex4 = orig_len; 18642 /* Save off the early/late values */ 18643 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; 18644 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; 18645 log.u_bbr.bw_inuse = rack_get_bw(rack); 18646 log.u_bbr.flex8 = 0; 18647 if (rsm) { 18648 if (rsm->r_flags & RACK_RWND_COLLAPSED) { 18649 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); 18650 counter_u64_add(rack_collapsed_win_rxt, 1); 18651 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); 18652 } 18653 if (doing_tlp) 18654 log.u_bbr.flex8 = 2; 18655 else 18656 log.u_bbr.flex8 = 1; 18657 } else { 18658 if (doing_tlp) 18659 log.u_bbr.flex8 = 3; 18660 else 18661 log.u_bbr.flex8 = 0; 18662 } 18663 log.u_bbr.pacing_gain = rack_get_output_gain(rack, rsm); 18664 log.u_bbr.flex7 = mark; 18665 log.u_bbr.flex7 <<= 8; 18666 log.u_bbr.flex7 |= pass; 18667 log.u_bbr.pkts_out = tp->t_maxseg; 18668 log.u_bbr.timeStamp = cts; 18669 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); 18670 log.u_bbr.lt_epoch = cwnd_to_use; 18671 log.u_bbr.delivered = sendalot; 18672 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 18673 len, &log, false, NULL, NULL, 0, &tv); 18674 } else 18675 lgb = NULL; 18676 18677 /* 18678 * Fill in IP length and desired time to live and send to IP level. 18679 * There should be a better way to handle ttl and tos; we could keep 18680 * them in the template, but need a way to checksum without them. 18681 */ 18682 /* 18683 * m->m_pkthdr.len should have been set before cksum calcuration, 18684 * because in6_cksum() need it. 18685 */ 18686 #ifdef INET6 18687 if (isipv6) { 18688 /* 18689 * we separately set hoplimit for every segment, since the 18690 * user might want to change the value via setsockopt. Also, 18691 * desired default hop limit might be changed via Neighbor 18692 * Discovery. 18693 */ 18694 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); 18695 18696 /* 18697 * Set the packet size here for the benefit of DTrace 18698 * probes. ip6_output() will set it properly; it's supposed 18699 * to include the option header lengths as well. 18700 */ 18701 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 18702 18703 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) 18704 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 18705 else 18706 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 18707 18708 if (tp->t_state == TCPS_SYN_SENT) 18709 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 18710 18711 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 18712 /* TODO: IPv6 IP6TOS_ECT bit on */ 18713 error = ip6_output(m, 18714 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 18715 inp->in6p_outputopts, 18716 #else 18717 NULL, 18718 #endif 18719 &inp->inp_route6, 18720 ((rsm || sack_rxmit) ? 
            IP_NO_SND_TAG_RL : 0),
            NULL, NULL, inp);

        if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
            mtu = inp->inp_route6.ro_nh->nh_mtu;
    }
#endif /* INET6 */
#if defined(INET) && defined(INET6)
    else
#endif
#ifdef INET
    {
        ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
        if (inp->inp_vflag & INP_IPV6PROTO)
            ip->ip_ttl = in6_selecthlim(inp, NULL);
#endif /* INET6 */
        rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
        /*
         * If we do path MTU discovery, then we set DF on every
         * packet.  This might not be the best thing to do according
         * to RFC3390 Section 2.  However, the tcp hostcache mitigates
         * the problem so it affects only the first tcp connection
         * with a host.
         *
         * NB: Don't set DF on small MTU/MSS to have a safe
         * fallback.
         */
        if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
            tp->t_flags2 |= TF2_PLPMTU_PMTUD;
            if (tp->t_port == 0 || len < V_tcp_minmss) {
                ip->ip_off |= htons(IP_DF);
            }
        } else {
            tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
        }

        if (tp->t_state == TCPS_SYN_SENT)
            TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

        TCP_PROBE5(send, NULL, tp, ip, tp, th);

        error = ip_output(m,
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
            inp->inp_options,
#else
            NULL,
#endif
            &inp->inp_route,
            ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0,
            inp);
        if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
            mtu = inp->inp_route.ro_nh->nh_mtu;
    }
#endif /* INET */

out:
    if (lgb) {
        lgb->tlb_errno = error;
        lgb = NULL;
    }
    /*
     * In transmit state, time the transmission and arrange for the
     * retransmit.  In persist state, just set snd_max.
18784 */ 18785 if (error == 0) { 18786 tcp_account_for_send(tp, len, (rsm != NULL), doing_tlp, hw_tls); 18787 if (rsm && doing_tlp) { 18788 rack->rc_last_sent_tlp_past_cumack = 0; 18789 rack->rc_last_sent_tlp_seq_valid = 1; 18790 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; 18791 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; 18792 } 18793 rack->forced_ack = 0; /* If we send something zap the FA flag */ 18794 if (rsm && (doing_tlp == 0)) { 18795 /* Set we retransmitted */ 18796 rack->rc_gp_saw_rec = 1; 18797 } else { 18798 if (cwnd_to_use > tp->snd_ssthresh) { 18799 /* Set we sent in CA */ 18800 rack->rc_gp_saw_ca = 1; 18801 } else { 18802 /* Set we sent in SS */ 18803 rack->rc_gp_saw_ss = 1; 18804 } 18805 } 18806 if (TCPS_HAVEESTABLISHED(tp->t_state) && 18807 (tp->t_flags & TF_SACK_PERMIT) && 18808 tp->rcv_numsacks > 0) 18809 tcp_clean_dsack_blocks(tp); 18810 tot_len_this_send += len; 18811 if (len == 0) 18812 counter_u64_add(rack_out_size[TCP_MSS_ACCT_SNDACK], 1); 18813 else if (len == 1) { 18814 counter_u64_add(rack_out_size[TCP_MSS_ACCT_PERSIST], 1); 18815 } else if (len > 1) { 18816 int idx; 18817 18818 idx = (len / segsiz) + 3; 18819 if (idx >= TCP_MSS_ACCT_ATIMER) 18820 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); 18821 else 18822 counter_u64_add(rack_out_size[idx], 1); 18823 } 18824 } 18825 if ((rack->rack_no_prr == 0) && 18826 sub_from_prr && 18827 (error == 0)) { 18828 if (rack->r_ctl.rc_prr_sndcnt >= len) 18829 rack->r_ctl.rc_prr_sndcnt -= len; 18830 else 18831 rack->r_ctl.rc_prr_sndcnt = 0; 18832 } 18833 sub_from_prr = 0; 18834 if (doing_tlp) { 18835 /* Make sure the TLP is added */ 18836 add_flag |= RACK_TLP; 18837 } else if (rsm) { 18838 /* If its a resend without TLP then it must not have the flag */ 18839 rsm->r_flags &= ~RACK_TLP; 18840 } 18841 rack_log_output(tp, &to, len, rack_seq, (uint8_t) flags, error, 18842 rack_to_usec_ts(&tv), 18843 rsm, add_flag, s_mb, s_moff, hw_tls); 18844 18845 18846 if ((error == 0) && 18847 (len > 0) && 18848 (tp->snd_una == tp->snd_max)) 18849 rack->r_ctl.rc_tlp_rxt_last_time = cts; 18850 { 18851 tcp_seq startseq = tp->snd_nxt; 18852 18853 /* Track our lost count */ 18854 if (rsm && (doing_tlp == 0)) 18855 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; 18856 /* 18857 * Advance snd_nxt over sequence space of this segment. 18858 */ 18859 if (error) 18860 /* We don't log or do anything with errors */ 18861 goto nomore; 18862 if (doing_tlp == 0) { 18863 if (rsm == NULL) { 18864 /* 18865 * Not a retransmission of some 18866 * sort, new data is going out so 18867 * clear our TLP count and flag. 18868 */ 18869 rack->rc_tlp_in_progress = 0; 18870 rack->r_ctl.rc_tlp_cnt_out = 0; 18871 } 18872 } else { 18873 /* 18874 * We have just sent a TLP, mark that it is true 18875 * and make sure our in progress is set so we 18876 * continue to check the count. 18877 */ 18878 rack->rc_tlp_in_progress = 1; 18879 rack->r_ctl.rc_tlp_cnt_out++; 18880 } 18881 if (flags & (TH_SYN | TH_FIN)) { 18882 if (flags & TH_SYN) 18883 tp->snd_nxt++; 18884 if (flags & TH_FIN) { 18885 tp->snd_nxt++; 18886 tp->t_flags |= TF_SENTFIN; 18887 } 18888 } 18889 /* In the ENOBUFS case we do *not* update snd_max */ 18890 if (sack_rxmit) 18891 goto nomore; 18892 18893 tp->snd_nxt += len; 18894 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { 18895 if (tp->snd_una == tp->snd_max) { 18896 /* 18897 * Update the time we just added data since 18898 * none was outstanding. 
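             * (t_acktime is also the baseline used by the progress-timeout check.)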
18899 */ 18900 rack_log_progress_event(rack, tp, ticks, PROGRESS_START, __LINE__); 18901 tp->t_acktime = ticks; 18902 } 18903 tp->snd_max = tp->snd_nxt; 18904 /* 18905 * Time this transmission if not a retransmission and 18906 * not currently timing anything. 18907 * This is only relevant in case of switching back to 18908 * the base stack. 18909 */ 18910 if (tp->t_rtttime == 0) { 18911 tp->t_rtttime = ticks; 18912 tp->t_rtseq = startseq; 18913 KMOD_TCPSTAT_INC(tcps_segstimed); 18914 } 18915 if (len && 18916 ((tp->t_flags & TF_GPUTINPROG) == 0)) 18917 rack_start_gp_measurement(tp, rack, startseq, sb_offset); 18918 } 18919 /* 18920 * If we are doing FO we need to update the mbuf position and subtract 18921 * this happens when the peer sends us duplicate information and 18922 * we thus want to send a DSACK. 18923 * 18924 * XXXRRS: This brings to mind a ?, when we send a DSACK block is TSO 18925 * turned off? If not then we are going to echo multiple DSACK blocks 18926 * out (with the TSO), which we should not be doing. 18927 */ 18928 if (rack->r_fast_output && len) { 18929 if (rack->r_ctl.fsb.left_to_send > len) 18930 rack->r_ctl.fsb.left_to_send -= len; 18931 else 18932 rack->r_ctl.fsb.left_to_send = 0; 18933 if (rack->r_ctl.fsb.left_to_send < segsiz) 18934 rack->r_fast_output = 0; 18935 if (rack->r_fast_output) { 18936 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 18937 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 18938 } 18939 } 18940 } 18941 nomore: 18942 if (error) { 18943 rack->r_ctl.rc_agg_delayed = 0; 18944 rack->r_early = 0; 18945 rack->r_late = 0; 18946 rack->r_ctl.rc_agg_early = 0; 18947 SOCKBUF_UNLOCK_ASSERT(sb); /* Check gotos. */ 18948 /* 18949 * Failures do not advance the seq counter above. For the 18950 * case of ENOBUFS we will fall out and retry in 1ms with 18951 * the hpts. Everything else will just have to retransmit 18952 * with the timer. 18953 * 18954 * In any case, we do not want to loop around for another 18955 * send without a good reason. 18956 */ 18957 sendalot = 0; 18958 switch (error) { 18959 case EPERM: 18960 tp->t_softerror = error; 18961 #ifdef TCP_ACCOUNTING 18962 crtsc = get_cyclecount(); 18963 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18964 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 18965 } 18966 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 18967 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 18968 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 18969 } 18970 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 18971 sched_unpin(); 18972 #endif 18973 return (error); 18974 case ENOBUFS: 18975 /* 18976 * Pace us right away to retry in a some 18977 * time 18978 */ 18979 if (rack->r_ctl.crte != NULL) { 18980 rack_trace_point(rack, RACK_TP_HWENOBUF); 18981 } else 18982 rack_trace_point(rack, RACK_TP_ENOBUF); 18983 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); 18984 if (rack->rc_enobuf < 0x7f) 18985 rack->rc_enobuf++; 18986 if (slot < (10 * HPTS_USEC_IN_MSEC)) 18987 slot = 10 * HPTS_USEC_IN_MSEC; 18988 if (rack->r_ctl.crte != NULL) { 18989 counter_u64_add(rack_saw_enobuf_hw, 1); 18990 tcp_rl_log_enobuf(rack->r_ctl.crte); 18991 } 18992 counter_u64_add(rack_saw_enobuf, 1); 18993 goto enobufs; 18994 case EMSGSIZE: 18995 /* 18996 * For some reason the interface we used initially 18997 * to send segments changed to another or lowered 18998 * its MTU. If TSO was active we either got an 18999 * interface without TSO capabilits or TSO was 19000 * turned off. 
If we obtained mtu from ip_output() 19001 * then update it and try again. 19002 */ 19003 if (tso) 19004 tp->t_flags &= ~TF_TSO; 19005 if (mtu != 0) { 19006 tcp_mss_update(tp, -1, mtu, NULL, NULL); 19007 goto again; 19008 } 19009 slot = 10 * HPTS_USEC_IN_MSEC; 19010 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19011 #ifdef TCP_ACCOUNTING 19012 crtsc = get_cyclecount(); 19013 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19014 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19015 } 19016 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19017 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19018 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19019 } 19020 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19021 sched_unpin(); 19022 #endif 19023 return (error); 19024 case ENETUNREACH: 19025 counter_u64_add(rack_saw_enetunreach, 1); 19026 case EHOSTDOWN: 19027 case EHOSTUNREACH: 19028 case ENETDOWN: 19029 if (TCPS_HAVERCVDSYN(tp->t_state)) { 19030 tp->t_softerror = error; 19031 } 19032 /* FALLTHROUGH */ 19033 default: 19034 slot = 10 * HPTS_USEC_IN_MSEC; 19035 rack_start_hpts_timer(rack, tp, cts, slot, 0, 0); 19036 #ifdef TCP_ACCOUNTING 19037 crtsc = get_cyclecount(); 19038 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19039 tp->tcp_cnt_counters[SND_OUT_FAIL]++; 19040 } 19041 counter_u64_add(tcp_cnt_counters[SND_OUT_FAIL], 1); 19042 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19043 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); 19044 } 19045 counter_u64_add(tcp_proc_time[SND_OUT_FAIL], (crtsc - ts_val)); 19046 sched_unpin(); 19047 #endif 19048 return (error); 19049 } 19050 } else { 19051 rack->rc_enobuf = 0; 19052 if (IN_FASTRECOVERY(tp->t_flags) && rsm) 19053 rack->r_ctl.retran_during_recovery += len; 19054 } 19055 KMOD_TCPSTAT_INC(tcps_sndtotal); 19056 19057 /* 19058 * Data sent (as far as we can tell). If this advertises a larger 19059 * window than any other segment, then remember the size of the 19060 * advertised window. Any pending ACK has now been sent. 19061 */ 19062 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 19063 tp->rcv_adv = tp->rcv_nxt + recwin; 19064 19065 tp->last_ack_sent = tp->rcv_nxt; 19066 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 19067 enobufs: 19068 if (sendalot) { 19069 /* Do we need to turn off sendalot? */ 19070 if (rack->r_ctl.rc_pace_max_segs && 19071 (tot_len_this_send >= rack->r_ctl.rc_pace_max_segs)) { 19072 /* We hit our max. */ 19073 sendalot = 0; 19074 } else if ((rack->rc_user_set_max_segs) && 19075 (tot_len_this_send >= (rack->rc_user_set_max_segs * segsiz))) { 19076 /* We hit the user defined max */ 19077 sendalot = 0; 19078 } 19079 } 19080 if ((error == 0) && (flags & TH_FIN)) 19081 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 19082 if (flags & TH_RST) { 19083 /* 19084 * We don't send again after sending a RST. 19085 */ 19086 slot = 0; 19087 sendalot = 0; 19088 if (error == 0) 19089 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 19090 } else if ((slot == 0) && (sendalot == 0) && tot_len_this_send) { 19091 /* 19092 * Get our pacing rate, if an error 19093 * occurred in sending (ENOBUF) we would 19094 * hit the else if with slot preset. Other 19095 * errors return. 19096 */ 19097 slot = rack_get_pacing_delay(rack, tp, tot_len_this_send, rsm, segsiz); 19098 } 19099 if (rsm && 19100 (rsm->r_flags & RACK_HAS_SYN) == 0 && 19101 rack->use_rack_rr) { 19102 /* Its a retransmit and we use the rack cheat? 
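	 * If so, we may override the pacing slot with the fixed rc_min_to value below.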
*/ 19103 if ((slot == 0) || 19104 (rack->rc_always_pace == 0) || 19105 (rack->r_rr_config == 1)) { 19106 /* 19107 * We have no pacing set or we 19108 * are using old-style rack or 19109 * we are overridden to use the old 1ms pacing. 19110 */ 19111 slot = rack->r_ctl.rc_min_to; 19112 } 19113 } 19114 /* We have sent clear the flag */ 19115 rack->r_ent_rec_ns = 0; 19116 if (rack->r_must_retran) { 19117 if (rsm) { 19118 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); 19119 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { 19120 /* 19121 * We have retransmitted all. 19122 */ 19123 rack->r_must_retran = 0; 19124 rack->r_ctl.rc_out_at_rto = 0; 19125 } 19126 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19127 /* 19128 * Sending new data will also kill 19129 * the loop. 19130 */ 19131 rack->r_must_retran = 0; 19132 rack->r_ctl.rc_out_at_rto = 0; 19133 } 19134 } 19135 rack->r_ctl.fsb.recwin = recwin; 19136 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && 19137 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { 19138 /* 19139 * We hit an RTO and now have past snd_max at the RTO 19140 * clear all the WAS flags. 19141 */ 19142 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); 19143 } 19144 if (slot) { 19145 /* set the rack tcb into the slot N */ 19146 if ((error == 0) && 19147 rack_use_rfo && 19148 ((flags & (TH_SYN|TH_FIN)) == 0) && 19149 (rsm == NULL) && 19150 (tp->snd_nxt == tp->snd_max) && 19151 (ipoptlen == 0) && 19152 (tp->rcv_numsacks == 0) && 19153 rack->r_fsb_inited && 19154 TCPS_HAVEESTABLISHED(tp->t_state) && 19155 (rack->r_must_retran == 0) && 19156 ((tp->t_flags & TF_NEEDFIN) == 0) && 19157 (len > 0) && (orig_len > 0) && 19158 (orig_len > len) && 19159 ((orig_len - len) >= segsiz) && 19160 ((optlen == 0) || 19161 ((optlen == TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19162 /* We can send at least one more MSS using our fsb */ 19163 19164 rack->r_fast_output = 1; 19165 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19166 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19167 rack->r_ctl.fsb.tcp_flags = flags; 19168 rack->r_ctl.fsb.left_to_send = orig_len - len; 19169 if (hw_tls) 19170 rack->r_ctl.fsb.hw_tls = 1; 19171 else 19172 rack->r_ctl.fsb.hw_tls = 0; 19173 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19174 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19175 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19176 (tp->snd_max - tp->snd_una))); 19177 if (rack->r_ctl.fsb.left_to_send < segsiz) 19178 rack->r_fast_output = 0; 19179 else { 19180 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19181 rack->r_ctl.fsb.rfo_apply_push = 1; 19182 else 19183 rack->r_ctl.fsb.rfo_apply_push = 0; 19184 } 19185 } else 19186 rack->r_fast_output = 0; 19187 rack_log_fsb(rack, tp, so, flags, 19188 ipoptlen, orig_len, len, error, 19189 (rsm == NULL), optlen, __LINE__, 2); 19190 } else if (sendalot) { 19191 int ret; 19192 19193 sack_rxmit = 0; 19194 if ((error == 0) && 19195 rack_use_rfo && 19196 ((flags & (TH_SYN|TH_FIN)) == 0) && 19197 (rsm == NULL) && 19198 (ipoptlen == 0) && 19199 (tp->rcv_numsacks == 0) && 19200 (tp->snd_nxt == tp->snd_max) && 19201 (rack->r_must_retran == 0) && 19202 rack->r_fsb_inited && 19203 TCPS_HAVEESTABLISHED(tp->t_state) && 19204 ((tp->t_flags & TF_NEEDFIN) == 0) && 19205 (len > 0) && (orig_len > 0) && 19206 (orig_len > len) && 19207 ((orig_len - len) >= segsiz) && 19208 ((optlen == 0) || 19209 ((optlen == 
TCPOLEN_TSTAMP_APPA) && (to.to_flags & TOF_TS)))) { 19210 /* we can use fast_output for more */ 19211 19212 rack->r_fast_output = 1; 19213 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); 19214 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; 19215 rack->r_ctl.fsb.tcp_flags = flags; 19216 rack->r_ctl.fsb.left_to_send = orig_len - len; 19217 if (hw_tls) 19218 rack->r_ctl.fsb.hw_tls = 1; 19219 else 19220 rack->r_ctl.fsb.hw_tls = 0; 19221 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), 19222 ("rack:%p left_to_send:%u sbavail:%u out:%u", 19223 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), 19224 (tp->snd_max - tp->snd_una))); 19225 if (rack->r_ctl.fsb.left_to_send < segsiz) { 19226 rack->r_fast_output = 0; 19227 } 19228 if (rack->r_fast_output) { 19229 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) 19230 rack->r_ctl.fsb.rfo_apply_push = 1; 19231 else 19232 rack->r_ctl.fsb.rfo_apply_push = 0; 19233 rack_log_fsb(rack, tp, so, flags, 19234 ipoptlen, orig_len, len, error, 19235 (rsm == NULL), optlen, __LINE__, 3); 19236 error = 0; 19237 ret = rack_fast_output(tp, rack, ts_val, cts, ms_cts, &tv, tot_len_this_send, &error); 19238 if (ret >= 0) 19239 return (ret); 19240 else if (error) 19241 goto nomore; 19242 19243 } 19244 } 19245 goto again; 19246 } 19247 /* Assure when we leave that snd_nxt will point to top */ 19248 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) 19249 tp->snd_nxt = tp->snd_max; 19250 rack_start_hpts_timer(rack, tp, cts, slot, tot_len_this_send, 0); 19251 #ifdef TCP_ACCOUNTING 19252 crtsc = get_cyclecount() - ts_val; 19253 if (tot_len_this_send) { 19254 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19255 tp->tcp_cnt_counters[SND_OUT_DATA]++; 19256 } 19257 counter_u64_add(tcp_cnt_counters[SND_OUT_DATA], 1); 19258 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19259 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; 19260 } 19261 counter_u64_add(tcp_proc_time[SND_OUT_DATA], crtsc); 19262 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19263 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) /segsiz); 19264 } 19265 counter_u64_add(tcp_cnt_counters[CNT_OF_MSS_OUT], ((tot_len_this_send + segsiz - 1) /segsiz)); 19266 } else { 19267 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19268 tp->tcp_cnt_counters[SND_OUT_ACK]++; 19269 } 19270 counter_u64_add(tcp_cnt_counters[SND_OUT_ACK], 1); 19271 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { 19272 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; 19273 } 19274 counter_u64_add(tcp_proc_time[SND_OUT_ACK], crtsc); 19275 } 19276 sched_unpin(); 19277 #endif 19278 if (error == ENOBUFS) 19279 error = 0; 19280 return (error); 19281 } 19282 19283 static void 19284 rack_update_seg(struct tcp_rack *rack) 19285 { 19286 uint32_t orig_val; 19287 19288 orig_val = rack->r_ctl.rc_pace_max_segs; 19289 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); 19290 if (orig_val != rack->r_ctl.rc_pace_max_segs) 19291 rack_log_pacing_delay_calc(rack, 0, 0, orig_val, 0, 0, 15, __LINE__, NULL, 0); 19292 } 19293 19294 static void 19295 rack_mtu_change(struct tcpcb *tp) 19296 { 19297 /* 19298 * The MSS may have changed 19299 */ 19300 struct tcp_rack *rack; 19301 struct rack_sendmap *rsm; 19302 19303 rack = (struct tcp_rack *)tp->t_fb_ptr; 19304 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { 19305 /* 19306 * The MTU has changed we need to resend everything 19307 * since all we have sent is lost. We first fix 19308 * up the mtu though. 
19309 */ 19310 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19311 /* We treat this like a full retransmit timeout without the cwnd adjustment */ 19312 rack_remxt_tmr(tp); 19313 rack->r_fast_output = 0; 19314 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, 19315 rack->r_ctl.rc_sacked); 19316 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; 19317 rack->r_must_retran = 1; 19318 /* Mark all inflight to needing to be rxt'd */ 19319 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { 19320 rsm->r_flags |= RACK_MUST_RXT; 19321 } 19322 } 19323 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); 19324 /* We don't use snd_nxt to retransmit */ 19325 tp->snd_nxt = tp->snd_max; 19326 } 19327 19328 static int 19329 rack_set_profile(struct tcp_rack *rack, int prof) 19330 { 19331 int err = EINVAL; 19332 if (prof == 1) { 19333 /* pace_always=1 */ 19334 if (rack->rc_always_pace == 0) { 19335 if (tcp_can_enable_pacing() == 0) 19336 return (EBUSY); 19337 } 19338 rack->rc_always_pace = 1; 19339 if (rack->use_fixed_rate || rack->gp_ready) 19340 rack_set_cc_pacing(rack); 19341 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19342 rack->rack_attempt_hdwr_pace = 0; 19343 /* cmpack=1 */ 19344 if (rack_use_cmp_acks) 19345 rack->r_use_cmp_ack = 1; 19346 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19347 rack->r_use_cmp_ack) 19348 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19349 /* scwnd=1 */ 19350 rack->rack_enable_scwnd = 1; 19351 /* dynamic=100 */ 19352 rack->rc_gp_dyn_mul = 1; 19353 /* gp_inc_ca */ 19354 rack->r_ctl.rack_per_of_gp_ca = 100; 19355 /* rrr_conf=3 */ 19356 rack->r_rr_config = 3; 19357 /* npush=2 */ 19358 rack->r_ctl.rc_no_push_at_mrtt = 2; 19359 /* fillcw=1 */ 19360 rack->rc_pace_to_cwnd = 1; 19361 rack->rc_pace_fill_if_rttin_range = 0; 19362 rack->rtt_limit_mul = 0; 19363 /* noprr=1 */ 19364 rack->rack_no_prr = 1; 19365 /* lscwnd=1 */ 19366 rack->r_limit_scw = 1; 19367 /* gp_inc_rec */ 19368 rack->r_ctl.rack_per_of_gp_rec = 90; 19369 err = 0; 19370 19371 } else if (prof == 3) { 19372 /* Same as profile one execept fill_cw becomes 2 (less aggressive set) */ 19373 /* pace_always=1 */ 19374 if (rack->rc_always_pace == 0) { 19375 if (tcp_can_enable_pacing() == 0) 19376 return (EBUSY); 19377 } 19378 rack->rc_always_pace = 1; 19379 if (rack->use_fixed_rate || rack->gp_ready) 19380 rack_set_cc_pacing(rack); 19381 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19382 rack->rack_attempt_hdwr_pace = 0; 19383 /* cmpack=1 */ 19384 if (rack_use_cmp_acks) 19385 rack->r_use_cmp_ack = 1; 19386 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && 19387 rack->r_use_cmp_ack) 19388 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19389 /* scwnd=1 */ 19390 rack->rack_enable_scwnd = 1; 19391 /* dynamic=100 */ 19392 rack->rc_gp_dyn_mul = 1; 19393 /* gp_inc_ca */ 19394 rack->r_ctl.rack_per_of_gp_ca = 100; 19395 /* rrr_conf=3 */ 19396 rack->r_rr_config = 3; 19397 /* npush=2 */ 19398 rack->r_ctl.rc_no_push_at_mrtt = 2; 19399 /* fillcw=2 */ 19400 rack->rc_pace_to_cwnd = 1; 19401 rack->r_fill_less_agg = 1; 19402 rack->rc_pace_fill_if_rttin_range = 0; 19403 rack->rtt_limit_mul = 0; 19404 /* noprr=1 */ 19405 rack->rack_no_prr = 1; 19406 /* lscwnd=1 */ 19407 rack->r_limit_scw = 1; 19408 /* gp_inc_rec */ 19409 rack->r_ctl.rack_per_of_gp_rec = 90; 19410 err = 0; 19411 19412 19413 } else if (prof == 2) { 19414 /* cmpack=1 */ 19415 if (rack->rc_always_pace == 0) { 19416 if (tcp_can_enable_pacing() == 0) 19417 return (EBUSY); 19418 } 19419 rack->rc_always_pace = 1; 19420 if (rack->use_fixed_rate || rack->gp_ready) 19421 
rack_set_cc_pacing(rack); 19422 rack->r_use_cmp_ack = 1; 19423 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19424 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19425 /* pace_always=1 */ 19426 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19427 /* scwnd=1 */ 19428 rack->rack_enable_scwnd = 1; 19429 /* dynamic=100 */ 19430 rack->rc_gp_dyn_mul = 1; 19431 rack->r_ctl.rack_per_of_gp_ca = 100; 19432 /* rrr_conf=3 */ 19433 rack->r_rr_config = 3; 19434 /* npush=2 */ 19435 rack->r_ctl.rc_no_push_at_mrtt = 2; 19436 /* fillcw=1 */ 19437 rack->rc_pace_to_cwnd = 1; 19438 rack->rc_pace_fill_if_rttin_range = 0; 19439 rack->rtt_limit_mul = 0; 19440 /* noprr=1 */ 19441 rack->rack_no_prr = 1; 19442 /* lscwnd=0 */ 19443 rack->r_limit_scw = 0; 19444 err = 0; 19445 } else if (prof == 0) { 19446 /* This changes things back to the default settings */ 19447 err = 0; 19448 if (rack->rc_always_pace) { 19449 tcp_decrement_paced_conn(); 19450 rack_undo_cc_pacing(rack); 19451 rack->rc_always_pace = 0; 19452 } 19453 if (rack_pace_every_seg && tcp_can_enable_pacing()) { 19454 rack->rc_always_pace = 1; 19455 if (rack->use_fixed_rate || rack->gp_ready) 19456 rack_set_cc_pacing(rack); 19457 } else 19458 rack->rc_always_pace = 0; 19459 if (rack_dsack_std_based & 0x1) { 19460 /* Basically this means all rack timers are at least (srtt + 1/4 srtt) */ 19461 rack->rc_rack_tmr_std_based = 1; 19462 } 19463 if (rack_dsack_std_based & 0x2) { 19464 /* Basically this means rack timers are extended based on dsack by up to (2 * srtt) */ 19465 rack->rc_rack_use_dsack = 1; 19466 } 19467 if (rack_use_cmp_acks) 19468 rack->r_use_cmp_ack = 1; 19469 else 19470 rack->r_use_cmp_ack = 0; 19471 if (rack_disable_prr) 19472 rack->rack_no_prr = 1; 19473 else 19474 rack->rack_no_prr = 0; 19475 if (rack_gp_no_rec_chg) 19476 rack->rc_gp_no_rec_chg = 1; 19477 else 19478 rack->rc_gp_no_rec_chg = 0; 19479 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { 19480 rack->r_mbuf_queue = 1; 19481 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) 19482 rack->rc_inp->inp_flags2 |= INP_MBUF_ACKCMP; 19483 rack->rc_inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19484 } else { 19485 rack->r_mbuf_queue = 0; 19486 rack->rc_inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19487 } 19488 if (rack_enable_shared_cwnd) 19489 rack->rack_enable_scwnd = 1; 19490 else 19491 rack->rack_enable_scwnd = 0; 19492 if (rack_do_dyn_mul) { 19493 /* When dynamic adjustment is on CA needs to start at 100% */ 19494 rack->rc_gp_dyn_mul = 1; 19495 if (rack_do_dyn_mul >= 100) 19496 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; 19497 } else { 19498 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; 19499 rack->rc_gp_dyn_mul = 0; 19500 } 19501 rack->r_rr_config = 0; 19502 rack->r_ctl.rc_no_push_at_mrtt = 0; 19503 rack->rc_pace_to_cwnd = 0; 19504 rack->rc_pace_fill_if_rttin_range = 0; 19505 rack->rtt_limit_mul = 0; 19506 19507 if (rack_enable_hw_pacing) 19508 rack->rack_hdw_pace_ena = 1; 19509 else 19510 rack->rack_hdw_pace_ena = 0; 19511 if (rack_disable_prr) 19512 rack->rack_no_prr = 1; 19513 else 19514 rack->rack_no_prr = 0; 19515 if (rack_limits_scwnd) 19516 rack->r_limit_scw = 1; 19517 else 19518 rack->r_limit_scw = 0; 19519 err = 0; 19520 } 19521 return (err); 19522 } 19523 19524 static int 19525 rack_add_deferred_option(struct tcp_rack *rack, int sopt_name, uint64_t loptval) 19526 { 19527 struct deferred_opt_list *dol; 19528 19529 dol = malloc(sizeof(struct deferred_opt_list), 19530 M_TCPFSB, M_NOWAIT|M_ZERO); 19531 if (dol == NULL) { 19532 /* 19533 * No space yikes -- fail out.. 
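	 * Returning 0 tells the caller the option was not queued.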
19534 */ 19535 return (0); 19536 } 19537 dol->optname = sopt_name; 19538 dol->optval = loptval; 19539 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); 19540 return (1); 19541 } 19542 19543 static int 19544 rack_process_option(struct tcpcb *tp, struct tcp_rack *rack, int sopt_name, 19545 uint32_t optval, uint64_t loptval) 19546 { 19547 struct epoch_tracker et; 19548 struct sockopt sopt; 19549 struct cc_newreno_opts opt; 19550 struct inpcb *inp = tptoinpcb(tp); 19551 uint64_t val; 19552 int error = 0; 19553 uint16_t ca, ss; 19554 19555 switch (sopt_name) { 19556 19557 case TCP_RACK_DSACK_OPT: 19558 RACK_OPTS_INC(tcp_rack_dsack_opt); 19559 if (optval & 0x1) { 19560 rack->rc_rack_tmr_std_based = 1; 19561 } else { 19562 rack->rc_rack_tmr_std_based = 0; 19563 } 19564 if (optval & 0x2) { 19565 rack->rc_rack_use_dsack = 1; 19566 } else { 19567 rack->rc_rack_use_dsack = 0; 19568 } 19569 rack_log_dsack_event(rack, 5, __LINE__, 0, 0); 19570 break; 19571 case TCP_RACK_PACING_BETA: 19572 RACK_OPTS_INC(tcp_rack_beta); 19573 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19574 /* This only works for newreno. */ 19575 error = EINVAL; 19576 break; 19577 } 19578 if (rack->rc_pacing_cc_set) { 19579 /* 19580 * Set them into the real CC module 19581 * whats in the rack pcb is the old values 19582 * to be used on restoral/ 19583 */ 19584 sopt.sopt_dir = SOPT_SET; 19585 opt.name = CC_NEWRENO_BETA; 19586 opt.val = optval; 19587 if (CC_ALGO(tp)->ctl_output != NULL) 19588 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19589 else { 19590 error = ENOENT; 19591 break; 19592 } 19593 } else { 19594 /* 19595 * Not pacing yet so set it into our local 19596 * rack pcb storage. 19597 */ 19598 rack->r_ctl.rc_saved_beta.beta = optval; 19599 } 19600 break; 19601 case TCP_RACK_TIMER_SLOP: 19602 RACK_OPTS_INC(tcp_rack_timer_slop); 19603 rack->r_ctl.timer_slop = optval; 19604 if (rack->rc_tp->t_srtt) { 19605 /* 19606 * If we have an SRTT lets update t_rxtcur 19607 * to have the new slop. 19608 */ 19609 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), 19610 rack_rto_min, rack_rto_max, 19611 rack->r_ctl.timer_slop); 19612 } 19613 break; 19614 case TCP_RACK_PACING_BETA_ECN: 19615 RACK_OPTS_INC(tcp_rack_beta_ecn); 19616 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { 19617 /* This only works for newreno. */ 19618 error = EINVAL; 19619 break; 19620 } 19621 if (rack->rc_pacing_cc_set) { 19622 /* 19623 * Set them into the real CC module 19624 * whats in the rack pcb is the old values 19625 * to be used on restoral/ 19626 */ 19627 sopt.sopt_dir = SOPT_SET; 19628 opt.name = CC_NEWRENO_BETA_ECN; 19629 opt.val = optval; 19630 if (CC_ALGO(tp)->ctl_output != NULL) 19631 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); 19632 else 19633 error = ENOENT; 19634 } else { 19635 /* 19636 * Not pacing yet so set it into our local 19637 * rack pcb storage. 
19638 */ 19639 rack->r_ctl.rc_saved_beta.beta_ecn = optval; 19640 rack->r_ctl.rc_saved_beta.newreno_flags = CC_NEWRENO_BETA_ECN_ENABLED; 19641 } 19642 break; 19643 case TCP_DEFER_OPTIONS: 19644 RACK_OPTS_INC(tcp_defer_opt); 19645 if (optval) { 19646 if (rack->gp_ready) { 19647 /* Too late */ 19648 error = EINVAL; 19649 break; 19650 } 19651 rack->defer_options = 1; 19652 } else 19653 rack->defer_options = 0; 19654 break; 19655 case TCP_RACK_MEASURE_CNT: 19656 RACK_OPTS_INC(tcp_rack_measure_cnt); 19657 if (optval && (optval <= 0xff)) { 19658 rack->r_ctl.req_measurements = optval; 19659 } else 19660 error = EINVAL; 19661 break; 19662 case TCP_REC_ABC_VAL: 19663 RACK_OPTS_INC(tcp_rec_abc_val); 19664 if (optval > 0) 19665 rack->r_use_labc_for_rec = 1; 19666 else 19667 rack->r_use_labc_for_rec = 0; 19668 break; 19669 case TCP_RACK_ABC_VAL: 19670 RACK_OPTS_INC(tcp_rack_abc_val); 19671 if ((optval > 0) && (optval < 255)) 19672 rack->rc_labc = optval; 19673 else 19674 error = EINVAL; 19675 break; 19676 case TCP_HDWR_UP_ONLY: 19677 RACK_OPTS_INC(tcp_pacing_up_only); 19678 if (optval) 19679 rack->r_up_only = 1; 19680 else 19681 rack->r_up_only = 0; 19682 break; 19683 case TCP_PACING_RATE_CAP: 19684 RACK_OPTS_INC(tcp_pacing_rate_cap); 19685 rack->r_ctl.bw_rate_cap = loptval; 19686 break; 19687 case TCP_RACK_PROFILE: 19688 RACK_OPTS_INC(tcp_profile); 19689 error = rack_set_profile(rack, optval); 19690 break; 19691 case TCP_USE_CMP_ACKS: 19692 RACK_OPTS_INC(tcp_use_cmp_acks); 19693 if ((optval == 0) && (rack->rc_inp->inp_flags2 & INP_MBUF_ACKCMP)) { 19694 /* You can't turn it off once its on! */ 19695 error = EINVAL; 19696 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { 19697 rack->r_use_cmp_ack = 1; 19698 rack->r_mbuf_queue = 1; 19699 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19700 } 19701 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) 19702 inp->inp_flags2 |= INP_MBUF_ACKCMP; 19703 break; 19704 case TCP_SHARED_CWND_TIME_LIMIT: 19705 RACK_OPTS_INC(tcp_lscwnd); 19706 if (optval) 19707 rack->r_limit_scw = 1; 19708 else 19709 rack->r_limit_scw = 0; 19710 break; 19711 case TCP_RACK_PACE_TO_FILL: 19712 RACK_OPTS_INC(tcp_fillcw); 19713 if (optval == 0) 19714 rack->rc_pace_to_cwnd = 0; 19715 else { 19716 rack->rc_pace_to_cwnd = 1; 19717 if (optval > 1) 19718 rack->r_fill_less_agg = 1; 19719 } 19720 if ((optval >= rack_gp_rtt_maxmul) && 19721 rack_gp_rtt_maxmul && 19722 (optval < 0xf)) { 19723 rack->rc_pace_fill_if_rttin_range = 1; 19724 rack->rtt_limit_mul = optval; 19725 } else { 19726 rack->rc_pace_fill_if_rttin_range = 0; 19727 rack->rtt_limit_mul = 0; 19728 } 19729 break; 19730 case TCP_RACK_NO_PUSH_AT_MAX: 19731 RACK_OPTS_INC(tcp_npush); 19732 if (optval == 0) 19733 rack->r_ctl.rc_no_push_at_mrtt = 0; 19734 else if (optval < 0xff) 19735 rack->r_ctl.rc_no_push_at_mrtt = optval; 19736 else 19737 error = EINVAL; 19738 break; 19739 case TCP_SHARED_CWND_ENABLE: 19740 RACK_OPTS_INC(tcp_rack_scwnd); 19741 if (optval == 0) 19742 rack->rack_enable_scwnd = 0; 19743 else 19744 rack->rack_enable_scwnd = 1; 19745 break; 19746 case TCP_RACK_MBUF_QUEUE: 19747 /* Now do we use the LRO mbuf-queue feature */ 19748 RACK_OPTS_INC(tcp_rack_mbufq); 19749 if (optval || rack->r_use_cmp_ack) 19750 rack->r_mbuf_queue = 1; 19751 else 19752 rack->r_mbuf_queue = 0; 19753 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19754 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19755 else 19756 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19757 break; 19758 case TCP_RACK_NONRXT_CFG_RATE: 19759 
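		/* In recovery, do non-retransmit sends use the configured rate? */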
RACK_OPTS_INC(tcp_rack_cfg_rate); 19760 if (optval == 0) 19761 rack->rack_rec_nonrxt_use_cr = 0; 19762 else 19763 rack->rack_rec_nonrxt_use_cr = 1; 19764 break; 19765 case TCP_NO_PRR: 19766 RACK_OPTS_INC(tcp_rack_noprr); 19767 if (optval == 0) 19768 rack->rack_no_prr = 0; 19769 else if (optval == 1) 19770 rack->rack_no_prr = 1; 19771 else if (optval == 2) 19772 rack->no_prr_addback = 1; 19773 else 19774 error = EINVAL; 19775 break; 19776 case TCP_TIMELY_DYN_ADJ: 19777 RACK_OPTS_INC(tcp_timely_dyn); 19778 if (optval == 0) 19779 rack->rc_gp_dyn_mul = 0; 19780 else { 19781 rack->rc_gp_dyn_mul = 1; 19782 if (optval >= 100) { 19783 /* 19784 * If the user sets something 100 or more 19785 * its the gp_ca value. 19786 */ 19787 rack->r_ctl.rack_per_of_gp_ca = optval; 19788 } 19789 } 19790 break; 19791 case TCP_RACK_DO_DETECTION: 19792 RACK_OPTS_INC(tcp_rack_do_detection); 19793 if (optval == 0) 19794 rack->do_detection = 0; 19795 else 19796 rack->do_detection = 1; 19797 break; 19798 case TCP_RACK_TLP_USE: 19799 if ((optval < TLP_USE_ID) || (optval > TLP_USE_TWO_TWO)) { 19800 error = EINVAL; 19801 break; 19802 } 19803 RACK_OPTS_INC(tcp_tlp_use); 19804 rack->rack_tlp_threshold_use = optval; 19805 break; 19806 case TCP_RACK_TLP_REDUCE: 19807 /* RACK TLP cwnd reduction (bool) */ 19808 RACK_OPTS_INC(tcp_rack_tlp_reduce); 19809 rack->r_ctl.rc_tlp_cwnd_reduce = optval; 19810 break; 19811 /* Pacing related ones */ 19812 case TCP_RACK_PACE_ALWAYS: 19813 /* 19814 * zero is old rack method, 1 is new 19815 * method using a pacing rate. 19816 */ 19817 RACK_OPTS_INC(tcp_rack_pace_always); 19818 if (optval > 0) { 19819 if (rack->rc_always_pace) { 19820 error = EALREADY; 19821 break; 19822 } else if (tcp_can_enable_pacing()) { 19823 rack->rc_always_pace = 1; 19824 if (rack->use_fixed_rate || rack->gp_ready) 19825 rack_set_cc_pacing(rack); 19826 } 19827 else { 19828 error = ENOSPC; 19829 break; 19830 } 19831 } else { 19832 if (rack->rc_always_pace) { 19833 tcp_decrement_paced_conn(); 19834 rack->rc_always_pace = 0; 19835 rack_undo_cc_pacing(rack); 19836 } 19837 } 19838 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) 19839 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 19840 else 19841 inp->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 19842 /* A rate may be set irate or other, if so set seg size */ 19843 rack_update_seg(rack); 19844 break; 19845 case TCP_BBR_RACK_INIT_RATE: 19846 RACK_OPTS_INC(tcp_initial_rate); 19847 val = optval; 19848 /* Change from kbits per second to bytes per second */ 19849 val *= 1000; 19850 val /= 8; 19851 rack->r_ctl.init_rate = val; 19852 if (rack->rc_init_win != rack_default_init_window) { 19853 uint32_t win, snt; 19854 19855 /* 19856 * Options don't always get applied 19857 * in the order you think. So in order 19858 * to assure we update a cwnd we need 19859 * to check and see if we are still 19860 * where we should raise the cwnd. 
19861 */ 19862 win = rc_init_window(rack); 19863 if (SEQ_GT(tp->snd_max, tp->iss)) 19864 snt = tp->snd_max - tp->iss; 19865 else 19866 snt = 0; 19867 if ((snt < win) && 19868 (tp->snd_cwnd < win)) 19869 tp->snd_cwnd = win; 19870 } 19871 if (rack->rc_always_pace) 19872 rack_update_seg(rack); 19873 break; 19874 case TCP_BBR_IWINTSO: 19875 RACK_OPTS_INC(tcp_initial_win); 19876 if (optval && (optval <= 0xff)) { 19877 uint32_t win, snt; 19878 19879 rack->rc_init_win = optval; 19880 win = rc_init_window(rack); 19881 if (SEQ_GT(tp->snd_max, tp->iss)) 19882 snt = tp->snd_max - tp->iss; 19883 else 19884 snt = 0; 19885 if ((snt < win) && 19886 (tp->t_srtt | 19887 #ifdef NETFLIX_PEAKRATE 19888 tp->t_maxpeakrate | 19889 #endif 19890 rack->r_ctl.init_rate)) { 19891 /* 19892 * We are not past the initial window 19893 * and we have some bases for pacing, 19894 * so we need to possibly adjust up 19895 * the cwnd. Note even if we don't set 19896 * the cwnd, its still ok to raise the rc_init_win 19897 * which can be used coming out of idle when we 19898 * would have a rate. 19899 */ 19900 if (tp->snd_cwnd < win) 19901 tp->snd_cwnd = win; 19902 } 19903 if (rack->rc_always_pace) 19904 rack_update_seg(rack); 19905 } else 19906 error = EINVAL; 19907 break; 19908 case TCP_RACK_FORCE_MSEG: 19909 RACK_OPTS_INC(tcp_rack_force_max_seg); 19910 if (optval) 19911 rack->rc_force_max_seg = 1; 19912 else 19913 rack->rc_force_max_seg = 0; 19914 break; 19915 case TCP_RACK_PACE_MAX_SEG: 19916 /* Max segments size in a pace in bytes */ 19917 RACK_OPTS_INC(tcp_rack_max_seg); 19918 rack->rc_user_set_max_segs = optval; 19919 rack_set_pace_segments(tp, rack, __LINE__, NULL); 19920 break; 19921 case TCP_RACK_PACE_RATE_REC: 19922 /* Set the fixed pacing rate in Bytes per second ca */ 19923 RACK_OPTS_INC(tcp_rack_pace_rate_rec); 19924 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19925 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19926 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19927 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19928 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19929 rack->use_fixed_rate = 1; 19930 if (rack->rc_always_pace) 19931 rack_set_cc_pacing(rack); 19932 rack_log_pacing_delay_calc(rack, 19933 rack->r_ctl.rc_fixed_pacing_rate_ss, 19934 rack->r_ctl.rc_fixed_pacing_rate_ca, 19935 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19936 __LINE__, NULL,0); 19937 break; 19938 19939 case TCP_RACK_PACE_RATE_SS: 19940 /* Set the fixed pacing rate in Bytes per second ca */ 19941 RACK_OPTS_INC(tcp_rack_pace_rate_ss); 19942 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19943 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) 19944 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19945 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19946 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19947 rack->use_fixed_rate = 1; 19948 if (rack->rc_always_pace) 19949 rack_set_cc_pacing(rack); 19950 rack_log_pacing_delay_calc(rack, 19951 rack->r_ctl.rc_fixed_pacing_rate_ss, 19952 rack->r_ctl.rc_fixed_pacing_rate_ca, 19953 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19954 __LINE__, NULL, 0); 19955 break; 19956 19957 case TCP_RACK_PACE_RATE_CA: 19958 /* Set the fixed pacing rate in Bytes per second ca */ 19959 RACK_OPTS_INC(tcp_rack_pace_rate_ca); 19960 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; 19961 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) 19962 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; 19963 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) 19964 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; 19965 rack->use_fixed_rate = 1; 19966 
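		/* If pacing is already enabled, apply the pacing CC settings now. */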
if (rack->rc_always_pace) 19967 rack_set_cc_pacing(rack); 19968 rack_log_pacing_delay_calc(rack, 19969 rack->r_ctl.rc_fixed_pacing_rate_ss, 19970 rack->r_ctl.rc_fixed_pacing_rate_ca, 19971 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, 19972 __LINE__, NULL, 0); 19973 break; 19974 case TCP_RACK_GP_INCREASE_REC: 19975 RACK_OPTS_INC(tcp_gp_inc_rec); 19976 rack->r_ctl.rack_per_of_gp_rec = optval; 19977 rack_log_pacing_delay_calc(rack, 19978 rack->r_ctl.rack_per_of_gp_ss, 19979 rack->r_ctl.rack_per_of_gp_ca, 19980 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19981 __LINE__, NULL, 0); 19982 break; 19983 case TCP_RACK_GP_INCREASE_CA: 19984 RACK_OPTS_INC(tcp_gp_inc_ca); 19985 ca = optval; 19986 if (ca < 100) { 19987 /* 19988 * We don't allow any reduction 19989 * over the GP b/w. 19990 */ 19991 error = EINVAL; 19992 break; 19993 } 19994 rack->r_ctl.rack_per_of_gp_ca = ca; 19995 rack_log_pacing_delay_calc(rack, 19996 rack->r_ctl.rack_per_of_gp_ss, 19997 rack->r_ctl.rack_per_of_gp_ca, 19998 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 19999 __LINE__, NULL, 0); 20000 break; 20001 case TCP_RACK_GP_INCREASE_SS: 20002 RACK_OPTS_INC(tcp_gp_inc_ss); 20003 ss = optval; 20004 if (ss < 100) { 20005 /* 20006 * We don't allow any reduction 20007 * over the GP b/w. 20008 */ 20009 error = EINVAL; 20010 break; 20011 } 20012 rack->r_ctl.rack_per_of_gp_ss = ss; 20013 rack_log_pacing_delay_calc(rack, 20014 rack->r_ctl.rack_per_of_gp_ss, 20015 rack->r_ctl.rack_per_of_gp_ca, 20016 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, 20017 __LINE__, NULL, 0); 20018 break; 20019 case TCP_RACK_RR_CONF: 20020 RACK_OPTS_INC(tcp_rack_rrr_no_conf_rate); 20021 if (optval && optval <= 3) 20022 rack->r_rr_config = optval; 20023 else 20024 rack->r_rr_config = 0; 20025 break; 20026 case TCP_HDWR_RATE_CAP: 20027 RACK_OPTS_INC(tcp_hdwr_rate_cap); 20028 if (optval) { 20029 if (rack->r_rack_hw_rate_caps == 0) 20030 rack->r_rack_hw_rate_caps = 1; 20031 else 20032 error = EALREADY; 20033 } else { 20034 rack->r_rack_hw_rate_caps = 0; 20035 } 20036 break; 20037 case TCP_BBR_HDWR_PACE: 20038 RACK_OPTS_INC(tcp_hdwr_pacing); 20039 if (optval){ 20040 if (rack->rack_hdrw_pacing == 0) { 20041 rack->rack_hdw_pace_ena = 1; 20042 rack->rack_attempt_hdwr_pace = 0; 20043 } else 20044 error = EALREADY; 20045 } else { 20046 rack->rack_hdw_pace_ena = 0; 20047 #ifdef RATELIMIT 20048 if (rack->r_ctl.crte != NULL) { 20049 rack->rack_hdrw_pacing = 0; 20050 rack->rack_attempt_hdwr_pace = 0; 20051 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); 20052 rack->r_ctl.crte = NULL; 20053 } 20054 #endif 20055 } 20056 break; 20057 /* End Pacing related ones */ 20058 case TCP_RACK_PRR_SENDALOT: 20059 /* Allow PRR to send more than one seg */ 20060 RACK_OPTS_INC(tcp_rack_prr_sendalot); 20061 rack->r_ctl.rc_prr_sendalot = optval; 20062 break; 20063 case TCP_RACK_MIN_TO: 20064 /* Minimum time between rack t-o's in ms */ 20065 RACK_OPTS_INC(tcp_rack_min_to); 20066 rack->r_ctl.rc_min_to = optval; 20067 break; 20068 case TCP_RACK_EARLY_SEG: 20069 /* If early recovery max segments */ 20070 RACK_OPTS_INC(tcp_rack_early_seg); 20071 rack->r_ctl.rc_early_recovery_segs = optval; 20072 break; 20073 case TCP_RACK_ENABLE_HYSTART: 20074 { 20075 if (optval) { 20076 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; 20077 if (rack_do_hystart > RACK_HYSTART_ON) 20078 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; 20079 if (rack_do_hystart > RACK_HYSTART_ON_W_SC) 20080 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; 20081 } else { 20082 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); 20083 
} 20084 } 20085 break; 20086 case TCP_RACK_REORD_THRESH: 20087 /* RACK reorder threshold (shift amount) */ 20088 RACK_OPTS_INC(tcp_rack_reord_thresh); 20089 if ((optval > 0) && (optval < 31)) 20090 rack->r_ctl.rc_reorder_shift = optval; 20091 else 20092 error = EINVAL; 20093 break; 20094 case TCP_RACK_REORD_FADE: 20095 /* Does reordering fade after ms time */ 20096 RACK_OPTS_INC(tcp_rack_reord_fade); 20097 rack->r_ctl.rc_reorder_fade = optval; 20098 break; 20099 case TCP_RACK_TLP_THRESH: 20100 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 20101 RACK_OPTS_INC(tcp_rack_tlp_thresh); 20102 if (optval) 20103 rack->r_ctl.rc_tlp_threshold = optval; 20104 else 20105 error = EINVAL; 20106 break; 20107 case TCP_BBR_USE_RACK_RR: 20108 RACK_OPTS_INC(tcp_rack_rr); 20109 if (optval) 20110 rack->use_rack_rr = 1; 20111 else 20112 rack->use_rack_rr = 0; 20113 break; 20114 case TCP_FAST_RSM_HACK: 20115 RACK_OPTS_INC(tcp_rack_fastrsm_hack); 20116 if (optval) 20117 rack->fast_rsm_hack = 1; 20118 else 20119 rack->fast_rsm_hack = 0; 20120 break; 20121 case TCP_RACK_PKT_DELAY: 20122 /* RACK added ms i.e. rack-rtt + reord + N */ 20123 RACK_OPTS_INC(tcp_rack_pkt_delay); 20124 rack->r_ctl.rc_pkt_delay = optval; 20125 break; 20126 case TCP_DELACK: 20127 RACK_OPTS_INC(tcp_rack_delayed_ack); 20128 if (optval == 0) 20129 tp->t_delayed_ack = 0; 20130 else 20131 tp->t_delayed_ack = 1; 20132 if (tp->t_flags & TF_DELACK) { 20133 tp->t_flags &= ~TF_DELACK; 20134 tp->t_flags |= TF_ACKNOW; 20135 NET_EPOCH_ENTER(et); 20136 rack_output(tp); 20137 NET_EPOCH_EXIT(et); 20138 } 20139 break; 20140 20141 case TCP_BBR_RACK_RTT_USE: 20142 RACK_OPTS_INC(tcp_rack_rtt_use); 20143 if ((optval != USE_RTT_HIGH) && 20144 (optval != USE_RTT_LOW) && 20145 (optval != USE_RTT_AVG)) 20146 error = EINVAL; 20147 else 20148 rack->r_ctl.rc_rate_sample_method = optval; 20149 break; 20150 case TCP_DATA_AFTER_CLOSE: 20151 RACK_OPTS_INC(tcp_data_after_close); 20152 if (optval) 20153 rack->rc_allow_data_af_clo = 1; 20154 else 20155 rack->rc_allow_data_af_clo = 0; 20156 break; 20157 default: 20158 break; 20159 } 20160 #ifdef NETFLIX_STATS 20161 tcp_log_socket_option(tp, sopt_name, optval, error); 20162 #endif 20163 return (error); 20164 } 20165 20166 20167 static void 20168 rack_apply_deferred_options(struct tcp_rack *rack) 20169 { 20170 struct deferred_opt_list *dol, *sdol; 20171 uint32_t s_optval; 20172 20173 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { 20174 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); 20175 /* Disadvantage of deferal is you loose the error return */ 20176 s_optval = (uint32_t)dol->optval; 20177 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval); 20178 free(dol, M_TCPDO); 20179 } 20180 } 20181 20182 static void 20183 rack_hw_tls_change(struct tcpcb *tp, int chg) 20184 { 20185 /* 20186 * HW tls state has changed.. fix all 20187 * rsm's in flight. 
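	 * Retransmissions then carry the correct hw_tls marking on each rsm.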
static void
rack_hw_tls_change(struct tcpcb *tp, int chg)
{
	/*
	 * HW tls state has changed... fix all
	 * rsm's in flight.
	 */
	struct tcp_rack *rack;
	struct rack_sendmap *rsm;

	rack = (struct tcp_rack *)tp->t_fb_ptr;
	RB_FOREACH(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
		if (chg)
			rsm->r_hw_tls = 1;
		else
			rsm->r_hw_tls = 0;
	}
	if (chg)
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
}

static int
rack_pru_options(struct tcpcb *tp, int flags)
{
	if (flags & PRUS_OOB)
		return (EOPNOTSUPP);
	return (0);
}

static struct tcp_function_block __tcp_rack = {
	.tfb_tcp_block_name = __XSTRING(STACKNAME),
	.tfb_tcp_output = rack_output,
	.tfb_do_queued_segments = ctf_do_queued_segments,
	.tfb_do_segment_nounlock = rack_do_segment_nounlock,
	.tfb_tcp_do_segment = rack_do_segment,
	.tfb_tcp_ctloutput = rack_ctloutput,
	.tfb_tcp_fb_init = rack_init,
	.tfb_tcp_fb_fini = rack_fini,
	.tfb_tcp_timer_stop_all = rack_stopall,
	.tfb_tcp_rexmit_tmr = rack_remxt_tmr,
	.tfb_tcp_handoff_ok = rack_handoff_ok,
	.tfb_tcp_mtu_chg = rack_mtu_change,
	.tfb_pru_options = rack_pru_options,
	.tfb_hwtls_change = rack_hw_tls_change,
	.tfb_compute_pipe = rack_compute_pipe,
	.tfb_flags = TCP_FUNC_OUTPUT_CANDROP,
};
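
/*
 * How the entry points above get exercised from userland (illustrative
 * sketch only, not built with this file; assumes the usual userland
 * headers, a connected TCP socket "fd", and that the stack registered
 * below under STACKNAME is named "rack"): the connection is first moved
 * onto this function block by name, after which the TCP_RACK_* socket
 * options below are routed to rack_ctloutput().
 *
 *	struct tcp_function_set tfs;
 *	int one = 1;
 *
 *	memset(&tfs, 0, sizeof(tfs));
 *	strlcpy(tfs.function_set_name, "rack", sizeof(tfs.function_set_name));
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &tfs, sizeof(tfs));
 *	(void)setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS, &one, sizeof(one));
 */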
20296 */ 20297 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; 20298 break; 20299 } 20300 INP_WUNLOCK(inp); 20301 return (0); 20302 #endif 20303 } 20304 20305 switch (sopt->sopt_name) { 20306 case TCP_RACK_TLP_REDUCE: /* URL:tlp_reduce */ 20307 /* Pacing related ones */ 20308 case TCP_RACK_PACE_ALWAYS: /* URL:pace_always */ 20309 case TCP_BBR_RACK_INIT_RATE: /* URL:irate */ 20310 case TCP_BBR_IWINTSO: /* URL:tso_iwin */ 20311 case TCP_RACK_PACE_MAX_SEG: /* URL:pace_max_seg */ 20312 case TCP_RACK_FORCE_MSEG: /* URL:force_max_seg */ 20313 case TCP_RACK_PACE_RATE_CA: /* URL:pr_ca */ 20314 case TCP_RACK_PACE_RATE_SS: /* URL:pr_ss*/ 20315 case TCP_RACK_PACE_RATE_REC: /* URL:pr_rec */ 20316 case TCP_RACK_GP_INCREASE_CA: /* URL:gp_inc_ca */ 20317 case TCP_RACK_GP_INCREASE_SS: /* URL:gp_inc_ss */ 20318 case TCP_RACK_GP_INCREASE_REC: /* URL:gp_inc_rec */ 20319 case TCP_RACK_RR_CONF: /* URL:rrr_conf */ 20320 case TCP_BBR_HDWR_PACE: /* URL:hdwrpace */ 20321 case TCP_HDWR_RATE_CAP: /* URL:hdwrcap boolean */ 20322 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ 20323 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ 20324 /* End pacing related */ 20325 case TCP_FAST_RSM_HACK: /* URL:frsm_hack */ 20326 case TCP_DELACK: /* URL:delack (in base TCP i.e. tcp_hints along with cc etc ) */ 20327 case TCP_RACK_PRR_SENDALOT: /* URL:prr_sendalot */ 20328 case TCP_RACK_MIN_TO: /* URL:min_to */ 20329 case TCP_RACK_EARLY_SEG: /* URL:early_seg */ 20330 case TCP_RACK_REORD_THRESH: /* URL:reord_thresh */ 20331 case TCP_RACK_REORD_FADE: /* URL:reord_fade */ 20332 case TCP_RACK_TLP_THRESH: /* URL:tlp_thresh */ 20333 case TCP_RACK_PKT_DELAY: /* URL:pkt_delay */ 20334 case TCP_RACK_TLP_USE: /* URL:tlp_use */ 20335 case TCP_BBR_RACK_RTT_USE: /* URL:rttuse */ 20336 case TCP_BBR_USE_RACK_RR: /* URL:rackrr */ 20337 case TCP_RACK_DO_DETECTION: /* URL:detect */ 20338 case TCP_NO_PRR: /* URL:noprr */ 20339 case TCP_TIMELY_DYN_ADJ: /* URL:dynamic */ 20340 case TCP_DATA_AFTER_CLOSE: /* no URL */ 20341 case TCP_RACK_NONRXT_CFG_RATE: /* URL:nonrxtcr */ 20342 case TCP_SHARED_CWND_ENABLE: /* URL:scwnd */ 20343 case TCP_RACK_MBUF_QUEUE: /* URL:mqueue */ 20344 case TCP_RACK_NO_PUSH_AT_MAX: /* URL:npush */ 20345 case TCP_RACK_PACE_TO_FILL: /* URL:fillcw */ 20346 case TCP_SHARED_CWND_TIME_LIMIT: /* URL:lscwnd */ 20347 case TCP_RACK_PROFILE: /* URL:profile */ 20348 case TCP_USE_CMP_ACKS: /* URL:cmpack */ 20349 case TCP_RACK_ABC_VAL: /* URL:labc */ 20350 case TCP_REC_ABC_VAL: /* URL:reclabc */ 20351 case TCP_RACK_MEASURE_CNT: /* URL:measurecnt */ 20352 case TCP_DEFER_OPTIONS: /* URL:defer */ 20353 case TCP_RACK_DSACK_OPT: /* URL:dsack */ 20354 case TCP_RACK_PACING_BETA: /* URL:pacing_beta */ 20355 case TCP_RACK_PACING_BETA_ECN: /* URL:pacing_beta_ecn */ 20356 case TCP_RACK_TIMER_SLOP: /* URL:timer_slop */ 20357 case TCP_RACK_ENABLE_HYSTART: /* URL:hystart */ 20358 break; 20359 default: 20360 /* Filter off all unknown options to the base stack */ 20361 return (tcp_default_ctloutput(inp, sopt)); 20362 break; 20363 } 20364 INP_WUNLOCK(inp); 20365 if (sopt->sopt_name == TCP_PACING_RATE_CAP) { 20366 error = sooptcopyin(sopt, &loptval, sizeof(loptval), sizeof(loptval)); 20367 /* 20368 * We truncate it down to 32 bits for the socket-option trace this 20369 * means rates > 34Gbps won't show right, but thats probably ok. 
20370 */ 20371 optval = (uint32_t)loptval; 20372 } else { 20373 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 20374 /* Save it in 64 bit form too */ 20375 loptval = optval; 20376 } 20377 if (error) 20378 return (error); 20379 INP_WLOCK(inp); 20380 if (inp->inp_flags & INP_DROPPED) { 20381 INP_WUNLOCK(inp); 20382 return (ECONNRESET); 20383 } 20384 if (tp->t_fb != &__tcp_rack) { 20385 INP_WUNLOCK(inp); 20386 return (ENOPROTOOPT); 20387 } 20388 if (rack->defer_options && (rack->gp_ready == 0) && 20389 (sopt->sopt_name != TCP_DEFER_OPTIONS) && 20390 (sopt->sopt_name != TCP_RACK_PACING_BETA) && 20391 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && 20392 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { 20393 /* Options are beind deferred */ 20394 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { 20395 INP_WUNLOCK(inp); 20396 return (0); 20397 } else { 20398 /* No memory to defer, fail */ 20399 INP_WUNLOCK(inp); 20400 return (ENOMEM); 20401 } 20402 } 20403 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval); 20404 INP_WUNLOCK(inp); 20405 return (error); 20406 } 20407 20408 static void 20409 rack_fill_info(struct tcpcb *tp, struct tcp_info *ti) 20410 { 20411 20412 INP_WLOCK_ASSERT(tptoinpcb(tp)); 20413 bzero(ti, sizeof(*ti)); 20414 20415 ti->tcpi_state = tp->t_state; 20416 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) 20417 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; 20418 if (tp->t_flags & TF_SACK_PERMIT) 20419 ti->tcpi_options |= TCPI_OPT_SACK; 20420 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { 20421 ti->tcpi_options |= TCPI_OPT_WSCALE; 20422 ti->tcpi_snd_wscale = tp->snd_scale; 20423 ti->tcpi_rcv_wscale = tp->rcv_scale; 20424 } 20425 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) 20426 ti->tcpi_options |= TCPI_OPT_ECN; 20427 if (tp->t_flags & TF_FASTOPEN) 20428 ti->tcpi_options |= TCPI_OPT_TFO; 20429 /* still kept in ticks is t_rcvtime */ 20430 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; 20431 /* Since we hold everything in precise useconds this is easy */ 20432 ti->tcpi_rtt = tp->t_srtt; 20433 ti->tcpi_rttvar = tp->t_rttvar; 20434 ti->tcpi_rto = tp->t_rxtcur; 20435 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; 20436 ti->tcpi_snd_cwnd = tp->snd_cwnd; 20437 /* 20438 * FreeBSD-specific extension fields for tcp_info. 20439 */ 20440 ti->tcpi_rcv_space = tp->rcv_wnd; 20441 ti->tcpi_rcv_nxt = tp->rcv_nxt; 20442 ti->tcpi_snd_wnd = tp->snd_wnd; 20443 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. 
static int
rack_get_sockopt(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	struct tcp_rack *rack;
	int32_t error, optval;
	uint64_t val, loptval;
	struct tcp_info ti;
	/*
	 * Because all our options are either boolean or an int, we can just
	 * pull everything into optval and then unlock and copy. If we ever
	 * add an option that is not an int, then this will have quite an
	 * impact on this routine.
	 */
	error = 0;
	tp = intotcpcb(inp);
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack == NULL) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}
	switch (sopt->sopt_name) {
	case TCP_INFO:
		/* First get the info filled */
		rack_fill_info(tp, &ti);
		/* Fix up the rtt related fields if needed */
		INP_WUNLOCK(inp);
		error = sooptcopyout(sopt, &ti, sizeof ti);
		return (error);
	/*
	 * Beta is the congestion control value for NewReno that influences how
	 * much of a backoff happens when loss is detected. It is normally set
	 * to 50 for 50%, i.e. the cwnd is reduced to 50% of its previous value
	 * when you exit recovery (so a connection that entered recovery with a
	 * cwnd of 100 segments leaves it with roughly 50).
	 */
	case TCP_RACK_PACING_BETA:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
			else
				error = EINVAL;
		}
		break;
	/*
	 * Beta_ecn is the congestion control value for NewReno that influences how
	 * much of a backoff happens when an ECN mark is detected. It is normally set
	 * to 80 for 80%, i.e. the cwnd is reduced by 20% of its previous value when
	 * you exit recovery. Note that classic ECN has a beta of 50; it is only
	 * ABE ECN that uses this "less" value, but we do too with pacing :)
	 */

	case TCP_RACK_PACING_BETA_ECN:
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0)
			error = EINVAL;
		else if (rack->rc_pacing_cc_set == 0)
			optval = rack->r_ctl.rc_saved_beta.beta_ecn;
		else {
			/*
			 * Reach out into the CC data and report back what
			 * I have previously set. Yeah it looks hackish but
			 * we don't want to report the saved values.
			 */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			else
				error = EINVAL;
		}
		break;
	case TCP_RACK_DSACK_OPT:
		optval = 0;
		if (rack->rc_rack_tmr_std_based) {
			optval |= 1;
		}
		if (rack->rc_rack_use_dsack) {
			optval |= 2;
		}
		break;
	case TCP_RACK_ENABLE_HYSTART:
	{
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			optval = RACK_HYSTART_ON;
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND)
				optval = RACK_HYSTART_ON_W_SC;
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH)
				optval = RACK_HYSTART_ON_W_SC_C;
		} else {
			optval = RACK_HYSTART_OFF;
		}
	}
		break;
	case TCP_FAST_RSM_HACK:
		optval = rack->fast_rsm_hack;
		break;
	case TCP_DEFER_OPTIONS:
		optval = rack->defer_options;
		break;
	case TCP_RACK_MEASURE_CNT:
		optval = rack->r_ctl.req_measurements;
		break;
	case TCP_REC_ABC_VAL:
		optval = rack->r_use_labc_for_rec;
		break;
	case TCP_RACK_ABC_VAL:
		optval = rack->rc_labc;
		break;
	case TCP_HDWR_UP_ONLY:
		optval = rack->r_up_only;
		break;
	case TCP_PACING_RATE_CAP:
		loptval = rack->r_ctl.bw_rate_cap;
		break;
	case TCP_RACK_PROFILE:
		/* You cannot retrieve a profile, it is write only */
		error = EINVAL;
		break;
	case TCP_USE_CMP_ACKS:
		optval = rack->r_use_cmp_ack;
		break;
	case TCP_RACK_PACE_TO_FILL:
		optval = rack->rc_pace_to_cwnd;
		if (optval && rack->r_fill_less_agg)
			optval++;
		break;
	case TCP_RACK_NO_PUSH_AT_MAX:
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		break;
	case TCP_SHARED_CWND_ENABLE:
		optval = rack->rack_enable_scwnd;
		break;
	case TCP_RACK_NONRXT_CFG_RATE:
		optval = rack->rack_rec_nonrxt_use_cr;
		break;
	case TCP_NO_PRR:
		if (rack->rack_no_prr == 1)
			optval = 1;
		else if (rack->no_prr_addback == 1)
			optval = 2;
		else
			optval = 0;
		break;
	case TCP_RACK_DO_DETECTION:
		optval = rack->do_detection;
		break;
	case TCP_RACK_MBUF_QUEUE:
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		break;
	case TCP_TIMELY_DYN_ADJ:
		optval = rack->rc_gp_dyn_mul;
		break;
	case TCP_BBR_IWINTSO:
		optval = rack->rc_init_win;
		break;
	case TCP_RACK_TLP_REDUCE:
		/* RACK TLP cwnd reduction (bool) */
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		break;
	case TCP_BBR_RACK_INIT_RATE:
		val = rack->r_ctl.init_rate;
		/* convert to kbits per sec */
		val *= 8;
		val /= 1000;
		optval = (uint32_t)val;
		break;
	case TCP_RACK_FORCE_MSEG:
		optval = rack->rc_force_max_seg;
		break;
	case TCP_RACK_PACE_MAX_SEG:
		/* Max segments in a pace */
		optval = rack->rc_user_set_max_segs;
		break;
	case TCP_RACK_PACE_ALWAYS:
		/* Use the always pace method */
		optval = rack->rc_always_pace;
		break;
	case TCP_RACK_PRR_SENDALOT:
		/* Allow PRR to send more than one seg */
		optval = rack->r_ctl.rc_prr_sendalot;
		break;
	case TCP_RACK_MIN_TO:
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		break;
	case TCP_RACK_EARLY_SEG:
		/* If early recovery max segments */
		optval = rack->r_ctl.rc_early_recovery_segs;
		break;
	case TCP_RACK_REORD_THRESH:
		/* RACK reorder threshold (shift amount) */
		optval = rack->r_ctl.rc_reorder_shift;
		break;
	case TCP_RACK_REORD_FADE:
		/* Does reordering fade after ms time */
		optval = rack->r_ctl.rc_reorder_fade;
		break;
	case TCP_BBR_USE_RACK_RR:
		/* Do we use the rack cheat for rxt */
		optval = rack->use_rack_rr;
		break;
	case TCP_RACK_RR_CONF:
		optval = rack->r_rr_config;
		break;
	case TCP_HDWR_RATE_CAP:
		optval = rack->r_rack_hw_rate_caps;
		break;
	case TCP_BBR_HDWR_PACE:
		optval = rack->rack_hdw_pace_ena;
		break;
	case TCP_RACK_TLP_THRESH:
		/* RACK TLP threshold i.e. srtt+(srtt/N) */
		optval = rack->r_ctl.rc_tlp_threshold;
		break;
	case TCP_RACK_PKT_DELAY:
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		break;
	case TCP_RACK_TLP_USE:
		optval = rack->rack_tlp_threshold_use;
		break;
	case TCP_RACK_PACE_RATE_CA:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		break;
	case TCP_RACK_PACE_RATE_SS:
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		break;
	case TCP_RACK_PACE_RATE_REC:
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		break;
	case TCP_RACK_GP_INCREASE_SS:
		optval = rack->r_ctl.rack_per_of_gp_ss;
		break;
	case TCP_RACK_GP_INCREASE_CA:
		optval = rack->r_ctl.rack_per_of_gp_ca;
		break;
	case TCP_BBR_RACK_RTT_USE:
		optval = rack->r_ctl.rc_rate_sample_method;
		break;
	case TCP_DELACK:
		optval = tp->t_delayed_ack;
		break;
	case TCP_DATA_AFTER_CLOSE:
		optval = rack->rc_allow_data_af_clo;
		break;
	case TCP_SHARED_CWND_TIME_LIMIT:
		optval = rack->r_limit_scw;
		break;
	case TCP_RACK_TIMER_SLOP:
		optval = rack->r_ctl.timer_slop;
		break;
	default:
		return (tcp_default_ctloutput(inp, sopt));
		break;
	}
	INP_WUNLOCK(inp);
	if (error == 0) {
		if (sopt->sopt_name == TCP_PACING_RATE_CAP)
			error = sooptcopyout(sopt, &loptval, sizeof loptval);
		else
			error = sooptcopyout(sopt, &optval, sizeof optval);
	}
	return (error);
}

static int
rack_ctloutput(struct inpcb *inp, struct sockopt *sopt)
{
	if (sopt->sopt_dir == SOPT_SET) {
		return (rack_set_sockopt(inp, sopt));
	} else if (sopt->sopt_dir == SOPT_GET) {
		return (rack_get_sockopt(inp, sopt));
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
}

static const char *rack_stack_names[] = {
	__XSTRING(STACKNAME),
#ifdef STACKALIAS
	__XSTRING(STACKALIAS),
#endif
};

static int
rack_ctor(void *mem, int32_t size, void *arg, int32_t how)
{
	memset(mem, 0, size);
	return (0);
}

static void
rack_dtor(void *mem, int32_t size, void *arg)
{

}

static bool rack_mod_inited = false;
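
/*
 * Module event handler. Once the module is loaded and the registration
 * below succeeds, the stack is listed under the name(s) in
 * rack_stack_names and can be selected system wide or per socket, e.g.
 * (illustrative, from a shell):
 *
 *	kldload tcp_rack
 *	sysctl net.inet.tcp.functions_available
 *	sysctl net.inet.tcp.functions_default=rack
 *
 * or per connection with the TCP_FUNCTION_BLK socket option shown
 * earlier in this file.
 */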
static int
tcp_addrack(module_t mod, int32_t type, void *data)
{
	int32_t err = 0;
	int num_stacks;

	switch (type) {
	case MOD_LOAD:
		rack_zone = uma_zcreate(__XSTRING(MODNAME) "_map",
		    sizeof(struct rack_sendmap),
		    rack_ctor, rack_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

		rack_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb",
		    sizeof(struct tcp_rack),
		    rack_ctor, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

		sysctl_ctx_init(&rack_sysctl_ctx);
		rack_sysctl_root = SYSCTL_ADD_NODE(&rack_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp),
		    OID_AUTO,
#ifdef STACKALIAS
		    __XSTRING(STACKALIAS),
#else
		    __XSTRING(STACKNAME),
#endif
		    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
		    "");
		if (rack_sysctl_root == NULL) {
			printf("Failed to add sysctl node\n");
			err = EFAULT;
			goto free_uma;
		}
		rack_init_sysctls();
		num_stacks = nitems(rack_stack_names);
		err = register_tcp_functions_as_names(&__tcp_rack, M_WAITOK,
		    rack_stack_names, &num_stacks);
		if (err) {
			printf("Failed to register %s stack name for "
			    "%s module\n", rack_stack_names[num_stacks],
			    __XSTRING(MODNAME));
			sysctl_ctx_free(&rack_sysctl_ctx);
free_uma:
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			rack_counter_destroy();
			printf("Failed to register rack module -- err:%d\n", err);
			return (err);
		}
		tcp_lro_reg_mbufq();
		rack_mod_inited = true;
		break;
	case MOD_QUIESCE:
		err = deregister_tcp_functions(&__tcp_rack, true, false);
		break;
	case MOD_UNLOAD:
		err = deregister_tcp_functions(&__tcp_rack, false, true);
		if (err == EBUSY)
			break;
		if (rack_mod_inited) {
			uma_zdestroy(rack_zone);
			uma_zdestroy(rack_pcb_zone);
			sysctl_ctx_free(&rack_sysctl_ctx);
			rack_counter_destroy();
			rack_mod_inited = false;
		}
		tcp_lro_dereg_mbufq();
		err = 0;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (err);
}

static moduledata_t tcp_rack = {
	.name = __XSTRING(MODNAME),
	.evhand = tcp_addrack,
	.priv = 0
};

MODULE_VERSION(MODNAME, 1);
DECLARE_MODULE(MODNAME, tcp_rack, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1);

#endif /* #if defined(INET) || defined(INET6) */