1 /*- 2 * Copyright (c) 2016-2020 Netflix, Inc. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 /** 27 * Author: Randall Stewart <rrs@netflix.com> 28 * This work is based on the ACM Queue paper 29 * BBR - Congestion Based Congestion Control 30 * and also numerous discussions with Neal, Yuchung and Van. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "opt_inet.h" 37 #include "opt_inet6.h" 38 #include "opt_ipsec.h" 39 #include "opt_tcpdebug.h" 40 #include "opt_ratelimit.h" 41 #include "opt_kern_tls.h" 42 #include <sys/param.h> 43 #include <sys/arb.h> 44 #include <sys/module.h> 45 #include <sys/kernel.h> 46 #ifdef TCP_HHOOK 47 #include <sys/hhook.h> 48 #endif 49 #include <sys/malloc.h> 50 #include <sys/mbuf.h> 51 #include <sys/proc.h> 52 #include <sys/socket.h> 53 #include <sys/socketvar.h> 54 #ifdef KERN_TLS 55 #include <sys/ktls.h> 56 #endif 57 #include <sys/sysctl.h> 58 #include <sys/systm.h> 59 #ifdef STATS 60 #include <sys/qmath.h> 61 #include <sys/tree.h> 62 #include <sys/stats.h> /* Must come after qmath.h and tree.h */ 63 #endif 64 #include <sys/refcount.h> 65 #include <sys/queue.h> 66 #include <sys/eventhandler.h> 67 #include <sys/smp.h> 68 #include <sys/kthread.h> 69 #include <sys/lock.h> 70 #include <sys/mutex.h> 71 #include <sys/tim_filter.h> 72 #include <sys/time.h> 73 #include <sys/protosw.h> 74 #include <vm/uma.h> 75 #include <sys/kern_prefetch.h> 76 77 #include <net/route.h> 78 #include <net/route/nhop.h> 79 #include <net/vnet.h> 80 81 #define TCPSTATES /* for logging */ 82 83 #include <netinet/in.h> 84 #include <netinet/in_kdtrace.h> 85 #include <netinet/in_pcb.h> 86 #include <netinet/ip.h> 87 #include <netinet/ip_icmp.h> /* required for icmp_var.h */ 88 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 89 #include <netinet/ip_var.h> 90 #include <netinet/ip6.h> 91 #include <netinet6/in6_pcb.h> 92 #include <netinet6/ip6_var.h> 93 #define TCPOUTFLAGS 94 #include <netinet/tcp.h> 95 #include <netinet/tcp_fsm.h> 96 #include <netinet/tcp_seq.h> 97 #include <netinet/tcp_timer.h> 98 #include <netinet/tcp_var.h> 99 #include <netinet/tcpip.h> 100 #include <netinet/tcp_hpts.h> 101 #include <netinet/cc/cc.h> 102 #include <netinet/tcp_log_buf.h> 103 #include <netinet/tcp_ratelimit.h> 104 #include 
<netinet/tcp_lro.h> 105 #ifdef TCPDEBUG 106 #include <netinet/tcp_debug.h> 107 #endif /* TCPDEBUG */ 108 #ifdef TCP_OFFLOAD 109 #include <netinet/tcp_offload.h> 110 #endif 111 #ifdef INET6 112 #include <netinet6/tcp6_var.h> 113 #endif 114 #include <netinet/tcp_fastopen.h> 115 116 #include <netipsec/ipsec_support.h> 117 #include <net/if.h> 118 #include <net/if_var.h> 119 #include <net/ethernet.h> 120 121 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 122 #include <netipsec/ipsec.h> 123 #include <netipsec/ipsec6.h> 124 #endif /* IPSEC */ 125 126 #include <netinet/udp.h> 127 #include <netinet/udp_var.h> 128 #include <machine/in_cksum.h> 129 130 #ifdef MAC 131 #include <security/mac/mac_framework.h> 132 #endif 133 134 #include "sack_filter.h" 135 #include "tcp_bbr.h" 136 #include "rack_bbr_common.h" 137 uma_zone_t bbr_zone; 138 uma_zone_t bbr_pcb_zone; 139 140 struct sysctl_ctx_list bbr_sysctl_ctx; 141 struct sysctl_oid *bbr_sysctl_root; 142 143 #define TCPT_RANGESET_NOSLOP(tv, value, tvmin, tvmax) do { \ 144 (tv) = (value); \ 145 if ((u_long)(tv) < (u_long)(tvmin)) \ 146 (tv) = (tvmin); \ 147 if ((u_long)(tv) > (u_long)(tvmax)) \ 148 (tv) = (tvmax); \ 149 } while(0) 150 151 /*#define BBR_INVARIANT 1*/ 152 153 /* 154 * initial window 155 */ 156 static uint32_t bbr_def_init_win = 10; 157 static int32_t bbr_persist_min = 250000; /* 250ms */ 158 static int32_t bbr_persist_max = 1000000; /* 1 Second */ 159 static int32_t bbr_cwnd_may_shrink = 0; 160 static int32_t bbr_cwndtarget_rtt_touse = BBR_RTT_PROP; 161 static int32_t bbr_num_pktepo_for_del_limit = BBR_NUM_RTTS_FOR_DEL_LIMIT; 162 static int32_t bbr_hardware_pacing_limit = 8000; 163 static int32_t bbr_quanta = 3; /* How much extra quanta do we get? */ 164 static int32_t bbr_no_retran = 0; 165 166 167 static int32_t bbr_error_base_paceout = 10000; /* usec to pace */ 168 static int32_t bbr_max_net_error_cnt = 10; 169 /* Should the following be dynamic too -- loss wise */ 170 static int32_t bbr_rtt_gain_thresh = 0; 171 /* Measurement controls */ 172 static int32_t bbr_use_google_algo = 1; 173 static int32_t bbr_ts_limiting = 1; 174 static int32_t bbr_ts_can_raise = 0; 175 static int32_t bbr_do_red = 600; 176 static int32_t bbr_red_scale = 20000; 177 static int32_t bbr_red_mul = 1; 178 static int32_t bbr_red_div = 2; 179 static int32_t bbr_red_growth_restrict = 1; 180 static int32_t bbr_target_is_bbunit = 0; 181 static int32_t bbr_drop_limit = 0; 182 /* 183 * How much gain do we need to see to 184 * stay in startup? 185 */ 186 static int32_t bbr_marks_rxt_sack_passed = 0; 187 static int32_t bbr_start_exit = 25; 188 static int32_t bbr_low_start_exit = 25; /* When we are in reduced gain */ 189 static int32_t bbr_startup_loss_thresh = 2000; /* 20.00% loss */ 190 static int32_t bbr_hptsi_max_mul = 1; /* These two mul/div assure a min pacing */ 191 static int32_t bbr_hptsi_max_div = 2; /* time, 0 means turned off. We need this 192 * if we go back ever to where the pacer 193 * has priority over timers. 194 */ 195 static int32_t bbr_policer_call_from_rack_to = 0; 196 static int32_t bbr_policer_detection_enabled = 1; 197 static int32_t bbr_min_measurements_req = 1; /* We need at least 2 198 * measurments before we are 199 * "good" note that 2 == 1. 200 * This is because we use a > 201 * comparison. This means if 202 * min_measure was 0, it takes 203 * num-measures > min(0) and 204 * you get 1 measurement and 205 * you are good. 
Set to 1, you
						 * have to have two
						 * measurements (this is done
						 * to prevent it from being ok
						 * to have no measurements). */
static int32_t bbr_no_pacing_until = 4;

static int32_t bbr_min_usec_delta = 20000;	/* 20,000 usecs */
static int32_t bbr_min_peer_delta = 20;		/* 20 units */
static int32_t bbr_delta_percent = 150;		/* 15.0 % */

static int32_t bbr_target_cwnd_mult_limit = 8;
/*
 * bbr_cwnd_min_val is the number of
 * segments we hold to in the RTT probe
 * state, typically 4.
 */
static int32_t bbr_cwnd_min_val = BBR_PROBERTT_NUM_MSS;

static int32_t bbr_cwnd_min_val_hs = BBR_HIGHSPEED_NUM_MSS;

static int32_t bbr_gain_to_target = 1;
static int32_t bbr_gain_gets_extra_too = 1;
/*
 * bbr_high_gain is the 2/ln(2) value we need
 * to double the sending rate in startup. This
 * is used for both cwnd and hptsi gains.
 */
static int32_t bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
static int32_t bbr_startup_lower = BBR_UNIT * 1500 / 1000 + 1;
static int32_t bbr_use_lower_gain_in_startup = 1;

/* thresholds for reduction on drain in sub-states/drain */
static int32_t bbr_drain_rtt = BBR_SRTT;
static int32_t bbr_drain_floor = 88;
static int32_t google_allow_early_out = 1;
static int32_t google_consider_lost = 1;
static int32_t bbr_drain_drop_mul = 4;
static int32_t bbr_drain_drop_div = 5;
static int32_t bbr_rand_ot = 50;
static int32_t bbr_can_force_probertt = 0;
static int32_t bbr_can_adjust_probertt = 1;
static int32_t bbr_probertt_sets_rtt = 0;
static int32_t bbr_can_use_ts_for_rtt = 1;
static int32_t bbr_is_ratio = 0;
static int32_t bbr_sub_drain_app_limit = 1;
static int32_t bbr_prtt_slam_cwnd = 1;
static int32_t bbr_sub_drain_slam_cwnd = 1;
static int32_t bbr_slam_cwnd_in_main_drain = 1;
static int32_t bbr_filter_len_sec = 6;	/* How long does the rttProp filter
					 * hold */
static uint32_t bbr_rtt_probe_limit = (USECS_IN_SECOND * 4);
/*
 * bbr_drain_gain is the reverse of the high_gain,
 * designed to drain back out the standing queue
 * that is formed in startup by the larger
 * hptsi gain, thus draining the packets
 * in flight.
 */
static int32_t bbr_drain_gain = BBR_UNIT * 1000 / 2885;
static int32_t bbr_rttprobe_gain = 192;

/*
 * The cwnd_gain is the default cwnd gain applied when
 * calculating a target cwnd. Note that the cwnd is
 * a secondary factor in the way BBR works (see the
 * paper and think about it, it will take some time).
 * Basically the hptsi_gain spreads the packets out
 * so you never get more than BDP to the peer even
 * if the cwnd is high. In our implementation that
 * means in non-recovery/retransmission scenarios
 * cwnd will never be reached by the flight-size.
 */
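/*
 * Illustrative arithmetic only (not part of the stack): with gains
 * expressed as fractions of BBR_UNIT, a target cwnd derived from the
 * estimated bandwidth and rttProp works out roughly as sketched below.
 * The helper name and the example numbers are assumptions for
 * illustration, not the actual bbr_get_target_cwnd() implementation.
 */
#if 0	/* sketch, never compiled */
static uint32_t
example_target_cwnd(uint64_t bw_bps, uint32_t rttprop_usecs, uint32_t gain)
{
	uint64_t bdp;

	/* BDP in bytes: bandwidth (bytes/sec) times rttProp (usecs). */
	bdp = (bw_bps * (uint64_t)rttprop_usecs) / USECS_IN_SECOND;
	/* Scale by the gain, which is a fraction of BBR_UNIT. */
	return ((uint32_t)((bdp * gain) / (uint64_t)BBR_UNIT));
}
/*
 * Example: bw = 1,250,000 bytes/sec (10 Mbps) and rttProp = 40,000 usecs
 * give a BDP of 50,000 bytes; with bbr_cwnd_gain = 2 * BBR_UNIT the
 * target works out to ~100,000 bytes (two BDPs).
 */
#endif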
static int32_t bbr_cwnd_gain = BBR_UNIT * 2;
static int32_t bbr_tlp_type_to_use = BBR_SRTT;
static int32_t bbr_delack_time = 100000;	/* 100ms in useconds */
static int32_t bbr_sack_not_required = 0;	/* set to one to allow non-sack to use bbr */
static int32_t bbr_initial_bw_bps = 62500;	/* 500kbps in bytes per second */
static int32_t bbr_ignore_data_after_close = 1;
static int16_t bbr_hptsi_gain[] = {
	(BBR_UNIT * 5 / 4),
	(BBR_UNIT * 3 / 4),
	BBR_UNIT,
	BBR_UNIT,
	BBR_UNIT,
	BBR_UNIT,
	BBR_UNIT,
	BBR_UNIT
};
int32_t bbr_use_rack_resend_cheat = 1;
int32_t bbr_sends_full_iwnd = 1;

#define BBR_HPTSI_GAIN_MAX 8
/*
 * The BBR module incorporates a number of
 * TCP ideas that have been put out into the IETF
 * over the last few years:
 * - Yuchung Cheng's RACK TCP (for which it is named) that
 *   will stop us using the number of dup acks and instead
 *   use time as the gauge of when we retransmit.
 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
 *   of Dukkipati et al.
 * - Van Jacobson et al.'s BBR.
 *
 * RACK depends on SACK, so if an endpoint arrives that
 * cannot do SACK the state machine below will shuttle the
 * connection back to using the "default" TCP stack that is
 * in FreeBSD.
 *
 * To implement BBR and RACK the original TCP stack was first decomposed
 * into a functional state machine with individual states
 * for each of the possible TCP connection states. The do_segment
 * function's role in life is to mandate that the connection supports SACK
 * initially and then assure that the RACK state matches the connection
 * state before calling the state's do_segment function. Data processing
 * of inbound segments also now happens in the hpts_do_segment in general
 * with only one exception. This is so we can keep the connection on
 * a single CPU.
 *
 * Each state is simplified due to the fact that the original do_segment
 * has been decomposed and we *know* what state we are in (no
 * switches on the state) and all tests for SACK are gone. This
 * greatly simplifies what each state does.
 *
 * TCP output is also over-written with a new version since it
 * must maintain the new rack scoreboard and has had hptsi
 * integrated as a requirement. Still to do is to eliminate the
 * use of the callout_() system and use the hpts for all
 * timers as well.
335 */ 336 static uint32_t bbr_rtt_probe_time = 200000; /* 200ms in micro seconds */ 337 static uint32_t bbr_rtt_probe_cwndtarg = 4; /* How many mss's outstanding */ 338 static const int32_t bbr_min_req_free = 2; /* The min we must have on the 339 * free list */ 340 static int32_t bbr_tlp_thresh = 1; 341 static int32_t bbr_reorder_thresh = 2; 342 static int32_t bbr_reorder_fade = 60000000; /* 0 - never fade, def 343 * 60,000,000 - 60 seconds */ 344 static int32_t bbr_pkt_delay = 1000; 345 static int32_t bbr_min_to = 1000; /* Number of usec's minimum timeout */ 346 static int32_t bbr_incr_timers = 1; 347 348 static int32_t bbr_tlp_min = 10000; /* 10ms in usecs */ 349 static int32_t bbr_delayed_ack_time = 200000; /* 200ms in usecs */ 350 static int32_t bbr_exit_startup_at_loss = 1; 351 352 /* 353 * bbr_lt_bw_ratio is 1/8th 354 * bbr_lt_bw_diff is < 4 Kbit/sec 355 */ 356 static uint64_t bbr_lt_bw_diff = 4000 / 8; /* In bytes per second */ 357 static uint64_t bbr_lt_bw_ratio = 8; /* For 1/8th */ 358 static uint32_t bbr_lt_bw_max_rtts = 48; /* How many rtt's do we use 359 * the lt_bw for */ 360 static uint32_t bbr_lt_intvl_min_rtts = 4; /* Min num of RTT's to measure 361 * lt_bw */ 362 static int32_t bbr_lt_intvl_fp = 0; /* False positive epoch diff */ 363 static int32_t bbr_lt_loss_thresh = 196; /* Lost vs delivered % */ 364 static int32_t bbr_lt_fd_thresh = 100; /* false detection % */ 365 366 static int32_t bbr_verbose_logging = 0; 367 /* 368 * Currently regular tcp has a rto_min of 30ms 369 * the backoff goes 12 times so that ends up 370 * being a total of 122.850 seconds before a 371 * connection is killed. 372 */ 373 static int32_t bbr_rto_min_ms = 30; /* 30ms same as main freebsd */ 374 static int32_t bbr_rto_max_sec = 4; /* 4 seconds */ 375 376 /****************************************************/ 377 /* DEFAULT TSO SIZING (cpu performance impacting) */ 378 /****************************************************/ 379 /* What amount is our formula using to get TSO size */ 380 static int32_t bbr_hptsi_per_second = 1000; 381 382 /* 383 * For hptsi under bbr_cross_over connections what is delay 384 * target 7ms (in usec) combined with a seg_max of 2 385 * gets us close to identical google behavior in 386 * TSO size selection (possibly more 1MSS sends). 387 */ 388 static int32_t bbr_hptsi_segments_delay_tar = 7000; 389 390 /* Does pacing delay include overhead's in its time calculations? */ 391 static int32_t bbr_include_enet_oh = 0; 392 static int32_t bbr_include_ip_oh = 1; 393 static int32_t bbr_include_tcp_oh = 1; 394 static int32_t bbr_google_discount = 10; 395 396 /* Do we use (nf mode) pkt-epoch to drive us or rttProp? */ 397 static int32_t bbr_state_is_pkt_epoch = 0; 398 static int32_t bbr_state_drain_2_tar = 1; 399 /* What is the max the 0 - bbr_cross_over MBPS TSO target 400 * can reach using our delay target. Note that this 401 * value becomes the floor for the cross over 402 * algorithm. 403 */ 404 static int32_t bbr_hptsi_segments_max = 2; 405 static int32_t bbr_hptsi_segments_floor = 1; 406 static int32_t bbr_hptsi_utter_max = 0; 407 408 /* What is the min the 0 - bbr_cross-over MBPS TSO target can be */ 409 static int32_t bbr_hptsi_bytes_min = 1460; 410 static int32_t bbr_all_get_min = 0; 411 412 /* Cross over point from algo-a to algo-b */ 413 static uint32_t bbr_cross_over = TWENTY_THREE_MBPS; 414 415 /* Do we deal with our restart state? 
*/ 416 static int32_t bbr_uses_idle_restart = 0; 417 static int32_t bbr_idle_restart_threshold = 100000; /* 100ms in useconds */ 418 419 /* Do we allow hardware pacing? */ 420 static int32_t bbr_allow_hdwr_pacing = 0; 421 static int32_t bbr_hdwr_pace_adjust = 2; /* multipler when we calc the tso size */ 422 static int32_t bbr_hdwr_pace_floor = 1; 423 static int32_t bbr_hdwr_pacing_delay_cnt = 10; 424 425 /****************************************************/ 426 static int32_t bbr_resends_use_tso = 0; 427 static int32_t bbr_tlp_max_resend = 2; 428 static int32_t bbr_sack_block_limit = 128; 429 430 #define BBR_MAX_STAT 19 431 counter_u64_t bbr_state_time[BBR_MAX_STAT]; 432 counter_u64_t bbr_state_lost[BBR_MAX_STAT]; 433 counter_u64_t bbr_state_resend[BBR_MAX_STAT]; 434 counter_u64_t bbr_stat_arry[BBR_STAT_SIZE]; 435 counter_u64_t bbr_opts_arry[BBR_OPTS_SIZE]; 436 counter_u64_t bbr_out_size[TCP_MSS_ACCT_SIZE]; 437 counter_u64_t bbr_flows_whdwr_pacing; 438 counter_u64_t bbr_flows_nohdwr_pacing; 439 440 counter_u64_t bbr_nohdwr_pacing_enobuf; 441 counter_u64_t bbr_hdwr_pacing_enobuf; 442 443 static inline uint64_t bbr_get_bw(struct tcp_bbr *bbr); 444 445 /* 446 * Static defintions we need for forward declarations. 447 */ 448 static uint32_t 449 bbr_get_pacing_length(struct tcp_bbr *bbr, uint16_t gain, 450 uint32_t useconds_time, uint64_t bw); 451 static uint32_t 452 bbr_get_a_state_target(struct tcp_bbr *bbr, uint32_t gain); 453 static void 454 bbr_set_state(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t win); 455 static void 456 bbr_set_probebw_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses); 457 static void 458 bbr_substate_change(struct tcp_bbr *bbr, uint32_t cts, int line, 459 int dolog); 460 static uint32_t 461 bbr_get_target_cwnd(struct tcp_bbr *bbr, uint64_t bw, uint32_t gain); 462 static void 463 bbr_state_change(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, 464 int32_t pkt_epoch, uint32_t losses); 465 static uint32_t 466 bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts, struct bbr_sendmap *rsm); 467 static uint32_t bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp); 468 static uint32_t 469 bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr, 470 struct bbr_sendmap *rsm, uint32_t srtt, 471 uint32_t cts); 472 static void 473 bbr_exit_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, 474 int32_t line); 475 static void 476 bbr_set_state_target(struct tcp_bbr *bbr, int line); 477 static void 478 bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line); 479 480 static void 481 bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick, int event, int line); 482 483 static void 484 tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts); 485 486 static void 487 bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts); 488 489 static void 490 bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied, uint32_t rtt, 491 uint32_t line, uint8_t is_start, uint16_t set); 492 493 static struct bbr_sendmap * 494 bbr_find_lowest_rsm(struct tcp_bbr *bbr); 495 static __inline uint32_t 496 bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type); 497 static void 498 bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which); 499 500 static void 501 bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts, uint32_t time_since_sent, uint32_t srtt, 502 uint32_t thresh, uint32_t to); 503 static void 504 bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag 
*diag); 505 506 static void 507 bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, 508 uint32_t del_by, uint32_t cts, uint32_t sloton, uint32_t prev_delay); 509 510 static void 511 bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr, 512 uint32_t cts, int32_t line); 513 static void 514 bbr_stop_all_timers(struct tcpcb *tp); 515 static void 516 bbr_exit_probe_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts); 517 static void 518 bbr_check_probe_rtt_limits(struct tcp_bbr *bbr, uint32_t cts); 519 static void 520 bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts); 521 522 523 static void 524 bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len, 525 uint32_t cts, uint32_t usecs, uint64_t bw, uint32_t override, int mod); 526 527 static inline uint8_t 528 bbr_state_val(struct tcp_bbr *bbr) 529 { 530 return(bbr->rc_bbr_substate); 531 } 532 533 static inline uint32_t 534 get_min_cwnd(struct tcp_bbr *bbr) 535 { 536 int mss; 537 538 mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), bbr->r_ctl.rc_pace_max_segs); 539 if (bbr_get_rtt(bbr, BBR_RTT_PROP) < BBR_HIGH_SPEED) 540 return (bbr_cwnd_min_val_hs * mss); 541 else 542 return (bbr_cwnd_min_val * mss); 543 } 544 545 static uint32_t 546 bbr_get_persists_timer_val(struct tcpcb *tp, struct tcp_bbr *bbr) 547 { 548 uint64_t srtt, var; 549 uint64_t ret_val; 550 551 bbr->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; 552 if (tp->t_srtt == 0) { 553 srtt = (uint64_t)BBR_INITIAL_RTO; 554 var = 0; 555 } else { 556 srtt = ((uint64_t)TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT); 557 var = ((uint64_t)TICKS_2_USEC(tp->t_rttvar) >> TCP_RTT_SHIFT); 558 } 559 TCPT_RANGESET_NOSLOP(ret_val, ((srtt + var) * tcp_backoff[tp->t_rxtshift]), 560 bbr_persist_min, bbr_persist_max); 561 return ((uint32_t)ret_val); 562 } 563 564 static uint32_t 565 bbr_timer_start(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 566 { 567 /* 568 * Start the FR timer, we do this based on getting the first one in 569 * the rc_tmap. Note that if its NULL we must stop the timer. in all 570 * events we need to stop the running timer (if its running) before 571 * starting the new one. 
572 */ 573 uint32_t thresh, exp, to, srtt, time_since_sent, tstmp_touse; 574 int32_t idx; 575 int32_t is_tlp_timer = 0; 576 struct bbr_sendmap *rsm; 577 578 if (bbr->rc_all_timers_stopped) { 579 /* All timers have been stopped none are to run */ 580 return (0); 581 } 582 if (bbr->rc_in_persist) { 583 /* We can't start any timer in persists */ 584 return (bbr_get_persists_timer_val(tp, bbr)); 585 } 586 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 587 if ((rsm == NULL) || 588 ((tp->t_flags & TF_SACK_PERMIT) == 0) || 589 (tp->t_state < TCPS_ESTABLISHED)) { 590 /* Nothing on the send map */ 591 activate_rxt: 592 if (SEQ_LT(tp->snd_una, tp->snd_max) || sbavail(&(tp->t_inpcb->inp_socket->so_snd))) { 593 uint64_t tov; 594 595 time_since_sent = 0; 596 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 597 if (rsm) { 598 idx = rsm->r_rtr_cnt - 1; 599 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], bbr->r_ctl.rc_tlp_rxt_last_time)) 600 tstmp_touse = rsm->r_tim_lastsent[idx]; 601 else 602 tstmp_touse = bbr->r_ctl.rc_tlp_rxt_last_time; 603 if (TSTMP_GT(tstmp_touse, cts)) 604 time_since_sent = cts - tstmp_touse; 605 } 606 bbr->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; 607 if (tp->t_srtt == 0) 608 tov = BBR_INITIAL_RTO; 609 else 610 tov = ((uint64_t)(TICKS_2_USEC(tp->t_srtt) + 611 ((uint64_t)TICKS_2_USEC(tp->t_rttvar) * (uint64_t)4)) >> TCP_RTT_SHIFT); 612 if (tp->t_rxtshift) 613 tov *= tcp_backoff[tp->t_rxtshift]; 614 if (tov > time_since_sent) 615 tov -= time_since_sent; 616 else 617 tov = bbr->r_ctl.rc_min_to; 618 TCPT_RANGESET_NOSLOP(to, tov, 619 (bbr->r_ctl.rc_min_rto_ms * MS_IN_USEC), 620 (bbr->rc_max_rto_sec * USECS_IN_SECOND)); 621 bbr_log_timer_var(bbr, 2, cts, 0, srtt, 0, to); 622 return (to); 623 } 624 return (0); 625 } 626 if (rsm->r_flags & BBR_ACKED) { 627 rsm = bbr_find_lowest_rsm(bbr); 628 if (rsm == NULL) { 629 /* No lowest? */ 630 goto activate_rxt; 631 } 632 } 633 /* Convert from ms to usecs */ 634 if (rsm->r_flags & BBR_SACK_PASSED) { 635 if ((tp->t_flags & TF_SENTFIN) && 636 ((tp->snd_max - tp->snd_una) == 1) && 637 (rsm->r_flags & BBR_HAS_FIN)) { 638 /* 639 * We don't start a bbr rack timer if all we have is 640 * a FIN outstanding. 641 */ 642 goto activate_rxt; 643 } 644 srtt = bbr_get_rtt(bbr, BBR_RTT_RACK); 645 thresh = bbr_calc_thresh_rack(bbr, srtt, cts, rsm); 646 idx = rsm->r_rtr_cnt - 1; 647 exp = rsm->r_tim_lastsent[idx] + thresh; 648 if (SEQ_GEQ(exp, cts)) { 649 to = exp - cts; 650 if (to < bbr->r_ctl.rc_min_to) { 651 to = bbr->r_ctl.rc_min_to; 652 } 653 } else { 654 to = bbr->r_ctl.rc_min_to; 655 } 656 } else { 657 /* Ok we need to do a TLP not RACK */ 658 if (bbr->rc_tlp_in_progress != 0) { 659 /* 660 * The previous send was a TLP. 661 */ 662 goto activate_rxt; 663 } 664 rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_tmap, bbr_sendmap, r_tnext); 665 if (rsm == NULL) { 666 /* We found no rsm to TLP with. 
*/ 667 goto activate_rxt; 668 } 669 if (rsm->r_flags & BBR_HAS_FIN) { 670 /* If its a FIN we don't do TLP */ 671 rsm = NULL; 672 goto activate_rxt; 673 } 674 time_since_sent = 0; 675 idx = rsm->r_rtr_cnt - 1; 676 if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], bbr->r_ctl.rc_tlp_rxt_last_time)) 677 tstmp_touse = rsm->r_tim_lastsent[idx]; 678 else 679 tstmp_touse = bbr->r_ctl.rc_tlp_rxt_last_time; 680 if (TSTMP_GT(tstmp_touse, cts)) 681 time_since_sent = cts - tstmp_touse; 682 is_tlp_timer = 1; 683 srtt = bbr_get_rtt(bbr, bbr_tlp_type_to_use); 684 thresh = bbr_calc_thresh_tlp(tp, bbr, rsm, srtt, cts); 685 if (thresh > time_since_sent) 686 to = thresh - time_since_sent; 687 else 688 to = bbr->r_ctl.rc_min_to; 689 if (to > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) { 690 /* 691 * If the TLP time works out to larger than the max 692 * RTO lets not do TLP.. just RTO. 693 */ 694 goto activate_rxt; 695 } 696 if ((bbr->rc_tlp_rtx_out == 1) && 697 (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq)) { 698 /* 699 * Second retransmit of the same TLP 700 * lets not. 701 */ 702 bbr->rc_tlp_rtx_out = 0; 703 goto activate_rxt; 704 } 705 if (rsm->r_start != bbr->r_ctl.rc_last_tlp_seq) { 706 /* 707 * The tail is no longer the last one I did a probe 708 * on 709 */ 710 bbr->r_ctl.rc_tlp_seg_send_cnt = 0; 711 bbr->r_ctl.rc_last_tlp_seq = rsm->r_start; 712 } 713 } 714 if (is_tlp_timer == 0) { 715 BBR_STAT_INC(bbr_to_arm_rack); 716 bbr->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; 717 } else { 718 bbr_log_timer_var(bbr, 1, cts, time_since_sent, srtt, thresh, to); 719 if (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend) { 720 /* 721 * We have exceeded how many times we can retran the 722 * current TLP timer, switch to the RTO timer. 723 */ 724 goto activate_rxt; 725 } else { 726 BBR_STAT_INC(bbr_to_arm_tlp); 727 bbr->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; 728 } 729 } 730 return (to); 731 } 732 733 static inline int32_t 734 bbr_minseg(struct tcp_bbr *bbr) 735 { 736 return (bbr->r_ctl.rc_pace_min_segs - bbr->rc_last_options); 737 } 738 739 static void 740 bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len) 741 { 742 struct inpcb *inp; 743 struct hpts_diag diag; 744 uint32_t delayed_ack = 0; 745 uint32_t left = 0; 746 uint32_t hpts_timeout; 747 uint8_t stopped; 748 int32_t delay_calc = 0; 749 uint32_t prev_delay = 0; 750 751 inp = tp->t_inpcb; 752 if (inp->inp_in_hpts) { 753 /* A previous call is already set up */ 754 return; 755 } 756 if ((tp->t_state == TCPS_CLOSED) || 757 (tp->t_state == TCPS_LISTEN)) { 758 return; 759 } 760 stopped = bbr->rc_tmr_stopped; 761 if (stopped && TSTMP_GT(bbr->r_ctl.rc_timer_exp, cts)) { 762 left = bbr->r_ctl.rc_timer_exp - cts; 763 } 764 bbr->r_ctl.rc_hpts_flags = 0; 765 bbr->r_ctl.rc_timer_exp = 0; 766 prev_delay = bbr->r_ctl.rc_last_delay_val; 767 if (bbr->r_ctl.rc_last_delay_val && 768 (slot == 0)) { 769 /* 770 * If a previous pacer delay was in place we 771 * are not coming from the output side (where 772 * we calculate a delay, more likely a timer). 773 */ 774 slot = bbr->r_ctl.rc_last_delay_val; 775 if (TSTMP_GT(cts, bbr->rc_pacer_started)) { 776 /* Compensate for time passed */ 777 delay_calc = cts - bbr->rc_pacer_started; 778 if (delay_calc <= slot) 779 slot -= delay_calc; 780 } 781 } 782 /* Do we have early to make up for by pushing out the pacing time? 
*/ 783 if (bbr->r_agg_early_set) { 784 bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, slot, 0, bbr->r_agg_early_set, 2); 785 slot += bbr->r_ctl.rc_agg_early; 786 bbr->r_ctl.rc_agg_early = 0; 787 bbr->r_agg_early_set = 0; 788 } 789 /* Are we running a total debt that needs to be compensated for? */ 790 if (bbr->r_ctl.rc_hptsi_agg_delay) { 791 if (slot > bbr->r_ctl.rc_hptsi_agg_delay) { 792 /* We nuke the delay */ 793 slot -= bbr->r_ctl.rc_hptsi_agg_delay; 794 bbr->r_ctl.rc_hptsi_agg_delay = 0; 795 } else { 796 /* We nuke some of the delay, put in a minimal 100usecs */ 797 bbr->r_ctl.rc_hptsi_agg_delay -= slot; 798 bbr->r_ctl.rc_last_delay_val = slot = 100; 799 } 800 } 801 bbr->r_ctl.rc_last_delay_val = slot; 802 hpts_timeout = bbr_timer_start(tp, bbr, cts); 803 if (tp->t_flags & TF_DELACK) { 804 if (bbr->rc_in_persist == 0) { 805 delayed_ack = bbr_delack_time; 806 } else { 807 /* 808 * We are in persists and have 809 * gotten a new data element. 810 */ 811 if (hpts_timeout > bbr_delack_time) { 812 /* 813 * Lets make the persists timer (which acks) 814 * be the smaller of hpts_timeout and bbr_delack_time. 815 */ 816 hpts_timeout = bbr_delack_time; 817 } 818 } 819 } 820 if (delayed_ack && 821 ((hpts_timeout == 0) || 822 (delayed_ack < hpts_timeout))) { 823 /* We need a Delayed ack timer */ 824 bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK; 825 hpts_timeout = delayed_ack; 826 } 827 if (slot) { 828 /* Mark that we have a pacing timer up */ 829 BBR_STAT_INC(bbr_paced_segments); 830 bbr->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; 831 } 832 /* 833 * If no timers are going to run and we will fall off thfe hptsi 834 * wheel, we resort to a keep-alive timer if its configured. 835 */ 836 if ((hpts_timeout == 0) && 837 (slot == 0)) { 838 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 839 (tp->t_state <= TCPS_CLOSING)) { 840 /* 841 * Ok we have no timer (persists, rack, tlp, rxt or 842 * del-ack), we don't have segments being paced. So 843 * all that is left is the keepalive timer. 844 */ 845 if (TCPS_HAVEESTABLISHED(tp->t_state)) { 846 hpts_timeout = TICKS_2_USEC(TP_KEEPIDLE(tp)); 847 } else { 848 hpts_timeout = TICKS_2_USEC(TP_KEEPINIT(tp)); 849 } 850 bbr->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; 851 } 852 } 853 if (left && (stopped & (PACE_TMR_KEEP | PACE_TMR_DELACK)) == 854 (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { 855 /* 856 * RACK, TLP, persists and RXT timers all are restartable 857 * based on actions input .. i.e we received a packet (ack 858 * or sack) and that changes things (rw, or snd_una etc). 859 * Thus we can restart them with a new value. For 860 * keep-alive, delayed_ack we keep track of what was left 861 * and restart the timer with a smaller value. 862 */ 863 if (left < hpts_timeout) 864 hpts_timeout = left; 865 } 866 if (bbr->r_ctl.rc_incr_tmrs && slot && 867 (bbr->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { 868 /* 869 * If configured to do so, and the timer is either 870 * the TLP or RXT timer, we need to increase the timeout 871 * by the pacing time. Consider the bottleneck at my 872 * machine as an example, we are sending something 873 * to start a TLP on. The last packet won't be emitted 874 * fully until the pacing time (the bottleneck will hold 875 * the data in place). Once the packet is emitted that 876 * is when we want to start waiting for the TLP. This 877 * is most evident with hardware pacing (where the nic 878 * is holding the packet(s) before emitting). 
But it 879 * can also show up in the network so we do it for all 880 * cases. Technically we would take off one packet from 881 * this extra delay but this is easier and being more 882 * conservative is probably better. 883 */ 884 hpts_timeout += slot; 885 } 886 if (hpts_timeout) { 887 /* 888 * Hack alert for now we can't time-out over 2147 seconds (a 889 * bit more than 35min) 890 */ 891 if (hpts_timeout > 0x7ffffffe) 892 hpts_timeout = 0x7ffffffe; 893 bbr->r_ctl.rc_timer_exp = cts + hpts_timeout; 894 } else 895 bbr->r_ctl.rc_timer_exp = 0; 896 if ((slot) && 897 (bbr->rc_use_google || 898 bbr->output_error_seen || 899 (slot <= hpts_timeout)) ) { 900 /* 901 * Tell LRO that it can queue packets while 902 * we pace. 903 */ 904 bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 905 if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 906 (bbr->rc_cwnd_limited == 0)) { 907 /* 908 * If we are not cwnd limited and we 909 * are running a rack timer we put on 910 * the do not disturbe even for sack. 911 */ 912 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 913 } else 914 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 915 bbr->rc_pacer_started = cts; 916 917 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(slot), 918 __LINE__, &diag); 919 bbr->rc_timer_first = 0; 920 bbr->bbr_timer_src = frm; 921 bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1); 922 bbr_log_hpts_diag(bbr, cts, &diag); 923 } else if (hpts_timeout) { 924 (void)tcp_hpts_insert_diag(tp->t_inpcb, HPTS_USEC_TO_SLOTS(hpts_timeout), 925 __LINE__, &diag); 926 /* 927 * We add the flag here as well if the slot is set, 928 * since hpts will call in to clear the queue first before 929 * calling the output routine (which does our timers). 930 * We don't want to set the flag if its just a timer 931 * else the arrival of data might (that causes us 932 * to send more) might get delayed. Imagine being 933 * on a keep-alive timer and a request comes in for 934 * more data. 935 */ 936 if (slot) 937 bbr->rc_pacer_started = cts; 938 if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) && 939 (bbr->rc_cwnd_limited == 0)) { 940 /* 941 * For a rack timer, don't wake us even 942 * if a sack arrives as long as we are 943 * not cwnd limited. 944 */ 945 bbr->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY; 946 inp->inp_flags2 |= INP_DONT_SACK_QUEUE; 947 } else { 948 /* All other timers wake us up */ 949 bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 950 inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE; 951 } 952 bbr->bbr_timer_src = frm; 953 bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0); 954 bbr_log_hpts_diag(bbr, cts, &diag); 955 bbr->rc_timer_first = 1; 956 } 957 bbr->rc_tmr_stopped = 0; 958 bbr_log_type_bbrsnd(bbr, tot_len, slot, delay_calc, cts, frm, prev_delay); 959 } 960 961 static void 962 bbr_timer_audit(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, struct sockbuf *sb) 963 { 964 /* 965 * We received an ack, and then did not call send or were bounced 966 * out due to the hpts was running. Now a timer is up as well, is it 967 * the right timer? 968 */ 969 struct inpcb *inp; 970 struct bbr_sendmap *rsm; 971 uint32_t hpts_timeout; 972 int tmr_up; 973 974 tmr_up = bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 975 if (bbr->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) 976 return; 977 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 978 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && 979 (tmr_up == PACE_TMR_RXT)) { 980 /* Should be an RXT */ 981 return; 982 } 983 inp = bbr->rc_inp; 984 if (rsm == NULL) { 985 /* Nothing outstanding? 
*/ 986 if (tp->t_flags & TF_DELACK) { 987 if (tmr_up == PACE_TMR_DELACK) 988 /* 989 * We are supposed to have delayed ack up 990 * and we do 991 */ 992 return; 993 } else if (sbavail(&inp->inp_socket->so_snd) && 994 (tmr_up == PACE_TMR_RXT)) { 995 /* 996 * if we hit enobufs then we would expect the 997 * possiblity of nothing outstanding and the RXT up 998 * (and the hptsi timer). 999 */ 1000 return; 1001 } else if (((V_tcp_always_keepalive || 1002 inp->inp_socket->so_options & SO_KEEPALIVE) && 1003 (tp->t_state <= TCPS_CLOSING)) && 1004 (tmr_up == PACE_TMR_KEEP) && 1005 (tp->snd_max == tp->snd_una)) { 1006 /* We should have keep alive up and we do */ 1007 return; 1008 } 1009 } 1010 if (rsm && (rsm->r_flags & BBR_SACK_PASSED)) { 1011 if ((tp->t_flags & TF_SENTFIN) && 1012 ((tp->snd_max - tp->snd_una) == 1) && 1013 (rsm->r_flags & BBR_HAS_FIN)) { 1014 /* needs to be a RXT */ 1015 if (tmr_up == PACE_TMR_RXT) 1016 return; 1017 else 1018 goto wrong_timer; 1019 } else if (tmr_up == PACE_TMR_RACK) 1020 return; 1021 else 1022 goto wrong_timer; 1023 } else if (rsm && (tmr_up == PACE_TMR_RACK)) { 1024 /* Rack timer has priority if we have data out */ 1025 return; 1026 } else if (SEQ_GT(tp->snd_max, tp->snd_una) && 1027 ((tmr_up == PACE_TMR_TLP) || 1028 (tmr_up == PACE_TMR_RXT))) { 1029 /* 1030 * Either a TLP or RXT is fine if no sack-passed is in place 1031 * and data is outstanding. 1032 */ 1033 return; 1034 } else if (tmr_up == PACE_TMR_DELACK) { 1035 /* 1036 * If the delayed ack was going to go off before the 1037 * rtx/tlp/rack timer were going to expire, then that would 1038 * be the timer in control. Note we don't check the time 1039 * here trusting the code is correct. 1040 */ 1041 return; 1042 } 1043 if (SEQ_GT(tp->snd_max, tp->snd_una) && 1044 ((tmr_up == PACE_TMR_RXT) || 1045 (tmr_up == PACE_TMR_TLP) || 1046 (tmr_up == PACE_TMR_RACK))) { 1047 /* 1048 * We have outstanding data and 1049 * we *do* have a RACK, TLP or RXT 1050 * timer running. We won't restart 1051 * anything here since thats probably ok we 1052 * will get called with some timer here shortly. 1053 */ 1054 return; 1055 } 1056 /* 1057 * Ok the timer originally started is not what we want now. We will 1058 * force the hpts to be stopped if any, and restart with the slot 1059 * set to what was in the saved slot. 1060 */ 1061 wrong_timer: 1062 if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) { 1063 if (inp->inp_in_hpts) 1064 tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT); 1065 bbr_timer_cancel(bbr, __LINE__, cts); 1066 bbr_start_hpts_timer(bbr, tp, cts, 1, bbr->r_ctl.rc_last_delay_val, 1067 0); 1068 } else { 1069 /* 1070 * Output is hptsi so we just need to switch the type of 1071 * timer. We don't bother with keep-alive, since when we 1072 * jump through the output, it will start the keep-alive if 1073 * nothing is sent. 1074 * 1075 * We only need a delayed-ack added and or the hpts_timeout. 1076 */ 1077 hpts_timeout = bbr_timer_start(tp, bbr, cts); 1078 if (tp->t_flags & TF_DELACK) { 1079 if (hpts_timeout == 0) { 1080 hpts_timeout = bbr_delack_time; 1081 bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK; 1082 } 1083 else if (hpts_timeout > bbr_delack_time) { 1084 hpts_timeout = bbr_delack_time; 1085 bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK; 1086 } 1087 } 1088 if (hpts_timeout) { 1089 if (hpts_timeout > 0x7ffffffe) 1090 hpts_timeout = 0x7ffffffe; 1091 bbr->r_ctl.rc_timer_exp = cts + hpts_timeout; 1092 } 1093 } 1094 } 1095 1096 int32_t bbr_clear_lost = 0; 1097 1098 /* 1099 * Considers the two time values now (cts) and earlier. 
1100 * If cts is smaller than earlier, we could have 1101 * had a sequence wrap (our counter wraps every 1102 * 70 min or so) or it could be just clock skew 1103 * getting us two differnt time values. Clock skew 1104 * will show up within 10ms or so. So in such 1105 * a case (where cts is behind earlier time by 1106 * less than 10ms) we return 0. Otherwise we 1107 * return the true difference between them. 1108 */ 1109 static inline uint32_t 1110 bbr_calc_time(uint32_t cts, uint32_t earlier_time) { 1111 /* 1112 * Given two timestamps, the current time stamp cts, and some other 1113 * time-stamp taken in theory earlier return the difference. The 1114 * trick is here sometimes locking will get the other timestamp 1115 * after the cts. If this occurs we need to return 0. 1116 */ 1117 if (TSTMP_GEQ(cts, earlier_time)) 1118 return (cts - earlier_time); 1119 /* 1120 * cts is behind earlier_time if its less than 10ms consider it 0. 1121 * If its more than 10ms difference then we had a time wrap. Else 1122 * its just the normal locking foo. I wonder if we should not go to 1123 * 64bit TS and get rid of this issue. 1124 */ 1125 if (TSTMP_GEQ((cts + 10000), earlier_time)) 1126 return (0); 1127 /* 1128 * Ok the time must have wrapped. So we need to answer a large 1129 * amount of time, which the normal subtraction should do. 1130 */ 1131 return (cts - earlier_time); 1132 } 1133 1134 1135 1136 static int 1137 sysctl_bbr_clear_lost(SYSCTL_HANDLER_ARGS) 1138 { 1139 uint32_t stat; 1140 int32_t error; 1141 1142 error = SYSCTL_OUT(req, &bbr_clear_lost, sizeof(uint32_t)); 1143 if (error || req->newptr == NULL) 1144 return error; 1145 1146 error = SYSCTL_IN(req, &stat, sizeof(uint32_t)); 1147 if (error) 1148 return (error); 1149 if (stat == 1) { 1150 #ifdef BBR_INVARIANTS 1151 printf("Clearing BBR lost counters\n"); 1152 #endif 1153 COUNTER_ARRAY_ZERO(bbr_state_lost, BBR_MAX_STAT); 1154 COUNTER_ARRAY_ZERO(bbr_state_time, BBR_MAX_STAT); 1155 COUNTER_ARRAY_ZERO(bbr_state_resend, BBR_MAX_STAT); 1156 } else if (stat == 2) { 1157 #ifdef BBR_INVARIANTS 1158 printf("Clearing BBR option counters\n"); 1159 #endif 1160 COUNTER_ARRAY_ZERO(bbr_opts_arry, BBR_OPTS_SIZE); 1161 } else if (stat == 3) { 1162 #ifdef BBR_INVARIANTS 1163 printf("Clearing BBR stats counters\n"); 1164 #endif 1165 COUNTER_ARRAY_ZERO(bbr_stat_arry, BBR_STAT_SIZE); 1166 } else if (stat == 4) { 1167 #ifdef BBR_INVARIANTS 1168 printf("Clearing BBR out-size counters\n"); 1169 #endif 1170 COUNTER_ARRAY_ZERO(bbr_out_size, TCP_MSS_ACCT_SIZE); 1171 } 1172 bbr_clear_lost = 0; 1173 return (0); 1174 } 1175 1176 static void 1177 bbr_init_sysctls(void) 1178 { 1179 struct sysctl_oid *bbr_probertt; 1180 struct sysctl_oid *bbr_hptsi; 1181 struct sysctl_oid *bbr_measure; 1182 struct sysctl_oid *bbr_cwnd; 1183 struct sysctl_oid *bbr_timeout; 1184 struct sysctl_oid *bbr_states; 1185 struct sysctl_oid *bbr_startup; 1186 struct sysctl_oid *bbr_policer; 1187 1188 /* Probe rtt controls */ 1189 bbr_probertt = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1190 SYSCTL_CHILDREN(bbr_sysctl_root), 1191 OID_AUTO, 1192 "probertt", 1193 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1194 ""); 1195 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1196 SYSCTL_CHILDREN(bbr_probertt), 1197 OID_AUTO, "gain", CTLFLAG_RW, 1198 &bbr_rttprobe_gain, 192, 1199 "What is the filter gain drop in probe_rtt (0=disable)?"); 1200 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1201 SYSCTL_CHILDREN(bbr_probertt), 1202 OID_AUTO, "cwnd", CTLFLAG_RW, 1203 &bbr_rtt_probe_cwndtarg, 4, 1204 "How many mss's are outstanding during probe-rtt"); 1205 
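/*
 * A rough reading of how the probe-rtt tunables registered in this
 * block fit together (based only on the defaults and descriptions
 * here, not a restatement of the exact state machine): if rttProp has
 * not been lowered for "int" usecs (4,000,000 = 4 seconds) the
 * connection enters probe-rtt, holds roughly the "cwnd" target of
 * 4 MSS outstanding (the high-speed minimum applies when rttProp is
 * under 1ms), and stays there at least "mintime" usecs
 * (200,000 = 200ms) before leaving probe-rtt.
 */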
SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1206 SYSCTL_CHILDREN(bbr_probertt), 1207 OID_AUTO, "int", CTLFLAG_RW, 1208 &bbr_rtt_probe_limit, 4000000, 1209 "If RTT has not shrank in this many micro-seconds enter probe-rtt"); 1210 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1211 SYSCTL_CHILDREN(bbr_probertt), 1212 OID_AUTO, "mintime", CTLFLAG_RW, 1213 &bbr_rtt_probe_time, 200000, 1214 "How many microseconds in probe-rtt"); 1215 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1216 SYSCTL_CHILDREN(bbr_probertt), 1217 OID_AUTO, "filter_len_sec", CTLFLAG_RW, 1218 &bbr_filter_len_sec, 6, 1219 "How long in seconds does the rttProp filter run?"); 1220 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1221 SYSCTL_CHILDREN(bbr_probertt), 1222 OID_AUTO, "drain_rtt", CTLFLAG_RW, 1223 &bbr_drain_rtt, BBR_SRTT, 1224 "What is the drain rtt to use in probeRTT (rtt_prop=0, rtt_rack=1, rtt_pkt=2, rtt_srtt=3?"); 1225 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1226 SYSCTL_CHILDREN(bbr_probertt), 1227 OID_AUTO, "can_force", CTLFLAG_RW, 1228 &bbr_can_force_probertt, 0, 1229 "If we keep setting new low rtt's but delay going in probe-rtt can we force in??"); 1230 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1231 SYSCTL_CHILDREN(bbr_probertt), 1232 OID_AUTO, "enter_sets_force", CTLFLAG_RW, 1233 &bbr_probertt_sets_rtt, 0, 1234 "In NF mode, do we imitate google_mode and set the rttProp on entry to probe-rtt?"); 1235 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1236 SYSCTL_CHILDREN(bbr_probertt), 1237 OID_AUTO, "can_adjust", CTLFLAG_RW, 1238 &bbr_can_adjust_probertt, 1, 1239 "Can we dynamically adjust the probe-rtt limits and times?"); 1240 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1241 SYSCTL_CHILDREN(bbr_probertt), 1242 OID_AUTO, "is_ratio", CTLFLAG_RW, 1243 &bbr_is_ratio, 0, 1244 "is the limit to filter a ratio?"); 1245 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1246 SYSCTL_CHILDREN(bbr_probertt), 1247 OID_AUTO, "use_cwnd", CTLFLAG_RW, 1248 &bbr_prtt_slam_cwnd, 0, 1249 "Should we set/recover cwnd?"); 1250 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1251 SYSCTL_CHILDREN(bbr_probertt), 1252 OID_AUTO, "can_use_ts", CTLFLAG_RW, 1253 &bbr_can_use_ts_for_rtt, 1, 1254 "Can we use the ms timestamp if available for retransmistted rtt calculations?"); 1255 1256 /* Pacing controls */ 1257 bbr_hptsi = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1258 SYSCTL_CHILDREN(bbr_sysctl_root), 1259 OID_AUTO, 1260 "pacing", 1261 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1262 ""); 1263 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1264 SYSCTL_CHILDREN(bbr_hptsi), 1265 OID_AUTO, "hw_pacing", CTLFLAG_RW, 1266 &bbr_allow_hdwr_pacing, 1, 1267 "Do we allow hardware pacing?"); 1268 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1269 SYSCTL_CHILDREN(bbr_hptsi), 1270 OID_AUTO, "hw_pacing_limit", CTLFLAG_RW, 1271 &bbr_hardware_pacing_limit, 4000, 1272 "Do we have a limited number of connections for pacing chelsio (0=no limit)?"); 1273 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1274 SYSCTL_CHILDREN(bbr_hptsi), 1275 OID_AUTO, "hw_pacing_adj", CTLFLAG_RW, 1276 &bbr_hdwr_pace_adjust, 2, 1277 "Multiplier to calculated tso size?"); 1278 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1279 SYSCTL_CHILDREN(bbr_hptsi), 1280 OID_AUTO, "hw_pacing_floor", CTLFLAG_RW, 1281 &bbr_hdwr_pace_floor, 1, 1282 "Do we invoke the hardware pacing floor?"); 1283 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1284 SYSCTL_CHILDREN(bbr_hptsi), 1285 OID_AUTO, "hw_pacing_delay_cnt", CTLFLAG_RW, 1286 &bbr_hdwr_pacing_delay_cnt, 10, 1287 "How many packets must be sent after hdwr pacing is enabled"); 1288 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1289 SYSCTL_CHILDREN(bbr_hptsi), 1290 OID_AUTO, "bw_cross", CTLFLAG_RW, 1291 &bbr_cross_over, 3000000, 1292 "What is the point where we cross over to linux like 
TSO size set"); 1293 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1294 SYSCTL_CHILDREN(bbr_hptsi), 1295 OID_AUTO, "seg_deltarg", CTLFLAG_RW, 1296 &bbr_hptsi_segments_delay_tar, 7000, 1297 "What is the worse case delay target for hptsi < 48Mbp connections"); 1298 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1299 SYSCTL_CHILDREN(bbr_hptsi), 1300 OID_AUTO, "enet_oh", CTLFLAG_RW, 1301 &bbr_include_enet_oh, 0, 1302 "Do we include the ethernet overhead in calculating pacing delay?"); 1303 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1304 SYSCTL_CHILDREN(bbr_hptsi), 1305 OID_AUTO, "ip_oh", CTLFLAG_RW, 1306 &bbr_include_ip_oh, 1, 1307 "Do we include the IP overhead in calculating pacing delay?"); 1308 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1309 SYSCTL_CHILDREN(bbr_hptsi), 1310 OID_AUTO, "tcp_oh", CTLFLAG_RW, 1311 &bbr_include_tcp_oh, 0, 1312 "Do we include the TCP overhead in calculating pacing delay?"); 1313 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1314 SYSCTL_CHILDREN(bbr_hptsi), 1315 OID_AUTO, "google_discount", CTLFLAG_RW, 1316 &bbr_google_discount, 10, 1317 "What is the default google discount percentage wise for pacing (11 = 1.1%%)?"); 1318 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1319 SYSCTL_CHILDREN(bbr_hptsi), 1320 OID_AUTO, "all_get_min", CTLFLAG_RW, 1321 &bbr_all_get_min, 0, 1322 "If you are less than a MSS do you just get the min?"); 1323 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1324 SYSCTL_CHILDREN(bbr_hptsi), 1325 OID_AUTO, "tso_min", CTLFLAG_RW, 1326 &bbr_hptsi_bytes_min, 1460, 1327 "For 0 -> 24Mbps what is floor number of segments for TSO"); 1328 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1329 SYSCTL_CHILDREN(bbr_hptsi), 1330 OID_AUTO, "seg_tso_max", CTLFLAG_RW, 1331 &bbr_hptsi_segments_max, 6, 1332 "For 0 -> 24Mbps what is top number of segments for TSO"); 1333 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1334 SYSCTL_CHILDREN(bbr_hptsi), 1335 OID_AUTO, "seg_floor", CTLFLAG_RW, 1336 &bbr_hptsi_segments_floor, 1, 1337 "Minimum TSO size we will fall too in segments"); 1338 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1339 SYSCTL_CHILDREN(bbr_hptsi), 1340 OID_AUTO, "utter_max", CTLFLAG_RW, 1341 &bbr_hptsi_utter_max, 0, 1342 "The absolute maximum that any pacing (outside of hardware) can be"); 1343 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1344 SYSCTL_CHILDREN(bbr_hptsi), 1345 OID_AUTO, "seg_divisor", CTLFLAG_RW, 1346 &bbr_hptsi_per_second, 100, 1347 "What is the divisor in our hptsi TSO calculation 512Mbps < X > 24Mbps "); 1348 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1349 SYSCTL_CHILDREN(bbr_hptsi), 1350 OID_AUTO, "srtt_mul", CTLFLAG_RW, 1351 &bbr_hptsi_max_mul, 1, 1352 "The multiplier for pace len max"); 1353 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1354 SYSCTL_CHILDREN(bbr_hptsi), 1355 OID_AUTO, "srtt_div", CTLFLAG_RW, 1356 &bbr_hptsi_max_div, 2, 1357 "The divisor for pace len max"); 1358 /* Measurement controls */ 1359 bbr_measure = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1360 SYSCTL_CHILDREN(bbr_sysctl_root), 1361 OID_AUTO, 1362 "measure", 1363 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1364 "Measurement controls"); 1365 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1366 SYSCTL_CHILDREN(bbr_measure), 1367 OID_AUTO, "min_i_bw", CTLFLAG_RW, 1368 &bbr_initial_bw_bps, 62500, 1369 "Minimum initial b/w in bytes per second"); 1370 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1371 SYSCTL_CHILDREN(bbr_measure), 1372 OID_AUTO, "no_sack_needed", CTLFLAG_RW, 1373 &bbr_sack_not_required, 0, 1374 "Do we allow bbr to run on connections not supporting SACK?"); 1375 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1376 SYSCTL_CHILDREN(bbr_measure), 1377 OID_AUTO, "use_google", CTLFLAG_RW, 1378 &bbr_use_google_algo, 0, 1379 "Use has close to google V1.0 has possible?"); 1380 
SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1381 SYSCTL_CHILDREN(bbr_measure), 1382 OID_AUTO, "ts_limiting", CTLFLAG_RW, 1383 &bbr_ts_limiting, 1, 1384 "Do we attempt to use the peers timestamp to limit b/w caculations?"); 1385 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1386 SYSCTL_CHILDREN(bbr_measure), 1387 OID_AUTO, "ts_can_raise", CTLFLAG_RW, 1388 &bbr_ts_can_raise, 0, 1389 "Can we raise the b/w via timestamp b/w calculation?"); 1390 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1391 SYSCTL_CHILDREN(bbr_measure), 1392 OID_AUTO, "ts_delta", CTLFLAG_RW, 1393 &bbr_min_usec_delta, 20000, 1394 "How long in usec between ts of our sends in ts validation code?"); 1395 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1396 SYSCTL_CHILDREN(bbr_measure), 1397 OID_AUTO, "ts_peer_delta", CTLFLAG_RW, 1398 &bbr_min_peer_delta, 20, 1399 "What min numerical value should be between the peer deltas?"); 1400 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1401 SYSCTL_CHILDREN(bbr_measure), 1402 OID_AUTO, "ts_delta_percent", CTLFLAG_RW, 1403 &bbr_delta_percent, 150, 1404 "What percentage (150 = 15.0) do we allow variance for?"); 1405 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1406 SYSCTL_CHILDREN(bbr_measure), 1407 OID_AUTO, "min_measure_good_bw", CTLFLAG_RW, 1408 &bbr_min_measurements_req, 1, 1409 "What is the minimum measurment count we need before we switch to our b/w estimate"); 1410 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1411 SYSCTL_CHILDREN(bbr_measure), 1412 OID_AUTO, "min_measure_before_pace", CTLFLAG_RW, 1413 &bbr_no_pacing_until, 4, 1414 "How many pkt-epoch's (0 is off) do we need before pacing is on?"); 1415 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1416 SYSCTL_CHILDREN(bbr_measure), 1417 OID_AUTO, "quanta", CTLFLAG_RW, 1418 &bbr_quanta, 2, 1419 "Extra quanta to add when calculating the target (ID section 4.2.3.2)."); 1420 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1421 SYSCTL_CHILDREN(bbr_measure), 1422 OID_AUTO, "noretran", CTLFLAG_RW, 1423 &bbr_no_retran, 0, 1424 "Should google mode not use retransmission measurements for the b/w estimation?"); 1425 /* State controls */ 1426 bbr_states = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1427 SYSCTL_CHILDREN(bbr_sysctl_root), 1428 OID_AUTO, 1429 "states", 1430 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1431 "State controls"); 1432 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1433 SYSCTL_CHILDREN(bbr_states), 1434 OID_AUTO, "idle_restart", CTLFLAG_RW, 1435 &bbr_uses_idle_restart, 0, 1436 "Do we use a new special idle_restart state to ramp back up quickly?"); 1437 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1438 SYSCTL_CHILDREN(bbr_states), 1439 OID_AUTO, "idle_restart_threshold", CTLFLAG_RW, 1440 &bbr_idle_restart_threshold, 100000, 1441 "How long must we be idle before we restart??"); 1442 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1443 SYSCTL_CHILDREN(bbr_states), 1444 OID_AUTO, "use_pkt_epoch", CTLFLAG_RW, 1445 &bbr_state_is_pkt_epoch, 0, 1446 "Do we use a pkt-epoch for substate if 0 rttProp?"); 1447 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1448 SYSCTL_CHILDREN(bbr_states), 1449 OID_AUTO, "startup_rtt_gain", CTLFLAG_RW, 1450 &bbr_rtt_gain_thresh, 0, 1451 "What increase in RTT triggers us to stop ignoring no-loss and possibly exit startup?"); 1452 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1453 SYSCTL_CHILDREN(bbr_states), 1454 OID_AUTO, "drain_floor", CTLFLAG_RW, 1455 &bbr_drain_floor, 88, 1456 "What is the lowest we can drain (pg) too?"); 1457 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1458 SYSCTL_CHILDREN(bbr_states), 1459 OID_AUTO, "drain_2_target", CTLFLAG_RW, 1460 &bbr_state_drain_2_tar, 1, 1461 "Do we drain to target in drain substate?"); 1462 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1463 SYSCTL_CHILDREN(bbr_states), 1464 OID_AUTO, 
"gain_2_target", CTLFLAG_RW, 1465 &bbr_gain_to_target, 1, 1466 "Does probe bw gain to target??"); 1467 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1468 SYSCTL_CHILDREN(bbr_states), 1469 OID_AUTO, "gain_extra_time", CTLFLAG_RW, 1470 &bbr_gain_gets_extra_too, 1, 1471 "Does probe bw gain get the extra time too?"); 1472 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1473 SYSCTL_CHILDREN(bbr_states), 1474 OID_AUTO, "ld_div", CTLFLAG_RW, 1475 &bbr_drain_drop_div, 5, 1476 "Long drain drop divider?"); 1477 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1478 SYSCTL_CHILDREN(bbr_states), 1479 OID_AUTO, "ld_mul", CTLFLAG_RW, 1480 &bbr_drain_drop_mul, 4, 1481 "Long drain drop multiplier?"); 1482 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1483 SYSCTL_CHILDREN(bbr_states), 1484 OID_AUTO, "rand_ot_disc", CTLFLAG_RW, 1485 &bbr_rand_ot, 50, 1486 "Random discount of the ot?"); 1487 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1488 SYSCTL_CHILDREN(bbr_states), 1489 OID_AUTO, "dr_filter_life", CTLFLAG_RW, 1490 &bbr_num_pktepo_for_del_limit, BBR_NUM_RTTS_FOR_DEL_LIMIT, 1491 "How many packet-epochs does the b/w delivery rate last?"); 1492 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1493 SYSCTL_CHILDREN(bbr_states), 1494 OID_AUTO, "subdrain_applimited", CTLFLAG_RW, 1495 &bbr_sub_drain_app_limit, 0, 1496 "Does our sub-state drain invoke app limited if its long?"); 1497 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1498 SYSCTL_CHILDREN(bbr_states), 1499 OID_AUTO, "use_cwnd_subdrain", CTLFLAG_RW, 1500 &bbr_sub_drain_slam_cwnd, 0, 1501 "Should we set/recover cwnd for sub-state drain?"); 1502 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1503 SYSCTL_CHILDREN(bbr_states), 1504 OID_AUTO, "use_cwnd_maindrain", CTLFLAG_RW, 1505 &bbr_slam_cwnd_in_main_drain, 0, 1506 "Should we set/recover cwnd for main-state drain?"); 1507 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1508 SYSCTL_CHILDREN(bbr_states), 1509 OID_AUTO, "google_gets_earlyout", CTLFLAG_RW, 1510 &google_allow_early_out, 1, 1511 "Should we allow google probe-bw/drain to exit early at flight target?"); 1512 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1513 SYSCTL_CHILDREN(bbr_states), 1514 OID_AUTO, "google_exit_loss", CTLFLAG_RW, 1515 &google_consider_lost, 1, 1516 "Should we have losses exit gain of probebw in google mode??"); 1517 /* Startup controls */ 1518 bbr_startup = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1519 SYSCTL_CHILDREN(bbr_sysctl_root), 1520 OID_AUTO, 1521 "startup", 1522 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1523 "Startup controls"); 1524 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1525 SYSCTL_CHILDREN(bbr_startup), 1526 OID_AUTO, "cheat_iwnd", CTLFLAG_RW, 1527 &bbr_sends_full_iwnd, 1, 1528 "Do we not pace but burst out initial windows has our TSO size?"); 1529 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1530 SYSCTL_CHILDREN(bbr_startup), 1531 OID_AUTO, "loss_threshold", CTLFLAG_RW, 1532 &bbr_startup_loss_thresh, 2000, 1533 "In startup what is the loss threshold in a pe that will exit us from startup?"); 1534 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1535 SYSCTL_CHILDREN(bbr_startup), 1536 OID_AUTO, "use_lowerpg", CTLFLAG_RW, 1537 &bbr_use_lower_gain_in_startup, 1, 1538 "Should we use a lower hptsi gain if we see loss in startup?"); 1539 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1540 SYSCTL_CHILDREN(bbr_startup), 1541 OID_AUTO, "gain", CTLFLAG_RW, 1542 &bbr_start_exit, 25, 1543 "What gain percent do we need to see to stay in startup??"); 1544 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1545 SYSCTL_CHILDREN(bbr_startup), 1546 OID_AUTO, "low_gain", CTLFLAG_RW, 1547 &bbr_low_start_exit, 15, 1548 "What gain percent do we need to see to stay in the lower gain startup??"); 1549 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1550 SYSCTL_CHILDREN(bbr_startup), 1551 
OID_AUTO, "loss_exit", CTLFLAG_RW, 1552 &bbr_exit_startup_at_loss, 1, 1553 "Should we exit startup at loss in an epoch if we are not gaining?"); 1554 /* CWND controls */ 1555 bbr_cwnd = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1556 SYSCTL_CHILDREN(bbr_sysctl_root), 1557 OID_AUTO, 1558 "cwnd", 1559 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1560 "Cwnd controls"); 1561 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1562 SYSCTL_CHILDREN(bbr_cwnd), 1563 OID_AUTO, "tar_rtt", CTLFLAG_RW, 1564 &bbr_cwndtarget_rtt_touse, 0, 1565 "Target cwnd rtt measurment to use (0=rtt_prop, 1=rtt_rack, 2=pkt_rtt, 3=srtt)?"); 1566 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1567 SYSCTL_CHILDREN(bbr_cwnd), 1568 OID_AUTO, "may_shrink", CTLFLAG_RW, 1569 &bbr_cwnd_may_shrink, 0, 1570 "Can the cwnd shrink if it would grow to more than the target?"); 1571 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1572 SYSCTL_CHILDREN(bbr_cwnd), 1573 OID_AUTO, "max_target_limit", CTLFLAG_RW, 1574 &bbr_target_cwnd_mult_limit, 8, 1575 "Do we limit the cwnd to some multiple of the cwnd target if cwnd can't shrink 0=no?"); 1576 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1577 SYSCTL_CHILDREN(bbr_cwnd), 1578 OID_AUTO, "highspeed_min", CTLFLAG_RW, 1579 &bbr_cwnd_min_val_hs, BBR_HIGHSPEED_NUM_MSS, 1580 "What is the high-speed min cwnd (rttProp under 1ms)"); 1581 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1582 SYSCTL_CHILDREN(bbr_cwnd), 1583 OID_AUTO, "lowspeed_min", CTLFLAG_RW, 1584 &bbr_cwnd_min_val, BBR_PROBERTT_NUM_MSS, 1585 "What is the min cwnd (rttProp > 1ms)"); 1586 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1587 SYSCTL_CHILDREN(bbr_cwnd), 1588 OID_AUTO, "initwin", CTLFLAG_RW, 1589 &bbr_def_init_win, 10, 1590 "What is the BBR initial window, if 0 use tcp version"); 1591 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1592 SYSCTL_CHILDREN(bbr_cwnd), 1593 OID_AUTO, "do_loss_red", CTLFLAG_RW, 1594 &bbr_do_red, 600, 1595 "Do we reduce the b/w at exit from recovery based on ratio of prop/srtt (800=80.0, 0=off)?"); 1596 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1597 SYSCTL_CHILDREN(bbr_cwnd), 1598 OID_AUTO, "red_scale", CTLFLAG_RW, 1599 &bbr_red_scale, 20000, 1600 "What RTT do we scale with?"); 1601 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1602 SYSCTL_CHILDREN(bbr_cwnd), 1603 OID_AUTO, "red_growslow", CTLFLAG_RW, 1604 &bbr_red_growth_restrict, 1, 1605 "Do we restrict cwnd growth for whats in flight?"); 1606 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1607 SYSCTL_CHILDREN(bbr_cwnd), 1608 OID_AUTO, "red_div", CTLFLAG_RW, 1609 &bbr_red_div, 2, 1610 "If we reduce whats the divisor?"); 1611 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1612 SYSCTL_CHILDREN(bbr_cwnd), 1613 OID_AUTO, "red_mul", CTLFLAG_RW, 1614 &bbr_red_mul, 1, 1615 "If we reduce whats the mulitiplier?"); 1616 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1617 SYSCTL_CHILDREN(bbr_cwnd), 1618 OID_AUTO, "target_is_unit", CTLFLAG_RW, 1619 &bbr_target_is_bbunit, 0, 1620 "Is the state target the pacing_gain or BBR_UNIT?"); 1621 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1622 SYSCTL_CHILDREN(bbr_cwnd), 1623 OID_AUTO, "drop_limit", CTLFLAG_RW, 1624 &bbr_drop_limit, 0, 1625 "Number of segments limit for drop (0=use min_cwnd w/flight)?"); 1626 1627 /* Timeout controls */ 1628 bbr_timeout = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1629 SYSCTL_CHILDREN(bbr_sysctl_root), 1630 OID_AUTO, 1631 "timeout", 1632 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1633 "Time out controls"); 1634 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1635 SYSCTL_CHILDREN(bbr_timeout), 1636 OID_AUTO, "delack", CTLFLAG_RW, 1637 &bbr_delack_time, 100000, 1638 "BBR's delayed ack time"); 1639 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1640 SYSCTL_CHILDREN(bbr_timeout), 1641 OID_AUTO, "tlp_uses", CTLFLAG_RW, 1642 &bbr_tlp_type_to_use, 3, 
1643 "RTT that TLP uses in its calculations, 0=rttProp, 1=Rack_rtt, 2=pkt_rtt and 3=srtt"); 1644 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1645 SYSCTL_CHILDREN(bbr_timeout), 1646 OID_AUTO, "persmin", CTLFLAG_RW, 1647 &bbr_persist_min, 250000, 1648 "What is the minimum time in microseconds between persists"); 1649 SYSCTL_ADD_U32(&bbr_sysctl_ctx, 1650 SYSCTL_CHILDREN(bbr_timeout), 1651 OID_AUTO, "persmax", CTLFLAG_RW, 1652 &bbr_persist_max, 1000000, 1653 "What is the largest delay in microseconds between persists"); 1654 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1655 SYSCTL_CHILDREN(bbr_timeout), 1656 OID_AUTO, "tlp_minto", CTLFLAG_RW, 1657 &bbr_tlp_min, 10000, 1658 "TLP Min timeout in usecs"); 1659 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1660 SYSCTL_CHILDREN(bbr_timeout), 1661 OID_AUTO, "tlp_dack_time", CTLFLAG_RW, 1662 &bbr_delayed_ack_time, 200000, 1663 "TLP delayed ack compensation value"); 1664 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1665 SYSCTL_CHILDREN(bbr_sysctl_root), 1666 OID_AUTO, "minrto", CTLFLAG_RW, 1667 &bbr_rto_min_ms, 30, 1668 "Minimum RTO in ms"); 1669 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1670 SYSCTL_CHILDREN(bbr_timeout), 1671 OID_AUTO, "maxrto", CTLFLAG_RW, 1672 &bbr_rto_max_sec, 4, 1673 "Maxiumum RTO in seconds -- should be at least as large as min_rto"); 1674 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1675 SYSCTL_CHILDREN(bbr_timeout), 1676 OID_AUTO, "tlp_retry", CTLFLAG_RW, 1677 &bbr_tlp_max_resend, 2, 1678 "How many times does TLP retry a single segment or multiple with no ACK"); 1679 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1680 SYSCTL_CHILDREN(bbr_timeout), 1681 OID_AUTO, "minto", CTLFLAG_RW, 1682 &bbr_min_to, 1000, 1683 "Minimum rack timeout in useconds"); 1684 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1685 SYSCTL_CHILDREN(bbr_timeout), 1686 OID_AUTO, "pktdelay", CTLFLAG_RW, 1687 &bbr_pkt_delay, 1000, 1688 "Extra RACK time (in useconds) besides reordering thresh"); 1689 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1690 SYSCTL_CHILDREN(bbr_timeout), 1691 OID_AUTO, "incr_tmrs", CTLFLAG_RW, 1692 &bbr_incr_timers, 1, 1693 "Increase the RXT/TLP timer by the pacing time used?"); 1694 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1695 SYSCTL_CHILDREN(bbr_timeout), 1696 OID_AUTO, "rxtmark_sackpassed", CTLFLAG_RW, 1697 &bbr_marks_rxt_sack_passed, 0, 1698 "Mark sack passed on all those not ack'd when a RXT hits?"); 1699 /* Policer controls */ 1700 bbr_policer = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 1701 SYSCTL_CHILDREN(bbr_sysctl_root), 1702 OID_AUTO, 1703 "policer", 1704 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 1705 "Policer controls"); 1706 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1707 SYSCTL_CHILDREN(bbr_policer), 1708 OID_AUTO, "detect_enable", CTLFLAG_RW, 1709 &bbr_policer_detection_enabled, 1, 1710 "Is policer detection enabled??"); 1711 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1712 SYSCTL_CHILDREN(bbr_policer), 1713 OID_AUTO, "min_pes", CTLFLAG_RW, 1714 &bbr_lt_intvl_min_rtts, 4, 1715 "Minimum number of PE's?"); 1716 SYSCTL_ADD_U64(&bbr_sysctl_ctx, 1717 SYSCTL_CHILDREN(bbr_policer), 1718 OID_AUTO, "bwdiff", CTLFLAG_RW, 1719 &bbr_lt_bw_diff, (4000/8), 1720 "Minimal bw diff?"); 1721 SYSCTL_ADD_U64(&bbr_sysctl_ctx, 1722 SYSCTL_CHILDREN(bbr_policer), 1723 OID_AUTO, "bwratio", CTLFLAG_RW, 1724 &bbr_lt_bw_ratio, 8, 1725 "Minimal bw diff?"); 1726 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1727 SYSCTL_CHILDREN(bbr_policer), 1728 OID_AUTO, "from_rack_rxt", CTLFLAG_RW, 1729 &bbr_policer_call_from_rack_to, 0, 1730 "Do we call the policer detection code from a rack-timeout?"); 1731 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1732 SYSCTL_CHILDREN(bbr_policer), 1733 OID_AUTO, "false_postive", CTLFLAG_RW, 1734 &bbr_lt_intvl_fp, 0, 
1735 "What packet epoch do we do false-postive detection at (0=no)?"); 1736 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1737 SYSCTL_CHILDREN(bbr_policer), 1738 OID_AUTO, "loss_thresh", CTLFLAG_RW, 1739 &bbr_lt_loss_thresh, 196, 1740 "Loss threshold 196 = 19.6%?"); 1741 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1742 SYSCTL_CHILDREN(bbr_policer), 1743 OID_AUTO, "false_postive_thresh", CTLFLAG_RW, 1744 &bbr_lt_fd_thresh, 100, 1745 "What percentage is the false detection threshold (150=15.0)?"); 1746 /* All the rest */ 1747 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1748 SYSCTL_CHILDREN(bbr_sysctl_root), 1749 OID_AUTO, "cheat_rxt", CTLFLAG_RW, 1750 &bbr_use_rack_resend_cheat, 0, 1751 "Do we burst 1ms between sends on retransmissions (like rack)?"); 1752 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1753 SYSCTL_CHILDREN(bbr_sysctl_root), 1754 OID_AUTO, "error_paceout", CTLFLAG_RW, 1755 &bbr_error_base_paceout, 10000, 1756 "When we hit an error what is the min to pace out in usec's?"); 1757 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1758 SYSCTL_CHILDREN(bbr_sysctl_root), 1759 OID_AUTO, "kill_paceout", CTLFLAG_RW, 1760 &bbr_max_net_error_cnt, 10, 1761 "When we hit this many errors in a row, kill the session?"); 1762 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1763 SYSCTL_CHILDREN(bbr_sysctl_root), 1764 OID_AUTO, "data_after_close", CTLFLAG_RW, 1765 &bbr_ignore_data_after_close, 1, 1766 "Do we hold off sending a RST until all pending data is ack'd"); 1767 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1768 SYSCTL_CHILDREN(bbr_sysctl_root), 1769 OID_AUTO, "resend_use_tso", CTLFLAG_RW, 1770 &bbr_resends_use_tso, 0, 1771 "Can resends use TSO?"); 1772 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1773 SYSCTL_CHILDREN(bbr_sysctl_root), 1774 OID_AUTO, "sblklimit", CTLFLAG_RW, 1775 &bbr_sack_block_limit, 128, 1776 "When do we start ignoring small sack blocks"); 1777 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1778 SYSCTL_CHILDREN(bbr_sysctl_root), 1779 OID_AUTO, "bb_verbose", CTLFLAG_RW, 1780 &bbr_verbose_logging, 0, 1781 "Should BBR black box logging be verbose"); 1782 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1783 SYSCTL_CHILDREN(bbr_sysctl_root), 1784 OID_AUTO, "reorder_thresh", CTLFLAG_RW, 1785 &bbr_reorder_thresh, 2, 1786 "What factor for rack will be added when seeing reordering (shift right)"); 1787 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1788 SYSCTL_CHILDREN(bbr_sysctl_root), 1789 OID_AUTO, "reorder_fade", CTLFLAG_RW, 1790 &bbr_reorder_fade, 0, 1791 "Does reorder detection fade, if so how many ms (0 means never)"); 1792 SYSCTL_ADD_S32(&bbr_sysctl_ctx, 1793 SYSCTL_CHILDREN(bbr_sysctl_root), 1794 OID_AUTO, "rtt_tlp_thresh", CTLFLAG_RW, 1795 &bbr_tlp_thresh, 1, 1796 "what divisor for TLP rtt/retran will be added (1=rtt, 2=1/2 rtt etc)"); 1797 /* Stats and counters */ 1798 /* The pacing counters for hdwr/software can't be in the array */ 1799 bbr_nohdwr_pacing_enobuf = counter_u64_alloc(M_WAITOK); 1800 bbr_hdwr_pacing_enobuf = counter_u64_alloc(M_WAITOK); 1801 SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx, 1802 SYSCTL_CHILDREN(bbr_sysctl_root), 1803 OID_AUTO, "enob_hdwr_pacing", CTLFLAG_RD, 1804 &bbr_hdwr_pacing_enobuf, 1805 "Total number of enobufs for hardware paced flows"); 1806 SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx, 1807 SYSCTL_CHILDREN(bbr_sysctl_root), 1808 OID_AUTO, "enob_no_hdwr_pacing", CTLFLAG_RD, 1809 &bbr_nohdwr_pacing_enobuf, 1810 "Total number of enobufs for non-hardware paced flows"); 1811 1812 1813 bbr_flows_whdwr_pacing = counter_u64_alloc(M_WAITOK); 1814 SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx, 1815 SYSCTL_CHILDREN(bbr_sysctl_root), 1816 OID_AUTO, "hdwr_pacing", CTLFLAG_RD, 1817 &bbr_flows_whdwr_pacing, 1818 "Total 
number of hardware paced flows"); 1819 bbr_flows_nohdwr_pacing = counter_u64_alloc(M_WAITOK); 1820 SYSCTL_ADD_COUNTER_U64(&bbr_sysctl_ctx, 1821 SYSCTL_CHILDREN(bbr_sysctl_root), 1822 OID_AUTO, "software_pacing", CTLFLAG_RD, 1823 &bbr_flows_nohdwr_pacing, 1824 "Total number of software paced flows"); 1825 COUNTER_ARRAY_ALLOC(bbr_stat_arry, BBR_STAT_SIZE, M_WAITOK); 1826 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1827 OID_AUTO, "stats", CTLFLAG_RD, 1828 bbr_stat_arry, BBR_STAT_SIZE, "BBR Stats"); 1829 COUNTER_ARRAY_ALLOC(bbr_opts_arry, BBR_OPTS_SIZE, M_WAITOK); 1830 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1831 OID_AUTO, "opts", CTLFLAG_RD, 1832 bbr_opts_arry, BBR_OPTS_SIZE, "BBR Option Stats"); 1833 COUNTER_ARRAY_ALLOC(bbr_state_lost, BBR_MAX_STAT, M_WAITOK); 1834 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1835 OID_AUTO, "lost", CTLFLAG_RD, 1836 bbr_state_lost, BBR_MAX_STAT, "Stats of when losses occur"); 1837 COUNTER_ARRAY_ALLOC(bbr_state_resend, BBR_MAX_STAT, M_WAITOK); 1838 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1839 OID_AUTO, "stateresend", CTLFLAG_RD, 1840 bbr_state_resend, BBR_MAX_STAT, "Stats of what states resend"); 1841 COUNTER_ARRAY_ALLOC(bbr_state_time, BBR_MAX_STAT, M_WAITOK); 1842 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1843 OID_AUTO, "statetime", CTLFLAG_RD, 1844 bbr_state_time, BBR_MAX_STAT, "Stats of time spent in the states"); 1845 COUNTER_ARRAY_ALLOC(bbr_out_size, TCP_MSS_ACCT_SIZE, M_WAITOK); 1846 SYSCTL_ADD_COUNTER_U64_ARRAY(&bbr_sysctl_ctx, SYSCTL_CHILDREN(bbr_sysctl_root), 1847 OID_AUTO, "outsize", CTLFLAG_RD, 1848 bbr_out_size, TCP_MSS_ACCT_SIZE, "Size of output calls"); 1849 SYSCTL_ADD_PROC(&bbr_sysctl_ctx, 1850 SYSCTL_CHILDREN(bbr_sysctl_root), 1851 OID_AUTO, "clrlost", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1852 &bbr_clear_lost, 0, sysctl_bbr_clear_lost, "IU", "Clear lost counters"); 1853 } 1854 1855 static void 1856 bbr_counter_destroy(void) 1857 { 1858 COUNTER_ARRAY_FREE(bbr_stat_arry, BBR_STAT_SIZE); 1859 COUNTER_ARRAY_FREE(bbr_opts_arry, BBR_OPTS_SIZE); 1860 COUNTER_ARRAY_FREE(bbr_out_size, TCP_MSS_ACCT_SIZE); 1861 COUNTER_ARRAY_FREE(bbr_state_lost, BBR_MAX_STAT); 1862 COUNTER_ARRAY_FREE(bbr_state_time, BBR_MAX_STAT); 1863 COUNTER_ARRAY_FREE(bbr_state_resend, BBR_MAX_STAT); 1864 counter_u64_free(bbr_nohdwr_pacing_enobuf); 1865 counter_u64_free(bbr_hdwr_pacing_enobuf); 1866 counter_u64_free(bbr_flows_whdwr_pacing); 1867 counter_u64_free(bbr_flows_nohdwr_pacing); 1868 1869 } 1870 1871 static __inline void 1872 bbr_fill_in_logging_data(struct tcp_bbr *bbr, struct tcp_log_bbr *l, uint32_t cts) 1873 { 1874 memset(l, 0, sizeof(union tcp_log_stackspecific)); 1875 l->cur_del_rate = bbr->r_ctl.rc_bbr_cur_del_rate; 1876 l->delRate = get_filter_value(&bbr->r_ctl.rc_delrate); 1877 l->rttProp = get_filter_value_small(&bbr->r_ctl.rc_rttprop); 1878 l->bw_inuse = bbr_get_bw(bbr); 1879 l->inflight = ctf_flight_size(bbr->rc_tp, 1880 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 1881 l->applimited = bbr->r_ctl.r_app_limited_until; 1882 l->delivered = bbr->r_ctl.rc_delivered; 1883 l->timeStamp = cts; 1884 l->lost = bbr->r_ctl.rc_lost; 1885 l->bbr_state = bbr->rc_bbr_state; 1886 l->bbr_substate = bbr_state_val(bbr); 1887 l->epoch = bbr->r_ctl.rc_rtt_epoch; 1888 l->lt_epoch = bbr->r_ctl.rc_lt_epoch; 1889 l->pacing_gain = bbr->r_ctl.rc_bbr_hptsi_gain; 1890 l->cwnd_gain = 
bbr->r_ctl.rc_bbr_cwnd_gain; 1891 l->inhpts = bbr->rc_inp->inp_in_hpts; 1892 l->ininput = bbr->rc_inp->inp_in_input; 1893 l->use_lt_bw = bbr->rc_lt_use_bw; 1894 l->pkts_out = bbr->r_ctl.rc_flight_at_input; 1895 l->pkt_epoch = bbr->r_ctl.rc_pkt_epoch; 1896 } 1897 1898 static void 1899 bbr_log_type_bw_reduce(struct tcp_bbr *bbr, int reason) 1900 { 1901 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1902 union tcp_log_stackspecific log; 1903 1904 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 1905 log.u_bbr.flex1 = 0; 1906 log.u_bbr.flex2 = 0; 1907 log.u_bbr.flex5 = 0; 1908 log.u_bbr.flex3 = 0; 1909 log.u_bbr.flex4 = bbr->r_ctl.rc_pkt_epoch_loss_rate; 1910 log.u_bbr.flex7 = reason; 1911 log.u_bbr.flex6 = bbr->r_ctl.rc_bbr_enters_probertt; 1912 log.u_bbr.flex8 = 0; 1913 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 1914 &bbr->rc_inp->inp_socket->so_rcv, 1915 &bbr->rc_inp->inp_socket->so_snd, 1916 BBR_LOG_BW_RED_EV, 0, 1917 0, &log, false, &bbr->rc_tv); 1918 } 1919 } 1920 1921 static void 1922 bbr_log_type_rwnd_collapse(struct tcp_bbr *bbr, int seq, int mode, uint32_t count) 1923 { 1924 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1925 union tcp_log_stackspecific log; 1926 1927 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 1928 log.u_bbr.flex1 = seq; 1929 log.u_bbr.flex2 = count; 1930 log.u_bbr.flex8 = mode; 1931 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 1932 &bbr->rc_inp->inp_socket->so_rcv, 1933 &bbr->rc_inp->inp_socket->so_snd, 1934 BBR_LOG_LOWGAIN, 0, 1935 0, &log, false, &bbr->rc_tv); 1936 } 1937 } 1938 1939 1940 1941 static void 1942 bbr_log_type_just_return(struct tcp_bbr *bbr, uint32_t cts, uint32_t tlen, uint8_t hpts_calling, 1943 uint8_t reason, uint32_t p_maxseg, int len) 1944 { 1945 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1946 union tcp_log_stackspecific log; 1947 1948 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 1949 log.u_bbr.flex1 = p_maxseg; 1950 log.u_bbr.flex2 = bbr->r_ctl.rc_hpts_flags; 1951 log.u_bbr.flex3 = bbr->r_ctl.rc_timer_exp; 1952 log.u_bbr.flex4 = reason; 1953 log.u_bbr.flex5 = bbr->rc_in_persist; 1954 log.u_bbr.flex6 = bbr->r_ctl.rc_last_delay_val; 1955 log.u_bbr.flex7 = p_maxseg; 1956 log.u_bbr.flex8 = bbr->rc_in_persist; 1957 log.u_bbr.pkts_out = 0; 1958 log.u_bbr.applimited = len; 1959 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 1960 &bbr->rc_inp->inp_socket->so_rcv, 1961 &bbr->rc_inp->inp_socket->so_snd, 1962 BBR_LOG_JUSTRET, 0, 1963 tlen, &log, false, &bbr->rc_tv); 1964 } 1965 } 1966 1967 1968 static void 1969 bbr_log_type_enter_rec(struct tcp_bbr *bbr, uint32_t seq) 1970 { 1971 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 1972 union tcp_log_stackspecific log; 1973 1974 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 1975 log.u_bbr.flex1 = seq; 1976 log.u_bbr.flex2 = bbr->r_ctl.rc_cwnd_on_ent; 1977 log.u_bbr.flex3 = bbr->r_ctl.rc_recovery_start; 1978 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 1979 &bbr->rc_inp->inp_socket->so_rcv, 1980 &bbr->rc_inp->inp_socket->so_snd, 1981 BBR_LOG_ENTREC, 0, 1982 0, &log, false, &bbr->rc_tv); 1983 } 1984 } 1985 1986 static void 1987 bbr_log_msgsize_fail(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t len, uint32_t maxseg, uint32_t mtu, int32_t csum_flags, int32_t tso, uint32_t cts) 1988 { 1989 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 1990 union tcp_log_stackspecific log; 1991 1992 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 1993 log.u_bbr.flex1 = tso; 1994 log.u_bbr.flex2 = maxseg; 1995 log.u_bbr.flex3 = mtu; 1996 log.u_bbr.flex4 = csum_flags; 1997 TCP_LOG_EVENTP(tp, NULL, 1998 
&bbr->rc_inp->inp_socket->so_rcv, 1999 &bbr->rc_inp->inp_socket->so_snd, 2000 BBR_LOG_MSGSIZE, 0, 2001 0, &log, false, &bbr->rc_tv); 2002 } 2003 } 2004 2005 static void 2006 bbr_log_flowend(struct tcp_bbr *bbr) 2007 { 2008 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2009 union tcp_log_stackspecific log; 2010 struct sockbuf *r, *s; 2011 struct timeval tv; 2012 2013 if (bbr->rc_inp->inp_socket) { 2014 r = &bbr->rc_inp->inp_socket->so_rcv; 2015 s = &bbr->rc_inp->inp_socket->so_snd; 2016 } else { 2017 r = s = NULL; 2018 } 2019 bbr_fill_in_logging_data(bbr, &log.u_bbr, tcp_get_usecs(&tv)); 2020 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2021 r, s, 2022 TCP_LOG_FLOWEND, 0, 2023 0, &log, false, &tv); 2024 } 2025 } 2026 2027 static void 2028 bbr_log_pkt_epoch(struct tcp_bbr *bbr, uint32_t cts, uint32_t line, 2029 uint32_t lost, uint32_t del) 2030 { 2031 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2032 union tcp_log_stackspecific log; 2033 2034 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2035 log.u_bbr.flex1 = lost; 2036 log.u_bbr.flex2 = del; 2037 log.u_bbr.flex3 = bbr->r_ctl.rc_bbr_lastbtlbw; 2038 log.u_bbr.flex4 = bbr->r_ctl.rc_pkt_epoch_rtt; 2039 log.u_bbr.flex5 = bbr->r_ctl.rc_bbr_last_startup_epoch; 2040 log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup; 2041 log.u_bbr.flex7 = line; 2042 log.u_bbr.flex8 = 0; 2043 log.u_bbr.inflight = bbr->r_ctl.r_measurement_count; 2044 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2045 &bbr->rc_inp->inp_socket->so_rcv, 2046 &bbr->rc_inp->inp_socket->so_snd, 2047 BBR_LOG_PKT_EPOCH, 0, 2048 0, &log, false, &bbr->rc_tv); 2049 } 2050 } 2051 2052 static void 2053 bbr_log_time_epoch(struct tcp_bbr *bbr, uint32_t cts, uint32_t line, uint32_t epoch_time) 2054 { 2055 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2056 union tcp_log_stackspecific log; 2057 2058 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2059 log.u_bbr.flex1 = bbr->r_ctl.rc_lost; 2060 log.u_bbr.flex2 = bbr->rc_inp->inp_socket->so_snd.sb_lowat; 2061 log.u_bbr.flex3 = bbr->rc_inp->inp_socket->so_snd.sb_hiwat; 2062 log.u_bbr.flex7 = line; 2063 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2064 &bbr->rc_inp->inp_socket->so_rcv, 2065 &bbr->rc_inp->inp_socket->so_snd, 2066 BBR_LOG_TIME_EPOCH, 0, 2067 0, &log, false, &bbr->rc_tv); 2068 } 2069 } 2070 2071 static void 2072 bbr_log_set_of_state_target(struct tcp_bbr *bbr, uint32_t new_tar, int line, int meth) 2073 { 2074 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2075 union tcp_log_stackspecific log; 2076 2077 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2078 log.u_bbr.flex1 = bbr->r_ctl.rc_target_at_state; 2079 log.u_bbr.flex2 = new_tar; 2080 log.u_bbr.flex3 = line; 2081 log.u_bbr.flex4 = bbr->r_ctl.rc_pace_max_segs; 2082 log.u_bbr.flex5 = bbr_quanta; 2083 log.u_bbr.flex6 = bbr->r_ctl.rc_pace_min_segs; 2084 log.u_bbr.flex7 = bbr->rc_last_options; 2085 log.u_bbr.flex8 = meth; 2086 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2087 &bbr->rc_inp->inp_socket->so_rcv, 2088 &bbr->rc_inp->inp_socket->so_snd, 2089 BBR_LOG_STATE_TARGET, 0, 2090 0, &log, false, &bbr->rc_tv); 2091 } 2092 2093 } 2094 2095 static void 2096 bbr_log_type_statechange(struct tcp_bbr *bbr, uint32_t cts, int32_t line) 2097 { 2098 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2099 union tcp_log_stackspecific log; 2100 2101 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2102 log.u_bbr.flex1 = line; 2103 log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks; 2104 log.u_bbr.flex3 = bbr->r_ctl.rc_probertt_int; 2105 if (bbr_state_is_pkt_epoch) 2106 log.u_bbr.flex4 = 
bbr_get_rtt(bbr, BBR_RTT_PKTRTT); 2107 else 2108 log.u_bbr.flex4 = bbr_get_rtt(bbr, BBR_RTT_PROP); 2109 log.u_bbr.flex5 = bbr->r_ctl.rc_bbr_last_startup_epoch; 2110 log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup; 2111 log.u_bbr.flex7 = (bbr->r_ctl.rc_target_at_state/1000); 2112 log.u_bbr.lt_epoch = bbr->r_ctl.rc_level_state_extra; 2113 log.u_bbr.pkts_out = bbr->r_ctl.rc_target_at_state; 2114 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2115 &bbr->rc_inp->inp_socket->so_rcv, 2116 &bbr->rc_inp->inp_socket->so_snd, 2117 BBR_LOG_STATE, 0, 2118 0, &log, false, &bbr->rc_tv); 2119 } 2120 } 2121 2122 static void 2123 bbr_log_rtt_shrinks(struct tcp_bbr *bbr, uint32_t cts, uint32_t applied, 2124 uint32_t rtt, uint32_t line, uint8_t reas, uint16_t cond) 2125 { 2126 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2127 union tcp_log_stackspecific log; 2128 2129 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2130 log.u_bbr.flex1 = line; 2131 log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks; 2132 log.u_bbr.flex3 = bbr->r_ctl.last_in_probertt; 2133 log.u_bbr.flex4 = applied; 2134 log.u_bbr.flex5 = rtt; 2135 log.u_bbr.flex6 = bbr->r_ctl.rc_target_at_state; 2136 log.u_bbr.flex7 = cond; 2137 log.u_bbr.flex8 = reas; 2138 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2139 &bbr->rc_inp->inp_socket->so_rcv, 2140 &bbr->rc_inp->inp_socket->so_snd, 2141 BBR_LOG_RTT_SHRINKS, 0, 2142 0, &log, false, &bbr->rc_tv); 2143 } 2144 } 2145 2146 static void 2147 bbr_log_type_exit_rec(struct tcp_bbr *bbr) 2148 { 2149 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2150 union tcp_log_stackspecific log; 2151 2152 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2153 log.u_bbr.flex1 = bbr->r_ctl.rc_recovery_start; 2154 log.u_bbr.flex2 = bbr->r_ctl.rc_cwnd_on_ent; 2155 log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state; 2156 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2157 &bbr->rc_inp->inp_socket->so_rcv, 2158 &bbr->rc_inp->inp_socket->so_snd, 2159 BBR_LOG_EXITREC, 0, 2160 0, &log, false, &bbr->rc_tv); 2161 } 2162 } 2163 2164 static void 2165 bbr_log_type_cwndupd(struct tcp_bbr *bbr, uint32_t bytes_this_ack, uint32_t chg, 2166 uint32_t prev_acked, int32_t meth, uint32_t target, uint32_t th_ack, int32_t line) 2167 { 2168 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2169 union tcp_log_stackspecific log; 2170 2171 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2172 log.u_bbr.flex1 = line; 2173 log.u_bbr.flex2 = prev_acked; 2174 log.u_bbr.flex3 = bytes_this_ack; 2175 log.u_bbr.flex4 = chg; 2176 log.u_bbr.flex5 = th_ack; 2177 log.u_bbr.flex6 = target; 2178 log.u_bbr.flex8 = meth; 2179 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2180 &bbr->rc_inp->inp_socket->so_rcv, 2181 &bbr->rc_inp->inp_socket->so_snd, 2182 BBR_LOG_CWND, 0, 2183 0, &log, false, &bbr->rc_tv); 2184 } 2185 } 2186 2187 static void 2188 bbr_log_rtt_sample(struct tcp_bbr *bbr, uint32_t rtt, uint32_t tsin) 2189 { 2190 /* 2191 * Log the rtt sample we are applying to the srtt algorithm in 2192 * useconds. 
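* The fields that follow are packed by convention rather than by name:
* flex1 carries the rtt sample itself, flex6 the echoed timestamp option
* value, and pkts_out a millisecond wall-clock stamp, so a decoder of
* TCP_LOG_RTT records has to know this layout.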
2193 */ 2194 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2195 union tcp_log_stackspecific log; 2196 2197 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2198 log.u_bbr.flex1 = rtt; 2199 log.u_bbr.flex2 = bbr->r_ctl.rc_bbr_state_time; 2200 log.u_bbr.flex3 = bbr->r_ctl.rc_ack_hdwr_delay; 2201 log.u_bbr.flex4 = bbr->rc_tp->ts_offset; 2202 log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state; 2203 log.u_bbr.pkts_out = tcp_tv_to_mssectick(&bbr->rc_tv); 2204 log.u_bbr.flex6 = tsin; 2205 log.u_bbr.flex7 = 0; 2206 log.u_bbr.flex8 = bbr->rc_ack_was_delayed; 2207 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2208 &bbr->rc_inp->inp_socket->so_rcv, 2209 &bbr->rc_inp->inp_socket->so_snd, 2210 TCP_LOG_RTT, 0, 2211 0, &log, false, &bbr->rc_tv); 2212 } 2213 } 2214 2215 static void 2216 bbr_log_type_pesist(struct tcp_bbr *bbr, uint32_t cts, uint32_t time_in, int32_t line, uint8_t enter_exit) 2217 { 2218 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2219 union tcp_log_stackspecific log; 2220 2221 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2222 log.u_bbr.flex1 = time_in; 2223 log.u_bbr.flex2 = line; 2224 log.u_bbr.flex8 = enter_exit; 2225 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2226 &bbr->rc_inp->inp_socket->so_rcv, 2227 &bbr->rc_inp->inp_socket->so_snd, 2228 BBR_LOG_PERSIST, 0, 2229 0, &log, false, &bbr->rc_tv); 2230 } 2231 } 2232 static void 2233 bbr_log_ack_clear(struct tcp_bbr *bbr, uint32_t cts) 2234 { 2235 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2236 union tcp_log_stackspecific log; 2237 2238 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2239 log.u_bbr.flex1 = bbr->rc_tp->ts_recent_age; 2240 log.u_bbr.flex2 = bbr->r_ctl.rc_rtt_shrinks; 2241 log.u_bbr.flex3 = bbr->r_ctl.rc_probertt_int; 2242 log.u_bbr.flex4 = bbr->r_ctl.rc_went_idle_time; 2243 log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state; 2244 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2245 &bbr->rc_inp->inp_socket->so_rcv, 2246 &bbr->rc_inp->inp_socket->so_snd, 2247 BBR_LOG_ACKCLEAR, 0, 2248 0, &log, false, &bbr->rc_tv); 2249 } 2250 } 2251 2252 static void 2253 bbr_log_ack_event(struct tcp_bbr *bbr, struct tcphdr *th, struct tcpopt *to, uint32_t tlen, 2254 uint16_t nsegs, uint32_t cts, int32_t nxt_pkt, struct mbuf *m) 2255 { 2256 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2257 union tcp_log_stackspecific log; 2258 struct timeval tv; 2259 2260 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2261 log.u_bbr.flex1 = nsegs; 2262 log.u_bbr.flex2 = bbr->r_ctl.rc_lost_bytes; 2263 if (m) { 2264 struct timespec ts; 2265 2266 log.u_bbr.flex3 = m->m_flags; 2267 if (m->m_flags & M_TSTMP) { 2268 mbuf_tstmp2timespec(m, &ts); 2269 tv.tv_sec = ts.tv_sec; 2270 tv.tv_usec = ts.tv_nsec / 1000; 2271 log.u_bbr.lt_epoch = tcp_tv_to_usectick(&tv); 2272 } else { 2273 log.u_bbr.lt_epoch = 0; 2274 } 2275 if (m->m_flags & M_TSTMP_LRO) { 2276 tv.tv_sec = m->m_pkthdr.rcv_tstmp / 1000000000; 2277 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000) / 1000; 2278 log.u_bbr.flex5 = tcp_tv_to_usectick(&tv); 2279 } else { 2280 /* No arrival timestamp */ 2281 log.u_bbr.flex5 = 0; 2282 } 2283 2284 log.u_bbr.pkts_out = tcp_get_usecs(&tv); 2285 } else { 2286 log.u_bbr.flex3 = 0; 2287 log.u_bbr.flex5 = 0; 2288 log.u_bbr.flex6 = 0; 2289 log.u_bbr.pkts_out = 0; 2290 } 2291 log.u_bbr.flex4 = bbr->r_ctl.rc_target_at_state; 2292 log.u_bbr.flex7 = bbr->r_wanted_output; 2293 log.u_bbr.flex8 = bbr->rc_in_persist; 2294 TCP_LOG_EVENTP(bbr->rc_tp, th, 2295 &bbr->rc_inp->inp_socket->so_rcv, 2296 &bbr->rc_inp->inp_socket->so_snd, 2297 
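			/*
			 * (Aside, illustrative numbers only: the M_TSTMP_LRO
			 * branch above converts the raw nanosecond arrival
			 * stamp by hand, so e.g. rcv_tstmp = 5000123456 ns
			 * becomes tv_sec = 5 and tv_usec = 123456 / 1000 = 123,
			 * which tcp_tv_to_usectick() then folds into the single
			 * usec value logged in flex5.)
			 */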
TCP_LOG_IN, 0, 2298 tlen, &log, true, &bbr->rc_tv); 2299 } 2300 } 2301 2302 static void 2303 bbr_log_doseg_done(struct tcp_bbr *bbr, uint32_t cts, int32_t nxt_pkt, int32_t did_out) 2304 { 2305 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2306 union tcp_log_stackspecific log; 2307 2308 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2309 log.u_bbr.flex1 = did_out; 2310 log.u_bbr.flex2 = nxt_pkt; 2311 log.u_bbr.flex3 = bbr->r_ctl.rc_last_delay_val; 2312 log.u_bbr.flex4 = bbr->r_ctl.rc_hpts_flags; 2313 log.u_bbr.flex5 = bbr->r_ctl.rc_timer_exp; 2314 log.u_bbr.flex6 = bbr->r_ctl.rc_lost_bytes; 2315 log.u_bbr.flex7 = bbr->r_wanted_output; 2316 log.u_bbr.flex8 = bbr->rc_in_persist; 2317 log.u_bbr.pkts_out = bbr->r_ctl.highest_hdwr_delay; 2318 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2319 &bbr->rc_inp->inp_socket->so_rcv, 2320 &bbr->rc_inp->inp_socket->so_snd, 2321 BBR_LOG_DOSEG_DONE, 0, 2322 0, &log, true, &bbr->rc_tv); 2323 } 2324 } 2325 2326 static void 2327 bbr_log_enobuf_jmp(struct tcp_bbr *bbr, uint32_t len, uint32_t cts, 2328 int32_t line, uint32_t o_len, uint32_t segcnt, uint32_t segsiz) 2329 { 2330 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2331 union tcp_log_stackspecific log; 2332 2333 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2334 log.u_bbr.flex1 = line; 2335 log.u_bbr.flex2 = o_len; 2336 log.u_bbr.flex3 = segcnt; 2337 log.u_bbr.flex4 = segsiz; 2338 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2339 &bbr->rc_inp->inp_socket->so_rcv, 2340 &bbr->rc_inp->inp_socket->so_snd, 2341 BBR_LOG_ENOBUF_JMP, ENOBUFS, 2342 len, &log, true, &bbr->rc_tv); 2343 } 2344 } 2345 2346 static void 2347 bbr_log_to_processing(struct tcp_bbr *bbr, uint32_t cts, int32_t ret, int32_t timers, uint8_t hpts_calling) 2348 { 2349 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2350 union tcp_log_stackspecific log; 2351 2352 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2353 log.u_bbr.flex1 = timers; 2354 log.u_bbr.flex2 = ret; 2355 log.u_bbr.flex3 = bbr->r_ctl.rc_timer_exp; 2356 log.u_bbr.flex4 = bbr->r_ctl.rc_hpts_flags; 2357 log.u_bbr.flex5 = cts; 2358 log.u_bbr.flex6 = bbr->r_ctl.rc_target_at_state; 2359 log.u_bbr.flex8 = hpts_calling; 2360 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2361 &bbr->rc_inp->inp_socket->so_rcv, 2362 &bbr->rc_inp->inp_socket->so_snd, 2363 BBR_LOG_TO_PROCESS, 0, 2364 0, &log, false, &bbr->rc_tv); 2365 } 2366 } 2367 2368 static void 2369 bbr_log_to_event(struct tcp_bbr *bbr, uint32_t cts, int32_t to_num) 2370 { 2371 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2372 union tcp_log_stackspecific log; 2373 uint64_t ar; 2374 2375 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2376 log.u_bbr.flex1 = bbr->bbr_timer_src; 2377 log.u_bbr.flex2 = 0; 2378 log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags; 2379 ar = (uint64_t)(bbr->r_ctl.rc_resend); 2380 ar >>= 32; 2381 ar &= 0x00000000ffffffff; 2382 log.u_bbr.flex4 = (uint32_t)ar; 2383 ar = (uint64_t)bbr->r_ctl.rc_resend; 2384 ar &= 0x00000000ffffffff; 2385 log.u_bbr.flex5 = (uint32_t)ar; 2386 log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur); 2387 log.u_bbr.flex8 = to_num; 2388 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2389 &bbr->rc_inp->inp_socket->so_rcv, 2390 &bbr->rc_inp->inp_socket->so_snd, 2391 BBR_LOG_RTO, 0, 2392 0, &log, false, &bbr->rc_tv); 2393 } 2394 } 2395 2396 static void 2397 bbr_log_startup_event(struct tcp_bbr *bbr, uint32_t cts, uint32_t flex1, uint32_t flex2, uint32_t flex3, uint8_t reason) 2398 { 2399 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2400 union tcp_log_stackspecific log; 2401 2402 bbr_fill_in_logging_data(bbr, &log.u_bbr, 
cts); 2403 log.u_bbr.flex1 = flex1; 2404 log.u_bbr.flex2 = flex2; 2405 log.u_bbr.flex3 = flex3; 2406 log.u_bbr.flex4 = 0; 2407 log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state; 2408 log.u_bbr.flex6 = bbr->r_ctl.rc_lost_at_startup; 2409 log.u_bbr.flex8 = reason; 2410 log.u_bbr.cur_del_rate = bbr->r_ctl.rc_bbr_lastbtlbw; 2411 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2412 &bbr->rc_inp->inp_socket->so_rcv, 2413 &bbr->rc_inp->inp_socket->so_snd, 2414 BBR_LOG_REDUCE, 0, 2415 0, &log, false, &bbr->rc_tv); 2416 } 2417 } 2418 2419 static void 2420 bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag) 2421 { 2422 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2423 union tcp_log_stackspecific log; 2424 2425 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2426 log.u_bbr.flex1 = diag->p_nxt_slot; 2427 log.u_bbr.flex2 = diag->p_cur_slot; 2428 log.u_bbr.flex3 = diag->slot_req; 2429 log.u_bbr.flex4 = diag->inp_hptsslot; 2430 log.u_bbr.flex5 = diag->slot_remaining; 2431 log.u_bbr.flex6 = diag->need_new_to; 2432 log.u_bbr.flex7 = diag->p_hpts_active; 2433 log.u_bbr.flex8 = diag->p_on_min_sleep; 2434 /* Hijack other fields as needed */ 2435 log.u_bbr.epoch = diag->have_slept; 2436 log.u_bbr.lt_epoch = diag->yet_to_sleep; 2437 log.u_bbr.pkts_out = diag->co_ret; 2438 log.u_bbr.applimited = diag->hpts_sleep_time; 2439 log.u_bbr.delivered = diag->p_prev_slot; 2440 log.u_bbr.inflight = diag->p_runningtick; 2441 log.u_bbr.bw_inuse = diag->wheel_tick; 2442 log.u_bbr.rttProp = diag->wheel_cts; 2443 log.u_bbr.delRate = diag->maxticks; 2444 log.u_bbr.cur_del_rate = diag->p_curtick; 2445 log.u_bbr.cur_del_rate <<= 32; 2446 log.u_bbr.cur_del_rate |= diag->p_lasttick; 2447 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2448 &bbr->rc_inp->inp_socket->so_rcv, 2449 &bbr->rc_inp->inp_socket->so_snd, 2450 BBR_LOG_HPTSDIAG, 0, 2451 0, &log, false, &bbr->rc_tv); 2452 } 2453 } 2454 2455 static void 2456 bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts, uint32_t time_since_sent, uint32_t srtt, 2457 uint32_t thresh, uint32_t to) 2458 { 2459 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2460 union tcp_log_stackspecific log; 2461 2462 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2463 log.u_bbr.flex1 = bbr->rc_tp->t_rttvar; 2464 log.u_bbr.flex2 = time_since_sent; 2465 log.u_bbr.flex3 = srtt; 2466 log.u_bbr.flex4 = thresh; 2467 log.u_bbr.flex5 = to; 2468 log.u_bbr.flex6 = bbr->rc_tp->t_srtt; 2469 log.u_bbr.flex8 = mode; 2470 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2471 &bbr->rc_inp->inp_socket->so_rcv, 2472 &bbr->rc_inp->inp_socket->so_snd, 2473 BBR_LOG_TIMERPREP, 0, 2474 0, &log, false, &bbr->rc_tv); 2475 } 2476 } 2477 2478 static void 2479 bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len, 2480 uint32_t cts, uint32_t usecs, uint64_t bw, uint32_t override, int mod) 2481 { 2482 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2483 union tcp_log_stackspecific log; 2484 2485 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2486 log.u_bbr.flex1 = usecs; 2487 log.u_bbr.flex2 = len; 2488 log.u_bbr.flex3 = (uint32_t)((bw >> 32) & 0x00000000ffffffff); 2489 log.u_bbr.flex4 = (uint32_t)(bw & 0x00000000ffffffff); 2490 if (override) 2491 log.u_bbr.flex5 = (1 << 2); 2492 else 2493 log.u_bbr.flex5 = 0; 2494 log.u_bbr.flex6 = override; 2495 log.u_bbr.flex7 = gain; 2496 log.u_bbr.flex8 = mod; 2497 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2498 &bbr->rc_inp->inp_socket->so_rcv, 2499 &bbr->rc_inp->inp_socket->so_snd, 2500 BBR_LOG_HPTSI_CALC, 0, 2501 len, &log, false, 
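		/*
		 * (Sketch, not taken from the original sources: a reader of
		 * BBR_LOG_HPTSI_CALC records can rebuild the 64-bit rate that
		 * was split into flex3/flex4 above with something like
		 *	uint64_t bw = ((uint64_t)rec.flex3 << 32) | rec.flex4;
		 * where "rec" stands for whatever structure the log decoder
		 * unpacks the stack-specific fields into.)
		 */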
&bbr->rc_tv); 2502 } 2503 } 2504 2505 static void 2506 bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which) 2507 { 2508 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2509 union tcp_log_stackspecific log; 2510 2511 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2512 2513 log.u_bbr.flex1 = bbr->bbr_timer_src; 2514 log.u_bbr.flex2 = to; 2515 log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags; 2516 log.u_bbr.flex4 = slot; 2517 log.u_bbr.flex5 = bbr->rc_inp->inp_hptsslot; 2518 log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur); 2519 log.u_bbr.pkts_out = bbr->rc_inp->inp_flags2; 2520 log.u_bbr.flex8 = which; 2521 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2522 &bbr->rc_inp->inp_socket->so_rcv, 2523 &bbr->rc_inp->inp_socket->so_snd, 2524 BBR_LOG_TIMERSTAR, 0, 2525 0, &log, false, &bbr->rc_tv); 2526 } 2527 } 2528 2529 static void 2530 bbr_log_thresh_choice(struct tcp_bbr *bbr, uint32_t cts, uint32_t thresh, uint32_t lro, uint32_t srtt, struct bbr_sendmap *rsm, uint8_t frm) 2531 { 2532 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2533 union tcp_log_stackspecific log; 2534 2535 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2536 log.u_bbr.flex1 = thresh; 2537 log.u_bbr.flex2 = lro; 2538 log.u_bbr.flex3 = bbr->r_ctl.rc_reorder_ts; 2539 log.u_bbr.flex4 = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; 2540 log.u_bbr.flex5 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur); 2541 log.u_bbr.flex6 = srtt; 2542 log.u_bbr.flex7 = bbr->r_ctl.rc_reorder_shift; 2543 log.u_bbr.flex8 = frm; 2544 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2545 &bbr->rc_inp->inp_socket->so_rcv, 2546 &bbr->rc_inp->inp_socket->so_snd, 2547 BBR_LOG_THRESH_CALC, 0, 2548 0, &log, false, &bbr->rc_tv); 2549 } 2550 } 2551 2552 static void 2553 bbr_log_to_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts, uint8_t hpts_removed) 2554 { 2555 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2556 union tcp_log_stackspecific log; 2557 2558 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2559 log.u_bbr.flex1 = line; 2560 log.u_bbr.flex2 = bbr->bbr_timer_src; 2561 log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags; 2562 log.u_bbr.flex4 = bbr->rc_in_persist; 2563 log.u_bbr.flex5 = bbr->r_ctl.rc_target_at_state; 2564 log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur); 2565 log.u_bbr.flex8 = hpts_removed; 2566 log.u_bbr.pkts_out = bbr->rc_pacer_started; 2567 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2568 &bbr->rc_inp->inp_socket->so_rcv, 2569 &bbr->rc_inp->inp_socket->so_snd, 2570 BBR_LOG_TIMERCANC, 0, 2571 0, &log, false, &bbr->rc_tv); 2572 } 2573 } 2574 2575 2576 static void 2577 bbr_log_tstmp_validation(struct tcp_bbr *bbr, uint64_t peer_delta, uint64_t delta) 2578 { 2579 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2580 union tcp_log_stackspecific log; 2581 2582 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2583 log.u_bbr.flex1 = bbr->r_ctl.bbr_peer_tsratio; 2584 log.u_bbr.flex2 = (peer_delta >> 32); 2585 log.u_bbr.flex3 = (peer_delta & 0x00000000ffffffff); 2586 log.u_bbr.flex4 = (delta >> 32); 2587 log.u_bbr.flex5 = (delta & 0x00000000ffffffff); 2588 log.u_bbr.flex7 = bbr->rc_ts_clock_set; 2589 log.u_bbr.flex8 = bbr->rc_ts_cant_be_used; 2590 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2591 &bbr->rc_inp->inp_socket->so_rcv, 2592 &bbr->rc_inp->inp_socket->so_snd, 2593 BBR_LOG_TSTMP_VAL, 0, 2594 0, &log, false, &bbr->rc_tv); 2595 2596 } 2597 } 2598 2599 static void 2600 bbr_log_type_tsosize(struct tcp_bbr *bbr, uint32_t cts, uint32_t tsosz, uint32_t tls, uint32_t old_val, uint32_t maxseg, int hdwr) 2601 { 
2602 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2603 union tcp_log_stackspecific log; 2604 2605 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2606 log.u_bbr.flex1 = tsosz; 2607 log.u_bbr.flex2 = tls; 2608 log.u_bbr.flex3 = tcp_min_hptsi_time; 2609 log.u_bbr.flex4 = bbr->r_ctl.bbr_hptsi_bytes_min; 2610 log.u_bbr.flex5 = old_val; 2611 log.u_bbr.flex6 = maxseg; 2612 log.u_bbr.flex7 = bbr->rc_no_pacing; 2613 log.u_bbr.flex7 <<= 1; 2614 log.u_bbr.flex7 |= bbr->rc_past_init_win; 2615 if (hdwr) 2616 log.u_bbr.flex8 = 0x80 | bbr->rc_use_google; 2617 else 2618 log.u_bbr.flex8 = bbr->rc_use_google; 2619 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2620 &bbr->rc_inp->inp_socket->so_rcv, 2621 &bbr->rc_inp->inp_socket->so_snd, 2622 BBR_LOG_BBRTSO, 0, 2623 0, &log, false, &bbr->rc_tv); 2624 } 2625 } 2626 2627 static void 2628 bbr_log_type_rsmclear(struct tcp_bbr *bbr, uint32_t cts, struct bbr_sendmap *rsm, 2629 uint32_t flags, uint32_t line) 2630 { 2631 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2632 union tcp_log_stackspecific log; 2633 2634 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2635 log.u_bbr.flex1 = line; 2636 log.u_bbr.flex2 = rsm->r_start; 2637 log.u_bbr.flex3 = rsm->r_end; 2638 log.u_bbr.flex4 = rsm->r_delivered; 2639 log.u_bbr.flex5 = rsm->r_rtr_cnt; 2640 log.u_bbr.flex6 = rsm->r_dupack; 2641 log.u_bbr.flex7 = rsm->r_tim_lastsent[0]; 2642 log.u_bbr.flex8 = rsm->r_flags; 2643 /* Hijack the pkts_out fids */ 2644 log.u_bbr.applimited = flags; 2645 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2646 &bbr->rc_inp->inp_socket->so_rcv, 2647 &bbr->rc_inp->inp_socket->so_snd, 2648 BBR_RSM_CLEARED, 0, 2649 0, &log, false, &bbr->rc_tv); 2650 } 2651 } 2652 2653 static void 2654 bbr_log_type_bbrupd(struct tcp_bbr *bbr, uint8_t flex8, uint32_t cts, 2655 uint32_t flex3, uint32_t flex2, uint32_t flex5, 2656 uint32_t flex6, uint32_t pkts_out, int flex7, 2657 uint32_t flex4, uint32_t flex1) 2658 { 2659 2660 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2661 union tcp_log_stackspecific log; 2662 2663 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2664 log.u_bbr.flex1 = flex1; 2665 log.u_bbr.flex2 = flex2; 2666 log.u_bbr.flex3 = flex3; 2667 log.u_bbr.flex4 = flex4; 2668 log.u_bbr.flex5 = flex5; 2669 log.u_bbr.flex6 = flex6; 2670 log.u_bbr.flex7 = flex7; 2671 /* Hijack the pkts_out fids */ 2672 log.u_bbr.pkts_out = pkts_out; 2673 log.u_bbr.flex8 = flex8; 2674 if (bbr->rc_ack_was_delayed) 2675 log.u_bbr.epoch = bbr->r_ctl.rc_ack_hdwr_delay; 2676 else 2677 log.u_bbr.epoch = 0; 2678 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2679 &bbr->rc_inp->inp_socket->so_rcv, 2680 &bbr->rc_inp->inp_socket->so_snd, 2681 BBR_LOG_BBRUPD, 0, 2682 flex2, &log, false, &bbr->rc_tv); 2683 } 2684 } 2685 2686 2687 static void 2688 bbr_log_type_ltbw(struct tcp_bbr *bbr, uint32_t cts, int32_t reason, 2689 uint32_t newbw, uint32_t obw, uint32_t diff, 2690 uint32_t tim) 2691 { 2692 if (/*bbr_verbose_logging && */(bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2693 union tcp_log_stackspecific log; 2694 2695 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2696 log.u_bbr.flex1 = reason; 2697 log.u_bbr.flex2 = newbw; 2698 log.u_bbr.flex3 = obw; 2699 log.u_bbr.flex4 = diff; 2700 log.u_bbr.flex5 = bbr->r_ctl.rc_lt_lost; 2701 log.u_bbr.flex6 = bbr->r_ctl.rc_lt_del; 2702 log.u_bbr.flex7 = bbr->rc_lt_is_sampling; 2703 log.u_bbr.pkts_out = tim; 2704 log.u_bbr.bw_inuse = bbr->r_ctl.rc_lt_bw; 2705 if (bbr->rc_lt_use_bw == 0) 2706 log.u_bbr.epoch = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch; 2707 else 2708 log.u_bbr.epoch = bbr->r_ctl.rc_pkt_epoch - 
bbr->r_ctl.rc_lt_epoch_use; 2709 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2710 &bbr->rc_inp->inp_socket->so_rcv, 2711 &bbr->rc_inp->inp_socket->so_snd, 2712 BBR_LOG_BWSAMP, 0, 2713 0, &log, false, &bbr->rc_tv); 2714 } 2715 } 2716 2717 static inline void 2718 bbr_log_progress_event(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t tick, int event, int line) 2719 { 2720 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2721 union tcp_log_stackspecific log; 2722 2723 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2724 log.u_bbr.flex1 = line; 2725 log.u_bbr.flex2 = tick; 2726 log.u_bbr.flex3 = tp->t_maxunacktime; 2727 log.u_bbr.flex4 = tp->t_acktime; 2728 log.u_bbr.flex8 = event; 2729 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2730 &bbr->rc_inp->inp_socket->so_rcv, 2731 &bbr->rc_inp->inp_socket->so_snd, 2732 BBR_LOG_PROGRESS, 0, 2733 0, &log, false, &bbr->rc_tv); 2734 } 2735 } 2736 2737 static void 2738 bbr_type_log_hdwr_pacing(struct tcp_bbr *bbr, const struct ifnet *ifp, 2739 uint64_t rate, uint64_t hw_rate, int line, uint32_t cts, 2740 int error) 2741 { 2742 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2743 union tcp_log_stackspecific log; 2744 2745 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2746 log.u_bbr.flex1 = ((hw_rate >> 32) & 0x00000000ffffffff); 2747 log.u_bbr.flex2 = (hw_rate & 0x00000000ffffffff); 2748 log.u_bbr.flex3 = (((uint64_t)ifp >> 32) & 0x00000000ffffffff); 2749 log.u_bbr.flex4 = ((uint64_t)ifp & 0x00000000ffffffff); 2750 log.u_bbr.bw_inuse = rate; 2751 log.u_bbr.flex5 = line; 2752 log.u_bbr.flex6 = error; 2753 log.u_bbr.flex8 = bbr->skip_gain; 2754 log.u_bbr.flex8 <<= 1; 2755 log.u_bbr.flex8 |= bbr->gain_is_limited; 2756 log.u_bbr.flex8 <<= 1; 2757 log.u_bbr.flex8 |= bbr->bbr_hdrw_pacing; 2758 log.u_bbr.pkts_out = bbr->rc_tp->t_maxseg; 2759 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2760 &bbr->rc_inp->inp_socket->so_rcv, 2761 &bbr->rc_inp->inp_socket->so_snd, 2762 BBR_LOG_HDWR_PACE, 0, 2763 0, &log, false, &bbr->rc_tv); 2764 } 2765 } 2766 2767 static void 2768 bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay) 2769 { 2770 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2771 union tcp_log_stackspecific log; 2772 2773 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2774 log.u_bbr.flex1 = slot; 2775 log.u_bbr.flex2 = del_by; 2776 log.u_bbr.flex3 = prev_delay; 2777 log.u_bbr.flex4 = line; 2778 log.u_bbr.flex5 = bbr->r_ctl.rc_last_delay_val; 2779 log.u_bbr.flex6 = bbr->r_ctl.rc_hptsi_agg_delay; 2780 log.u_bbr.flex7 = (0x0000ffff & bbr->r_ctl.rc_hpts_flags); 2781 log.u_bbr.flex8 = bbr->rc_in_persist; 2782 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2783 &bbr->rc_inp->inp_socket->so_rcv, 2784 &bbr->rc_inp->inp_socket->so_snd, 2785 BBR_LOG_BBRSND, 0, 2786 len, &log, false, &bbr->rc_tv); 2787 } 2788 } 2789 2790 static void 2791 bbr_log_type_bbrrttprop(struct tcp_bbr *bbr, uint32_t t, uint32_t end, uint32_t tsconv, uint32_t cts, int32_t match, uint32_t seq, uint8_t flags) 2792 { 2793 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2794 union tcp_log_stackspecific log; 2795 2796 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2797 log.u_bbr.flex1 = bbr->r_ctl.rc_delivered; 2798 log.u_bbr.flex2 = 0; 2799 log.u_bbr.flex3 = bbr->r_ctl.rc_lowest_rtt; 2800 log.u_bbr.flex4 = end; 2801 log.u_bbr.flex5 = seq; 2802 log.u_bbr.flex6 = t; 2803 log.u_bbr.flex7 = match; 2804 log.u_bbr.flex8 = flags; 2805 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2806 &bbr->rc_inp->inp_socket->so_rcv, 2807 
&bbr->rc_inp->inp_socket->so_snd, 2808 BBR_LOG_BBRRTT, 0, 2809 0, &log, false, &bbr->rc_tv); 2810 } 2811 } 2812 2813 static void 2814 bbr_log_exit_gain(struct tcp_bbr *bbr, uint32_t cts, int32_t entry_method) 2815 { 2816 if (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF) { 2817 union tcp_log_stackspecific log; 2818 2819 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 2820 log.u_bbr.flex1 = bbr->r_ctl.rc_target_at_state; 2821 log.u_bbr.flex2 = (bbr->rc_tp->t_maxseg - bbr->rc_last_options); 2822 log.u_bbr.flex3 = bbr->r_ctl.gain_epoch; 2823 log.u_bbr.flex4 = bbr->r_ctl.rc_pace_max_segs; 2824 log.u_bbr.flex5 = bbr->r_ctl.rc_pace_min_segs; 2825 log.u_bbr.flex6 = bbr->r_ctl.rc_bbr_state_atflight; 2826 log.u_bbr.flex7 = 0; 2827 log.u_bbr.flex8 = entry_method; 2828 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2829 &bbr->rc_inp->inp_socket->so_rcv, 2830 &bbr->rc_inp->inp_socket->so_snd, 2831 BBR_LOG_EXIT_GAIN, 0, 2832 0, &log, false, &bbr->rc_tv); 2833 } 2834 } 2835 2836 static void 2837 bbr_log_settings_change(struct tcp_bbr *bbr, int settings_desired) 2838 { 2839 if (bbr_verbose_logging && (bbr->rc_tp->t_logstate != TCP_LOG_STATE_OFF)) { 2840 union tcp_log_stackspecific log; 2841 2842 bbr_fill_in_logging_data(bbr, &log.u_bbr, bbr->r_ctl.rc_rcvtime); 2843 /* R-HU */ 2844 log.u_bbr.flex1 = 0; 2845 log.u_bbr.flex2 = 0; 2846 log.u_bbr.flex3 = 0; 2847 log.u_bbr.flex4 = 0; 2848 log.u_bbr.flex7 = 0; 2849 log.u_bbr.flex8 = settings_desired; 2850 2851 TCP_LOG_EVENTP(bbr->rc_tp, NULL, 2852 &bbr->rc_inp->inp_socket->so_rcv, 2853 &bbr->rc_inp->inp_socket->so_snd, 2854 BBR_LOG_SETTINGS_CHG, 0, 2855 0, &log, false, &bbr->rc_tv); 2856 } 2857 } 2858 2859 /* 2860 * Returns the bw from the our filter. 2861 */ 2862 static inline uint64_t 2863 bbr_get_full_bw(struct tcp_bbr *bbr) 2864 { 2865 uint64_t bw; 2866 2867 bw = get_filter_value(&bbr->r_ctl.rc_delrate); 2868 2869 return (bw); 2870 } 2871 2872 static inline void 2873 bbr_set_pktepoch(struct tcp_bbr *bbr, uint32_t cts, int32_t line) 2874 { 2875 uint64_t calclr; 2876 uint32_t lost, del; 2877 2878 if (bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_pktepoch) 2879 lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lost_at_pktepoch; 2880 else 2881 lost = 0; 2882 del = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_pkt_epoch_del; 2883 if (lost == 0) { 2884 calclr = 0; 2885 } else if (del) { 2886 calclr = lost; 2887 calclr *= (uint64_t)1000; 2888 calclr /= (uint64_t)del; 2889 } else { 2890 /* Nothing delivered? 
100.0% loss */ 2891 calclr = 1000; 2892 } 2893 bbr->r_ctl.rc_pkt_epoch_loss_rate = (uint32_t)calclr; 2894 if (IN_RECOVERY(bbr->rc_tp->t_flags)) 2895 bbr->r_ctl.recovery_lr += (uint32_t)calclr; 2896 bbr->r_ctl.rc_pkt_epoch++; 2897 if (bbr->rc_no_pacing && 2898 (bbr->r_ctl.rc_pkt_epoch >= bbr->no_pacing_until)) { 2899 bbr->rc_no_pacing = 0; 2900 tcp_bbr_tso_size_check(bbr, cts); 2901 } 2902 bbr->r_ctl.rc_pkt_epoch_rtt = bbr_calc_time(cts, bbr->r_ctl.rc_pkt_epoch_time); 2903 bbr->r_ctl.rc_pkt_epoch_time = cts; 2904 /* What was our loss rate */ 2905 bbr_log_pkt_epoch(bbr, cts, line, lost, del); 2906 bbr->r_ctl.rc_pkt_epoch_del = bbr->r_ctl.rc_delivered; 2907 bbr->r_ctl.rc_lost_at_pktepoch = bbr->r_ctl.rc_lost; 2908 } 2909 2910 static inline void 2911 bbr_set_epoch(struct tcp_bbr *bbr, uint32_t cts, int32_t line) 2912 { 2913 uint32_t epoch_time; 2914 2915 /* Tick the RTT clock */ 2916 bbr->r_ctl.rc_rtt_epoch++; 2917 epoch_time = cts - bbr->r_ctl.rc_rcv_epoch_start; 2918 bbr_log_time_epoch(bbr, cts, line, epoch_time); 2919 bbr->r_ctl.rc_rcv_epoch_start = cts; 2920 } 2921 2922 2923 static inline void 2924 bbr_isit_a_pkt_epoch(struct tcp_bbr *bbr, uint32_t cts, struct bbr_sendmap *rsm, int32_t line, int32_t cum_acked) 2925 { 2926 if (SEQ_GEQ(rsm->r_delivered, bbr->r_ctl.rc_pkt_epoch_del)) { 2927 bbr->rc_is_pkt_epoch_now = 1; 2928 } 2929 } 2930 2931 /* 2932 * Returns the bw from either the b/w filter 2933 * or from the lt_bw (if the connection is being 2934 * policed). 2935 */ 2936 static inline uint64_t 2937 __bbr_get_bw(struct tcp_bbr *bbr) 2938 { 2939 uint64_t bw, min_bw; 2940 uint64_t rtt; 2941 int gm_measure_cnt = 1; 2942 2943 /* 2944 * For startup we make, like google, a 2945 * minimum b/w. This is generated from the 2946 * IW and the rttProp. We do fall back to srtt 2947 * if for some reason (initial handshake) we don't 2948 * have a rttProp. We, in the worst case, fall back 2949 * to the configured min_bw (rc_initial_hptsi_bw). 2950 */ 2951 if (bbr->rc_bbr_state == BBR_STATE_STARTUP) { 2952 /* Attempt first to use rttProp */ 2953 rtt = (uint64_t)get_filter_value_small(&bbr->r_ctl.rc_rttprop); 2954 if (rtt && (rtt < 0xffffffff)) { 2955 measure: 2956 min_bw = (uint64_t)(bbr_initial_cwnd(bbr, bbr->rc_tp)) * 2957 ((uint64_t)1000000); 2958 min_bw /= rtt; 2959 if (min_bw < bbr->r_ctl.rc_initial_hptsi_bw) { 2960 min_bw = bbr->r_ctl.rc_initial_hptsi_bw; 2961 } 2962 2963 } else if (bbr->rc_tp->t_srtt != 0) { 2964 /* No rttProp, use srtt? */ 2965 rtt = bbr_get_rtt(bbr, BBR_SRTT); 2966 goto measure; 2967 } else { 2968 min_bw = bbr->r_ctl.rc_initial_hptsi_bw; 2969 } 2970 } else 2971 min_bw = 0; 2972 2973 if ((bbr->rc_past_init_win == 0) && 2974 (bbr->r_ctl.rc_delivered > bbr_initial_cwnd(bbr, bbr->rc_tp))) 2975 bbr->rc_past_init_win = 1; 2976 if ((bbr->rc_use_google) && (bbr->r_ctl.r_measurement_count >= 1)) 2977 gm_measure_cnt = 0; 2978 if (gm_measure_cnt && 2979 ((bbr->r_ctl.r_measurement_count < bbr_min_measurements_req) || 2980 (bbr->rc_past_init_win == 0))) { 2981 /* For google we use our guess rate until we get 1 measurement */ 2982 2983 use_initial_window: 2984 rtt = (uint64_t)get_filter_value_small(&bbr->r_ctl.rc_rttprop); 2985 if (rtt && (rtt < 0xffffffff)) { 2986 /* 2987 * We have an RTT measurment. Use that in 2988 * combination with our initial window to calculate 2989 * a b/w. 
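* As a worked example (numbers purely illustrative), an initial window of
* 10 * 1460 = 14600 bytes and an rttProp of 20000 usecs give
* 14600 * 1000000 / 20000 = 730000 bytes/sec (about 5.8 Mbit/s), floored
* at rc_initial_hptsi_bw below.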
2990 */ 2991 bw = (uint64_t)(bbr_initial_cwnd(bbr, bbr->rc_tp)) * 2992 ((uint64_t)1000000); 2993 bw /= rtt; 2994 if (bw < bbr->r_ctl.rc_initial_hptsi_bw) { 2995 bw = bbr->r_ctl.rc_initial_hptsi_bw; 2996 } 2997 } else { 2998 /* Drop back to the 40 and punt to a default */ 2999 bw = bbr->r_ctl.rc_initial_hptsi_bw; 3000 } 3001 if (bw < 1) 3002 /* Probably should panic */ 3003 bw = 1; 3004 if (bw > min_bw) 3005 return (bw); 3006 else 3007 return (min_bw); 3008 } 3009 if (bbr->rc_lt_use_bw) 3010 bw = bbr->r_ctl.rc_lt_bw; 3011 else if (bbr->r_recovery_bw && (bbr->rc_use_google == 0)) 3012 bw = bbr->r_ctl.red_bw; 3013 else 3014 bw = get_filter_value(&bbr->r_ctl.rc_delrate); 3015 if (bbr->rc_tp->t_peakrate_thr && (bbr->rc_use_google == 0)) { 3016 /* 3017 * Enforce user set rate limit, keep in mind that 3018 * t_peakrate_thr is in B/s already 3019 */ 3020 bw = uqmin((uint64_t)bbr->rc_tp->t_peakrate_thr, bw); 3021 } 3022 if (bw == 0) { 3023 /* We should not be at 0, go to the initial window then */ 3024 goto use_initial_window; 3025 } 3026 if (bw < 1) 3027 /* Probably should panic */ 3028 bw = 1; 3029 if (bw < min_bw) 3030 bw = min_bw; 3031 return (bw); 3032 } 3033 3034 static inline uint64_t 3035 bbr_get_bw(struct tcp_bbr *bbr) 3036 { 3037 uint64_t bw; 3038 3039 bw = __bbr_get_bw(bbr); 3040 return (bw); 3041 } 3042 3043 static inline void 3044 bbr_reset_lt_bw_interval(struct tcp_bbr *bbr, uint32_t cts) 3045 { 3046 bbr->r_ctl.rc_lt_epoch = bbr->r_ctl.rc_pkt_epoch; 3047 bbr->r_ctl.rc_lt_time = bbr->r_ctl.rc_del_time; 3048 bbr->r_ctl.rc_lt_del = bbr->r_ctl.rc_delivered; 3049 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 3050 } 3051 3052 static inline void 3053 bbr_reset_lt_bw_sampling(struct tcp_bbr *bbr, uint32_t cts) 3054 { 3055 bbr->rc_lt_is_sampling = 0; 3056 bbr->rc_lt_use_bw = 0; 3057 bbr->r_ctl.rc_lt_bw = 0; 3058 bbr_reset_lt_bw_interval(bbr, cts); 3059 } 3060 3061 static inline void 3062 bbr_lt_bw_samp_done(struct tcp_bbr *bbr, uint64_t bw, uint32_t cts, uint32_t timin) 3063 { 3064 uint64_t diff; 3065 3066 /* Do we have a previous sample? */ 3067 if (bbr->r_ctl.rc_lt_bw) { 3068 /* Get the diff in bytes per second */ 3069 if (bbr->r_ctl.rc_lt_bw > bw) 3070 diff = bbr->r_ctl.rc_lt_bw - bw; 3071 else 3072 diff = bw - bbr->r_ctl.rc_lt_bw; 3073 if ((diff <= bbr_lt_bw_diff) || 3074 (diff <= (bbr->r_ctl.rc_lt_bw / bbr_lt_bw_ratio))) { 3075 /* Consider us policed */ 3076 uint32_t saved_bw; 3077 3078 saved_bw = (uint32_t)bbr->r_ctl.rc_lt_bw; 3079 bbr->r_ctl.rc_lt_bw = (bw + bbr->r_ctl.rc_lt_bw) / 2; /* average of two */ 3080 bbr->rc_lt_use_bw = 1; 3081 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 3082 /* 3083 * Use pkt based epoch for measuring length of 3084 * policer up 3085 */ 3086 bbr->r_ctl.rc_lt_epoch_use = bbr->r_ctl.rc_pkt_epoch; 3087 /* 3088 * reason 4 is we need to start consider being 3089 * policed 3090 */ 3091 bbr_log_type_ltbw(bbr, cts, 4, (uint32_t)bw, saved_bw, (uint32_t)diff, timin); 3092 return; 3093 } 3094 } 3095 bbr->r_ctl.rc_lt_bw = bw; 3096 bbr_reset_lt_bw_interval(bbr, cts); 3097 bbr_log_type_ltbw(bbr, cts, 5, 0, (uint32_t)bw, 0, timin); 3098 } 3099 3100 /* 3101 * RRS: Copied from user space! 3102 * Calculate a uniformly distributed random number less than upper_bound 3103 * avoiding "modulo bias". 3104 * 3105 * Uniformity is achieved by generating new random numbers until the one 3106 * returned is outside the range [0, 2**32 % upper_bound). 
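* (For instance, with upper_bound = 7, 2**32 % 7 == 4, so raw values 0-3
* are re-rolled; the surviving range then divides into exactly 613566756
* draws per residue.)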
This 3107 * guarantees the selected random number will be inside 3108 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound) 3109 * after reduction modulo upper_bound. 3110 */ 3111 static uint32_t 3112 arc4random_uniform(uint32_t upper_bound) 3113 { 3114 uint32_t r, min; 3115 3116 if (upper_bound < 2) 3117 return 0; 3118 3119 /* 2**32 % x == (2**32 - x) % x */ 3120 min = -upper_bound % upper_bound; 3121 3122 /* 3123 * This could theoretically loop forever but each retry has 3124 * p > 0.5 (worst case, usually far better) of selecting a 3125 * number inside the range we need, so it should rarely need 3126 * to re-roll. 3127 */ 3128 for (;;) { 3129 r = arc4random(); 3130 if (r >= min) 3131 break; 3132 } 3133 3134 return r % upper_bound; 3135 } 3136 3137 static void 3138 bbr_randomize_extra_state_time(struct tcp_bbr *bbr) 3139 { 3140 uint32_t ran, deduct; 3141 3142 ran = arc4random_uniform(bbr_rand_ot); 3143 if (ran) { 3144 deduct = bbr->r_ctl.rc_level_state_extra / ran; 3145 bbr->r_ctl.rc_level_state_extra -= deduct; 3146 } 3147 } 3148 /* 3149 * Return randomly the starting state 3150 * to use in probebw. 3151 */ 3152 static uint8_t 3153 bbr_pick_probebw_substate(struct tcp_bbr *bbr, uint32_t cts) 3154 { 3155 uint32_t ran; 3156 uint8_t ret_val; 3157 3158 /* Initialize the offset to 0 */ 3159 bbr->r_ctl.rc_exta_time_gd = 0; 3160 bbr->rc_hit_state_1 = 0; 3161 bbr->r_ctl.rc_level_state_extra = 0; 3162 ran = arc4random_uniform((BBR_SUBSTATE_COUNT-1)); 3163 /* 3164 * The math works funny here :) the return value is used to set the 3165 * substate and then the state change is called which increments by 3166 * one. So if we return 1 (DRAIN) we will increment to 2 (LEVEL1) when 3167 * we fully enter the state. Note that the (8 - 1 - ran) assures that 3168 * we return 1 - 7, so we dont return 0 and end up starting in 3169 * state 1 (DRAIN). 3170 */ 3171 ret_val = BBR_SUBSTATE_COUNT - 1 - ran; 3172 /* Set an epoch */ 3173 if ((cts - bbr->r_ctl.rc_rcv_epoch_start) >= bbr_get_rtt(bbr, BBR_RTT_PROP)) 3174 bbr_set_epoch(bbr, cts, __LINE__); 3175 3176 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 3177 return (ret_val); 3178 } 3179 3180 static void 3181 bbr_lt_bw_sampling(struct tcp_bbr *bbr, uint32_t cts, int32_t loss_detected) 3182 { 3183 uint32_t diff, d_time; 3184 uint64_t del_time, bw, lost, delivered; 3185 3186 if (bbr->r_use_policer == 0) 3187 return; 3188 if (bbr->rc_lt_use_bw) { 3189 /* We are using lt bw do we stop yet? */ 3190 diff = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch_use; 3191 if (diff > bbr_lt_bw_max_rtts) { 3192 /* Reset it all */ 3193 reset_all: 3194 bbr_reset_lt_bw_sampling(bbr, cts); 3195 if (bbr->rc_filled_pipe) { 3196 bbr_set_epoch(bbr, cts, __LINE__); 3197 bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts); 3198 bbr_substate_change(bbr, cts, __LINE__, 0); 3199 bbr->rc_bbr_state = BBR_STATE_PROBE_BW; 3200 bbr_log_type_statechange(bbr, cts, __LINE__); 3201 } else { 3202 /* 3203 * This should not happen really 3204 * unless we remove the startup/drain 3205 * restrictions above. 
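* (If it ever does, the fallback below simply re-enters STARTUP with the
* startup pacing and cwnd gains and recomputes the state target.)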
3206 */ 3207 bbr->rc_bbr_state = BBR_STATE_STARTUP; 3208 bbr_set_epoch(bbr, cts, __LINE__); 3209 bbr->r_ctl.rc_bbr_state_time = cts; 3210 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 3211 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg; 3212 bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg; 3213 bbr_set_state_target(bbr, __LINE__); 3214 bbr_log_type_statechange(bbr, cts, __LINE__); 3215 } 3216 /* reason 0 is to stop using lt-bw */ 3217 bbr_log_type_ltbw(bbr, cts, 0, 0, 0, 0, 0); 3218 return; 3219 } 3220 if (bbr_lt_intvl_fp == 0) { 3221 /* Not doing false-postive detection */ 3222 return; 3223 } 3224 /* False positive detection */ 3225 if (diff == bbr_lt_intvl_fp) { 3226 /* At bbr_lt_intvl_fp we record the lost */ 3227 bbr->r_ctl.rc_lt_del = bbr->r_ctl.rc_delivered; 3228 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 3229 } else if (diff > (bbr_lt_intvl_min_rtts + bbr_lt_intvl_fp)) { 3230 /* Now is our loss rate still high? */ 3231 lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lt_lost; 3232 delivered = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_lt_del; 3233 if ((delivered == 0) || 3234 (((lost * 1000)/delivered) < bbr_lt_fd_thresh)) { 3235 /* No still below our threshold */ 3236 bbr_log_type_ltbw(bbr, cts, 7, lost, delivered, 0, 0); 3237 } else { 3238 /* Yikes its still high, it must be a false positive */ 3239 bbr_log_type_ltbw(bbr, cts, 8, lost, delivered, 0, 0); 3240 goto reset_all; 3241 } 3242 } 3243 return; 3244 } 3245 /* 3246 * Wait for the first loss before sampling, to let the policer 3247 * exhaust its tokens and estimate the steady-state rate allowed by 3248 * the policer. Starting samples earlier includes bursts that 3249 * over-estimate the bw. 3250 */ 3251 if (bbr->rc_lt_is_sampling == 0) { 3252 /* reason 1 is to begin doing the sampling */ 3253 if (loss_detected == 0) 3254 return; 3255 bbr_reset_lt_bw_interval(bbr, cts); 3256 bbr->rc_lt_is_sampling = 1; 3257 bbr_log_type_ltbw(bbr, cts, 1, 0, 0, 0, 0); 3258 return; 3259 } 3260 /* Now how long were we delivering long term last> */ 3261 if (TSTMP_GEQ(bbr->r_ctl.rc_del_time, bbr->r_ctl.rc_lt_time)) 3262 d_time = bbr->r_ctl.rc_del_time - bbr->r_ctl.rc_lt_time; 3263 else 3264 d_time = 0; 3265 3266 /* To avoid underestimates, reset sampling if we run out of data. */ 3267 if (bbr->r_ctl.r_app_limited_until) { 3268 /* Can not measure in app-limited state */ 3269 bbr_reset_lt_bw_sampling(bbr, cts); 3270 /* reason 2 is to reset sampling due to app limits */ 3271 bbr_log_type_ltbw(bbr, cts, 2, 0, 0, 0, d_time); 3272 return; 3273 } 3274 diff = bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_lt_epoch; 3275 if (diff < bbr_lt_intvl_min_rtts) { 3276 /* 3277 * need more samples (we don't 3278 * start on a round like linux so 3279 * we need 1 more). 3280 */ 3281 /* 6 is not_enough time or no-loss */ 3282 bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time); 3283 return; 3284 } 3285 if (diff > (4 * bbr_lt_intvl_min_rtts)) { 3286 /* 3287 * For now if we wait too long, reset all sampling. We need 3288 * to do some research here, its possible that we should 3289 * base this on how much loss as occurred.. something like 3290 * if its under 10% (or some thresh) reset all otherwise 3291 * don't. Thats for phase II I guess. 3292 */ 3293 bbr_reset_lt_bw_sampling(bbr, cts); 3294 /* reason 3 is to reset sampling due too long of sampling */ 3295 bbr_log_type_ltbw(bbr, cts, 3, 0, 0, 0, d_time); 3296 return; 3297 } 3298 /* 3299 * End sampling interval when a packet is lost, so we estimate the 3300 * policer tokens were exhausted. 
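* (The check below is per-mille: with the default bbr_lt_loss_thresh of
* 196, the interval only ends once lost * 1000 / delivered reaches 196,
* i.e. roughly 19.6% loss.)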
Stopping the sampling before the 3301 * tokens are exhausted under-estimates the policed rate. 3302 */ 3303 if (loss_detected == 0) { 3304 /* 6 is not_enough time or no-loss */ 3305 bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time); 3306 return; 3307 } 3308 /* Calculate packets lost and delivered in sampling interval. */ 3309 lost = bbr->r_ctl.rc_lost - bbr->r_ctl.rc_lt_lost; 3310 delivered = bbr->r_ctl.rc_delivered - bbr->r_ctl.rc_lt_del; 3311 if ((delivered == 0) || 3312 (((lost * 1000)/delivered) < bbr_lt_loss_thresh)) { 3313 bbr_log_type_ltbw(bbr, cts, 6, lost, delivered, 0, d_time); 3314 return; 3315 } 3316 if (d_time < 1000) { 3317 /* Not enough time. wait */ 3318 /* 6 is not_enough time or no-loss */ 3319 bbr_log_type_ltbw(bbr, cts, 6, 0, 0, 0, d_time); 3320 return; 3321 } 3322 if (d_time >= (0xffffffff / USECS_IN_MSEC)) { 3323 /* Too long */ 3324 bbr_reset_lt_bw_sampling(bbr, cts); 3325 /* reason 3 is to reset sampling due too long of sampling */ 3326 bbr_log_type_ltbw(bbr, cts, 3, 0, 0, 0, d_time); 3327 return; 3328 } 3329 del_time = d_time; 3330 bw = delivered; 3331 bw *= (uint64_t)USECS_IN_SECOND; 3332 bw /= del_time; 3333 bbr_lt_bw_samp_done(bbr, bw, cts, d_time); 3334 } 3335 3336 /* 3337 * Allocate a sendmap from our zone. 3338 */ 3339 static struct bbr_sendmap * 3340 bbr_alloc(struct tcp_bbr *bbr) 3341 { 3342 struct bbr_sendmap *rsm; 3343 3344 BBR_STAT_INC(bbr_to_alloc); 3345 rsm = uma_zalloc(bbr_zone, (M_NOWAIT | M_ZERO)); 3346 if (rsm) { 3347 bbr->r_ctl.rc_num_maps_alloced++; 3348 return (rsm); 3349 } 3350 if (bbr->r_ctl.rc_free_cnt) { 3351 BBR_STAT_INC(bbr_to_alloc_emerg); 3352 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free); 3353 TAILQ_REMOVE(&bbr->r_ctl.rc_free, rsm, r_next); 3354 bbr->r_ctl.rc_free_cnt--; 3355 return (rsm); 3356 } 3357 BBR_STAT_INC(bbr_to_alloc_failed); 3358 return (NULL); 3359 } 3360 3361 static struct bbr_sendmap * 3362 bbr_alloc_full_limit(struct tcp_bbr *bbr) 3363 { 3364 if ((V_tcp_map_entries_limit > 0) && 3365 (bbr->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 3366 BBR_STAT_INC(bbr_alloc_limited); 3367 if (!bbr->alloc_limit_reported) { 3368 bbr->alloc_limit_reported = 1; 3369 BBR_STAT_INC(bbr_alloc_limited_conns); 3370 } 3371 return (NULL); 3372 } 3373 return (bbr_alloc(bbr)); 3374 } 3375 3376 3377 /* wrapper to allocate a sendmap entry, subject to a specific limit */ 3378 static struct bbr_sendmap * 3379 bbr_alloc_limit(struct tcp_bbr *bbr, uint8_t limit_type) 3380 { 3381 struct bbr_sendmap *rsm; 3382 3383 if (limit_type) { 3384 /* currently there is only one limit type */ 3385 if (V_tcp_map_split_limit > 0 && 3386 bbr->r_ctl.rc_num_split_allocs >= V_tcp_map_split_limit) { 3387 BBR_STAT_INC(bbr_split_limited); 3388 if (!bbr->alloc_limit_reported) { 3389 bbr->alloc_limit_reported = 1; 3390 BBR_STAT_INC(bbr_alloc_limited_conns); 3391 } 3392 return (NULL); 3393 } 3394 } 3395 3396 /* allocate and mark in the limit type, if set */ 3397 rsm = bbr_alloc(bbr); 3398 if (rsm != NULL && limit_type) { 3399 rsm->r_limit_type = limit_type; 3400 bbr->r_ctl.rc_num_split_allocs++; 3401 } 3402 return (rsm); 3403 } 3404 3405 static void 3406 bbr_free(struct tcp_bbr *bbr, struct bbr_sendmap *rsm) 3407 { 3408 if (rsm->r_limit_type) { 3409 /* currently there is only one limit type */ 3410 bbr->r_ctl.rc_num_split_allocs--; 3411 } 3412 if (rsm->r_is_smallmap) 3413 bbr->r_ctl.rc_num_small_maps_alloced--; 3414 if (bbr->r_ctl.rc_tlp_send == rsm) 3415 bbr->r_ctl.rc_tlp_send = NULL; 3416 if (bbr->r_ctl.rc_resend == rsm) { 3417 bbr->r_ctl.rc_resend = NULL; 3418 } 3419 if 
(bbr->r_ctl.rc_next == rsm) 3420 bbr->r_ctl.rc_next = NULL; 3421 if (bbr->r_ctl.rc_sacklast == rsm) 3422 bbr->r_ctl.rc_sacklast = NULL; 3423 if (bbr->r_ctl.rc_free_cnt < bbr_min_req_free) { 3424 memset(rsm, 0, sizeof(struct bbr_sendmap)); 3425 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next); 3426 rsm->r_limit_type = 0; 3427 bbr->r_ctl.rc_free_cnt++; 3428 return; 3429 } 3430 bbr->r_ctl.rc_num_maps_alloced--; 3431 uma_zfree(bbr_zone, rsm); 3432 } 3433 3434 /* 3435 * Returns the BDP. 3436 */ 3437 static uint64_t 3438 bbr_get_bw_delay_prod(uint64_t rtt, uint64_t bw) { 3439 /* 3440 * Calculate the bytes in flight needed given the bw (in bytes per 3441 * second) and the specifyed rtt in useconds. We need to put out the 3442 * returned value per RTT to match that rate. Gain will normaly 3443 * raise it up from there. 3444 * 3445 * This should not overflow as long as the bandwidth is below 1 3446 * TByte per second (bw < 10**12 = 2**40) and the rtt is smaller 3447 * than 1000 seconds (rtt < 10**3 * 10**6 = 10**9 = 2**30). 3448 */ 3449 uint64_t usec_per_sec; 3450 3451 usec_per_sec = USECS_IN_SECOND; 3452 return ((rtt * bw) / usec_per_sec); 3453 } 3454 3455 /* 3456 * Return the initial cwnd. 3457 */ 3458 static uint32_t 3459 bbr_initial_cwnd(struct tcp_bbr *bbr, struct tcpcb *tp) 3460 { 3461 uint32_t i_cwnd; 3462 3463 if (bbr->rc_init_win) { 3464 i_cwnd = bbr->rc_init_win * tp->t_maxseg; 3465 } else if (V_tcp_initcwnd_segments) 3466 i_cwnd = min((V_tcp_initcwnd_segments * tp->t_maxseg), 3467 max(2 * tp->t_maxseg, 14600)); 3468 else if (V_tcp_do_rfc3390) 3469 i_cwnd = min(4 * tp->t_maxseg, 3470 max(2 * tp->t_maxseg, 4380)); 3471 else { 3472 /* Per RFC5681 Section 3.1 */ 3473 if (tp->t_maxseg > 2190) 3474 i_cwnd = 2 * tp->t_maxseg; 3475 else if (tp->t_maxseg > 1095) 3476 i_cwnd = 3 * tp->t_maxseg; 3477 else 3478 i_cwnd = 4 * tp->t_maxseg; 3479 } 3480 return (i_cwnd); 3481 } 3482 3483 /* 3484 * Given a specified gain, return the target 3485 * cwnd based on that gain. 3486 */ 3487 static uint32_t 3488 bbr_get_raw_target_cwnd(struct tcp_bbr *bbr, uint32_t gain, uint64_t bw) 3489 { 3490 uint64_t bdp, rtt; 3491 uint32_t cwnd; 3492 3493 if ((get_filter_value_small(&bbr->r_ctl.rc_rttprop) == 0xffffffff) || 3494 (bbr_get_full_bw(bbr) == 0)) { 3495 /* No measurements yet */ 3496 return (bbr_initial_cwnd(bbr, bbr->rc_tp)); 3497 } 3498 /* 3499 * Get bytes per RTT needed (rttProp is normally in 3500 * bbr_cwndtarget_rtt_touse) 3501 */ 3502 rtt = bbr_get_rtt(bbr, bbr_cwndtarget_rtt_touse); 3503 /* Get the bdp from the two values */ 3504 bdp = bbr_get_bw_delay_prod(rtt, bw); 3505 /* Now apply the gain */ 3506 cwnd = (uint32_t)(((bdp * ((uint64_t)gain)) + (uint64_t)(BBR_UNIT - 1)) / ((uint64_t)BBR_UNIT)); 3507 3508 return (cwnd); 3509 } 3510 3511 static uint32_t 3512 bbr_get_target_cwnd(struct tcp_bbr *bbr, uint64_t bw, uint32_t gain) 3513 { 3514 uint32_t cwnd, mss; 3515 3516 mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), bbr->r_ctl.rc_pace_max_segs); 3517 /* Get the base cwnd with gain rounded to a mss */ 3518 cwnd = roundup(bbr_get_raw_target_cwnd(bbr, bw, gain), mss); 3519 /* 3520 * Add in N (2 default since we do not have a 3521 * fq layer to trap packets in) quanta's per the I-D 3522 * section 4.2.3.2 quanta adjust. 
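 * For a rough sense of scale: bw = 12.5 MB/s and an rttProp of
 * 40 ms give a BDP of about 500 kB, i.e. on the order of 345
 * segments of 1448 bytes at gain = BBR_UNIT, before the quanta
 * below are added.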
3523 */ 3524 cwnd += (bbr_quanta * bbr->r_ctl.rc_pace_max_segs); 3525 if (bbr->rc_use_google) { 3526 if((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) && 3527 (bbr_state_val(bbr) == BBR_SUB_GAIN)) { 3528 /* 3529 * The linux implementation adds 3530 * an extra 2 x mss in gain cycle which 3531 * is documented no-where except in the code. 3532 * so we add more for Neal undocumented feature 3533 */ 3534 cwnd += 2 * mss; 3535 } 3536 if ((cwnd / mss) & 0x1) { 3537 /* Round up for odd num mss */ 3538 cwnd += mss; 3539 } 3540 } 3541 /* Are we below the min cwnd? */ 3542 if (cwnd < get_min_cwnd(bbr)) 3543 return (get_min_cwnd(bbr)); 3544 return (cwnd); 3545 } 3546 3547 static uint16_t 3548 bbr_gain_adjust(struct tcp_bbr *bbr, uint16_t gain) 3549 { 3550 if (gain < 1) 3551 gain = 1; 3552 return (gain); 3553 } 3554 3555 static uint32_t 3556 bbr_get_header_oh(struct tcp_bbr *bbr) 3557 { 3558 int seg_oh; 3559 3560 seg_oh = 0; 3561 if (bbr->r_ctl.rc_inc_tcp_oh) { 3562 /* Do we include TCP overhead? */ 3563 seg_oh = (bbr->rc_last_options + sizeof(struct tcphdr)); 3564 } 3565 if (bbr->r_ctl.rc_inc_ip_oh) { 3566 /* Do we include IP overhead? */ 3567 #ifdef INET6 3568 if (bbr->r_is_v6) 3569 seg_oh += sizeof(struct ip6_hdr); 3570 else 3571 #endif 3572 #ifdef INET 3573 seg_oh += sizeof(struct ip); 3574 #endif 3575 } 3576 if (bbr->r_ctl.rc_inc_enet_oh) { 3577 /* Do we include the ethernet overhead? */ 3578 seg_oh += sizeof(struct ether_header); 3579 } 3580 return(seg_oh); 3581 } 3582 3583 3584 static uint32_t 3585 bbr_get_pacing_length(struct tcp_bbr *bbr, uint16_t gain, uint32_t useconds_time, uint64_t bw) 3586 { 3587 uint64_t divor, res, tim; 3588 3589 if (useconds_time == 0) 3590 return (0); 3591 gain = bbr_gain_adjust(bbr, gain); 3592 divor = (uint64_t)USECS_IN_SECOND * (uint64_t)BBR_UNIT; 3593 tim = useconds_time; 3594 res = (tim * bw * gain) / divor; 3595 if (res == 0) 3596 res = 1; 3597 return ((uint32_t)res); 3598 } 3599 3600 /* 3601 * Given a gain and a length return the delay in useconds that 3602 * should be used to evenly space out packets 3603 * on the connection (based on the gain factor). 3604 */ 3605 static uint32_t 3606 bbr_get_pacing_delay(struct tcp_bbr *bbr, uint16_t gain, int32_t len, uint32_t cts, int nolog) 3607 { 3608 uint64_t bw, lentim, res; 3609 uint32_t usecs, srtt, over = 0; 3610 uint32_t seg_oh, num_segs, maxseg; 3611 3612 if (len == 0) 3613 return (0); 3614 3615 maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options; 3616 num_segs = (len + maxseg - 1) / maxseg; 3617 if (bbr->rc_use_google == 0) { 3618 seg_oh = bbr_get_header_oh(bbr); 3619 len += (num_segs * seg_oh); 3620 } 3621 gain = bbr_gain_adjust(bbr, gain); 3622 bw = bbr_get_bw(bbr); 3623 if (bbr->rc_use_google) { 3624 uint64_t cbw; 3625 3626 /* 3627 * Reduce the b/w by the google discount 3628 * factor 10 = 1%. 3629 */ 3630 cbw = bw * (uint64_t)(1000 - bbr->r_ctl.bbr_google_discount); 3631 cbw /= (uint64_t)1000; 3632 /* We don't apply a discount if it results in 0 */ 3633 if (cbw > 0) 3634 bw = cbw; 3635 } 3636 lentim = ((uint64_t)len * 3637 (uint64_t)USECS_IN_SECOND * 3638 (uint64_t)BBR_UNIT); 3639 res = lentim / ((uint64_t)gain * bw); 3640 if (res == 0) 3641 res = 1; 3642 usecs = (uint32_t)res; 3643 srtt = bbr_get_rtt(bbr, BBR_SRTT); 3644 if (bbr_hptsi_max_mul && bbr_hptsi_max_div && 3645 (bbr->rc_use_google == 0) && 3646 (usecs > ((srtt * bbr_hptsi_max_mul) / bbr_hptsi_max_div))) { 3647 /* 3648 * We cannot let the delay be more than 1/2 the srtt time. 3649 * Otherwise we cannot pace out or send properly. 
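 * (Example: ignoring the header overhead and google discount
 * above, a 14480 byte send at bw = 1.25 MB/s and gain = BBR_UNIT
 * works out to roughly 11.6 ms of pacing delay.)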
3650 */ 3651 over = usecs = (srtt * bbr_hptsi_max_mul) / bbr_hptsi_max_div; 3652 BBR_STAT_INC(bbr_hpts_min_time); 3653 } 3654 if (!nolog) 3655 bbr_log_pacing_delay_calc(bbr, gain, len, cts, usecs, bw, over, 1); 3656 return (usecs); 3657 } 3658 3659 static void 3660 bbr_ack_received(struct tcpcb *tp, struct tcp_bbr *bbr, struct tcphdr *th, uint32_t bytes_this_ack, 3661 uint32_t sack_changed, uint32_t prev_acked, int32_t line, uint32_t losses) 3662 { 3663 INP_WLOCK_ASSERT(tp->t_inpcb); 3664 uint64_t bw; 3665 uint32_t cwnd, target_cwnd, saved_bytes, maxseg; 3666 int32_t meth; 3667 3668 #ifdef STATS 3669 if ((tp->t_flags & TF_GPUTINPROG) && 3670 SEQ_GEQ(th->th_ack, tp->gput_ack)) { 3671 /* 3672 * Strech acks and compressed acks will cause this to 3673 * oscillate but we are doing it the same way as the main 3674 * stack so it will be compariable (though possibly not 3675 * ideal). 3676 */ 3677 int32_t cgput; 3678 int64_t gput, time_stamp; 3679 3680 gput = (int64_t) (th->th_ack - tp->gput_seq) * 8; 3681 time_stamp = max(1, ((bbr->r_ctl.rc_rcvtime - tp->gput_ts) / 1000)); 3682 cgput = gput / time_stamp; 3683 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, 3684 cgput); 3685 if (tp->t_stats_gput_prev > 0) 3686 stats_voi_update_abs_s32(tp->t_stats, 3687 VOI_TCP_GPUT_ND, 3688 ((gput - tp->t_stats_gput_prev) * 100) / 3689 tp->t_stats_gput_prev); 3690 tp->t_flags &= ~TF_GPUTINPROG; 3691 tp->t_stats_gput_prev = cgput; 3692 } 3693 #endif 3694 if ((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) && 3695 ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google)) { 3696 /* We don't change anything in probe-rtt */ 3697 return; 3698 } 3699 maxseg = tp->t_maxseg - bbr->rc_last_options; 3700 saved_bytes = bytes_this_ack; 3701 bytes_this_ack += sack_changed; 3702 if (bytes_this_ack > prev_acked) { 3703 bytes_this_ack -= prev_acked; 3704 /* 3705 * A byte ack'd gives us a full mss 3706 * to be like linux i.e. they count packets. 3707 */ 3708 if ((bytes_this_ack < maxseg) && bbr->rc_use_google) 3709 bytes_this_ack = maxseg; 3710 } else { 3711 /* Unlikely */ 3712 bytes_this_ack = 0; 3713 } 3714 cwnd = tp->snd_cwnd; 3715 bw = get_filter_value(&bbr->r_ctl.rc_delrate); 3716 if (bw) 3717 target_cwnd = bbr_get_target_cwnd(bbr, 3718 bw, 3719 (uint32_t)bbr->r_ctl.rc_bbr_cwnd_gain); 3720 else 3721 target_cwnd = bbr_initial_cwnd(bbr, bbr->rc_tp); 3722 if (IN_RECOVERY(tp->t_flags) && 3723 (bbr->bbr_prev_in_rec == 0)) { 3724 /* 3725 * We are entering recovery and 3726 * thus packet conservation. 
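 * While packet conservation is on, cwnd is pinned to roughly the
 * bytes in flight plus the newly acked bytes, and it is released
 * once we have been in recovery for about one rttProp (below).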
3727 */ 3728 bbr->pkt_conservation = 1; 3729 bbr->r_ctl.rc_recovery_start = bbr->r_ctl.rc_rcvtime; 3730 cwnd = ctf_flight_size(tp, 3731 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) + 3732 bytes_this_ack; 3733 } 3734 if (IN_RECOVERY(tp->t_flags)) { 3735 uint32_t flight; 3736 3737 bbr->bbr_prev_in_rec = 1; 3738 if (cwnd > losses) { 3739 cwnd -= losses; 3740 if (cwnd < maxseg) 3741 cwnd = maxseg; 3742 } else 3743 cwnd = maxseg; 3744 flight = ctf_flight_size(tp, 3745 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 3746 bbr_log_type_cwndupd(bbr, flight, 0, 3747 losses, 10, 0, 0, line); 3748 if (bbr->pkt_conservation) { 3749 uint32_t time_in; 3750 3751 if (TSTMP_GEQ(bbr->r_ctl.rc_rcvtime, bbr->r_ctl.rc_recovery_start)) 3752 time_in = bbr->r_ctl.rc_rcvtime - bbr->r_ctl.rc_recovery_start; 3753 else 3754 time_in = 0; 3755 3756 if (time_in >= bbr_get_rtt(bbr, BBR_RTT_PROP)) { 3757 /* Clear packet conservation after an rttProp */ 3758 bbr->pkt_conservation = 0; 3759 } else { 3760 if ((flight + bytes_this_ack) > cwnd) 3761 cwnd = flight + bytes_this_ack; 3762 if (cwnd < get_min_cwnd(bbr)) 3763 cwnd = get_min_cwnd(bbr); 3764 tp->snd_cwnd = cwnd; 3765 bbr_log_type_cwndupd(bbr, saved_bytes, sack_changed, 3766 prev_acked, 1, target_cwnd, th->th_ack, line); 3767 return; 3768 } 3769 } 3770 } else 3771 bbr->bbr_prev_in_rec = 0; 3772 if ((bbr->rc_use_google == 0) && bbr->r_ctl.restrict_growth) { 3773 bbr->r_ctl.restrict_growth--; 3774 if (bytes_this_ack > maxseg) 3775 bytes_this_ack = maxseg; 3776 } 3777 if (bbr->rc_filled_pipe) { 3778 /* 3779 * Here we have exited startup and filled the pipe. We will 3780 * thus allow the cwnd to shrink to the target. We hit here 3781 * mostly. 3782 */ 3783 uint32_t s_cwnd; 3784 3785 meth = 2; 3786 s_cwnd = min((cwnd + bytes_this_ack), target_cwnd); 3787 if (s_cwnd > cwnd) 3788 cwnd = s_cwnd; 3789 else if (bbr_cwnd_may_shrink || bbr->rc_use_google || bbr->rc_no_pacing) 3790 cwnd = s_cwnd; 3791 } else { 3792 /* 3793 * Here we are still in startup, we increase cwnd by what 3794 * has been acked. 3795 */ 3796 if ((cwnd < target_cwnd) || 3797 (bbr->rc_past_init_win == 0)) { 3798 meth = 3; 3799 cwnd += bytes_this_ack; 3800 } else { 3801 /* 3802 * Method 4 means we are at target so no gain in 3803 * startup and past the initial window. 3804 */ 3805 meth = 4; 3806 } 3807 } 3808 tp->snd_cwnd = max(cwnd, get_min_cwnd(bbr)); 3809 bbr_log_type_cwndupd(bbr, saved_bytes, sack_changed, prev_acked, meth, target_cwnd, th->th_ack, line); 3810 } 3811 3812 static void 3813 tcp_bbr_partialack(struct tcpcb *tp) 3814 { 3815 struct tcp_bbr *bbr; 3816 3817 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 3818 INP_WLOCK_ASSERT(tp->t_inpcb); 3819 if (ctf_flight_size(tp, 3820 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <= 3821 tp->snd_cwnd) { 3822 bbr->r_wanted_output = 1; 3823 } 3824 } 3825 3826 static void 3827 bbr_post_recovery(struct tcpcb *tp) 3828 { 3829 struct tcp_bbr *bbr; 3830 uint32_t flight; 3831 3832 INP_WLOCK_ASSERT(tp->t_inpcb); 3833 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 3834 /* 3835 * Here we just exit recovery. 3836 */ 3837 EXIT_RECOVERY(tp->t_flags); 3838 /* Lock in our b/w reduction for the specified number of pkt-epochs */ 3839 bbr->r_recovery_bw = 0; 3840 tp->snd_recover = tp->snd_una; 3841 tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime); 3842 bbr->pkt_conservation = 0; 3843 if (bbr->rc_use_google == 0) { 3844 /* 3845 * For non-google mode lets 3846 * go ahead and make sure we clear 3847 * the recovery state so if we 3848 * bounce back in to recovery we 3849 * will do PC. 
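 * (PC here is the packet conservation phase applied in
 * bbr_ack_received() when we first enter recovery.)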
3850 */ 3851 bbr->bbr_prev_in_rec = 0; 3852 } 3853 bbr_log_type_exit_rec(bbr); 3854 if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) { 3855 tp->snd_cwnd = max(tp->snd_cwnd, bbr->r_ctl.rc_cwnd_on_ent); 3856 bbr_log_type_cwndupd(bbr, 0, 0, 0, 15, 0, 0, __LINE__); 3857 } else { 3858 /* For probe-rtt case lets fix up its saved_cwnd */ 3859 if (bbr->r_ctl.rc_saved_cwnd < bbr->r_ctl.rc_cwnd_on_ent) { 3860 bbr->r_ctl.rc_saved_cwnd = bbr->r_ctl.rc_cwnd_on_ent; 3861 bbr_log_type_cwndupd(bbr, 0, 0, 0, 16, 0, 0, __LINE__); 3862 } 3863 } 3864 flight = ctf_flight_size(tp, 3865 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 3866 if ((bbr->rc_use_google == 0) && 3867 bbr_do_red) { 3868 uint64_t val, lr2use; 3869 uint32_t maxseg, newcwnd, acks_inflight, ratio, cwnd; 3870 uint32_t *cwnd_p; 3871 3872 if (bbr_get_rtt(bbr, BBR_SRTT)) { 3873 val = ((uint64_t)bbr_get_rtt(bbr, BBR_RTT_PROP) * (uint64_t)1000); 3874 val /= bbr_get_rtt(bbr, BBR_SRTT); 3875 ratio = (uint32_t)val; 3876 } else 3877 ratio = 1000; 3878 3879 bbr_log_type_cwndupd(bbr, bbr_red_mul, bbr_red_div, 3880 bbr->r_ctl.recovery_lr, 21, 3881 ratio, 3882 bbr->r_ctl.rc_red_cwnd_pe, 3883 __LINE__); 3884 if ((ratio < bbr_do_red) || (bbr_do_red == 0)) 3885 goto done; 3886 if (((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) && 3887 bbr_prtt_slam_cwnd) || 3888 (bbr_sub_drain_slam_cwnd && 3889 (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) && 3890 bbr->rc_hit_state_1 && 3891 (bbr_state_val(bbr) == BBR_SUB_DRAIN)) || 3892 ((bbr->rc_bbr_state == BBR_STATE_DRAIN) && 3893 bbr_slam_cwnd_in_main_drain)) { 3894 /* 3895 * Here we must poke at the saved cwnd 3896 * as well as the cwnd. 3897 */ 3898 cwnd = bbr->r_ctl.rc_saved_cwnd; 3899 cwnd_p = &bbr->r_ctl.rc_saved_cwnd; 3900 } else { 3901 cwnd = tp->snd_cwnd; 3902 cwnd_p = &tp->snd_cwnd; 3903 } 3904 maxseg = tp->t_maxseg - bbr->rc_last_options; 3905 /* Add the overall lr with the recovery lr */ 3906 if (bbr->r_ctl.rc_lost == 0) 3907 lr2use = 0; 3908 else if (bbr->r_ctl.rc_delivered == 0) 3909 lr2use = 1000; 3910 else { 3911 lr2use = bbr->r_ctl.rc_lost * 1000; 3912 lr2use /= bbr->r_ctl.rc_delivered; 3913 } 3914 lr2use += bbr->r_ctl.recovery_lr; 3915 acks_inflight = (flight / (maxseg * 2)); 3916 if (bbr_red_scale) { 3917 lr2use *= bbr_get_rtt(bbr, BBR_SRTT); 3918 lr2use /= bbr_red_scale; 3919 if ((bbr_red_growth_restrict) && 3920 ((bbr_get_rtt(bbr, BBR_SRTT)/bbr_red_scale) > 1)) 3921 bbr->r_ctl.restrict_growth += acks_inflight; 3922 } 3923 if (lr2use) { 3924 val = (uint64_t)cwnd * lr2use; 3925 val /= 1000; 3926 if (cwnd > val) 3927 newcwnd = roundup((cwnd - val), maxseg); 3928 else 3929 newcwnd = maxseg; 3930 } else { 3931 val = (uint64_t)cwnd * (uint64_t)bbr_red_mul; 3932 val /= (uint64_t)bbr_red_div; 3933 newcwnd = roundup((uint32_t)val, maxseg); 3934 } 3935 /* with standard delayed acks how many acks can I expect? */ 3936 if (bbr_drop_limit == 0) { 3937 /* 3938 * Anticpate how much we will 3939 * raise the cwnd based on the acks. 
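 * With standard delayed acks we expect roughly one ack per
 * 2 * maxseg of data in flight, which is what acks_inflight
 * (computed above) estimates.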
3940 */ 3941 if ((newcwnd + (acks_inflight * maxseg)) < get_min_cwnd(bbr)) { 3942 /* We do enforce the min (with the acks) */ 3943 newcwnd = (get_min_cwnd(bbr) - acks_inflight); 3944 } 3945 } else { 3946 /* 3947 * A strict drop limit of N is is inplace 3948 */ 3949 if (newcwnd < (bbr_drop_limit * maxseg)) { 3950 newcwnd = bbr_drop_limit * maxseg; 3951 } 3952 } 3953 /* For the next N acks do we restrict the growth */ 3954 *cwnd_p = newcwnd; 3955 if (tp->snd_cwnd > newcwnd) 3956 tp->snd_cwnd = newcwnd; 3957 bbr_log_type_cwndupd(bbr, bbr_red_mul, bbr_red_div, val, 22, 3958 (uint32_t)lr2use, 3959 bbr_get_rtt(bbr, BBR_SRTT), __LINE__); 3960 bbr->r_ctl.rc_red_cwnd_pe = bbr->r_ctl.rc_pkt_epoch; 3961 } 3962 done: 3963 bbr->r_ctl.recovery_lr = 0; 3964 if (flight <= tp->snd_cwnd) { 3965 bbr->r_wanted_output = 1; 3966 } 3967 tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime); 3968 } 3969 3970 static void 3971 bbr_setup_red_bw(struct tcp_bbr *bbr, uint32_t cts) 3972 { 3973 bbr->r_ctl.red_bw = get_filter_value(&bbr->r_ctl.rc_delrate); 3974 /* Limit the drop in b/w to 1/2 our current filter. */ 3975 if (bbr->r_ctl.red_bw > bbr->r_ctl.rc_bbr_cur_del_rate) 3976 bbr->r_ctl.red_bw = bbr->r_ctl.rc_bbr_cur_del_rate; 3977 if (bbr->r_ctl.red_bw < (get_filter_value(&bbr->r_ctl.rc_delrate) / 2)) 3978 bbr->r_ctl.red_bw = get_filter_value(&bbr->r_ctl.rc_delrate) / 2; 3979 tcp_bbr_tso_size_check(bbr, cts); 3980 } 3981 3982 static void 3983 bbr_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type, struct bbr_sendmap *rsm) 3984 { 3985 struct tcp_bbr *bbr; 3986 3987 INP_WLOCK_ASSERT(tp->t_inpcb); 3988 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 3989 switch (type) { 3990 case CC_NDUPACK: 3991 if (!IN_RECOVERY(tp->t_flags)) { 3992 tp->snd_recover = tp->snd_max; 3993 /* Start a new epoch */ 3994 bbr_set_pktepoch(bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 3995 if (bbr->rc_lt_is_sampling || bbr->rc_lt_use_bw) { 3996 /* 3997 * Move forward the lt epoch 3998 * so it won't count the truncated 3999 * epoch. 4000 */ 4001 bbr->r_ctl.rc_lt_epoch++; 4002 } 4003 if (bbr->rc_bbr_state == BBR_STATE_STARTUP) { 4004 /* 4005 * Just like the policer detection code 4006 * if we are in startup we must push 4007 * forward the last startup epoch 4008 * to hide the truncated PE. 4009 */ 4010 bbr->r_ctl.rc_bbr_last_startup_epoch++; 4011 } 4012 bbr->r_ctl.rc_cwnd_on_ent = tp->snd_cwnd; 4013 ENTER_RECOVERY(tp->t_flags); 4014 bbr->rc_tlp_rtx_out = 0; 4015 bbr->r_ctl.recovery_lr = bbr->r_ctl.rc_pkt_epoch_loss_rate; 4016 tcp_bbr_tso_size_check(bbr, bbr->r_ctl.rc_rcvtime); 4017 if (bbr->rc_inp->inp_in_hpts && 4018 ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) == 0)) { 4019 /* 4020 * When we enter recovery, we need to restart 4021 * any timers. This may mean we gain an agg 4022 * early, which will be made up for at the last 4023 * rxt out. 4024 */ 4025 bbr->rc_timer_first = 1; 4026 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 4027 } 4028 /* 4029 * Calculate a new cwnd based on to the current 4030 * delivery rate with no gain. We get the bdp 4031 * without gaining it up like we normally would and 4032 * we use the last cur_del_rate. 
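 * (In practice the code below pins cwnd at the current flight
 * size plus one segment, gated at the minimum cwnd.)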
4033 */ 4034 if ((bbr->rc_use_google == 0) && 4035 (bbr->r_ctl.bbr_rttprobe_gain_val || 4036 (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT))) { 4037 tp->snd_cwnd = ctf_flight_size(tp, 4038 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) + 4039 (tp->t_maxseg - bbr->rc_last_options); 4040 if (tp->snd_cwnd < get_min_cwnd(bbr)) { 4041 /* We always gate to min cwnd */ 4042 tp->snd_cwnd = get_min_cwnd(bbr); 4043 } 4044 bbr_log_type_cwndupd(bbr, 0, 0, 0, 14, 0, 0, __LINE__); 4045 } 4046 bbr_log_type_enter_rec(bbr, rsm->r_start); 4047 } 4048 break; 4049 case CC_RTO_ERR: 4050 KMOD_TCPSTAT_INC(tcps_sndrexmitbad); 4051 /* RTO was unnecessary, so reset everything. */ 4052 bbr_reset_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime); 4053 if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) { 4054 tp->snd_cwnd = tp->snd_cwnd_prev; 4055 tp->snd_ssthresh = tp->snd_ssthresh_prev; 4056 tp->snd_recover = tp->snd_recover_prev; 4057 tp->snd_cwnd = max(tp->snd_cwnd, bbr->r_ctl.rc_cwnd_on_ent); 4058 bbr_log_type_cwndupd(bbr, 0, 0, 0, 13, 0, 0, __LINE__); 4059 } 4060 tp->t_badrxtwin = 0; 4061 break; 4062 } 4063 } 4064 4065 /* 4066 * Indicate whether this ack should be delayed. We can delay the ack if 4067 * following conditions are met: 4068 * - There is no delayed ack timer in progress. 4069 * - Our last ack wasn't a 0-sized window. We never want to delay 4070 * the ack that opens up a 0-sized window. 4071 * - LRO wasn't used for this segment. We make sure by checking that the 4072 * segment size is not larger than the MSS. 4073 * - Delayed acks are enabled or this is a half-synchronized T/TCP 4074 * connection. 4075 * - The data being acked is less than a full segment (a stretch ack 4076 * of more than a segment we should ack. 4077 * - nsegs is 1 (if its more than that we received more than 1 ack). 4078 */ 4079 #define DELAY_ACK(tp, bbr, nsegs) \ 4080 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \ 4081 ((tp->t_flags & TF_DELACK) == 0) && \ 4082 ((bbr->bbr_segs_rcvd + nsegs) < tp->t_delayed_ack) && \ 4083 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN))) 4084 4085 /* 4086 * Return the lowest RSM in the map of 4087 * packets still in flight that is not acked. 4088 * This should normally find on the first one 4089 * since we remove packets from the send 4090 * map after they are marked ACKED. 4091 */ 4092 static struct bbr_sendmap * 4093 bbr_find_lowest_rsm(struct tcp_bbr *bbr) 4094 { 4095 struct bbr_sendmap *rsm; 4096 4097 /* 4098 * Walk the time-order transmitted list looking for an rsm that is 4099 * not acked. This will be the one that was sent the longest time 4100 * ago that is still outstanding. 4101 */ 4102 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_tmap, r_tnext) { 4103 if (rsm->r_flags & BBR_ACKED) { 4104 continue; 4105 } 4106 goto finish; 4107 } 4108 finish: 4109 return (rsm); 4110 } 4111 4112 static struct bbr_sendmap * 4113 bbr_find_high_nonack(struct tcp_bbr *bbr, struct bbr_sendmap *rsm) 4114 { 4115 struct bbr_sendmap *prsm; 4116 4117 /* 4118 * Walk the sequence order list backward until we hit and arrive at 4119 * the highest seq not acked. In theory when this is called it 4120 * should be the last segment (which it was not). 4121 */ 4122 prsm = rsm; 4123 TAILQ_FOREACH_REVERSE_FROM(prsm, &bbr->r_ctl.rc_map, bbr_head, r_next) { 4124 if (prsm->r_flags & (BBR_ACKED | BBR_HAS_FIN)) { 4125 continue; 4126 } 4127 return (prsm); 4128 } 4129 return (NULL); 4130 } 4131 4132 /* 4133 * Returns to the caller the number of microseconds that 4134 * the packet can be outstanding before we think we 4135 * should have had an ack returned. 
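 * Roughly: srtt plus the configured per-packet delay, plus either
 * a reordering allowance (srtt >> reorder_shift) when reordering
 * has been seen or a fixed 1000 usec pad when it has not, capped
 * at the current RTO and the RTO maximum.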
4136 */ 4137 static uint32_t 4138 bbr_calc_thresh_rack(struct tcp_bbr *bbr, uint32_t srtt, uint32_t cts, struct bbr_sendmap *rsm) 4139 { 4140 /* 4141 * lro is the flag we use to determine if we have seen reordering. 4142 * If it gets set we have seen reordering. The reorder logic 4143 * works in one of two ways: 4144 * 4145 * If reorder-fade is configured, then we track the last time we saw 4146 * re-ordering occur. If we reach the point where enough time has 4147 * passed we no longer consider reordering to be occurring. 4148 * 4149 * Or if reorder-fade is 0, then once we see reordering we consider 4150 * the connection to always be subject to reordering and just set lro 4151 * to 1. 4152 * 4153 * In the end if lro is non-zero we add the extra time for 4154 * reordering in. 4155 */ 4156 int32_t lro; 4157 uint32_t thresh, t_rxtcur; 4158 4159 if (srtt == 0) 4160 srtt = 1; 4161 if (bbr->r_ctl.rc_reorder_ts) { 4162 if (bbr->r_ctl.rc_reorder_fade) { 4163 if (SEQ_GEQ(cts, bbr->r_ctl.rc_reorder_ts)) { 4164 lro = cts - bbr->r_ctl.rc_reorder_ts; 4165 if (lro == 0) { 4166 /* 4167 * No time has passed since the last 4168 * reorder, mark it as reordering. 4169 */ 4170 lro = 1; 4171 } 4172 } else { 4173 /* Negative time? */ 4174 lro = 0; 4175 } 4176 if (lro > bbr->r_ctl.rc_reorder_fade) { 4177 /* Turn off reordering seen too */ 4178 bbr->r_ctl.rc_reorder_ts = 0; 4179 lro = 0; 4180 } 4181 } else { 4182 /* Reordering does not fade */ 4183 lro = 1; 4184 } 4185 } else { 4186 lro = 0; 4187 } 4188 thresh = srtt + bbr->r_ctl.rc_pkt_delay; 4189 if (lro) { 4190 /* If the reorder shift is not set you get 1/4 rtt */ 4191 if (bbr->r_ctl.rc_reorder_shift) 4192 thresh += (srtt >> bbr->r_ctl.rc_reorder_shift); 4193 else 4194 thresh += (srtt >> 2); 4195 } else { 4196 thresh += 1000; 4197 } 4198 /* We don't let the rack timeout be above an RTO */ 4199 if ((bbr->rc_tp)->t_srtt == 0) 4200 t_rxtcur = BBR_INITIAL_RTO; 4201 else 4202 t_rxtcur = TICKS_2_USEC(bbr->rc_tp->t_rxtcur); 4203 if (thresh > t_rxtcur) { 4204 thresh = t_rxtcur; 4205 } 4206 /* And we don't want it above the RTO max either */ 4207 if (thresh > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) { 4208 thresh = (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND); 4209 } 4210 bbr_log_thresh_choice(bbr, cts, thresh, lro, srtt, rsm, BBR_TO_FRM_RACK); 4211 return (thresh); 4212 } 4213 4214 /* 4215 * Return to the caller the amount of time in microseconds 4216 * that should be used for the TLP timer from the last 4217 * send time of this packet. 4218 */ 4219 static uint32_t 4220 bbr_calc_thresh_tlp(struct tcpcb *tp, struct tcp_bbr *bbr, 4221 struct bbr_sendmap *rsm, uint32_t srtt, 4222 uint32_t cts) 4223 { 4224 uint32_t thresh, len, maxseg, t_rxtcur; 4225 struct bbr_sendmap *prsm; 4226 4227 if (srtt == 0) 4228 srtt = 1; 4229 if (bbr->rc_tlp_threshold) 4230 thresh = srtt + (srtt / bbr->rc_tlp_threshold); 4231 else 4232 thresh = (srtt * 2); 4233 maxseg = tp->t_maxseg - bbr->rc_last_options; 4234 /* Get the previous sent packet, if any */ 4235 len = rsm->r_end - rsm->r_start; 4236 4237 /* 2.1 behavior */ 4238 prsm = TAILQ_PREV(rsm, bbr_head, r_tnext); 4239 if (prsm && (len <= maxseg)) { 4240 /* 4241 * Two packets outstanding, thresh should be (2*srtt) + 4242 * possible inter-packet delay (if any).
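 * (E.g. with rc_tlp_threshold unset, srtt = 50 ms and the two
 * sends 3 ms apart, the TLP threshold starts at 103 ms before the
 * RTO caps and the TLP minimum below are applied.)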
4243 */ 4244 uint32_t inter_gap = 0; 4245 int idx, nidx; 4246 4247 idx = rsm->r_rtr_cnt - 1; 4248 nidx = prsm->r_rtr_cnt - 1; 4249 if (TSTMP_GEQ(rsm->r_tim_lastsent[nidx], prsm->r_tim_lastsent[idx])) { 4250 /* Yes it was sent later (or at the same time) */ 4251 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; 4252 } 4253 thresh += inter_gap; 4254 } else if (len <= maxseg) { 4255 /* 4256 * Possibly compensate for delayed-ack. 4257 */ 4258 uint32_t alt_thresh; 4259 4260 alt_thresh = srtt + (srtt / 2) + bbr_delayed_ack_time; 4261 if (alt_thresh > thresh) 4262 thresh = alt_thresh; 4263 } 4264 /* Not above the current RTO */ 4265 if (tp->t_srtt == 0) 4266 t_rxtcur = BBR_INITIAL_RTO; 4267 else 4268 t_rxtcur = TICKS_2_USEC(tp->t_rxtcur); 4269 4270 bbr_log_thresh_choice(bbr, cts, thresh, t_rxtcur, srtt, rsm, BBR_TO_FRM_TLP); 4271 /* Not above an RTO */ 4272 if (thresh > t_rxtcur) { 4273 thresh = t_rxtcur; 4274 } 4275 /* Not above a RTO max */ 4276 if (thresh > (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND)) { 4277 thresh = (((uint32_t)bbr->rc_max_rto_sec) * USECS_IN_SECOND); 4278 } 4279 /* And now apply the user TLP min */ 4280 if (thresh < bbr_tlp_min) { 4281 thresh = bbr_tlp_min; 4282 } 4283 return (thresh); 4284 } 4285 4286 /* 4287 * Return one of three RTTs to use (in microseconds). 4288 */ 4289 static __inline uint32_t 4290 bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type) 4291 { 4292 uint32_t f_rtt; 4293 uint32_t srtt; 4294 4295 f_rtt = get_filter_value_small(&bbr->r_ctl.rc_rttprop); 4296 if (get_filter_value_small(&bbr->r_ctl.rc_rttprop) == 0xffffffff) { 4297 /* We have no rtt at all */ 4298 if (bbr->rc_tp->t_srtt == 0) 4299 f_rtt = BBR_INITIAL_RTO; 4300 else 4301 f_rtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT); 4302 /* 4303 * Since we don't know how good the rtt is apply a 4304 * delayed-ack min 4305 */ 4306 if (f_rtt < bbr_delayed_ack_time) { 4307 f_rtt = bbr_delayed_ack_time; 4308 } 4309 } 4310 /* Take the filter version or last measured pkt-rtt */ 4311 if (rtt_type == BBR_RTT_PROP) { 4312 srtt = f_rtt; 4313 } else if (rtt_type == BBR_RTT_PKTRTT) { 4314 if (bbr->r_ctl.rc_pkt_epoch_rtt) { 4315 srtt = bbr->r_ctl.rc_pkt_epoch_rtt; 4316 } else { 4317 /* No pkt rtt yet */ 4318 srtt = f_rtt; 4319 } 4320 } else if (rtt_type == BBR_RTT_RACK) { 4321 srtt = bbr->r_ctl.rc_last_rtt; 4322 /* We need to add in any internal delay for our timer */ 4323 if (bbr->rc_ack_was_delayed) 4324 srtt += bbr->r_ctl.rc_ack_hdwr_delay; 4325 } else if (rtt_type == BBR_SRTT) { 4326 srtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT); 4327 } else { 4328 /* TSNH */ 4329 srtt = f_rtt; 4330 #ifdef BBR_INVARIANTS 4331 panic("Unknown rtt request type %d", rtt_type); 4332 #endif 4333 } 4334 return (srtt); 4335 } 4336 4337 static int 4338 bbr_is_lost(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t cts) 4339 { 4340 uint32_t thresh; 4341 4342 4343 thresh = bbr_calc_thresh_rack(bbr, bbr_get_rtt(bbr, BBR_RTT_RACK), 4344 cts, rsm); 4345 if ((cts - rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) >= thresh) { 4346 /* It is lost (past time) */ 4347 return (1); 4348 } 4349 return (0); 4350 } 4351 4352 /* 4353 * Return a sendmap if we need to retransmit something. 4354 */ 4355 static struct bbr_sendmap * 4356 bbr_check_recovery_mode(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4357 { 4358 /* 4359 * Check to see that we don't need to fall into recovery. We will 4360 * need to do so if our oldest transmit is past the time we should 4361 * have had an ack. 
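 * Concretely: the oldest outstanding rsm is older than the rack
 * threshold and has either seen enough duplicate acks or been
 * passed by a SACK.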
4362 */ 4363 4364 struct bbr_sendmap *rsm; 4365 int32_t idx; 4366 4367 if (TAILQ_EMPTY(&bbr->r_ctl.rc_map)) { 4368 /* Nothing outstanding that we know of */ 4369 return (NULL); 4370 } 4371 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 4372 if (rsm == NULL) { 4373 /* Nothing in the transmit map */ 4374 return (NULL); 4375 } 4376 if (tp->t_flags & TF_SENTFIN) { 4377 /* Fin restricted, don't find anything once a fin is sent */ 4378 return (NULL); 4379 } 4380 if (rsm->r_flags & BBR_ACKED) { 4381 /* 4382 * Ok the first one is acked (this really should not happen 4383 * since we remove the from the tmap once they are acked) 4384 */ 4385 rsm = bbr_find_lowest_rsm(bbr); 4386 if (rsm == NULL) 4387 return (NULL); 4388 } 4389 idx = rsm->r_rtr_cnt - 1; 4390 if (SEQ_LEQ(cts, rsm->r_tim_lastsent[idx])) { 4391 /* Send timestamp is the same or less? can't be ready */ 4392 return (NULL); 4393 } 4394 /* Get our RTT time */ 4395 if (bbr_is_lost(bbr, rsm, cts) && 4396 ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || 4397 (rsm->r_flags & BBR_SACK_PASSED))) { 4398 if ((rsm->r_flags & BBR_MARKED_LOST) == 0) { 4399 rsm->r_flags |= BBR_MARKED_LOST; 4400 bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start; 4401 bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start; 4402 } 4403 bbr_cong_signal(tp, NULL, CC_NDUPACK, rsm); 4404 #ifdef BBR_INVARIANTS 4405 if ((rsm->r_end - rsm->r_start) == 0) 4406 panic("tp:%p bbr:%p rsm:%p length is 0?", tp, bbr, rsm); 4407 #endif 4408 return (rsm); 4409 } 4410 return (NULL); 4411 } 4412 4413 /* 4414 * RACK Timer, here we simply do logging and house keeping. 4415 * the normal bbr_output_wtime() function will call the 4416 * appropriate thing to check if we need to do a RACK retransmit. 4417 * We return 1, saying don't proceed with bbr_output_wtime only 4418 * when all timers have been stopped (destroyed PCB?). 4419 */ 4420 static int 4421 bbr_timeout_rack(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4422 { 4423 /* 4424 * This timer simply provides an internal trigger to send out data. 4425 * The check_recovery_mode call will see if there are needed 4426 * retransmissions, if so we will enter fast-recovery. The output 4427 * call may or may not do the same thing depending on sysctl 4428 * settings. 
4429 */ 4430 uint32_t lost; 4431 4432 if (bbr->rc_all_timers_stopped) { 4433 return (1); 4434 } 4435 if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) { 4436 /* Its not time yet */ 4437 return (0); 4438 } 4439 BBR_STAT_INC(bbr_to_tot); 4440 lost = bbr->r_ctl.rc_lost; 4441 if (bbr->r_state && (bbr->r_state != tp->t_state)) 4442 bbr_set_state(tp, bbr, 0); 4443 bbr_log_to_event(bbr, cts, BBR_TO_FRM_RACK); 4444 if (bbr->r_ctl.rc_resend == NULL) { 4445 /* Lets do the check here */ 4446 bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts); 4447 } 4448 if (bbr_policer_call_from_rack_to) 4449 bbr_lt_bw_sampling(bbr, cts, (bbr->r_ctl.rc_lost > lost)); 4450 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; 4451 return (0); 4452 } 4453 4454 static __inline void 4455 bbr_clone_rsm(struct tcp_bbr *bbr, struct bbr_sendmap *nrsm, struct bbr_sendmap *rsm, uint32_t start) 4456 { 4457 int idx; 4458 4459 nrsm->r_start = start; 4460 nrsm->r_end = rsm->r_end; 4461 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; 4462 nrsm->r_flags = rsm->r_flags; 4463 /* We don't transfer forward the SYN flag */ 4464 nrsm->r_flags &= ~BBR_HAS_SYN; 4465 /* We move forward the FIN flag, not that this should happen */ 4466 rsm->r_flags &= ~BBR_HAS_FIN; 4467 nrsm->r_dupack = rsm->r_dupack; 4468 nrsm->r_rtr_bytes = 0; 4469 nrsm->r_is_gain = rsm->r_is_gain; 4470 nrsm->r_is_drain = rsm->r_is_drain; 4471 nrsm->r_delivered = rsm->r_delivered; 4472 nrsm->r_ts_valid = rsm->r_ts_valid; 4473 nrsm->r_del_ack_ts = rsm->r_del_ack_ts; 4474 nrsm->r_del_time = rsm->r_del_time; 4475 nrsm->r_app_limited = rsm->r_app_limited; 4476 nrsm->r_first_sent_time = rsm->r_first_sent_time; 4477 nrsm->r_flight_at_send = rsm->r_flight_at_send; 4478 /* We split a piece the lower section looses any just_ret flag. */ 4479 nrsm->r_bbr_state = rsm->r_bbr_state; 4480 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { 4481 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; 4482 } 4483 rsm->r_end = nrsm->r_start; 4484 idx = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), bbr->r_ctl.rc_pace_max_segs); 4485 idx /= 8; 4486 /* Check if we got too small */ 4487 if ((rsm->r_is_smallmap == 0) && 4488 ((rsm->r_end - rsm->r_start) <= idx)) { 4489 bbr->r_ctl.rc_num_small_maps_alloced++; 4490 rsm->r_is_smallmap = 1; 4491 } 4492 /* Check the new one as well */ 4493 if ((nrsm->r_end - nrsm->r_start) <= idx) { 4494 bbr->r_ctl.rc_num_small_maps_alloced++; 4495 nrsm->r_is_smallmap = 1; 4496 } 4497 } 4498 4499 static int 4500 bbr_sack_mergable(struct bbr_sendmap *at, 4501 uint32_t start, uint32_t end) 4502 { 4503 /* 4504 * Given a sack block defined by 4505 * start and end, and a current postion 4506 * at. Return 1 if either side of at 4507 * would show that the block is mergable 4508 * to that side. A block to be mergable 4509 * must have overlap with the start/end 4510 * and be in the SACK'd state. 
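 * (Plain adjacency counts as overlap here: l_rsm->r_end == start
 * or r_rsm->r_start == end, as the diagrams below show.)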
4511 */ 4512 struct bbr_sendmap *l_rsm; 4513 struct bbr_sendmap *r_rsm; 4514 4515 /* first get the either side blocks */ 4516 l_rsm = TAILQ_PREV(at, bbr_head, r_next); 4517 r_rsm = TAILQ_NEXT(at, r_next); 4518 if (l_rsm && (l_rsm->r_flags & BBR_ACKED)) { 4519 /* Potentially mergeable */ 4520 if ((l_rsm->r_end == start) || 4521 (SEQ_LT(start, l_rsm->r_end) && 4522 SEQ_GT(end, l_rsm->r_end))) { 4523 /* 4524 * map blk |------| 4525 * sack blk |------| 4526 * <or> 4527 * map blk |------| 4528 * sack blk |------| 4529 */ 4530 return (1); 4531 } 4532 } 4533 if (r_rsm && (r_rsm->r_flags & BBR_ACKED)) { 4534 /* Potentially mergeable */ 4535 if ((r_rsm->r_start == end) || 4536 (SEQ_LT(start, r_rsm->r_start) && 4537 SEQ_GT(end, r_rsm->r_start))) { 4538 /* 4539 * map blk |---------| 4540 * sack blk |----| 4541 * <or> 4542 * map blk |---------| 4543 * sack blk |-------| 4544 */ 4545 return (1); 4546 } 4547 } 4548 return (0); 4549 } 4550 4551 static struct bbr_sendmap * 4552 bbr_merge_rsm(struct tcp_bbr *bbr, 4553 struct bbr_sendmap *l_rsm, 4554 struct bbr_sendmap *r_rsm) 4555 { 4556 /* 4557 * We are merging two ack'd RSM's, 4558 * the l_rsm is on the left (lower seq 4559 * values) and the r_rsm is on the right 4560 * (higher seq value). The simplest way 4561 * to merge these is to move the right 4562 * one into the left. I don't think there 4563 * is any reason we need to try to find 4564 * the oldest (or last oldest retransmitted). 4565 */ 4566 l_rsm->r_end = r_rsm->r_end; 4567 if (l_rsm->r_dupack < r_rsm->r_dupack) 4568 l_rsm->r_dupack = r_rsm->r_dupack; 4569 if (r_rsm->r_rtr_bytes) 4570 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; 4571 if (r_rsm->r_in_tmap) { 4572 /* This really should not happen */ 4573 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, r_rsm, r_tnext); 4574 } 4575 if (r_rsm->r_app_limited) 4576 l_rsm->r_app_limited = r_rsm->r_app_limited; 4577 /* Now the flags */ 4578 if (r_rsm->r_flags & BBR_HAS_FIN) 4579 l_rsm->r_flags |= BBR_HAS_FIN; 4580 if (r_rsm->r_flags & BBR_TLP) 4581 l_rsm->r_flags |= BBR_TLP; 4582 if (r_rsm->r_flags & BBR_RWND_COLLAPSED) 4583 l_rsm->r_flags |= BBR_RWND_COLLAPSED; 4584 if (r_rsm->r_flags & BBR_MARKED_LOST) { 4585 /* This really should not happen */ 4586 bbr->r_ctl.rc_lost_bytes -= r_rsm->r_end - r_rsm->r_start; 4587 } 4588 TAILQ_REMOVE(&bbr->r_ctl.rc_map, r_rsm, r_next); 4589 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { 4590 /* Transfer the split limit to the map we free */ 4591 r_rsm->r_limit_type = l_rsm->r_limit_type; 4592 l_rsm->r_limit_type = 0; 4593 } 4594 bbr_free(bbr, r_rsm); 4595 return(l_rsm); 4596 } 4597 4598 /* 4599 * TLP Timer, here we simply setup what segment we want to 4600 * have the TLP expire on, the normal bbr_output_wtime() will then 4601 * send it out. 4602 * 4603 * We return 1, saying don't proceed with bbr_output_wtime only 4604 * when all timers have been stopped (destroyed PCB?). 4605 */ 4606 static int 4607 bbr_timeout_tlp(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4608 { 4609 /* 4610 * Tail Loss Probe. 
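 * If the window allows we send one new segment; otherwise we
 * retransmit the last (or highest non-acked) segment so the peer
 * will ack or SACK it and reveal any tail loss.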
4611 */ 4612 struct bbr_sendmap *rsm = NULL; 4613 struct socket *so; 4614 uint32_t amm; 4615 uint32_t out, avail; 4616 uint32_t maxseg; 4617 int collapsed_win = 0; 4618 4619 if (bbr->rc_all_timers_stopped) { 4620 return (1); 4621 } 4622 if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) { 4623 /* Its not time yet */ 4624 return (0); 4625 } 4626 if (ctf_progress_timeout_check(tp, true)) { 4627 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 4628 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 4629 return (1); 4630 } 4631 /* Did we somehow get into persists? */ 4632 if (bbr->rc_in_persist) { 4633 return (0); 4634 } 4635 if (bbr->r_state && (bbr->r_state != tp->t_state)) 4636 bbr_set_state(tp, bbr, 0); 4637 BBR_STAT_INC(bbr_tlp_tot); 4638 maxseg = tp->t_maxseg - bbr->rc_last_options; 4639 #ifdef KERN_TLS 4640 if (bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { 4641 /* 4642 * For hardware TLS we do *not* want to send 4643 * new data. 4644 */ 4645 goto need_retran; 4646 } 4647 #endif 4648 /* 4649 * A TLP timer has expired. We have been idle for 2 rtts. So we now 4650 * need to figure out how to force a full MSS segment out. 4651 */ 4652 so = tp->t_inpcb->inp_socket; 4653 avail = sbavail(&so->so_snd); 4654 out = ctf_outstanding(tp); 4655 if (out > tp->snd_wnd) { 4656 /* special case, we need a retransmission */ 4657 collapsed_win = 1; 4658 goto need_retran; 4659 } 4660 if (avail > out) { 4661 /* New data is available */ 4662 amm = avail - out; 4663 if (amm > maxseg) { 4664 amm = maxseg; 4665 } else if ((amm < maxseg) && ((tp->t_flags & TF_NODELAY) == 0)) { 4666 /* not enough to fill a MTU and no-delay is off */ 4667 goto need_retran; 4668 } 4669 /* Set the send-new override */ 4670 if ((out + amm) <= tp->snd_wnd) { 4671 bbr->rc_tlp_new_data = 1; 4672 } else { 4673 goto need_retran; 4674 } 4675 bbr->r_ctl.rc_tlp_seg_send_cnt = 0; 4676 bbr->r_ctl.rc_last_tlp_seq = tp->snd_max; 4677 bbr->r_ctl.rc_tlp_send = NULL; 4678 /* cap any slots */ 4679 BBR_STAT_INC(bbr_tlp_newdata); 4680 goto send; 4681 } 4682 need_retran: 4683 /* 4684 * Ok we need to arrange the last un-acked segment to be re-sent, or 4685 * optionally the first un-acked segment. 4686 */ 4687 if (collapsed_win == 0) { 4688 rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next); 4689 if (rsm && (BBR_ACKED | BBR_HAS_FIN)) { 4690 rsm = bbr_find_high_nonack(bbr, rsm); 4691 } 4692 if (rsm == NULL) { 4693 goto restore; 4694 } 4695 } else { 4696 /* 4697 * We must find the last segment 4698 * that was acceptable by the client. 4699 */ 4700 TAILQ_FOREACH_REVERSE(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) { 4701 if ((rsm->r_flags & BBR_RWND_COLLAPSED) == 0) { 4702 /* Found one */ 4703 break; 4704 } 4705 } 4706 if (rsm == NULL) { 4707 /* None? if so send the first */ 4708 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 4709 if (rsm == NULL) 4710 goto restore; 4711 } 4712 } 4713 if ((rsm->r_end - rsm->r_start) > maxseg) { 4714 /* 4715 * We need to split this the last segment in two. 4716 */ 4717 struct bbr_sendmap *nrsm; 4718 4719 nrsm = bbr_alloc_full_limit(bbr); 4720 if (nrsm == NULL) { 4721 /* 4722 * We can't get memory to split, we can either just 4723 * not split it. Or retransmit the whole piece, lets 4724 * do the large send (BTLP :-) ). 
4725 */ 4726 goto go_for_it; 4727 } 4728 bbr_clone_rsm(bbr, nrsm, rsm, (rsm->r_end - maxseg)); 4729 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 4730 if (rsm->r_in_tmap) { 4731 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 4732 nrsm->r_in_tmap = 1; 4733 } 4734 rsm->r_flags &= (~BBR_HAS_FIN); 4735 rsm = nrsm; 4736 } 4737 go_for_it: 4738 bbr->r_ctl.rc_tlp_send = rsm; 4739 bbr->rc_tlp_rtx_out = 1; 4740 if (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq) { 4741 bbr->r_ctl.rc_tlp_seg_send_cnt++; 4742 tp->t_rxtshift++; 4743 } else { 4744 bbr->r_ctl.rc_last_tlp_seq = rsm->r_start; 4745 bbr->r_ctl.rc_tlp_seg_send_cnt = 1; 4746 } 4747 send: 4748 if (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend) { 4749 /* 4750 * Can't [re]/transmit a segment we have retranmitted the 4751 * max times. We need the retransmit timer to take over. 4752 */ 4753 restore: 4754 bbr->rc_tlp_new_data = 0; 4755 bbr->r_ctl.rc_tlp_send = NULL; 4756 if (rsm) 4757 rsm->r_flags &= ~BBR_TLP; 4758 BBR_STAT_INC(bbr_tlp_retran_fail); 4759 return (0); 4760 } else if (rsm) { 4761 rsm->r_flags |= BBR_TLP; 4762 } 4763 if (rsm && (rsm->r_start == bbr->r_ctl.rc_last_tlp_seq) && 4764 (bbr->r_ctl.rc_tlp_seg_send_cnt > bbr_tlp_max_resend)) { 4765 /* 4766 * We have retransmitted to many times for TLP. Switch to 4767 * the regular RTO timer 4768 */ 4769 goto restore; 4770 } 4771 bbr_log_to_event(bbr, cts, BBR_TO_FRM_TLP); 4772 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; 4773 return (0); 4774 } 4775 4776 /* 4777 * Delayed ack Timer, here we simply need to setup the 4778 * ACK_NOW flag and remove the DELACK flag. From there 4779 * the output routine will send the ack out. 4780 * 4781 * We only return 1, saying don't proceed, if all timers 4782 * are stopped (destroyed PCB?). 4783 */ 4784 static int 4785 bbr_timeout_delack(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4786 { 4787 if (bbr->rc_all_timers_stopped) { 4788 return (1); 4789 } 4790 bbr_log_to_event(bbr, cts, BBR_TO_FRM_DELACK); 4791 tp->t_flags &= ~TF_DELACK; 4792 tp->t_flags |= TF_ACKNOW; 4793 KMOD_TCPSTAT_INC(tcps_delack); 4794 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; 4795 return (0); 4796 } 4797 4798 /* 4799 * Here we send a KEEP-ALIVE like probe to the 4800 * peer, we do not send data. 4801 * 4802 * We only return 1, saying don't proceed, if all timers 4803 * are stopped (destroyed PCB?). 4804 */ 4805 static int 4806 bbr_timeout_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4807 { 4808 struct tcptemp *t_template; 4809 int32_t retval = 1; 4810 4811 if (bbr->rc_all_timers_stopped) { 4812 return (1); 4813 } 4814 if (bbr->rc_in_persist == 0) 4815 return (0); 4816 KASSERT(tp->t_inpcb != NULL, 4817 ("%s: tp %p tp->t_inpcb == NULL", __func__, tp)); 4818 /* 4819 * Persistence timer into zero window. Force a byte to be output, if 4820 * possible. 4821 */ 4822 bbr_log_to_event(bbr, cts, BBR_TO_FRM_PERSIST); 4823 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; 4824 KMOD_TCPSTAT_INC(tcps_persisttimeo); 4825 /* 4826 * Have we exceeded the user specified progress time? 4827 */ 4828 if (ctf_progress_timeout_check(tp, true)) { 4829 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 4830 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 4831 goto out; 4832 } 4833 /* 4834 * Hack: if the peer is dead/unreachable, we do not time out if the 4835 * window is closed. After a full backoff, drop the connection if 4836 * the idle time (no responses to probes) reaches the maximum 4837 * backoff that we would use if retransmitting. 
4838 */ 4839 if (tp->t_rxtshift == TCP_MAXRXTSHIFT && 4840 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || 4841 ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { 4842 KMOD_TCPSTAT_INC(tcps_persistdrop); 4843 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 4844 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 4845 goto out; 4846 } 4847 if ((sbavail(&bbr->rc_inp->inp_socket->so_snd) == 0) && 4848 tp->snd_una == tp->snd_max) { 4849 bbr_exit_persist(tp, bbr, cts, __LINE__); 4850 retval = 0; 4851 goto out; 4852 } 4853 /* 4854 * If the user has closed the socket then drop a persisting 4855 * connection after a much reduced timeout. 4856 */ 4857 if (tp->t_state > TCPS_CLOSE_WAIT && 4858 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { 4859 KMOD_TCPSTAT_INC(tcps_persistdrop); 4860 tcp_log_end_status(tp, TCP_EI_STATUS_PERSIST_MAX); 4861 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 4862 goto out; 4863 } 4864 t_template = tcpip_maketemplate(bbr->rc_inp); 4865 if (t_template) { 4866 tcp_respond(tp, t_template->tt_ipgen, 4867 &t_template->tt_t, (struct mbuf *)NULL, 4868 tp->rcv_nxt, tp->snd_una - 1, 0); 4869 /* This sends an ack */ 4870 if (tp->t_flags & TF_DELACK) 4871 tp->t_flags &= ~TF_DELACK; 4872 free(t_template, M_TEMP); 4873 } 4874 if (tp->t_rxtshift < TCP_MAXRXTSHIFT) 4875 tp->t_rxtshift++; 4876 bbr_start_hpts_timer(bbr, tp, cts, 3, 0, 0); 4877 out: 4878 return (retval); 4879 } 4880 4881 /* 4882 * If a keepalive goes off, we had no other timers 4883 * happening. We always return 1 here since this 4884 * routine either drops the connection or sends 4885 * out a segment with respond. 4886 */ 4887 static int 4888 bbr_timeout_keepalive(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 4889 { 4890 struct tcptemp *t_template; 4891 struct inpcb *inp; 4892 4893 if (bbr->rc_all_timers_stopped) { 4894 return (1); 4895 } 4896 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; 4897 inp = tp->t_inpcb; 4898 bbr_log_to_event(bbr, cts, BBR_TO_FRM_KEEP); 4899 /* 4900 * Keep-alive timer went off; send something or drop connection if 4901 * idle for too long. 4902 */ 4903 KMOD_TCPSTAT_INC(tcps_keeptimeo); 4904 if (tp->t_state < TCPS_ESTABLISHED) 4905 goto dropit; 4906 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && 4907 tp->t_state <= TCPS_CLOSING) { 4908 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) 4909 goto dropit; 4910 /* 4911 * Send a packet designed to force a response if the peer is 4912 * up and reachable: either an ACK if the connection is 4913 * still alive, or an RST if the peer has closed the 4914 * connection due to timeout or reboot. Using sequence 4915 * number tp->snd_una-1 causes the transmitted zero-length 4916 * segment to lie outside the receive window; by the 4917 * protocol spec, this requires the correspondent TCP to 4918 * respond. 4919 */ 4920 KMOD_TCPSTAT_INC(tcps_keepprobe); 4921 t_template = tcpip_maketemplate(inp); 4922 if (t_template) { 4923 tcp_respond(tp, t_template->tt_ipgen, 4924 &t_template->tt_t, (struct mbuf *)NULL, 4925 tp->rcv_nxt, tp->snd_una - 1, 0); 4926 free(t_template, M_TEMP); 4927 } 4928 } 4929 bbr_start_hpts_timer(bbr, tp, cts, 4, 0, 0); 4930 return (1); 4931 dropit: 4932 KMOD_TCPSTAT_INC(tcps_keepdrops); 4933 tcp_log_end_status(tp, TCP_EI_STATUS_KEEP_MAX); 4934 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 4935 return (1); 4936 } 4937 4938 /* 4939 * Retransmit helper function, clear up all the ack 4940 * flags and take care of important book keeping. 
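 * Everything previously SACK'd is treated as un-acked again (the
 * peer may have reneged), and segments not already marked lost are
 * charged to rc_lost / rc_lost_bytes.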
4941 */ 4942 static void 4943 bbr_remxt_tmr(struct tcpcb *tp) 4944 { 4945 /* 4946 * The retransmit timer went off, all sack'd blocks must be 4947 * un-acked. 4948 */ 4949 struct bbr_sendmap *rsm, *trsm = NULL; 4950 struct tcp_bbr *bbr; 4951 uint32_t cts, lost; 4952 4953 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 4954 cts = tcp_get_usecs(&bbr->rc_tv); 4955 lost = bbr->r_ctl.rc_lost; 4956 if (bbr->r_state && (bbr->r_state != tp->t_state)) 4957 bbr_set_state(tp, bbr, 0); 4958 4959 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) { 4960 if (rsm->r_flags & BBR_ACKED) { 4961 uint32_t old_flags; 4962 4963 rsm->r_dupack = 0; 4964 if (rsm->r_in_tmap == 0) { 4965 /* We must re-add it back to the tlist */ 4966 if (trsm == NULL) { 4967 TAILQ_INSERT_HEAD(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 4968 } else { 4969 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, trsm, rsm, r_tnext); 4970 } 4971 rsm->r_in_tmap = 1; 4972 } 4973 old_flags = rsm->r_flags; 4974 rsm->r_flags |= BBR_RXT_CLEARED; 4975 rsm->r_flags &= ~(BBR_ACKED | BBR_SACK_PASSED | BBR_WAS_SACKPASS); 4976 bbr_log_type_rsmclear(bbr, cts, rsm, old_flags, __LINE__); 4977 } else { 4978 if ((rsm->r_flags & BBR_MARKED_LOST) == 0) { 4979 bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start; 4980 bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start; 4981 } 4982 if (bbr_marks_rxt_sack_passed) { 4983 /* 4984 * With this option, we will rack out 4985 * in 1ms increments the rest of the packets. 4986 */ 4987 rsm->r_flags |= BBR_SACK_PASSED | BBR_MARKED_LOST; 4988 rsm->r_flags &= ~BBR_WAS_SACKPASS; 4989 } else { 4990 /* 4991 * With this option we only mark them lost 4992 * and remove all sack'd markings. We will run 4993 * another RXT or a TLP. This will cause 4994 * us to eventually send more based on what 4995 * ack's come in. 4996 */ 4997 rsm->r_flags |= BBR_MARKED_LOST; 4998 rsm->r_flags &= ~BBR_WAS_SACKPASS; 4999 rsm->r_flags &= ~BBR_SACK_PASSED; 5000 } 5001 } 5002 trsm = rsm; 5003 } 5004 bbr->r_ctl.rc_resend = TAILQ_FIRST(&bbr->r_ctl.rc_map); 5005 /* Clear the count (we just un-acked them) */ 5006 bbr_log_to_event(bbr, cts, BBR_TO_FRM_TMR); 5007 bbr->rc_tlp_new_data = 0; 5008 bbr->r_ctl.rc_tlp_seg_send_cnt = 0; 5009 /* zap the behindness on a rxt */ 5010 bbr->r_ctl.rc_hptsi_agg_delay = 0; 5011 bbr->r_agg_early_set = 0; 5012 bbr->r_ctl.rc_agg_early = 0; 5013 bbr->rc_tlp_rtx_out = 0; 5014 bbr->r_ctl.rc_sacked = 0; 5015 bbr->r_ctl.rc_sacklast = NULL; 5016 bbr->r_timer_override = 1; 5017 bbr_lt_bw_sampling(bbr, cts, (bbr->r_ctl.rc_lost > lost)); 5018 } 5019 5020 /* 5021 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise 5022 * we will setup to retransmit the lowest seq number outstanding. 5023 */ 5024 static int 5025 bbr_timeout_rxt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 5026 { 5027 int32_t rexmt; 5028 int32_t retval = 0; 5029 bool isipv6; 5030 5031 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; 5032 if (bbr->rc_all_timers_stopped) { 5033 return (1); 5034 } 5035 if (TCPS_HAVEESTABLISHED(tp->t_state) && 5036 (tp->snd_una == tp->snd_max)) { 5037 /* Nothing outstanding .. nothing to do */ 5038 return (0); 5039 } 5040 /* 5041 * Retransmission timer went off. Message has not been acked within 5042 * retransmit interval. Back off to a longer retransmit interval 5043 * and retransmit one segment. 
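 * (E.g. with TCP_REXMTVAL(tp) at 200 ms, the third consecutive
 * timeout (t_rxtshift == 3) backs off to about 1.6 seconds with
 * the stock doubling tcp_backoff[] table, clamped below into the
 * [rc_min_rto_ms, rc_max_rto_sec] range.)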
5044 */ 5045 if (ctf_progress_timeout_check(tp, true)) { 5046 retval = 1; 5047 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 5048 tcp_set_inp_to_drop(bbr->rc_inp, ETIMEDOUT); 5049 goto out; 5050 } 5051 bbr_remxt_tmr(tp); 5052 if ((bbr->r_ctl.rc_resend == NULL) || 5053 ((bbr->r_ctl.rc_resend->r_flags & BBR_RWND_COLLAPSED) == 0)) { 5054 /* 5055 * If the rwnd collapsed on 5056 * the one we are retransmitting 5057 * it does not count against the 5058 * rxt count. 5059 */ 5060 tp->t_rxtshift++; 5061 } 5062 if (tp->t_rxtshift > TCP_MAXRXTSHIFT) { 5063 tp->t_rxtshift = TCP_MAXRXTSHIFT; 5064 KMOD_TCPSTAT_INC(tcps_timeoutdrop); 5065 retval = 1; 5066 tcp_log_end_status(tp, TCP_EI_STATUS_RETRAN); 5067 tcp_set_inp_to_drop(bbr->rc_inp, 5068 (tp->t_softerror ? (uint16_t) tp->t_softerror : ETIMEDOUT)); 5069 goto out; 5070 } 5071 if (tp->t_state == TCPS_SYN_SENT) { 5072 /* 5073 * If the SYN was retransmitted, indicate CWND to be limited 5074 * to 1 segment in cc_conn_init(). 5075 */ 5076 tp->snd_cwnd = 1; 5077 } else if (tp->t_rxtshift == 1) { 5078 /* 5079 * first retransmit; record ssthresh and cwnd so they can be 5080 * recovered if this turns out to be a "bad" retransmit. A 5081 * retransmit is considered "bad" if an ACK for this segment 5082 * is received within RTT/2 interval; the assumption here is 5083 * that the ACK was already in flight. See "On Estimating 5084 * End-to-End Network Path Properties" by Allman and Paxson 5085 * for more details. 5086 */ 5087 tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options; 5088 if (!IN_RECOVERY(tp->t_flags)) { 5089 tp->snd_cwnd_prev = tp->snd_cwnd; 5090 tp->snd_ssthresh_prev = tp->snd_ssthresh; 5091 tp->snd_recover_prev = tp->snd_recover; 5092 tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); 5093 tp->t_flags |= TF_PREVVALID; 5094 } else { 5095 tp->t_flags &= ~TF_PREVVALID; 5096 } 5097 tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options; 5098 } else { 5099 tp->snd_cwnd = tp->t_maxseg - bbr->rc_last_options; 5100 tp->t_flags &= ~TF_PREVVALID; 5101 } 5102 KMOD_TCPSTAT_INC(tcps_rexmttimeo); 5103 if ((tp->t_state == TCPS_SYN_SENT) || 5104 (tp->t_state == TCPS_SYN_RECEIVED)) 5105 rexmt = USEC_2_TICKS(BBR_INITIAL_RTO) * tcp_backoff[tp->t_rxtshift]; 5106 else 5107 rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; 5108 TCPT_RANGESET(tp->t_rxtcur, rexmt, 5109 MSEC_2_TICKS(bbr->r_ctl.rc_min_rto_ms), 5110 MSEC_2_TICKS(((uint32_t)bbr->rc_max_rto_sec) * 1000)); 5111 /* 5112 * We enter the path for PLMTUD if connection is established or, if 5113 * connection is FIN_WAIT_1 status, reason for the last is that if 5114 * amount of data we send is very small, we could send it in couple 5115 * of packets and process straight to FIN. In that case we won't 5116 * catch ESTABLISHED state. 5117 */ 5118 #ifdef INET6 5119 isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false; 5120 #else 5121 isipv6 = false; 5122 #endif 5123 if (((V_tcp_pmtud_blackhole_detect == 1) || 5124 (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) || 5125 (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) && 5126 ((tp->t_state == TCPS_ESTABLISHED) || 5127 (tp->t_state == TCPS_FIN_WAIT_1))) { 5128 5129 /* 5130 * Idea here is that at each stage of mtu probe (usually, 5131 * 1448 -> 1188 -> 524) should be given 2 chances to recover 5132 * before further clamping down. 'tp->t_rxtshift % 2 == 0' 5133 * should take care of that. 
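 * (In other words we only clamp the MSS on the 2nd and 4th
 * timeouts, i.e. while 2 <= t_rxtshift < 6 and t_rxtshift is
 * even.)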
5134 */ 5135 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == 5136 (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) && 5137 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && 5138 tp->t_rxtshift % 2 == 0)) { 5139 /* 5140 * Enter Path MTU Black-hole Detection mechanism: - 5141 * Disable Path MTU Discovery (IP "DF" bit). - 5142 * Reduce MTU to lower value than what we negotiated 5143 * with peer. 5144 */ 5145 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { 5146 /* 5147 * Record that we may have found a black 5148 * hole. 5149 */ 5150 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; 5151 /* Keep track of previous MSS. */ 5152 tp->t_pmtud_saved_maxseg = tp->t_maxseg; 5153 } 5154 /* 5155 * Reduce the MSS to blackhole value or to the 5156 * default in an attempt to retransmit. 5157 */ 5158 #ifdef INET6 5159 isipv6 = bbr->r_is_v6; 5160 if (isipv6 && 5161 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { 5162 /* Use the sysctl tuneable blackhole MSS. */ 5163 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; 5164 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 5165 } else if (isipv6) { 5166 /* Use the default MSS. */ 5167 tp->t_maxseg = V_tcp_v6mssdflt; 5168 /* 5169 * Disable Path MTU Discovery when we switch 5170 * to minmss. 5171 */ 5172 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 5173 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 5174 } 5175 #endif 5176 #if defined(INET6) && defined(INET) 5177 else 5178 #endif 5179 #ifdef INET 5180 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { 5181 /* Use the sysctl tuneable blackhole MSS. */ 5182 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; 5183 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated); 5184 } else { 5185 /* Use the default MSS. */ 5186 tp->t_maxseg = V_tcp_mssdflt; 5187 /* 5188 * Disable Path MTU Discovery when we switch 5189 * to minmss. 5190 */ 5191 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 5192 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss); 5193 } 5194 #endif 5195 } else { 5196 /* 5197 * If further retransmissions are still unsuccessful 5198 * with a lowered MTU, maybe this isn't a blackhole 5199 * and we restore the previous MSS and blackhole 5200 * detection flags. The limit '6' is determined by 5201 * giving each probe stage (1448, 1188, 524) 2 5202 * chances to recover. 5203 */ 5204 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && 5205 (tp->t_rxtshift >= 6)) { 5206 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 5207 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; 5208 tp->t_maxseg = tp->t_pmtud_saved_maxseg; 5209 KMOD_TCPSTAT_INC(tcps_pmtud_blackhole_failed); 5210 } 5211 } 5212 } 5213 /* 5214 * Disable RFC1323 and SACK if we haven't got any response to our 5215 * third SYN to work-around some broken terminal servers (most of 5216 * which have hopefully been retired) that have bad VJ header 5217 * compression code which trashes TCP segments containing 5218 * unknown-to-them TCP options. 5219 */ 5220 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && 5221 (tp->t_rxtshift == 3)) 5222 tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_SACK_PERMIT); 5223 /* 5224 * If we backed off this far, our srtt estimate is probably bogus. 5225 * Clobber it so we'll take the next rtt measurement as our srtt; 5226 * move the current srtt into rttvar to keep the current retransmit 5227 * times until then. 
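 * in_losing()/in6_losing() also discards any cached route for this
 * connection, so a retransmission after this point looks a path up afresh.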
5228 */ 5229 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { 5230 #ifdef INET6 5231 if (bbr->r_is_v6) 5232 in6_losing(tp->t_inpcb); 5233 else 5234 #endif 5235 in_losing(tp->t_inpcb); 5236 tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); 5237 tp->t_srtt = 0; 5238 } 5239 sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una); 5240 tp->snd_recover = tp->snd_max; 5241 tp->t_flags |= TF_ACKNOW; 5242 tp->t_rtttime = 0; 5243 out: 5244 return (retval); 5245 } 5246 5247 static int 5248 bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t hpts_calling) 5249 { 5250 int32_t ret = 0; 5251 int32_t timers = (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK); 5252 5253 if (timers == 0) { 5254 return (0); 5255 } 5256 if (tp->t_state == TCPS_LISTEN) { 5257 /* no timers on listen sockets */ 5258 if (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) 5259 return (0); 5260 return (1); 5261 } 5262 if (TSTMP_LT(cts, bbr->r_ctl.rc_timer_exp)) { 5263 uint32_t left; 5264 5265 if (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { 5266 ret = -1; 5267 bbr_log_to_processing(bbr, cts, ret, 0, hpts_calling); 5268 return (0); 5269 } 5270 if (hpts_calling == 0) { 5271 ret = -2; 5272 bbr_log_to_processing(bbr, cts, ret, 0, hpts_calling); 5273 return (0); 5274 } 5275 /* 5276 * Ok our timer went off early and we are not paced false 5277 * alarm, go back to sleep. 5278 */ 5279 left = bbr->r_ctl.rc_timer_exp - cts; 5280 ret = -3; 5281 bbr_log_to_processing(bbr, cts, ret, left, hpts_calling); 5282 tcp_hpts_insert(tp->t_inpcb, HPTS_USEC_TO_SLOTS(left)); 5283 return (1); 5284 } 5285 bbr->rc_tmr_stopped = 0; 5286 bbr->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; 5287 if (timers & PACE_TMR_DELACK) { 5288 ret = bbr_timeout_delack(tp, bbr, cts); 5289 } else if (timers & PACE_TMR_PERSIT) { 5290 ret = bbr_timeout_persist(tp, bbr, cts); 5291 } else if (timers & PACE_TMR_RACK) { 5292 bbr->r_ctl.rc_tlp_rxt_last_time = cts; 5293 ret = bbr_timeout_rack(tp, bbr, cts); 5294 } else if (timers & PACE_TMR_TLP) { 5295 bbr->r_ctl.rc_tlp_rxt_last_time = cts; 5296 ret = bbr_timeout_tlp(tp, bbr, cts); 5297 } else if (timers & PACE_TMR_RXT) { 5298 bbr->r_ctl.rc_tlp_rxt_last_time = cts; 5299 ret = bbr_timeout_rxt(tp, bbr, cts); 5300 } else if (timers & PACE_TMR_KEEP) { 5301 ret = bbr_timeout_keepalive(tp, bbr, cts); 5302 } 5303 bbr_log_to_processing(bbr, cts, ret, timers, hpts_calling); 5304 return (ret); 5305 } 5306 5307 static void 5308 bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts) 5309 { 5310 if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 5311 uint8_t hpts_removed = 0; 5312 5313 if (bbr->rc_inp->inp_in_hpts && 5314 (bbr->rc_timer_first == 1)) { 5315 /* 5316 * If we are canceling timer's when we have the 5317 * timer ahead of the output being paced. We also 5318 * must remove ourselves from the hpts. 
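 * When that happens the leftover pacing slot (rc_last_delay_val) is trimmed
 * below by the time already spent since the pacer started, so the next
 * output pass does not wait twice.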
5319 */ 5320 hpts_removed = 1; 5321 tcp_hpts_remove(bbr->rc_inp, HPTS_REMOVE_OUTPUT); 5322 if (bbr->r_ctl.rc_last_delay_val) { 5323 /* Update the last hptsi delay too */ 5324 uint32_t time_since_send; 5325 5326 if (TSTMP_GT(cts, bbr->rc_pacer_started)) 5327 time_since_send = cts - bbr->rc_pacer_started; 5328 else 5329 time_since_send = 0; 5330 if (bbr->r_ctl.rc_last_delay_val > time_since_send) { 5331 /* Cut down our slot time */ 5332 bbr->r_ctl.rc_last_delay_val -= time_since_send; 5333 } else { 5334 bbr->r_ctl.rc_last_delay_val = 0; 5335 } 5336 bbr->rc_pacer_started = cts; 5337 } 5338 } 5339 bbr->rc_timer_first = 0; 5340 bbr_log_to_cancel(bbr, line, cts, hpts_removed); 5341 bbr->rc_tmr_stopped = bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK; 5342 bbr->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); 5343 } 5344 } 5345 5346 static void 5347 bbr_timer_stop(struct tcpcb *tp, uint32_t timer_type) 5348 { 5349 struct tcp_bbr *bbr; 5350 5351 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 5352 bbr->rc_all_timers_stopped = 1; 5353 return; 5354 } 5355 5356 /* 5357 * stop all timers always returning 0. 5358 */ 5359 static int 5360 bbr_stopall(struct tcpcb *tp) 5361 { 5362 return (0); 5363 } 5364 5365 static void 5366 bbr_timer_activate(struct tcpcb *tp, uint32_t timer_type, uint32_t delta) 5367 { 5368 return; 5369 } 5370 5371 /* 5372 * return true if a bbr timer (rack or tlp) is active. 5373 */ 5374 static int 5375 bbr_timer_active(struct tcpcb *tp, uint32_t timer_type) 5376 { 5377 return (0); 5378 } 5379 5380 static uint32_t 5381 bbr_get_earliest_send_outstanding(struct tcp_bbr *bbr, struct bbr_sendmap *u_rsm, uint32_t cts) 5382 { 5383 struct bbr_sendmap *rsm; 5384 5385 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 5386 if ((rsm == NULL) || (u_rsm == rsm)) 5387 return (cts); 5388 return(rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); 5389 } 5390 5391 static void 5392 bbr_update_rsm(struct tcpcb *tp, struct tcp_bbr *bbr, 5393 struct bbr_sendmap *rsm, uint32_t cts, uint32_t pacing_time) 5394 { 5395 int32_t idx; 5396 5397 rsm->r_rtr_cnt++; 5398 rsm->r_dupack = 0; 5399 if (rsm->r_rtr_cnt > BBR_NUM_OF_RETRANS) { 5400 rsm->r_rtr_cnt = BBR_NUM_OF_RETRANS; 5401 rsm->r_flags |= BBR_OVERMAX; 5402 } 5403 if (rsm->r_flags & BBR_RWND_COLLAPSED) { 5404 /* Take off the collapsed flag at rxt */ 5405 rsm->r_flags &= ~BBR_RWND_COLLAPSED; 5406 } 5407 if (rsm->r_flags & BBR_MARKED_LOST) { 5408 /* We have retransmitted, its no longer lost */ 5409 rsm->r_flags &= ~BBR_MARKED_LOST; 5410 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 5411 } 5412 if (rsm->r_flags & BBR_RXT_CLEARED) { 5413 /* 5414 * We hit a RXT timer on it and 5415 * we cleared the "acked" flag. 5416 * We now have it going back into 5417 * flight, we can remove the cleared 5418 * flag and possibly do accounting on 5419 * this piece. 
5420 */ 5421 rsm->r_flags &= ~BBR_RXT_CLEARED; 5422 } 5423 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & BBR_TLP) == 0)) { 5424 bbr->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); 5425 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); 5426 } 5427 idx = rsm->r_rtr_cnt - 1; 5428 rsm->r_tim_lastsent[idx] = cts; 5429 rsm->r_pacing_delay = pacing_time; 5430 rsm->r_delivered = bbr->r_ctl.rc_delivered; 5431 rsm->r_ts_valid = bbr->rc_ts_valid; 5432 if (bbr->rc_ts_valid) 5433 rsm->r_del_ack_ts = bbr->r_ctl.last_inbound_ts; 5434 if (bbr->r_ctl.r_app_limited_until) 5435 rsm->r_app_limited = 1; 5436 else 5437 rsm->r_app_limited = 0; 5438 if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) 5439 rsm->r_bbr_state = bbr_state_val(bbr); 5440 else 5441 rsm->r_bbr_state = 8; 5442 if (rsm->r_flags & BBR_ACKED) { 5443 /* Problably MTU discovery messing with us */ 5444 uint32_t old_flags; 5445 5446 old_flags = rsm->r_flags; 5447 rsm->r_flags &= ~BBR_ACKED; 5448 bbr_log_type_rsmclear(bbr, cts, rsm, old_flags, __LINE__); 5449 bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 5450 if (bbr->r_ctl.rc_sacked == 0) 5451 bbr->r_ctl.rc_sacklast = NULL; 5452 } 5453 if (rsm->r_in_tmap) { 5454 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 5455 } 5456 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 5457 rsm->r_in_tmap = 1; 5458 if (rsm->r_flags & BBR_SACK_PASSED) { 5459 /* We have retransmitted due to the SACK pass */ 5460 rsm->r_flags &= ~BBR_SACK_PASSED; 5461 rsm->r_flags |= BBR_WAS_SACKPASS; 5462 } 5463 rsm->r_first_sent_time = bbr_get_earliest_send_outstanding(bbr, rsm, cts); 5464 rsm->r_flight_at_send = ctf_flight_size(bbr->rc_tp, 5465 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 5466 bbr->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next); 5467 if (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT) { 5468 rsm->r_is_gain = 1; 5469 rsm->r_is_drain = 0; 5470 } else if (bbr->r_ctl.rc_bbr_hptsi_gain < BBR_UNIT) { 5471 rsm->r_is_drain = 1; 5472 rsm->r_is_gain = 0; 5473 } else { 5474 rsm->r_is_drain = 0; 5475 rsm->r_is_gain = 0; 5476 } 5477 rsm->r_del_time = bbr->r_ctl.rc_del_time; /* TEMP GOOGLE CODE */ 5478 } 5479 5480 /* 5481 * Returns 0, or the sequence where we stopped 5482 * updating. We also update the lenp to be the amount 5483 * of data left. 5484 */ 5485 5486 static uint32_t 5487 bbr_update_entry(struct tcpcb *tp, struct tcp_bbr *bbr, 5488 struct bbr_sendmap *rsm, uint32_t cts, int32_t *lenp, uint32_t pacing_time) 5489 { 5490 /* 5491 * We (re-)transmitted starting at rsm->r_start for some length 5492 * (possibly less than r_end. 5493 */ 5494 struct bbr_sendmap *nrsm; 5495 uint32_t c_end; 5496 int32_t len; 5497 5498 len = *lenp; 5499 c_end = rsm->r_start + len; 5500 if (SEQ_GEQ(c_end, rsm->r_end)) { 5501 /* 5502 * We retransmitted the whole piece or more than the whole 5503 * slopping into the next rsm. 5504 */ 5505 bbr_update_rsm(tp, bbr, rsm, cts, pacing_time); 5506 if (c_end == rsm->r_end) { 5507 *lenp = 0; 5508 return (0); 5509 } else { 5510 int32_t act_len; 5511 5512 /* Hangs over the end return whats left */ 5513 act_len = rsm->r_end - rsm->r_start; 5514 *lenp = (len - act_len); 5515 return (rsm->r_end); 5516 } 5517 /* We don't get out of this block. */ 5518 } 5519 /* 5520 * Here we retransmitted less than the whole thing which means we 5521 * have to split this into what was transmitted and what was not. 5522 */ 5523 nrsm = bbr_alloc_full_limit(bbr); 5524 if (nrsm == NULL) { 5525 *lenp = 0; 5526 return (0); 5527 } 5528 /* 5529 * So here we are going to take the original rsm and make it what we 5530 * retransmitted. 
nrsm will be the tail portion we did not 5531 * retransmit. For example say the chunk was 1, 11 (10 bytes). And 5532 * we retransmitted 5 bytes i.e. 1, 5. The original piece shrinks to 5533 * 1, 6 and the new piece will be 6, 11. 5534 */ 5535 bbr_clone_rsm(bbr, nrsm, rsm, c_end); 5536 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 5537 nrsm->r_dupack = 0; 5538 if (rsm->r_in_tmap) { 5539 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 5540 nrsm->r_in_tmap = 1; 5541 } 5542 rsm->r_flags &= (~BBR_HAS_FIN); 5543 bbr_update_rsm(tp, bbr, rsm, cts, pacing_time); 5544 *lenp = 0; 5545 return (0); 5546 } 5547 5548 static uint64_t 5549 bbr_get_hardware_rate(struct tcp_bbr *bbr) 5550 { 5551 uint64_t bw; 5552 5553 bw = bbr_get_bw(bbr); 5554 bw *= (uint64_t)bbr_hptsi_gain[BBR_SUB_GAIN]; 5555 bw /= (uint64_t)BBR_UNIT; 5556 return(bw); 5557 } 5558 5559 static void 5560 bbr_setup_less_of_rate(struct tcp_bbr *bbr, uint32_t cts, 5561 uint64_t act_rate, uint64_t rate_wanted) 5562 { 5563 /* 5564 * We could not get a full gains worth 5565 * of rate. 5566 */ 5567 if (get_filter_value(&bbr->r_ctl.rc_delrate) >= act_rate) { 5568 /* we can't even get the real rate */ 5569 uint64_t red; 5570 5571 bbr->skip_gain = 1; 5572 bbr->gain_is_limited = 0; 5573 red = get_filter_value(&bbr->r_ctl.rc_delrate) - act_rate; 5574 if (red) 5575 filter_reduce_by(&bbr->r_ctl.rc_delrate, red, cts); 5576 } else { 5577 /* We can use a lower gain */ 5578 bbr->skip_gain = 0; 5579 bbr->gain_is_limited = 1; 5580 } 5581 } 5582 5583 static void 5584 bbr_update_hardware_pacing_rate(struct tcp_bbr *bbr, uint32_t cts) 5585 { 5586 const struct tcp_hwrate_limit_table *nrte; 5587 int error, rate = -1; 5588 5589 if (bbr->r_ctl.crte == NULL) 5590 return; 5591 if ((bbr->rc_inp->inp_route.ro_nh == NULL) || 5592 (bbr->rc_inp->inp_route.ro_nh->nh_ifp == NULL)) { 5593 /* Lost our routes? */ 5594 /* Clear the way for a re-attempt */ 5595 bbr->bbr_attempt_hdwr_pace = 0; 5596 lost_rate: 5597 bbr->gain_is_limited = 0; 5598 bbr->skip_gain = 0; 5599 bbr->bbr_hdrw_pacing = 0; 5600 counter_u64_add(bbr_flows_whdwr_pacing, -1); 5601 counter_u64_add(bbr_flows_nohdwr_pacing, 1); 5602 tcp_bbr_tso_size_check(bbr, cts); 5603 return; 5604 } 5605 rate = bbr_get_hardware_rate(bbr); 5606 nrte = tcp_chg_pacing_rate(bbr->r_ctl.crte, 5607 bbr->rc_tp, 5608 bbr->rc_inp->inp_route.ro_nh->nh_ifp, 5609 rate, 5610 (RS_PACING_GEQ|RS_PACING_SUB_OK), 5611 &error); 5612 if (nrte == NULL) { 5613 goto lost_rate; 5614 } 5615 if (nrte != bbr->r_ctl.crte) { 5616 bbr->r_ctl.crte = nrte; 5617 if (error == 0) { 5618 BBR_STAT_INC(bbr_hdwr_rl_mod_ok); 5619 if (bbr->r_ctl.crte->rate < rate) { 5620 /* We have a problem */ 5621 bbr_setup_less_of_rate(bbr, cts, 5622 bbr->r_ctl.crte->rate, rate); 5623 } else { 5624 /* We are good */ 5625 bbr->gain_is_limited = 0; 5626 bbr->skip_gain = 0; 5627 } 5628 } else { 5629 /* A failure should release the tag */ 5630 BBR_STAT_INC(bbr_hdwr_rl_mod_fail); 5631 bbr->gain_is_limited = 0; 5632 bbr->skip_gain = 0; 5633 bbr->bbr_hdrw_pacing = 0; 5634 } 5635 bbr_type_log_hdwr_pacing(bbr, 5636 bbr->r_ctl.crte->ptbl->rs_ifp, 5637 rate, 5638 ((bbr->r_ctl.crte == NULL) ? 0 : bbr->r_ctl.crte->rate), 5639 __LINE__, 5640 cts, 5641 error); 5642 } 5643 } 5644 5645 static void 5646 bbr_adjust_for_hw_pacing(struct tcp_bbr *bbr, uint32_t cts) 5647 { 5648 /* 5649 * If we have hardware pacing support 5650 * we need to factor that in for our 5651 * TSO size. 
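 * The logic below compares the software pacing delay for one full TSO burst
 * (cur_delay) with the inter-segment delay the NIC itself enforces
 * (hdwr_delay) and, when the hardware spacing covers most of that gap,
 * grows the TSO size so the software pacer is not waiting on top of the
 * hardware.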
5652 */ 5653 const struct tcp_hwrate_limit_table *rlp; 5654 uint32_t cur_delay, seg_sz, maxseg, new_tso, delta, hdwr_delay; 5655 5656 if ((bbr->bbr_hdrw_pacing == 0) || 5657 (IN_RECOVERY(bbr->rc_tp->t_flags)) || 5658 (bbr->r_ctl.crte == NULL)) 5659 return; 5660 if (bbr->hw_pacing_set == 0) { 5661 /* Not yet by the hdwr pacing count delay */ 5662 return; 5663 } 5664 if (bbr_hdwr_pace_adjust == 0) { 5665 /* No adjustment */ 5666 return; 5667 } 5668 rlp = bbr->r_ctl.crte; 5669 if (bbr->rc_tp->t_maxseg > bbr->rc_last_options) 5670 maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options; 5671 else 5672 maxseg = BBR_MIN_SEG - bbr->rc_last_options; 5673 /* 5674 * So lets first get the 5675 * time we will take between 5676 * TSO sized sends currently without 5677 * hardware help. 5678 */ 5679 cur_delay = bbr_get_pacing_delay(bbr, BBR_UNIT, 5680 bbr->r_ctl.rc_pace_max_segs, cts, 1); 5681 hdwr_delay = bbr->r_ctl.rc_pace_max_segs / maxseg; 5682 hdwr_delay *= rlp->time_between; 5683 if (cur_delay > hdwr_delay) 5684 delta = cur_delay - hdwr_delay; 5685 else 5686 delta = 0; 5687 bbr_log_type_tsosize(bbr, cts, delta, cur_delay, hdwr_delay, 5688 (bbr->r_ctl.rc_pace_max_segs / maxseg), 5689 1); 5690 if (delta && 5691 (delta < (max(rlp->time_between, 5692 bbr->r_ctl.bbr_hptsi_segments_delay_tar)))) { 5693 /* 5694 * Now lets divide by the pacing 5695 * time between each segment the 5696 * hardware sends rounding up and 5697 * derive a bytes from that. We multiply 5698 * that by bbr_hdwr_pace_adjust to get 5699 * more bang for our buck. 5700 * 5701 * The goal is to have the software pacer 5702 * waiting no more than an additional 5703 * pacing delay if we can (without the 5704 * compensation i.e. x bbr_hdwr_pace_adjust). 5705 */ 5706 seg_sz = max(((cur_delay + rlp->time_between)/rlp->time_between), 5707 (bbr->r_ctl.rc_pace_max_segs/maxseg)); 5708 seg_sz *= bbr_hdwr_pace_adjust; 5709 if (bbr_hdwr_pace_floor && 5710 (seg_sz < bbr->r_ctl.crte->ptbl->rs_min_seg)) { 5711 /* Currently hardware paces 5712 * out rs_min_seg segments at a time. 5713 * We need to make sure we always send at least 5714 * a full burst of bbr_hdwr_pace_floor down. 5715 */ 5716 seg_sz = bbr->r_ctl.crte->ptbl->rs_min_seg; 5717 } 5718 seg_sz *= maxseg; 5719 } else if (delta == 0) { 5720 /* 5721 * The highest pacing rate is 5722 * above our b/w gained. This means 5723 * we probably are going quite fast at 5724 * the hardware highest rate. Lets just multiply 5725 * the calculated TSO size by the 5726 * multiplier factor (its probably 5727 * 4 segments in the default config for 5728 * mlx). 5729 */ 5730 seg_sz = bbr->r_ctl.rc_pace_max_segs * bbr_hdwr_pace_adjust; 5731 if (bbr_hdwr_pace_floor && 5732 (seg_sz < bbr->r_ctl.crte->ptbl->rs_min_seg)) { 5733 /* Currently hardware paces 5734 * out rs_min_seg segments at a time. 5735 * We need to make sure we always send at least 5736 * a full burst of bbr_hdwr_pace_floor down. 5737 */ 5738 seg_sz = bbr->r_ctl.crte->ptbl->rs_min_seg; 5739 } 5740 } else { 5741 /* 5742 * The pacing time difference is so 5743 * big that the hardware will 5744 * pace out more rapidly then we 5745 * really want and then we 5746 * will have a long delay. Lets just keep 5747 * the same TSO size so its as if 5748 * we were not using hdwr pacing (we 5749 * just gain a bit of spacing from the 5750 * hardware if seg_sz > 1). 
5751 */ 5752 seg_sz = bbr->r_ctl.rc_pace_max_segs; 5753 } 5754 if (seg_sz > bbr->r_ctl.rc_pace_max_segs) 5755 new_tso = seg_sz; 5756 else 5757 new_tso = bbr->r_ctl.rc_pace_max_segs; 5758 if (new_tso >= (PACE_MAX_IP_BYTES-maxseg)) 5759 new_tso = PACE_MAX_IP_BYTES - maxseg; 5760 5761 if (new_tso != bbr->r_ctl.rc_pace_max_segs) { 5762 bbr_log_type_tsosize(bbr, cts, new_tso, 0, bbr->r_ctl.rc_pace_max_segs, maxseg, 0); 5763 bbr->r_ctl.rc_pace_max_segs = new_tso; 5764 } 5765 } 5766 5767 static void 5768 tcp_bbr_tso_size_check(struct tcp_bbr *bbr, uint32_t cts) 5769 { 5770 uint64_t bw; 5771 uint32_t old_tso = 0, new_tso; 5772 uint32_t maxseg, bytes; 5773 uint32_t tls_seg=0; 5774 /* 5775 * Google/linux uses the following algorithm to determine 5776 * the TSO size based on the b/w of the link (from Neal Cardwell email 9/27/18): 5777 * 5778 * bytes = bw_in_bytes_per_second / 1000 5779 * bytes = min(bytes, 64k) 5780 * tso_segs = bytes / MSS 5781 * if (bw < 1.2Mbs) 5782 * min_tso_segs = 1 5783 * else 5784 * min_tso_segs = 2 5785 * tso_segs = max(tso_segs, min_tso_segs) 5786 * 5787 * * Note apply a device specific limit (we apply this in the 5788 * tcp_m_copym). 5789 * Note that before the initial measurement is made google bursts out 5790 * a full iwnd just like new-reno/cubic. 5791 * 5792 * We do not use this algorithm. Instead we 5793 * use a two phased approach: 5794 * 5795 * if ( bw <= per-tcb-cross-over) 5796 * goal_tso = calculate how much with this bw we 5797 * can send in goal-time seconds. 5798 * if (goal_tso > mss) 5799 * seg = goal_tso / mss 5800 * tso = seg * mss 5801 * else 5802 * tso = mss 5803 * if (tso > per-tcb-max) 5804 * tso = per-tcb-max 5805 * else if ( bw > 512Mbps) 5806 * tso = max-tso (64k/mss) 5807 * else 5808 * goal_tso = bw / per-tcb-divsor 5809 * seg = (goal_tso + mss-1)/mss 5810 * tso = seg * mss 5811 * 5812 * if (tso < per-tcb-floor) 5813 * tso = per-tcb-floor 5814 * if (tso > per-tcb-utter_max) 5815 * tso = per-tcb-utter_max 5816 * 5817 * Note the default per-tcb-divisor is 1000 (same as google). 5818 * the goal cross over is 30Mbps however. To recreate googles 5819 * algorithm you need to set: 5820 * 5821 * cross-over = 23,168,000 bps 5822 * goal-time = 18000 5823 * per-tcb-max = 2 5824 * per-tcb-divisor = 1000 5825 * per-tcb-floor = 1 5826 * 5827 * This will get you "google bbr" behavior with respect to tso size. 5828 * 5829 * Note we do set anything TSO size until we are past the initial 5830 * window. Before that we gnerally use either a single MSS 5831 * or we use the full IW size (so we burst a IW at a time) 5832 * Also note that Hardware-TLS is special and does alternate 5833 * things to minimize PCI Bus Bandwidth use. 5834 */ 5835 5836 if (bbr->rc_tp->t_maxseg > bbr->rc_last_options) { 5837 maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options; 5838 } else { 5839 maxseg = BBR_MIN_SEG - bbr->rc_last_options; 5840 } 5841 #ifdef KERN_TLS 5842 if (bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) { 5843 tls_seg = ctf_get_opt_tls_size(bbr->rc_inp->inp_socket, bbr->rc_tp->snd_wnd); 5844 bbr->r_ctl.rc_pace_min_segs = (tls_seg + bbr->rc_last_options); 5845 } 5846 #endif 5847 old_tso = bbr->r_ctl.rc_pace_max_segs; 5848 if (bbr->rc_past_init_win == 0) { 5849 /* 5850 * Not enough data has been acknowledged to make a 5851 * judgement unless we are hardware TLS. Set up 5852 * the initial TSO based on if we are sending a 5853 * full IW at once or not. 
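 * Google mode starts with 2 * MSS; otherwise we either burst the whole
 * initial window (when bbr_init_win_cheat is set) or start from a single
 * MSS.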
5854 */ 5855 if (bbr->rc_use_google) 5856 bbr->r_ctl.rc_pace_max_segs = ((bbr->rc_tp->t_maxseg - bbr->rc_last_options) * 2); 5857 else if (bbr->bbr_init_win_cheat) 5858 bbr->r_ctl.rc_pace_max_segs = bbr_initial_cwnd(bbr, bbr->rc_tp); 5859 else 5860 bbr->r_ctl.rc_pace_max_segs = bbr->rc_tp->t_maxseg - bbr->rc_last_options; 5861 if (bbr->r_ctl.rc_pace_min_segs != bbr->rc_tp->t_maxseg) 5862 bbr->r_ctl.rc_pace_min_segs = bbr->rc_tp->t_maxseg; 5863 #ifdef KERN_TLS 5864 if ((bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) && tls_seg) { 5865 /* 5866 * For hardware TLS we set our min to the tls_seg size. 5867 */ 5868 bbr->r_ctl.rc_pace_max_segs = tls_seg; 5869 bbr->r_ctl.rc_pace_min_segs = tls_seg + bbr->rc_last_options; 5870 } 5871 #endif 5872 if (bbr->r_ctl.rc_pace_max_segs == 0) { 5873 bbr->r_ctl.rc_pace_max_segs = maxseg; 5874 } 5875 bbr_log_type_tsosize(bbr, cts, bbr->r_ctl.rc_pace_max_segs, tls_seg, old_tso, maxseg, 0); 5876 #ifdef KERN_TLS 5877 if ((bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) == 0) 5878 #endif 5879 bbr_adjust_for_hw_pacing(bbr, cts); 5880 return; 5881 } 5882 /** 5883 * Now lets set the TSO goal based on our delivery rate in 5884 * bytes per second. Note we only do this if 5885 * we have acked at least the initial cwnd worth of data. 5886 */ 5887 bw = bbr_get_bw(bbr); 5888 if (IN_RECOVERY(bbr->rc_tp->t_flags) && 5889 (bbr->rc_use_google == 0)) { 5890 /* We clamp to one MSS in recovery */ 5891 new_tso = maxseg; 5892 } else if (bbr->rc_use_google) { 5893 int min_tso_segs; 5894 5895 /* Google considers the gain too */ 5896 if (bbr->r_ctl.rc_bbr_hptsi_gain != BBR_UNIT) { 5897 bw *= bbr->r_ctl.rc_bbr_hptsi_gain; 5898 bw /= BBR_UNIT; 5899 } 5900 bytes = bw / 1024; 5901 if (bytes > (64 * 1024)) 5902 bytes = 64 * 1024; 5903 new_tso = bytes / maxseg; 5904 if (bw < ONE_POINT_TWO_MEG) 5905 min_tso_segs = 1; 5906 else 5907 min_tso_segs = 2; 5908 if (new_tso < min_tso_segs) 5909 new_tso = min_tso_segs; 5910 new_tso *= maxseg; 5911 } else if (bbr->rc_no_pacing) { 5912 new_tso = (PACE_MAX_IP_BYTES / maxseg) * maxseg; 5913 } else if (bw <= bbr->r_ctl.bbr_cross_over) { 5914 /* 5915 * Calculate the worse case b/w TSO if we are inserting no 5916 * more than a delay_target number of TSO's. 5917 */ 5918 uint32_t tso_len, min_tso; 5919 5920 tso_len = bbr_get_pacing_length(bbr, BBR_UNIT, bbr->r_ctl.bbr_hptsi_segments_delay_tar, bw); 5921 if (tso_len > maxseg) { 5922 new_tso = tso_len / maxseg; 5923 if (new_tso > bbr->r_ctl.bbr_hptsi_segments_max) 5924 new_tso = bbr->r_ctl.bbr_hptsi_segments_max; 5925 new_tso *= maxseg; 5926 } else { 5927 /* 5928 * less than a full sized frame yikes.. long rtt or 5929 * low bw? 5930 */ 5931 min_tso = bbr_minseg(bbr); 5932 if ((tso_len > min_tso) && (bbr_all_get_min == 0)) 5933 new_tso = rounddown(tso_len, min_tso); 5934 else 5935 new_tso = min_tso; 5936 } 5937 } else if (bw > FIVETWELVE_MBPS) { 5938 /* 5939 * This guy is so fast b/w wise that we can TSO as large as 5940 * possible of segments that the NIC will allow. 5941 */ 5942 new_tso = rounddown(PACE_MAX_IP_BYTES, maxseg); 5943 } else { 5944 /* 5945 * This formula is based on attempting to send a segment or 5946 * more every bbr_hptsi_per_second. The default is 1000 5947 * which means you are targeting what you can send every 1ms 5948 * based on the peers bw. 5949 * 5950 * If the number drops to say 500, then you are looking more 5951 * at 2ms and you will raise how much we send in a single 5952 * TSO thus saving CPU (less bbr_output_wtime() calls). 
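 * For example, at a measured 100 Mbit/s (12,500,000 bytes/sec) the default
 * divisor of 1000 targets 12,500 bytes per interval, which roundup() turns
 * into nine 1448-byte segments (13,032 bytes) before the floor and ceiling
 * checks below are applied.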
The 5953 * trade off of course is you will send more at once and 5954 * thus tend to clump up the sends into larger "bursts" 5955 * building a queue. 5956 */ 5957 bw /= bbr->r_ctl.bbr_hptsi_per_second; 5958 new_tso = roundup(bw, (uint64_t)maxseg); 5959 /* 5960 * Gate the floor to match what our lower than 48Mbps 5961 * algorithm does. The ceiling (bbr_hptsi_segments_max) thus 5962 * becomes the floor for this calculation. 5963 */ 5964 if (new_tso < (bbr->r_ctl.bbr_hptsi_segments_max * maxseg)) 5965 new_tso = (bbr->r_ctl.bbr_hptsi_segments_max * maxseg); 5966 } 5967 if (bbr->r_ctl.bbr_hptsi_segments_floor && (new_tso < (maxseg * bbr->r_ctl.bbr_hptsi_segments_floor))) 5968 new_tso = maxseg * bbr->r_ctl.bbr_hptsi_segments_floor; 5969 if (new_tso > PACE_MAX_IP_BYTES) 5970 new_tso = rounddown(PACE_MAX_IP_BYTES, maxseg); 5971 /* Enforce an utter maximum if we are not HW-TLS */ 5972 #ifdef KERN_TLS 5973 if ((bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) == 0) 5974 #endif 5975 if (bbr->r_ctl.bbr_utter_max && (new_tso > (bbr->r_ctl.bbr_utter_max * maxseg))) { 5976 new_tso = bbr->r_ctl.bbr_utter_max * maxseg; 5977 } 5978 #ifdef KERN_TLS 5979 if (tls_seg) { 5980 /* 5981 * Lets move the output size 5982 * up to 1 or more TLS record sizes. 5983 */ 5984 uint32_t temp; 5985 5986 temp = roundup(new_tso, tls_seg); 5987 new_tso = temp; 5988 /* Back down if needed to under a full frame */ 5989 while (new_tso > PACE_MAX_IP_BYTES) 5990 new_tso -= tls_seg; 5991 } 5992 #endif 5993 if (old_tso != new_tso) { 5994 /* Only log changes */ 5995 bbr_log_type_tsosize(bbr, cts, new_tso, tls_seg, old_tso, maxseg, 0); 5996 bbr->r_ctl.rc_pace_max_segs = new_tso; 5997 } 5998 #ifdef KERN_TLS 5999 if ((bbr->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) && 6000 tls_seg) { 6001 bbr->r_ctl.rc_pace_min_segs = tls_seg + bbr->rc_last_options; 6002 } else 6003 #endif 6004 /* We have hardware pacing and not hardware TLS! */ 6005 bbr_adjust_for_hw_pacing(bbr, cts); 6006 } 6007 6008 static void 6009 bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t len, 6010 uint32_t seq_out, uint8_t th_flags, int32_t err, uint32_t cts, 6011 struct mbuf *mb, int32_t * abandon, struct bbr_sendmap *hintrsm, uint32_t delay_calc, 6012 struct sockbuf *sb) 6013 { 6014 6015 struct bbr_sendmap *rsm, *nrsm; 6016 register uint32_t snd_max, snd_una; 6017 uint32_t pacing_time; 6018 /* 6019 * Add to the RACK log of packets in flight or retransmitted. If 6020 * there is a TS option we will use the TS echoed, if not we will 6021 * grab a TS. 6022 * 6023 * Retransmissions will increment the count and move the ts to its 6024 * proper place. Note that if options do not include TS's then we 6025 * won't be able to effectively use the ACK for an RTT on a retran. 6026 * 6027 * Notes about r_start and r_end. Lets consider a send starting at 6028 * sequence 1 for 10 bytes. In such an example the r_start would be 6029 * 1 (starting sequence) but the r_end would be r_start+len i.e. 11. 6030 * This means that r_end is actually the first sequence for the next 6031 * slot (11). 6032 * 6033 */ 6034 INP_WLOCK_ASSERT(tp->t_inpcb); 6035 if (err) { 6036 /* 6037 * We don't log errors -- we could but snd_max does not 6038 * advance in this case either. 
6039 */ 6040 return; 6041 } 6042 if (th_flags & TH_RST) { 6043 /* 6044 * We don't log resets and we return immediately from 6045 * sending 6046 */ 6047 *abandon = 1; 6048 return; 6049 } 6050 snd_una = tp->snd_una; 6051 if (th_flags & (TH_SYN | TH_FIN) && (hintrsm == NULL)) { 6052 /* 6053 * The call to bbr_log_output is made before bumping 6054 * snd_max. This means we can record one extra byte on a SYN 6055 * or FIN if seq_out is adding more on and a FIN is present 6056 * (and we are not resending). 6057 */ 6058 if (th_flags & TH_SYN) 6059 len++; 6060 if (th_flags & TH_FIN) 6061 len++; 6062 } 6063 if (SEQ_LEQ((seq_out + len), snd_una)) { 6064 /* Are sending an old segment to induce an ack (keep-alive)? */ 6065 return; 6066 } 6067 if (SEQ_LT(seq_out, snd_una)) { 6068 /* huh? should we panic? */ 6069 uint32_t end; 6070 6071 end = seq_out + len; 6072 seq_out = snd_una; 6073 len = end - seq_out; 6074 } 6075 snd_max = tp->snd_max; 6076 if (len == 0) { 6077 /* We don't log zero window probes */ 6078 return; 6079 } 6080 pacing_time = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, len, cts, 1); 6081 /* First question is it a retransmission? */ 6082 if (seq_out == snd_max) { 6083 again: 6084 rsm = bbr_alloc(bbr); 6085 if (rsm == NULL) { 6086 return; 6087 } 6088 rsm->r_flags = 0; 6089 if (th_flags & TH_SYN) 6090 rsm->r_flags |= BBR_HAS_SYN; 6091 if (th_flags & TH_FIN) 6092 rsm->r_flags |= BBR_HAS_FIN; 6093 rsm->r_tim_lastsent[0] = cts; 6094 rsm->r_rtr_cnt = 1; 6095 rsm->r_rtr_bytes = 0; 6096 rsm->r_start = seq_out; 6097 rsm->r_end = rsm->r_start + len; 6098 rsm->r_dupack = 0; 6099 rsm->r_delivered = bbr->r_ctl.rc_delivered; 6100 rsm->r_pacing_delay = pacing_time; 6101 rsm->r_ts_valid = bbr->rc_ts_valid; 6102 if (bbr->rc_ts_valid) 6103 rsm->r_del_ack_ts = bbr->r_ctl.last_inbound_ts; 6104 rsm->r_del_time = bbr->r_ctl.rc_del_time; 6105 if (bbr->r_ctl.r_app_limited_until) 6106 rsm->r_app_limited = 1; 6107 else 6108 rsm->r_app_limited = 0; 6109 rsm->r_first_sent_time = bbr_get_earliest_send_outstanding(bbr, rsm, cts); 6110 rsm->r_flight_at_send = ctf_flight_size(bbr->rc_tp, 6111 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 6112 /* 6113 * Here we must also add in this rsm since snd_max 6114 * is updated after we return from a new send. 6115 */ 6116 rsm->r_flight_at_send += len; 6117 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_map, rsm, r_next); 6118 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 6119 rsm->r_in_tmap = 1; 6120 if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) 6121 rsm->r_bbr_state = bbr_state_val(bbr); 6122 else 6123 rsm->r_bbr_state = 8; 6124 if (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT) { 6125 rsm->r_is_gain = 1; 6126 rsm->r_is_drain = 0; 6127 } else if (bbr->r_ctl.rc_bbr_hptsi_gain < BBR_UNIT) { 6128 rsm->r_is_drain = 1; 6129 rsm->r_is_gain = 0; 6130 } else { 6131 rsm->r_is_drain = 0; 6132 rsm->r_is_gain = 0; 6133 } 6134 return; 6135 } 6136 /* 6137 * If we reach here its a retransmission and we need to find it. 6138 */ 6139 more: 6140 if (hintrsm && (hintrsm->r_start == seq_out)) { 6141 rsm = hintrsm; 6142 hintrsm = NULL; 6143 } else if (bbr->r_ctl.rc_next) { 6144 /* We have a hint from a previous run */ 6145 rsm = bbr->r_ctl.rc_next; 6146 } else { 6147 /* No hints sorry */ 6148 rsm = NULL; 6149 } 6150 if ((rsm) && (rsm->r_start == seq_out)) { 6151 /* 6152 * We used rc_next or hintrsm to retransmit, hopefully the 6153 * likely case. 
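 * bbr_update_entry() marks as much of this rsm as the send covered and
 * returns the sequence where it stopped, updating len to what remains; a
 * len of zero means the whole (re)transmission has been accounted for.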
6154 */ 6155 seq_out = bbr_update_entry(tp, bbr, rsm, cts, &len, pacing_time); 6156 if (len == 0) { 6157 return; 6158 } else { 6159 goto more; 6160 } 6161 } 6162 /* Ok it was not the last pointer go through it the hard way. */ 6163 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) { 6164 if (rsm->r_start == seq_out) { 6165 seq_out = bbr_update_entry(tp, bbr, rsm, cts, &len, pacing_time); 6166 bbr->r_ctl.rc_next = TAILQ_NEXT(rsm, r_next); 6167 if (len == 0) { 6168 return; 6169 } else { 6170 continue; 6171 } 6172 } 6173 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { 6174 /* Transmitted within this piece */ 6175 /* 6176 * Ok we must split off the front and then let the 6177 * update do the rest 6178 */ 6179 nrsm = bbr_alloc_full_limit(bbr); 6180 if (nrsm == NULL) { 6181 bbr_update_rsm(tp, bbr, rsm, cts, pacing_time); 6182 return; 6183 } 6184 /* 6185 * copy rsm to nrsm and then trim the front of rsm 6186 * to not include this part. 6187 */ 6188 bbr_clone_rsm(bbr, nrsm, rsm, seq_out); 6189 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 6190 if (rsm->r_in_tmap) { 6191 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 6192 nrsm->r_in_tmap = 1; 6193 } 6194 rsm->r_flags &= (~BBR_HAS_FIN); 6195 seq_out = bbr_update_entry(tp, bbr, nrsm, cts, &len, pacing_time); 6196 if (len == 0) { 6197 return; 6198 } 6199 } 6200 } 6201 /* 6202 * Hmm not found in map did they retransmit both old and on into the 6203 * new? 6204 */ 6205 if (seq_out == tp->snd_max) { 6206 goto again; 6207 } else if (SEQ_LT(seq_out, tp->snd_max)) { 6208 #ifdef BBR_INVARIANTS 6209 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", 6210 seq_out, len, tp->snd_una, tp->snd_max); 6211 printf("Starting Dump of all rack entries\n"); 6212 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) { 6213 printf("rsm:%p start:%u end:%u\n", 6214 rsm, rsm->r_start, rsm->r_end); 6215 } 6216 printf("Dump complete\n"); 6217 panic("seq_out not found rack:%p tp:%p", 6218 bbr, tp); 6219 #endif 6220 } else { 6221 #ifdef BBR_INVARIANTS 6222 /* 6223 * Hmm beyond sndmax? (only if we are using the new rtt-pack 6224 * flag) 6225 */ 6226 panic("seq_out:%u(%d) is beyond snd_max:%u tp:%p", 6227 seq_out, len, tp->snd_max, tp); 6228 #endif 6229 } 6230 } 6231 6232 static void 6233 bbr_collapse_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, int32_t rtt) 6234 { 6235 /* 6236 * Collapse timeout back the cum-ack moved. 6237 */ 6238 tp->t_rxtshift = 0; 6239 tp->t_softerror = 0; 6240 } 6241 6242 6243 static void 6244 tcp_bbr_xmit_timer(struct tcp_bbr *bbr, uint32_t rtt_usecs, uint32_t rsm_send_time, uint32_t r_start, uint32_t tsin) 6245 { 6246 bbr->rtt_valid = 1; 6247 bbr->r_ctl.cur_rtt = rtt_usecs; 6248 bbr->r_ctl.ts_in = tsin; 6249 if (rsm_send_time) 6250 bbr->r_ctl.cur_rtt_send_time = rsm_send_time; 6251 } 6252 6253 static void 6254 bbr_make_timestamp_determination(struct tcp_bbr *bbr) 6255 { 6256 /** 6257 * We have in our bbr control: 6258 * 1) The timestamp we started observing cum-acks (bbr->r_ctl.bbr_ts_check_tstmp). 6259 * 2) Our timestamp indicating when we sent that packet (bbr->r_ctl.rsm->bbr_ts_check_our_cts). 
6260 * 3) The current timestamp that just came in (bbr->r_ctl.last_inbound_ts) 6261 * 4) The time that the packet that generated that ack was sent (bbr->r_ctl.cur_rtt_send_time) 6262 * 6263 * Now we can calculate the time between the sends by doing: 6264 * 6265 * delta = bbr->r_ctl.cur_rtt_send_time - bbr->r_ctl.bbr_ts_check_our_cts 6266 * 6267 * And the peer's time between receiving them by doing: 6268 * 6269 * peer_delta = bbr->r_ctl.last_inbound_ts - bbr->r_ctl.bbr_ts_check_tstmp 6270 * 6271 * We want to figure out if the timestamp values are in msec, 10msec or usec. 6272 * We also may find that we can't use the timestamps if say we see 6273 * that the peer_delta indicates that though we may have taken 10ms to 6274 * pace out the data, it only saw 1ms between the two packets. This would 6275 * indicate that somewhere on the path is a batching entity that is giving 6276 * out time-slices of the actual b/w. This would mean we could not use 6277 * reliably the peers timestamps. 6278 * 6279 * We expect delta > peer_delta initially. Until we figure out the 6280 * timestamp difference which we will store in bbr->r_ctl.bbr_peer_tsratio. 6281 * If we place 1000 there then its a ms vs our usec. If we place 10000 there 6282 * then its 10ms vs our usec. If the peer is running a usec clock we would 6283 * put a 1 there. If the value is faster then ours, we will disable the 6284 * use of timestamps (though we could revist this later if we find it to be not 6285 * just an isolated one or two flows)). 6286 * 6287 * To detect the batching middle boxes we will come up with our compensation and 6288 * if with it in place, we find the peer is drastically off (by some margin) in 6289 * the smaller direction, then we will assume the worst case and disable use of timestamps. 6290 * 6291 */ 6292 uint64_t delta, peer_delta, delta_up; 6293 6294 delta = bbr->r_ctl.cur_rtt_send_time - bbr->r_ctl.bbr_ts_check_our_cts; 6295 if (delta < bbr_min_usec_delta) { 6296 /* 6297 * Have not seen a min amount of time 6298 * between our send times so we can 6299 * make a determination of the timestamp 6300 * yet. 6301 */ 6302 return; 6303 } 6304 peer_delta = bbr->r_ctl.last_inbound_ts - bbr->r_ctl.bbr_ts_check_tstmp; 6305 if (peer_delta < bbr_min_peer_delta) { 6306 /* 6307 * We may have enough in the form of 6308 * our delta but the peers number 6309 * has not changed that much. It could 6310 * be its clock ratio is such that 6311 * we need more data (10ms tick) or 6312 * there may be other compression scenarios 6313 * going on. In any event we need the 6314 * spread to be larger. 6315 */ 6316 return; 6317 } 6318 /* Ok lets first see which way our delta is going */ 6319 if (peer_delta > delta) { 6320 /* Very unlikely, the peer without 6321 * compensation shows that it saw 6322 * the two sends arrive further apart 6323 * then we saw then in micro-seconds. 6324 */ 6325 if (peer_delta < (delta + ((delta * (uint64_t)1000)/ (uint64_t)bbr_delta_percent))) { 6326 /* well it looks like the peer is a micro-second clock. */ 6327 bbr->rc_ts_clock_set = 1; 6328 bbr->r_ctl.bbr_peer_tsratio = 1; 6329 } else { 6330 bbr->rc_ts_cant_be_used = 1; 6331 bbr->rc_ts_clock_set = 1; 6332 } 6333 return; 6334 } 6335 /* Ok we know that the peer_delta is smaller than our send distance */ 6336 bbr->rc_ts_clock_set = 1; 6337 /* First question is it within the percentage that they are using usec time? 
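 * If not, the same comparison is repeated below with the peer delta scaled
 * by 10, 100, 1000 and 10000 until a granularity (10usec, 100usec, 1msec or
 * 10msec tick) fits; if none does, timestamps are declared unusable.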
*/ 6338 delta_up = (peer_delta * 1000) / (uint64_t)bbr_delta_percent; 6339 if ((peer_delta + delta_up) >= delta) { 6340 /* Its a usec clock */ 6341 bbr->r_ctl.bbr_peer_tsratio = 1; 6342 bbr_log_tstmp_validation(bbr, peer_delta, delta); 6343 return; 6344 } 6345 /* Ok if not usec, what about 10usec (though unlikely)? */ 6346 delta_up = (peer_delta * 1000 * 10) / (uint64_t)bbr_delta_percent; 6347 if (((peer_delta * 10) + delta_up) >= delta) { 6348 bbr->r_ctl.bbr_peer_tsratio = 10; 6349 bbr_log_tstmp_validation(bbr, peer_delta, delta); 6350 return; 6351 } 6352 /* And what about 100usec (though again unlikely)? */ 6353 delta_up = (peer_delta * 1000 * 100) / (uint64_t)bbr_delta_percent; 6354 if (((peer_delta * 100) + delta_up) >= delta) { 6355 bbr->r_ctl.bbr_peer_tsratio = 100; 6356 bbr_log_tstmp_validation(bbr, peer_delta, delta); 6357 return; 6358 } 6359 /* And how about 1 msec (the most likely one)? */ 6360 delta_up = (peer_delta * 1000 * 1000) / (uint64_t)bbr_delta_percent; 6361 if (((peer_delta * 1000) + delta_up) >= delta) { 6362 bbr->r_ctl.bbr_peer_tsratio = 1000; 6363 bbr_log_tstmp_validation(bbr, peer_delta, delta); 6364 return; 6365 } 6366 /* Ok if not msec could it be 10 msec? */ 6367 delta_up = (peer_delta * 1000 * 10000) / (uint64_t)bbr_delta_percent; 6368 if (((peer_delta * 10000) + delta_up) >= delta) { 6369 bbr->r_ctl.bbr_peer_tsratio = 10000; 6370 return; 6371 } 6372 /* If we fall down here the clock tick so slowly we can't use it */ 6373 bbr->rc_ts_cant_be_used = 1; 6374 bbr->r_ctl.bbr_peer_tsratio = 0; 6375 bbr_log_tstmp_validation(bbr, peer_delta, delta); 6376 } 6377 6378 /* 6379 * Collect new round-trip time estimate 6380 * and update averages and current timeout. 6381 */ 6382 static void 6383 tcp_bbr_xmit_timer_commit(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts) 6384 { 6385 int32_t delta; 6386 uint32_t rtt, tsin; 6387 int32_t rtt_ticks; 6388 6389 6390 if (bbr->rtt_valid == 0) 6391 /* No valid sample */ 6392 return; 6393 6394 rtt = bbr->r_ctl.cur_rtt; 6395 tsin = bbr->r_ctl.ts_in; 6396 if (bbr->rc_prtt_set_ts) { 6397 /* 6398 * We are to force feed the rttProp filter due 6399 * to an entry into PROBE_RTT. This assures 6400 * that the times are sync'd between when we 6401 * go into PROBE_RTT and the filter expiration. 6402 * 6403 * Google does not use a true filter, so they do 6404 * this implicitly since they only keep one value 6405 * and when they enter probe-rtt they update the 6406 * value to the newest rtt. 6407 */ 6408 uint32_t rtt_prop; 6409 6410 bbr->rc_prtt_set_ts = 0; 6411 rtt_prop = get_filter_value_small(&bbr->r_ctl.rc_rttprop); 6412 if (rtt > rtt_prop) 6413 filter_increase_by_small(&bbr->r_ctl.rc_rttprop, (rtt - rtt_prop), cts); 6414 else 6415 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts); 6416 } 6417 if (bbr->rc_ack_was_delayed) 6418 rtt += bbr->r_ctl.rc_ack_hdwr_delay; 6419 6420 if (rtt < bbr->r_ctl.rc_lowest_rtt) 6421 bbr->r_ctl.rc_lowest_rtt = rtt; 6422 bbr_log_rtt_sample(bbr, rtt, tsin); 6423 if (bbr->r_init_rtt) { 6424 /* 6425 * The initial rtt is not-trusted, nuke it and lets get 6426 * our first valid measurement in. 6427 */ 6428 bbr->r_init_rtt = 0; 6429 tp->t_srtt = 0; 6430 } 6431 if ((bbr->rc_ts_clock_set == 0) && bbr->rc_ts_valid) { 6432 /* 6433 * So we have not yet figured out 6434 * what the peers TSTMP value is 6435 * in (most likely ms). We need a 6436 * series of cum-ack's to determine 6437 * this reliably. 
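 * It takes two back-to-back cum-acks: the first snapshots the peer's
 * timestamp and our send time, the second supplies the deltas that
 * bbr_make_timestamp_determination() compares; any non-cumulative ack in
 * between resets the state.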
6438 */ 6439 if (bbr->rc_ack_is_cumack) { 6440 if (bbr->rc_ts_data_set) { 6441 /* Lets attempt to determine the timestamp granularity. */ 6442 bbr_make_timestamp_determination(bbr); 6443 } else { 6444 bbr->rc_ts_data_set = 1; 6445 bbr->r_ctl.bbr_ts_check_tstmp = bbr->r_ctl.last_inbound_ts; 6446 bbr->r_ctl.bbr_ts_check_our_cts = bbr->r_ctl.cur_rtt_send_time; 6447 } 6448 } else { 6449 /* 6450 * We have to have consecutive acks 6451 * reset any "filled" state to none. 6452 */ 6453 bbr->rc_ts_data_set = 0; 6454 } 6455 } 6456 /* Round it up */ 6457 rtt_ticks = USEC_2_TICKS((rtt + (USECS_IN_MSEC - 1))); 6458 if (rtt_ticks == 0) 6459 rtt_ticks = 1; 6460 if (tp->t_srtt != 0) { 6461 /* 6462 * srtt is stored as fixed point with 5 bits after the 6463 * binary point (i.e., scaled by 8). The following magic is 6464 * equivalent to the smoothing algorithm in rfc793 with an 6465 * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point). 6466 * Adjust rtt to origin 0. 6467 */ 6468 6469 delta = ((rtt_ticks - 1) << TCP_DELTA_SHIFT) 6470 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 6471 6472 tp->t_srtt += delta; 6473 if (tp->t_srtt <= 0) 6474 tp->t_srtt = 1; 6475 6476 /* 6477 * We accumulate a smoothed rtt variance (actually, a 6478 * smoothed mean difference), then set the retransmit timer 6479 * to smoothed rtt + 4 times the smoothed variance. rttvar 6480 * is stored as fixed point with 4 bits after the binary 6481 * point (scaled by 16). The following is equivalent to 6482 * rfc793 smoothing with an alpha of .75 (rttvar = 6483 * rttvar*3/4 + |delta| / 4). This replaces rfc793's 6484 * wired-in beta. 6485 */ 6486 if (delta < 0) 6487 delta = -delta; 6488 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 6489 tp->t_rttvar += delta; 6490 if (tp->t_rttvar <= 0) 6491 tp->t_rttvar = 1; 6492 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 6493 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 6494 } else { 6495 /* 6496 * No rtt measurement yet - use the unsmoothed rtt. Set the 6497 * variance to half the rtt (so our first retransmit happens 6498 * at 3*rtt). 6499 */ 6500 tp->t_srtt = rtt_ticks << TCP_RTT_SHIFT; 6501 tp->t_rttvar = rtt_ticks << (TCP_RTTVAR_SHIFT - 1); 6502 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 6503 } 6504 KMOD_TCPSTAT_INC(tcps_rttupdated); 6505 tp->t_rttupdated++; 6506 #ifdef STATS 6507 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt_ticks)); 6508 #endif 6509 /* 6510 * the retransmit should happen at rtt + 4 * rttvar. Because of the 6511 * way we do the smoothing, srtt and rttvar will each average +1/2 6512 * tick of bias. When we compute the retransmit timer, we want 1/2 6513 * tick of rounding and 1 extra tick because of +-1/2 tick 6514 * uncertainty in the firing of the timer. The bias will give us 6515 * exactly the 1.5 tick we need. But, because the bias is 6516 * statistical, we have to test that we don't drop below the minimum 6517 * feasible timer (which is 2 ticks). 6518 */ 6519 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 6520 max(MSEC_2_TICKS(bbr->r_ctl.rc_min_rto_ms), rtt_ticks + 2), 6521 MSEC_2_TICKS(((uint32_t)bbr->rc_max_rto_sec) * 1000)); 6522 6523 /* 6524 * We received an ack for a packet that wasn't retransmitted; it is 6525 * probably safe to discard any error indications we've received 6526 * recently. This isn't quite right, but close enough for now (a 6527 * route might have failed after we sent a segment, and the return 6528 * path might not be symmetrical). 
6529 */ 6530 tp->t_softerror = 0; 6531 rtt = (TICKS_2_USEC(bbr->rc_tp->t_srtt) >> TCP_RTT_SHIFT); 6532 if (bbr->r_ctl.bbr_smallest_srtt_this_state > rtt) 6533 bbr->r_ctl.bbr_smallest_srtt_this_state = rtt; 6534 } 6535 6536 static void 6537 bbr_earlier_retran(struct tcpcb *tp, struct tcp_bbr *bbr, struct bbr_sendmap *rsm, 6538 uint32_t t, uint32_t cts, int ack_type) 6539 { 6540 /* 6541 * For this RSM, we acknowledged the data from a previous 6542 * transmission, not the last one we made. This means we did a false 6543 * retransmit. 6544 */ 6545 if (rsm->r_flags & BBR_HAS_FIN) { 6546 /* 6547 * The sending of the FIN often is multiple sent when we 6548 * have everything outstanding ack'd. We ignore this case 6549 * since its over now. 6550 */ 6551 return; 6552 } 6553 if (rsm->r_flags & BBR_TLP) { 6554 /* 6555 * We expect TLP's to have this occur often 6556 */ 6557 bbr->rc_tlp_rtx_out = 0; 6558 return; 6559 } 6560 if (ack_type != BBR_CUM_ACKED) { 6561 /* 6562 * If it was not a cum-ack we 6563 * don't really know for sure since 6564 * the timestamp could be from some 6565 * other transmission. 6566 */ 6567 return; 6568 } 6569 6570 if (rsm->r_flags & BBR_WAS_SACKPASS) { 6571 /* 6572 * We retransmitted based on a sack and the earlier 6573 * retransmission ack'd it - re-ordering is occuring. 6574 */ 6575 BBR_STAT_INC(bbr_reorder_seen); 6576 bbr->r_ctl.rc_reorder_ts = cts; 6577 } 6578 /* Back down the loss count */ 6579 if (rsm->r_flags & BBR_MARKED_LOST) { 6580 bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start; 6581 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 6582 rsm->r_flags &= ~BBR_MARKED_LOST; 6583 if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost)) 6584 /* LT sampling also needs adjustment */ 6585 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 6586 } 6587 /***** RRS HERE ************************/ 6588 /* Do we need to do this??? */ 6589 /* bbr_reset_lt_bw_sampling(bbr, cts); */ 6590 /***** RRS HERE ************************/ 6591 BBR_STAT_INC(bbr_badfr); 6592 BBR_STAT_ADD(bbr_badfr_bytes, (rsm->r_end - rsm->r_start)); 6593 } 6594 6595 6596 static void 6597 bbr_set_reduced_rtt(struct tcp_bbr *bbr, uint32_t cts, uint32_t line) 6598 { 6599 bbr->r_ctl.rc_rtt_shrinks = cts; 6600 if (bbr_can_force_probertt && 6601 (TSTMP_GT(cts, bbr->r_ctl.last_in_probertt)) && 6602 ((cts - bbr->r_ctl.last_in_probertt) > bbr->r_ctl.rc_probertt_int)) { 6603 /* 6604 * We should enter probe-rtt its been too long 6605 * since we have been there. 6606 */ 6607 bbr_enter_probe_rtt(bbr, cts, __LINE__); 6608 } else 6609 bbr_check_probe_rtt_limits(bbr, cts); 6610 } 6611 6612 static void 6613 tcp_bbr_commit_bw(struct tcp_bbr *bbr, uint32_t cts) 6614 { 6615 uint64_t orig_bw; 6616 6617 if (bbr->r_ctl.rc_bbr_cur_del_rate == 0) { 6618 /* We never apply a zero measurment */ 6619 bbr_log_type_bbrupd(bbr, 20, cts, 0, 0, 6620 0, 0, 0, 0, 0, 0); 6621 return; 6622 } 6623 if (bbr->r_ctl.r_measurement_count < 0xffffffff) 6624 bbr->r_ctl.r_measurement_count++; 6625 orig_bw = get_filter_value(&bbr->r_ctl.rc_delrate); 6626 apply_filter_max(&bbr->r_ctl.rc_delrate, bbr->r_ctl.rc_bbr_cur_del_rate, bbr->r_ctl.rc_pkt_epoch); 6627 bbr_log_type_bbrupd(bbr, 21, cts, (uint32_t)orig_bw, 6628 (uint32_t)get_filter_value(&bbr->r_ctl.rc_delrate), 6629 0, 0, 0, 0, 0, 0); 6630 if (orig_bw && 6631 (orig_bw != get_filter_value(&bbr->r_ctl.rc_delrate))) { 6632 if (bbr->bbr_hdrw_pacing) { 6633 /* 6634 * Apply a new rate to the hardware 6635 * possibly. 
6636 */ 6637 bbr_update_hardware_pacing_rate(bbr, cts); 6638 } 6639 bbr_set_state_target(bbr, __LINE__); 6640 tcp_bbr_tso_size_check(bbr, cts); 6641 if (bbr->r_recovery_bw) { 6642 bbr_setup_red_bw(bbr, cts); 6643 bbr_log_type_bw_reduce(bbr, BBR_RED_BW_USELRBW); 6644 } 6645 } else if ((orig_bw == 0) && get_filter_value(&bbr->r_ctl.rc_delrate)) 6646 tcp_bbr_tso_size_check(bbr, cts); 6647 } 6648 6649 static void 6650 bbr_nf_measurement(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts) 6651 { 6652 if (bbr->rc_in_persist == 0) { 6653 /* We log only when not in persist */ 6654 /* Translate to a Bytes Per Second */ 6655 uint64_t tim, bw, ts_diff, ts_bw; 6656 uint32_t upper, lower, delivered; 6657 6658 if (TSTMP_GT(bbr->r_ctl.rc_del_time, rsm->r_del_time)) 6659 tim = (uint64_t)(bbr->r_ctl.rc_del_time - rsm->r_del_time); 6660 else 6661 tim = 1; 6662 /* 6663 * Now that we have processed the tim (skipping the sample 6664 * or possibly updating the time, go ahead and 6665 * calculate the cdr. 6666 */ 6667 delivered = (bbr->r_ctl.rc_delivered - rsm->r_delivered); 6668 bw = (uint64_t)delivered; 6669 bw *= (uint64_t)USECS_IN_SECOND; 6670 bw /= tim; 6671 if (bw == 0) { 6672 /* We must have a calculatable amount */ 6673 return; 6674 } 6675 upper = (bw >> 32) & 0x00000000ffffffff; 6676 lower = bw & 0x00000000ffffffff; 6677 /* 6678 * If we are using this b/w shove it in now so we 6679 * can see in the trace viewer if it gets over-ridden. 6680 */ 6681 if (rsm->r_ts_valid && 6682 bbr->rc_ts_valid && 6683 bbr->rc_ts_clock_set && 6684 (bbr->rc_ts_cant_be_used == 0) && 6685 bbr->rc_use_ts_limit) { 6686 ts_diff = max((bbr->r_ctl.last_inbound_ts - rsm->r_del_ack_ts), 1); 6687 ts_diff *= bbr->r_ctl.bbr_peer_tsratio; 6688 if ((delivered == 0) || 6689 (rtt < 1000)) { 6690 /* Can't use the ts */ 6691 bbr_log_type_bbrupd(bbr, 61, cts, 6692 ts_diff, 6693 bbr->r_ctl.last_inbound_ts, 6694 rsm->r_del_ack_ts, 0, 6695 0, 0, 0, delivered); 6696 } else { 6697 ts_bw = (uint64_t)delivered; 6698 ts_bw *= (uint64_t)USECS_IN_SECOND; 6699 ts_bw /= ts_diff; 6700 bbr_log_type_bbrupd(bbr, 62, cts, 6701 (ts_bw >> 32), 6702 (ts_bw & 0xffffffff), 0, 0, 6703 0, 0, ts_diff, delivered); 6704 if ((bbr->ts_can_raise) && 6705 (ts_bw > bw)) { 6706 bbr_log_type_bbrupd(bbr, 8, cts, 6707 delivered, 6708 ts_diff, 6709 (bw >> 32), 6710 (bw & 0x00000000ffffffff), 6711 0, 0, 0, 0); 6712 bw = ts_bw; 6713 } else if (ts_bw && (ts_bw < bw)) { 6714 bbr_log_type_bbrupd(bbr, 7, cts, 6715 delivered, 6716 ts_diff, 6717 (bw >> 32), 6718 (bw & 0x00000000ffffffff), 6719 0, 0, 0, 0); 6720 bw = ts_bw; 6721 } 6722 } 6723 } 6724 if (rsm->r_first_sent_time && 6725 TSTMP_GT(rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)],rsm->r_first_sent_time)) { 6726 uint64_t sbw, sti; 6727 /* 6728 * We use what was in flight at the time of our 6729 * send and the size of this send to figure 6730 * out what we have been sending at (amount). 6731 * For the time we take from the time of 6732 * the send of the first send outstanding 6733 * until this send plus this sends pacing 6734 * time. This gives us a good calculation 6735 * as to the rate we have been sending at. 
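 * For example, 14,480 bytes in flight sent over a 10,000 usec span works
 * out to roughly 1.45 MB/sec; if that send rate is below the delivery-rate
 * sample computed above, the lower figure is used.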
6736 */ 6737 6738 sbw = (uint64_t)(rsm->r_flight_at_send); 6739 sbw *= (uint64_t)USECS_IN_SECOND; 6740 sti = rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)] - rsm->r_first_sent_time; 6741 sti += rsm->r_pacing_delay; 6742 sbw /= sti; 6743 if (sbw < bw) { 6744 bbr_log_type_bbrupd(bbr, 6, cts, 6745 delivered, 6746 (uint32_t)sti, 6747 (bw >> 32), 6748 (uint32_t)bw, 6749 rsm->r_first_sent_time, 0, (sbw >> 32), 6750 (uint32_t)sbw); 6751 bw = sbw; 6752 } 6753 } 6754 /* Use the google algorithm for b/w measurements */ 6755 bbr->r_ctl.rc_bbr_cur_del_rate = bw; 6756 if ((rsm->r_app_limited == 0) || 6757 (bw > get_filter_value(&bbr->r_ctl.rc_delrate))) { 6758 tcp_bbr_commit_bw(bbr, cts); 6759 bbr_log_type_bbrupd(bbr, 10, cts, (uint32_t)tim, delivered, 6760 0, 0, 0, 0, bbr->r_ctl.rc_del_time, rsm->r_del_time); 6761 } 6762 } 6763 } 6764 6765 static void 6766 bbr_google_measurement(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts) 6767 { 6768 if (bbr->rc_in_persist == 0) { 6769 /* We log only when not in persist */ 6770 /* Translate to a Bytes Per Second */ 6771 uint64_t tim, bw; 6772 uint32_t upper, lower, delivered; 6773 int no_apply = 0; 6774 6775 if (TSTMP_GT(bbr->r_ctl.rc_del_time, rsm->r_del_time)) 6776 tim = (uint64_t)(bbr->r_ctl.rc_del_time - rsm->r_del_time); 6777 else 6778 tim = 1; 6779 /* 6780 * Now that we have processed the tim (skipping the sample 6781 * or possibly updating the time, go ahead and 6782 * calculate the cdr. 6783 */ 6784 delivered = (bbr->r_ctl.rc_delivered - rsm->r_delivered); 6785 bw = (uint64_t)delivered; 6786 bw *= (uint64_t)USECS_IN_SECOND; 6787 bw /= tim; 6788 if (tim < bbr->r_ctl.rc_lowest_rtt) { 6789 bbr_log_type_bbrupd(bbr, 99, cts, (uint32_t)tim, delivered, 6790 tim, bbr->r_ctl.rc_lowest_rtt, 0, 0, 0, 0); 6791 6792 no_apply = 1; 6793 } 6794 upper = (bw >> 32) & 0x00000000ffffffff; 6795 lower = bw & 0x00000000ffffffff; 6796 /* 6797 * If we are using this b/w shove it in now so we 6798 * can see in the trace viewer if it gets over-ridden. 6799 */ 6800 bbr->r_ctl.rc_bbr_cur_del_rate = bw; 6801 /* Gate by the sending rate */ 6802 if (rsm->r_first_sent_time && 6803 TSTMP_GT(rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)],rsm->r_first_sent_time)) { 6804 uint64_t sbw, sti; 6805 /* 6806 * We use what was in flight at the time of our 6807 * send and the size of this send to figure 6808 * out what we have been sending at (amount). 6809 * For the time we take from the time of 6810 * the send of the first send outstanding 6811 * until this send plus this sends pacing 6812 * time. This gives us a good calculation 6813 * as to the rate we have been sending at. 
6814 */ 6815 6816 sbw = (uint64_t)(rsm->r_flight_at_send); 6817 sbw *= (uint64_t)USECS_IN_SECOND; 6818 sti = rsm->r_tim_lastsent[(rsm->r_rtr_cnt -1)] - rsm->r_first_sent_time; 6819 sti += rsm->r_pacing_delay; 6820 sbw /= sti; 6821 if (sbw < bw) { 6822 bbr_log_type_bbrupd(bbr, 6, cts, 6823 delivered, 6824 (uint32_t)sti, 6825 (bw >> 32), 6826 (uint32_t)bw, 6827 rsm->r_first_sent_time, 0, (sbw >> 32), 6828 (uint32_t)sbw); 6829 bw = sbw; 6830 } 6831 if ((sti > tim) && 6832 (sti < bbr->r_ctl.rc_lowest_rtt)) { 6833 bbr_log_type_bbrupd(bbr, 99, cts, (uint32_t)tim, delivered, 6834 (uint32_t)sti, bbr->r_ctl.rc_lowest_rtt, 0, 0, 0, 0); 6835 no_apply = 1; 6836 } else 6837 no_apply = 0; 6838 } 6839 bbr->r_ctl.rc_bbr_cur_del_rate = bw; 6840 if ((no_apply == 0) && 6841 ((rsm->r_app_limited == 0) || 6842 (bw > get_filter_value(&bbr->r_ctl.rc_delrate)))) { 6843 tcp_bbr_commit_bw(bbr, cts); 6844 bbr_log_type_bbrupd(bbr, 10, cts, (uint32_t)tim, delivered, 6845 0, 0, 0, 0, bbr->r_ctl.rc_del_time, rsm->r_del_time); 6846 } 6847 } 6848 } 6849 6850 6851 static void 6852 bbr_update_bbr_info(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, uint32_t rtt, uint32_t cts, uint32_t tsin, 6853 uint32_t uts, int32_t match, uint32_t rsm_send_time, int32_t ack_type, struct tcpopt *to) 6854 { 6855 uint64_t old_rttprop; 6856 6857 /* Update our delivery time and amount */ 6858 bbr->r_ctl.rc_delivered += (rsm->r_end - rsm->r_start); 6859 bbr->r_ctl.rc_del_time = cts; 6860 if (rtt == 0) { 6861 /* 6862 * 0 means its a retransmit, for now we don't use these for 6863 * the rest of BBR. 6864 */ 6865 return; 6866 } 6867 if ((bbr->rc_use_google == 0) && 6868 (match != BBR_RTT_BY_EXACTMATCH) && 6869 (match != BBR_RTT_BY_TIMESTAMP)){ 6870 /* 6871 * We get a lot of rtt updates, lets not pay attention to 6872 * any that are not an exact match. That way we don't have 6873 * to worry about timestamps and the whole nonsense of 6874 * unsure if its a retransmission etc (if we ever had the 6875 * timestamp fixed to always have the last thing sent this 6876 * would not be a issue). 6877 */ 6878 return; 6879 } 6880 if ((bbr_no_retran && bbr->rc_use_google) && 6881 (match != BBR_RTT_BY_EXACTMATCH) && 6882 (match != BBR_RTT_BY_TIMESTAMP)){ 6883 /* 6884 * We only do measurements in google mode 6885 * with bbr_no_retran on for sure things. 6886 */ 6887 return; 6888 } 6889 /* Only update srtt if we know by exact match */ 6890 tcp_bbr_xmit_timer(bbr, rtt, rsm_send_time, rsm->r_start, tsin); 6891 if (ack_type == BBR_CUM_ACKED) 6892 bbr->rc_ack_is_cumack = 1; 6893 else 6894 bbr->rc_ack_is_cumack = 0; 6895 old_rttprop = bbr_get_rtt(bbr, BBR_RTT_PROP); 6896 /* 6897 * Note the following code differs to the original 6898 * BBR spec. It calls for <= not <. However after a 6899 * long discussion in email with Neal, he acknowledged 6900 * that it should be < than so that we will have flows 6901 * going into probe-rtt (we were seeing cases where that 6902 * did not happen and caused ugly things to occur). We 6903 * have added this agreed upon fix to our code base. 6904 */ 6905 if (rtt < old_rttprop) { 6906 /* Update when we last saw a rtt drop */ 6907 bbr_log_rtt_shrinks(bbr, cts, 0, rtt, __LINE__, BBR_RTTS_NEWRTT, 0); 6908 bbr_set_reduced_rtt(bbr, cts, __LINE__); 6909 } 6910 bbr_log_type_bbrrttprop(bbr, rtt, (rsm ? 
rsm->r_end : 0), uts, cts, 6911 match, rsm->r_start, rsm->r_flags); 6912 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts); 6913 if (old_rttprop != bbr_get_rtt(bbr, BBR_RTT_PROP)) { 6914 /* 6915 * The RTT-prop moved, reset the target (may be a 6916 * nop for some states). 6917 */ 6918 bbr_set_state_target(bbr, __LINE__); 6919 if (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) 6920 bbr_log_rtt_shrinks(bbr, cts, 0, 0, 6921 __LINE__, BBR_RTTS_NEW_TARGET, 0); 6922 else if (old_rttprop < bbr_get_rtt(bbr, BBR_RTT_PROP)) 6923 /* It went up */ 6924 bbr_check_probe_rtt_limits(bbr, cts); 6925 } 6926 if ((bbr->rc_use_google == 0) && 6927 (match == BBR_RTT_BY_TIMESTAMP)) { 6928 /* 6929 * We don't do b/w update with 6930 * these since they are not really 6931 * reliable. 6932 */ 6933 return; 6934 } 6935 if (bbr->r_ctl.r_app_limited_until && 6936 (bbr->r_ctl.rc_delivered >= bbr->r_ctl.r_app_limited_until)) { 6937 /* We are no longer app-limited */ 6938 bbr->r_ctl.r_app_limited_until = 0; 6939 } 6940 if (bbr->rc_use_google) { 6941 bbr_google_measurement(bbr, rsm, rtt, cts); 6942 } else { 6943 bbr_nf_measurement(bbr, rsm, rtt, cts); 6944 } 6945 } 6946 6947 /* 6948 * Convert a timestamp that the main stack 6949 * uses (milliseconds) into one that bbr uses 6950 * (microseconds). Return that converted timestamp. 6951 */ 6952 static uint32_t 6953 bbr_ts_convert(uint32_t cts) { 6954 uint32_t sec, msec; 6955 6956 sec = cts / MS_IN_USEC; 6957 msec = cts - (MS_IN_USEC * sec); 6958 return ((sec * USECS_IN_SECOND) + (msec * MS_IN_USEC)); 6959 } 6960 6961 /* 6962 * Return 0 if we did not update the RTT time, return 6963 * 1 if we did. 6964 */ 6965 static int 6966 bbr_update_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, 6967 struct bbr_sendmap *rsm, struct tcpopt *to, uint32_t cts, int32_t ack_type, uint32_t th_ack) 6968 { 6969 int32_t i; 6970 uint32_t t, uts = 0; 6971 6972 if ((rsm->r_flags & BBR_ACKED) || 6973 (rsm->r_flags & BBR_WAS_RENEGED) || 6974 (rsm->r_flags & BBR_RXT_CLEARED)) { 6975 /* Already done */ 6976 return (0); 6977 } 6978 if (rsm->r_rtr_cnt == 1) { 6979 /* 6980 * Only one transmit. Hopefully the normal case. 6981 */ 6982 if (TSTMP_GT(cts, rsm->r_tim_lastsent[0])) 6983 t = cts - rsm->r_tim_lastsent[0]; 6984 else 6985 t = 1; 6986 if ((int)t <= 0) 6987 t = 1; 6988 bbr->r_ctl.rc_last_rtt = t; 6989 bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, 0, 6990 BBR_RTT_BY_EXACTMATCH, rsm->r_tim_lastsent[0], ack_type, to); 6991 return (1); 6992 } 6993 /* Convert to usecs */ 6994 if ((bbr_can_use_ts_for_rtt == 1) && 6995 (bbr->rc_use_google == 1) && 6996 (ack_type == BBR_CUM_ACKED) && 6997 (to->to_flags & TOF_TS) && 6998 (to->to_tsecr != 0)) { 6999 7000 t = tcp_tv_to_mssectick(&bbr->rc_tv) - to->to_tsecr; 7001 if (t < 1) 7002 t = 1; 7003 t *= MS_IN_USEC; 7004 bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, 0, 7005 BBR_RTT_BY_TIMESTAMP, 7006 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)], 7007 ack_type, to); 7008 return (1); 7009 } 7010 uts = bbr_ts_convert(to->to_tsecr); 7011 if ((to->to_flags & TOF_TS) && 7012 (to->to_tsecr != 0) && 7013 (ack_type == BBR_CUM_ACKED) && 7014 ((rsm->r_flags & BBR_OVERMAX) == 0)) { 7015 /* 7016 * Now which timestamp does it match? In this block the ACK 7017 * may be coming from a previous transmission. 
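 * We walk each recorded transmit time and accept the first one
 * whose value is within BBR_TIMER_FUDGE of the echoed timestamp;
 * the RTT is then measured from that specific transmission.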
7018 */ 7019 uint32_t fudge; 7020 7021 fudge = BBR_TIMER_FUDGE; 7022 for (i = 0; i < rsm->r_rtr_cnt; i++) { 7023 if ((SEQ_GEQ(uts, (rsm->r_tim_lastsent[i] - fudge))) && 7024 (SEQ_LEQ(uts, (rsm->r_tim_lastsent[i] + fudge)))) { 7025 if (TSTMP_GT(cts, rsm->r_tim_lastsent[i])) 7026 t = cts - rsm->r_tim_lastsent[i]; 7027 else 7028 t = 1; 7029 if ((int)t <= 0) 7030 t = 1; 7031 bbr->r_ctl.rc_last_rtt = t; 7032 bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts, BBR_RTT_BY_TSMATCHING, 7033 rsm->r_tim_lastsent[i], ack_type, to); 7034 if ((i + 1) < rsm->r_rtr_cnt) { 7035 /* Likely */ 7036 bbr_earlier_retran(tp, bbr, rsm, t, cts, ack_type); 7037 } else if (rsm->r_flags & BBR_TLP) { 7038 bbr->rc_tlp_rtx_out = 0; 7039 } 7040 return (1); 7041 } 7042 } 7043 /* Fall through if we can't find a matching timestamp */ 7044 } 7045 /* 7046 * Ok its a SACK block that we retransmitted. or a windows 7047 * machine without timestamps. We can tell nothing from the 7048 * time-stamp since its not there or the time the peer last 7049 * recieved a segment that moved forward its cum-ack point. 7050 * 7051 * Lets look at the last retransmit and see what we can tell 7052 * (with BBR for space we only keep 2 note we have to keep 7053 * at least 2 so the map can not be condensed more). 7054 */ 7055 i = rsm->r_rtr_cnt - 1; 7056 if (TSTMP_GT(cts, rsm->r_tim_lastsent[i])) 7057 t = cts - rsm->r_tim_lastsent[i]; 7058 else 7059 goto not_sure; 7060 if (t < bbr->r_ctl.rc_lowest_rtt) { 7061 /* 7062 * We retransmitted and the ack came back in less 7063 * than the smallest rtt we have observed in the 7064 * windowed rtt. We most likey did an improper 7065 * retransmit as outlined in 4.2 Step 3 point 2 in 7066 * the rack-draft. 7067 * 7068 * Use the prior transmission to update all the 7069 * information as long as there is only one prior 7070 * transmission. 7071 */ 7072 if ((rsm->r_flags & BBR_OVERMAX) == 0) { 7073 #ifdef BBR_INVARIANTS 7074 if (rsm->r_rtr_cnt == 1) 7075 panic("rsm:%p bbr:%p rsm has overmax and only 1 retranmit flags:%x?", rsm, bbr, rsm->r_flags); 7076 #endif 7077 i = rsm->r_rtr_cnt - 2; 7078 if (TSTMP_GT(cts, rsm->r_tim_lastsent[i])) 7079 t = cts - rsm->r_tim_lastsent[i]; 7080 else 7081 t = 1; 7082 bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts, BBR_RTT_BY_EARLIER_RET, 7083 rsm->r_tim_lastsent[i], ack_type, to); 7084 bbr_earlier_retran(tp, bbr, rsm, t, cts, ack_type); 7085 } else { 7086 /* 7087 * Too many prior transmissions, just 7088 * updated BBR delivered 7089 */ 7090 not_sure: 7091 bbr_update_bbr_info(bbr, rsm, 0, cts, to->to_tsecr, uts, 7092 BBR_RTT_BY_SOME_RETRAN, 0, ack_type, to); 7093 } 7094 } else { 7095 /* 7096 * We retransmitted it and the retransmit did the 7097 * job. 7098 */ 7099 if (rsm->r_flags & BBR_TLP) 7100 bbr->rc_tlp_rtx_out = 0; 7101 if ((rsm->r_flags & BBR_OVERMAX) == 0) 7102 bbr_update_bbr_info(bbr, rsm, t, cts, to->to_tsecr, uts, 7103 BBR_RTT_BY_THIS_RETRAN, 0, ack_type, to); 7104 else 7105 bbr_update_bbr_info(bbr, rsm, 0, cts, to->to_tsecr, uts, 7106 BBR_RTT_BY_SOME_RETRAN, 0, ack_type, to); 7107 return (1); 7108 } 7109 return (0); 7110 } 7111 7112 /* 7113 * Mark the SACK_PASSED flag on all entries prior to rsm send wise. 
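 * We walk the time-ordered map backwards from rsm; every earlier
 * unacked entry gets BBR_SACK_PASSED, and any that now qualify via
 * bbr_is_lost() are charged to rc_lost/rc_lost_bytes and marked
 * lost. We stop at the first entry already marked (handled before).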
7114 */ 7115 static void 7116 bbr_log_sack_passed(struct tcpcb *tp, 7117 struct tcp_bbr *bbr, struct bbr_sendmap *rsm) 7118 { 7119 struct bbr_sendmap *nrsm; 7120 7121 nrsm = rsm; 7122 TAILQ_FOREACH_REVERSE_FROM(nrsm, &bbr->r_ctl.rc_tmap, 7123 bbr_head, r_tnext) { 7124 if (nrsm == rsm) { 7125 /* Skip orginal segment he is acked */ 7126 continue; 7127 } 7128 if (nrsm->r_flags & BBR_ACKED) { 7129 /* Skip ack'd segments */ 7130 continue; 7131 } 7132 if (nrsm->r_flags & BBR_SACK_PASSED) { 7133 /* 7134 * We found one that is already marked 7135 * passed, we have been here before and 7136 * so all others below this are marked. 7137 */ 7138 break; 7139 } 7140 BBR_STAT_INC(bbr_sack_passed); 7141 nrsm->r_flags |= BBR_SACK_PASSED; 7142 if (((nrsm->r_flags & BBR_MARKED_LOST) == 0) && 7143 bbr_is_lost(bbr, nrsm, bbr->r_ctl.rc_rcvtime)) { 7144 bbr->r_ctl.rc_lost += nrsm->r_end - nrsm->r_start; 7145 bbr->r_ctl.rc_lost_bytes += nrsm->r_end - nrsm->r_start; 7146 nrsm->r_flags |= BBR_MARKED_LOST; 7147 } 7148 nrsm->r_flags &= ~BBR_WAS_SACKPASS; 7149 } 7150 } 7151 7152 /* 7153 * Returns the number of bytes that were 7154 * newly ack'd by sack blocks. 7155 */ 7156 static uint32_t 7157 bbr_proc_sack_blk(struct tcpcb *tp, struct tcp_bbr *bbr, struct sackblk *sack, 7158 struct tcpopt *to, struct bbr_sendmap **prsm, uint32_t cts) 7159 { 7160 int32_t times = 0; 7161 uint32_t start, end, maxseg, changed = 0; 7162 struct bbr_sendmap *rsm, *nrsm; 7163 int32_t used_ref = 1; 7164 uint8_t went_back = 0, went_fwd = 0; 7165 7166 maxseg = tp->t_maxseg - bbr->rc_last_options; 7167 start = sack->start; 7168 end = sack->end; 7169 rsm = *prsm; 7170 if (rsm == NULL) 7171 used_ref = 0; 7172 7173 /* Do we locate the block behind where we last were? */ 7174 if (rsm && SEQ_LT(start, rsm->r_start)) { 7175 went_back = 1; 7176 TAILQ_FOREACH_REVERSE_FROM(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) { 7177 if (SEQ_GEQ(start, rsm->r_start) && 7178 SEQ_LT(start, rsm->r_end)) { 7179 goto do_rest_ofb; 7180 } 7181 } 7182 } 7183 start_at_beginning: 7184 went_fwd = 1; 7185 /* 7186 * Ok lets locate the block where this guy is fwd from rsm (if its 7187 * set) 7188 */ 7189 TAILQ_FOREACH_FROM(rsm, &bbr->r_ctl.rc_map, r_next) { 7190 if (SEQ_GEQ(start, rsm->r_start) && 7191 SEQ_LT(start, rsm->r_end)) { 7192 break; 7193 } 7194 } 7195 do_rest_ofb: 7196 if (rsm == NULL) { 7197 /* 7198 * This happens when we get duplicate sack blocks with the 7199 * same end. For example SACK 4: 100 SACK 3: 100 The sort 7200 * will not change there location so we would just start at 7201 * the end of the first one and get lost. 7202 */ 7203 if (tp->t_flags & TF_SENTFIN) { 7204 /* 7205 * Check to see if we have not logged the FIN that 7206 * went out. 7207 */ 7208 nrsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next); 7209 if (nrsm && (nrsm->r_end + 1) == tp->snd_max) { 7210 /* 7211 * Ok we did not get the FIN logged. 7212 */ 7213 nrsm->r_end++; 7214 rsm = nrsm; 7215 goto do_rest_ofb; 7216 } 7217 } 7218 if (times == 1) { 7219 #ifdef BBR_INVARIANTS 7220 panic("tp:%p bbr:%p sack:%p to:%p prsm:%p", 7221 tp, bbr, sack, to, prsm); 7222 #else 7223 goto out; 7224 #endif 7225 } 7226 times++; 7227 BBR_STAT_INC(bbr_sack_proc_restart); 7228 rsm = NULL; 7229 goto start_at_beginning; 7230 } 7231 /* Ok we have an ACK for some piece of rsm */ 7232 if (rsm->r_start != start) { 7233 /* 7234 * Need to split this in two pieces the before and after. 
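 * The clone (nrsm) takes over from 'start' onward and becomes the
 * piece we mark below; the original rsm keeps the un-sacked head.
 * If we cannot allocate the clone we tell the sack filter to
 * reject this block and give up on it.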
7235 */ 7236 if (bbr_sack_mergable(rsm, start, end)) 7237 nrsm = bbr_alloc_full_limit(bbr); 7238 else 7239 nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT); 7240 if (nrsm == NULL) { 7241 /* We could not allocate ignore the sack */ 7242 struct sackblk blk; 7243 7244 blk.start = start; 7245 blk.end = end; 7246 sack_filter_reject(&bbr->r_ctl.bbr_sf, &blk); 7247 goto out; 7248 } 7249 bbr_clone_rsm(bbr, nrsm, rsm, start); 7250 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 7251 if (rsm->r_in_tmap) { 7252 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7253 nrsm->r_in_tmap = 1; 7254 } 7255 rsm->r_flags &= (~BBR_HAS_FIN); 7256 rsm = nrsm; 7257 } 7258 if (SEQ_GEQ(end, rsm->r_end)) { 7259 /* 7260 * The end of this block is either beyond this guy or right 7261 * at this guy. 7262 */ 7263 if ((rsm->r_flags & BBR_ACKED) == 0) { 7264 bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_SACKED, 0); 7265 changed += (rsm->r_end - rsm->r_start); 7266 bbr->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 7267 bbr_log_sack_passed(tp, bbr, rsm); 7268 if (rsm->r_flags & BBR_MARKED_LOST) { 7269 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 7270 } 7271 /* Is Reordering occuring? */ 7272 if (rsm->r_flags & BBR_SACK_PASSED) { 7273 BBR_STAT_INC(bbr_reorder_seen); 7274 bbr->r_ctl.rc_reorder_ts = cts; 7275 if (rsm->r_flags & BBR_MARKED_LOST) { 7276 bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start; 7277 if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost)) 7278 /* LT sampling also needs adjustment */ 7279 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 7280 } 7281 } 7282 rsm->r_flags |= BBR_ACKED; 7283 rsm->r_flags &= ~(BBR_TLP|BBR_WAS_RENEGED|BBR_RXT_CLEARED|BBR_MARKED_LOST); 7284 if (rsm->r_in_tmap) { 7285 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 7286 rsm->r_in_tmap = 0; 7287 } 7288 } 7289 bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_SACKED); 7290 if (end == rsm->r_end) { 7291 /* This block only - done */ 7292 goto out; 7293 } 7294 /* There is more not coverend by this rsm move on */ 7295 start = rsm->r_end; 7296 nrsm = TAILQ_NEXT(rsm, r_next); 7297 rsm = nrsm; 7298 times = 0; 7299 goto do_rest_ofb; 7300 } 7301 if (rsm->r_flags & BBR_ACKED) { 7302 /* Been here done that */ 7303 goto out; 7304 } 7305 /* Ok we need to split off this one at the tail */ 7306 if (bbr_sack_mergable(rsm, start, end)) 7307 nrsm = bbr_alloc_full_limit(bbr); 7308 else 7309 nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT); 7310 if (nrsm == NULL) { 7311 /* failed XXXrrs what can we do but loose the sack info? */ 7312 struct sackblk blk; 7313 7314 blk.start = start; 7315 blk.end = end; 7316 sack_filter_reject(&bbr->r_ctl.bbr_sf, &blk); 7317 goto out; 7318 } 7319 /* Clone it */ 7320 bbr_clone_rsm(bbr, nrsm, rsm, end); 7321 /* The sack block does not cover this guy fully */ 7322 rsm->r_flags &= (~BBR_HAS_FIN); 7323 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 7324 if (rsm->r_in_tmap) { 7325 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 7326 nrsm->r_in_tmap = 1; 7327 } 7328 nrsm->r_dupack = 0; 7329 bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_SACKED, 0); 7330 bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_SACKED); 7331 changed += (rsm->r_end - rsm->r_start); 7332 bbr->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); 7333 bbr_log_sack_passed(tp, bbr, rsm); 7334 /* Is Reordering occuring? 
*/ 7335 if (rsm->r_flags & BBR_MARKED_LOST) { 7336 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 7337 } 7338 if (rsm->r_flags & BBR_SACK_PASSED) { 7339 BBR_STAT_INC(bbr_reorder_seen); 7340 bbr->r_ctl.rc_reorder_ts = cts; 7341 if (rsm->r_flags & BBR_MARKED_LOST) { 7342 bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start; 7343 if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost)) 7344 /* LT sampling also needs adjustment */ 7345 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 7346 } 7347 } 7348 rsm->r_flags &= ~(BBR_TLP|BBR_WAS_RENEGED|BBR_RXT_CLEARED|BBR_MARKED_LOST); 7349 rsm->r_flags |= BBR_ACKED; 7350 if (rsm->r_in_tmap) { 7351 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 7352 rsm->r_in_tmap = 0; 7353 } 7354 out: 7355 if (rsm && (rsm->r_flags & BBR_ACKED)) { 7356 /* 7357 * Now can we merge this newly acked 7358 * block with either the previous or 7359 * next block? 7360 */ 7361 nrsm = TAILQ_NEXT(rsm, r_next); 7362 if (nrsm && 7363 (nrsm->r_flags & BBR_ACKED)) { 7364 /* yep this and next can be merged */ 7365 rsm = bbr_merge_rsm(bbr, rsm, nrsm); 7366 } 7367 /* Now what about the previous? */ 7368 nrsm = TAILQ_PREV(rsm, bbr_head, r_next); 7369 if (nrsm && 7370 (nrsm->r_flags & BBR_ACKED)) { 7371 /* yep the previous and this can be merged */ 7372 rsm = bbr_merge_rsm(bbr, nrsm, rsm); 7373 } 7374 } 7375 if (used_ref == 0) { 7376 BBR_STAT_INC(bbr_sack_proc_all); 7377 } else { 7378 BBR_STAT_INC(bbr_sack_proc_short); 7379 } 7380 if (went_fwd && went_back) { 7381 BBR_STAT_INC(bbr_sack_search_both); 7382 } else if (went_fwd) { 7383 BBR_STAT_INC(bbr_sack_search_fwd); 7384 } else if (went_back) { 7385 BBR_STAT_INC(bbr_sack_search_back); 7386 } 7387 /* Save off where the next seq is */ 7388 if (rsm) 7389 bbr->r_ctl.rc_sacklast = TAILQ_NEXT(rsm, r_next); 7390 else 7391 bbr->r_ctl.rc_sacklast = NULL; 7392 *prsm = rsm; 7393 return (changed); 7394 } 7395 7396 7397 static void inline 7398 bbr_peer_reneges(struct tcp_bbr *bbr, struct bbr_sendmap *rsm, tcp_seq th_ack) 7399 { 7400 struct bbr_sendmap *tmap; 7401 7402 BBR_STAT_INC(bbr_reneges_seen); 7403 tmap = NULL; 7404 while (rsm && (rsm->r_flags & BBR_ACKED)) { 7405 /* Its no longer sacked, mark it so */ 7406 uint32_t oflags; 7407 bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7408 #ifdef BBR_INVARIANTS 7409 if (rsm->r_in_tmap) { 7410 panic("bbr:%p rsm:%p flags:0x%x in tmap?", 7411 bbr, rsm, rsm->r_flags); 7412 } 7413 #endif 7414 oflags = rsm->r_flags; 7415 if (rsm->r_flags & BBR_MARKED_LOST) { 7416 bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start; 7417 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 7418 if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost)) 7419 /* LT sampling also needs adjustment */ 7420 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 7421 } 7422 rsm->r_flags &= ~(BBR_ACKED | BBR_SACK_PASSED | BBR_WAS_SACKPASS | BBR_MARKED_LOST); 7423 rsm->r_flags |= BBR_WAS_RENEGED; 7424 rsm->r_flags |= BBR_RXT_CLEARED; 7425 bbr_log_type_rsmclear(bbr, bbr->r_ctl.rc_rcvtime, rsm, oflags, __LINE__); 7426 /* Rebuild it into our tmap */ 7427 if (tmap == NULL) { 7428 TAILQ_INSERT_HEAD(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 7429 tmap = rsm; 7430 } else { 7431 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, tmap, rsm, r_tnext); 7432 tmap = rsm; 7433 } 7434 tmap->r_in_tmap = 1; 7435 /* 7436 * XXXrrs Delivered? Should we do anything here? 7437 * 7438 * Of course we don't on a rxt timeout so maybe its ok that 7439 * we don't? 7440 * 7441 * For now lets not. 
7442 */ 7443 rsm = TAILQ_NEXT(rsm, r_next); 7444 } 7445 /* 7446 * Now lets possibly clear the sack filter so we start recognizing 7447 * sacks that cover this area. 7448 */ 7449 sack_filter_clear(&bbr->r_ctl.bbr_sf, th_ack); 7450 } 7451 7452 static void 7453 bbr_log_syn(struct tcpcb *tp, struct tcpopt *to) 7454 { 7455 struct tcp_bbr *bbr; 7456 struct bbr_sendmap *rsm; 7457 uint32_t cts; 7458 7459 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 7460 cts = bbr->r_ctl.rc_rcvtime; 7461 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7462 if (rsm && (rsm->r_flags & BBR_HAS_SYN)) { 7463 if ((rsm->r_end - rsm->r_start) <= 1) { 7464 /* Log out the SYN completely */ 7465 bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 7466 rsm->r_rtr_bytes = 0; 7467 TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next); 7468 if (rsm->r_in_tmap) { 7469 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 7470 rsm->r_in_tmap = 0; 7471 } 7472 if (bbr->r_ctl.rc_next == rsm) { 7473 /* scoot along the marker */ 7474 bbr->r_ctl.rc_next = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7475 } 7476 if (to != NULL) 7477 bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_CUM_ACKED, 0); 7478 bbr_free(bbr, rsm); 7479 } else { 7480 /* There is more (Fast open)? strip out SYN. */ 7481 rsm->r_flags &= ~BBR_HAS_SYN; 7482 rsm->r_start++; 7483 } 7484 } 7485 } 7486 7487 /* 7488 * Returns the number of bytes that were 7489 * acknowledged by SACK blocks. 7490 */ 7491 7492 static uint32_t 7493 bbr_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, 7494 uint32_t *prev_acked) 7495 { 7496 uint32_t changed, last_seq, entered_recovery = 0; 7497 struct tcp_bbr *bbr; 7498 struct bbr_sendmap *rsm; 7499 struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1]; 7500 register uint32_t th_ack; 7501 int32_t i, j, k, new_sb, num_sack_blks = 0; 7502 uint32_t cts, acked, ack_point, sack_changed = 0; 7503 uint32_t p_maxseg, maxseg, p_acked = 0; 7504 7505 INP_WLOCK_ASSERT(tp->t_inpcb); 7506 if (th->th_flags & TH_RST) { 7507 /* We don't log resets */ 7508 return (0); 7509 } 7510 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 7511 cts = bbr->r_ctl.rc_rcvtime; 7512 7513 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7514 changed = 0; 7515 maxseg = tp->t_maxseg - bbr->rc_last_options; 7516 p_maxseg = min(bbr->r_ctl.rc_pace_max_segs, maxseg); 7517 th_ack = th->th_ack; 7518 if (SEQ_GT(th_ack, tp->snd_una)) { 7519 acked = th_ack - tp->snd_una; 7520 bbr_log_progress_event(bbr, tp, ticks, PROGRESS_UPDATE, __LINE__); 7521 bbr->rc_tp->t_acktime = ticks; 7522 } else 7523 acked = 0; 7524 if (SEQ_LEQ(th_ack, tp->snd_una)) { 7525 /* Only sent here for sack processing */ 7526 goto proc_sack; 7527 } 7528 if (rsm && SEQ_GT(th_ack, rsm->r_start)) { 7529 changed = th_ack - rsm->r_start; 7530 } else if ((rsm == NULL) && ((th_ack - 1) == tp->iss)) { 7531 /* 7532 * For the SYN incoming case we will not have called 7533 * tcp_output for the sending of the SYN, so there will be 7534 * no map. All other cases should probably be a panic. 7535 */ 7536 if ((to->to_flags & TOF_TS) && (to->to_tsecr != 0)) { 7537 /* 7538 * We have a timestamp that can be used to generate 7539 * an initial RTT. 
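 * The sample is simply now - tsecr, with both values converted to
 * microseconds and the result clamped to a minimum of 1, fed
 * straight into the rtt-prop filter.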
7540 */ 7541 uint32_t ts, now, rtt; 7542 7543 ts = bbr_ts_convert(to->to_tsecr); 7544 now = bbr_ts_convert(tcp_tv_to_mssectick(&bbr->rc_tv)); 7545 rtt = now - ts; 7546 if (rtt < 1) 7547 rtt = 1; 7548 bbr_log_type_bbrrttprop(bbr, rtt, 7549 tp->iss, 0, cts, 7550 BBR_RTT_BY_TIMESTAMP, tp->iss, 0); 7551 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts); 7552 changed = 1; 7553 bbr->r_wanted_output = 1; 7554 goto out; 7555 } 7556 goto proc_sack; 7557 } else if (rsm == NULL) { 7558 goto out; 7559 } 7560 if (changed) { 7561 /* 7562 * The ACK point is advancing to th_ack, we must drop off 7563 * the packets in the rack log and calculate any eligble 7564 * RTT's. 7565 */ 7566 bbr->r_wanted_output = 1; 7567 more: 7568 if (rsm == NULL) { 7569 7570 if (tp->t_flags & TF_SENTFIN) { 7571 /* if we send a FIN we will not hav a map */ 7572 goto proc_sack; 7573 } 7574 #ifdef BBR_INVARIANTS 7575 panic("No rack map tp:%p for th:%p state:%d bbr:%p snd_una:%u snd_max:%u chg:%d\n", 7576 tp, 7577 th, tp->t_state, bbr, 7578 tp->snd_una, tp->snd_max, changed); 7579 #endif 7580 goto proc_sack; 7581 } 7582 } 7583 if (SEQ_LT(th_ack, rsm->r_start)) { 7584 /* Huh map is missing this */ 7585 #ifdef BBR_INVARIANTS 7586 printf("Rack map starts at r_start:%u for th_ack:%u huh? ts:%d rs:%d bbr:%p\n", 7587 rsm->r_start, 7588 th_ack, tp->t_state, 7589 bbr->r_state, bbr); 7590 panic("th-ack is bad bbr:%p tp:%p", bbr, tp); 7591 #endif 7592 goto proc_sack; 7593 } else if (th_ack == rsm->r_start) { 7594 /* None here to ack */ 7595 goto proc_sack; 7596 } 7597 /* 7598 * Clear the dup ack counter, it will 7599 * either be freed or if there is some 7600 * remaining we need to start it at zero. 7601 */ 7602 rsm->r_dupack = 0; 7603 /* Now do we consume the whole thing? */ 7604 if (SEQ_GEQ(th_ack, rsm->r_end)) { 7605 /* Its all consumed. */ 7606 uint32_t left; 7607 7608 if (rsm->r_flags & BBR_ACKED) { 7609 /* 7610 * It was acked on the scoreboard -- remove it from 7611 * total 7612 */ 7613 p_acked += (rsm->r_end - rsm->r_start); 7614 bbr->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); 7615 if (bbr->r_ctl.rc_sacked == 0) 7616 bbr->r_ctl.rc_sacklast = NULL; 7617 } else { 7618 bbr_update_rtt(tp, bbr, rsm, to, cts, BBR_CUM_ACKED, th_ack); 7619 if (rsm->r_flags & BBR_MARKED_LOST) { 7620 bbr->r_ctl.rc_lost_bytes -= rsm->r_end - rsm->r_start; 7621 } 7622 if (rsm->r_flags & BBR_SACK_PASSED) { 7623 /* 7624 * There are acked segments ACKED on the 7625 * scoreboard further up. We are seeing 7626 * reordering. 
7627 */ 7628 BBR_STAT_INC(bbr_reorder_seen); 7629 bbr->r_ctl.rc_reorder_ts = cts; 7630 if (rsm->r_flags & BBR_MARKED_LOST) { 7631 bbr->r_ctl.rc_lost -= rsm->r_end - rsm->r_start; 7632 if (SEQ_GT(bbr->r_ctl.rc_lt_lost, bbr->r_ctl.rc_lost)) 7633 /* LT sampling also needs adjustment */ 7634 bbr->r_ctl.rc_lt_lost = bbr->r_ctl.rc_lost; 7635 } 7636 } 7637 rsm->r_flags &= ~BBR_MARKED_LOST; 7638 } 7639 bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 7640 rsm->r_rtr_bytes = 0; 7641 TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next); 7642 if (rsm->r_in_tmap) { 7643 TAILQ_REMOVE(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 7644 rsm->r_in_tmap = 0; 7645 } 7646 if (bbr->r_ctl.rc_next == rsm) { 7647 /* scoot along the marker */ 7648 bbr->r_ctl.rc_next = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7649 } 7650 bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_CUM_ACKED); 7651 /* Adjust the packet counts */ 7652 left = th_ack - rsm->r_end; 7653 /* Free back to zone */ 7654 bbr_free(bbr, rsm); 7655 if (left) { 7656 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7657 goto more; 7658 } 7659 goto proc_sack; 7660 } 7661 if (rsm->r_flags & BBR_ACKED) { 7662 /* 7663 * It was acked on the scoreboard -- remove it from total 7664 * for the part being cum-acked. 7665 */ 7666 p_acked += (rsm->r_end - rsm->r_start); 7667 bbr->r_ctl.rc_sacked -= (th_ack - rsm->r_start); 7668 if (bbr->r_ctl.rc_sacked == 0) 7669 bbr->r_ctl.rc_sacklast = NULL; 7670 } else { 7671 /* 7672 * It was acked up to th_ack point for the first time 7673 */ 7674 struct bbr_sendmap lrsm; 7675 7676 memcpy(&lrsm, rsm, sizeof(struct bbr_sendmap)); 7677 lrsm.r_end = th_ack; 7678 bbr_update_rtt(tp, bbr, &lrsm, to, cts, BBR_CUM_ACKED, th_ack); 7679 } 7680 if ((rsm->r_flags & BBR_MARKED_LOST) && 7681 ((rsm->r_flags & BBR_ACKED) == 0)) { 7682 /* 7683 * It was marked lost and partly ack'd now 7684 * for the first time. We lower the rc_lost_bytes 7685 * and still leave it MARKED. 7686 */ 7687 bbr->r_ctl.rc_lost_bytes -= th_ack - rsm->r_start; 7688 } 7689 bbr_isit_a_pkt_epoch(bbr, cts, rsm, __LINE__, BBR_CUM_ACKED); 7690 bbr->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; 7691 rsm->r_rtr_bytes = 0; 7692 /* adjust packet count */ 7693 rsm->r_start = th_ack; 7694 proc_sack: 7695 /* Check for reneging */ 7696 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 7697 if (rsm && (rsm->r_flags & BBR_ACKED) && (th_ack == rsm->r_start)) { 7698 /* 7699 * The peer has moved snd_una up to the edge of this send, 7700 * i.e. one that it had previously acked. The only way that 7701 * can be true if the peer threw away data (space issues) 7702 * that it had previously sacked (else it would have given 7703 * us snd_una up to (rsm->r_end). We need to undo the acked 7704 * markings here. 7705 * 7706 * Note we have to look to make sure th_ack is our 7707 * rsm->r_start in case we get an old ack where th_ack is 7708 * behind snd_una. 
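 * bbr_peer_reneges() strips the ACKED markings from those entries,
 * links them back into the transmit map and clears the sack filter
 * so future SACKs covering this range are honored again.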
7709 */ 7710 bbr_peer_reneges(bbr, rsm, th->th_ack); 7711 } 7712 if ((to->to_flags & TOF_SACK) == 0) { 7713 /* We are done nothing left to log */ 7714 goto out; 7715 } 7716 rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_map, bbr_sendmap, r_next); 7717 if (rsm) { 7718 last_seq = rsm->r_end; 7719 } else { 7720 last_seq = tp->snd_max; 7721 } 7722 /* Sack block processing */ 7723 if (SEQ_GT(th_ack, tp->snd_una)) 7724 ack_point = th_ack; 7725 else 7726 ack_point = tp->snd_una; 7727 for (i = 0; i < to->to_nsacks; i++) { 7728 bcopy((to->to_sacks + i * TCPOLEN_SACK), 7729 &sack, sizeof(sack)); 7730 sack.start = ntohl(sack.start); 7731 sack.end = ntohl(sack.end); 7732 if (SEQ_GT(sack.end, sack.start) && 7733 SEQ_GT(sack.start, ack_point) && 7734 SEQ_LT(sack.start, tp->snd_max) && 7735 SEQ_GT(sack.end, ack_point) && 7736 SEQ_LEQ(sack.end, tp->snd_max)) { 7737 if ((bbr->r_ctl.rc_num_small_maps_alloced > bbr_sack_block_limit) && 7738 (SEQ_LT(sack.end, last_seq)) && 7739 ((sack.end - sack.start) < (p_maxseg / 8))) { 7740 /* 7741 * Not the last piece and its smaller than 7742 * 1/8th of a p_maxseg. We ignore this. 7743 */ 7744 BBR_STAT_INC(bbr_runt_sacks); 7745 continue; 7746 } 7747 sack_blocks[num_sack_blks] = sack; 7748 num_sack_blks++; 7749 #ifdef NETFLIX_STATS 7750 } else if (SEQ_LEQ(sack.start, th_ack) && 7751 SEQ_LEQ(sack.end, th_ack)) { 7752 /* 7753 * Its a D-SACK block. 7754 */ 7755 tcp_record_dsack(sack.start, sack.end); 7756 #endif 7757 } 7758 } 7759 if (num_sack_blks == 0) 7760 goto out; 7761 /* 7762 * Sort the SACK blocks so we can update the rack scoreboard with 7763 * just one pass. 7764 */ 7765 new_sb = sack_filter_blks(&bbr->r_ctl.bbr_sf, sack_blocks, 7766 num_sack_blks, th->th_ack); 7767 ctf_log_sack_filter(bbr->rc_tp, new_sb, sack_blocks); 7768 BBR_STAT_ADD(bbr_sack_blocks, num_sack_blks); 7769 BBR_STAT_ADD(bbr_sack_blocks_skip, (num_sack_blks - new_sb)); 7770 num_sack_blks = new_sb; 7771 if (num_sack_blks < 2) { 7772 goto do_sack_work; 7773 } 7774 /* Sort the sacks */ 7775 for (i = 0; i < num_sack_blks; i++) { 7776 for (j = i + 1; j < num_sack_blks; j++) { 7777 if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) { 7778 sack = sack_blocks[i]; 7779 sack_blocks[i] = sack_blocks[j]; 7780 sack_blocks[j] = sack; 7781 } 7782 } 7783 } 7784 /* 7785 * Now are any of the sack block ends the same (yes some 7786 * implememtations send these)? 7787 */ 7788 again: 7789 if (num_sack_blks > 1) { 7790 for (i = 0; i < num_sack_blks; i++) { 7791 for (j = i + 1; j < num_sack_blks; j++) { 7792 if (sack_blocks[i].end == sack_blocks[j].end) { 7793 /* 7794 * Ok these two have the same end we 7795 * want the smallest end and then 7796 * throw away the larger and start 7797 * again. 
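 * i.e. keep a single block using the lower of the two starts (it
 * covers more), slide the remaining blocks down one slot and
 * restart the scan.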
7798 */ 7799 if (SEQ_LT(sack_blocks[j].start, sack_blocks[i].start)) { 7800 /* 7801 * The second block covers 7802 * more area use that 7803 */ 7804 sack_blocks[i].start = sack_blocks[j].start; 7805 } 7806 /* 7807 * Now collapse out the dup-sack and 7808 * lower the count 7809 */ 7810 for (k = (j + 1); k < num_sack_blks; k++) { 7811 sack_blocks[j].start = sack_blocks[k].start; 7812 sack_blocks[j].end = sack_blocks[k].end; 7813 j++; 7814 } 7815 num_sack_blks--; 7816 goto again; 7817 } 7818 } 7819 } 7820 } 7821 do_sack_work: 7822 rsm = bbr->r_ctl.rc_sacklast; 7823 for (i = 0; i < num_sack_blks; i++) { 7824 acked = bbr_proc_sack_blk(tp, bbr, &sack_blocks[i], to, &rsm, cts); 7825 if (acked) { 7826 bbr->r_wanted_output = 1; 7827 changed += acked; 7828 sack_changed += acked; 7829 } 7830 } 7831 out: 7832 *prev_acked = p_acked; 7833 if ((sack_changed) && (!IN_RECOVERY(tp->t_flags))) { 7834 /* 7835 * Ok we have a high probability that we need to go in to 7836 * recovery since we have data sack'd 7837 */ 7838 struct bbr_sendmap *rsm; 7839 7840 rsm = bbr_check_recovery_mode(tp, bbr, cts); 7841 if (rsm) { 7842 /* Enter recovery */ 7843 entered_recovery = 1; 7844 bbr->r_wanted_output = 1; 7845 /* 7846 * When we enter recovery we need to assure we send 7847 * one packet. 7848 */ 7849 if (bbr->r_ctl.rc_resend == NULL) { 7850 bbr->r_ctl.rc_resend = rsm; 7851 } 7852 } 7853 } 7854 if (IN_RECOVERY(tp->t_flags) && (entered_recovery == 0)) { 7855 /* 7856 * See if we need to rack-retransmit anything if so set it 7857 * up as the thing to resend assuming something else is not 7858 * already in that position. 7859 */ 7860 if (bbr->r_ctl.rc_resend == NULL) { 7861 bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts); 7862 } 7863 } 7864 /* 7865 * We return the amount that changed via sack, this is used by the 7866 * ack-received code to augment what was changed between th_ack <-> 7867 * snd_una. 7868 */ 7869 return (sack_changed); 7870 } 7871 7872 static void 7873 bbr_strike_dupack(struct tcp_bbr *bbr) 7874 { 7875 struct bbr_sendmap *rsm; 7876 7877 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_tmap); 7878 if (rsm && (rsm->r_dupack < 0xff)) { 7879 rsm->r_dupack++; 7880 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) 7881 bbr->r_wanted_output = 1; 7882 } 7883 } 7884 7885 /* 7886 * Return value of 1, we do not need to call bbr_process_data(). 7887 * return value of 0, bbr_process_data can be called. 7888 * For ret_val if its 0 the TCB is locked and valid, if its non-zero 7889 * its unlocked and probably unsafe to touch the TCB. 
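 * This is the slow-path ACK handler: it feeds the send map through
 * bbr_log_ack(), runs long-term b/w sampling, trims acked data from
 * the socket buffer and handles recovery entry/exit as well as the
 * all-data-acked case.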
7890 */ 7891 static int 7892 bbr_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so, 7893 struct tcpcb *tp, struct tcpopt *to, 7894 uint32_t tiwin, int32_t tlen, 7895 int32_t * ofia, int32_t thflags, int32_t * ret_val) 7896 { 7897 int32_t ourfinisacked = 0; 7898 int32_t acked_amount; 7899 uint16_t nsegs; 7900 int32_t acked; 7901 uint32_t lost, sack_changed = 0; 7902 struct mbuf *mfree; 7903 struct tcp_bbr *bbr; 7904 uint32_t prev_acked = 0; 7905 7906 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 7907 lost = bbr->r_ctl.rc_lost; 7908 nsegs = max(1, m->m_pkthdr.lro_nsegs); 7909 if (SEQ_GT(th->th_ack, tp->snd_max)) { 7910 ctf_do_dropafterack(m, tp, th, thflags, tlen, ret_val); 7911 bbr->r_wanted_output = 1; 7912 return (1); 7913 } 7914 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { 7915 /* Process the ack */ 7916 if (bbr->rc_in_persist) 7917 tp->t_rxtshift = 0; 7918 if ((th->th_ack == tp->snd_una) && (tiwin == tp->snd_wnd)) 7919 bbr_strike_dupack(bbr); 7920 sack_changed = bbr_log_ack(tp, to, th, &prev_acked); 7921 } 7922 bbr_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime, (bbr->r_ctl.rc_lost > lost)); 7923 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 7924 /* 7925 * Old ack, behind the last one rcv'd or a duplicate ack 7926 * with SACK info. 7927 */ 7928 if (th->th_ack == tp->snd_una) { 7929 bbr_ack_received(tp, bbr, th, 0, sack_changed, prev_acked, __LINE__, 0); 7930 if (bbr->r_state == TCPS_SYN_SENT) { 7931 /* 7932 * Special case on where we sent SYN. When 7933 * the SYN-ACK is processed in syn_sent 7934 * state it bumps the snd_una. This causes 7935 * us to hit here even though we did ack 1 7936 * byte. 7937 * 7938 * Go through the nothing left case so we 7939 * send data. 7940 */ 7941 goto nothing_left; 7942 } 7943 } 7944 return (0); 7945 } 7946 /* 7947 * If we reach this point, ACK is not a duplicate, i.e., it ACKs 7948 * something we sent. 7949 */ 7950 if (tp->t_flags & TF_NEEDSYN) { 7951 /* 7952 * T/TCP: Connection was half-synchronized, and our SYN has 7953 * been ACK'd (so connection is now fully synchronized). Go 7954 * to non-starred state, increment snd_una for ACK of SYN, 7955 * and check if we can do window scaling. 7956 */ 7957 tp->t_flags &= ~TF_NEEDSYN; 7958 tp->snd_una++; 7959 /* Do window scaling? */ 7960 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 7961 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 7962 tp->rcv_scale = tp->request_r_scale; 7963 /* Send window already scaled. */ 7964 } 7965 } 7966 INP_WLOCK_ASSERT(tp->t_inpcb); 7967 7968 acked = BYTES_THIS_ACK(tp, th); 7969 KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs); 7970 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 7971 7972 /* 7973 * If we just performed our first retransmit, and the ACK arrives 7974 * within our recovery window, then it was a mistake to do the 7975 * retransmit in the first place. Recover our original cwnd and 7976 * ssthresh, and proceed to transmit where we left off. 7977 */ 7978 if (tp->t_flags & TF_PREVVALID) { 7979 tp->t_flags &= ~TF_PREVVALID; 7980 if (tp->t_rxtshift == 1 && 7981 (int)(ticks - tp->t_badrxtwin) < 0) 7982 bbr_cong_signal(tp, th, CC_RTO_ERR, NULL); 7983 } 7984 SOCKBUF_LOCK(&so->so_snd); 7985 acked_amount = min(acked, (int)sbavail(&so->so_snd)); 7986 tp->snd_wnd -= acked_amount; 7987 mfree = sbcut_locked(&so->so_snd, acked_amount); 7988 /* NB: sowwakeup_locked() does an implicit unlock. 
*/ 7989 sowwakeup_locked(so); 7990 m_freem(mfree); 7991 if (SEQ_GT(th->th_ack, tp->snd_una)) { 7992 bbr_collapse_rtt(tp, bbr, TCP_REXMTVAL(tp)); 7993 } 7994 tp->snd_una = th->th_ack; 7995 bbr_ack_received(tp, bbr, th, acked, sack_changed, prev_acked, __LINE__, (bbr->r_ctl.rc_lost - lost)); 7996 if (IN_RECOVERY(tp->t_flags)) { 7997 if (SEQ_LT(th->th_ack, tp->snd_recover) && 7998 (SEQ_LT(th->th_ack, tp->snd_max))) { 7999 tcp_bbr_partialack(tp); 8000 } else { 8001 bbr_post_recovery(tp); 8002 } 8003 } 8004 if (SEQ_GT(tp->snd_una, tp->snd_recover)) { 8005 tp->snd_recover = tp->snd_una; 8006 } 8007 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 8008 tp->snd_nxt = tp->snd_max; 8009 } 8010 if (tp->snd_una == tp->snd_max) { 8011 /* Nothing left outstanding */ 8012 nothing_left: 8013 bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__); 8014 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 8015 bbr->rc_tp->t_acktime = 0; 8016 if ((sbused(&so->so_snd) == 0) && 8017 (tp->t_flags & TF_SENTFIN)) { 8018 ourfinisacked = 1; 8019 } 8020 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 8021 if (bbr->rc_in_persist == 0) { 8022 bbr->r_ctl.rc_went_idle_time = bbr->r_ctl.rc_rcvtime; 8023 } 8024 sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una); 8025 bbr_log_ack_clear(bbr, bbr->r_ctl.rc_rcvtime); 8026 /* 8027 * We invalidate the last ack here since we 8028 * don't want to transfer forward the time 8029 * for our sum's calculations. 8030 */ 8031 if ((tp->t_state >= TCPS_FIN_WAIT_1) && 8032 (sbavail(&so->so_snd) == 0) && 8033 (tp->t_flags2 & TF2_DROP_AF_DATA)) { 8034 /* 8035 * The socket was gone and the peer sent data, time 8036 * to reset him. 8037 */ 8038 *ret_val = 1; 8039 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 8040 /* tcp_close will kill the inp pre-log the Reset */ 8041 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 8042 tp = tcp_close(tp); 8043 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, tlen); 8044 BBR_STAT_INC(bbr_dropped_af_data); 8045 return (1); 8046 } 8047 /* Set need output so persist might get set */ 8048 bbr->r_wanted_output = 1; 8049 } 8050 if (ofia) 8051 *ofia = ourfinisacked; 8052 return (0); 8053 } 8054 8055 static void 8056 bbr_enter_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, int32_t line) 8057 { 8058 if (bbr->rc_in_persist == 0) { 8059 bbr_timer_cancel(bbr, __LINE__, cts); 8060 bbr->r_ctl.rc_last_delay_val = 0; 8061 tp->t_rxtshift = 0; 8062 bbr->rc_in_persist = 1; 8063 bbr->r_ctl.rc_went_idle_time = cts; 8064 /* We should be capped when rw went to 0 but just in case */ 8065 bbr_log_type_pesist(bbr, cts, 0, line, 1); 8066 /* Time freezes for the state, so do the accounting now */ 8067 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 8068 uint32_t time_in; 8069 8070 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 8071 if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) { 8072 int32_t idx; 8073 8074 idx = bbr_state_val(bbr); 8075 counter_u64_add(bbr_state_time[(idx + 5)], time_in); 8076 } else { 8077 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 8078 } 8079 } 8080 bbr->r_ctl.rc_bbr_state_time = cts; 8081 } 8082 } 8083 8084 static void 8085 bbr_restart_after_idle(struct tcp_bbr *bbr, uint32_t cts, uint32_t idle_time) 8086 { 8087 /* 8088 * Note that if idle time does not exceed our 8089 * threshold, we do nothing continuing the state 8090 * transitions we were last walking through. 
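 * Past the threshold we either enter BBR_STATE_IDLE_EXIT (when
 * idle restart is enabled), re-deriving the target at BBR_UNIT and
 * then ramping with the startup gains, or simply force a sub-state
 * change.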
8091 */ 8092 if (idle_time >= bbr_idle_restart_threshold) { 8093 if (bbr->rc_use_idle_restart) { 8094 bbr->rc_bbr_state = BBR_STATE_IDLE_EXIT; 8095 /* 8096 * Set our target using BBR_UNIT, so 8097 * we increase at a dramatic rate but 8098 * we stop when we get the pipe 8099 * full again for our current b/w estimate. 8100 */ 8101 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 8102 bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT; 8103 bbr_set_state_target(bbr, __LINE__); 8104 /* Now setup our gains to ramp up */ 8105 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg; 8106 bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg; 8107 bbr_log_type_statechange(bbr, cts, __LINE__); 8108 } else { 8109 bbr_substate_change(bbr, cts, __LINE__, 1); 8110 } 8111 } 8112 } 8113 8114 static void 8115 bbr_exit_persist(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, int32_t line) 8116 { 8117 uint32_t idle_time; 8118 8119 if (bbr->rc_in_persist == 0) 8120 return; 8121 idle_time = bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time); 8122 bbr->rc_in_persist = 0; 8123 bbr->rc_hit_state_1 = 0; 8124 bbr->r_ctl.rc_del_time = cts; 8125 /* 8126 * We invalidate the last ack here since we 8127 * don't want to transfer forward the time 8128 * for our sum's calculations. 8129 */ 8130 if (bbr->rc_inp->inp_in_hpts) { 8131 tcp_hpts_remove(bbr->rc_inp, HPTS_REMOVE_OUTPUT); 8132 bbr->rc_timer_first = 0; 8133 bbr->r_ctl.rc_hpts_flags = 0; 8134 bbr->r_ctl.rc_last_delay_val = 0; 8135 bbr->r_ctl.rc_hptsi_agg_delay = 0; 8136 bbr->r_agg_early_set = 0; 8137 bbr->r_ctl.rc_agg_early = 0; 8138 } 8139 bbr_log_type_pesist(bbr, cts, idle_time, line, 0); 8140 if (idle_time >= bbr_rtt_probe_time) { 8141 /* 8142 * This qualifies as a RTT_PROBE session since we drop the 8143 * data outstanding to nothing and waited more than 8144 * bbr_rtt_probe_time. 8145 */ 8146 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_PERSIST, 0); 8147 bbr->r_ctl.last_in_probertt = bbr->r_ctl.rc_rtt_shrinks = cts; 8148 } 8149 tp->t_rxtshift = 0; 8150 /* 8151 * If in probeBW and we have persisted more than an RTT lets do 8152 * special handling. 8153 */ 8154 /* Force a time based epoch */ 8155 bbr_set_epoch(bbr, cts, __LINE__); 8156 /* 8157 * Setup the lost so we don't count anything against the guy 8158 * we have been stuck with during persists. 8159 */ 8160 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 8161 /* Time un-freezes for the state */ 8162 bbr->r_ctl.rc_bbr_state_time = cts; 8163 if ((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) || 8164 (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT)) { 8165 /* 8166 * If we are going back to probe-bw 8167 * or probe_rtt, we may need to possibly 8168 * do a fast restart. 8169 */ 8170 bbr_restart_after_idle(bbr, cts, idle_time); 8171 } 8172 } 8173 8174 static void 8175 bbr_collapsed_window(struct tcp_bbr *bbr) 8176 { 8177 /* 8178 * Now we must walk the 8179 * send map and divide the 8180 * ones left stranded. These 8181 * guys can't cause us to abort 8182 * the connection and are really 8183 * "unsent". However if a buggy 8184 * client actually did keep some 8185 * of the data i.e. collapsed the win 8186 * and refused to ack and then opened 8187 * the win and acked that data. We would 8188 * get into an ack war, the simplier 8189 * method then of just pretending we 8190 * did not send those segments something 8191 * won't work. 
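 * Instead we find the rsm containing the new right edge (snd_una +
 * snd_wnd), split it there when that would not create runt entries,
 * and flag everything from that point on with BBR_RWND_COLLAPSED.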
8192 */ 8193 struct bbr_sendmap *rsm, *nrsm; 8194 tcp_seq max_seq; 8195 uint32_t maxseg; 8196 int can_split = 0; 8197 int fnd = 0; 8198 8199 maxseg = bbr->rc_tp->t_maxseg - bbr->rc_last_options; 8200 max_seq = bbr->rc_tp->snd_una + bbr->rc_tp->snd_wnd; 8201 bbr_log_type_rwnd_collapse(bbr, max_seq, 1, 0); 8202 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) { 8203 /* Find the first seq past or at maxseq */ 8204 if (rsm->r_flags & BBR_RWND_COLLAPSED) 8205 rsm->r_flags &= ~BBR_RWND_COLLAPSED; 8206 if (SEQ_GEQ(max_seq, rsm->r_start) && 8207 SEQ_GEQ(rsm->r_end, max_seq)) { 8208 fnd = 1; 8209 break; 8210 } 8211 } 8212 bbr->rc_has_collapsed = 0; 8213 if (!fnd) { 8214 /* Nothing to do strange */ 8215 return; 8216 } 8217 /* 8218 * Now can we split? 8219 * 8220 * We don't want to split if splitting 8221 * would generate too many small segments 8222 * less we let an attacker fragment our 8223 * send_map and leave us out of memory. 8224 */ 8225 if ((max_seq != rsm->r_start) && 8226 (max_seq != rsm->r_end)){ 8227 /* can we split? */ 8228 int res1, res2; 8229 8230 res1 = max_seq - rsm->r_start; 8231 res2 = rsm->r_end - max_seq; 8232 if ((res1 >= (maxseg/8)) && 8233 (res2 >= (maxseg/8))) { 8234 /* No small pieces here */ 8235 can_split = 1; 8236 } else if (bbr->r_ctl.rc_num_small_maps_alloced < bbr_sack_block_limit) { 8237 /* We are under the limit */ 8238 can_split = 1; 8239 } 8240 } 8241 /* Ok do we need to split this rsm? */ 8242 if (max_seq == rsm->r_start) { 8243 /* It's this guy no split required */ 8244 nrsm = rsm; 8245 } else if (max_seq == rsm->r_end) { 8246 /* It's the next one no split required. */ 8247 nrsm = TAILQ_NEXT(rsm, r_next); 8248 if (nrsm == NULL) { 8249 /* Huh? */ 8250 return; 8251 } 8252 } else if (can_split && SEQ_LT(max_seq, rsm->r_end)) { 8253 /* yep we need to split it */ 8254 nrsm = bbr_alloc_limit(bbr, BBR_LIMIT_TYPE_SPLIT); 8255 if (nrsm == NULL) { 8256 /* failed XXXrrs what can we do mark the whole? */ 8257 nrsm = rsm; 8258 goto no_split; 8259 } 8260 /* Clone it */ 8261 bbr_log_type_rwnd_collapse(bbr, max_seq, 3, 0); 8262 bbr_clone_rsm(bbr, nrsm, rsm, max_seq); 8263 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_map, rsm, nrsm, r_next); 8264 if (rsm->r_in_tmap) { 8265 TAILQ_INSERT_AFTER(&bbr->r_ctl.rc_tmap, rsm, nrsm, r_tnext); 8266 nrsm->r_in_tmap = 1; 8267 } 8268 } else { 8269 /* 8270 * Split not allowed just start here just 8271 * use this guy. 8272 */ 8273 nrsm = rsm; 8274 } 8275 no_split: 8276 BBR_STAT_INC(bbr_collapsed_win); 8277 /* reuse fnd as a count */ 8278 fnd = 0; 8279 TAILQ_FOREACH_FROM(nrsm, &bbr->r_ctl.rc_map, r_next) { 8280 nrsm->r_flags |= BBR_RWND_COLLAPSED; 8281 fnd++; 8282 bbr->rc_has_collapsed = 1; 8283 } 8284 bbr_log_type_rwnd_collapse(bbr, max_seq, 4, fnd); 8285 } 8286 8287 static void 8288 bbr_un_collapse_window(struct tcp_bbr *bbr) 8289 { 8290 struct bbr_sendmap *rsm; 8291 int cleared = 0; 8292 8293 TAILQ_FOREACH_REVERSE(rsm, &bbr->r_ctl.rc_map, bbr_head, r_next) { 8294 if (rsm->r_flags & BBR_RWND_COLLAPSED) { 8295 /* Clear the flag */ 8296 rsm->r_flags &= ~BBR_RWND_COLLAPSED; 8297 cleared++; 8298 } else 8299 break; 8300 } 8301 bbr_log_type_rwnd_collapse(bbr, 8302 (bbr->rc_tp->snd_una + bbr->rc_tp->snd_wnd), 0, cleared); 8303 bbr->rc_has_collapsed = 0; 8304 } 8305 8306 /* 8307 * Return value of 1, the TCB is unlocked and most 8308 * likely gone, return value of 0, the TCB is still 8309 * locked. 
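 * Besides merging segment text into the reassembly queue, this is
 * where window updates are applied, where we decide on entering or
 * leaving persist, and where FIN processing and delayed-ACK
 * scheduling happen.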
8310 */ 8311 static int 8312 bbr_process_data(struct mbuf *m, struct tcphdr *th, struct socket *so, 8313 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, 8314 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt) 8315 { 8316 /* 8317 * Update window information. Don't look at window if no ACK: TAC's 8318 * send garbage on first SYN. 8319 */ 8320 uint16_t nsegs; 8321 int32_t tfo_syn; 8322 struct tcp_bbr *bbr; 8323 8324 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 8325 INP_WLOCK_ASSERT(tp->t_inpcb); 8326 nsegs = max(1, m->m_pkthdr.lro_nsegs); 8327 if ((thflags & TH_ACK) && 8328 (SEQ_LT(tp->snd_wl1, th->th_seq) || 8329 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 8330 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 8331 /* keep track of pure window updates */ 8332 if (tlen == 0 && 8333 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 8334 KMOD_TCPSTAT_INC(tcps_rcvwinupd); 8335 tp->snd_wnd = tiwin; 8336 tp->snd_wl1 = th->th_seq; 8337 tp->snd_wl2 = th->th_ack; 8338 if (tp->snd_wnd > tp->max_sndwnd) 8339 tp->max_sndwnd = tp->snd_wnd; 8340 bbr->r_wanted_output = 1; 8341 } else if (thflags & TH_ACK) { 8342 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { 8343 tp->snd_wnd = tiwin; 8344 tp->snd_wl1 = th->th_seq; 8345 tp->snd_wl2 = th->th_ack; 8346 } 8347 } 8348 if (tp->snd_wnd < ctf_outstanding(tp)) 8349 /* The peer collapsed its window on us */ 8350 bbr_collapsed_window(bbr); 8351 else if (bbr->rc_has_collapsed) 8352 bbr_un_collapse_window(bbr); 8353 /* Was persist timer active and now we have window space? */ 8354 if ((bbr->rc_in_persist != 0) && 8355 (tp->snd_wnd >= min((bbr->r_ctl.rc_high_rwnd/2), 8356 bbr_minseg(bbr)))) { 8357 /* 8358 * Make the rate persist at end of persist mode if idle long 8359 * enough 8360 */ 8361 bbr_exit_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 8362 8363 /* Make sure we output to start the timer */ 8364 bbr->r_wanted_output = 1; 8365 } 8366 /* Do we need to enter persist? */ 8367 if ((bbr->rc_in_persist == 0) && 8368 (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && 8369 TCPS_HAVEESTABLISHED(tp->t_state) && 8370 (tp->snd_max == tp->snd_una) && 8371 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 8372 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 8373 /* No send window.. we must enter persist */ 8374 bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 8375 } 8376 if (tp->t_flags2 & TF2_DROP_AF_DATA) { 8377 m_freem(m); 8378 return (0); 8379 } 8380 /* 8381 * We don't support urgent data but 8382 * drag along the up just to make sure 8383 * if there is a stack switch no one 8384 * is surprised. 8385 */ 8386 tp->rcv_up = tp->rcv_nxt; 8387 INP_WLOCK_ASSERT(tp->t_inpcb); 8388 8389 /* 8390 * Process the segment text, merging it into the TCP sequencing 8391 * queue, and arranging for acknowledgment of receipt if necessary. 8392 * This process logically involves adjusting tp->rcv_wnd as data is 8393 * presented to the user (this happens in tcp_usrreq.c, case 8394 * PRU_RCVD). If a FIN has already been received on this connection 8395 * then we just ignore the text. 
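 * The in-order case below appends straight to the receive buffer
 * (possibly delaying the ACK); anything out of order goes through
 * tcp_reass() and forces an immediate ACK.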
8396 */ 8397 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 8398 IS_FASTOPEN(tp->t_flags)); 8399 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 8400 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 8401 tcp_seq save_start = th->th_seq; 8402 tcp_seq save_rnxt = tp->rcv_nxt; 8403 int save_tlen = tlen; 8404 8405 m_adj(m, drop_hdrlen); /* delayed header drop */ 8406 /* 8407 * Insert segment which includes th into TCP reassembly 8408 * queue with control block tp. Set thflags to whether 8409 * reassembly now includes a segment with FIN. This handles 8410 * the common case inline (segment is the next to be 8411 * received on an established connection, and the queue is 8412 * empty), avoiding linkage into and removal from the queue 8413 * and repetition of various conversions. Set DELACK for 8414 * segments received in order, but ack immediately when 8415 * segments are out of order (so fast retransmit can work). 8416 */ 8417 if (th->th_seq == tp->rcv_nxt && 8418 SEGQ_EMPTY(tp) && 8419 (TCPS_HAVEESTABLISHED(tp->t_state) || 8420 tfo_syn)) { 8421 #ifdef NETFLIX_SB_LIMITS 8422 u_int mcnt, appended; 8423 8424 if (so->so_rcv.sb_shlim) { 8425 mcnt = m_memcnt(m); 8426 appended = 0; 8427 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 8428 CFO_NOSLEEP, NULL) == false) { 8429 counter_u64_add(tcp_sb_shlim_fails, 1); 8430 m_freem(m); 8431 return (0); 8432 } 8433 } 8434 8435 #endif 8436 if (DELAY_ACK(tp, bbr, nsegs) || tfo_syn) { 8437 bbr->bbr_segs_rcvd += max(1, nsegs); 8438 tp->t_flags |= TF_DELACK; 8439 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 8440 } else { 8441 bbr->r_wanted_output = 1; 8442 tp->t_flags |= TF_ACKNOW; 8443 } 8444 tp->rcv_nxt += tlen; 8445 thflags = th->th_flags & TH_FIN; 8446 KMOD_TCPSTAT_ADD(tcps_rcvpack, (int)nsegs); 8447 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 8448 SOCKBUF_LOCK(&so->so_rcv); 8449 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 8450 m_freem(m); 8451 else 8452 #ifdef NETFLIX_SB_LIMITS 8453 appended = 8454 #endif 8455 sbappendstream_locked(&so->so_rcv, m, 0); 8456 /* NB: sorwakeup_locked() does an implicit unlock. */ 8457 sorwakeup_locked(so); 8458 #ifdef NETFLIX_SB_LIMITS 8459 if (so->so_rcv.sb_shlim && appended != mcnt) 8460 counter_fo_release(so->so_rcv.sb_shlim, 8461 mcnt - appended); 8462 #endif 8463 } else { 8464 /* 8465 * XXX: Due to the header drop above "th" is 8466 * theoretically invalid by now. Fortunately 8467 * m_adj() doesn't actually frees any mbufs when 8468 * trimming from the head. 8469 */ 8470 tcp_seq temp = save_start; 8471 thflags = tcp_reass(tp, th, &temp, &tlen, m); 8472 tp->t_flags |= TF_ACKNOW; 8473 } 8474 if ((tp->t_flags & TF_SACK_PERMIT) && (save_tlen > 0)) { 8475 if ((tlen == 0) && (SEQ_LT(save_start, save_rnxt))) { 8476 /* 8477 * DSACK actually handled in the fastpath 8478 * above. 8479 */ 8480 tcp_update_sack_list(tp, save_start, 8481 save_start + save_tlen); 8482 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { 8483 if ((tp->rcv_numsacks >= 1) && 8484 (tp->sackblks[0].end == save_start)) { 8485 /* 8486 * Partial overlap, recorded at todrop 8487 * above. 8488 */ 8489 tcp_update_sack_list(tp, 8490 tp->sackblks[0].start, 8491 tp->sackblks[0].end); 8492 } else { 8493 tcp_update_dsack_list(tp, save_start, 8494 save_start + save_tlen); 8495 } 8496 } else if (tlen >= save_tlen) { 8497 /* Update of sackblks. 
*/ 8498 tcp_update_dsack_list(tp, save_start, 8499 save_start + save_tlen); 8500 } else if (tlen > 0) { 8501 tcp_update_dsack_list(tp, save_start, 8502 save_start + tlen); 8503 } 8504 } 8505 } else { 8506 m_freem(m); 8507 thflags &= ~TH_FIN; 8508 } 8509 8510 /* 8511 * If FIN is received ACK the FIN and let the user know that the 8512 * connection is closing. 8513 */ 8514 if (thflags & TH_FIN) { 8515 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 8516 socantrcvmore(so); 8517 /* 8518 * If connection is half-synchronized (ie NEEDSYN 8519 * flag on) then delay ACK, so it may be piggybacked 8520 * when SYN is sent. Otherwise, since we received a 8521 * FIN then no more input can be expected, send ACK 8522 * now. 8523 */ 8524 if (tp->t_flags & TF_NEEDSYN) { 8525 tp->t_flags |= TF_DELACK; 8526 bbr_timer_cancel(bbr, 8527 __LINE__, bbr->r_ctl.rc_rcvtime); 8528 } else { 8529 tp->t_flags |= TF_ACKNOW; 8530 } 8531 tp->rcv_nxt++; 8532 } 8533 switch (tp->t_state) { 8534 8535 /* 8536 * In SYN_RECEIVED and ESTABLISHED STATES enter the 8537 * CLOSE_WAIT state. 8538 */ 8539 case TCPS_SYN_RECEIVED: 8540 tp->t_starttime = ticks; 8541 /* FALLTHROUGH */ 8542 case TCPS_ESTABLISHED: 8543 tcp_state_change(tp, TCPS_CLOSE_WAIT); 8544 break; 8545 8546 /* 8547 * If still in FIN_WAIT_1 STATE FIN has not been 8548 * acked so enter the CLOSING state. 8549 */ 8550 case TCPS_FIN_WAIT_1: 8551 tcp_state_change(tp, TCPS_CLOSING); 8552 break; 8553 8554 /* 8555 * In FIN_WAIT_2 state enter the TIME_WAIT state, 8556 * starting the time-wait timer, turning off the 8557 * other standard timers. 8558 */ 8559 case TCPS_FIN_WAIT_2: 8560 bbr->rc_timer_first = 1; 8561 bbr_timer_cancel(bbr, 8562 __LINE__, bbr->r_ctl.rc_rcvtime); 8563 INP_WLOCK_ASSERT(tp->t_inpcb); 8564 tcp_twstart(tp); 8565 return (1); 8566 } 8567 } 8568 /* 8569 * Return any desired output. 8570 */ 8571 if ((tp->t_flags & TF_ACKNOW) || 8572 (sbavail(&so->so_snd) > ctf_outstanding(tp))) { 8573 bbr->r_wanted_output = 1; 8574 } 8575 INP_WLOCK_ASSERT(tp->t_inpcb); 8576 return (0); 8577 } 8578 8579 /* 8580 * Here nothing is really faster, its just that we 8581 * have broken out the fast-data path also just like 8582 * the fast-ack. Return 1 if we processed the packet 8583 * return 0 if you need to take the "slow-path". 8584 */ 8585 static int 8586 bbr_do_fastnewdata(struct mbuf *m, struct tcphdr *th, struct socket *so, 8587 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 8588 uint32_t tiwin, int32_t nxt_pkt) 8589 { 8590 uint16_t nsegs; 8591 int32_t newsize = 0; /* automatic sockbuf scaling */ 8592 struct tcp_bbr *bbr; 8593 #ifdef NETFLIX_SB_LIMITS 8594 u_int mcnt, appended; 8595 #endif 8596 #ifdef TCPDEBUG 8597 /* 8598 * The size of tcp_saveipgen must be the size of the max ip header, 8599 * now IPv6. 8600 */ 8601 u_char tcp_saveipgen[IP6_HDR_LEN]; 8602 struct tcphdr tcp_savetcp; 8603 short ostate = 0; 8604 8605 #endif 8606 /* On the hpts and we would have called output */ 8607 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 8608 8609 /* 8610 * If last ACK falls within this segment's sequence numbers, record 8611 * the timestamp. NOTE that the test is modified according to the 8612 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 
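 * But first a set of cheap tests rejects anything that cannot take
 * the fast data path: a pending retransmit, a window change, a
 * pending SYN/FIN, a stale timestamp, an ACK that is not exactly
 * snd_una, or insufficient receive buffer space.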
8613 */ 8614 if (bbr->r_ctl.rc_resend != NULL) { 8615 return (0); 8616 } 8617 if (tiwin && tiwin != tp->snd_wnd) { 8618 return (0); 8619 } 8620 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { 8621 return (0); 8622 } 8623 if (__predict_false((to->to_flags & TOF_TS) && 8624 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { 8625 return (0); 8626 } 8627 if (__predict_false((th->th_ack != tp->snd_una))) { 8628 return (0); 8629 } 8630 if (__predict_false(tlen > sbspace(&so->so_rcv))) { 8631 return (0); 8632 } 8633 if ((to->to_flags & TOF_TS) != 0 && 8634 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 8635 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 8636 tp->ts_recent = to->to_tsval; 8637 } 8638 /* 8639 * This is a pure, in-sequence data packet with nothing on the 8640 * reassembly queue and we have enough buffer space to take it. 8641 */ 8642 nsegs = max(1, m->m_pkthdr.lro_nsegs); 8643 8644 #ifdef NETFLIX_SB_LIMITS 8645 if (so->so_rcv.sb_shlim) { 8646 mcnt = m_memcnt(m); 8647 appended = 0; 8648 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, 8649 CFO_NOSLEEP, NULL) == false) { 8650 counter_u64_add(tcp_sb_shlim_fails, 1); 8651 m_freem(m); 8652 return (1); 8653 } 8654 } 8655 #endif 8656 /* Clean receiver SACK report if present */ 8657 if (tp->rcv_numsacks) 8658 tcp_clean_sackreport(tp); 8659 KMOD_TCPSTAT_INC(tcps_preddat); 8660 tp->rcv_nxt += tlen; 8661 /* 8662 * Pull snd_wl1 up to prevent seq wrap relative to th_seq. 8663 */ 8664 tp->snd_wl1 = th->th_seq; 8665 /* 8666 * Pull rcv_up up to prevent seq wrap relative to rcv_nxt. 8667 */ 8668 tp->rcv_up = tp->rcv_nxt; 8669 KMOD_TCPSTAT_ADD(tcps_rcvpack, (int)nsegs); 8670 KMOD_TCPSTAT_ADD(tcps_rcvbyte, tlen); 8671 #ifdef TCPDEBUG 8672 if (so->so_options & SO_DEBUG) 8673 tcp_trace(TA_INPUT, ostate, tp, 8674 (void *)tcp_saveipgen, &tcp_savetcp, 0); 8675 #endif 8676 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 8677 8678 /* Add data to socket buffer. */ 8679 SOCKBUF_LOCK(&so->so_rcv); 8680 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 8681 m_freem(m); 8682 } else { 8683 /* 8684 * Set new socket buffer size. Give up when limit is 8685 * reached. 8686 */ 8687 if (newsize) 8688 if (!sbreserve_locked(&so->so_rcv, 8689 newsize, so, NULL)) 8690 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 8691 m_adj(m, drop_hdrlen); /* delayed header drop */ 8692 8693 #ifdef NETFLIX_SB_LIMITS 8694 appended = 8695 #endif 8696 sbappendstream_locked(&so->so_rcv, m, 0); 8697 ctf_calc_rwin(so, tp); 8698 } 8699 /* NB: sorwakeup_locked() does an implicit unlock. */ 8700 sorwakeup_locked(so); 8701 #ifdef NETFLIX_SB_LIMITS 8702 if (so->so_rcv.sb_shlim && mcnt != appended) 8703 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); 8704 #endif 8705 if (DELAY_ACK(tp, bbr, nsegs)) { 8706 bbr->bbr_segs_rcvd += max(1, nsegs); 8707 tp->t_flags |= TF_DELACK; 8708 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 8709 } else { 8710 bbr->r_wanted_output = 1; 8711 tp->t_flags |= TF_ACKNOW; 8712 } 8713 return (1); 8714 } 8715 8716 /* 8717 * This subfunction is used to try to highly optimize the 8718 * fast path. We again allow window updates that are 8719 * in sequence to remain in the fast-path. We also add 8720 * in the __predict's to attempt to help the compiler. 8721 * Note that if we return a 0, then we can *not* process 8722 * it and the caller should push the packet into the 8723 * slow-path. If we return 1, then all is well and 8724 * the packet is fully processed. 
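 * The early checks bail out to the slow path for anything unusual:
 * old or too-new ACKs, a zero window, pending SYN/FIN, a stale
 * timestamp, recovery, an outstanding retransmit, persist mode, or
 * SACK holes on the scoreboard.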
8725 */ 8726 static int 8727 bbr_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 8728 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 8729 uint32_t tiwin, int32_t nxt_pkt, uint8_t iptos) 8730 { 8731 int32_t acked; 8732 uint16_t nsegs; 8733 uint32_t sack_changed; 8734 #ifdef TCPDEBUG 8735 /* 8736 * The size of tcp_saveipgen must be the size of the max ip header, 8737 * now IPv6. 8738 */ 8739 u_char tcp_saveipgen[IP6_HDR_LEN]; 8740 struct tcphdr tcp_savetcp; 8741 short ostate = 0; 8742 8743 #endif 8744 uint32_t prev_acked = 0; 8745 struct tcp_bbr *bbr; 8746 8747 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { 8748 /* Old ack, behind (or duplicate to) the last one rcv'd */ 8749 return (0); 8750 } 8751 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { 8752 /* Above what we have sent? */ 8753 return (0); 8754 } 8755 if (__predict_false(tiwin == 0)) { 8756 /* zero window */ 8757 return (0); 8758 } 8759 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { 8760 /* We need a SYN or a FIN, unlikely.. */ 8761 return (0); 8762 } 8763 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { 8764 /* Timestamp is behind .. old ack with seq wrap? */ 8765 return (0); 8766 } 8767 if (__predict_false(IN_RECOVERY(tp->t_flags))) { 8768 /* Still recovering */ 8769 return (0); 8770 } 8771 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 8772 if (__predict_false(bbr->r_ctl.rc_resend != NULL)) { 8773 /* We are retransmitting */ 8774 return (0); 8775 } 8776 if (__predict_false(bbr->rc_in_persist != 0)) { 8777 /* In persist mode */ 8778 return (0); 8779 } 8780 if (bbr->r_ctl.rc_sacked) { 8781 /* We have sack holes on our scoreboard */ 8782 return (0); 8783 } 8784 /* Ok if we reach here, we can process a fast-ack */ 8785 nsegs = max(1, m->m_pkthdr.lro_nsegs); 8786 sack_changed = bbr_log_ack(tp, to, th, &prev_acked); 8787 /* 8788 * We never detect loss in fast ack [we can't 8789 * have a sack and can't be in recovery so 8790 * we always pass 0 (nothing detected)]. 8791 */ 8792 bbr_lt_bw_sampling(bbr, bbr->r_ctl.rc_rcvtime, 0); 8793 /* Did the window get updated? */ 8794 if (tiwin != tp->snd_wnd) { 8795 tp->snd_wnd = tiwin; 8796 tp->snd_wl1 = th->th_seq; 8797 if (tp->snd_wnd > tp->max_sndwnd) 8798 tp->max_sndwnd = tp->snd_wnd; 8799 } 8800 /* Do we need to exit persists? */ 8801 if ((bbr->rc_in_persist != 0) && 8802 (tp->snd_wnd >= min((bbr->r_ctl.rc_high_rwnd/2), 8803 bbr_minseg(bbr)))) { 8804 bbr_exit_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 8805 bbr->r_wanted_output = 1; 8806 } 8807 /* Do we need to enter persists? */ 8808 if ((bbr->rc_in_persist == 0) && 8809 (tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && 8810 TCPS_HAVEESTABLISHED(tp->t_state) && 8811 (tp->snd_max == tp->snd_una) && 8812 sbavail(&tp->t_inpcb->inp_socket->so_snd) && 8813 (sbavail(&tp->t_inpcb->inp_socket->so_snd) > tp->snd_wnd)) { 8814 /* No send window.. we must enter persist */ 8815 bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 8816 } 8817 /* 8818 * If last ACK falls within this segment's sequence numbers, record 8819 * the timestamp. NOTE that the test is modified according to the 8820 * latest proposal of the tcplw@cray.com list (Braden 1993/04/26). 8821 */ 8822 if ((to->to_flags & TOF_TS) != 0 && 8823 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 8824 tp->ts_recent_age = bbr->r_ctl.rc_rcvtime; 8825 tp->ts_recent = to->to_tsval; 8826 } 8827 /* 8828 * This is a pure ack for outstanding data. 
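 * What follows, roughly: count the prediction hit, back out a spurious
 * RTO (CC_RTO_ERR) if this ACK landed inside the bad-retransmit window,
 * drop the newly acked bytes from so_snd, advance snd_una/snd_wl2, feed
 * the sample to bbr_ack_received() for delivery-rate accounting and
 * wake the sender; if nothing is left outstanding the timers are
 * cancelled and the sack filter is cleared as well.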
8829 */ 8830 KMOD_TCPSTAT_INC(tcps_predack); 8831 8832 /* 8833 * "bad retransmit" recovery. 8834 */ 8835 if (tp->t_flags & TF_PREVVALID) { 8836 tp->t_flags &= ~TF_PREVVALID; 8837 if (tp->t_rxtshift == 1 && 8838 (int)(ticks - tp->t_badrxtwin) < 0) 8839 bbr_cong_signal(tp, th, CC_RTO_ERR, NULL); 8840 } 8841 /* 8842 * Recalculate the transmit timer / rtt. 8843 * 8844 * Some boxes send broken timestamp replies during the SYN+ACK 8845 * phase, ignore timestamps of 0 or we could calculate a huge RTT 8846 * and blow up the retransmit timer. 8847 */ 8848 acked = BYTES_THIS_ACK(tp, th); 8849 8850 #ifdef TCP_HHOOK 8851 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 8852 hhook_run_tcp_est_in(tp, th, to); 8853 #endif 8854 8855 KMOD_TCPSTAT_ADD(tcps_rcvackpack, (int)nsegs); 8856 KMOD_TCPSTAT_ADD(tcps_rcvackbyte, acked); 8857 sbdrop(&so->so_snd, acked); 8858 8859 if (SEQ_GT(th->th_ack, tp->snd_una)) 8860 bbr_collapse_rtt(tp, bbr, TCP_REXMTVAL(tp)); 8861 tp->snd_una = th->th_ack; 8862 if (tp->snd_wnd < ctf_outstanding(tp)) 8863 /* The peer collapsed its window on us */ 8864 bbr_collapsed_window(bbr); 8865 else if (bbr->rc_has_collapsed) 8866 bbr_un_collapse_window(bbr); 8867 8868 if (SEQ_GT(tp->snd_una, tp->snd_recover)) { 8869 tp->snd_recover = tp->snd_una; 8870 } 8871 bbr_ack_received(tp, bbr, th, acked, sack_changed, prev_acked, __LINE__, 0); 8872 /* 8873 * Pull snd_wl2 up to prevent seq wrap relative to th_ack. 8874 */ 8875 tp->snd_wl2 = th->th_ack; 8876 m_freem(m); 8877 /* 8878 * If all outstanding data are acked, stop retransmit timer, 8879 * otherwise restart timer using current (possibly backed-off) 8880 * value. If process is waiting for space, wakeup/selwakeup/signal. 8881 * If data are ready to send, let tcp_output decide between more 8882 * output or persist. 8883 */ 8884 #ifdef TCPDEBUG 8885 if (so->so_options & SO_DEBUG) 8886 tcp_trace(TA_INPUT, ostate, tp, 8887 (void *)tcp_saveipgen, 8888 &tcp_savetcp, 0); 8889 #endif 8890 /* Wake up the socket if we have room to write more */ 8891 sowwakeup(so); 8892 if (tp->snd_una == tp->snd_max) { 8893 /* Nothing left outstanding */ 8894 bbr_log_progress_event(bbr, tp, ticks, PROGRESS_CLEAR, __LINE__); 8895 if (sbavail(&tp->t_inpcb->inp_socket->so_snd) == 0) 8896 bbr->rc_tp->t_acktime = 0; 8897 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 8898 if (bbr->rc_in_persist == 0) { 8899 bbr->r_ctl.rc_went_idle_time = bbr->r_ctl.rc_rcvtime; 8900 } 8901 sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una); 8902 bbr_log_ack_clear(bbr, bbr->r_ctl.rc_rcvtime); 8903 /* 8904 * We invalidate the last ack here since we 8905 * don't want to transfer forward the time 8906 * for our sum's calculations. 8907 */ 8908 bbr->r_wanted_output = 1; 8909 } 8910 if (sbavail(&so->so_snd)) { 8911 bbr->r_wanted_output = 1; 8912 } 8913 return (1); 8914 } 8915 8916 /* 8917 * Return value of 1, the TCB is unlocked and most 8918 * likely gone, return value of 0, the TCB is still 8919 * locked. 8920 */ 8921 static int 8922 bbr_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so, 8923 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 8924 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 8925 { 8926 int32_t todrop; 8927 int32_t ourfinisacked = 0; 8928 struct tcp_bbr *bbr; 8929 int32_t ret_val = 0; 8930 8931 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 8932 ctf_calc_rwin(so, tp); 8933 /* 8934 * If the state is SYN_SENT: if seg contains an ACK, but not for our 8935 * SYN, drop the input. 
if seg contains a RST, then drop the 8936 * connection. if seg does not contain SYN, then drop it. Otherwise 8937 * this is an acceptable SYN segment initialize tp->rcv_nxt and 8938 * tp->irs if seg contains ack then advance tp->snd_una. BRR does 8939 * not support ECN so we will not say we are capable. if SYN has 8940 * been acked change to ESTABLISHED else SYN_RCVD state arrange for 8941 * segment to be acked (eventually) continue processing rest of 8942 * data/controls, beginning with URG 8943 */ 8944 if ((thflags & TH_ACK) && 8945 (SEQ_LEQ(th->th_ack, tp->iss) || 8946 SEQ_GT(th->th_ack, tp->snd_max))) { 8947 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 8948 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 8949 return (1); 8950 } 8951 if ((thflags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) { 8952 TCP_PROBE5(connect__refused, NULL, tp, 8953 mtod(m, const char *), tp, th); 8954 tp = tcp_drop(tp, ECONNREFUSED); 8955 ctf_do_drop(m, tp); 8956 return (1); 8957 } 8958 if (thflags & TH_RST) { 8959 ctf_do_drop(m, tp); 8960 return (1); 8961 } 8962 if (!(thflags & TH_SYN)) { 8963 ctf_do_drop(m, tp); 8964 return (1); 8965 } 8966 tp->irs = th->th_seq; 8967 tcp_rcvseqinit(tp); 8968 if (thflags & TH_ACK) { 8969 int tfo_partial = 0; 8970 8971 KMOD_TCPSTAT_INC(tcps_connects); 8972 soisconnected(so); 8973 #ifdef MAC 8974 mac_socketpeer_set_from_mbuf(m, so); 8975 #endif 8976 /* Do window scaling on this connection? */ 8977 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 8978 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 8979 tp->rcv_scale = tp->request_r_scale; 8980 } 8981 tp->rcv_adv += min(tp->rcv_wnd, 8982 TCP_MAXWIN << tp->rcv_scale); 8983 /* 8984 * If not all the data that was sent in the TFO SYN 8985 * has been acked, resend the remainder right away. 8986 */ 8987 if (IS_FASTOPEN(tp->t_flags) && 8988 (tp->snd_una != tp->snd_max)) { 8989 tp->snd_nxt = th->th_ack; 8990 tfo_partial = 1; 8991 } 8992 /* 8993 * If there's data, delay ACK; if there's also a FIN ACKNOW 8994 * will be turned on later. 8995 */ 8996 if (DELAY_ACK(tp, bbr, 1) && tlen != 0 && !tfo_partial) { 8997 bbr->bbr_segs_rcvd += 1; 8998 tp->t_flags |= TF_DELACK; 8999 bbr_timer_cancel(bbr, __LINE__, bbr->r_ctl.rc_rcvtime); 9000 } else { 9001 bbr->r_wanted_output = 1; 9002 tp->t_flags |= TF_ACKNOW; 9003 } 9004 if (SEQ_GT(th->th_ack, tp->iss)) { 9005 /* 9006 * The SYN is acked 9007 * handle it specially. 9008 */ 9009 bbr_log_syn(tp, to); 9010 } 9011 if (SEQ_GT(th->th_ack, tp->snd_una)) { 9012 /* 9013 * We advance snd_una for the 9014 * fast open case. If th_ack is 9015 * acknowledging data beyond 9016 * snd_una we can't just call 9017 * ack-processing since the 9018 * data stream in our send-map 9019 * will start at snd_una + 1 (one 9020 * beyond the SYN). If its just 9021 * equal we don't need to do that 9022 * and there is no send_map. 9023 */ 9024 tp->snd_una++; 9025 } 9026 /* 9027 * Received <SYN,ACK> in SYN_SENT[*] state. Transitions: 9028 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 9029 */ 9030 tp->t_starttime = ticks; 9031 if (tp->t_flags & TF_NEEDFIN) { 9032 tcp_state_change(tp, TCPS_FIN_WAIT_1); 9033 tp->t_flags &= ~TF_NEEDFIN; 9034 thflags &= ~TH_SYN; 9035 } else { 9036 tcp_state_change(tp, TCPS_ESTABLISHED); 9037 TCP_PROBE5(connect__established, NULL, tp, 9038 mtod(m, const char *), tp, th); 9039 cc_conn_init(tp); 9040 } 9041 } else { 9042 /* 9043 * Received initial SYN in SYN-SENT[*] state => simultaneous 9044 * open. If segment contains CC option and there is a 9045 * cached CC, apply TAO test. 
If it succeeds, connection is * 9046 * half-synchronized. Otherwise, do 3-way handshake: 9047 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If 9048 * there was no CC option, clear cached CC value. 9049 */ 9050 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 9051 tcp_state_change(tp, TCPS_SYN_RECEIVED); 9052 } 9053 INP_WLOCK_ASSERT(tp->t_inpcb); 9054 /* 9055 * Advance th->th_seq to correspond to first data byte. If data, 9056 * trim to stay within window, dropping FIN if necessary. 9057 */ 9058 th->th_seq++; 9059 if (tlen > tp->rcv_wnd) { 9060 todrop = tlen - tp->rcv_wnd; 9061 m_adj(m, -todrop); 9062 tlen = tp->rcv_wnd; 9063 thflags &= ~TH_FIN; 9064 KMOD_TCPSTAT_INC(tcps_rcvpackafterwin); 9065 KMOD_TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 9066 } 9067 tp->snd_wl1 = th->th_seq - 1; 9068 tp->rcv_up = th->th_seq; 9069 /* 9070 * Client side of transaction: already sent SYN and data. If the 9071 * remote host used T/TCP to validate the SYN, our data will be 9072 * ACK'd; if so, enter normal data segment processing in the middle 9073 * of step 5, ack processing. Otherwise, goto step 6. 9074 */ 9075 if (thflags & TH_ACK) { 9076 if ((to->to_flags & TOF_TS) != 0) { 9077 uint32_t t, rtt; 9078 9079 t = tcp_tv_to_mssectick(&bbr->rc_tv); 9080 if (TSTMP_GEQ(t, to->to_tsecr)) { 9081 rtt = t - to->to_tsecr; 9082 if (rtt == 0) { 9083 rtt = 1; 9084 } 9085 rtt *= MS_IN_USEC; 9086 tcp_bbr_xmit_timer(bbr, rtt, 0, 0, 0); 9087 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, 9088 rtt, bbr->r_ctl.rc_rcvtime); 9089 } 9090 } 9091 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) 9092 return (ret_val); 9093 /* We may have changed to FIN_WAIT_1 above */ 9094 if (tp->t_state == TCPS_FIN_WAIT_1) { 9095 /* 9096 * In FIN_WAIT_1 STATE in addition to the processing 9097 * for the ESTABLISHED state if our FIN is now 9098 * acknowledged then enter FIN_WAIT_2. 9099 */ 9100 if (ourfinisacked) { 9101 /* 9102 * If we can't receive any more data, then 9103 * closing user can proceed. Starting the 9104 * timer is contrary to the specification, 9105 * but if we don't get a FIN we'll hang 9106 * forever. 9107 * 9108 * XXXjl: we should release the tp also, and 9109 * use a compressed state. 9110 */ 9111 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 9112 soisdisconnected(so); 9113 tcp_timer_activate(tp, TT_2MSL, 9114 (tcp_fast_finwait2_recycle ? 9115 tcp_finwait2_timeout : 9116 TP_MAXIDLE(tp))); 9117 } 9118 tcp_state_change(tp, TCPS_FIN_WAIT_2); 9119 } 9120 } 9121 } 9122 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9123 tiwin, thflags, nxt_pkt)); 9124 } 9125 9126 /* 9127 * Return value of 1, the TCB is unlocked and most 9128 * likely gone, return value of 0, the TCB is still 9129 * locked. 
9130 */ 9131 static int 9132 bbr_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so, 9133 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9134 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9135 { 9136 int32_t ourfinisacked = 0; 9137 int32_t ret_val; 9138 struct tcp_bbr *bbr; 9139 9140 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9141 ctf_calc_rwin(so, tp); 9142 if ((thflags & TH_ACK) && 9143 (SEQ_LEQ(th->th_ack, tp->snd_una) || 9144 SEQ_GT(th->th_ack, tp->snd_max))) { 9145 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 9146 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9147 return (1); 9148 } 9149 if (IS_FASTOPEN(tp->t_flags)) { 9150 /* 9151 * When a TFO connection is in SYN_RECEIVED, the only valid 9152 * packets are the initial SYN, a retransmit/copy of the 9153 * initial SYN (possibly with a subset of the original 9154 * data), a valid ACK, a FIN, or a RST. 9155 */ 9156 if ((thflags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK)) { 9157 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 9158 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9159 return (1); 9160 } else if (thflags & TH_SYN) { 9161 /* non-initial SYN is ignored */ 9162 if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || 9163 (bbr->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || 9164 (bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { 9165 ctf_do_drop(m, NULL); 9166 return (0); 9167 } 9168 } else if (!(thflags & (TH_ACK | TH_FIN | TH_RST))) { 9169 ctf_do_drop(m, NULL); 9170 return (0); 9171 } 9172 } 9173 if ((thflags & TH_RST) || 9174 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9175 return (ctf_process_rst(m, th, so, tp)); 9176 /* 9177 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9178 * it's less than ts_recent, drop it. 9179 */ 9180 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9181 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9182 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9183 return (ret_val); 9184 } 9185 /* 9186 * In the SYN-RECEIVED state, validate that the packet belongs to 9187 * this connection before trimming the data to fit the receive 9188 * window. Check the sequence number versus IRS since we know the 9189 * sequence numbers haven't wrapped. This is a partial fix for the 9190 * "LAND" DoS attack. 9191 */ 9192 if (SEQ_LT(th->th_seq, tp->irs)) { 9193 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 9194 ctf_do_dropwithreset(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9195 return (1); 9196 } 9197 INP_WLOCK_ASSERT(tp->t_inpcb); 9198 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9199 return (ret_val); 9200 } 9201 /* 9202 * If last ACK falls within this segment's sequence numbers, record 9203 * its timestamp. NOTE: 1) That the test incorporates suggestions 9204 * from the latest proposal of the tcplw@cray.com list (Braden 9205 * 1993/04/26). 2) That updating only on newer timestamps interferes 9206 * with our earlier PAWS tests, so this check should be solely 9207 * predicated on the sequence space of this segment. 3) That we 9208 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9209 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9210 * SEG.Len, This modified check allows us to overcome RFC1323's 9211 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9212 * p.869. In such cases, we can still calculate the RTT correctly 9213 * when RCV.NXT == Last.ACK.Sent. 
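 * Worked example (made-up numbers): with last_ack_sent = 4001 and a
 * pure ACK where th_seq = 4001 and tlen = 0, the strict RFC1323 test
 * (Last.ACK.Sent < SEG.SEQ + SEG.Len) would refuse the update, while
 * the <= form used below accepts it, so the peer's tsval still reaches
 * ts_recent and the tsecr-based RTT sampling in this state.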
9214 */ 9215 if ((to->to_flags & TOF_TS) != 0 && 9216 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9217 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9218 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9219 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9220 tp->ts_recent = to->to_tsval; 9221 } 9222 tp->snd_wnd = tiwin; 9223 /* 9224 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9225 * is on (half-synchronized state), then queue data for later 9226 * processing; else drop segment and return. 9227 */ 9228 if ((thflags & TH_ACK) == 0) { 9229 if (IS_FASTOPEN(tp->t_flags)) { 9230 cc_conn_init(tp); 9231 } 9232 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9233 tiwin, thflags, nxt_pkt)); 9234 } 9235 KMOD_TCPSTAT_INC(tcps_connects); 9236 soisconnected(so); 9237 /* Do window scaling? */ 9238 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == 9239 (TF_RCVD_SCALE | TF_REQ_SCALE)) { 9240 tp->rcv_scale = tp->request_r_scale; 9241 } 9242 /* 9243 * ok for the first time in lets see if we can use the ts to figure 9244 * out what the initial RTT was. 9245 */ 9246 if ((to->to_flags & TOF_TS) != 0) { 9247 uint32_t t, rtt; 9248 9249 t = tcp_tv_to_mssectick(&bbr->rc_tv); 9250 if (TSTMP_GEQ(t, to->to_tsecr)) { 9251 rtt = t - to->to_tsecr; 9252 if (rtt == 0) { 9253 rtt = 1; 9254 } 9255 rtt *= MS_IN_USEC; 9256 tcp_bbr_xmit_timer(bbr, rtt, 0, 0, 0); 9257 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, bbr->r_ctl.rc_rcvtime); 9258 } 9259 } 9260 /* Drop off any SYN in the send map (probably not there) */ 9261 if (thflags & TH_ACK) 9262 bbr_log_syn(tp, to); 9263 if (IS_FASTOPEN(tp->t_flags) && tp->t_tfo_pending) { 9264 9265 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 9266 tp->t_tfo_pending = NULL; 9267 } 9268 /* 9269 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> 9270 * FIN-WAIT-1 9271 */ 9272 tp->t_starttime = ticks; 9273 if (tp->t_flags & TF_NEEDFIN) { 9274 tcp_state_change(tp, TCPS_FIN_WAIT_1); 9275 tp->t_flags &= ~TF_NEEDFIN; 9276 } else { 9277 tcp_state_change(tp, TCPS_ESTABLISHED); 9278 TCP_PROBE5(accept__established, NULL, tp, 9279 mtod(m, const char *), tp, th); 9280 /* 9281 * TFO connections call cc_conn_init() during SYN 9282 * processing. Calling it again here for such connections 9283 * is not harmless as it would undo the snd_cwnd reduction 9284 * that occurs when a TFO SYN|ACK is retransmitted. 9285 */ 9286 if (!IS_FASTOPEN(tp->t_flags)) 9287 cc_conn_init(tp); 9288 } 9289 /* 9290 * Account for the ACK of our SYN prior to 9291 * regular ACK processing below, except for 9292 * simultaneous SYN, which is handled later. 9293 */ 9294 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) 9295 tp->snd_una++; 9296 /* 9297 * If segment contains data or ACK, will call tcp_reass() later; if 9298 * not, do so now to pass queued data to user. 9299 */ 9300 if (tlen == 0 && (thflags & TH_FIN) == 0) 9301 (void)tcp_reass(tp, (struct tcphdr *)0, NULL, 0, 9302 (struct mbuf *)0); 9303 tp->snd_wl1 = th->th_seq - 1; 9304 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 9305 return (ret_val); 9306 } 9307 if (tp->t_state == TCPS_FIN_WAIT_1) { 9308 /* We could have went to FIN_WAIT_1 (or EST) above */ 9309 /* 9310 * In FIN_WAIT_1 STATE in addition to the processing for the 9311 * ESTABLISHED state if our FIN is now acknowledged then 9312 * enter FIN_WAIT_2. 9313 */ 9314 if (ourfinisacked) { 9315 /* 9316 * If we can't receive any more data, then closing 9317 * user can proceed. 
Starting the timer is contrary 9318 * to the specification, but if we don't get a FIN 9319 * we'll hang forever. 9320 * 9321 * XXXjl: we should release the tp also, and use a 9322 * compressed state. 9323 */ 9324 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 9325 soisdisconnected(so); 9326 tcp_timer_activate(tp, TT_2MSL, 9327 (tcp_fast_finwait2_recycle ? 9328 tcp_finwait2_timeout : 9329 TP_MAXIDLE(tp))); 9330 } 9331 tcp_state_change(tp, TCPS_FIN_WAIT_2); 9332 } 9333 } 9334 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9335 tiwin, thflags, nxt_pkt)); 9336 } 9337 9338 /* 9339 * Return value of 1, the TCB is unlocked and most 9340 * likely gone, return value of 0, the TCB is still 9341 * locked. 9342 */ 9343 static int 9344 bbr_do_established(struct mbuf *m, struct tcphdr *th, struct socket *so, 9345 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9346 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9347 { 9348 struct tcp_bbr *bbr; 9349 int32_t ret_val; 9350 9351 /* 9352 * Header prediction: check for the two common cases of a 9353 * uni-directional data xfer. If the packet has no control flags, 9354 * is in-sequence, the window didn't change and we're not 9355 * retransmitting, it's a candidate. If the length is zero and the 9356 * ack moved forward, we're the sender side of the xfer. Just free 9357 * the data acked & wake any higher level process that was blocked 9358 * waiting for space. If the length is non-zero and the ack didn't 9359 * move, we're the receiver side. If we're getting packets in-order 9360 * (the reassembly queue is empty), add the data toc The socket 9361 * buffer and note that we need a delayed ack. Make sure that the 9362 * hidden state-flags are also off. Since we check for 9363 * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN. 9364 */ 9365 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9366 if (bbr->r_ctl.rc_delivered < (4 * tp->t_maxseg)) { 9367 /* 9368 * If we have delived under 4 segments increase the initial 9369 * window if raised by the peer. We use this to determine 9370 * dynamic and static rwnd's at the end of a connection. 9371 */ 9372 bbr->r_ctl.rc_init_rwnd = max(tiwin, tp->snd_wnd); 9373 } 9374 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && 9375 __predict_true((thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK)) == TH_ACK) && 9376 __predict_true(SEGQ_EMPTY(tp)) && 9377 __predict_true(th->th_seq == tp->rcv_nxt)) { 9378 if (tlen == 0) { 9379 if (bbr_fastack(m, th, so, tp, to, drop_hdrlen, tlen, 9380 tiwin, nxt_pkt, iptos)) { 9381 return (0); 9382 } 9383 } else { 9384 if (bbr_do_fastnewdata(m, th, so, tp, to, drop_hdrlen, tlen, 9385 tiwin, nxt_pkt)) { 9386 return (0); 9387 } 9388 } 9389 } 9390 ctf_calc_rwin(so, tp); 9391 9392 if ((thflags & TH_RST) || 9393 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9394 return (ctf_process_rst(m, th, so, tp)); 9395 /* 9396 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9397 * synchronized state. 9398 */ 9399 if (thflags & TH_SYN) { 9400 ctf_challenge_ack(m, th, tp, &ret_val); 9401 return (ret_val); 9402 } 9403 /* 9404 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9405 * it's less than ts_recent, drop it. 
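 * Example (made-up numbers): with ts_recent = 100000, a stale segment
 * carrying tsval 99970 trips TSTMP_LT() and is handed to
 * ctf_ts_check(), which is expected to mirror the classic PAWS
 * handling: normally the segment is ACKed and dropped, but if
 * ts_recent is merely stale from a long idle period it is invalidated
 * and processing continues.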
9406 */ 9407 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9408 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9409 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9410 return (ret_val); 9411 } 9412 INP_WLOCK_ASSERT(tp->t_inpcb); 9413 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9414 return (ret_val); 9415 } 9416 /* 9417 * If last ACK falls within this segment's sequence numbers, record 9418 * its timestamp. NOTE: 1) That the test incorporates suggestions 9419 * from the latest proposal of the tcplw@cray.com list (Braden 9420 * 1993/04/26). 2) That updating only on newer timestamps interferes 9421 * with our earlier PAWS tests, so this check should be solely 9422 * predicated on the sequence space of this segment. 3) That we 9423 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9424 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9425 * SEG.Len, This modified check allows us to overcome RFC1323's 9426 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9427 * p.869. In such cases, we can still calculate the RTT correctly 9428 * when RCV.NXT == Last.ACK.Sent. 9429 */ 9430 if ((to->to_flags & TOF_TS) != 0 && 9431 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9432 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9433 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9434 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9435 tp->ts_recent = to->to_tsval; 9436 } 9437 /* 9438 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9439 * is on (half-synchronized state), then queue data for later 9440 * processing; else drop segment and return. 9441 */ 9442 if ((thflags & TH_ACK) == 0) { 9443 if (tp->t_flags & TF_NEEDSYN) { 9444 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9445 tiwin, thflags, nxt_pkt)); 9446 } else if (tp->t_flags & TF_ACKNOW) { 9447 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 9448 bbr->r_wanted_output = 1; 9449 return (ret_val); 9450 } else { 9451 ctf_do_drop(m, NULL); 9452 return (0); 9453 } 9454 } 9455 /* 9456 * Ack processing. 9457 */ 9458 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 9459 return (ret_val); 9460 } 9461 if (sbavail(&so->so_snd)) { 9462 if (ctf_progress_timeout_check(tp, true)) { 9463 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 9464 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9465 return (1); 9466 } 9467 } 9468 /* State changes only happen in bbr_process_data() */ 9469 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9470 tiwin, thflags, nxt_pkt)); 9471 } 9472 9473 /* 9474 * Return value of 1, the TCB is unlocked and most 9475 * likely gone, return value of 0, the TCB is still 9476 * locked. 9477 */ 9478 static int 9479 bbr_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so, 9480 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9481 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9482 { 9483 struct tcp_bbr *bbr; 9484 int32_t ret_val; 9485 9486 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9487 ctf_calc_rwin(so, tp); 9488 if ((thflags & TH_RST) || 9489 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9490 return (ctf_process_rst(m, th, so, tp)); 9491 /* 9492 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9493 * synchronized state. 
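 * The idea being that a SYN seen once the connection is synchronized is
 * either a blind in-window spoof or a peer that rebooted and lost
 * state; answering with an ACK (rather than a RST) lets a genuinely
 * restarted peer come back with a verifiable RST while giving an
 * attacker nothing to work with.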
9494 */ 9495 if (thflags & TH_SYN) { 9496 ctf_challenge_ack(m, th, tp, &ret_val); 9497 return (ret_val); 9498 } 9499 /* 9500 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9501 * it's less than ts_recent, drop it. 9502 */ 9503 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9504 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9505 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9506 return (ret_val); 9507 } 9508 INP_WLOCK_ASSERT(tp->t_inpcb); 9509 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9510 return (ret_val); 9511 } 9512 /* 9513 * If last ACK falls within this segment's sequence numbers, record 9514 * its timestamp. NOTE: 1) That the test incorporates suggestions 9515 * from the latest proposal of the tcplw@cray.com list (Braden 9516 * 1993/04/26). 2) That updating only on newer timestamps interferes 9517 * with our earlier PAWS tests, so this check should be solely 9518 * predicated on the sequence space of this segment. 3) That we 9519 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9520 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9521 * SEG.Len, This modified check allows us to overcome RFC1323's 9522 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9523 * p.869. In such cases, we can still calculate the RTT correctly 9524 * when RCV.NXT == Last.ACK.Sent. 9525 */ 9526 if ((to->to_flags & TOF_TS) != 0 && 9527 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9528 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9529 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9530 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9531 tp->ts_recent = to->to_tsval; 9532 } 9533 /* 9534 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9535 * is on (half-synchronized state), then queue data for later 9536 * processing; else drop segment and return. 9537 */ 9538 if ((thflags & TH_ACK) == 0) { 9539 if (tp->t_flags & TF_NEEDSYN) { 9540 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9541 tiwin, thflags, nxt_pkt)); 9542 } else if (tp->t_flags & TF_ACKNOW) { 9543 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 9544 bbr->r_wanted_output = 1; 9545 return (ret_val); 9546 } else { 9547 ctf_do_drop(m, NULL); 9548 return (0); 9549 } 9550 } 9551 /* 9552 * Ack processing. 
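 * bbr_process_ack() is expected to return nonzero only when it has
 * already disposed of the segment (drop-after-ack, reset, etc.), with
 * ret_val indicating whether the TCB survived; a return of 0 means the
 * ACK was absorbed and we carry on to bbr_process_data() below.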
9553 */ 9554 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, NULL, thflags, &ret_val)) { 9555 return (ret_val); 9556 } 9557 if (sbavail(&so->so_snd)) { 9558 if (ctf_progress_timeout_check(tp, true)) { 9559 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 9560 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9561 return (1); 9562 } 9563 } 9564 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9565 tiwin, thflags, nxt_pkt)); 9566 } 9567 9568 static int 9569 bbr_check_data_after_close(struct mbuf *m, struct tcp_bbr *bbr, 9570 struct tcpcb *tp, int32_t * tlen, struct tcphdr *th, struct socket *so) 9571 { 9572 9573 if (bbr->rc_allow_data_af_clo == 0) { 9574 close_now: 9575 tcp_log_end_status(tp, TCP_EI_STATUS_DATA_A_CLOSE); 9576 /* tcp_close will kill the inp pre-log the Reset */ 9577 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 9578 tp = tcp_close(tp); 9579 KMOD_TCPSTAT_INC(tcps_rcvafterclose); 9580 ctf_do_dropwithreset(m, tp, th, BANDLIM_UNLIMITED, (*tlen)); 9581 return (1); 9582 } 9583 if (sbavail(&so->so_snd) == 0) 9584 goto close_now; 9585 /* Ok we allow data that is ignored and a followup reset */ 9586 tp->rcv_nxt = th->th_seq + *tlen; 9587 tp->t_flags2 |= TF2_DROP_AF_DATA; 9588 bbr->r_wanted_output = 1; 9589 *tlen = 0; 9590 return (0); 9591 } 9592 9593 /* 9594 * Return value of 1, the TCB is unlocked and most 9595 * likely gone, return value of 0, the TCB is still 9596 * locked. 9597 */ 9598 static int 9599 bbr_do_fin_wait_1(struct mbuf *m, struct tcphdr *th, struct socket *so, 9600 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9601 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9602 { 9603 int32_t ourfinisacked = 0; 9604 int32_t ret_val; 9605 struct tcp_bbr *bbr; 9606 9607 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9608 ctf_calc_rwin(so, tp); 9609 if ((thflags & TH_RST) || 9610 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9611 return (ctf_process_rst(m, th, so, tp)); 9612 /* 9613 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9614 * synchronized state. 9615 */ 9616 if (thflags & TH_SYN) { 9617 ctf_challenge_ack(m, th, tp, &ret_val); 9618 return (ret_val); 9619 } 9620 /* 9621 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9622 * it's less than ts_recent, drop it. 9623 */ 9624 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9625 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9626 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9627 return (ret_val); 9628 } 9629 INP_WLOCK_ASSERT(tp->t_inpcb); 9630 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9631 return (ret_val); 9632 } 9633 /* 9634 * If new data are received on a connection after the user processes 9635 * are gone, then RST the other end. 9636 */ 9637 if ((so->so_state & SS_NOFDREF) && tlen) { 9638 /* 9639 * We call a new function now so we might continue and setup 9640 * to reset at all data being ack'd. 9641 */ 9642 if (bbr_check_data_after_close(m, bbr, tp, &tlen, th, so)) 9643 return (1); 9644 } 9645 /* 9646 * If last ACK falls within this segment's sequence numbers, record 9647 * its timestamp. NOTE: 1) That the test incorporates suggestions 9648 * from the latest proposal of the tcplw@cray.com list (Braden 9649 * 1993/04/26). 2) That updating only on newer timestamps interferes 9650 * with our earlier PAWS tests, so this check should be solely 9651 * predicated on the sequence space of this segment. 
3) That we 9652 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9653 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9654 * SEG.Len, This modified check allows us to overcome RFC1323's 9655 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9656 * p.869. In such cases, we can still calculate the RTT correctly 9657 * when RCV.NXT == Last.ACK.Sent. 9658 */ 9659 if ((to->to_flags & TOF_TS) != 0 && 9660 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9661 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9662 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9663 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9664 tp->ts_recent = to->to_tsval; 9665 } 9666 /* 9667 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9668 * is on (half-synchronized state), then queue data for later 9669 * processing; else drop segment and return. 9670 */ 9671 if ((thflags & TH_ACK) == 0) { 9672 if (tp->t_flags & TF_NEEDSYN) { 9673 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9674 tiwin, thflags, nxt_pkt)); 9675 } else if (tp->t_flags & TF_ACKNOW) { 9676 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 9677 bbr->r_wanted_output = 1; 9678 return (ret_val); 9679 } else { 9680 ctf_do_drop(m, NULL); 9681 return (0); 9682 } 9683 } 9684 /* 9685 * Ack processing. 9686 */ 9687 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 9688 return (ret_val); 9689 } 9690 if (ourfinisacked) { 9691 /* 9692 * If we can't receive any more data, then closing user can 9693 * proceed. Starting the timer is contrary to the 9694 * specification, but if we don't get a FIN we'll hang 9695 * forever. 9696 * 9697 * XXXjl: we should release the tp also, and use a 9698 * compressed state. 9699 */ 9700 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 9701 soisdisconnected(so); 9702 tcp_timer_activate(tp, TT_2MSL, 9703 (tcp_fast_finwait2_recycle ? 9704 tcp_finwait2_timeout : 9705 TP_MAXIDLE(tp))); 9706 } 9707 tcp_state_change(tp, TCPS_FIN_WAIT_2); 9708 } 9709 if (sbavail(&so->so_snd)) { 9710 if (ctf_progress_timeout_check(tp, true)) { 9711 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 9712 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9713 return (1); 9714 } 9715 } 9716 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9717 tiwin, thflags, nxt_pkt)); 9718 } 9719 9720 /* 9721 * Return value of 1, the TCB is unlocked and most 9722 * likely gone, return value of 0, the TCB is still 9723 * locked. 9724 */ 9725 static int 9726 bbr_do_closing(struct mbuf *m, struct tcphdr *th, struct socket *so, 9727 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9728 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9729 { 9730 int32_t ourfinisacked = 0; 9731 int32_t ret_val; 9732 struct tcp_bbr *bbr; 9733 9734 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9735 ctf_calc_rwin(so, tp); 9736 if ((thflags & TH_RST) || 9737 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9738 return (ctf_process_rst(m, th, so, tp)); 9739 /* 9740 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9741 * synchronized state. 9742 */ 9743 if (thflags & TH_SYN) { 9744 ctf_challenge_ack(m, th, tp, &ret_val); 9745 return (ret_val); 9746 } 9747 /* 9748 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9749 * it's less than ts_recent, drop it. 
9750 */ 9751 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9752 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9753 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9754 return (ret_val); 9755 } 9756 INP_WLOCK_ASSERT(tp->t_inpcb); 9757 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9758 return (ret_val); 9759 } 9760 /* 9761 * If new data are received on a connection after the user processes 9762 * are gone, then RST the other end. 9763 */ 9764 if ((so->so_state & SS_NOFDREF) && tlen) { 9765 /* 9766 * We call a new function now so we might continue and setup 9767 * to reset at all data being ack'd. 9768 */ 9769 if (bbr_check_data_after_close(m, bbr, tp, &tlen, th, so)) 9770 return (1); 9771 } 9772 /* 9773 * If last ACK falls within this segment's sequence numbers, record 9774 * its timestamp. NOTE: 1) That the test incorporates suggestions 9775 * from the latest proposal of the tcplw@cray.com list (Braden 9776 * 1993/04/26). 2) That updating only on newer timestamps interferes 9777 * with our earlier PAWS tests, so this check should be solely 9778 * predicated on the sequence space of this segment. 3) That we 9779 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9780 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9781 * SEG.Len, This modified check allows us to overcome RFC1323's 9782 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9783 * p.869. In such cases, we can still calculate the RTT correctly 9784 * when RCV.NXT == Last.ACK.Sent. 9785 */ 9786 if ((to->to_flags & TOF_TS) != 0 && 9787 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9788 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9789 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9790 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9791 tp->ts_recent = to->to_tsval; 9792 } 9793 /* 9794 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9795 * is on (half-synchronized state), then queue data for later 9796 * processing; else drop segment and return. 9797 */ 9798 if ((thflags & TH_ACK) == 0) { 9799 if (tp->t_flags & TF_NEEDSYN) { 9800 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9801 tiwin, thflags, nxt_pkt)); 9802 } else if (tp->t_flags & TF_ACKNOW) { 9803 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 9804 bbr->r_wanted_output = 1; 9805 return (ret_val); 9806 } else { 9807 ctf_do_drop(m, NULL); 9808 return (0); 9809 } 9810 } 9811 /* 9812 * Ack processing. 9813 */ 9814 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 9815 return (ret_val); 9816 } 9817 if (ourfinisacked) { 9818 tcp_twstart(tp); 9819 m_freem(m); 9820 return (1); 9821 } 9822 if (sbavail(&so->so_snd)) { 9823 if (ctf_progress_timeout_check(tp, true)) { 9824 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 9825 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9826 return (1); 9827 } 9828 } 9829 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9830 tiwin, thflags, nxt_pkt)); 9831 } 9832 9833 /* 9834 * Return value of 1, the TCB is unlocked and most 9835 * likely gone, return value of 0, the TCB is still 9836 * locked. 
9837 */ 9838 static int 9839 bbr_do_lastack(struct mbuf *m, struct tcphdr *th, struct socket *so, 9840 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9841 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9842 { 9843 int32_t ourfinisacked = 0; 9844 int32_t ret_val; 9845 struct tcp_bbr *bbr; 9846 9847 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9848 ctf_calc_rwin(so, tp); 9849 if ((thflags & TH_RST) || 9850 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9851 return (ctf_process_rst(m, th, so, tp)); 9852 /* 9853 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9854 * synchronized state. 9855 */ 9856 if (thflags & TH_SYN) { 9857 ctf_challenge_ack(m, th, tp, &ret_val); 9858 return (ret_val); 9859 } 9860 /* 9861 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9862 * it's less than ts_recent, drop it. 9863 */ 9864 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9865 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9866 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9867 return (ret_val); 9868 } 9869 INP_WLOCK_ASSERT(tp->t_inpcb); 9870 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9871 return (ret_val); 9872 } 9873 /* 9874 * If new data are received on a connection after the user processes 9875 * are gone, then RST the other end. 9876 */ 9877 if ((so->so_state & SS_NOFDREF) && tlen) { 9878 /* 9879 * We call a new function now so we might continue and setup 9880 * to reset at all data being ack'd. 9881 */ 9882 if (bbr_check_data_after_close(m, bbr, tp, &tlen, th, so)) 9883 return (1); 9884 } 9885 /* 9886 * If last ACK falls within this segment's sequence numbers, record 9887 * its timestamp. NOTE: 1) That the test incorporates suggestions 9888 * from the latest proposal of the tcplw@cray.com list (Braden 9889 * 1993/04/26). 2) That updating only on newer timestamps interferes 9890 * with our earlier PAWS tests, so this check should be solely 9891 * predicated on the sequence space of this segment. 3) That we 9892 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 9893 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 9894 * SEG.Len, This modified check allows us to overcome RFC1323's 9895 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 9896 * p.869. In such cases, we can still calculate the RTT correctly 9897 * when RCV.NXT == Last.ACK.Sent. 9898 */ 9899 if ((to->to_flags & TOF_TS) != 0 && 9900 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 9901 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 9902 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 9903 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 9904 tp->ts_recent = to->to_tsval; 9905 } 9906 /* 9907 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 9908 * is on (half-synchronized state), then queue data for later 9909 * processing; else drop segment and return. 9910 */ 9911 if ((thflags & TH_ACK) == 0) { 9912 if (tp->t_flags & TF_NEEDSYN) { 9913 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9914 tiwin, thflags, nxt_pkt)); 9915 } else if (tp->t_flags & TF_ACKNOW) { 9916 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 9917 bbr->r_wanted_output = 1; 9918 return (ret_val); 9919 } else { 9920 ctf_do_drop(m, NULL); 9921 return (0); 9922 } 9923 } 9924 /* 9925 * case TCPS_LAST_ACK: Ack processing. 
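 * In LAST_ACK the interesting outcome is our FIN finally being acked:
 * ourfinisacked set below leads to tcp_close() and a return of 1 (TCB
 * gone); anything else falls through the usual progress check and on
 * to bbr_process_data().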
9926 */ 9927 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 9928 return (ret_val); 9929 } 9930 if (ourfinisacked) { 9931 tp = tcp_close(tp); 9932 ctf_do_drop(m, tp); 9933 return (1); 9934 } 9935 if (sbavail(&so->so_snd)) { 9936 if (ctf_progress_timeout_check(tp, true)) { 9937 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 9938 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 9939 return (1); 9940 } 9941 } 9942 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 9943 tiwin, thflags, nxt_pkt)); 9944 } 9945 9946 9947 /* 9948 * Return value of 1, the TCB is unlocked and most 9949 * likely gone, return value of 0, the TCB is still 9950 * locked. 9951 */ 9952 static int 9953 bbr_do_fin_wait_2(struct mbuf *m, struct tcphdr *th, struct socket *so, 9954 struct tcpcb *tp, struct tcpopt *to, int32_t drop_hdrlen, int32_t tlen, 9955 uint32_t tiwin, int32_t thflags, int32_t nxt_pkt, uint8_t iptos) 9956 { 9957 int32_t ourfinisacked = 0; 9958 int32_t ret_val; 9959 struct tcp_bbr *bbr; 9960 9961 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 9962 ctf_calc_rwin(so, tp); 9963 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 9964 if ((thflags & TH_RST) || 9965 (tp->t_fin_is_rst && (thflags & TH_FIN))) 9966 return (ctf_process_rst(m, th, so, tp)); 9967 9968 /* 9969 * RFC5961 Section 4.2 Send challenge ACK for any SYN in 9970 * synchronized state. 9971 */ 9972 if (thflags & TH_SYN) { 9973 ctf_challenge_ack(m, th, tp, &ret_val); 9974 return (ret_val); 9975 } 9976 INP_WLOCK_ASSERT(tp->t_inpcb); 9977 /* 9978 * RFC 1323 PAWS: If we have a timestamp reply on this segment and 9979 * it's less than ts_recent, drop it. 9980 */ 9981 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && 9982 TSTMP_LT(to->to_tsval, tp->ts_recent)) { 9983 if (ctf_ts_check(m, th, tp, tlen, thflags, &ret_val)) 9984 return (ret_val); 9985 } 9986 INP_WLOCK_ASSERT(tp->t_inpcb); 9987 if (ctf_drop_checks(to, m, th, tp, &tlen, &thflags, &drop_hdrlen, &ret_val)) { 9988 return (ret_val); 9989 } 9990 /* 9991 * If new data are received on a connection after the user processes 9992 * are gone, then we may RST the other end depending on the outcome 9993 * of bbr_check_data_after_close. 9994 */ 9995 if ((so->so_state & SS_NOFDREF) && 9996 tlen) { 9997 /* 9998 * We call a new function now so we might continue and setup 9999 * to reset at all data being ack'd. 10000 */ 10001 if (bbr_check_data_after_close(m, bbr, tp, &tlen, th, so)) 10002 return (1); 10003 } 10004 INP_WLOCK_ASSERT(tp->t_inpcb); 10005 /* 10006 * If last ACK falls within this segment's sequence numbers, record 10007 * its timestamp. NOTE: 1) That the test incorporates suggestions 10008 * from the latest proposal of the tcplw@cray.com list (Braden 10009 * 1993/04/26). 2) That updating only on newer timestamps interferes 10010 * with our earlier PAWS tests, so this check should be solely 10011 * predicated on the sequence space of this segment. 3) That we 10012 * modify the segment boundary check to be Last.ACK.Sent <= SEG.SEQ 10013 * + SEG.Len instead of RFC1323's Last.ACK.Sent < SEG.SEQ + 10014 * SEG.Len, This modified check allows us to overcome RFC1323's 10015 * limitations as described in Stevens TCP/IP Illustrated Vol. 2 10016 * p.869. In such cases, we can still calculate the RTT correctly 10017 * when RCV.NXT == Last.ACK.Sent. 
10018 */ 10019 INP_WLOCK_ASSERT(tp->t_inpcb); 10020 if ((to->to_flags & TOF_TS) != 0 && 10021 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 10022 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 10023 ((thflags & (TH_SYN | TH_FIN)) != 0))) { 10024 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 10025 tp->ts_recent = to->to_tsval; 10026 } 10027 /* 10028 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag 10029 * is on (half-synchronized state), then queue data for later 10030 * processing; else drop segment and return. 10031 */ 10032 if ((thflags & TH_ACK) == 0) { 10033 if (tp->t_flags & TF_NEEDSYN) { 10034 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 10035 tiwin, thflags, nxt_pkt)); 10036 } else if (tp->t_flags & TF_ACKNOW) { 10037 ctf_do_dropafterack(m, tp, th, thflags, tlen, &ret_val); 10038 bbr->r_wanted_output = 1; 10039 return (ret_val); 10040 } else { 10041 ctf_do_drop(m, NULL); 10042 return (0); 10043 } 10044 } 10045 /* 10046 * Ack processing. 10047 */ 10048 INP_WLOCK_ASSERT(tp->t_inpcb); 10049 if (bbr_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) { 10050 return (ret_val); 10051 } 10052 if (sbavail(&so->so_snd)) { 10053 if (ctf_progress_timeout_check(tp, true)) { 10054 bbr_log_progress_event(bbr, tp, tick, PROGRESS_DROP, __LINE__); 10055 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 10056 return (1); 10057 } 10058 } 10059 INP_WLOCK_ASSERT(tp->t_inpcb); 10060 return (bbr_process_data(m, th, so, tp, drop_hdrlen, tlen, 10061 tiwin, thflags, nxt_pkt)); 10062 } 10063 10064 static void 10065 bbr_stop_all_timers(struct tcpcb *tp) 10066 { 10067 struct tcp_bbr *bbr; 10068 10069 /* 10070 * Assure no timers are running. 10071 */ 10072 if (tcp_timer_active(tp, TT_PERSIST)) { 10073 /* We enter in persists, set the flag appropriately */ 10074 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 10075 bbr->rc_in_persist = 1; 10076 } 10077 tcp_timer_suspend(tp, TT_PERSIST); 10078 tcp_timer_suspend(tp, TT_REXMT); 10079 tcp_timer_suspend(tp, TT_KEEP); 10080 tcp_timer_suspend(tp, TT_DELACK); 10081 } 10082 10083 static void 10084 bbr_google_mode_on(struct tcp_bbr *bbr) 10085 { 10086 bbr->rc_use_google = 1; 10087 bbr->rc_no_pacing = 0; 10088 bbr->r_ctl.bbr_google_discount = bbr_google_discount; 10089 bbr->r_use_policer = bbr_policer_detection_enabled; 10090 bbr->r_ctl.rc_probertt_int = (USECS_IN_SECOND * 10); 10091 bbr->bbr_use_rack_cheat = 0; 10092 bbr->r_ctl.rc_incr_tmrs = 0; 10093 bbr->r_ctl.rc_inc_tcp_oh = 0; 10094 bbr->r_ctl.rc_inc_ip_oh = 0; 10095 bbr->r_ctl.rc_inc_enet_oh = 0; 10096 reset_time(&bbr->r_ctl.rc_delrate, 10097 BBR_NUM_RTTS_FOR_GOOG_DEL_LIMIT); 10098 reset_time_small(&bbr->r_ctl.rc_rttprop, 10099 (11 * USECS_IN_SECOND)); 10100 tcp_bbr_tso_size_check(bbr, tcp_get_usecs(&bbr->rc_tv)); 10101 } 10102 10103 static void 10104 bbr_google_mode_off(struct tcp_bbr *bbr) 10105 { 10106 bbr->rc_use_google = 0; 10107 bbr->r_ctl.bbr_google_discount = 0; 10108 bbr->no_pacing_until = bbr_no_pacing_until; 10109 bbr->r_use_policer = 0; 10110 if (bbr->no_pacing_until) 10111 bbr->rc_no_pacing = 1; 10112 else 10113 bbr->rc_no_pacing = 0; 10114 if (bbr_use_rack_resend_cheat) 10115 bbr->bbr_use_rack_cheat = 1; 10116 else 10117 bbr->bbr_use_rack_cheat = 0; 10118 if (bbr_incr_timers) 10119 bbr->r_ctl.rc_incr_tmrs = 1; 10120 else 10121 bbr->r_ctl.rc_incr_tmrs = 0; 10122 if (bbr_include_tcp_oh) 10123 bbr->r_ctl.rc_inc_tcp_oh = 1; 10124 else 10125 bbr->r_ctl.rc_inc_tcp_oh = 0; 10126 if (bbr_include_ip_oh) 10127 bbr->r_ctl.rc_inc_ip_oh = 1; 10128 
else 10129 bbr->r_ctl.rc_inc_ip_oh = 0; 10130 if (bbr_include_enet_oh) 10131 bbr->r_ctl.rc_inc_enet_oh = 1; 10132 else 10133 bbr->r_ctl.rc_inc_enet_oh = 0; 10134 bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit; 10135 reset_time(&bbr->r_ctl.rc_delrate, 10136 bbr_num_pktepo_for_del_limit); 10137 reset_time_small(&bbr->r_ctl.rc_rttprop, 10138 (bbr_filter_len_sec * USECS_IN_SECOND)); 10139 tcp_bbr_tso_size_check(bbr, tcp_get_usecs(&bbr->rc_tv)); 10140 } 10141 /* 10142 * Return 0 on success, non-zero on failure 10143 * which indicates the error (usually no memory). 10144 */ 10145 static int 10146 bbr_init(struct tcpcb *tp) 10147 { 10148 struct tcp_bbr *bbr = NULL; 10149 struct inpcb *inp; 10150 uint32_t cts; 10151 10152 tp->t_fb_ptr = uma_zalloc(bbr_pcb_zone, (M_NOWAIT | M_ZERO)); 10153 if (tp->t_fb_ptr == NULL) { 10154 /* 10155 * We need to allocate memory but cant. The INP and INP_INFO 10156 * locks and they are recusive (happens during setup. So a 10157 * scheme to drop the locks fails :( 10158 * 10159 */ 10160 return (ENOMEM); 10161 } 10162 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 10163 bbr->rtt_valid = 0; 10164 inp = tp->t_inpcb; 10165 inp->inp_flags2 |= INP_CANNOT_DO_ECN; 10166 inp->inp_flags2 |= INP_SUPPORTS_MBUFQ; 10167 TAILQ_INIT(&bbr->r_ctl.rc_map); 10168 TAILQ_INIT(&bbr->r_ctl.rc_free); 10169 TAILQ_INIT(&bbr->r_ctl.rc_tmap); 10170 bbr->rc_tp = tp; 10171 if (tp->t_inpcb) { 10172 bbr->rc_inp = tp->t_inpcb; 10173 } 10174 cts = tcp_get_usecs(&bbr->rc_tv); 10175 tp->t_acktime = 0; 10176 bbr->rc_allow_data_af_clo = bbr_ignore_data_after_close; 10177 bbr->r_ctl.rc_reorder_fade = bbr_reorder_fade; 10178 bbr->rc_tlp_threshold = bbr_tlp_thresh; 10179 bbr->r_ctl.rc_reorder_shift = bbr_reorder_thresh; 10180 bbr->r_ctl.rc_pkt_delay = bbr_pkt_delay; 10181 bbr->r_ctl.rc_min_to = bbr_min_to; 10182 bbr->rc_bbr_state = BBR_STATE_STARTUP; 10183 bbr->r_ctl.bbr_lost_at_state = 0; 10184 bbr->r_ctl.rc_lost_at_startup = 0; 10185 bbr->rc_all_timers_stopped = 0; 10186 bbr->r_ctl.rc_bbr_lastbtlbw = 0; 10187 bbr->r_ctl.rc_pkt_epoch_del = 0; 10188 bbr->r_ctl.rc_pkt_epoch = 0; 10189 bbr->r_ctl.rc_lowest_rtt = 0xffffffff; 10190 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_high_gain; 10191 bbr->r_ctl.rc_bbr_cwnd_gain = bbr_high_gain; 10192 bbr->r_ctl.rc_went_idle_time = cts; 10193 bbr->rc_pacer_started = cts; 10194 bbr->r_ctl.rc_pkt_epoch_time = cts; 10195 bbr->r_ctl.rc_rcvtime = cts; 10196 bbr->r_ctl.rc_bbr_state_time = cts; 10197 bbr->r_ctl.rc_del_time = cts; 10198 bbr->r_ctl.rc_tlp_rxt_last_time = cts; 10199 bbr->r_ctl.last_in_probertt = cts; 10200 bbr->skip_gain = 0; 10201 bbr->gain_is_limited = 0; 10202 bbr->no_pacing_until = bbr_no_pacing_until; 10203 if (bbr->no_pacing_until) 10204 bbr->rc_no_pacing = 1; 10205 if (bbr_use_google_algo) { 10206 bbr->rc_no_pacing = 0; 10207 bbr->rc_use_google = 1; 10208 bbr->r_ctl.bbr_google_discount = bbr_google_discount; 10209 bbr->r_use_policer = bbr_policer_detection_enabled; 10210 } else { 10211 bbr->rc_use_google = 0; 10212 bbr->r_ctl.bbr_google_discount = 0; 10213 bbr->r_use_policer = 0; 10214 } 10215 if (bbr_ts_limiting) 10216 bbr->rc_use_ts_limit = 1; 10217 else 10218 bbr->rc_use_ts_limit = 0; 10219 if (bbr_ts_can_raise) 10220 bbr->ts_can_raise = 1; 10221 else 10222 bbr->ts_can_raise = 0; 10223 if (V_tcp_delack_enabled == 1) 10224 tp->t_delayed_ack = 2; 10225 else if (V_tcp_delack_enabled == 0) 10226 tp->t_delayed_ack = 0; 10227 else if (V_tcp_delack_enabled < 100) 10228 tp->t_delayed_ack = V_tcp_delack_enabled; 10229 else 10230 tp->t_delayed_ack = 2; 10231 if (bbr->rc_use_google 
== 0) 10232 bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit; 10233 else 10234 bbr->r_ctl.rc_probertt_int = (USECS_IN_SECOND * 10); 10235 bbr->r_ctl.rc_min_rto_ms = bbr_rto_min_ms; 10236 bbr->rc_max_rto_sec = bbr_rto_max_sec; 10237 bbr->rc_init_win = bbr_def_init_win; 10238 if (tp->t_flags & TF_REQ_TSTMP) 10239 bbr->rc_last_options = TCP_TS_OVERHEAD; 10240 bbr->r_ctl.rc_pace_max_segs = tp->t_maxseg - bbr->rc_last_options; 10241 bbr->r_ctl.rc_high_rwnd = tp->snd_wnd; 10242 bbr->r_init_rtt = 1; 10243 10244 counter_u64_add(bbr_flows_nohdwr_pacing, 1); 10245 if (bbr_allow_hdwr_pacing) 10246 bbr->bbr_hdw_pace_ena = 1; 10247 else 10248 bbr->bbr_hdw_pace_ena = 0; 10249 if (bbr_sends_full_iwnd) 10250 bbr->bbr_init_win_cheat = 1; 10251 else 10252 bbr->bbr_init_win_cheat = 0; 10253 bbr->r_ctl.bbr_utter_max = bbr_hptsi_utter_max; 10254 bbr->r_ctl.rc_drain_pg = bbr_drain_gain; 10255 bbr->r_ctl.rc_startup_pg = bbr_high_gain; 10256 bbr->rc_loss_exit = bbr_exit_startup_at_loss; 10257 bbr->r_ctl.bbr_rttprobe_gain_val = bbr_rttprobe_gain; 10258 bbr->r_ctl.bbr_hptsi_per_second = bbr_hptsi_per_second; 10259 bbr->r_ctl.bbr_hptsi_segments_delay_tar = bbr_hptsi_segments_delay_tar; 10260 bbr->r_ctl.bbr_hptsi_segments_max = bbr_hptsi_segments_max; 10261 bbr->r_ctl.bbr_hptsi_segments_floor = bbr_hptsi_segments_floor; 10262 bbr->r_ctl.bbr_hptsi_bytes_min = bbr_hptsi_bytes_min; 10263 bbr->r_ctl.bbr_cross_over = bbr_cross_over; 10264 bbr->r_ctl.rc_rtt_shrinks = cts; 10265 if (bbr->rc_use_google) { 10266 setup_time_filter(&bbr->r_ctl.rc_delrate, 10267 FILTER_TYPE_MAX, 10268 BBR_NUM_RTTS_FOR_GOOG_DEL_LIMIT); 10269 setup_time_filter_small(&bbr->r_ctl.rc_rttprop, 10270 FILTER_TYPE_MIN, (11 * USECS_IN_SECOND)); 10271 } else { 10272 setup_time_filter(&bbr->r_ctl.rc_delrate, 10273 FILTER_TYPE_MAX, 10274 bbr_num_pktepo_for_del_limit); 10275 setup_time_filter_small(&bbr->r_ctl.rc_rttprop, 10276 FILTER_TYPE_MIN, (bbr_filter_len_sec * USECS_IN_SECOND)); 10277 } 10278 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_INIT, 0); 10279 if (bbr_uses_idle_restart) 10280 bbr->rc_use_idle_restart = 1; 10281 else 10282 bbr->rc_use_idle_restart = 0; 10283 bbr->r_ctl.rc_bbr_cur_del_rate = 0; 10284 bbr->r_ctl.rc_initial_hptsi_bw = bbr_initial_bw_bps; 10285 if (bbr_resends_use_tso) 10286 bbr->rc_resends_use_tso = 1; 10287 #ifdef NETFLIX_PEAKRATE 10288 tp->t_peakrate_thr = tp->t_maxpeakrate; 10289 #endif 10290 if (tp->snd_una != tp->snd_max) { 10291 /* Create a send map for the current outstanding data */ 10292 struct bbr_sendmap *rsm; 10293 10294 rsm = bbr_alloc(bbr); 10295 if (rsm == NULL) { 10296 uma_zfree(bbr_pcb_zone, tp->t_fb_ptr); 10297 tp->t_fb_ptr = NULL; 10298 return (ENOMEM); 10299 } 10300 rsm->r_flags = BBR_OVERMAX; 10301 rsm->r_tim_lastsent[0] = cts; 10302 rsm->r_rtr_cnt = 1; 10303 rsm->r_rtr_bytes = 0; 10304 rsm->r_start = tp->snd_una; 10305 rsm->r_end = tp->snd_max; 10306 rsm->r_dupack = 0; 10307 rsm->r_delivered = bbr->r_ctl.rc_delivered; 10308 rsm->r_ts_valid = 0; 10309 rsm->r_del_ack_ts = tp->ts_recent; 10310 rsm->r_del_time = cts; 10311 if (bbr->r_ctl.r_app_limited_until) 10312 rsm->r_app_limited = 1; 10313 else 10314 rsm->r_app_limited = 0; 10315 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_map, rsm, r_next); 10316 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_tmap, rsm, r_tnext); 10317 rsm->r_in_tmap = 1; 10318 if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) 10319 rsm->r_bbr_state = bbr_state_val(bbr); 10320 else 10321 rsm->r_bbr_state = 8; 10322 } 10323 if (bbr_use_rack_resend_cheat && (bbr->rc_use_google == 0)) 10324 bbr->bbr_use_rack_cheat 
= 1; 10325 if (bbr_incr_timers && (bbr->rc_use_google == 0)) 10326 bbr->r_ctl.rc_incr_tmrs = 1; 10327 if (bbr_include_tcp_oh && (bbr->rc_use_google == 0)) 10328 bbr->r_ctl.rc_inc_tcp_oh = 1; 10329 if (bbr_include_ip_oh && (bbr->rc_use_google == 0)) 10330 bbr->r_ctl.rc_inc_ip_oh = 1; 10331 if (bbr_include_enet_oh && (bbr->rc_use_google == 0)) 10332 bbr->r_ctl.rc_inc_enet_oh = 1; 10333 10334 bbr_log_type_statechange(bbr, cts, __LINE__); 10335 if (TCPS_HAVEESTABLISHED(tp->t_state) && 10336 (tp->t_srtt)) { 10337 uint32_t rtt; 10338 10339 rtt = (TICKS_2_USEC(tp->t_srtt) >> TCP_RTT_SHIFT); 10340 apply_filter_min_small(&bbr->r_ctl.rc_rttprop, rtt, cts); 10341 } 10342 /* announce the settings and state */ 10343 bbr_log_settings_change(bbr, BBR_RECOVERY_LOWRTT); 10344 tcp_bbr_tso_size_check(bbr, cts); 10345 /* 10346 * Now call the generic function to start a timer. This will place 10347 * the TCB on the hptsi wheel if a timer is needed with appropriate 10348 * flags. 10349 */ 10350 bbr_stop_all_timers(tp); 10351 bbr_start_hpts_timer(bbr, tp, cts, 5, 0, 0); 10352 return (0); 10353 } 10354 10355 /* 10356 * Return 0 if we can accept the connection. Return 10357 * non-zero if we can't handle the connection. An EAGAIN 10358 * means you need to wait until the connection is up. 10359 * An EINVAL means we can never handle the connection 10360 * (no SACK). 10361 */ 10362 static int 10363 bbr_handoff_ok(struct tcpcb *tp) 10364 { 10365 if ((tp->t_state == TCPS_CLOSED) || 10366 (tp->t_state == TCPS_LISTEN)) { 10367 /* Sure no problem though it may not stick */ 10368 return (0); 10369 } 10370 if ((tp->t_state == TCPS_SYN_SENT) || 10371 (tp->t_state == TCPS_SYN_RECEIVED)) { 10372 /* 10373 * We really don't know; you have to get to ESTAB or beyond 10374 * to tell. 10375 */ 10376 return (EAGAIN); 10377 } 10378 if ((tp->t_flags & TF_SACK_PERMIT) || bbr_sack_not_required) { 10379 return (0); 10380 } 10381 /* 10382 * If we reach here we don't do SACK on this connection so we can 10383 * never do rack.
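 * BBR's send-map bookkeeping and its RACK-style loss handling lean on
 * SACK scoreboard information, so without TF_SACK_PERMIT (and with
 * bbr_sack_not_required left off) the handoff is refused outright.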
10384 */ 10385 return (EINVAL); 10386 } 10387 10388 static void 10389 bbr_fini(struct tcpcb *tp, int32_t tcb_is_purged) 10390 { 10391 if (tp->t_fb_ptr) { 10392 uint32_t calc; 10393 struct tcp_bbr *bbr; 10394 struct bbr_sendmap *rsm; 10395 10396 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 10397 if (bbr->r_ctl.crte) 10398 tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp); 10399 bbr_log_flowend(bbr); 10400 bbr->rc_tp = NULL; 10401 if (tp->t_inpcb) { 10402 /* Backout any flags2 we applied */ 10403 tp->t_inpcb->inp_flags2 &= ~INP_CANNOT_DO_ECN; 10404 tp->t_inpcb->inp_flags2 &= ~INP_SUPPORTS_MBUFQ; 10405 tp->t_inpcb->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 10406 } 10407 if (bbr->bbr_hdrw_pacing) 10408 counter_u64_add(bbr_flows_whdwr_pacing, -1); 10409 else 10410 counter_u64_add(bbr_flows_nohdwr_pacing, -1); 10411 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 10412 while (rsm) { 10413 TAILQ_REMOVE(&bbr->r_ctl.rc_map, rsm, r_next); 10414 uma_zfree(bbr_zone, rsm); 10415 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 10416 } 10417 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free); 10418 while (rsm) { 10419 TAILQ_REMOVE(&bbr->r_ctl.rc_free, rsm, r_next); 10420 uma_zfree(bbr_zone, rsm); 10421 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_free); 10422 } 10423 calc = bbr->r_ctl.rc_high_rwnd - bbr->r_ctl.rc_init_rwnd; 10424 if (calc > (bbr->r_ctl.rc_init_rwnd / 10)) 10425 BBR_STAT_INC(bbr_dynamic_rwnd); 10426 else 10427 BBR_STAT_INC(bbr_static_rwnd); 10428 bbr->r_ctl.rc_free_cnt = 0; 10429 uma_zfree(bbr_pcb_zone, tp->t_fb_ptr); 10430 tp->t_fb_ptr = NULL; 10431 } 10432 /* Make sure snd_nxt is correctly set */ 10433 tp->snd_nxt = tp->snd_max; 10434 } 10435 10436 static void 10437 bbr_set_state(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t win) 10438 { 10439 switch (tp->t_state) { 10440 case TCPS_SYN_SENT: 10441 bbr->r_state = TCPS_SYN_SENT; 10442 bbr->r_substate = bbr_do_syn_sent; 10443 break; 10444 case TCPS_SYN_RECEIVED: 10445 bbr->r_state = TCPS_SYN_RECEIVED; 10446 bbr->r_substate = bbr_do_syn_recv; 10447 break; 10448 case TCPS_ESTABLISHED: 10449 bbr->r_ctl.rc_init_rwnd = max(win, bbr->rc_tp->snd_wnd); 10450 bbr->r_state = TCPS_ESTABLISHED; 10451 bbr->r_substate = bbr_do_established; 10452 break; 10453 case TCPS_CLOSE_WAIT: 10454 bbr->r_state = TCPS_CLOSE_WAIT; 10455 bbr->r_substate = bbr_do_close_wait; 10456 break; 10457 case TCPS_FIN_WAIT_1: 10458 bbr->r_state = TCPS_FIN_WAIT_1; 10459 bbr->r_substate = bbr_do_fin_wait_1; 10460 break; 10461 case TCPS_CLOSING: 10462 bbr->r_state = TCPS_CLOSING; 10463 bbr->r_substate = bbr_do_closing; 10464 break; 10465 case TCPS_LAST_ACK: 10466 bbr->r_state = TCPS_LAST_ACK; 10467 bbr->r_substate = bbr_do_lastack; 10468 break; 10469 case TCPS_FIN_WAIT_2: 10470 bbr->r_state = TCPS_FIN_WAIT_2; 10471 bbr->r_substate = bbr_do_fin_wait_2; 10472 break; 10473 case TCPS_LISTEN: 10474 case TCPS_CLOSED: 10475 case TCPS_TIME_WAIT: 10476 default: 10477 break; 10478 }; 10479 } 10480 10481 static void 10482 bbr_substate_change(struct tcp_bbr *bbr, uint32_t cts, int32_t line, int dolog) 10483 { 10484 /* 10485 * Now what state are we going into now? Is there adjustments 10486 * needed? 
10487 */ 10488 int32_t old_state, old_gain; 10489 10490 10491 old_state = bbr_state_val(bbr); 10492 old_gain = bbr->r_ctl.rc_bbr_hptsi_gain; 10493 if (bbr_state_val(bbr) == BBR_SUB_LEVEL1) { 10494 /* Save the lowest srtt we saw in our end of the sub-state */ 10495 bbr->rc_hit_state_1 = 0; 10496 if (bbr->r_ctl.bbr_smallest_srtt_this_state != 0xffffffff) 10497 bbr->r_ctl.bbr_smallest_srtt_state2 = bbr->r_ctl.bbr_smallest_srtt_this_state; 10498 } 10499 bbr->rc_bbr_substate++; 10500 if (bbr->rc_bbr_substate >= BBR_SUBSTATE_COUNT) { 10501 /* Cycle back to first state-> gain */ 10502 bbr->rc_bbr_substate = 0; 10503 } 10504 if (bbr_state_val(bbr) == BBR_SUB_GAIN) { 10505 /* 10506 * We enter the gain(5/4) cycle (possibly less if 10507 * shallow buffer detection is enabled) 10508 */ 10509 if (bbr->skip_gain) { 10510 /* 10511 * Hardware pacing has set our rate to 10512 * the max and limited our b/w just 10513 * do level i.e. no gain. 10514 */ 10515 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_LEVEL1]; 10516 } else if (bbr->gain_is_limited && 10517 bbr->bbr_hdrw_pacing && 10518 bbr->r_ctl.crte) { 10519 /* 10520 * We can't gain above the hardware pacing 10521 * rate which is less than our rate + the gain 10522 * calculate the gain needed to reach the hardware 10523 * pacing rate.. 10524 */ 10525 uint64_t bw, rate, gain_calc; 10526 10527 bw = bbr_get_bw(bbr); 10528 rate = bbr->r_ctl.crte->rate; 10529 if ((rate > bw) && 10530 (((bw * (uint64_t)bbr_hptsi_gain[BBR_SUB_GAIN]) / (uint64_t)BBR_UNIT) > rate)) { 10531 gain_calc = (rate * BBR_UNIT) / bw; 10532 if (gain_calc < BBR_UNIT) 10533 gain_calc = BBR_UNIT; 10534 bbr->r_ctl.rc_bbr_hptsi_gain = (uint16_t)gain_calc; 10535 } else { 10536 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_GAIN]; 10537 } 10538 } else 10539 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_GAIN]; 10540 if ((bbr->rc_use_google == 0) && (bbr_gain_to_target == 0)) { 10541 bbr->r_ctl.rc_bbr_state_atflight = cts; 10542 } else 10543 bbr->r_ctl.rc_bbr_state_atflight = 0; 10544 } else if (bbr_state_val(bbr) == BBR_SUB_DRAIN) { 10545 bbr->rc_hit_state_1 = 1; 10546 bbr->r_ctl.rc_exta_time_gd = 0; 10547 bbr->r_ctl.flightsize_at_drain = ctf_flight_size(bbr->rc_tp, 10548 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 10549 if (bbr_state_drain_2_tar) { 10550 bbr->r_ctl.rc_bbr_state_atflight = 0; 10551 } else 10552 bbr->r_ctl.rc_bbr_state_atflight = cts; 10553 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[BBR_SUB_DRAIN]; 10554 } else { 10555 /* All other cycles hit here 2-7 */ 10556 if ((old_state == BBR_SUB_DRAIN) && bbr->rc_hit_state_1) { 10557 if (bbr_sub_drain_slam_cwnd && 10558 (bbr->rc_use_google == 0) && 10559 (bbr->rc_tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) { 10560 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd; 10561 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10562 } 10563 if ((cts - bbr->r_ctl.rc_bbr_state_time) > bbr_get_rtt(bbr, BBR_RTT_PROP)) 10564 bbr->r_ctl.rc_exta_time_gd += ((cts - bbr->r_ctl.rc_bbr_state_time) - 10565 bbr_get_rtt(bbr, BBR_RTT_PROP)); 10566 else 10567 bbr->r_ctl.rc_exta_time_gd = 0; 10568 if (bbr->r_ctl.rc_exta_time_gd) { 10569 bbr->r_ctl.rc_level_state_extra = bbr->r_ctl.rc_exta_time_gd; 10570 /* Now chop up the time for each state (div by 7) */ 10571 bbr->r_ctl.rc_level_state_extra /= 7; 10572 if (bbr_rand_ot && bbr->r_ctl.rc_level_state_extra) { 10573 /* Add a randomization */ 10574 bbr_randomize_extra_state_time(bbr); 10575 } 10576 } 10577 } 10578 bbr->r_ctl.rc_bbr_state_atflight = max(1, cts); 10579 
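		/*
		 * Levels 2-7 are purely time based (atflight was set above),
		 * so the pacing gain simply comes from the gain table entry
		 * for the sub-state we just moved into.
		 */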
bbr->r_ctl.rc_bbr_hptsi_gain = bbr_hptsi_gain[bbr_state_val(bbr)]; 10580 } 10581 if (bbr->rc_use_google) { 10582 bbr->r_ctl.rc_bbr_state_atflight = max(1, cts); 10583 } 10584 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 10585 bbr->r_ctl.rc_bbr_cwnd_gain = bbr_cwnd_gain; 10586 if (dolog) 10587 bbr_log_type_statechange(bbr, cts, line); 10588 10589 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 10590 uint32_t time_in; 10591 10592 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 10593 if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) { 10594 counter_u64_add(bbr_state_time[(old_state + 5)], time_in); 10595 } else { 10596 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 10597 } 10598 } 10599 bbr->r_ctl.bbr_smallest_srtt_this_state = 0xffffffff; 10600 bbr_set_state_target(bbr, __LINE__); 10601 if (bbr_sub_drain_slam_cwnd && 10602 (bbr->rc_use_google == 0) && 10603 (bbr_state_val(bbr) == BBR_SUB_DRAIN)) { 10604 /* Slam down the cwnd */ 10605 bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd; 10606 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 10607 if (bbr_sub_drain_app_limit) { 10608 /* Go app limited if we are on a long drain */ 10609 bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.rc_delivered + 10610 ctf_flight_size(bbr->rc_tp, 10611 (bbr->r_ctl.rc_sacked + 10612 bbr->r_ctl.rc_lost_bytes))); 10613 } 10614 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10615 } 10616 if (bbr->rc_lt_use_bw) { 10617 /* In policed mode we clamp pacing_gain to BBR_UNIT */ 10618 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 10619 } 10620 /* Google changes TSO size every cycle */ 10621 if (bbr->rc_use_google) 10622 tcp_bbr_tso_size_check(bbr, cts); 10623 bbr->r_ctl.gain_epoch = cts; 10624 bbr->r_ctl.rc_bbr_state_time = cts; 10625 bbr->r_ctl.substate_pe = bbr->r_ctl.rc_pkt_epoch; 10626 } 10627 10628 static void 10629 bbr_set_probebw_google_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses) 10630 { 10631 if ((bbr_state_val(bbr) == BBR_SUB_DRAIN) && 10632 (google_allow_early_out == 1) && 10633 (bbr->r_ctl.rc_flight_at_input <= bbr->r_ctl.rc_target_at_state)) { 10634 /* We have reached out target flight size possibly early */ 10635 goto change_state; 10636 } 10637 if (TSTMP_LT(cts, bbr->r_ctl.rc_bbr_state_time)) { 10638 return; 10639 } 10640 if ((cts - bbr->r_ctl.rc_bbr_state_time) < bbr_get_rtt(bbr, BBR_RTT_PROP)) { 10641 /* 10642 * Must be a rttProp movement forward before 10643 * we can change states. 10644 */ 10645 return; 10646 } 10647 if (bbr_state_val(bbr) == BBR_SUB_GAIN) { 10648 /* 10649 * The needed time has passed but for 10650 * the gain cycle extra rules apply: 10651 * 1) If we have seen loss, we exit 10652 * 2) If we have not reached the target 10653 * we stay in GAIN (gain-to-target). 10654 */ 10655 if (google_consider_lost && losses) 10656 goto change_state; 10657 if (bbr->r_ctl.rc_target_at_state > bbr->r_ctl.rc_flight_at_input) { 10658 return; 10659 } 10660 } 10661 change_state: 10662 /* For gain we must reach our target, all others last 1 rttProp */ 10663 bbr_substate_change(bbr, cts, __LINE__, 1); 10664 } 10665 10666 static void 10667 bbr_set_probebw_gains(struct tcp_bbr *bbr, uint32_t cts, uint32_t losses) 10668 { 10669 uint32_t flight, bbr_cur_cycle_time; 10670 10671 if (bbr->rc_use_google) { 10672 bbr_set_probebw_google_gains(bbr, cts, losses); 10673 return; 10674 } 10675 if (cts == 0) { 10676 /* 10677 * Never alow cts to be 0 we 10678 * do this so we can judge if 10679 * we have set a timestamp. 
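		 * A zero rc_bbr_state_atflight is used below to mean that the
		 * flight target for this sub-state has not been reached yet.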
10680 */ 10681 cts = 1; 10682 } 10683 if (bbr_state_is_pkt_epoch) 10684 bbr_cur_cycle_time = bbr_get_rtt(bbr, BBR_RTT_PKTRTT); 10685 else 10686 bbr_cur_cycle_time = bbr_get_rtt(bbr, BBR_RTT_PROP); 10687 10688 if (bbr->r_ctl.rc_bbr_state_atflight == 0) { 10689 if (bbr_state_val(bbr) == BBR_SUB_DRAIN) { 10690 flight = ctf_flight_size(bbr->rc_tp, 10691 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 10692 if (bbr_sub_drain_slam_cwnd && bbr->rc_hit_state_1) { 10693 /* Keep it slam down */ 10694 if (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state) { 10695 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 10696 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10697 } 10698 if (bbr_sub_drain_app_limit) { 10699 /* Go app limited if we are on a long drain */ 10700 bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.rc_delivered + flight); 10701 } 10702 } 10703 if (TSTMP_GT(cts, bbr->r_ctl.gain_epoch) && 10704 (((cts - bbr->r_ctl.gain_epoch) > bbr_get_rtt(bbr, BBR_RTT_PROP)) || 10705 (flight >= bbr->r_ctl.flightsize_at_drain))) { 10706 /* 10707 * Still here after the same time as 10708 * the gain. We need to drain harder 10709 * for the next srtt. Reduce by a set amount 10710 * the gain drop is capped at DRAIN states 10711 * value (88). 10712 */ 10713 bbr->r_ctl.flightsize_at_drain = flight; 10714 if (bbr_drain_drop_mul && 10715 bbr_drain_drop_div && 10716 (bbr_drain_drop_mul < bbr_drain_drop_div)) { 10717 /* Use your specific drop value (def 4/5 = 20%) */ 10718 bbr->r_ctl.rc_bbr_hptsi_gain *= bbr_drain_drop_mul; 10719 bbr->r_ctl.rc_bbr_hptsi_gain /= bbr_drain_drop_div; 10720 } else { 10721 /* You get drop of 20% */ 10722 bbr->r_ctl.rc_bbr_hptsi_gain *= 4; 10723 bbr->r_ctl.rc_bbr_hptsi_gain /= 5; 10724 } 10725 if (bbr->r_ctl.rc_bbr_hptsi_gain <= bbr_drain_floor) { 10726 /* Reduce our gain again to the bottom */ 10727 bbr->r_ctl.rc_bbr_hptsi_gain = max(bbr_drain_floor, 1); 10728 } 10729 bbr_log_exit_gain(bbr, cts, 4); 10730 /* 10731 * Extend out so we wait another 10732 * epoch before dropping again. 10733 */ 10734 bbr->r_ctl.gain_epoch = cts; 10735 } 10736 if (flight <= bbr->r_ctl.rc_target_at_state) { 10737 if (bbr_sub_drain_slam_cwnd && 10738 (bbr->rc_use_google == 0) && 10739 (bbr->rc_tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) { 10740 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd; 10741 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10742 } 10743 bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1); 10744 bbr_log_exit_gain(bbr, cts, 3); 10745 } 10746 } else { 10747 /* Its a gain */ 10748 if (bbr->r_ctl.rc_lost > bbr->r_ctl.bbr_lost_at_state) { 10749 bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1); 10750 goto change_state; 10751 } 10752 if ((ctf_outstanding(bbr->rc_tp) >= bbr->r_ctl.rc_target_at_state) || 10753 ((ctf_outstanding(bbr->rc_tp) + bbr->rc_tp->t_maxseg - 1) >= 10754 bbr->rc_tp->snd_wnd)) { 10755 bbr->r_ctl.rc_bbr_state_atflight = max(cts, 1); 10756 bbr_log_exit_gain(bbr, cts, 2); 10757 } 10758 } 10759 /** 10760 * We fall through and return always one of two things has 10761 * occured. 10762 * 1) We are still not at target 10763 * <or> 10764 * 2) We reached the target and set rc_bbr_state_atflight 10765 * which means we no longer hit this block 10766 * next time we are called. 
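	 * In both cases we return without changing the sub-state; the
	 * actual transition happens on a later call once the atflight
	 * timestamp is set and the cycle time has expired.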
10767 */ 10768 return; 10769 } 10770 change_state: 10771 if (TSTMP_LT(cts, bbr->r_ctl.rc_bbr_state_time)) 10772 return; 10773 if ((cts - bbr->r_ctl.rc_bbr_state_time) < bbr_cur_cycle_time) { 10774 /* Less than a full time-period has passed */ 10775 return; 10776 } 10777 if (bbr->r_ctl.rc_level_state_extra && 10778 (bbr_state_val(bbr) > BBR_SUB_DRAIN) && 10779 ((cts - bbr->r_ctl.rc_bbr_state_time) < 10780 (bbr_cur_cycle_time + bbr->r_ctl.rc_level_state_extra))) { 10781 /* Less than a full time-period + extra has passed */ 10782 return; 10783 } 10784 if (bbr_gain_gets_extra_too && 10785 bbr->r_ctl.rc_level_state_extra && 10786 (bbr_state_val(bbr) == BBR_SUB_GAIN) && 10787 ((cts - bbr->r_ctl.rc_bbr_state_time) < 10788 (bbr_cur_cycle_time + bbr->r_ctl.rc_level_state_extra))) { 10789 /* Less than a full time-period + extra has passed */ 10790 return; 10791 } 10792 bbr_substate_change(bbr, cts, __LINE__, 1); 10793 } 10794 10795 static uint32_t 10796 bbr_get_a_state_target(struct tcp_bbr *bbr, uint32_t gain) 10797 { 10798 uint32_t mss, tar; 10799 10800 if (bbr->rc_use_google) { 10801 /* Google just uses the cwnd target */ 10802 tar = bbr_get_target_cwnd(bbr, bbr_get_bw(bbr), gain); 10803 } else { 10804 mss = min((bbr->rc_tp->t_maxseg - bbr->rc_last_options), 10805 bbr->r_ctl.rc_pace_max_segs); 10806 /* Get the base cwnd with gain rounded to a mss */ 10807 tar = roundup(bbr_get_raw_target_cwnd(bbr, bbr_get_bw(bbr), 10808 gain), mss); 10809 /* Make sure it is within our min */ 10810 if (tar < get_min_cwnd(bbr)) 10811 return (get_min_cwnd(bbr)); 10812 } 10813 return (tar); 10814 } 10815 10816 static void 10817 bbr_set_state_target(struct tcp_bbr *bbr, int line) 10818 { 10819 uint32_t tar, meth; 10820 10821 if ((bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) && 10822 ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google)) { 10823 /* Special case using old probe-rtt method */ 10824 tar = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options); 10825 meth = 1; 10826 } else { 10827 /* Non-probe-rtt case and reduced probe-rtt */ 10828 if ((bbr->rc_bbr_state == BBR_STATE_PROBE_BW) && 10829 (bbr->r_ctl.rc_bbr_hptsi_gain > BBR_UNIT)) { 10830 /* For gain cycle we use the hptsi gain */ 10831 tar = bbr_get_a_state_target(bbr, bbr->r_ctl.rc_bbr_hptsi_gain); 10832 meth = 2; 10833 } else if ((bbr_target_is_bbunit) || bbr->rc_use_google) { 10834 /* 10835 * If configured, or for google all other states 10836 * get BBR_UNIT. 10837 */ 10838 tar = bbr_get_a_state_target(bbr, BBR_UNIT); 10839 meth = 3; 10840 } else { 10841 /* 10842 * Or we set a target based on the pacing gain 10843 * for non-google mode and default (non-configured). 10844 * Note we don't set a target goal below drain (192). 
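			 * i.e. even when the pacing gain has been pushed below
			 * the drain gain we still size the cwnd target as if
			 * we were at drain.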
10845 */ 10846 if (bbr->r_ctl.rc_bbr_hptsi_gain < bbr_hptsi_gain[BBR_SUB_DRAIN]) { 10847 tar = bbr_get_a_state_target(bbr, bbr_hptsi_gain[BBR_SUB_DRAIN]); 10848 meth = 4; 10849 } else { 10850 tar = bbr_get_a_state_target(bbr, bbr->r_ctl.rc_bbr_hptsi_gain); 10851 meth = 5; 10852 } 10853 } 10854 } 10855 bbr_log_set_of_state_target(bbr, tar, line, meth); 10856 bbr->r_ctl.rc_target_at_state = tar; 10857 } 10858 10859 static void 10860 bbr_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts, int32_t line) 10861 { 10862 /* Change to probe_rtt */ 10863 uint32_t time_in; 10864 10865 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 10866 bbr->r_ctl.flightsize_at_drain = ctf_flight_size(bbr->rc_tp, 10867 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 10868 bbr->r_ctl.r_app_limited_until = (bbr->r_ctl.flightsize_at_drain 10869 + bbr->r_ctl.rc_delivered); 10870 /* Setup so we force feed the filter */ 10871 if (bbr->rc_use_google || bbr_probertt_sets_rtt) 10872 bbr->rc_prtt_set_ts = 1; 10873 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 10874 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 10875 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 10876 } 10877 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_ENTERPROBE, 0); 10878 bbr->r_ctl.rc_rtt_shrinks = cts; 10879 bbr->r_ctl.last_in_probertt = cts; 10880 bbr->r_ctl.rc_probertt_srttchktim = cts; 10881 bbr->r_ctl.rc_bbr_state_time = cts; 10882 bbr->rc_bbr_state = BBR_STATE_PROBE_RTT; 10883 /* We need to force the filter to update */ 10884 10885 if ((bbr_sub_drain_slam_cwnd) && 10886 bbr->rc_hit_state_1 && 10887 (bbr->rc_use_google == 0) && 10888 (bbr_state_val(bbr) == BBR_SUB_DRAIN)) { 10889 if (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_saved_cwnd) 10890 bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd; 10891 } else 10892 bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd; 10893 /* Update the lost */ 10894 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 10895 if ((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google){ 10896 /* Set to the non-configurable default of 4 (PROBE_RTT_MIN) */ 10897 bbr->rc_tp->snd_cwnd = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options); 10898 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10899 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 10900 bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT; 10901 bbr_log_set_of_state_target(bbr, bbr->rc_tp->snd_cwnd, __LINE__, 6); 10902 bbr->r_ctl.rc_target_at_state = bbr->rc_tp->snd_cwnd; 10903 } else { 10904 /* 10905 * We bring it down slowly by using a hptsi gain that is 10906 * probably 75%. This will slowly float down our outstanding 10907 * without tampering with the cwnd. 
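		 * (bbr_rttprobe_gain_val below); the cwnd itself is only
		 * clamped here when bbr_prtt_slam_cwnd is enabled.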
10908 */ 10909 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.bbr_rttprobe_gain_val; 10910 bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT; 10911 bbr_set_state_target(bbr, __LINE__); 10912 if (bbr_prtt_slam_cwnd && 10913 (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) { 10914 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 10915 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 10916 } 10917 } 10918 if (ctf_flight_size(bbr->rc_tp, 10919 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <= 10920 bbr->r_ctl.rc_target_at_state) { 10921 /* We are at target */ 10922 bbr->r_ctl.rc_bbr_enters_probertt = cts; 10923 } else { 10924 /* We need to come down to reach target before our time begins */ 10925 bbr->r_ctl.rc_bbr_enters_probertt = 0; 10926 } 10927 bbr->r_ctl.rc_pe_of_prtt = bbr->r_ctl.rc_pkt_epoch; 10928 BBR_STAT_INC(bbr_enter_probertt); 10929 bbr_log_exit_gain(bbr, cts, 0); 10930 bbr_log_type_statechange(bbr, cts, line); 10931 } 10932 10933 static void 10934 bbr_check_probe_rtt_limits(struct tcp_bbr *bbr, uint32_t cts) 10935 { 10936 /* 10937 * Sanity check on probe-rtt intervals. 10938 * In crazy situations where we are competing 10939 * against new-reno flows with huge buffers 10940 * our rtt-prop interval could come to dominate 10941 * things if we can't get through a full set 10942 * of cycles, we need to adjust it. 10943 */ 10944 if (bbr_can_adjust_probertt && 10945 (bbr->rc_use_google == 0)) { 10946 uint16_t val = 0; 10947 uint32_t cur_rttp, fval, newval, baseval; 10948 10949 /* Are we to small and go into probe-rtt to often? */ 10950 baseval = (bbr_get_rtt(bbr, BBR_RTT_PROP) * (BBR_SUBSTATE_COUNT + 1)); 10951 cur_rttp = roundup(baseval, USECS_IN_SECOND); 10952 fval = bbr_filter_len_sec * USECS_IN_SECOND; 10953 if (bbr_is_ratio == 0) { 10954 if (fval > bbr_rtt_probe_limit) 10955 newval = cur_rttp + (fval - bbr_rtt_probe_limit); 10956 else 10957 newval = cur_rttp; 10958 } else { 10959 int mul; 10960 10961 mul = fval / bbr_rtt_probe_limit; 10962 newval = cur_rttp * mul; 10963 } 10964 if (cur_rttp > bbr->r_ctl.rc_probertt_int) { 10965 bbr->r_ctl.rc_probertt_int = cur_rttp; 10966 reset_time_small(&bbr->r_ctl.rc_rttprop, newval); 10967 val = 1; 10968 } else { 10969 /* 10970 * No adjustments were made 10971 * do we need to shrink it? 10972 */ 10973 if (bbr->r_ctl.rc_probertt_int > bbr_rtt_probe_limit) { 10974 if (cur_rttp <= bbr_rtt_probe_limit) { 10975 /* 10976 * Things have calmed down lets 10977 * shrink all the way to default 10978 */ 10979 bbr->r_ctl.rc_probertt_int = bbr_rtt_probe_limit; 10980 reset_time_small(&bbr->r_ctl.rc_rttprop, 10981 (bbr_filter_len_sec * USECS_IN_SECOND)); 10982 cur_rttp = bbr_rtt_probe_limit; 10983 newval = (bbr_filter_len_sec * USECS_IN_SECOND); 10984 val = 2; 10985 } else { 10986 /* 10987 * Well does some adjustment make sense? 
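				 * cur_rttp is still above the default limit but
				 * below our current interval, so we can shrink
				 * the interval down to it.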
10988 */ 10989 if (cur_rttp < bbr->r_ctl.rc_probertt_int) { 10990 /* We can reduce interval time some */ 10991 bbr->r_ctl.rc_probertt_int = cur_rttp; 10992 reset_time_small(&bbr->r_ctl.rc_rttprop, newval); 10993 val = 3; 10994 } 10995 } 10996 } 10997 } 10998 if (val) 10999 bbr_log_rtt_shrinks(bbr, cts, cur_rttp, newval, __LINE__, BBR_RTTS_RESETS_VALUES, val); 11000 } 11001 } 11002 11003 static void 11004 bbr_exit_probe_rtt(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts) 11005 { 11006 /* Exit probe-rtt */ 11007 11008 if (tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd) { 11009 tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd; 11010 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11011 } 11012 bbr_log_exit_gain(bbr, cts, 1); 11013 bbr->rc_hit_state_1 = 0; 11014 bbr->r_ctl.rc_rtt_shrinks = cts; 11015 bbr->r_ctl.last_in_probertt = cts; 11016 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_RTTPROBE, 0); 11017 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 11018 bbr->r_ctl.r_app_limited_until = (ctf_flight_size(tp, 11019 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) + 11020 bbr->r_ctl.rc_delivered); 11021 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 11022 uint32_t time_in; 11023 11024 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 11025 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 11026 } 11027 if (bbr->rc_filled_pipe) { 11028 /* Switch to probe_bw */ 11029 bbr->rc_bbr_state = BBR_STATE_PROBE_BW; 11030 bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts); 11031 bbr->r_ctl.rc_bbr_cwnd_gain = bbr_cwnd_gain; 11032 bbr_substate_change(bbr, cts, __LINE__, 0); 11033 bbr_log_type_statechange(bbr, cts, __LINE__); 11034 } else { 11035 /* Back to startup */ 11036 bbr->rc_bbr_state = BBR_STATE_STARTUP; 11037 bbr->r_ctl.rc_bbr_state_time = cts; 11038 /* 11039 * We don't want to give a complete free 3 11040 * measurements until we exit, so we use 11041 * the number of pe's we were in probe-rtt 11042 * to add to the startup_epoch. That way 11043 * we will still retain the old state. 
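		 * i.e. the packet epochs spent in probe-rtt do not count
		 * towards the BBR_STARTUP_EPOCHS of non-gain needed to
		 * declare the pipe full.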
11044 */ 11045 bbr->r_ctl.rc_bbr_last_startup_epoch += (bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_pe_of_prtt); 11046 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 11047 /* Make sure to use the lower pg when shifting back in */ 11048 if (bbr->r_ctl.rc_lost && 11049 bbr_use_lower_gain_in_startup && 11050 (bbr->rc_use_google == 0)) 11051 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_startup_lower; 11052 else 11053 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_startup_pg; 11054 bbr->r_ctl.rc_bbr_cwnd_gain = bbr->r_ctl.rc_startup_pg; 11055 /* Probably not needed but set it anyway */ 11056 bbr_set_state_target(bbr, __LINE__); 11057 bbr_log_type_statechange(bbr, cts, __LINE__); 11058 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11059 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 0); 11060 } 11061 bbr_check_probe_rtt_limits(bbr, cts); 11062 } 11063 11064 static int32_t inline 11065 bbr_should_enter_probe_rtt(struct tcp_bbr *bbr, uint32_t cts) 11066 { 11067 if ((bbr->rc_past_init_win == 1) && 11068 (bbr->rc_in_persist == 0) && 11069 (bbr_calc_time(cts, bbr->r_ctl.rc_rtt_shrinks) >= bbr->r_ctl.rc_probertt_int)) { 11070 return (1); 11071 } 11072 if (bbr_can_force_probertt && 11073 (bbr->rc_in_persist == 0) && 11074 (TSTMP_GT(cts, bbr->r_ctl.last_in_probertt)) && 11075 ((cts - bbr->r_ctl.last_in_probertt) > bbr->r_ctl.rc_probertt_int)) { 11076 return (1); 11077 } 11078 return (0); 11079 } 11080 11081 11082 static int32_t 11083 bbr_google_startup(struct tcp_bbr *bbr, uint32_t cts, int32_t pkt_epoch) 11084 { 11085 uint64_t btlbw, gain; 11086 if (pkt_epoch == 0) { 11087 /* 11088 * Need to be on a pkt-epoch to continue. 11089 */ 11090 return (0); 11091 } 11092 btlbw = bbr_get_full_bw(bbr); 11093 gain = ((bbr->r_ctl.rc_bbr_lastbtlbw * 11094 (uint64_t)bbr_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw; 11095 if (btlbw >= gain) { 11096 bbr->r_ctl.rc_bbr_last_startup_epoch = bbr->r_ctl.rc_pkt_epoch; 11097 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11098 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 3); 11099 bbr->r_ctl.rc_bbr_lastbtlbw = btlbw; 11100 } 11101 if ((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS) 11102 return (1); 11103 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11104 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 8); 11105 return(0); 11106 } 11107 11108 static int32_t inline 11109 bbr_state_startup(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, int32_t pkt_epoch) 11110 { 11111 /* Have we gained 25% in the last 3 packet based epoch's? */ 11112 uint64_t btlbw, gain; 11113 int do_exit; 11114 int delta, rtt_gain; 11115 11116 if ((bbr->rc_tp->snd_una == bbr->rc_tp->snd_max) && 11117 (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) { 11118 /* 11119 * This qualifies as a RTT_PROBE session since we drop the 11120 * data outstanding to nothing and waited more than 11121 * bbr_rtt_probe_time. 
11122 */ 11123 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0); 11124 bbr_set_reduced_rtt(bbr, cts, __LINE__); 11125 } 11126 if (bbr_should_enter_probe_rtt(bbr, cts)) { 11127 bbr_enter_probe_rtt(bbr, cts, __LINE__); 11128 return (0); 11129 } 11130 if (bbr->rc_use_google) 11131 return (bbr_google_startup(bbr, cts, pkt_epoch)); 11132 11133 if ((bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_startup) && 11134 (bbr_use_lower_gain_in_startup)) { 11135 /* Drop to a lower gain 1.5 x since we saw loss */ 11136 bbr->r_ctl.rc_bbr_hptsi_gain = bbr_startup_lower; 11137 } 11138 if (pkt_epoch == 0) { 11139 /* 11140 * Need to be on a pkt-epoch to continue. 11141 */ 11142 return (0); 11143 } 11144 if (bbr_rtt_gain_thresh) { 11145 /* 11146 * Do we allow a flow to stay 11147 * in startup with no loss and no 11148 * gain in rtt over a set threshold? 11149 */ 11150 if (bbr->r_ctl.rc_pkt_epoch_rtt && 11151 bbr->r_ctl.startup_last_srtt && 11152 (bbr->r_ctl.rc_pkt_epoch_rtt > bbr->r_ctl.startup_last_srtt)) { 11153 delta = bbr->r_ctl.rc_pkt_epoch_rtt - bbr->r_ctl.startup_last_srtt; 11154 rtt_gain = (delta * 100) / bbr->r_ctl.startup_last_srtt; 11155 } else 11156 rtt_gain = 0; 11157 if ((bbr->r_ctl.startup_last_srtt == 0) || 11158 (bbr->r_ctl.rc_pkt_epoch_rtt < bbr->r_ctl.startup_last_srtt)) 11159 /* First time or new lower value */ 11160 bbr->r_ctl.startup_last_srtt = bbr->r_ctl.rc_pkt_epoch_rtt; 11161 11162 if ((bbr->r_ctl.rc_lost == 0) && 11163 (rtt_gain < bbr_rtt_gain_thresh)) { 11164 /* 11165 * No loss, and we are under 11166 * our gain threhold for 11167 * increasing RTT. 11168 */ 11169 if (bbr->r_ctl.rc_bbr_last_startup_epoch < bbr->r_ctl.rc_pkt_epoch) 11170 bbr->r_ctl.rc_bbr_last_startup_epoch++; 11171 bbr_log_startup_event(bbr, cts, rtt_gain, 11172 delta, bbr->r_ctl.startup_last_srtt, 10); 11173 return (0); 11174 } 11175 } 11176 if ((bbr->r_ctl.r_measurement_count == bbr->r_ctl.last_startup_measure) && 11177 (bbr->r_ctl.rc_lost_at_startup == bbr->r_ctl.rc_lost) && 11178 (!IN_RECOVERY(bbr->rc_tp->t_flags))) { 11179 /* 11180 * We only assess if we have a new measurment when 11181 * we have no loss and are not in recovery. 11182 * Drag up by one our last_startup epoch so we will hold 11183 * the number of non-gain we have already accumulated. 
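	 * In effect an epoch with nothing new to measure counts neither as
	 * gain nor as one of the non-gain epochs needed to exit startup.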
11184 */ 11185 if (bbr->r_ctl.rc_bbr_last_startup_epoch < bbr->r_ctl.rc_pkt_epoch) 11186 bbr->r_ctl.rc_bbr_last_startup_epoch++; 11187 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11188 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 9); 11189 return (0); 11190 } 11191 /* Case where we reduced the lost (bad retransmit) */ 11192 if (bbr->r_ctl.rc_lost_at_startup > bbr->r_ctl.rc_lost) 11193 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 11194 bbr->r_ctl.last_startup_measure = bbr->r_ctl.r_measurement_count; 11195 btlbw = bbr_get_full_bw(bbr); 11196 if (bbr->r_ctl.rc_bbr_hptsi_gain == bbr_startup_lower) 11197 gain = ((bbr->r_ctl.rc_bbr_lastbtlbw * 11198 (uint64_t)bbr_low_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw; 11199 else 11200 gain = ((bbr->r_ctl.rc_bbr_lastbtlbw * 11201 (uint64_t)bbr_start_exit) / (uint64_t)100) + bbr->r_ctl.rc_bbr_lastbtlbw; 11202 do_exit = 0; 11203 if (btlbw > bbr->r_ctl.rc_bbr_lastbtlbw) 11204 bbr->r_ctl.rc_bbr_lastbtlbw = btlbw; 11205 if (btlbw >= gain) { 11206 bbr->r_ctl.rc_bbr_last_startup_epoch = bbr->r_ctl.rc_pkt_epoch; 11207 /* Update the lost so we won't exit in next set of tests */ 11208 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 11209 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11210 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 3); 11211 } 11212 if ((bbr->rc_loss_exit && 11213 (bbr->r_ctl.rc_lost > bbr->r_ctl.rc_lost_at_startup) && 11214 (bbr->r_ctl.rc_pkt_epoch_loss_rate > bbr_startup_loss_thresh)) && 11215 ((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS)) { 11216 /* 11217 * If we had no gain, we had loss and that loss was above 11218 * our threshould, the rwnd is not constrained, and we have 11219 * had at least 3 packet epochs exit. Note that this is 11220 * switched off by sysctl. Google does not do this by the 11221 * way. 11222 */ 11223 if ((ctf_flight_size(bbr->rc_tp, 11224 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) + 11225 (2 * max(bbr->r_ctl.rc_pace_max_segs, bbr->rc_tp->t_maxseg))) <= bbr->rc_tp->snd_wnd) { 11226 do_exit = 1; 11227 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11228 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 4); 11229 } else { 11230 /* Just record an updated loss value */ 11231 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 11232 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11233 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 5); 11234 } 11235 } else 11236 bbr->r_ctl.rc_lost_at_startup = bbr->r_ctl.rc_lost; 11237 if (((bbr->r_ctl.rc_pkt_epoch - bbr->r_ctl.rc_bbr_last_startup_epoch) >= BBR_STARTUP_EPOCHS) || 11238 do_exit) { 11239 /* Return 1 to exit the startup state. */ 11240 return (1); 11241 } 11242 /* Stay in startup */ 11243 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11244 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 8); 11245 return (0); 11246 } 11247 11248 static void 11249 bbr_state_change(struct tcp_bbr *bbr, uint32_t cts, int32_t epoch, int32_t pkt_epoch, uint32_t losses) 11250 { 11251 /* 11252 * A tick occured in the rtt epoch do we need to do anything? 11253 */ 11254 #ifdef BBR_INVARIANTS 11255 if ((bbr->rc_bbr_state != BBR_STATE_STARTUP) && 11256 (bbr->rc_bbr_state != BBR_STATE_DRAIN) && 11257 (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) && 11258 (bbr->rc_bbr_state != BBR_STATE_IDLE_EXIT) && 11259 (bbr->rc_bbr_state != BBR_STATE_PROBE_BW)) { 11260 /* Debug code? 
*/ 11261 panic("Unknown BBR state %d?\n", bbr->rc_bbr_state); 11262 } 11263 #endif 11264 if (bbr->rc_bbr_state == BBR_STATE_STARTUP) { 11265 /* Do we exit the startup state? */ 11266 if (bbr_state_startup(bbr, cts, epoch, pkt_epoch)) { 11267 uint32_t time_in; 11268 11269 bbr_log_startup_event(bbr, cts, bbr->r_ctl.rc_bbr_last_startup_epoch, 11270 bbr->r_ctl.rc_lost_at_startup, bbr_start_exit, 6); 11271 bbr->rc_filled_pipe = 1; 11272 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 11273 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 11274 11275 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 11276 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 11277 } else 11278 time_in = 0; 11279 if (bbr->rc_no_pacing) 11280 bbr->rc_no_pacing = 0; 11281 bbr->r_ctl.rc_bbr_state_time = cts; 11282 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.rc_drain_pg; 11283 bbr->rc_bbr_state = BBR_STATE_DRAIN; 11284 bbr_set_state_target(bbr, __LINE__); 11285 if ((bbr->rc_use_google == 0) && 11286 bbr_slam_cwnd_in_main_drain) { 11287 /* Here we don't have to worry about probe-rtt */ 11288 bbr->r_ctl.rc_saved_cwnd = bbr->rc_tp->snd_cwnd; 11289 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 11290 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11291 } 11292 bbr->r_ctl.rc_bbr_cwnd_gain = bbr_high_gain; 11293 bbr_log_type_statechange(bbr, cts, __LINE__); 11294 if (ctf_flight_size(bbr->rc_tp, 11295 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)) <= 11296 bbr->r_ctl.rc_target_at_state) { 11297 /* 11298 * Switch to probe_bw if we are already 11299 * there 11300 */ 11301 bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts); 11302 bbr_substate_change(bbr, cts, __LINE__, 0); 11303 bbr->rc_bbr_state = BBR_STATE_PROBE_BW; 11304 bbr_log_type_statechange(bbr, cts, __LINE__); 11305 } 11306 } 11307 } else if (bbr->rc_bbr_state == BBR_STATE_IDLE_EXIT) { 11308 uint32_t inflight; 11309 struct tcpcb *tp; 11310 11311 tp = bbr->rc_tp; 11312 inflight = ctf_flight_size(tp, 11313 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 11314 if (inflight >= bbr->r_ctl.rc_target_at_state) { 11315 /* We have reached a flight of the cwnd target */ 11316 bbr->rc_bbr_state = BBR_STATE_PROBE_BW; 11317 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 11318 bbr->r_ctl.rc_bbr_cwnd_gain = BBR_UNIT; 11319 bbr_set_state_target(bbr, __LINE__); 11320 /* 11321 * Rig it so we don't do anything crazy and 11322 * start fresh with a new randomization. 11323 */ 11324 bbr->r_ctl.bbr_smallest_srtt_this_state = 0xffffffff; 11325 bbr->rc_bbr_substate = BBR_SUB_LEVEL6; 11326 bbr_substate_change(bbr, cts, __LINE__, 1); 11327 } 11328 } else if (bbr->rc_bbr_state == BBR_STATE_DRAIN) { 11329 /* Has in-flight reached the bdp (or less)? */ 11330 uint32_t inflight; 11331 struct tcpcb *tp; 11332 11333 tp = bbr->rc_tp; 11334 inflight = ctf_flight_size(tp, 11335 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 11336 if ((bbr->rc_use_google == 0) && 11337 bbr_slam_cwnd_in_main_drain && 11338 (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) { 11339 /* 11340 * Here we don't have to worry about probe-rtt 11341 * re-slam it, but keep it slammed down. 
11342 */ 11343 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 11344 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11345 } 11346 if (inflight <= bbr->r_ctl.rc_target_at_state) { 11347 /* We have drained */ 11348 bbr->rc_bbr_state = BBR_STATE_PROBE_BW; 11349 bbr->r_ctl.bbr_lost_at_state = bbr->r_ctl.rc_lost; 11350 if (SEQ_GT(cts, bbr->r_ctl.rc_bbr_state_time)) { 11351 uint32_t time_in; 11352 11353 time_in = cts - bbr->r_ctl.rc_bbr_state_time; 11354 counter_u64_add(bbr_state_time[bbr->rc_bbr_state], time_in); 11355 } 11356 if ((bbr->rc_use_google == 0) && 11357 bbr_slam_cwnd_in_main_drain && 11358 (tp->snd_cwnd < bbr->r_ctl.rc_saved_cwnd)) { 11359 /* Restore the cwnd */ 11360 tp->snd_cwnd = bbr->r_ctl.rc_saved_cwnd; 11361 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11362 } 11363 /* Setup probe-rtt has being done now RRS-HERE */ 11364 bbr->r_ctl.rc_rtt_shrinks = cts; 11365 bbr->r_ctl.last_in_probertt = cts; 11366 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_LEAVE_DRAIN, 0); 11367 /* Randomly pick a sub-state */ 11368 bbr->rc_bbr_substate = bbr_pick_probebw_substate(bbr, cts); 11369 bbr_substate_change(bbr, cts, __LINE__, 0); 11370 bbr_log_type_statechange(bbr, cts, __LINE__); 11371 } 11372 } else if (bbr->rc_bbr_state == BBR_STATE_PROBE_RTT) { 11373 uint32_t flight; 11374 11375 flight = ctf_flight_size(bbr->rc_tp, 11376 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 11377 bbr->r_ctl.r_app_limited_until = (flight + bbr->r_ctl.rc_delivered); 11378 if (((bbr->r_ctl.bbr_rttprobe_gain_val == 0) || bbr->rc_use_google) && 11379 (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) { 11380 /* 11381 * We must keep cwnd at the desired MSS. 11382 */ 11383 bbr->rc_tp->snd_cwnd = bbr_rtt_probe_cwndtarg * (bbr->rc_tp->t_maxseg - bbr->rc_last_options); 11384 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11385 } else if ((bbr_prtt_slam_cwnd) && 11386 (bbr->rc_tp->snd_cwnd > bbr->r_ctl.rc_target_at_state)) { 11387 /* Re-slam it */ 11388 bbr->rc_tp->snd_cwnd = bbr->r_ctl.rc_target_at_state; 11389 bbr_log_type_cwndupd(bbr, 0, 0, 0, 12, 0, 0, __LINE__); 11390 } 11391 if (bbr->r_ctl.rc_bbr_enters_probertt == 0) { 11392 /* Has outstanding reached our target? */ 11393 if (flight <= bbr->r_ctl.rc_target_at_state) { 11394 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_REACHTAR, 0); 11395 bbr->r_ctl.rc_bbr_enters_probertt = cts; 11396 /* If time is exactly 0, be 1usec off */ 11397 if (bbr->r_ctl.rc_bbr_enters_probertt == 0) 11398 bbr->r_ctl.rc_bbr_enters_probertt = 1; 11399 if (bbr->rc_use_google == 0) { 11400 /* 11401 * Restore any lowering that as occured to 11402 * reach here 11403 */ 11404 if (bbr->r_ctl.bbr_rttprobe_gain_val) 11405 bbr->r_ctl.rc_bbr_hptsi_gain = bbr->r_ctl.bbr_rttprobe_gain_val; 11406 else 11407 bbr->r_ctl.rc_bbr_hptsi_gain = BBR_UNIT; 11408 } 11409 } 11410 if ((bbr->r_ctl.rc_bbr_enters_probertt == 0) && 11411 (bbr->rc_use_google == 0) && 11412 bbr->r_ctl.bbr_rttprobe_gain_val && 11413 (((cts - bbr->r_ctl.rc_probertt_srttchktim) > bbr_get_rtt(bbr, bbr_drain_rtt)) || 11414 (flight >= bbr->r_ctl.flightsize_at_drain))) { 11415 /* 11416 * We have doddled with our current hptsi 11417 * gain an srtt and have still not made it 11418 * to target, or we have increased our flight. 
11419 * Lets reduce the gain by xx% 11420 * flooring the reduce at DRAIN (based on 11421 * mul/div) 11422 */ 11423 int red; 11424 11425 bbr->r_ctl.flightsize_at_drain = flight; 11426 bbr->r_ctl.rc_probertt_srttchktim = cts; 11427 red = max((bbr->r_ctl.bbr_rttprobe_gain_val / 10), 1); 11428 if ((bbr->r_ctl.rc_bbr_hptsi_gain - red) > max(bbr_drain_floor, 1)) { 11429 /* Reduce our gain again */ 11430 bbr->r_ctl.rc_bbr_hptsi_gain -= red; 11431 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_SHRINK_PG, 0); 11432 } else if (bbr->r_ctl.rc_bbr_hptsi_gain > max(bbr_drain_floor, 1)) { 11433 /* one more chance before we give up */ 11434 bbr->r_ctl.rc_bbr_hptsi_gain = max(bbr_drain_floor, 1); 11435 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_SHRINK_PG_FINAL, 0); 11436 } else { 11437 /* At the very bottom */ 11438 bbr->r_ctl.rc_bbr_hptsi_gain = max((bbr_drain_floor-1), 1); 11439 } 11440 } 11441 } 11442 if (bbr->r_ctl.rc_bbr_enters_probertt && 11443 (TSTMP_GT(cts, bbr->r_ctl.rc_bbr_enters_probertt)) && 11444 ((cts - bbr->r_ctl.rc_bbr_enters_probertt) >= bbr_rtt_probe_time)) { 11445 /* Time to exit probe RTT normally */ 11446 bbr_exit_probe_rtt(bbr->rc_tp, bbr, cts); 11447 } 11448 } else if (bbr->rc_bbr_state == BBR_STATE_PROBE_BW) { 11449 if ((bbr->rc_tp->snd_una == bbr->rc_tp->snd_max) && 11450 (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) { 11451 /* 11452 * This qualifies as a RTT_PROBE session since we 11453 * drop the data outstanding to nothing and waited 11454 * more than bbr_rtt_probe_time. 11455 */ 11456 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0); 11457 bbr_set_reduced_rtt(bbr, cts, __LINE__); 11458 } 11459 if (bbr_should_enter_probe_rtt(bbr, cts)) { 11460 bbr_enter_probe_rtt(bbr, cts, __LINE__); 11461 } else { 11462 bbr_set_probebw_gains(bbr, cts, losses); 11463 } 11464 } 11465 } 11466 11467 static void 11468 bbr_check_bbr_for_state(struct tcp_bbr *bbr, uint32_t cts, int32_t line, uint32_t losses) 11469 { 11470 int32_t epoch = 0; 11471 11472 if ((cts - bbr->r_ctl.rc_rcv_epoch_start) >= bbr_get_rtt(bbr, BBR_RTT_PROP)) { 11473 bbr_set_epoch(bbr, cts, line); 11474 /* At each epoch doe lt bw sampling */ 11475 epoch = 1; 11476 } 11477 bbr_state_change(bbr, cts, epoch, bbr->rc_is_pkt_epoch_now, losses); 11478 } 11479 11480 static int 11481 bbr_do_segment_nounlock(struct mbuf *m, struct tcphdr *th, struct socket *so, 11482 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos, 11483 int32_t nxt_pkt, struct timeval *tv) 11484 { 11485 int32_t thflags, retval; 11486 uint32_t cts, lcts; 11487 uint32_t tiwin; 11488 struct tcpopt to; 11489 struct tcp_bbr *bbr; 11490 struct bbr_sendmap *rsm; 11491 struct timeval ltv; 11492 int32_t did_out = 0; 11493 int32_t in_recovery; 11494 uint16_t nsegs; 11495 int32_t prev_state; 11496 uint32_t lost; 11497 11498 nsegs = max(1, m->m_pkthdr.lro_nsegs); 11499 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 11500 /* add in our stats */ 11501 kern_prefetch(bbr, &prev_state); 11502 prev_state = 0; 11503 thflags = th->th_flags; 11504 /* 11505 * If this is either a state-changing packet or current state isn't 11506 * established, we require a write lock on tcbinfo. Otherwise, we 11507 * allow the tcbinfo to be in either alocked or unlocked, as the 11508 * caller may have unnecessarily acquired a write lock due to a 11509 * race. 
11510 */ 11511 INP_WLOCK_ASSERT(tp->t_inpcb); 11512 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 11513 __func__)); 11514 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 11515 __func__)); 11516 11517 tp->t_rcvtime = ticks; 11518 /* 11519 * Unscale the window into a 32-bit value. For the SYN_SENT state 11520 * the scale is zero. 11521 */ 11522 tiwin = th->th_win << tp->snd_scale; 11523 #ifdef STATS 11524 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); 11525 #endif 11526 /* 11527 * Parse options on any incoming segment. 11528 */ 11529 tcp_dooptions(&to, (u_char *)(th + 1), 11530 (th->th_off << 2) - sizeof(struct tcphdr), 11531 (thflags & TH_SYN) ? TO_SYN : 0); 11532 11533 if (m->m_flags & M_TSTMP) { 11534 /* Prefer the hardware timestamp if present */ 11535 struct timespec ts; 11536 11537 mbuf_tstmp2timespec(m, &ts); 11538 bbr->rc_tv.tv_sec = ts.tv_sec; 11539 bbr->rc_tv.tv_usec = ts.tv_nsec / 1000; 11540 bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv); 11541 } else if (m->m_flags & M_TSTMP_LRO) { 11542 /* Next the arrival timestamp */ 11543 struct timespec ts; 11544 11545 mbuf_tstmp2timespec(m, &ts); 11546 bbr->rc_tv.tv_sec = ts.tv_sec; 11547 bbr->rc_tv.tv_usec = ts.tv_nsec / 1000; 11548 bbr->r_ctl.rc_rcvtime = cts = tcp_tv_to_usectick(&bbr->rc_tv); 11549 } else { 11550 /* 11551 * Ok just get the current time. 11552 */ 11553 bbr->r_ctl.rc_rcvtime = lcts = cts = tcp_get_usecs(&bbr->rc_tv); 11554 } 11555 /* 11556 * If echoed timestamp is later than the current time, fall back to 11557 * non RFC1323 RTT calculation. Normalize timestamp if syncookies 11558 * were used when this connection was established. 11559 */ 11560 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 11561 to.to_tsecr -= tp->ts_offset; 11562 if (TSTMP_GT(to.to_tsecr, tcp_tv_to_mssectick(&bbr->rc_tv))) 11563 to.to_tsecr = 0; 11564 } 11565 /* 11566 * If its the first time in we need to take care of options and 11567 * verify we can do SACK for rack! 11568 */ 11569 if (bbr->r_state == 0) { 11570 /* 11571 * Process options only when we get SYN/ACK back. The SYN 11572 * case for incoming connections is handled in tcp_syncache. 11573 * According to RFC1323 the window field in a SYN (i.e., a 11574 * <SYN> or <SYN,ACK>) segment itself is never scaled. XXX 11575 * this is traditional behavior, may need to be cleaned up. 11576 */ 11577 if (bbr->rc_inp == NULL) { 11578 bbr->rc_inp = tp->t_inpcb; 11579 } 11580 /* 11581 * We need to init rc_inp here since its not init'd when 11582 * bbr_init is called 11583 */ 11584 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 11585 if ((to.to_flags & TOF_SCALE) && 11586 (tp->t_flags & TF_REQ_SCALE)) { 11587 tp->t_flags |= TF_RCVD_SCALE; 11588 tp->snd_scale = to.to_wscale; 11589 } 11590 /* 11591 * Initial send window. It will be updated with the 11592 * next incoming segment to the scaled value. 
11593 */ 11594 tp->snd_wnd = th->th_win; 11595 if (to.to_flags & TOF_TS) { 11596 tp->t_flags |= TF_RCVD_TSTMP; 11597 tp->ts_recent = to.to_tsval; 11598 tp->ts_recent_age = tcp_tv_to_mssectick(&bbr->rc_tv); 11599 } 11600 if (to.to_flags & TOF_MSS) 11601 tcp_mss(tp, to.to_mss); 11602 if ((tp->t_flags & TF_SACK_PERMIT) && 11603 (to.to_flags & TOF_SACKPERM) == 0) 11604 tp->t_flags &= ~TF_SACK_PERMIT; 11605 if (IS_FASTOPEN(tp->t_flags)) { 11606 if (to.to_flags & TOF_FASTOPEN) { 11607 uint16_t mss; 11608 11609 if (to.to_flags & TOF_MSS) 11610 mss = to.to_mss; 11611 else 11612 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) 11613 mss = TCP6_MSS; 11614 else 11615 mss = TCP_MSS; 11616 tcp_fastopen_update_cache(tp, mss, 11617 to.to_tfo_len, to.to_tfo_cookie); 11618 } else 11619 tcp_fastopen_disable_path(tp); 11620 } 11621 } 11622 /* 11623 * At this point we are at the initial call. Here we decide 11624 * if we are doing RACK or not. We do this by seeing if 11625 * TF_SACK_PERMIT is set, if not rack is *not* possible and 11626 * we switch to the default code. 11627 */ 11628 if ((tp->t_flags & TF_SACK_PERMIT) == 0) { 11629 /* Bail */ 11630 tcp_switch_back_to_default(tp); 11631 (*tp->t_fb->tfb_tcp_do_segment) (m, th, so, tp, drop_hdrlen, 11632 tlen, iptos); 11633 return (1); 11634 } 11635 /* Set the flag */ 11636 bbr->r_is_v6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; 11637 tcp_set_hpts(tp->t_inpcb); 11638 sack_filter_clear(&bbr->r_ctl.bbr_sf, th->th_ack); 11639 } 11640 if (thflags & TH_ACK) { 11641 /* Track ack types */ 11642 if (to.to_flags & TOF_SACK) 11643 BBR_STAT_INC(bbr_acks_with_sacks); 11644 else 11645 BBR_STAT_INC(bbr_plain_acks); 11646 } 11647 /* 11648 * This is the one exception case where we set the rack state 11649 * always. All other times (timers etc) we must have a rack-state 11650 * set (so we assure we have done the checks above for SACK). 11651 */ 11652 if (thflags & TH_FIN) 11653 tcp_log_end_status(tp, TCP_EI_STATUS_CLIENT_FIN); 11654 if (bbr->r_state != tp->t_state) 11655 bbr_set_state(tp, bbr, tiwin); 11656 11657 if (SEQ_GT(th->th_ack, tp->snd_una) && (rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map)) != NULL) 11658 kern_prefetch(rsm, &prev_state); 11659 prev_state = bbr->r_state; 11660 bbr->rc_ack_was_delayed = 0; 11661 lost = bbr->r_ctl.rc_lost; 11662 bbr->rc_is_pkt_epoch_now = 0; 11663 if (m->m_flags & (M_TSTMP|M_TSTMP_LRO)) { 11664 /* Get the real time into lcts and figure the real delay */ 11665 lcts = tcp_get_usecs(<v); 11666 if (TSTMP_GT(lcts, cts)) { 11667 bbr->r_ctl.rc_ack_hdwr_delay = lcts - cts; 11668 bbr->rc_ack_was_delayed = 1; 11669 if (TSTMP_GT(bbr->r_ctl.rc_ack_hdwr_delay, 11670 bbr->r_ctl.highest_hdwr_delay)) 11671 bbr->r_ctl.highest_hdwr_delay = bbr->r_ctl.rc_ack_hdwr_delay; 11672 } else { 11673 bbr->r_ctl.rc_ack_hdwr_delay = 0; 11674 bbr->rc_ack_was_delayed = 0; 11675 } 11676 } else { 11677 bbr->r_ctl.rc_ack_hdwr_delay = 0; 11678 bbr->rc_ack_was_delayed = 0; 11679 } 11680 bbr_log_ack_event(bbr, th, &to, tlen, nsegs, cts, nxt_pkt, m); 11681 if ((thflags & TH_SYN) && (thflags & TH_FIN) && V_drop_synfin) { 11682 retval = 0; 11683 m_freem(m); 11684 goto done_with_input; 11685 } 11686 /* 11687 * If a segment with the ACK-bit set arrives in the SYN-SENT state 11688 * check SEQ.ACK first as described on page 66 of RFC 793, section 3.9. 
11689 */ 11690 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && 11691 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { 11692 tcp_log_end_status(tp, TCP_EI_STATUS_RST_IN_FRONT); 11693 ctf_do_dropwithreset_conn(m, tp, th, BANDLIM_RST_OPENPORT, tlen); 11694 return (1); 11695 } 11696 in_recovery = IN_RECOVERY(tp->t_flags); 11697 if (tiwin > bbr->r_ctl.rc_high_rwnd) 11698 bbr->r_ctl.rc_high_rwnd = tiwin; 11699 #ifdef BBR_INVARIANTS 11700 if ((tp->t_inpcb->inp_flags & INP_DROPPED) || 11701 (tp->t_inpcb->inp_flags2 & INP_FREED)) { 11702 panic("tp:%p bbr:%p given a dropped inp:%p", 11703 tp, bbr, tp->t_inpcb); 11704 } 11705 #endif 11706 bbr->r_ctl.rc_flight_at_input = ctf_flight_size(tp, 11707 (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 11708 bbr->rtt_valid = 0; 11709 if (to.to_flags & TOF_TS) { 11710 bbr->rc_ts_valid = 1; 11711 bbr->r_ctl.last_inbound_ts = to.to_tsval; 11712 } else { 11713 bbr->rc_ts_valid = 0; 11714 bbr->r_ctl.last_inbound_ts = 0; 11715 } 11716 retval = (*bbr->r_substate) (m, th, so, 11717 tp, &to, drop_hdrlen, 11718 tlen, tiwin, thflags, nxt_pkt, iptos); 11719 #ifdef BBR_INVARIANTS 11720 if ((retval == 0) && 11721 (tp->t_inpcb == NULL)) { 11722 panic("retval:%d tp:%p t_inpcb:NULL state:%d", 11723 retval, tp, prev_state); 11724 } 11725 #endif 11726 if (nxt_pkt == 0) 11727 BBR_STAT_INC(bbr_rlock_left_ret0); 11728 else 11729 BBR_STAT_INC(bbr_rlock_left_ret1); 11730 if (retval == 0) { 11731 /* 11732 * If retval is 1 the tcb is unlocked and most likely the tp 11733 * is gone. 11734 */ 11735 INP_WLOCK_ASSERT(tp->t_inpcb); 11736 tcp_bbr_xmit_timer_commit(bbr, tp, cts); 11737 if (bbr->rc_is_pkt_epoch_now) 11738 bbr_set_pktepoch(bbr, cts, __LINE__); 11739 bbr_check_bbr_for_state(bbr, cts, __LINE__, (bbr->r_ctl.rc_lost - lost)); 11740 if (nxt_pkt == 0) { 11741 if (bbr->r_wanted_output != 0) { 11742 bbr->rc_output_starts_timer = 0; 11743 did_out = 1; 11744 (void)tp->t_fb->tfb_tcp_output(tp); 11745 } else 11746 bbr_start_hpts_timer(bbr, tp, cts, 6, 0, 0); 11747 } 11748 if ((nxt_pkt == 0) && 11749 ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && 11750 (SEQ_GT(tp->snd_max, tp->snd_una) || 11751 (tp->t_flags & TF_DELACK) || 11752 ((V_tcp_always_keepalive || bbr->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && 11753 (tp->t_state <= TCPS_CLOSING)))) { 11754 /* 11755 * We could not send (probably in the hpts but 11756 * stopped the timer)? 11757 */ 11758 if ((tp->snd_max == tp->snd_una) && 11759 ((tp->t_flags & TF_DELACK) == 0) && 11760 (bbr->rc_inp->inp_in_hpts) && 11761 (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 11762 /* 11763 * keep alive not needed if we are hptsi 11764 * output yet 11765 */ 11766 ; 11767 } else { 11768 if (bbr->rc_inp->inp_in_hpts) { 11769 tcp_hpts_remove(bbr->rc_inp, HPTS_REMOVE_OUTPUT); 11770 if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 11771 (TSTMP_GT(lcts, bbr->rc_pacer_started))) { 11772 uint32_t del; 11773 11774 del = lcts - bbr->rc_pacer_started; 11775 if (bbr->r_ctl.rc_last_delay_val > del) { 11776 BBR_STAT_INC(bbr_force_timer_start); 11777 bbr->r_ctl.rc_last_delay_val -= del; 11778 bbr->rc_pacer_started = lcts; 11779 } else { 11780 /* We are late */ 11781 bbr->r_ctl.rc_last_delay_val = 0; 11782 BBR_STAT_INC(bbr_force_output); 11783 (void)tp->t_fb->tfb_tcp_output(tp); 11784 } 11785 } 11786 } 11787 bbr_start_hpts_timer(bbr, tp, cts, 8, bbr->r_ctl.rc_last_delay_val, 11788 0); 11789 } 11790 } else if ((bbr->rc_output_starts_timer == 0) && (nxt_pkt == 0)) { 11791 /* Do we have the correct timer running? 
*/ 11792 bbr_timer_audit(tp, bbr, lcts, &so->so_snd); 11793 } 11794 /* Do we have a new state */ 11795 if (bbr->r_state != tp->t_state) 11796 bbr_set_state(tp, bbr, tiwin); 11797 done_with_input: 11798 bbr_log_doseg_done(bbr, cts, nxt_pkt, did_out); 11799 if (did_out) 11800 bbr->r_wanted_output = 0; 11801 #ifdef BBR_INVARIANTS 11802 if (tp->t_inpcb == NULL) { 11803 panic("OP:%d retval:%d tp:%p t_inpcb:NULL state:%d", 11804 did_out, 11805 retval, tp, prev_state); 11806 } 11807 #endif 11808 } 11809 return (retval); 11810 } 11811 11812 static void 11813 bbr_log_type_hrdwtso(struct tcpcb *tp, struct tcp_bbr *bbr, int len, int mod, int what_we_can_send) 11814 { 11815 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 11816 union tcp_log_stackspecific log; 11817 struct timeval tv; 11818 uint32_t cts; 11819 11820 cts = tcp_get_usecs(&tv); 11821 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 11822 log.u_bbr.flex1 = bbr->r_ctl.rc_pace_min_segs; 11823 log.u_bbr.flex2 = what_we_can_send; 11824 log.u_bbr.flex3 = bbr->r_ctl.rc_pace_max_segs; 11825 log.u_bbr.flex4 = len; 11826 log.u_bbr.flex5 = 0; 11827 log.u_bbr.flex7 = mod; 11828 log.u_bbr.flex8 = 1; 11829 TCP_LOG_EVENTP(tp, NULL, 11830 &tp->t_inpcb->inp_socket->so_rcv, 11831 &tp->t_inpcb->inp_socket->so_snd, 11832 TCP_HDWR_TLS, 0, 11833 0, &log, false, &tv); 11834 } 11835 } 11836 11837 static void 11838 bbr_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 11839 struct tcpcb *tp, int32_t drop_hdrlen, int32_t tlen, uint8_t iptos) 11840 { 11841 struct timeval tv; 11842 int retval; 11843 11844 /* First lets see if we have old packets */ 11845 if (tp->t_in_pkt) { 11846 if (ctf_do_queued_segments(so, tp, 1)) { 11847 m_freem(m); 11848 return; 11849 } 11850 } 11851 if (m->m_flags & M_TSTMP_LRO) { 11852 tv.tv_sec = m->m_pkthdr.rcv_tstmp /1000000000; 11853 tv.tv_usec = (m->m_pkthdr.rcv_tstmp % 1000000000)/1000; 11854 } else { 11855 /* Should not be should we kassert instead? */ 11856 tcp_get_usecs(&tv); 11857 } 11858 retval = bbr_do_segment_nounlock(m, th, so, tp, 11859 drop_hdrlen, tlen, iptos, 0, &tv); 11860 if (retval == 0) 11861 INP_WUNLOCK(tp->t_inpcb); 11862 } 11863 11864 /* 11865 * Return how much data can be sent without violating the 11866 * cwnd or rwnd. 11867 */ 11868 11869 static inline uint32_t 11870 bbr_what_can_we_send(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t sendwin, 11871 uint32_t avail, int32_t sb_offset, uint32_t cts) 11872 { 11873 uint32_t len; 11874 11875 if (ctf_outstanding(tp) >= tp->snd_wnd) { 11876 /* We never want to go over our peers rcv-window */ 11877 len = 0; 11878 } else { 11879 uint32_t flight; 11880 11881 flight = ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + bbr->r_ctl.rc_lost_bytes)); 11882 if (flight >= sendwin) { 11883 /* 11884 * We have in flight what we are allowed by cwnd (if 11885 * it was rwnd blocking it would have hit above out 11886 * >= tp->snd_wnd). 11887 */ 11888 return (0); 11889 } 11890 len = sendwin - flight; 11891 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { 11892 /* We would send too much (beyond the rwnd) */ 11893 len = tp->snd_wnd - ctf_outstanding(tp); 11894 } 11895 if ((len + sb_offset) > avail) { 11896 /* 11897 * We don't have that much in the SB, how much is 11898 * there? 
11899 */ 11900 len = avail - sb_offset; 11901 } 11902 } 11903 return (len); 11904 } 11905 11906 static inline void 11907 bbr_do_error_accounting(struct tcpcb *tp, struct tcp_bbr *bbr, struct bbr_sendmap *rsm, int32_t len, int32_t error) 11908 { 11909 #ifdef NETFLIX_STATS 11910 KMOD_TCPSTAT_INC(tcps_sndpack_error); 11911 KMOD_TCPSTAT_ADD(tcps_sndbyte_error, len); 11912 #endif 11913 } 11914 11915 static inline void 11916 bbr_do_send_accounting(struct tcpcb *tp, struct tcp_bbr *bbr, struct bbr_sendmap *rsm, int32_t len, int32_t error) 11917 { 11918 if (error) { 11919 bbr_do_error_accounting(tp, bbr, rsm, len, error); 11920 return; 11921 } 11922 if (rsm) { 11923 if (rsm->r_flags & BBR_TLP) { 11924 /* 11925 * TLP should not count in retran count, but in its 11926 * own bin 11927 */ 11928 #ifdef NETFLIX_STATS 11929 tp->t_sndtlppack++; 11930 tp->t_sndtlpbyte += len; 11931 KMOD_TCPSTAT_INC(tcps_tlpresends); 11932 KMOD_TCPSTAT_ADD(tcps_tlpresend_bytes, len); 11933 #endif 11934 } else { 11935 /* Retransmit */ 11936 tp->t_sndrexmitpack++; 11937 KMOD_TCPSTAT_INC(tcps_sndrexmitpack); 11938 KMOD_TCPSTAT_ADD(tcps_sndrexmitbyte, len); 11939 #ifdef STATS 11940 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, 11941 len); 11942 #endif 11943 } 11944 /* 11945 * Logs in 0 - 8, 8 is all non probe_bw states 0-7 is 11946 * sub-state 11947 */ 11948 counter_u64_add(bbr_state_lost[rsm->r_bbr_state], len); 11949 if (bbr->rc_bbr_state != BBR_STATE_PROBE_BW) { 11950 /* Non probe_bw log in 1, 2, or 4. */ 11951 counter_u64_add(bbr_state_resend[bbr->rc_bbr_state], len); 11952 } else { 11953 /* 11954 * Log our probe state 3, and log also 5-13 to show 11955 * us the recovery sub-state for the send. This 11956 * means that 3 == (5+6+7+8+9+10+11+12+13) 11957 */ 11958 counter_u64_add(bbr_state_resend[BBR_STATE_PROBE_BW], len); 11959 counter_u64_add(bbr_state_resend[(bbr_state_val(bbr) + 5)], len); 11960 } 11961 /* Place in both 16's the totals of retransmitted */ 11962 counter_u64_add(bbr_state_lost[16], len); 11963 counter_u64_add(bbr_state_resend[16], len); 11964 /* Place in 17's the total sent */ 11965 counter_u64_add(bbr_state_resend[17], len); 11966 counter_u64_add(bbr_state_lost[17], len); 11967 11968 } else { 11969 /* New sends */ 11970 KMOD_TCPSTAT_INC(tcps_sndpack); 11971 KMOD_TCPSTAT_ADD(tcps_sndbyte, len); 11972 /* Place in 17's the total sent */ 11973 counter_u64_add(bbr_state_resend[17], len); 11974 counter_u64_add(bbr_state_lost[17], len); 11975 #ifdef STATS 11976 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, 11977 len); 11978 #endif 11979 } 11980 } 11981 11982 static void 11983 bbr_cwnd_limiting(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t in_level) 11984 { 11985 if (bbr->rc_filled_pipe && bbr_target_cwnd_mult_limit && (bbr->rc_use_google == 0)) { 11986 /* 11987 * Limit the cwnd to not be above N x the target plus whats 11988 * is outstanding. The target is based on the current b/w 11989 * estimate. 11990 */ 11991 uint32_t target; 11992 11993 target = bbr_get_target_cwnd(bbr, bbr_get_bw(bbr), BBR_UNIT); 11994 target += ctf_outstanding(tp); 11995 target *= bbr_target_cwnd_mult_limit; 11996 if (tp->snd_cwnd > target) 11997 tp->snd_cwnd = target; 11998 bbr_log_type_cwndupd(bbr, 0, 0, 0, 10, 0, 0, __LINE__); 11999 } 12000 } 12001 12002 static int 12003 bbr_window_update_needed(struct tcpcb *tp, struct socket *so, uint32_t recwin, int32_t maxseg) 12004 { 12005 /* 12006 * "adv" is the amount we could increase the window, taking into 12007 * account that we are limited by TCP_MAXWIN << tp->rcv_scale. 
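 * An update is deemed needed when the window can open by at least two
 * segments and either it opens by a quarter of the receive buffer, the
 * window we can offer is down to an eighth of the buffer, or the buffer
 * itself is at most eight segments; or when the opening amounts to at
 * least half of the buffer.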
12008 */ 12009 uint32_t adv; 12010 int32_t oldwin; 12011 12012 adv = min(recwin, TCP_MAXWIN << tp->rcv_scale); 12013 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { 12014 oldwin = (tp->rcv_adv - tp->rcv_nxt); 12015 adv -= oldwin; 12016 } else 12017 oldwin = 0; 12018 12019 /* 12020 * If the new window size ends up being the same as the old size 12021 * when it is scaled, then don't force a window update. 12022 */ 12023 if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale) 12024 return (0); 12025 12026 if (adv >= (2 * maxseg) && 12027 (adv >= (so->so_rcv.sb_hiwat / 4) || 12028 recwin <= (so->so_rcv.sb_hiwat / 8) || 12029 so->so_rcv.sb_hiwat <= 8 * maxseg)) { 12030 return (1); 12031 } 12032 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) 12033 return (1); 12034 return (0); 12035 } 12036 12037 /* 12038 * Return 0 on success and a errno on failure to send. 12039 * Note that a 0 return may not mean we sent anything 12040 * if the TCB was on the hpts. A non-zero return 12041 * does indicate the error we got from ip[6]_output. 12042 */ 12043 static int 12044 bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv) 12045 { 12046 struct socket *so; 12047 int32_t len; 12048 uint32_t cts; 12049 uint32_t recwin, sendwin; 12050 int32_t sb_offset; 12051 int32_t flags, abandon, error = 0; 12052 struct tcp_log_buffer *lgb = NULL; 12053 struct mbuf *m; 12054 struct mbuf *mb; 12055 uint32_t if_hw_tsomaxsegcount = 0; 12056 uint32_t if_hw_tsomaxsegsize = 0; 12057 uint32_t if_hw_tsomax = 0; 12058 struct ip *ip = NULL; 12059 #ifdef TCPDEBUG 12060 struct ipovly *ipov = NULL; 12061 #endif 12062 struct tcp_bbr *bbr; 12063 struct tcphdr *th; 12064 #ifdef NETFLIX_TCPOUDP 12065 struct udphdr *udp = NULL; 12066 #endif 12067 u_char opt[TCP_MAXOLEN]; 12068 unsigned ipoptlen, optlen, hdrlen; 12069 #ifdef NETFLIX_TCPOUDP 12070 unsigned ulen; 12071 #endif 12072 uint32_t bbr_seq; 12073 uint32_t delay_calc=0; 12074 uint8_t doing_tlp = 0; 12075 uint8_t local_options; 12076 #ifdef BBR_INVARIANTS 12077 uint8_t doing_retran_from = 0; 12078 uint8_t picked_up_retran = 0; 12079 #endif 12080 uint8_t wanted_cookie = 0; 12081 uint8_t more_to_rxt=0; 12082 int32_t prefetch_so_done = 0; 12083 int32_t prefetch_rsm = 0; 12084 uint32_t what_we_can = 0; 12085 uint32_t tot_len = 0; 12086 uint32_t rtr_cnt = 0; 12087 uint32_t maxseg, pace_max_segs, p_maxseg; 12088 int32_t csum_flags; 12089 int32_t hw_tls; 12090 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 12091 unsigned ipsec_optlen = 0; 12092 12093 #endif 12094 volatile int32_t sack_rxmit; 12095 struct bbr_sendmap *rsm = NULL; 12096 int32_t tso, mtu; 12097 int force_tso = 0; 12098 struct tcpopt to; 12099 int32_t slot = 0; 12100 struct inpcb *inp; 12101 struct sockbuf *sb; 12102 uint32_t hpts_calling; 12103 #ifdef INET6 12104 struct ip6_hdr *ip6 = NULL; 12105 int32_t isipv6; 12106 #endif 12107 uint8_t app_limited = BBR_JR_SENT_DATA; 12108 uint8_t filled_all = 0; 12109 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 12110 /* We take a cache hit here */ 12111 memcpy(&bbr->rc_tv, tv, sizeof(struct timeval)); 12112 cts = tcp_tv_to_usectick(&bbr->rc_tv); 12113 inp = bbr->rc_inp; 12114 so = inp->inp_socket; 12115 sb = &so->so_snd; 12116 #ifdef KERN_TLS 12117 if (sb->sb_flags & SB_TLS_IFNET) 12118 hw_tls = 1; 12119 else 12120 #endif 12121 hw_tls = 0; 12122 kern_prefetch(sb, &maxseg); 12123 maxseg = tp->t_maxseg - bbr->rc_last_options; 12124 if (bbr_minseg(bbr) < maxseg) { 12125 tcp_bbr_tso_size_check(bbr, cts); 12126 } 12127 /* Remove any flags that indicate we are pacing on the inp */ 12128 pace_max_segs = 
bbr->r_ctl.rc_pace_max_segs; 12129 p_maxseg = min(maxseg, pace_max_segs); 12130 INP_WLOCK_ASSERT(inp); 12131 #ifdef TCP_OFFLOAD 12132 if (tp->t_flags & TF_TOE) 12133 return (tcp_offload_output(tp)); 12134 #endif 12135 12136 #ifdef INET6 12137 if (bbr->r_state) { 12138 /* Use the cache line loaded if possible */ 12139 isipv6 = bbr->r_is_v6; 12140 } else { 12141 isipv6 = (inp->inp_vflag & INP_IPV6) != 0; 12142 } 12143 #endif 12144 if (((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && 12145 inp->inp_in_hpts) { 12146 /* 12147 * We are on the hpts for some timer but not hptsi output. 12148 * Possibly remove from the hpts so we can send/recv etc. 12149 */ 12150 if ((tp->t_flags & TF_ACKNOW) == 0) { 12151 /* 12152 * No immediate demand right now to send an ack, but 12153 * the user may have read, making room for new data 12154 * (a window update). If so we may want to cancel 12155 * whatever timer is running (KEEP/DEL-ACK?) and 12156 * continue to send out a window update. Or we may 12157 * have gotten more data into the socket buffer to 12158 * send. 12159 */ 12160 recwin = min(max(sbspace(&so->so_rcv), 0), 12161 TCP_MAXWIN << tp->rcv_scale); 12162 if ((bbr_window_update_needed(tp, so, recwin, maxseg) == 0) && 12163 ((tcp_outflags[tp->t_state] & TH_RST) == 0) && 12164 ((sbavail(sb) + ((tcp_outflags[tp->t_state] & TH_FIN) ? 1 : 0)) <= 12165 (tp->snd_max - tp->snd_una))) { 12166 /* 12167 * Nothing new to send and no window update 12168 * is needed to send. Lets just return and 12169 * let the timer-run off. 12170 */ 12171 return (0); 12172 } 12173 } 12174 tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT); 12175 bbr_timer_cancel(bbr, __LINE__, cts); 12176 } 12177 if (bbr->r_ctl.rc_last_delay_val) { 12178 /* Calculate a rough delay for early escape to sending */ 12179 if (SEQ_GT(cts, bbr->rc_pacer_started)) 12180 delay_calc = cts - bbr->rc_pacer_started; 12181 if (delay_calc >= bbr->r_ctl.rc_last_delay_val) 12182 delay_calc -= bbr->r_ctl.rc_last_delay_val; 12183 else 12184 delay_calc = 0; 12185 } 12186 /* Mark that we have called bbr_output(). */ 12187 if ((bbr->r_timer_override) || 12188 (tp->t_state < TCPS_ESTABLISHED)) { 12189 /* Timeouts or early states are exempt */ 12190 if (inp->inp_in_hpts) 12191 tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT); 12192 } else if (inp->inp_in_hpts) { 12193 if ((bbr->r_ctl.rc_last_delay_val) && 12194 (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && 12195 delay_calc) { 12196 /* 12197 * We were being paced for output and the delay has 12198 * already exceeded when we were supposed to be 12199 * called, lets go ahead and pull out of the hpts 12200 * and call output. 12201 */ 12202 counter_u64_add(bbr_out_size[TCP_MSS_ACCT_LATE], 1); 12203 bbr->r_ctl.rc_last_delay_val = 0; 12204 tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT); 12205 } else if (tp->t_state == TCPS_CLOSED) { 12206 bbr->r_ctl.rc_last_delay_val = 0; 12207 tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT); 12208 } else { 12209 /* 12210 * On the hpts, you shall not pass! even if ACKNOW 12211 * is on, we will when the hpts fires, unless of 12212 * course we are overdue. 
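 * ("Overdue" is handled above: when the pacing delay we asked for has already expired we pull ourselves off the hpts and fall through to send.)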
12213 */ 12214 counter_u64_add(bbr_out_size[TCP_MSS_ACCT_INPACE], 1); 12215 return (0); 12216 } 12217 } 12218 bbr->rc_cwnd_limited = 0; 12219 if (bbr->r_ctl.rc_last_delay_val) { 12220 /* recalculate the real delay and deal with over/under */ 12221 if (SEQ_GT(cts, bbr->rc_pacer_started)) 12222 delay_calc = cts - bbr->rc_pacer_started; 12223 else 12224 delay_calc = 0; 12225 if (delay_calc >= bbr->r_ctl.rc_last_delay_val) 12226 /* Setup the delay which will be added in */ 12227 delay_calc -= bbr->r_ctl.rc_last_delay_val; 12228 else { 12229 /* 12230 * We are early setup to adjust 12231 * our slot time. 12232 */ 12233 uint64_t merged_val; 12234 12235 bbr->r_ctl.rc_agg_early += (bbr->r_ctl.rc_last_delay_val - delay_calc); 12236 bbr->r_agg_early_set = 1; 12237 if (bbr->r_ctl.rc_hptsi_agg_delay) { 12238 if (bbr->r_ctl.rc_hptsi_agg_delay >= bbr->r_ctl.rc_agg_early) { 12239 /* Nope our previous late cancels out the early */ 12240 bbr->r_ctl.rc_hptsi_agg_delay -= bbr->r_ctl.rc_agg_early; 12241 bbr->r_agg_early_set = 0; 12242 bbr->r_ctl.rc_agg_early = 0; 12243 } else { 12244 bbr->r_ctl.rc_agg_early -= bbr->r_ctl.rc_hptsi_agg_delay; 12245 bbr->r_ctl.rc_hptsi_agg_delay = 0; 12246 } 12247 } 12248 merged_val = bbr->rc_pacer_started; 12249 merged_val <<= 32; 12250 merged_val |= bbr->r_ctl.rc_last_delay_val; 12251 bbr_log_pacing_delay_calc(bbr, inp->inp_hpts_calls, 12252 bbr->r_ctl.rc_agg_early, cts, delay_calc, merged_val, 12253 bbr->r_agg_early_set, 3); 12254 bbr->r_ctl.rc_last_delay_val = 0; 12255 BBR_STAT_INC(bbr_early); 12256 delay_calc = 0; 12257 } 12258 } else { 12259 /* We were not delayed due to hptsi */ 12260 if (bbr->r_agg_early_set) 12261 bbr->r_ctl.rc_agg_early = 0; 12262 bbr->r_agg_early_set = 0; 12263 delay_calc = 0; 12264 } 12265 if (delay_calc) { 12266 /* 12267 * We had a hptsi delay which means we are falling behind on 12268 * sending at the expected rate. Calculate an extra amount 12269 * of data we can send, if any, to put us back on track. 12270 */ 12271 if ((bbr->r_ctl.rc_hptsi_agg_delay + delay_calc) < bbr->r_ctl.rc_hptsi_agg_delay) 12272 bbr->r_ctl.rc_hptsi_agg_delay = 0xffffffff; 12273 else 12274 bbr->r_ctl.rc_hptsi_agg_delay += delay_calc; 12275 } 12276 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 12277 if ((tp->snd_una == tp->snd_max) && 12278 (bbr->rc_bbr_state != BBR_STATE_IDLE_EXIT) && 12279 (sbavail(sb))) { 12280 /* 12281 * Ok we have been idle with nothing outstanding 12282 * we possibly need to start fresh with either a new 12283 * suite of states or a fast-ramp up. 12284 */ 12285 bbr_restart_after_idle(bbr, 12286 cts, bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time)); 12287 } 12288 /* 12289 * Now was there a hptsi delay where we are behind? We only count 12290 * being behind if: a) We are not in recovery. b) There was a delay. 12291 * <and> c) We had room to send something. 12292 * 12293 */ 12294 hpts_calling = inp->inp_hpts_calls; 12295 inp->inp_hpts_calls = 0; 12296 if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { 12297 if (bbr_process_timers(tp, bbr, cts, hpts_calling)) { 12298 counter_u64_add(bbr_out_size[TCP_MSS_ACCT_ATIMER], 1); 12299 return (0); 12300 } 12301 } 12302 bbr->rc_inp->inp_flags2 &= ~INP_MBUF_QUEUE_READY; 12303 if (hpts_calling && 12304 (bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { 12305 bbr->r_ctl.rc_last_delay_val = 0; 12306 } 12307 bbr->r_timer_override = 0; 12308 bbr->r_wanted_output = 0; 12309 /* 12310 * For TFO connections in SYN_RECEIVED, only allow the initial 12311 * SYN|ACK and those sent by the retransmit timer. 
12312 */ 12313 if (IS_FASTOPEN(tp->t_flags) && 12314 ((tp->t_state == TCPS_SYN_RECEIVED) || 12315 (tp->t_state == TCPS_SYN_SENT)) && 12316 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ 12317 (tp->t_rxtshift == 0)) { /* not a retransmit */ 12318 return (0); 12319 } 12320 /* 12321 * Before sending anything check for a state update. For hpts 12322 * calling without input this is important. If its input calling 12323 * then this was already done. 12324 */ 12325 if (bbr->rc_use_google == 0) 12326 bbr_check_bbr_for_state(bbr, cts, __LINE__, 0); 12327 again: 12328 /* 12329 * If we've recently taken a timeout, snd_max will be greater than 12330 * snd_max. BBR in general does not pay much attention to snd_nxt 12331 * for historic reasons the persist timer still uses it. This means 12332 * we have to look at it. All retransmissions that are not persits 12333 * use the rsm that needs to be sent so snd_nxt is ignored. At the 12334 * end of this routine we pull snd_nxt always up to snd_max. 12335 */ 12336 doing_tlp = 0; 12337 #ifdef BBR_INVARIANTS 12338 doing_retran_from = picked_up_retran = 0; 12339 #endif 12340 error = 0; 12341 tso = 0; 12342 slot = 0; 12343 mtu = 0; 12344 sendwin = min(tp->snd_wnd, tp->snd_cwnd); 12345 sb_offset = tp->snd_max - tp->snd_una; 12346 flags = tcp_outflags[tp->t_state]; 12347 sack_rxmit = 0; 12348 len = 0; 12349 rsm = NULL; 12350 if (flags & TH_RST) { 12351 SOCKBUF_LOCK(sb); 12352 goto send; 12353 } 12354 recheck_resend: 12355 while (bbr->r_ctl.rc_free_cnt < bbr_min_req_free) { 12356 /* We need to always have one in reserve */ 12357 rsm = bbr_alloc(bbr); 12358 if (rsm == NULL) { 12359 error = ENOMEM; 12360 /* Lie to get on the hpts */ 12361 tot_len = tp->t_maxseg; 12362 if (hpts_calling) 12363 /* Retry in a ms */ 12364 slot = 1001; 12365 goto just_return_nolock; 12366 } 12367 TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next); 12368 bbr->r_ctl.rc_free_cnt++; 12369 rsm = NULL; 12370 } 12371 /* What do we send, a resend? */ 12372 if (bbr->r_ctl.rc_resend == NULL) { 12373 /* Check for rack timeout */ 12374 bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts); 12375 if (bbr->r_ctl.rc_resend) { 12376 #ifdef BBR_INVARIANTS 12377 picked_up_retran = 1; 12378 #endif 12379 bbr_cong_signal(tp, NULL, CC_NDUPACK, bbr->r_ctl.rc_resend); 12380 } 12381 } 12382 if (bbr->r_ctl.rc_resend) { 12383 rsm = bbr->r_ctl.rc_resend; 12384 #ifdef BBR_INVARIANTS 12385 doing_retran_from = 1; 12386 #endif 12387 /* Remove any TLP flags its a RACK or T-O */ 12388 rsm->r_flags &= ~BBR_TLP; 12389 bbr->r_ctl.rc_resend = NULL; 12390 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 12391 #ifdef BBR_INVARIANTS 12392 panic("Huh, tp:%p bbr:%p rsm:%p start:%u < snd_una:%u\n", 12393 tp, bbr, rsm, rsm->r_start, tp->snd_una); 12394 goto recheck_resend; 12395 #else 12396 /* TSNH */ 12397 rsm = NULL; 12398 goto recheck_resend; 12399 #endif 12400 } 12401 rtr_cnt++; 12402 if (rsm->r_flags & BBR_HAS_SYN) { 12403 /* Only retransmit a SYN by itself */ 12404 len = 0; 12405 if ((flags & TH_SYN) == 0) { 12406 /* Huh something is wrong */ 12407 rsm->r_start++; 12408 if (rsm->r_start == rsm->r_end) { 12409 /* Clean it up, somehow we missed the ack? */ 12410 bbr_log_syn(tp, NULL); 12411 } else { 12412 /* TFO with data? 
*/ 12413 rsm->r_flags &= ~BBR_HAS_SYN; 12414 len = rsm->r_end - rsm->r_start; 12415 } 12416 } else { 12417 /* Retransmitting SYN */ 12418 rsm = NULL; 12419 SOCKBUF_LOCK(sb); 12420 goto send; 12421 } 12422 } else 12423 len = rsm->r_end - rsm->r_start; 12424 if ((bbr->rc_resends_use_tso == 0) && 12425 #ifdef KERN_TLS 12426 ((sb->sb_flags & SB_TLS_IFNET) == 0) && 12427 #endif 12428 (len > maxseg)) { 12429 len = maxseg; 12430 more_to_rxt = 1; 12431 } 12432 sb_offset = rsm->r_start - tp->snd_una; 12433 if (len > 0) { 12434 sack_rxmit = 1; 12435 KMOD_TCPSTAT_INC(tcps_sack_rexmits); 12436 KMOD_TCPSTAT_ADD(tcps_sack_rexmit_bytes, 12437 min(len, maxseg)); 12438 } else { 12439 /* I dont think this can happen */ 12440 rsm = NULL; 12441 goto recheck_resend; 12442 } 12443 BBR_STAT_INC(bbr_resends_set); 12444 } else if (bbr->r_ctl.rc_tlp_send) { 12445 /* 12446 * Tail loss probe 12447 */ 12448 doing_tlp = 1; 12449 rsm = bbr->r_ctl.rc_tlp_send; 12450 bbr->r_ctl.rc_tlp_send = NULL; 12451 sack_rxmit = 1; 12452 len = rsm->r_end - rsm->r_start; 12453 rtr_cnt++; 12454 if ((bbr->rc_resends_use_tso == 0) && (len > maxseg)) 12455 len = maxseg; 12456 12457 if (SEQ_GT(tp->snd_una, rsm->r_start)) { 12458 #ifdef BBR_INVARIANTS 12459 panic("tp:%p bbc:%p snd_una:%u rsm:%p r_start:%u", 12460 tp, bbr, tp->snd_una, rsm, rsm->r_start); 12461 #else 12462 /* TSNH */ 12463 rsm = NULL; 12464 goto recheck_resend; 12465 #endif 12466 } 12467 sb_offset = rsm->r_start - tp->snd_una; 12468 BBR_STAT_INC(bbr_tlp_set); 12469 } 12470 /* 12471 * Enforce a connection sendmap count limit if set 12472 * as long as we are not retransmiting. 12473 */ 12474 if ((rsm == NULL) && 12475 (V_tcp_map_entries_limit > 0) && 12476 (bbr->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { 12477 BBR_STAT_INC(bbr_alloc_limited); 12478 if (!bbr->alloc_limit_reported) { 12479 bbr->alloc_limit_reported = 1; 12480 BBR_STAT_INC(bbr_alloc_limited_conns); 12481 } 12482 goto just_return_nolock; 12483 } 12484 #ifdef BBR_INVARIANTS 12485 if (rsm && SEQ_LT(rsm->r_start, tp->snd_una)) { 12486 panic("tp:%p bbr:%p rsm:%p sb_offset:%u len:%u", 12487 tp, bbr, rsm, sb_offset, len); 12488 } 12489 #endif 12490 /* 12491 * Get standard flags, and add SYN or FIN if requested by 'hidden' 12492 * state flags. 12493 */ 12494 if (tp->t_flags & TF_NEEDFIN && (rsm == NULL)) 12495 flags |= TH_FIN; 12496 if (tp->t_flags & TF_NEEDSYN) 12497 flags |= TH_SYN; 12498 12499 if (rsm && (rsm->r_flags & BBR_HAS_FIN)) { 12500 /* we are retransmitting the fin */ 12501 len--; 12502 if (len) { 12503 /* 12504 * When retransmitting data do *not* include the 12505 * FIN. This could happen from a TLP probe if we 12506 * allowed data with a FIN. 12507 */ 12508 flags &= ~TH_FIN; 12509 } 12510 } else if (rsm) { 12511 if (flags & TH_FIN) 12512 flags &= ~TH_FIN; 12513 } 12514 if ((sack_rxmit == 0) && (prefetch_rsm == 0)) { 12515 void *end_rsm; 12516 12517 end_rsm = TAILQ_LAST_FAST(&bbr->r_ctl.rc_tmap, bbr_sendmap, r_tnext); 12518 if (end_rsm) 12519 kern_prefetch(end_rsm, &prefetch_rsm); 12520 prefetch_rsm = 1; 12521 } 12522 SOCKBUF_LOCK(sb); 12523 /* 12524 * If snd_nxt == snd_max and we have transmitted a FIN, the 12525 * sb_offset will be > 0 even if so_snd.sb_cc is 0, resulting in a 12526 * negative length. This can also occur when TCP opens up its 12527 * congestion window while receiving additional duplicate acks after 12528 * fast-retransmit because TCP will reset snd_nxt to snd_max after 12529 * the fast-retransmit. 
12530 * 12531 * In the normal retransmit-FIN-only case, however, snd_nxt will be 12532 * set to snd_una, the sb_offset will be 0, and the length may wind 12533 * up 0. 12534 * 12535 * If sack_rxmit is true we are retransmitting from the scoreboard 12536 * in which case len is already set. 12537 */ 12538 if (sack_rxmit == 0) { 12539 uint32_t avail; 12540 12541 avail = sbavail(sb); 12542 if (SEQ_GT(tp->snd_max, tp->snd_una)) 12543 sb_offset = tp->snd_max - tp->snd_una; 12544 else 12545 sb_offset = 0; 12546 if (bbr->rc_tlp_new_data) { 12547 /* TLP is forcing out new data */ 12548 uint32_t tlplen; 12549 12550 doing_tlp = 1; 12551 tlplen = maxseg; 12552 12553 if (tlplen > (uint32_t)(avail - sb_offset)) { 12554 tlplen = (uint32_t)(avail - sb_offset); 12555 } 12556 if (tlplen > tp->snd_wnd) { 12557 len = tp->snd_wnd; 12558 } else { 12559 len = tlplen; 12560 } 12561 bbr->rc_tlp_new_data = 0; 12562 } else { 12563 what_we_can = len = bbr_what_can_we_send(tp, bbr, sendwin, avail, sb_offset, cts); 12564 if ((len < p_maxseg) && 12565 (bbr->rc_in_persist == 0) && 12566 (ctf_outstanding(tp) >= (2 * p_maxseg)) && 12567 ((avail - sb_offset) >= p_maxseg)) { 12568 /* 12569 * We are not completing whats in the socket 12570 * buffer (i.e. there is at least a segment 12571 * waiting to send) and we have 2 or more 12572 * segments outstanding. There is no sense 12573 * of sending a little piece. Lets defer and 12574 * and wait until we can send a whole 12575 * segment. 12576 */ 12577 len = 0; 12578 } 12579 if (bbr->rc_in_persist) { 12580 /* 12581 * We are in persists, figure out if 12582 * a retransmit is available (maybe the previous 12583 * persists we sent) or if we have to send new 12584 * data. 12585 */ 12586 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 12587 if (rsm) { 12588 len = rsm->r_end - rsm->r_start; 12589 if (rsm->r_flags & BBR_HAS_FIN) 12590 len--; 12591 if ((bbr->rc_resends_use_tso == 0) && (len > maxseg)) 12592 len = maxseg; 12593 if (len > 1) 12594 BBR_STAT_INC(bbr_persist_reneg); 12595 /* 12596 * XXXrrs we could force the len to 12597 * 1 byte here to cause the chunk to 12598 * split apart.. but that would then 12599 * mean we always retransmit it as 12600 * one byte even after the window 12601 * opens. 12602 */ 12603 sack_rxmit = 1; 12604 sb_offset = rsm->r_start - tp->snd_una; 12605 } else { 12606 /* 12607 * First time through in persists or peer 12608 * acked our one byte. Though we do have 12609 * to have something in the sb. 12610 */ 12611 len = 1; 12612 sb_offset = 0; 12613 if (avail == 0) 12614 len = 0; 12615 } 12616 } 12617 } 12618 } 12619 if (prefetch_so_done == 0) { 12620 kern_prefetch(so, &prefetch_so_done); 12621 prefetch_so_done = 1; 12622 } 12623 /* 12624 * Lop off SYN bit if it has already been sent. However, if this is 12625 * SYN-SENT state and if segment contains data and if we don't know 12626 * that foreign host supports TAO, suppress sending segment. 12627 */ 12628 if ((flags & TH_SYN) && (rsm == NULL) && 12629 SEQ_GT(tp->snd_max, tp->snd_una)) { 12630 if (tp->t_state != TCPS_SYN_RECEIVED) 12631 flags &= ~TH_SYN; 12632 /* 12633 * When sending additional segments following a TFO SYN|ACK, 12634 * do not include the SYN bit. 12635 */ 12636 if (IS_FASTOPEN(tp->t_flags) && 12637 (tp->t_state == TCPS_SYN_RECEIVED)) 12638 flags &= ~TH_SYN; 12639 sb_offset--, len++; 12640 if (sbavail(sb) == 0) 12641 len = 0; 12642 } else if ((flags & TH_SYN) && rsm) { 12643 /* 12644 * Subtract one from the len for the SYN being 12645 * retransmitted. 
12646 */ 12647 len--; 12648 } 12649 /* 12650 * Be careful not to send data and/or FIN on SYN segments. This 12651 * measure is needed to prevent interoperability problems with not 12652 * fully conformant TCP implementations. 12653 */ 12654 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { 12655 len = 0; 12656 flags &= ~TH_FIN; 12657 } 12658 /* 12659 * On TFO sockets, ensure no data is sent in the following cases: 12660 * 12661 * - When retransmitting SYN|ACK on a passively-created socket 12662 * - When retransmitting SYN on an actively created socket 12663 * - When sending a zero-length cookie (cookie request) on an 12664 * actively created socket 12665 * - When the socket is in the CLOSED state (RST is being sent) 12666 */ 12667 if (IS_FASTOPEN(tp->t_flags) && 12668 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || 12669 ((tp->t_state == TCPS_SYN_SENT) && 12670 (tp->t_tfo_client_cookie_len == 0)) || 12671 (flags & TH_RST))) { 12672 len = 0; 12673 sack_rxmit = 0; 12674 rsm = NULL; 12675 } 12676 /* Without fast-open there should never be data sent on a SYN */ 12677 if ((flags & TH_SYN) && (!IS_FASTOPEN(tp->t_flags))) 12678 len = 0; 12679 if (len <= 0) { 12680 /* 12681 * If FIN has been sent but not acked, but we haven't been 12682 * called to retransmit, len will be < 0. Otherwise, window 12683 * shrank after we sent into it. If window shrank to 0, 12684 * cancel pending retransmit, pull snd_nxt back to (closed) 12685 * window, and set the persist timer if it isn't already 12686 * going. If the window didn't close completely, just wait 12687 * for an ACK. 12688 * 12689 * We also do a general check here to ensure that we will 12690 * set the persist timer when we have data to send, but a 12691 * 0-byte window. This makes sure the persist timer is set 12692 * even if the packet hits one of the "goto send" lines 12693 * below. 12694 */ 12695 len = 0; 12696 if ((tp->snd_wnd == 0) && 12697 (TCPS_HAVEESTABLISHED(tp->t_state)) && 12698 (tp->snd_una == tp->snd_max) && 12699 (sb_offset < (int)sbavail(sb))) { 12700 /* 12701 * Not enough room in the rwnd to send 12702 * a paced segment out. 12703 */ 12704 bbr_enter_persist(tp, bbr, cts, __LINE__); 12705 } 12706 } else if ((rsm == NULL) && 12707 (doing_tlp == 0) && 12708 (len < bbr->r_ctl.rc_pace_max_segs)) { 12709 /* 12710 * We are not sending a full segment for 12711 * some reason. Should we not send anything (think 12712 * sws or persists)? 12713 */ 12714 if ((tp->snd_wnd < min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && 12715 (TCPS_HAVEESTABLISHED(tp->t_state)) && 12716 (len < (int)(sbavail(sb) - sb_offset))) { 12717 /* 12718 * Here the rwnd is less than 12719 * the pacing size, this is not a retransmit, 12720 * we are established and 12721 * the send is not the last in the socket buffer 12722 * lets not send, and possibly enter persists. 
12723 */ 12724 len = 0; 12725 if (tp->snd_max == tp->snd_una) 12726 bbr_enter_persist(tp, bbr, cts, __LINE__); 12727 } else if ((tp->snd_cwnd >= bbr->r_ctl.rc_pace_max_segs) && 12728 (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 12729 bbr->r_ctl.rc_lost_bytes)) > (2 * maxseg)) && 12730 (len < (int)(sbavail(sb) - sb_offset)) && 12731 (len < bbr_minseg(bbr))) { 12732 /* 12733 * Here we are not retransmitting, and 12734 * the cwnd is not so small that we could 12735 * not send at least a min size (rxt timer 12736 * not having gone off), We have 2 segments or 12737 * more already in flight, its not the tail end 12738 * of the socket buffer and the cwnd is blocking 12739 * us from sending out minimum pacing segment size. 12740 * Lets not send anything. 12741 */ 12742 bbr->rc_cwnd_limited = 1; 12743 len = 0; 12744 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < 12745 min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) && 12746 (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 12747 bbr->r_ctl.rc_lost_bytes)) > (2 * maxseg)) && 12748 (len < (int)(sbavail(sb) - sb_offset)) && 12749 (TCPS_HAVEESTABLISHED(tp->t_state))) { 12750 /* 12751 * Here we have a send window but we have 12752 * filled it up and we can't send another pacing segment. 12753 * We also have in flight more than 2 segments 12754 * and we are not completing the sb i.e. we allow 12755 * the last bytes of the sb to go out even if 12756 * its not a full pacing segment. 12757 */ 12758 len = 0; 12759 } 12760 } 12761 /* len will be >= 0 after this point. */ 12762 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 12763 tcp_sndbuf_autoscale(tp, so, sendwin); 12764 /* 12765 * 12766 */ 12767 if (bbr->rc_in_persist && 12768 len && 12769 (rsm == NULL) && 12770 (len < min((bbr->r_ctl.rc_high_rwnd/2), bbr->r_ctl.rc_pace_max_segs))) { 12771 /* 12772 * We are in persist, not doing a retransmit and don't have enough space 12773 * yet to send a full TSO. So is it at the end of the sb 12774 * if so we need to send else nuke to 0 and don't send. 12775 */ 12776 int sbleft; 12777 if (sbavail(sb) > sb_offset) 12778 sbleft = sbavail(sb) - sb_offset; 12779 else 12780 sbleft = 0; 12781 if (sbleft >= min((bbr->r_ctl.rc_high_rwnd/2), bbr->r_ctl.rc_pace_max_segs)) { 12782 /* not at end of sb lets not send */ 12783 len = 0; 12784 } 12785 } 12786 /* 12787 * Decide if we can use TCP Segmentation Offloading (if supported by 12788 * hardware). 12789 * 12790 * TSO may only be used if we are in a pure bulk sending state. The 12791 * presence of TCP-MD5, SACK retransmits, SACK advertizements and IP 12792 * options prevent using TSO. With TSO the TCP header is the same 12793 * (except for the sequence number) for all generated packets. This 12794 * makes it impossible to transmit any options which vary per 12795 * generated segment or packet. 12796 * 12797 * IPv4 handling has a clear separation of ip options and ip header 12798 * flags while IPv6 combines both in in6p_outputopts. ip6_optlen() 12799 * does the right thing below to provide length of just ip options 12800 * and thus checking for ipoptlen is enough to decide if ip options 12801 * are present. 
12802 */ 12803 #ifdef INET6 12804 if (isipv6) 12805 ipoptlen = ip6_optlen(inp); 12806 else 12807 #endif 12808 if (inp->inp_options) 12809 ipoptlen = inp->inp_options->m_len - 12810 offsetof(struct ipoption, ipopt_list); 12811 else 12812 ipoptlen = 0; 12813 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 12814 /* 12815 * Pre-calculate here as we save another lookup into the darknesses 12816 * of IPsec that way and can actually decide if TSO is ok. 12817 */ 12818 #ifdef INET6 12819 if (isipv6 && IPSEC_ENABLED(ipv6)) 12820 ipsec_optlen = IPSEC_HDRSIZE(ipv6, inp); 12821 #ifdef INET 12822 else 12823 #endif 12824 #endif /* INET6 */ 12825 #ifdef INET 12826 if (IPSEC_ENABLED(ipv4)) 12827 ipsec_optlen = IPSEC_HDRSIZE(ipv4, inp); 12828 #endif /* INET */ 12829 #endif /* IPSEC */ 12830 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 12831 ipoptlen += ipsec_optlen; 12832 #endif 12833 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && 12834 (len > maxseg) && 12835 (tp->t_port == 0) && 12836 ((tp->t_flags & TF_SIGNATURE) == 0) && 12837 tp->rcv_numsacks == 0 && 12838 ipoptlen == 0) 12839 tso = 1; 12840 12841 recwin = min(max(sbspace(&so->so_rcv), 0), 12842 TCP_MAXWIN << tp->rcv_scale); 12843 /* 12844 * Sender silly window avoidance. We transmit under the following 12845 * conditions when len is non-zero: 12846 * 12847 * - We have a full segment (or more with TSO) - This is the last 12848 * buffer in a write()/send() and we are either idle or running 12849 * NODELAY - we've timed out (e.g. persist timer) - we have more 12850 * then 1/2 the maximum send window's worth of data (receiver may be 12851 * limited the window size) - we need to retransmit 12852 */ 12853 if (rsm) 12854 goto send; 12855 if (len) { 12856 if (sack_rxmit) 12857 goto send; 12858 if (len >= p_maxseg) 12859 goto send; 12860 /* 12861 * NOTE! on localhost connections an 'ack' from the remote 12862 * end may occur synchronously with the output and cause us 12863 * to flush a buffer queued with moretocome. XXX 12864 * 12865 */ 12866 if (((tp->t_flags & TF_MORETOCOME) == 0) && /* normal case */ 12867 ((tp->t_flags & TF_NODELAY) || 12868 ((uint32_t)len + (uint32_t)sb_offset) >= sbavail(&so->so_snd)) && 12869 (tp->t_flags & TF_NOPUSH) == 0) { 12870 goto send; 12871 } 12872 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ 12873 goto send; 12874 } 12875 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { 12876 goto send; 12877 } 12878 } 12879 /* 12880 * Sending of standalone window updates. 12881 * 12882 * Window updates are important when we close our window due to a 12883 * full socket buffer and are opening it again after the application 12884 * reads data from it. Once the window has opened again and the 12885 * remote end starts to send again the ACK clock takes over and 12886 * provides the most current window information. 12887 * 12888 * We must avoid the silly window syndrome whereas every read from 12889 * the receive buffer, no matter how small, causes a window update 12890 * to be sent. We also should avoid sending a flurry of window 12891 * updates when the socket buffer had queued a lot of data and the 12892 * application is doing small reads. 12893 * 12894 * Prevent a flurry of pointless window updates by only sending an 12895 * update when we can increase the advertized window by more than 12896 * 1/4th of the socket buffer capacity. When the buffer is getting 12897 * full or is very small be more aggressive and send an update 12898 * whenever we can increase by two mss sized segments. 
In all other 12899 * situations the ACK's to new incoming data will carry further 12900 * window increases. 12901 * 12902 * Don't send an independent window update if a delayed ACK is 12903 * pending (it will get piggy-backed on it) or the remote side 12904 * already has done a half-close and won't send more data. Skip 12905 * this if the connection is in T/TCP half-open state. 12906 */ 12907 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && 12908 !(tp->t_flags & TF_DELACK) && 12909 !TCPS_HAVERCVDFIN(tp->t_state)) { 12910 /* Check to see if we should do a window update */ 12911 if (bbr_window_update_needed(tp, so, recwin, maxseg)) 12912 goto send; 12913 } 12914 /* 12915 * Send if we owe the peer an ACK, RST, SYN. ACKNOW 12916 * is also a catch-all for the retransmit timer timeout case. 12917 */ 12918 if (tp->t_flags & TF_ACKNOW) { 12919 goto send; 12920 } 12921 if (flags & TH_RST) { 12922 /* Always send a RST if one is due */ 12923 goto send; 12924 } 12925 if ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0) { 12926 goto send; 12927 } 12928 /* 12929 * If our state indicates that FIN should be sent and we have not 12930 * yet done so, then we need to send. 12931 */ 12932 if (flags & TH_FIN && 12933 ((tp->t_flags & TF_SENTFIN) == 0)) { 12934 goto send; 12935 } 12936 /* 12937 * No reason to send a segment, just return. 12938 */ 12939 just_return: 12940 SOCKBUF_UNLOCK(sb); 12941 just_return_nolock: 12942 if (tot_len) 12943 slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0); 12944 if (bbr->rc_no_pacing) 12945 slot = 0; 12946 if (tot_len == 0) { 12947 if ((ctf_outstanding(tp) + min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) >= 12948 tp->snd_wnd) { 12949 BBR_STAT_INC(bbr_rwnd_limited); 12950 app_limited = BBR_JR_RWND_LIMITED; 12951 bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp)); 12952 if ((bbr->rc_in_persist == 0) && 12953 TCPS_HAVEESTABLISHED(tp->t_state) && 12954 (tp->snd_max == tp->snd_una) && 12955 sbavail(&tp->t_inpcb->inp_socket->so_snd)) { 12956 /* No send window.. we must enter persist */ 12957 bbr_enter_persist(tp, bbr, bbr->r_ctl.rc_rcvtime, __LINE__); 12958 } 12959 } else if (ctf_outstanding(tp) >= sbavail(sb)) { 12960 BBR_STAT_INC(bbr_app_limited); 12961 app_limited = BBR_JR_APP_LIMITED; 12962 bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp)); 12963 } else if ((ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 12964 bbr->r_ctl.rc_lost_bytes)) + p_maxseg) >= tp->snd_cwnd) { 12965 BBR_STAT_INC(bbr_cwnd_limited); 12966 app_limited = BBR_JR_CWND_LIMITED; 12967 bbr_cwnd_limiting(tp, bbr, ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 12968 bbr->r_ctl.rc_lost_bytes))); 12969 bbr->rc_cwnd_limited = 1; 12970 } else { 12971 BBR_STAT_INC(bbr_app_limited); 12972 app_limited = BBR_JR_APP_LIMITED; 12973 bbr_cwnd_limiting(tp, bbr, ctf_outstanding(tp)); 12974 } 12975 bbr->r_ctl.rc_hptsi_agg_delay = 0; 12976 bbr->r_agg_early_set = 0; 12977 bbr->r_ctl.rc_agg_early = 0; 12978 bbr->r_ctl.rc_last_delay_val = 0; 12979 } else if (bbr->rc_use_google == 0) 12980 bbr_check_bbr_for_state(bbr, cts, __LINE__, 0); 12981 /* Are we app limited? */ 12982 if ((app_limited == BBR_JR_APP_LIMITED) || 12983 (app_limited == BBR_JR_RWND_LIMITED)) { 12984 /** 12985 * We are application limited. 
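 * Record (flight in the network + bytes delivered so far) below; bandwidth samples taken before the delivered count passes this level may be treated as application limited.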
12986 */ 12987 bbr->r_ctl.r_app_limited_until = (ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 12988 bbr->r_ctl.rc_lost_bytes)) + bbr->r_ctl.rc_delivered); 12989 } 12990 if (tot_len == 0) 12991 counter_u64_add(bbr_out_size[TCP_MSS_ACCT_JUSTRET], 1); 12992 /* Dont update the time if we did not send */ 12993 bbr->r_ctl.rc_last_delay_val = 0; 12994 bbr->rc_output_starts_timer = 1; 12995 bbr_start_hpts_timer(bbr, tp, cts, 9, slot, tot_len); 12996 bbr_log_type_just_return(bbr, cts, tot_len, hpts_calling, app_limited, p_maxseg, len); 12997 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 12998 /* Make sure snd_nxt is drug up */ 12999 tp->snd_nxt = tp->snd_max; 13000 } 13001 return (error); 13002 13003 send: 13004 if (doing_tlp == 0) { 13005 /* 13006 * Data not a TLP, and its not the rxt firing. If it is the 13007 * rxt firing, we want to leave the tlp_in_progress flag on 13008 * so we don't send another TLP. It has to be a rack timer 13009 * or normal send (response to acked data) to clear the tlp 13010 * in progress flag. 13011 */ 13012 bbr->rc_tlp_in_progress = 0; 13013 bbr->rc_tlp_rtx_out = 0; 13014 } else { 13015 /* 13016 * Its a TLP. 13017 */ 13018 bbr->rc_tlp_in_progress = 1; 13019 } 13020 bbr_timer_cancel(bbr, __LINE__, cts); 13021 if (rsm == NULL) { 13022 if (sbused(sb) > 0) { 13023 /* 13024 * This is sub-optimal. We only send a stand alone 13025 * FIN on its own segment. 13026 */ 13027 if (flags & TH_FIN) { 13028 flags &= ~TH_FIN; 13029 if ((len == 0) && ((tp->t_flags & TF_ACKNOW) == 0)) { 13030 /* Lets not send this */ 13031 slot = 0; 13032 goto just_return; 13033 } 13034 } 13035 } 13036 } else { 13037 /* 13038 * We do *not* send a FIN on a retransmit if it has data. 13039 * The if clause here where len > 1 should never come true. 13040 */ 13041 if ((len > 0) && 13042 (((rsm->r_flags & BBR_HAS_FIN) == 0) && 13043 (flags & TH_FIN))) { 13044 flags &= ~TH_FIN; 13045 len--; 13046 } 13047 } 13048 SOCKBUF_LOCK_ASSERT(sb); 13049 if (len > 0) { 13050 if ((tp->snd_una == tp->snd_max) && 13051 (bbr_calc_time(cts, bbr->r_ctl.rc_went_idle_time) >= bbr_rtt_probe_time)) { 13052 /* 13053 * This qualifies as a RTT_PROBE session since we 13054 * drop the data outstanding to nothing and waited 13055 * more than bbr_rtt_probe_time. 13056 */ 13057 bbr_log_rtt_shrinks(bbr, cts, 0, 0, __LINE__, BBR_RTTS_WASIDLE, 0); 13058 bbr_set_reduced_rtt(bbr, cts, __LINE__); 13059 } 13060 if (len >= maxseg) 13061 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; 13062 else 13063 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; 13064 } 13065 /* 13066 * Before ESTABLISHED, force sending of initial options unless TCP 13067 * set not to do any options. NOTE: we assume that the IP/TCP header 13068 * plus TCP options always fit in a single mbuf, leaving room for a 13069 * maximum link header, i.e. max_linkhdr + sizeof (struct tcpiphdr) 13070 * + optlen <= MCLBYTES 13071 */ 13072 optlen = 0; 13073 #ifdef INET6 13074 if (isipv6) 13075 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 13076 else 13077 #endif 13078 hdrlen = sizeof(struct tcpiphdr); 13079 13080 /* 13081 * Compute options for segment. We only have to care about SYN and 13082 * established connection segments. Options for SYN-ACK segments 13083 * are handled in TCP syncache. 13084 */ 13085 to.to_flags = 0; 13086 local_options = 0; 13087 if ((tp->t_flags & TF_NOOPT) == 0) { 13088 /* Maximum segment size. 
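 * The MSS option only goes out on SYN segments; when UDP tunneling is in use (t_port != 0) the tunneling overhead is subtracted from the advertised value.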
*/ 13089 if (flags & TH_SYN) { 13090 to.to_mss = tcp_mssopt(&inp->inp_inc); 13091 #ifdef NETFLIX_TCPOUDP 13092 if (tp->t_port) 13093 to.to_mss -= V_tcp_udp_tunneling_overhead; 13094 #endif 13095 to.to_flags |= TOF_MSS; 13096 /* 13097 * On SYN or SYN|ACK transmits on TFO connections, 13098 * only include the TFO option if it is not a 13099 * retransmit, as the presence of the TFO option may 13100 * have caused the original SYN or SYN|ACK to have 13101 * been dropped by a middlebox. 13102 */ 13103 if (IS_FASTOPEN(tp->t_flags) && 13104 (tp->t_rxtshift == 0)) { 13105 if (tp->t_state == TCPS_SYN_RECEIVED) { 13106 to.to_tfo_len = TCP_FASTOPEN_COOKIE_LEN; 13107 to.to_tfo_cookie = 13108 (u_int8_t *)&tp->t_tfo_cookie.server; 13109 to.to_flags |= TOF_FASTOPEN; 13110 wanted_cookie = 1; 13111 } else if (tp->t_state == TCPS_SYN_SENT) { 13112 to.to_tfo_len = 13113 tp->t_tfo_client_cookie_len; 13114 to.to_tfo_cookie = 13115 tp->t_tfo_cookie.client; 13116 to.to_flags |= TOF_FASTOPEN; 13117 wanted_cookie = 1; 13118 } 13119 } 13120 } 13121 /* Window scaling. */ 13122 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { 13123 to.to_wscale = tp->request_r_scale; 13124 to.to_flags |= TOF_SCALE; 13125 } 13126 /* Timestamps. */ 13127 if ((tp->t_flags & TF_RCVD_TSTMP) || 13128 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { 13129 to.to_tsval = tcp_tv_to_mssectick(&bbr->rc_tv) + tp->ts_offset; 13130 to.to_tsecr = tp->ts_recent; 13131 to.to_flags |= TOF_TS; 13132 local_options += TCPOLEN_TIMESTAMP + 2; 13133 } 13134 /* Set receive buffer autosizing timestamp. */ 13135 if (tp->rfbuf_ts == 0 && 13136 (so->so_rcv.sb_flags & SB_AUTOSIZE)) 13137 tp->rfbuf_ts = tcp_tv_to_mssectick(&bbr->rc_tv); 13138 /* Selective ACK's. */ 13139 if (flags & TH_SYN) 13140 to.to_flags |= TOF_SACKPERM; 13141 else if (TCPS_HAVEESTABLISHED(tp->t_state) && 13142 tp->rcv_numsacks > 0) { 13143 to.to_flags |= TOF_SACK; 13144 to.to_nsacks = tp->rcv_numsacks; 13145 to.to_sacks = (u_char *)tp->sackblks; 13146 } 13147 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 13148 /* TCP-MD5 (RFC2385). */ 13149 if (tp->t_flags & TF_SIGNATURE) 13150 to.to_flags |= TOF_SIGNATURE; 13151 #endif /* TCP_SIGNATURE */ 13152 13153 /* Processing the options. */ 13154 hdrlen += (optlen = tcp_addoptions(&to, opt)); 13155 /* 13156 * If we wanted a TFO option to be added, but it was unable 13157 * to fit, ensure no data is sent. 13158 */ 13159 if (IS_FASTOPEN(tp->t_flags) && wanted_cookie && 13160 !(to.to_flags & TOF_FASTOPEN)) 13161 len = 0; 13162 } 13163 #ifdef NETFLIX_TCPOUDP 13164 if (tp->t_port) { 13165 if (V_tcp_udp_tunneling_port == 0) { 13166 /* The port was removed?? */ 13167 SOCKBUF_UNLOCK(&so->so_snd); 13168 return (EHOSTUNREACH); 13169 } 13170 hdrlen += sizeof(struct udphdr); 13171 } 13172 #endif 13173 #ifdef INET6 13174 if (isipv6) 13175 ipoptlen = ip6_optlen(tp->t_inpcb); 13176 else 13177 #endif 13178 if (tp->t_inpcb->inp_options) 13179 ipoptlen = tp->t_inpcb->inp_options->m_len - 13180 offsetof(struct ipoption, ipopt_list); 13181 else 13182 ipoptlen = 0; 13183 ipoptlen = 0; 13184 #if defined(IPSEC) || defined(IPSEC_SUPPORT) 13185 ipoptlen += ipsec_optlen; 13186 #endif 13187 if (bbr->rc_last_options != local_options) { 13188 /* 13189 * Cache the options length this generally does not change 13190 * on a connection. We use this to calculate TSO. 
13191 */ 13192 bbr->rc_last_options = local_options; 13193 } 13194 maxseg = tp->t_maxseg - (ipoptlen + optlen); 13195 p_maxseg = min(maxseg, pace_max_segs); 13196 /* 13197 * Adjust data length if insertion of options will bump the packet 13198 * length beyond the t_maxseg length. Clear the FIN bit because we 13199 * cut off the tail of the segment. 13200 */ 13201 #ifdef KERN_TLS 13202 /* force TSO for so TLS offload can get mss */ 13203 if (sb->sb_flags & SB_TLS_IFNET) { 13204 force_tso = 1; 13205 } 13206 #endif 13207 13208 if (len > maxseg) { 13209 if (len != 0 && (flags & TH_FIN)) { 13210 flags &= ~TH_FIN; 13211 } 13212 if (tso) { 13213 uint32_t moff; 13214 int32_t max_len; 13215 13216 /* extract TSO information */ 13217 if_hw_tsomax = tp->t_tsomax; 13218 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; 13219 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; 13220 KASSERT(ipoptlen == 0, 13221 ("%s: TSO can't do IP options", __func__)); 13222 13223 /* 13224 * Check if we should limit by maximum payload 13225 * length: 13226 */ 13227 if (if_hw_tsomax != 0) { 13228 /* compute maximum TSO length */ 13229 max_len = (if_hw_tsomax - hdrlen - 13230 max_linkhdr); 13231 if (max_len <= 0) { 13232 len = 0; 13233 } else if (len > max_len) { 13234 len = max_len; 13235 } 13236 } 13237 /* 13238 * Prevent the last segment from being fractional 13239 * unless the send sockbuf can be emptied: 13240 */ 13241 if (((sb_offset + len) < sbavail(sb)) && 13242 (hw_tls == 0)) { 13243 moff = len % (uint32_t)maxseg; 13244 if (moff != 0) { 13245 len -= moff; 13246 } 13247 } 13248 /* 13249 * In case there are too many small fragments don't 13250 * use TSO: 13251 */ 13252 if (len <= maxseg) { 13253 len = maxseg; 13254 tso = 0; 13255 } 13256 } else { 13257 /* Not doing TSO */ 13258 if (optlen + ipoptlen >= tp->t_maxseg) { 13259 /* 13260 * Since we don't have enough space to put 13261 * the IP header chain and the TCP header in 13262 * one packet as required by RFC 7112, don't 13263 * send it. Also ensure that at least one 13264 * byte of the payload can be put into the 13265 * TCP segment. 13266 */ 13267 SOCKBUF_UNLOCK(&so->so_snd); 13268 error = EMSGSIZE; 13269 sack_rxmit = 0; 13270 goto out; 13271 } 13272 len = maxseg; 13273 } 13274 } else { 13275 /* Not doing TSO */ 13276 if_hw_tsomaxsegcount = 0; 13277 tso = 0; 13278 } 13279 KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET, 13280 ("%s: len > IP_MAXPACKET", __func__)); 13281 #ifdef DIAGNOSTIC 13282 #ifdef INET6 13283 if (max_linkhdr + hdrlen > MCLBYTES) 13284 #else 13285 if (max_linkhdr + hdrlen > MHLEN) 13286 #endif 13287 panic("tcphdr too big"); 13288 #endif 13289 /* 13290 * This KASSERT is here to catch edge cases at a well defined place. 13291 * Before, those had triggered (random) panic conditions further 13292 * down. 13293 */ 13294 #ifdef BBR_INVARIANTS 13295 if (sack_rxmit) { 13296 if (SEQ_LT(rsm->r_start, tp->snd_una)) { 13297 panic("RSM:%p TP:%p bbr:%p start:%u is < snd_una:%u", 13298 rsm, tp, bbr, rsm->r_start, tp->snd_una); 13299 } 13300 } 13301 #endif 13302 KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__)); 13303 if ((len == 0) && 13304 (flags & TH_FIN) && 13305 (sbused(sb))) { 13306 /* 13307 * We have outstanding data, don't send a fin by itself!. 13308 */ 13309 slot = 0; 13310 goto just_return; 13311 } 13312 /* 13313 * Grab a header mbuf, attaching a copy of data to be transmitted, 13314 * and initialize the header from the template for sends on this 13315 * connection. 
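 * Two cases follow: when len is non-zero the payload is copied (or referenced) out of the socket buffer into the chain; when len is zero we build a header-only mbuf for a pure ACK, control or window-update segment.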
13316 */ 13317 if (len) { 13318 uint32_t moff; 13319 uint32_t orig_len; 13320 13321 /* 13322 * We place a limit on sending with hptsi. 13323 */ 13324 if ((rsm == NULL) && len > pace_max_segs) 13325 len = pace_max_segs; 13326 if (len <= maxseg) 13327 tso = 0; 13328 #ifdef INET6 13329 if (MHLEN < hdrlen + max_linkhdr) 13330 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 13331 else 13332 #endif 13333 m = m_gethdr(M_NOWAIT, MT_DATA); 13334 13335 if (m == NULL) { 13336 BBR_STAT_INC(bbr_failed_mbuf_aloc); 13337 bbr_log_enobuf_jmp(bbr, len, cts, __LINE__, len, 0, 0); 13338 SOCKBUF_UNLOCK(sb); 13339 error = ENOBUFS; 13340 sack_rxmit = 0; 13341 goto out; 13342 } 13343 m->m_data += max_linkhdr; 13344 m->m_len = hdrlen; 13345 /* 13346 * Start the m_copy functions from the closest mbuf to the 13347 * sb_offset in the socket buffer chain. 13348 */ 13349 if ((sb_offset > sbavail(sb)) || ((len + sb_offset) > sbavail(sb))) { 13350 #ifdef BBR_INVARIANTS 13351 if ((len + sb_offset) > (sbavail(sb) + ((flags & (TH_FIN | TH_SYN)) ? 1 : 0))) 13352 panic("tp:%p bbr:%p len:%u sb_offset:%u sbavail:%u rsm:%p %u:%u:%u", 13353 tp, bbr, len, sb_offset, sbavail(sb), rsm, 13354 doing_retran_from, 13355 picked_up_retran, 13356 doing_tlp); 13357 13358 #endif 13359 /* 13360 * In this messed up situation we have two choices, 13361 * a) pretend the send worked, and just start timers 13362 * and what not (not good since that may lead us 13363 * back here a lot). <or> b) Send the lowest segment 13364 * in the map. <or> c) Drop the connection. Lets do 13365 * <b> which if it continues to happen will lead to 13366 * <c> via timeouts. 13367 */ 13368 BBR_STAT_INC(bbr_offset_recovery); 13369 rsm = TAILQ_FIRST(&bbr->r_ctl.rc_map); 13370 sb_offset = 0; 13371 if (rsm == NULL) { 13372 sack_rxmit = 0; 13373 len = sbavail(sb); 13374 } else { 13375 sack_rxmit = 1; 13376 if (rsm->r_start != tp->snd_una) { 13377 /* 13378 * Things are really messed up, <c> 13379 * is the only thing to do. 13380 */ 13381 BBR_STAT_INC(bbr_offset_drop); 13382 tcp_set_inp_to_drop(inp, EFAULT); 13383 return (0); 13384 } 13385 len = rsm->r_end - rsm->r_start; 13386 } 13387 if (len > sbavail(sb)) 13388 len = sbavail(sb); 13389 if (len > maxseg) 13390 len = maxseg; 13391 } 13392 mb = sbsndptr_noadv(sb, sb_offset, &moff); 13393 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { 13394 m_copydata(mb, moff, (int)len, 13395 mtod(m, caddr_t)+hdrlen); 13396 if (rsm == NULL) 13397 sbsndptr_adv(sb, mb, len); 13398 m->m_len += len; 13399 } else { 13400 struct sockbuf *msb; 13401 13402 if (rsm) 13403 msb = NULL; 13404 else 13405 msb = sb; 13406 #ifdef BBR_INVARIANTS 13407 if ((len + moff) > (sbavail(sb) + ((flags & (TH_FIN | TH_SYN)) ? 1 : 0))) { 13408 if (rsm) { 13409 panic("tp:%p bbr:%p len:%u moff:%u sbavail:%u rsm:%p snd_una:%u rsm_start:%u flg:%x %u:%u:%u sr:%d ", 13410 tp, bbr, len, moff, 13411 sbavail(sb), rsm, 13412 tp->snd_una, rsm->r_flags, rsm->r_start, 13413 doing_retran_from, 13414 picked_up_retran, 13415 doing_tlp, sack_rxmit); 13416 } else { 13417 panic("tp:%p bbr:%p len:%u moff:%u sbavail:%u sb_offset:%u snd_una:%u", 13418 tp, bbr, len, moff, sbavail(sb), sb_offset, tp->snd_una); 13419 } 13420 } 13421 #endif 13422 orig_len = len; 13423 m->m_next = tcp_m_copym( 13424 mb, moff, &len, 13425 if_hw_tsomaxsegcount, 13426 if_hw_tsomaxsegsize, msb, 13427 ((rsm == NULL) ? 
hw_tls : 0) 13428 #ifdef NETFLIX_COPY_ARGS 13429 , &filled_all 13430 #endif 13431 ); 13432 if (len <= maxseg && !force_tso) { 13433 /* 13434 * Must have ran out of mbufs for the copy 13435 * shorten it to no longer need tso. Lets 13436 * not put on sendalot since we are low on 13437 * mbufs. 13438 */ 13439 tso = 0; 13440 } 13441 if (m->m_next == NULL) { 13442 SOCKBUF_UNLOCK(sb); 13443 (void)m_free(m); 13444 error = ENOBUFS; 13445 sack_rxmit = 0; 13446 goto out; 13447 } 13448 } 13449 #ifdef BBR_INVARIANTS 13450 if (tso && len < maxseg) { 13451 panic("tp:%p tso on, but len:%d < maxseg:%d", 13452 tp, len, maxseg); 13453 } 13454 if (tso && if_hw_tsomaxsegcount) { 13455 int32_t seg_cnt = 0; 13456 struct mbuf *foo; 13457 13458 foo = m; 13459 while (foo) { 13460 seg_cnt++; 13461 foo = foo->m_next; 13462 } 13463 if (seg_cnt > if_hw_tsomaxsegcount) { 13464 panic("seg_cnt:%d > max:%d", seg_cnt, if_hw_tsomaxsegcount); 13465 } 13466 } 13467 #endif 13468 /* 13469 * If we're sending everything we've got, set PUSH. (This 13470 * will keep happy those implementations which only give 13471 * data to the user when a buffer fills or a PUSH comes in.) 13472 */ 13473 if (sb_offset + len == sbused(sb) && 13474 sbused(sb) && 13475 !(flags & TH_SYN)) { 13476 flags |= TH_PUSH; 13477 } 13478 SOCKBUF_UNLOCK(sb); 13479 } else { 13480 SOCKBUF_UNLOCK(sb); 13481 if (tp->t_flags & TF_ACKNOW) 13482 KMOD_TCPSTAT_INC(tcps_sndacks); 13483 else if (flags & (TH_SYN | TH_FIN | TH_RST)) 13484 KMOD_TCPSTAT_INC(tcps_sndctrl); 13485 else 13486 KMOD_TCPSTAT_INC(tcps_sndwinup); 13487 13488 m = m_gethdr(M_NOWAIT, MT_DATA); 13489 if (m == NULL) { 13490 BBR_STAT_INC(bbr_failed_mbuf_aloc); 13491 bbr_log_enobuf_jmp(bbr, len, cts, __LINE__, len, 0, 0); 13492 error = ENOBUFS; 13493 /* Fudge the send time since we could not send */ 13494 sack_rxmit = 0; 13495 goto out; 13496 } 13497 #ifdef INET6 13498 if (isipv6 && (MHLEN < hdrlen + max_linkhdr) && 13499 MHLEN >= hdrlen) { 13500 M_ALIGN(m, hdrlen); 13501 } else 13502 #endif 13503 m->m_data += max_linkhdr; 13504 m->m_len = hdrlen; 13505 } 13506 SOCKBUF_UNLOCK_ASSERT(sb); 13507 m->m_pkthdr.rcvif = (struct ifnet *)0; 13508 #ifdef MAC 13509 mac_inpcb_create_mbuf(inp, m); 13510 #endif 13511 #ifdef INET6 13512 if (isipv6) { 13513 ip6 = mtod(m, struct ip6_hdr *); 13514 #ifdef NETFLIX_TCPOUDP 13515 if (tp->t_port) { 13516 udp = (struct udphdr *)((caddr_t)ip6 + ipoptlen + sizeof(struct ip6_hdr)); 13517 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 13518 udp->uh_dport = tp->t_port; 13519 ulen = hdrlen + len - sizeof(struct ip6_hdr); 13520 udp->uh_ulen = htons(ulen); 13521 th = (struct tcphdr *)(udp + 1); 13522 } else { 13523 #endif 13524 th = (struct tcphdr *)(ip6 + 1); 13525 13526 #ifdef NETFLIX_TCPOUDP 13527 } 13528 #endif 13529 tcpip_fillheaders(inp, 13530 #ifdef NETFLIX_TCPOUDP 13531 tp->t_port, 13532 #endif 13533 ip6, th); 13534 } else 13535 #endif /* INET6 */ 13536 { 13537 ip = mtod(m, struct ip *); 13538 #ifdef TCPDEBUG 13539 ipov = (struct ipovly *)ip; 13540 #endif 13541 #ifdef NETFLIX_TCPOUDP 13542 if (tp->t_port) { 13543 udp = (struct udphdr *)((caddr_t)ip + ipoptlen + sizeof(struct ip)); 13544 udp->uh_sport = htons(V_tcp_udp_tunneling_port); 13545 udp->uh_dport = tp->t_port; 13546 ulen = hdrlen + len - sizeof(struct ip); 13547 udp->uh_ulen = htons(ulen); 13548 th = (struct tcphdr *)(udp + 1); 13549 } else 13550 #endif 13551 th = (struct tcphdr *)(ip + 1); 13552 tcpip_fillheaders(inp, 13553 #ifdef NETFLIX_TCPOUDP 13554 tp->t_port, 13555 #endif 13556 ip, th); 13557 } 13558 /* 13559 * If we 
are doing retransmissions, then snd_nxt will not reflect 13560 * the first unsent octet. For ACK only packets, we do not want the 13561 * sequence number of the retransmitted packet, we want the sequence 13562 * number of the next unsent octet. So, if there is no data (and no 13563 * SYN or FIN), use snd_max instead of snd_nxt when filling in 13564 * ti_seq. But if we are in persist state, snd_max might reflect 13565 * one byte beyond the right edge of the window, so use snd_nxt in 13566 * that case, since we know we aren't doing a retransmission. 13567 * (retransmit and persist are mutually exclusive...) 13568 */ 13569 if (sack_rxmit == 0) { 13570 if (len && ((flags & (TH_FIN | TH_SYN | TH_RST)) == 0)) { 13571 /* New data (including new persists) */ 13572 th->th_seq = htonl(tp->snd_max); 13573 bbr_seq = tp->snd_max; 13574 } else if (flags & TH_SYN) { 13575 /* Syn's always send from iss */ 13576 th->th_seq = htonl(tp->iss); 13577 bbr_seq = tp->iss; 13578 } else if (flags & TH_FIN) { 13579 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN) { 13580 /* 13581 * If we sent the fin already its 1 minus 13582 * snd_max 13583 */ 13584 th->th_seq = (htonl(tp->snd_max - 1)); 13585 bbr_seq = (tp->snd_max - 1); 13586 } else { 13587 /* First time FIN use snd_max */ 13588 th->th_seq = htonl(tp->snd_max); 13589 bbr_seq = tp->snd_max; 13590 } 13591 } else if (flags & TH_RST) { 13592 /* 13593 * For a Reset send the last cum ack in sequence 13594 * (this like any other choice may still generate a 13595 * challenge ack, if a ack-update packet is in 13596 * flight). 13597 */ 13598 th->th_seq = htonl(tp->snd_una); 13599 bbr_seq = tp->snd_una; 13600 } else { 13601 /* 13602 * len == 0 and not persist we use snd_max, sending 13603 * an ack unless we have sent the fin then its 1 13604 * minus. 13605 */ 13606 /* 13607 * XXXRRS Question if we are in persists and we have 13608 * nothing outstanding to send and we have not sent 13609 * a FIN, we will send an ACK. In such a case it 13610 * might be better to send (tp->snd_una - 1) which 13611 * would force the peer to ack. 13612 */ 13613 if (tp->t_flags & TF_SENTFIN) { 13614 th->th_seq = htonl(tp->snd_max - 1); 13615 bbr_seq = (tp->snd_max - 1); 13616 } else { 13617 th->th_seq = htonl(tp->snd_max); 13618 bbr_seq = tp->snd_max; 13619 } 13620 } 13621 } else { 13622 /* All retransmits use the rsm to guide the send */ 13623 th->th_seq = htonl(rsm->r_start); 13624 bbr_seq = rsm->r_start; 13625 } 13626 th->th_ack = htonl(tp->rcv_nxt); 13627 if (optlen) { 13628 bcopy(opt, th + 1, optlen); 13629 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; 13630 } 13631 th->th_flags = flags; 13632 /* 13633 * Calculate receive window. Don't shrink window, but avoid silly 13634 * window syndrome. 13635 */ 13636 if ((flags & TH_RST) || ((recwin < (so->so_rcv.sb_hiwat / 4) && 13637 recwin < maxseg))) 13638 recwin = 0; 13639 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && 13640 recwin < (tp->rcv_adv - tp->rcv_nxt)) 13641 recwin = (tp->rcv_adv - tp->rcv_nxt); 13642 if (recwin > TCP_MAXWIN << tp->rcv_scale) 13643 recwin = TCP_MAXWIN << tp->rcv_scale; 13644 13645 /* 13646 * According to RFC1323 the window field in a SYN (i.e., a <SYN> or 13647 * <SYN,ACK>) segment itself is never scaled. The <SYN,ACK> case is 13648 * handled in syncache. 13649 */ 13650 if (flags & TH_SYN) 13651 th->th_win = htons((u_short) 13652 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); 13653 else { 13654 /* Avoid shrinking window with window scaling. 
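 * Rounding recwin up to a multiple of 1 << rcv_scale means the value the peer sees (recwin >> rcv_scale) never truncates downward and appears to shrink.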
*/ 13655 recwin = roundup2(recwin, 1 << tp->rcv_scale); 13656 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); 13657 } 13658 /* 13659 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 13660 * window. This may cause the remote transmitter to stall. This 13661 * flag tells soreceive() to disable delayed acknowledgements when 13662 * draining the buffer. This can occur if the receiver is 13663 * attempting to read more data than can be buffered prior to 13664 * transmitting on the connection. 13665 */ 13666 if (th->th_win == 0) { 13667 tp->t_sndzerowin++; 13668 tp->t_flags |= TF_RXWIN0SENT; 13669 } else 13670 tp->t_flags &= ~TF_RXWIN0SENT; 13671 /* 13672 * We don't support urgent data, but drag along 13673 * the pointer in case of a stack switch. 13674 */ 13675 tp->snd_up = tp->snd_una; 13676 13677 #if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE) 13678 if (to.to_flags & TOF_SIGNATURE) { 13679 /* 13680 * Calculate MD5 signature and put it into the place 13681 * determined before. NOTE: since TCP options buffer doesn't 13682 * point into mbuf's data, calculate offset and use it. 13683 */ 13684 if (!TCPMD5_ENABLED() || TCPMD5_OUTPUT(m, th, 13685 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { 13686 /* 13687 * Do not send segment if the calculation of MD5 13688 * digest has failed. 13689 */ 13690 goto out; 13691 } 13692 } 13693 #endif 13694 13695 /* 13696 * Put TCP length in extended header, and then checksum extended 13697 * header and data. 13698 */ 13699 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ 13700 #ifdef INET6 13701 if (isipv6) { 13702 /* 13703 * ip6_plen is not need to be filled now, and will be filled 13704 * in ip6_output. 13705 */ 13706 #ifdef NETFLIX_TCPOUDP 13707 if (tp->t_port) { 13708 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; 13709 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 13710 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); 13711 th->th_sum = htons(0); 13712 UDPSTAT_INC(udps_opackets); 13713 } else { 13714 #endif 13715 csum_flags = m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; 13716 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 13717 th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) + 13718 optlen + len, IPPROTO_TCP, 0); 13719 #ifdef NETFLIX_TCPOUDP 13720 } 13721 #endif 13722 } 13723 #endif 13724 #if defined(INET6) && defined(INET) 13725 else 13726 #endif 13727 #ifdef INET 13728 { 13729 #ifdef NETFLIX_TCPOUDP 13730 if (tp->t_port) { 13731 m->m_pkthdr.csum_flags = CSUM_UDP; 13732 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); 13733 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, 13734 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); 13735 th->th_sum = htons(0); 13736 UDPSTAT_INC(udps_opackets); 13737 } else { 13738 #endif 13739 csum_flags = m->m_pkthdr.csum_flags = CSUM_TCP; 13740 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); 13741 th->th_sum = in_pseudo(ip->ip_src.s_addr, 13742 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + 13743 IPPROTO_TCP + len + optlen)); 13744 #ifdef NETFLIX_TCPOUDP 13745 } 13746 #endif 13747 /* IP version must be set here for ipv4/ipv6 checking later */ 13748 KASSERT(ip->ip_v == IPVERSION, 13749 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); 13750 } 13751 #endif 13752 13753 /* 13754 * Enable TSO and specify the size of the segments. The TCP pseudo 13755 * header checksum is always provided. XXX: Fixme: This is currently 13756 * not the case for IPv6. 
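 * tso_segsz below is the per-segment payload size the driver or NIC uses when it carves the chain into individual segments.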
13757 */ 13758 if (tso || force_tso) { 13759 KASSERT(force_tso || len > maxseg, 13760 ("%s: len:%d <= tso_segsz:%d", __func__, len, maxseg)); 13761 m->m_pkthdr.csum_flags |= CSUM_TSO; 13762 csum_flags |= CSUM_TSO; 13763 m->m_pkthdr.tso_segsz = maxseg; 13764 } 13765 KASSERT(len + hdrlen == m_length(m, NULL), 13766 ("%s: mbuf chain different than expected: %d + %u != %u", 13767 __func__, len, hdrlen, m_length(m, NULL))); 13768 13769 #ifdef TCP_HHOOK 13770 /* Run HHOOK_TC_ESTABLISHED_OUT helper hooks. */ 13771 hhook_run_tcp_est_out(tp, th, &to, len, tso); 13772 #endif 13773 #ifdef TCPDEBUG 13774 /* 13775 * Trace. 13776 */ 13777 if (so->so_options & SO_DEBUG) { 13778 u_short save = 0; 13779 13780 #ifdef INET6 13781 if (!isipv6) 13782 #endif 13783 { 13784 save = ipov->ih_len; 13785 ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + 13786 * (th->th_off << 2) */ ); 13787 } 13788 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); 13789 #ifdef INET6 13790 if (!isipv6) 13791 #endif 13792 ipov->ih_len = save; 13793 } 13794 #endif /* TCPDEBUG */ 13795 13796 /* Log to the black box */ 13797 if (tp->t_logstate != TCP_LOG_STATE_OFF) { 13798 union tcp_log_stackspecific log; 13799 13800 bbr_fill_in_logging_data(bbr, &log.u_bbr, cts); 13801 /* Record info on type of transmission */ 13802 log.u_bbr.flex1 = bbr->r_ctl.rc_hptsi_agg_delay; 13803 log.u_bbr.flex2 = (bbr->r_recovery_bw << 3); 13804 log.u_bbr.flex3 = maxseg; 13805 log.u_bbr.flex4 = delay_calc; 13806 /* Encode filled_all into the upper flex5 bit */ 13807 log.u_bbr.flex5 = bbr->rc_past_init_win; 13808 log.u_bbr.flex5 <<= 1; 13809 log.u_bbr.flex5 |= bbr->rc_no_pacing; 13810 log.u_bbr.flex5 <<= 29; 13811 if (filled_all) 13812 log.u_bbr.flex5 |= 0x80000000; 13813 log.u_bbr.flex5 |= tp->t_maxseg; 13814 log.u_bbr.flex6 = bbr->r_ctl.rc_pace_max_segs; 13815 log.u_bbr.flex7 = (bbr->rc_bbr_state << 8) | bbr_state_val(bbr); 13816 /* let's poke in the low and the high here for debugging */ 13817 log.u_bbr.pkts_out = bbr->rc_tp->t_maxseg; 13818 if (rsm || sack_rxmit) { 13819 if (doing_tlp) 13820 log.u_bbr.flex8 = 2; 13821 else 13822 log.u_bbr.flex8 = 1; 13823 } else { 13824 log.u_bbr.flex8 = 0; 13825 } 13826 lgb = tcp_log_event_(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, 13827 len, &log, false, NULL, NULL, 0, tv); 13828 } else { 13829 lgb = NULL; 13830 } 13831 /* 13832 * Fill in IP length and desired time to live and send to IP level. 13833 * There should be a better way to handle ttl and tos; we could keep 13834 * them in the template, but need a way to checksum without them. 13835 */ 13836 /* 13837 * m->m_pkthdr.len should have been set before cksum calculation, 13838 * because in6_cksum() needs it. 13839 */ 13840 #ifdef INET6 13841 if (isipv6) { 13842 /* 13843 * we separately set hoplimit for every segment, since the 13844 * user might want to change the value via setsockopt. Also, 13845 * desired default hop limit might be changed via Neighbor 13846 * Discovery. 13847 */ 13848 ip6->ip6_hlim = in6_selecthlim(inp, NULL); 13849 13850 /* 13851 * Set the packet size here for the benefit of DTrace 13852 * probes. ip6_output() will set it properly; it's supposed 13853 * to include the option header lengths as well.
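* Note that ip6_plen covers only what follows the fixed IPv6
* header, hence the sizeof(*ip6) subtraction below.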
*/ 13855 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); 13856 13857 if (V_path_mtu_discovery && maxseg > V_tcp_minmss) 13858 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 13859 else 13860 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 13861 13862 if (tp->t_state == TCPS_SYN_SENT) 13863 TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th); 13864 13865 TCP_PROBE5(send, NULL, tp, ip6, tp, th); 13866 /* TODO: IPv6 IP6TOS_ECT bit on */ 13867 error = ip6_output(m, inp->in6p_outputopts, 13868 &inp->inp_route6, 13869 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 13870 NULL, NULL, inp); 13871 13872 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) 13873 mtu = inp->inp_route6.ro_nh->nh_mtu; 13874 } 13875 #endif /* INET6 */ 13876 #if defined(INET) && defined(INET6) 13877 else 13878 #endif 13879 #ifdef INET 13880 { 13881 ip->ip_len = htons(m->m_pkthdr.len); 13882 #ifdef INET6 13883 if (isipv6) 13884 ip->ip_ttl = in6_selecthlim(inp, NULL); 13885 #endif /* INET6 */ 13886 /* 13887 * If we do path MTU discovery, then we set DF on every 13888 * packet. This might not be the best thing to do according 13889 * to RFC3390 Section 2. However the tcp hostcache mitigates 13890 * the problem so it affects only the first tcp connection 13891 * with a host. 13892 * 13893 * NB: Don't set DF on small MTU/MSS to have a safe 13894 * fallback. 13895 */ 13896 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { 13897 tp->t_flags2 |= TF2_PLPMTU_PMTUD; 13898 if (tp->t_port == 0 || len < V_tcp_minmss) { 13899 ip->ip_off |= htons(IP_DF); 13900 } 13901 } else { 13902 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; 13903 } 13904 13905 if (tp->t_state == TCPS_SYN_SENT) 13906 TCP_PROBE5(connect__request, NULL, tp, ip, tp, th); 13907 13908 TCP_PROBE5(send, NULL, tp, ip, tp, th); 13909 13910 error = ip_output(m, inp->inp_options, &inp->inp_route, 13911 ((rsm || sack_rxmit) ? IP_NO_SND_TAG_RL : 0), 0, 13912 inp); 13913 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) 13914 mtu = inp->inp_route.ro_nh->nh_mtu; 13915 } 13916 #endif /* INET */ 13917 out: 13918 13919 if (lgb) { 13920 lgb->tlb_errno = error; 13921 lgb = NULL; 13922 } 13923 /* 13924 * In transmit state, time the transmission and arrange for the 13925 * retransmit. In persist state, just set snd_max.
13926 */ 13927 if (error == 0) { 13928 if (TCPS_HAVEESTABLISHED(tp->t_state) && 13929 (tp->t_flags & TF_SACK_PERMIT) && 13930 tp->rcv_numsacks > 0) 13931 tcp_clean_dsack_blocks(tp); 13932 /* We sent an ack clear the bbr_segs_rcvd count */ 13933 bbr->output_error_seen = 0; 13934 bbr->oerror_cnt = 0; 13935 bbr->bbr_segs_rcvd = 0; 13936 if (len == 0) 13937 counter_u64_add(bbr_out_size[TCP_MSS_ACCT_SNDACK], 1); 13938 else if (hw_tls) { 13939 if (filled_all || 13940 (len >= bbr->r_ctl.rc_pace_max_segs)) 13941 BBR_STAT_INC(bbr_meets_tso_thresh); 13942 else { 13943 if (doing_tlp) { 13944 BBR_STAT_INC(bbr_miss_tlp); 13945 bbr_log_type_hrdwtso(tp, bbr, len, 1, what_we_can); 13946 13947 13948 } else if (rsm) { 13949 BBR_STAT_INC(bbr_miss_retran); 13950 bbr_log_type_hrdwtso(tp, bbr, len, 2, what_we_can); 13951 } else if ((ctf_outstanding(tp) + bbr->r_ctl.rc_pace_max_segs) > sbavail(sb)) { 13952 BBR_STAT_INC(bbr_miss_tso_app); 13953 bbr_log_type_hrdwtso(tp, bbr, len, 3, what_we_can); 13954 } else if ((ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 13955 bbr->r_ctl.rc_lost_bytes)) + bbr->r_ctl.rc_pace_max_segs) > tp->snd_cwnd) { 13956 BBR_STAT_INC(bbr_miss_tso_cwnd); 13957 bbr_log_type_hrdwtso(tp, bbr, len, 4, what_we_can); 13958 } else if ((ctf_outstanding(tp) + bbr->r_ctl.rc_pace_max_segs) > tp->snd_wnd) { 13959 BBR_STAT_INC(bbr_miss_tso_rwnd); 13960 bbr_log_type_hrdwtso(tp, bbr, len, 5, what_we_can); 13961 } else { 13962 BBR_STAT_INC(bbr_miss_unknown); 13963 bbr_log_type_hrdwtso(tp, bbr, len, 6, what_we_can); 13964 } 13965 } 13966 } 13967 /* Do accounting for new sends */ 13968 if ((len > 0) && (rsm == NULL)) { 13969 int idx; 13970 if (tp->snd_una == tp->snd_max) { 13971 /* 13972 * Special case to match google, when 13973 * nothing is in flight the delivered 13974 * time does get updated to the current 13975 * time (see tcp_rate_bsd.c). 13976 */ 13977 bbr->r_ctl.rc_del_time = cts; 13978 } 13979 if (len >= maxseg) { 13980 idx = (len / maxseg) + 3; 13981 if (idx >= TCP_MSS_ACCT_ATIMER) 13982 counter_u64_add(bbr_out_size[(TCP_MSS_ACCT_ATIMER - 1)], 1); 13983 else 13984 counter_u64_add(bbr_out_size[idx], 1); 13985 } else { 13986 /* smaller than a MSS */ 13987 idx = len / (bbr_hptsi_bytes_min - bbr->rc_last_options); 13988 if (idx >= TCP_MSS_SMALL_MAX_SIZE_DIV) 13989 idx = (TCP_MSS_SMALL_MAX_SIZE_DIV - 1); 13990 counter_u64_add(bbr_out_size[(idx + TCP_MSS_SMALL_SIZE_OFF)], 1); 13991 } 13992 } 13993 } 13994 abandon = 0; 13995 /* 13996 * We must do the send accounting before we log the output, 13997 * otherwise the state of the rsm could change and we account to the 13998 * wrong bucket. 13999 */ 14000 if (len > 0) { 14001 bbr_do_send_accounting(tp, bbr, rsm, len, error); 14002 if (error == 0) { 14003 if (tp->snd_una == tp->snd_max) 14004 bbr->r_ctl.rc_tlp_rxt_last_time = cts; 14005 } 14006 } 14007 bbr_log_output(bbr, tp, &to, len, bbr_seq, (uint8_t) flags, error, 14008 cts, mb, &abandon, rsm, 0, sb); 14009 if (abandon) { 14010 /* 14011 * If bbr_log_output destroys the TCB or sees a TH_RST being 14012 * sent we should hit this condition. 14013 */ 14014 return (0); 14015 } 14016 if (bbr->rc_in_persist == 0) { 14017 /* 14018 * Advance snd_nxt over sequence space of this segment. 14019 */ 14020 if (error) 14021 /* We don't log or do anything with errors */ 14022 goto skip_upd; 14023 14024 if (tp->snd_una == tp->snd_max && 14025 (len || (flags & (TH_SYN | TH_FIN)))) { 14026 /* 14027 * Update the time we just added data since none was 14028 * outstanding. 
14029 */ 14030 bbr_log_progress_event(bbr, tp, ticks, PROGRESS_START, __LINE__); 14031 bbr->rc_tp->t_acktime = ticks; 14032 } 14033 if (flags & (TH_SYN | TH_FIN) && (rsm == NULL)) { 14034 if (flags & TH_SYN) { 14035 /* 14036 * Smack the snd_max to iss + 1 14037 * if its a FO we will add len below. 14038 */ 14039 tp->snd_max = tp->iss + 1; 14040 } 14041 if ((flags & TH_FIN) && ((tp->t_flags & TF_SENTFIN) == 0)) { 14042 tp->snd_max++; 14043 tp->t_flags |= TF_SENTFIN; 14044 } 14045 } 14046 if (sack_rxmit == 0) 14047 tp->snd_max += len; 14048 skip_upd: 14049 if ((error == 0) && len) 14050 tot_len += len; 14051 } else { 14052 /* Persists case */ 14053 int32_t xlen = len; 14054 14055 if (error) 14056 goto nomore; 14057 14058 if (flags & TH_SYN) 14059 ++xlen; 14060 if ((flags & TH_FIN) && ((tp->t_flags & TF_SENTFIN) == 0)) { 14061 ++xlen; 14062 tp->t_flags |= TF_SENTFIN; 14063 } 14064 if (xlen && (tp->snd_una == tp->snd_max)) { 14065 /* 14066 * Update the time we just added data since none was 14067 * outstanding. 14068 */ 14069 bbr_log_progress_event(bbr, tp, ticks, PROGRESS_START, __LINE__); 14070 bbr->rc_tp->t_acktime = ticks; 14071 } 14072 if (sack_rxmit == 0) 14073 tp->snd_max += xlen; 14074 tot_len += (len + optlen + ipoptlen); 14075 } 14076 nomore: 14077 if (error) { 14078 /* 14079 * Failures do not advance the seq counter above. For the 14080 * case of ENOBUFS we will fall out and become ack-clocked. 14081 * capping the cwnd at the current flight. 14082 * Everything else will just have to retransmit with the timer 14083 * (no pacer). 14084 */ 14085 SOCKBUF_UNLOCK_ASSERT(sb); 14086 BBR_STAT_INC(bbr_saw_oerr); 14087 /* Clear all delay/early tracks */ 14088 bbr->r_ctl.rc_hptsi_agg_delay = 0; 14089 bbr->r_ctl.rc_agg_early = 0; 14090 bbr->r_agg_early_set = 0; 14091 bbr->output_error_seen = 1; 14092 if (bbr->oerror_cnt < 0xf) 14093 bbr->oerror_cnt++; 14094 if (bbr_max_net_error_cnt && (bbr->oerror_cnt >= bbr_max_net_error_cnt)) { 14095 /* drop the session */ 14096 tcp_set_inp_to_drop(inp, ENETDOWN); 14097 } 14098 switch (error) { 14099 case ENOBUFS: 14100 /* 14101 * Make this guy have to get ack's to send 14102 * more but lets make sure we don't 14103 * slam him below a T-O (1MSS). 14104 */ 14105 if (bbr->rc_bbr_state != BBR_STATE_PROBE_RTT) { 14106 tp->snd_cwnd = ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 14107 bbr->r_ctl.rc_lost_bytes)) - maxseg; 14108 if (tp->snd_cwnd < maxseg) 14109 tp->snd_cwnd = maxseg; 14110 } 14111 slot = (bbr_error_base_paceout + 1) << bbr->oerror_cnt; 14112 BBR_STAT_INC(bbr_saw_enobuf); 14113 if (bbr->bbr_hdrw_pacing) 14114 counter_u64_add(bbr_hdwr_pacing_enobuf, 1); 14115 else 14116 counter_u64_add(bbr_nohdwr_pacing_enobuf, 1); 14117 /* 14118 * Here even in the enobuf's case we want to do our 14119 * state update. The reason being we may have been 14120 * called by the input function. If so we have had 14121 * things change. 14122 */ 14123 error = 0; 14124 goto enobufs; 14125 case EMSGSIZE: 14126 /* 14127 * For some reason the interface we used initially 14128 * to send segments changed to another or lowered 14129 * its MTU. If TSO was active we either got an 14130 * interface without TSO capabilits or TSO was 14131 * turned off. If we obtained mtu from ip_output() 14132 * then update it and try again. 
14133 */ 14134 /* Turn on tracing (or try to) */ 14135 { 14136 int old_maxseg; 14137 14138 old_maxseg = tp->t_maxseg; 14139 BBR_STAT_INC(bbr_saw_emsgsiz); 14140 bbr_log_msgsize_fail(bbr, tp, len, maxseg, mtu, csum_flags, tso, cts); 14141 if (mtu != 0) 14142 tcp_mss_update(tp, -1, mtu, NULL, NULL); 14143 if (old_maxseg <= tp->t_maxseg) { 14144 /* Huh it did not shrink? */ 14145 tp->t_maxseg = old_maxseg - 40; 14146 bbr_log_msgsize_fail(bbr, tp, len, maxseg, mtu, 0, tso, cts); 14147 } 14148 /* 14149 * Nuke all other things that can interfere 14150 * with slot 14151 */ 14152 if ((tot_len + len) && (len >= tp->t_maxseg)) { 14153 slot = bbr_get_pacing_delay(bbr, 14154 bbr->r_ctl.rc_bbr_hptsi_gain, 14155 (tot_len + len), cts, 0); 14156 if (slot < bbr_error_base_paceout) 14157 slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt; 14158 } else 14159 slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt; 14160 bbr->rc_output_starts_timer = 1; 14161 bbr_start_hpts_timer(bbr, tp, cts, 10, slot, 14162 tot_len); 14163 return (error); 14164 } 14165 case EPERM: 14166 tp->t_softerror = error; 14167 /* Fall through */ 14168 case EHOSTDOWN: 14169 case EHOSTUNREACH: 14170 case ENETDOWN: 14171 case ENETUNREACH: 14172 if (TCPS_HAVERCVDSYN(tp->t_state)) { 14173 tp->t_softerror = error; 14174 } 14175 /* FALLTHROUGH */ 14176 default: 14177 slot = (bbr_error_base_paceout + 3) << bbr->oerror_cnt; 14178 bbr->rc_output_starts_timer = 1; 14179 bbr_start_hpts_timer(bbr, tp, cts, 11, slot, 0); 14180 return (error); 14181 } 14182 #ifdef STATS 14183 } else if (((tp->t_flags & TF_GPUTINPROG) == 0) && 14184 len && 14185 (rsm == NULL) && 14186 (bbr->rc_in_persist == 0)) { 14187 tp->gput_seq = bbr_seq; 14188 tp->gput_ack = bbr_seq + 14189 min(sbavail(&so->so_snd) - sb_offset, sendwin); 14190 tp->gput_ts = cts; 14191 tp->t_flags |= TF_GPUTINPROG; 14192 #endif 14193 } 14194 KMOD_TCPSTAT_INC(tcps_sndtotal); 14195 if ((bbr->bbr_hdw_pace_ena) && 14196 (bbr->bbr_attempt_hdwr_pace == 0) && 14197 (bbr->rc_past_init_win) && 14198 (bbr->rc_bbr_state != BBR_STATE_STARTUP) && 14199 (get_filter_value(&bbr->r_ctl.rc_delrate)) && 14200 (inp->inp_route.ro_nh && 14201 inp->inp_route.ro_nh->nh_ifp)) { 14202 /* 14203 * We are past the initial window and 14204 * have at least one measurement so we 14205 * could use hardware pacing if its available. 14206 * We have an interface and we have not attempted 14207 * to setup hardware pacing, lets try to now. 14208 */ 14209 uint64_t rate_wanted; 14210 int err = 0; 14211 14212 rate_wanted = bbr_get_hardware_rate(bbr); 14213 bbr->bbr_attempt_hdwr_pace = 1; 14214 bbr->r_ctl.crte = tcp_set_pacing_rate(bbr->rc_tp, 14215 inp->inp_route.ro_nh->nh_ifp, 14216 rate_wanted, 14217 (RS_PACING_GEQ|RS_PACING_SUB_OK), 14218 &err); 14219 if (bbr->r_ctl.crte) { 14220 bbr_type_log_hdwr_pacing(bbr, 14221 bbr->r_ctl.crte->ptbl->rs_ifp, 14222 rate_wanted, 14223 bbr->r_ctl.crte->rate, 14224 __LINE__, cts, err); 14225 BBR_STAT_INC(bbr_hdwr_rl_add_ok); 14226 counter_u64_add(bbr_flows_nohdwr_pacing, -1); 14227 counter_u64_add(bbr_flows_whdwr_pacing, 1); 14228 bbr->bbr_hdrw_pacing = 1; 14229 /* Now what is our gain status? 
*/ 14230 if (bbr->r_ctl.crte->rate < rate_wanted) { 14231 /* We have a problem */ 14232 bbr_setup_less_of_rate(bbr, cts, 14233 bbr->r_ctl.crte->rate, rate_wanted); 14234 } else { 14235 /* We are good */ 14236 bbr->gain_is_limited = 0; 14237 bbr->skip_gain = 0; 14238 } 14239 tcp_bbr_tso_size_check(bbr, cts); 14240 } else { 14241 bbr_type_log_hdwr_pacing(bbr, 14242 inp->inp_route.ro_nh->nh_ifp, 14243 rate_wanted, 14244 0, 14245 __LINE__, cts, err); 14246 BBR_STAT_INC(bbr_hdwr_rl_add_fail); 14247 } 14248 } 14249 if (bbr->bbr_hdrw_pacing) { 14250 /* 14251 * Worry about cases where the route 14252 * changes or something happened that we 14253 * lost our hardware pacing possibly during 14254 * the last ip_output call. 14255 */ 14256 if (inp->inp_snd_tag == NULL) { 14257 /* A change during ip output disabled hw pacing? */ 14258 bbr->bbr_hdrw_pacing = 0; 14259 } else if ((inp->inp_route.ro_nh == NULL) || 14260 (inp->inp_route.ro_nh->nh_ifp != inp->inp_snd_tag->ifp)) { 14261 /* 14262 * We had an interface or route change, 14263 * detach from the current hdwr pacing 14264 * and setup to re-attempt next go 14265 * round. 14266 */ 14267 bbr->bbr_hdrw_pacing = 0; 14268 bbr->bbr_attempt_hdwr_pace = 0; 14269 tcp_rel_pacing_rate(bbr->r_ctl.crte, bbr->rc_tp); 14270 tcp_bbr_tso_size_check(bbr, cts); 14271 } 14272 } 14273 /* 14274 * Data sent (as far as we can tell). If this advertises a larger 14275 * window than any other segment, then remember the size of the 14276 * advertised window. Any pending ACK has now been sent. 14277 */ 14278 if (SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) 14279 tp->rcv_adv = tp->rcv_nxt + recwin; 14280 14281 tp->last_ack_sent = tp->rcv_nxt; 14282 if ((error == 0) && 14283 (bbr->r_ctl.rc_pace_max_segs > tp->t_maxseg) && 14284 (doing_tlp == 0) && 14285 (tso == 0) && 14286 (hw_tls == 0) && 14287 (len > 0) && 14288 ((flags & TH_RST) == 0) && 14289 (IN_RECOVERY(tp->t_flags) == 0) && 14290 (bbr->rc_in_persist == 0) && 14291 (tot_len < bbr->r_ctl.rc_pace_max_segs)) { 14292 /* 14293 * For non-tso we need to goto again until we have sent out 14294 * enough data to match what we are hptsi out every hptsi 14295 * interval. 
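* The loop is taken only for plain new data (no TLP, no retransmit,
* no TSO and no hardware TLS) and stops once tot_len reaches the
* pacing maximum.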
14296 */ 14297 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 14298 /* Make sure snd_nxt is drug up */ 14299 tp->snd_nxt = tp->snd_max; 14300 } 14301 if (rsm != NULL) { 14302 rsm = NULL; 14303 goto skip_again; 14304 } 14305 rsm = NULL; 14306 sack_rxmit = 0; 14307 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 14308 goto again; 14309 } 14310 skip_again: 14311 if ((error == 0) && (flags & TH_FIN)) 14312 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_FIN); 14313 if ((error == 0) && (flags & TH_RST)) 14314 tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST); 14315 if (((flags & (TH_RST | TH_SYN | TH_FIN)) == 0) && tot_len) { 14316 /* 14317 * Calculate/Re-Calculate the hptsi slot in usecs based on 14318 * what we have sent so far 14319 */ 14320 slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0); 14321 if (bbr->rc_no_pacing) 14322 slot = 0; 14323 } 14324 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); 14325 enobufs: 14326 if (bbr->rc_use_google == 0) 14327 bbr_check_bbr_for_state(bbr, cts, __LINE__, 0); 14328 bbr_cwnd_limiting(tp, bbr, ctf_flight_size(tp, (bbr->r_ctl.rc_sacked + 14329 bbr->r_ctl.rc_lost_bytes))); 14330 bbr->rc_output_starts_timer = 1; 14331 if (bbr->bbr_use_rack_cheat && 14332 (more_to_rxt || 14333 ((bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts)) != NULL))) { 14334 /* Rack cheats and shotguns out all rxt's 1ms apart */ 14335 if (slot > 1000) 14336 slot = 1000; 14337 } 14338 if (bbr->bbr_hdrw_pacing && (bbr->hw_pacing_set == 0)) { 14339 /* 14340 * We don't change the tso size until some number of sends 14341 * to give the hardware commands time to get down 14342 * to the interface. 14343 */ 14344 bbr->r_ctl.bbr_hdwr_cnt_noset_snt++; 14345 if (bbr->r_ctl.bbr_hdwr_cnt_noset_snt >= bbr_hdwr_pacing_delay_cnt) { 14346 bbr->hw_pacing_set = 1; 14347 tcp_bbr_tso_size_check(bbr, cts); 14348 } 14349 } 14350 bbr_start_hpts_timer(bbr, tp, cts, 12, slot, tot_len); 14351 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { 14352 /* Make sure snd_nxt is drug up */ 14353 tp->snd_nxt = tp->snd_max; 14354 } 14355 return (error); 14356 14357 } 14358 14359 /* 14360 * See bbr_output_wtime() for return values. 14361 */ 14362 static int 14363 bbr_output(struct tcpcb *tp) 14364 { 14365 int32_t ret; 14366 struct timeval tv; 14367 struct tcp_bbr *bbr; 14368 14369 NET_EPOCH_ASSERT(); 14370 14371 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 14372 INP_WLOCK_ASSERT(tp->t_inpcb); 14373 (void)tcp_get_usecs(&tv); 14374 ret = bbr_output_wtime(tp, &tv); 14375 return (ret); 14376 } 14377 14378 static void 14379 bbr_mtu_chg(struct tcpcb *tp) 14380 { 14381 struct tcp_bbr *bbr; 14382 struct bbr_sendmap *rsm, *frsm = NULL; 14383 uint32_t maxseg; 14384 14385 /* 14386 * The MTU has changed. a) Clear the sack filter. b) Mark everything 14387 * over the current size as SACK_PASS so a retransmit will occur. 14388 */ 14389 14390 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 14391 maxseg = tp->t_maxseg - bbr->rc_last_options; 14392 sack_filter_clear(&bbr->r_ctl.bbr_sf, tp->snd_una); 14393 TAILQ_FOREACH(rsm, &bbr->r_ctl.rc_map, r_next) { 14394 /* Don't mess with ones acked (by sack?) */ 14395 if (rsm->r_flags & BBR_ACKED) 14396 continue; 14397 if ((rsm->r_end - rsm->r_start) > maxseg) { 14398 /* 14399 * We mark sack-passed on all the previous large 14400 * sends we did. This will force them to retransmit. 
14401 */ 14402 rsm->r_flags |= BBR_SACK_PASSED; 14403 if (((rsm->r_flags & BBR_MARKED_LOST) == 0) && 14404 bbr_is_lost(bbr, rsm, bbr->r_ctl.rc_rcvtime)) { 14405 bbr->r_ctl.rc_lost_bytes += rsm->r_end - rsm->r_start; 14406 bbr->r_ctl.rc_lost += rsm->r_end - rsm->r_start; 14407 rsm->r_flags |= BBR_MARKED_LOST; 14408 } 14409 if (frsm == NULL) 14410 frsm = rsm; 14411 } 14412 } 14413 if (frsm) { 14414 bbr->r_ctl.rc_resend = frsm; 14415 } 14416 } 14417 14418 /* 14419 * bbr_ctloutput() must drop the inpcb lock before performing copyin on 14420 * socket option arguments. When it re-acquires the lock after the copy, it 14421 * has to revalidate that the connection is still valid for the socket 14422 * option. 14423 */ 14424 static int 14425 bbr_set_sockopt(struct socket *so, struct sockopt *sopt, 14426 struct inpcb *inp, struct tcpcb *tp, struct tcp_bbr *bbr) 14427 { 14428 struct epoch_tracker et; 14429 int32_t error = 0, optval; 14430 14431 switch (sopt->sopt_name) { 14432 case TCP_RACK_PACE_MAX_SEG: 14433 case TCP_RACK_MIN_TO: 14434 case TCP_RACK_REORD_THRESH: 14435 case TCP_RACK_REORD_FADE: 14436 case TCP_RACK_TLP_THRESH: 14437 case TCP_RACK_PKT_DELAY: 14438 case TCP_BBR_ALGORITHM: 14439 case TCP_BBR_TSLIMITS: 14440 case TCP_BBR_IWINTSO: 14441 case TCP_BBR_RECFORCE: 14442 case TCP_BBR_STARTUP_PG: 14443 case TCP_BBR_DRAIN_PG: 14444 case TCP_BBR_RWND_IS_APP: 14445 case TCP_BBR_PROBE_RTT_INT: 14446 case TCP_BBR_PROBE_RTT_GAIN: 14447 case TCP_BBR_PROBE_RTT_LEN: 14448 case TCP_BBR_STARTUP_LOSS_EXIT: 14449 case TCP_BBR_USEDEL_RATE: 14450 case TCP_BBR_MIN_RTO: 14451 case TCP_BBR_MAX_RTO: 14452 case TCP_BBR_PACE_PER_SEC: 14453 case TCP_DELACK: 14454 case TCP_BBR_PACE_DEL_TAR: 14455 case TCP_BBR_SEND_IWND_IN_TSO: 14456 case TCP_BBR_EXTRA_STATE: 14457 case TCP_BBR_UTTER_MAX_TSO: 14458 case TCP_BBR_MIN_TOPACEOUT: 14459 case TCP_BBR_FLOOR_MIN_TSO: 14460 case TCP_BBR_TSTMP_RAISES: 14461 case TCP_BBR_POLICER_DETECT: 14462 case TCP_BBR_USE_RACK_CHEAT: 14463 case TCP_DATA_AFTER_CLOSE: 14464 case TCP_BBR_HDWR_PACE: 14465 case TCP_BBR_PACE_SEG_MAX: 14466 case TCP_BBR_PACE_SEG_MIN: 14467 case TCP_BBR_PACE_CROSS: 14468 case TCP_BBR_PACE_OH: 14469 #ifdef NETFLIX_PEAKRATE 14470 case TCP_MAXPEAKRATE: 14471 #endif 14472 case TCP_BBR_TMR_PACE_OH: 14473 case TCP_BBR_RACK_RTT_USE: 14474 case TCP_BBR_RETRAN_WTSO: 14475 break; 14476 default: 14477 return (tcp_default_ctloutput(so, sopt, inp, tp)); 14478 break; 14479 } 14480 INP_WUNLOCK(inp); 14481 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 14482 if (error) 14483 return (error); 14484 INP_WLOCK(inp); 14485 if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { 14486 INP_WUNLOCK(inp); 14487 return (ECONNRESET); 14488 } 14489 tp = intotcpcb(inp); 14490 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 14491 switch (sopt->sopt_name) { 14492 case TCP_BBR_PACE_PER_SEC: 14493 BBR_OPTS_INC(tcp_bbr_pace_per_sec); 14494 bbr->r_ctl.bbr_hptsi_per_second = optval; 14495 break; 14496 case TCP_BBR_PACE_DEL_TAR: 14497 BBR_OPTS_INC(tcp_bbr_pace_del_tar); 14498 bbr->r_ctl.bbr_hptsi_segments_delay_tar = optval; 14499 break; 14500 case TCP_BBR_PACE_SEG_MAX: 14501 BBR_OPTS_INC(tcp_bbr_pace_seg_max); 14502 bbr->r_ctl.bbr_hptsi_segments_max = optval; 14503 break; 14504 case TCP_BBR_PACE_SEG_MIN: 14505 BBR_OPTS_INC(tcp_bbr_pace_seg_min); 14506 bbr->r_ctl.bbr_hptsi_bytes_min = optval; 14507 break; 14508 case TCP_BBR_PACE_CROSS: 14509 BBR_OPTS_INC(tcp_bbr_pace_cross); 14510 bbr->r_ctl.bbr_cross_over = optval; 14511 break; 14512 case TCP_BBR_ALGORITHM: 14513 
BBR_OPTS_INC(tcp_bbr_algorithm); 14514 if (optval && (bbr->rc_use_google == 0)) { 14515 /* Turn on the google mode */ 14516 bbr_google_mode_on(bbr); 14517 if ((optval > 3) && (optval < 500)) { 14518 /* 14519 * Must be at least greater than .3% 14520 * and must be less than 50.0%. 14521 */ 14522 bbr->r_ctl.bbr_google_discount = optval; 14523 } 14524 } else if ((optval == 0) && (bbr->rc_use_google == 1)) { 14525 /* Turn off the google mode */ 14526 bbr_google_mode_off(bbr); 14527 } 14528 break; 14529 case TCP_BBR_TSLIMITS: 14530 BBR_OPTS_INC(tcp_bbr_tslimits); 14531 if (optval == 1) 14532 bbr->rc_use_ts_limit = 1; 14533 else if (optval == 0) 14534 bbr->rc_use_ts_limit = 0; 14535 else 14536 error = EINVAL; 14537 break; 14538 14539 case TCP_BBR_IWINTSO: 14540 BBR_OPTS_INC(tcp_bbr_iwintso); 14541 if ((optval >= 0) && (optval < 128)) { 14542 uint32_t twin; 14543 14544 bbr->rc_init_win = optval; 14545 twin = bbr_initial_cwnd(bbr, tp); 14546 if ((bbr->rc_past_init_win == 0) && (twin > tp->snd_cwnd)) 14547 tp->snd_cwnd = twin; 14548 else 14549 error = EBUSY; 14550 } else 14551 error = EINVAL; 14552 break; 14553 case TCP_BBR_STARTUP_PG: 14554 BBR_OPTS_INC(tcp_bbr_startup_pg); 14555 if ((optval > 0) && (optval < BBR_MAX_GAIN_VALUE)) { 14556 bbr->r_ctl.rc_startup_pg = optval; 14557 if (bbr->rc_bbr_state == BBR_STATE_STARTUP) { 14558 bbr->r_ctl.rc_bbr_hptsi_gain = optval; 14559 } 14560 } else 14561 error = EINVAL; 14562 break; 14563 case TCP_BBR_DRAIN_PG: 14564 BBR_OPTS_INC(tcp_bbr_drain_pg); 14565 if ((optval > 0) && (optval < BBR_MAX_GAIN_VALUE)) 14566 bbr->r_ctl.rc_drain_pg = optval; 14567 else 14568 error = EINVAL; 14569 break; 14570 case TCP_BBR_PROBE_RTT_LEN: 14571 BBR_OPTS_INC(tcp_bbr_probertt_len); 14572 if (optval <= 1) 14573 reset_time_small(&bbr->r_ctl.rc_rttprop, (optval * USECS_IN_SECOND)); 14574 else 14575 error = EINVAL; 14576 break; 14577 case TCP_BBR_PROBE_RTT_GAIN: 14578 BBR_OPTS_INC(tcp_bbr_probertt_gain); 14579 if (optval <= BBR_UNIT) 14580 bbr->r_ctl.bbr_rttprobe_gain_val = optval; 14581 else 14582 error = EINVAL; 14583 break; 14584 case TCP_BBR_PROBE_RTT_INT: 14585 BBR_OPTS_INC(tcp_bbr_probe_rtt_int); 14586 if (optval > 1000) 14587 bbr->r_ctl.rc_probertt_int = optval; 14588 else 14589 error = EINVAL; 14590 break; 14591 case TCP_BBR_MIN_TOPACEOUT: 14592 BBR_OPTS_INC(tcp_bbr_topaceout); 14593 if (optval == 0) { 14594 bbr->no_pacing_until = 0; 14595 bbr->rc_no_pacing = 0; 14596 } else if (optval <= 0x00ff) { 14597 bbr->no_pacing_until = optval; 14598 if ((bbr->r_ctl.rc_pkt_epoch < bbr->no_pacing_until) && 14599 (bbr->rc_bbr_state == BBR_STATE_STARTUP)){ 14600 /* Turn on no pacing */ 14601 bbr->rc_no_pacing = 1; 14602 } 14603 } else 14604 error = EINVAL; 14605 break; 14606 case TCP_BBR_STARTUP_LOSS_EXIT: 14607 BBR_OPTS_INC(tcp_bbr_startup_loss_exit); 14608 bbr->rc_loss_exit = optval; 14609 break; 14610 case TCP_BBR_USEDEL_RATE: 14611 error = EINVAL; 14612 break; 14613 case TCP_BBR_MIN_RTO: 14614 BBR_OPTS_INC(tcp_bbr_min_rto); 14615 bbr->r_ctl.rc_min_rto_ms = optval; 14616 break; 14617 case TCP_BBR_MAX_RTO: 14618 BBR_OPTS_INC(tcp_bbr_max_rto); 14619 bbr->rc_max_rto_sec = optval; 14620 break; 14621 case TCP_RACK_MIN_TO: 14622 /* Minimum time between rack t-o's in ms */ 14623 BBR_OPTS_INC(tcp_rack_min_to); 14624 bbr->r_ctl.rc_min_to = optval; 14625 break; 14626 case TCP_RACK_REORD_THRESH: 14627 /* RACK reorder threshold (shift amount) */ 14628 BBR_OPTS_INC(tcp_rack_reord_thresh); 14629 if ((optval > 0) && (optval < 31)) 14630 bbr->r_ctl.rc_reorder_shift = optval; 14631 else 14632 error = 
EINVAL; 14633 break; 14634 case TCP_RACK_REORD_FADE: 14635 /* Does reordering fade after ms time */ 14636 BBR_OPTS_INC(tcp_rack_reord_fade); 14637 bbr->r_ctl.rc_reorder_fade = optval; 14638 break; 14639 case TCP_RACK_TLP_THRESH: 14640 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 14641 BBR_OPTS_INC(tcp_rack_tlp_thresh); 14642 if (optval) 14643 bbr->rc_tlp_threshold = optval; 14644 else 14645 error = EINVAL; 14646 break; 14647 case TCP_BBR_USE_RACK_CHEAT: 14648 BBR_OPTS_INC(tcp_use_rackcheat); 14649 if (bbr->rc_use_google) { 14650 error = EINVAL; 14651 break; 14652 } 14653 BBR_OPTS_INC(tcp_rack_cheat); 14654 if (optval) 14655 bbr->bbr_use_rack_cheat = 1; 14656 else 14657 bbr->bbr_use_rack_cheat = 0; 14658 break; 14659 case TCP_BBR_FLOOR_MIN_TSO: 14660 BBR_OPTS_INC(tcp_utter_max_tso); 14661 if ((optval >= 0) && (optval < 40)) 14662 bbr->r_ctl.bbr_hptsi_segments_floor = optval; 14663 else 14664 error = EINVAL; 14665 break; 14666 case TCP_BBR_UTTER_MAX_TSO: 14667 BBR_OPTS_INC(tcp_utter_max_tso); 14668 if ((optval >= 0) && (optval < 0xffff)) 14669 bbr->r_ctl.bbr_utter_max = optval; 14670 else 14671 error = EINVAL; 14672 break; 14673 14674 case TCP_BBR_EXTRA_STATE: 14675 BBR_OPTS_INC(tcp_extra_state); 14676 if (optval) 14677 bbr->rc_use_idle_restart = 1; 14678 else 14679 bbr->rc_use_idle_restart = 0; 14680 break; 14681 case TCP_BBR_SEND_IWND_IN_TSO: 14682 BBR_OPTS_INC(tcp_iwnd_tso); 14683 if (optval) { 14684 bbr->bbr_init_win_cheat = 1; 14685 if (bbr->rc_past_init_win == 0) { 14686 uint32_t cts; 14687 cts = tcp_get_usecs(&bbr->rc_tv); 14688 tcp_bbr_tso_size_check(bbr, cts); 14689 } 14690 } else 14691 bbr->bbr_init_win_cheat = 0; 14692 break; 14693 case TCP_BBR_HDWR_PACE: 14694 BBR_OPTS_INC(tcp_hdwr_pacing); 14695 if (optval){ 14696 bbr->bbr_hdw_pace_ena = 1; 14697 bbr->bbr_attempt_hdwr_pace = 0; 14698 } else { 14699 bbr->bbr_hdw_pace_ena = 0; 14700 #ifdef RATELIMIT 14701 if (bbr->bbr_hdrw_pacing) { 14702 bbr->bbr_hdrw_pacing = 0; 14703 in_pcbdetach_txrtlmt(bbr->rc_inp); 14704 } 14705 #endif 14706 } 14707 break; 14708 14709 case TCP_DELACK: 14710 BBR_OPTS_INC(tcp_delack); 14711 if (optval < 100) { 14712 if (optval == 0) /* off */ 14713 tp->t_delayed_ack = 0; 14714 else if (optval == 1) /* on which is 2 */ 14715 tp->t_delayed_ack = 2; 14716 else /* higher than 2 and less than 100 */ 14717 tp->t_delayed_ack = optval; 14718 if (tp->t_flags & TF_DELACK) { 14719 tp->t_flags &= ~TF_DELACK; 14720 tp->t_flags |= TF_ACKNOW; 14721 NET_EPOCH_ENTER(et); 14722 bbr_output(tp); 14723 NET_EPOCH_EXIT(et); 14724 } 14725 } else 14726 error = EINVAL; 14727 break; 14728 case TCP_RACK_PKT_DELAY: 14729 /* RACK added ms i.e. 
rack-rtt + reord + N */ 14730 BBR_OPTS_INC(tcp_rack_pkt_delay); 14731 bbr->r_ctl.rc_pkt_delay = optval; 14732 break; 14733 #ifdef NETFLIX_PEAKRATE 14734 case TCP_MAXPEAKRATE: 14735 BBR_OPTS_INC(tcp_maxpeak); 14736 error = tcp_set_maxpeakrate(tp, optval); 14737 if (!error) 14738 tp->t_peakrate_thr = tp->t_maxpeakrate; 14739 break; 14740 #endif 14741 case TCP_BBR_RETRAN_WTSO: 14742 BBR_OPTS_INC(tcp_retran_wtso); 14743 if (optval) 14744 bbr->rc_resends_use_tso = 1; 14745 else 14746 bbr->rc_resends_use_tso = 0; 14747 break; 14748 case TCP_DATA_AFTER_CLOSE: 14749 BBR_OPTS_INC(tcp_data_ac); 14750 if (optval) 14751 bbr->rc_allow_data_af_clo = 1; 14752 else 14753 bbr->rc_allow_data_af_clo = 0; 14754 break; 14755 case TCP_BBR_POLICER_DETECT: 14756 BBR_OPTS_INC(tcp_policer_det); 14757 if (bbr->rc_use_google == 0) 14758 error = EINVAL; 14759 else if (optval) 14760 bbr->r_use_policer = 1; 14761 else 14762 bbr->r_use_policer = 0; 14763 break; 14764 14765 case TCP_BBR_TSTMP_RAISES: 14766 BBR_OPTS_INC(tcp_ts_raises); 14767 if (optval) 14768 bbr->ts_can_raise = 1; 14769 else 14770 bbr->ts_can_raise = 0; 14771 break; 14772 case TCP_BBR_TMR_PACE_OH: 14773 BBR_OPTS_INC(tcp_pacing_oh_tmr); 14774 if (bbr->rc_use_google) { 14775 error = EINVAL; 14776 } else { 14777 if (optval) 14778 bbr->r_ctl.rc_incr_tmrs = 1; 14779 else 14780 bbr->r_ctl.rc_incr_tmrs = 0; 14781 } 14782 break; 14783 case TCP_BBR_PACE_OH: 14784 BBR_OPTS_INC(tcp_pacing_oh); 14785 if (bbr->rc_use_google) { 14786 error = EINVAL; 14787 } else { 14788 if (optval > (BBR_INCL_TCP_OH| 14789 BBR_INCL_IP_OH| 14790 BBR_INCL_ENET_OH)) { 14791 error = EINVAL; 14792 break; 14793 } 14794 if (optval & BBR_INCL_TCP_OH) 14795 bbr->r_ctl.rc_inc_tcp_oh = 1; 14796 else 14797 bbr->r_ctl.rc_inc_tcp_oh = 0; 14798 if (optval & BBR_INCL_IP_OH) 14799 bbr->r_ctl.rc_inc_ip_oh = 1; 14800 else 14801 bbr->r_ctl.rc_inc_ip_oh = 0; 14802 if (optval & BBR_INCL_ENET_OH) 14803 bbr->r_ctl.rc_inc_enet_oh = 1; 14804 else 14805 bbr->r_ctl.rc_inc_enet_oh = 0; 14806 } 14807 break; 14808 default: 14809 return (tcp_default_ctloutput(so, sopt, inp, tp)); 14810 break; 14811 } 14812 #ifdef NETFLIX_STATS 14813 tcp_log_socket_option(tp, sopt->sopt_name, optval, error); 14814 #endif 14815 INP_WUNLOCK(inp); 14816 return (error); 14817 } 14818 14819 /* 14820 * return 0 on success, error-num on failure 14821 */ 14822 static int 14823 bbr_get_sockopt(struct socket *so, struct sockopt *sopt, 14824 struct inpcb *inp, struct tcpcb *tp, struct tcp_bbr *bbr) 14825 { 14826 int32_t error, optval; 14827 14828 /* 14829 * Because all our options are either boolean or an int, we can just 14830 * pull everything into optval and then unlock and copy. If we ever 14831 * add a option that is not a int, then this will have quite an 14832 * impact to this routine. 
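* (The inpcb lock is dropped right before sooptcopyout() at the end
* of the switch below.)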
14833 */ 14834 switch (sopt->sopt_name) { 14835 case TCP_BBR_PACE_PER_SEC: 14836 optval = bbr->r_ctl.bbr_hptsi_per_second; 14837 break; 14838 case TCP_BBR_PACE_DEL_TAR: 14839 optval = bbr->r_ctl.bbr_hptsi_segments_delay_tar; 14840 break; 14841 case TCP_BBR_PACE_SEG_MAX: 14842 optval = bbr->r_ctl.bbr_hptsi_segments_max; 14843 break; 14844 case TCP_BBR_MIN_TOPACEOUT: 14845 optval = bbr->no_pacing_until; 14846 break; 14847 case TCP_BBR_PACE_SEG_MIN: 14848 optval = bbr->r_ctl.bbr_hptsi_bytes_min; 14849 break; 14850 case TCP_BBR_PACE_CROSS: 14851 optval = bbr->r_ctl.bbr_cross_over; 14852 break; 14853 case TCP_BBR_ALGORITHM: 14854 optval = bbr->rc_use_google; 14855 break; 14856 case TCP_BBR_TSLIMITS: 14857 optval = bbr->rc_use_ts_limit; 14858 break; 14859 case TCP_BBR_IWINTSO: 14860 optval = bbr->rc_init_win; 14861 break; 14862 case TCP_BBR_STARTUP_PG: 14863 optval = bbr->r_ctl.rc_startup_pg; 14864 break; 14865 case TCP_BBR_DRAIN_PG: 14866 optval = bbr->r_ctl.rc_drain_pg; 14867 break; 14868 case TCP_BBR_PROBE_RTT_INT: 14869 optval = bbr->r_ctl.rc_probertt_int; 14870 break; 14871 case TCP_BBR_PROBE_RTT_LEN: 14872 optval = (bbr->r_ctl.rc_rttprop.cur_time_limit / USECS_IN_SECOND); 14873 break; 14874 case TCP_BBR_PROBE_RTT_GAIN: 14875 optval = bbr->r_ctl.bbr_rttprobe_gain_val; 14876 break; 14877 case TCP_BBR_STARTUP_LOSS_EXIT: 14878 optval = bbr->rc_loss_exit; 14879 break; 14880 case TCP_BBR_USEDEL_RATE: 14881 error = EINVAL; 14882 break; 14883 case TCP_BBR_MIN_RTO: 14884 optval = bbr->r_ctl.rc_min_rto_ms; 14885 break; 14886 case TCP_BBR_MAX_RTO: 14887 optval = bbr->rc_max_rto_sec; 14888 break; 14889 case TCP_RACK_PACE_MAX_SEG: 14890 /* Max segments in a pace */ 14891 optval = bbr->r_ctl.rc_pace_max_segs; 14892 break; 14893 case TCP_RACK_MIN_TO: 14894 /* Minimum time between rack t-o's in ms */ 14895 optval = bbr->r_ctl.rc_min_to; 14896 break; 14897 case TCP_RACK_REORD_THRESH: 14898 /* RACK reorder threshold (shift amount) */ 14899 optval = bbr->r_ctl.rc_reorder_shift; 14900 break; 14901 case TCP_RACK_REORD_FADE: 14902 /* Does reordering fade after ms time */ 14903 optval = bbr->r_ctl.rc_reorder_fade; 14904 break; 14905 case TCP_BBR_USE_RACK_CHEAT: 14906 /* Do we use the rack cheat for rxt */ 14907 optval = bbr->bbr_use_rack_cheat; 14908 break; 14909 case TCP_BBR_FLOOR_MIN_TSO: 14910 optval = bbr->r_ctl.bbr_hptsi_segments_floor; 14911 break; 14912 case TCP_BBR_UTTER_MAX_TSO: 14913 optval = bbr->r_ctl.bbr_utter_max; 14914 break; 14915 case TCP_BBR_SEND_IWND_IN_TSO: 14916 /* Do we send TSO size segments initially */ 14917 optval = bbr->bbr_init_win_cheat; 14918 break; 14919 case TCP_BBR_EXTRA_STATE: 14920 optval = bbr->rc_use_idle_restart; 14921 break; 14922 case TCP_RACK_TLP_THRESH: 14923 /* RACK TLP theshold i.e. srtt+(srtt/N) */ 14924 optval = bbr->rc_tlp_threshold; 14925 break; 14926 case TCP_RACK_PKT_DELAY: 14927 /* RACK added ms i.e. 
rack-rtt + reord + N */ 14928 optval = bbr->r_ctl.rc_pkt_delay; 14929 break; 14930 case TCP_BBR_RETRAN_WTSO: 14931 optval = bbr->rc_resends_use_tso; 14932 break; 14933 case TCP_DATA_AFTER_CLOSE: 14934 optval = bbr->rc_allow_data_af_clo; 14935 break; 14936 case TCP_DELACK: 14937 optval = tp->t_delayed_ack; 14938 break; 14939 case TCP_BBR_HDWR_PACE: 14940 optval = bbr->bbr_hdw_pace_ena; 14941 break; 14942 case TCP_BBR_POLICER_DETECT: 14943 optval = bbr->r_use_policer; 14944 break; 14945 case TCP_BBR_TSTMP_RAISES: 14946 optval = bbr->ts_can_raise; 14947 break; 14948 case TCP_BBR_TMR_PACE_OH: 14949 optval = bbr->r_ctl.rc_incr_tmrs; 14950 break; 14951 case TCP_BBR_PACE_OH: 14952 optval = 0; 14953 if (bbr->r_ctl.rc_inc_tcp_oh) 14954 optval |= BBR_INCL_TCP_OH; 14955 if (bbr->r_ctl.rc_inc_ip_oh) 14956 optval |= BBR_INCL_IP_OH; 14957 if (bbr->r_ctl.rc_inc_enet_oh) 14958 optval |= BBR_INCL_ENET_OH; 14959 break; 14960 default: 14961 return (tcp_default_ctloutput(so, sopt, inp, tp)); 14962 break; 14963 } 14964 INP_WUNLOCK(inp); 14965 error = sooptcopyout(sopt, &optval, sizeof optval); 14966 return (error); 14967 } 14968 14969 /* 14970 * return 0 on success, error-num on failure 14971 */ 14972 static int 14973 bbr_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp) 14974 { 14975 int32_t error = EINVAL; 14976 struct tcp_bbr *bbr; 14977 14978 bbr = (struct tcp_bbr *)tp->t_fb_ptr; 14979 if (bbr == NULL) { 14980 /* Huh? */ 14981 goto out; 14982 } 14983 if (sopt->sopt_dir == SOPT_SET) { 14984 return (bbr_set_sockopt(so, sopt, inp, tp, bbr)); 14985 } else if (sopt->sopt_dir == SOPT_GET) { 14986 return (bbr_get_sockopt(so, sopt, inp, tp, bbr)); 14987 } 14988 out: 14989 INP_WUNLOCK(inp); 14990 return (error); 14991 } 14992 14993 static int 14994 bbr_pru_options(struct tcpcb *tp, int flags) 14995 { 14996 if (flags & PRUS_OOB) 14997 return (EOPNOTSUPP); 14998 return (0); 14999 } 15000 15001 struct tcp_function_block __tcp_bbr = { 15002 .tfb_tcp_block_name = __XSTRING(STACKNAME), 15003 .tfb_tcp_output = bbr_output, 15004 .tfb_do_queued_segments = ctf_do_queued_segments, 15005 .tfb_do_segment_nounlock = bbr_do_segment_nounlock, 15006 .tfb_tcp_do_segment = bbr_do_segment, 15007 .tfb_tcp_ctloutput = bbr_ctloutput, 15008 .tfb_tcp_fb_init = bbr_init, 15009 .tfb_tcp_fb_fini = bbr_fini, 15010 .tfb_tcp_timer_stop_all = bbr_stopall, 15011 .tfb_tcp_timer_activate = bbr_timer_activate, 15012 .tfb_tcp_timer_active = bbr_timer_active, 15013 .tfb_tcp_timer_stop = bbr_timer_stop, 15014 .tfb_tcp_rexmit_tmr = bbr_remxt_tmr, 15015 .tfb_tcp_handoff_ok = bbr_handoff_ok, 15016 .tfb_tcp_mtu_chg = bbr_mtu_chg, 15017 .tfb_pru_options = bbr_pru_options, 15018 }; 15019 15020 static const char *bbr_stack_names[] = { 15021 __XSTRING(STACKNAME), 15022 #ifdef STACKALIAS 15023 __XSTRING(STACKALIAS), 15024 #endif 15025 }; 15026 15027 static bool bbr_mod_inited = false; 15028 15029 static int 15030 tcp_addbbr(module_t mod, int32_t type, void *data) 15031 { 15032 int32_t err = 0; 15033 int num_stacks; 15034 15035 switch (type) { 15036 case MOD_LOAD: 15037 printf("Attempting to load " __XSTRING(MODNAME) "\n"); 15038 bbr_zone = uma_zcreate(__XSTRING(MODNAME) "_map", 15039 sizeof(struct bbr_sendmap), 15040 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 15041 bbr_pcb_zone = uma_zcreate(__XSTRING(MODNAME) "_pcb", 15042 sizeof(struct tcp_bbr), 15043 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 15044 sysctl_ctx_init(&bbr_sysctl_ctx); 15045 bbr_sysctl_root = SYSCTL_ADD_NODE(&bbr_sysctl_ctx, 15046 
SYSCTL_STATIC_CHILDREN(_net_inet_tcp), 15047 OID_AUTO, 15048 #ifdef STACKALIAS 15049 __XSTRING(STACKALIAS), 15050 #else 15051 __XSTRING(STACKNAME), 15052 #endif 15053 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 15054 ""); 15055 if (bbr_sysctl_root == NULL) { 15056 printf("Failed to add sysctl node\n"); 15057 err = EFAULT; 15058 goto free_uma; 15059 } 15060 bbr_init_sysctls(); 15061 num_stacks = nitems(bbr_stack_names); 15062 err = register_tcp_functions_as_names(&__tcp_bbr, M_WAITOK, 15063 bbr_stack_names, &num_stacks); 15064 if (err) { 15065 printf("Failed to register %s stack name for " 15066 "%s module\n", bbr_stack_names[num_stacks], 15067 __XSTRING(MODNAME)); 15068 sysctl_ctx_free(&bbr_sysctl_ctx); 15069 free_uma: 15070 uma_zdestroy(bbr_zone); 15071 uma_zdestroy(bbr_pcb_zone); 15072 bbr_counter_destroy(); 15073 printf("Failed to register " __XSTRING(MODNAME) 15074 " module err:%d\n", err); 15075 return (err); 15076 } 15077 tcp_lro_reg_mbufq(); 15078 bbr_mod_inited = true; 15079 printf(__XSTRING(MODNAME) " is now available\n"); 15080 break; 15081 case MOD_QUIESCE: 15082 err = deregister_tcp_functions(&__tcp_bbr, true, false); 15083 break; 15084 case MOD_UNLOAD: 15085 err = deregister_tcp_functions(&__tcp_bbr, false, true); 15086 if (err == EBUSY) 15087 break; 15088 if (bbr_mod_inited) { 15089 uma_zdestroy(bbr_zone); 15090 uma_zdestroy(bbr_pcb_zone); 15091 sysctl_ctx_free(&bbr_sysctl_ctx); 15092 bbr_counter_destroy(); 15093 printf(__XSTRING(MODNAME) 15094 " is now no longer available\n"); 15095 bbr_mod_inited = false; 15096 } 15097 tcp_lro_dereg_mbufq(); 15098 err = 0; 15099 break; 15100 default: 15101 return (EOPNOTSUPP); 15102 } 15103 return (err); 15104 } 15105 15106 static moduledata_t tcp_bbr = { 15107 .name = __XSTRING(MODNAME), 15108 .evhand = tcp_addbbr, 15109 .priv = 0 15110 }; 15111 15112 MODULE_VERSION(MODNAME, 1); 15113 DECLARE_MODULE(MODNAME, tcp_bbr, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY); 15114 MODULE_DEPEND(MODNAME, tcphpts, 1, 1, 1); 15115
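/*
 * Illustrative sketch only (not part of the kernel module): how a
 * user-space application might move a connected socket onto this
 * stack and exercise one of the options handled by bbr_ctloutput()
 * above. It assumes the module is loaded and that STACKNAME expands
 * to "bbr" so that is the registered function-block name; the helper
 * name is arbitrary and error handling is omitted for brevity.
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	static void
 *	switch_to_bbr(int s)
 *	{
 *		struct tcp_function_set fs;
 *		int one = 1;
 *
 *		memset(&fs, 0, sizeof(fs));
 *		strlcpy(fs.function_set_name, "bbr",
 *		    sizeof(fs.function_set_name));
 *		// Hand the connection to the bbr function block.
 *		(void)setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK,
 *		    &fs, sizeof(fs));
 *		// Enable google mode (see TCP_BBR_ALGORITHM above).
 *		(void)setsockopt(s, IPPROTO_TCP, TCP_BBR_ALGORITHM,
 *		    &one, sizeof(one));
 *	}
 */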