Lines Matching +full:event +full:- +full:touch +full:- +full:alt
1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
161 * - Matt Mathis's Rate Halving which slowly drops
164 * - Yuchung Cheng's RACK TCP (for which it's named) that
167 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
185 * TCP output is also over-written with a new version since it
190 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs w-out new data */
193 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
194 * - 60 seconds */
198 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto ->…
215 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
251 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/c…
256 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
263 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
293 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
294 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
309 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
311 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top …
312 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bott…
331 * the way fill-cw interacts with timely and caps how much
332 * timely can boost the fill-cw b/w.
338 * probeRTT as well as fixed-rate-pacing.
430 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
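The macro above computes the retransmit timeout as the smoothed RTT plus four times the RTT variance, floored at rack_rto_min. A minimal user-space sketch of that arithmetic, assuming microsecond units and illustrative names:

#include <stdint.h>
#include <stdio.h>

/* RTO = max(rto_min, srtt + 4 * rttvar), as in RACK_REXMTVAL() above. */
static uint32_t
rexmt_val(uint32_t rto_min, uint32_t srtt, uint32_t rttvar)
{
	uint32_t val = srtt + (rttvar << 2);	/* srtt + 4 * rttvar */

	return (val > rto_min ? val : rto_min);	/* floor at the minimum RTO */
}

int
main(void)
{
	/* 30 ms srtt, 5 ms variance, 1 ms floor -> 50000 usec RTO */
	printf("%u\n", rexmt_val(1000, 30000, 5000));
	return (0);
}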
441 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int lin…
601 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
602 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
603 if (rack->lt_bw_up) { in rack_get_lt_bw()
606 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
607 tim += (tcp_tv_to_lusectick(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
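rack_get_lt_bw() above keeps running byte and time accumulators and, when a measurement interval is still open (lt_bw_up), also counts the bytes acked and the time elapsed since the interval began. A hedged sketch of that bookkeeping; the final bytes-per-second division is an assumption, and the field names only loosely mirror the kernel's:

#include <stdint.h>

struct lt_bw_state {
	uint64_t lt_bw_bytes;	/* bytes from closed intervals */
	uint64_t lt_bw_time;	/* usecs from closed intervals */
	uint64_t lt_seq;	/* snd_una when the open interval began */
	uint64_t lt_timemark;	/* usec timestamp when it began */
	int	 lt_bw_up;	/* is an interval currently open? */
};

static uint64_t
lt_bw_estimate(const struct lt_bw_state *s, uint64_t snd_una, uint64_t now)
{
	uint64_t bytes = s->lt_bw_bytes;
	uint64_t tim = s->lt_bw_time;

	if (s->lt_bw_up) {
		bytes += snd_una - s->lt_seq;	/* acked so far this interval */
		tim += now - s->lt_timemark;	/* time spent so far */
	}
	if (tim == 0)
		return (0);
	return ((bytes * 1000000) / tim);	/* assumed: bytes per second */
}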
625 tp = rack->rc_tp; in rack_swap_beta_values()
626 if (tp->t_cc == NULL) { in rack_swap_beta_values()
630 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
631 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_swap_beta_values()
632 /* Not new-reno, we can't play games with beta! */ in rack_swap_beta_values()
637 if (CC_ALGO(tp)->ctl_output == NULL) { in rack_swap_beta_values()
638 /* Huh, not using new-reno so no swaps? */ in rack_swap_beta_values()
646 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
653 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
663 opt.val = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
664 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
670 opt.val = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
671 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
677 rack->r_ctl.rc_saved_beta = old_beta; in rack_swap_beta_values()
678 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; in rack_swap_beta_values()
680 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
685 ptr = ((struct newreno *)tp->t_ccv.cc_data); in rack_swap_beta_values()
688 log.u_bbr.flex1 = ptr->beta; in rack_swap_beta_values()
689 log.u_bbr.flex2 = ptr->beta_ecn; in rack_swap_beta_values()
690 log.u_bbr.flex3 = ptr->newreno_flags; in rack_swap_beta_values()
691 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
692 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
694 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
696 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
698 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
699 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
709 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
715 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
722 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
728 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
735 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
737 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
739 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
741 rack->rc_always_pace = 0; in rack_remove_pacing()
742 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
743 rack->dgp_on = 0; in rack_remove_pacing()
744 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
745 rack->use_fixed_rate = 0; in rack_remove_pacing()
752 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
758 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
760 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
762 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
765 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
766 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
768 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
769 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
770 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
771 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
773 log.u_bbr.applimited = rsm->r_start; in rack_log_gpset()
774 log.u_bbr.delivered = rsm->r_end; in rack_log_gpset()
775 log.u_bbr.epoch = rsm->r_flags; in rack_log_gpset()
778 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
779 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
780 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
793 if (error || req->newptr == NULL) in sysctl_rack_clear()
916 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); in rack_init_sysctls()
921 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); in rack_init_sysctls()
966 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); in rack_init_sysctls()
971 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); in rack_init_sysctls()
996 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); in rack_init_sysctls()
1006 "Do we clear I/S counts on exiting probe-rtt"); in rack_init_sysctls()
1016 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); in rack_init_sysctls()
1190 "If we fall below this rate, dis-engage hw pacing?"); in rack_init_sysctls()
1331 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); in rack_init_sysctls()
1351 "Should we always send the oldest TLP and RACK-TLP"); in rack_init_sysctls()
1389 "When doing recovery -> rto -> recovery do we reset SSthresh?"); in rack_init_sysctls()
1424 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); in rack_init_sysctls()
1429 "Maximum RTO in microseconds -- should be at least as large as min_rto"); in rack_init_sysctls()
1451 "Does a cwnd just-return end the measurement window (app limited)"); in rack_init_sysctls()
1456 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); in rack_init_sysctls()
1513 "Should RACK use mbuf queuing for non-paced connections"); in rack_init_sysctls()
1557 … "When a persist or keep-alive probe is not answered, do we calculate rtt on subsequent answers?"); in rack_init_sysctls()
1641 "Highest move to non-move ratio seen"); in rack_init_sysctls()
1782 "Total number of times a sends returned enobuf for non-hdwr paced connections"); in rack_init_sysctls()
1979 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
1986 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
1987 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
1988 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
1989 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
1991 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
2013 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2021 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
2023 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
2048 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
2050 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
2051 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
2053 /* Use the last known rtt i.e. the rack-rtt */ in rack_log_hybrid_bw()
2054 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
2059 log.u_bbr.cur_del_rate = cur->deadline; in rack_log_hybrid_bw()
2062 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2063 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2064 log.u_bbr.flex6 = cur->start_seq; in rack_log_hybrid_bw()
2065 log.u_bbr.pkts_out = cur->end_seq; in rack_log_hybrid_bw()
2068 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
2069 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2071 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_bw()
2072 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2075 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); in rack_log_hybrid_bw()
2076 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2078 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_bw()
2079 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
2081 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
2085 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
2086 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
2087 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; in rack_log_hybrid_bw()
2099 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
2101 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
2103 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
2105 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
2107 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
2108 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
2109 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
2121 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
2130 log.u_bbr.delRate = cur->sent_at_fs; in rack_log_hybrid_sends()
2132 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { in rack_log_hybrid_sends()
2138 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2139 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2145 log.u_bbr.cur_del_rate = cur->sent_at_ls; in rack_log_hybrid_sends()
2146 log.u_bbr.rttProp = cur->rxt_at_ls; in rack_log_hybrid_sends()
2148 log.u_bbr.bw_inuse = cur->rxt_at_fs; in rack_log_hybrid_sends()
2150 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2153 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_sends()
2154 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2156 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_sends()
2157 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2160 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_sends()
2161 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2163 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); in rack_log_hybrid_sends()
2164 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2166 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid_sends()
2167 log.u_bbr.lost = cur->playout_ms; in rack_log_hybrid_sends()
2168 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid_sends()
2171 * where a false retransmit occurred so first_send <-> lastsend may in rack_log_hybrid_sends()
2174 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2175 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2183 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2185 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2187 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2189 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2192 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2193 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2194 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2207 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2208 if (rack->r_is_v6){ in rack_compensate_for_linerate()
2219 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
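rack_compensate_for_linerate() above is sizing the full on-the-wire frame (t_maxseg plus TCP, IP or IPv6, and Ethernet headers) so a pacing rate expressed over payload can be scaled to what the link actually carries. A sketch of that compensation, assuming the scale factor is wire_size / payload_size:

#include <stdint.h>

/* Scale a payload bandwidth up by the per-segment header overhead. */
static uint64_t
compensate_linerate(uint64_t bw, uint64_t payload, uint64_t overhead)
{
	return ((bw * (payload + overhead)) / payload);
}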
2234 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2237 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2238 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2246 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2249 if (timenow >= ent->deadline) { in rack_rate_cap_bw()
2251 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2253 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2257 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2260 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2262 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2271 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_rate_cap_bw()
2272 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2273 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2276 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2278 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2287 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2288 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2291 if (lengone < (ent->end - ent->start)) in rack_rate_cap_bw()
2292 lenleft = (ent->end - ent->start) - lengone; in rack_rate_cap_bw()
2295 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2297 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2303 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2305 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2315 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2316 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2318 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2319 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2322 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2323 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2324 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2325 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2327 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2330 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2331 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); in rack_rate_cap_bw()
2339 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2341 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2342 rack->rc_catch_up && in rack_rate_cap_bw()
2343 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2344 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2346 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2347 /* Let's possibly set a smaller mss here to match our rate-cap */ in rack_rate_cap_bw()
2350 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2351 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2352 …rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2353 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2357 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2358 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
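The catch-up path above derives how many bytes are still owed against the request (lenleft) and how long remains until its deadline (timeleft), then caps the pacing rate at the bandwidth needed to finish in time. The exact scaling is not visible in the fragment; a sketch under the assumption that it is a straight bytes-over-microseconds conversion:

#include <stdint.h>

/* Bandwidth needed to deliver lenleft bytes before the deadline. */
static uint64_t
catchup_bw(uint64_t lenleft, uint64_t timeleft_usec)
{
	if (timeleft_usec == 0)
		return (0);	/* deadline hit; the caller drops the cap */
	return ((lenleft * 1000000) / timeleft_usec);	/* bytes/sec */
}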
2369 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2383 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2389 * No goodput bw but a long-term b/w does exist in rack_get_gp_est()
2395 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2396 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2399 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2407 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2408 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2415 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2417 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2420 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
2422 if (rack->dis_lt_bw) { in rack_get_gp_est()
2423 /* We are not using lt-bw */ in rack_get_gp_est()
2430 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2432 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
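rack_get_gp_est() above treats gp_bw as a running sum until RACK_REQ_AVG measurements have been taken, averaging over the sample count, and uses it directly once enough samples exist. A sketch of that selection; the actual value of RACK_REQ_AVG is not shown in the fragment:

#include <stdint.h>

#define REQ_AVG	4	/* placeholder for RACK_REQ_AVG */

static uint64_t
gp_estimate(uint64_t gp_bw, uint32_t num_measurements)
{
	if (num_measurements >= REQ_AVG)
		return (gp_bw);		/* enough samples: use as-is */
	/* too few samples: average what we have, avoiding divide-by-zero */
	return (gp_bw / (num_measurements ? num_measurements : 1));
}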
2464 if (rack->use_fixed_rate) { in rack_get_bw()
2475 if (rack->use_fixed_rate) { in rack_get_output_gain()
2477 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2478 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2479 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2480 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2483 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2484 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2487 } else if (rack->rack_no_prr && in rack_get_output_gain()
2488 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2493 * Here we may have a non-retransmit but we in rack_get_output_gain()
2497 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2502 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2503 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2505 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
2513 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. in rack_log_dsack_event()
2520 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2525 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2527 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2529 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2530 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2531 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2535 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2538 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2539 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2540 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2541 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2542 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2553 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2562 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2563 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2564 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2565 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2566 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2579 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2580 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2582 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2583 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2584 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2585 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2586 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2589 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2590 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2591 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2592 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2593 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2594 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2615 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2617 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2619 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2624 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2634 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2635 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2636 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2637 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2638 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2646 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
2664 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2671 * 1 - We are retransmitting and this tells the reason. in rack_log_retran_reason()
2672 * 2 - We are clearing a dup-ack count. in rack_log_retran_reason()
2673 * 3 - We are incrementing a dup-ack count. in rack_log_retran_reason()
2683 log.u_bbr.flex3 = rsm->r_flags; in rack_log_retran_reason()
2684 log.u_bbr.flex4 = rsm->r_dupack; in rack_log_retran_reason()
2685 log.u_bbr.flex5 = rsm->r_start; in rack_log_retran_reason()
2686 log.u_bbr.flex6 = rsm->r_end; in rack_log_retran_reason()
2688 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2690 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2691 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2692 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2693 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2694 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2695 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2696 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2697 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2698 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2707 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2712 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2714 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2716 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2717 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2718 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2720 if (rack->rack_no_prr) in rack_log_to_start()
2723 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2724 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2726 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2727 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2728 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2729 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2730 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2731 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2732 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2734 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2735 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2737 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2738 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2739 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2740 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2741 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2750 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2755 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2757 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2758 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2762 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; in rack_log_to_event()
2763 if (rack->rack_no_prr) in rack_log_to_event()
2766 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2768 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2769 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2770 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2771 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2772 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2774 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2775 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2776 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2777 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2790 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2796 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2802 log.u_bbr.flex1 = prev->r_start; in rack_log_map_chg()
2803 log.u_bbr.flex2 = prev->r_end; in rack_log_map_chg()
2807 log.u_bbr.flex3 = rsm->r_start; in rack_log_map_chg()
2808 log.u_bbr.flex4 = rsm->r_end; in rack_log_map_chg()
2812 log.u_bbr.flex5 = next->r_start; in rack_log_map_chg()
2813 log.u_bbr.flex6 = next->r_end; in rack_log_map_chg()
2819 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2820 if (rack->rack_no_prr) in rack_log_map_chg()
2823 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2824 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2826 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2827 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2828 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2829 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2843 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2846 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2847 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2848 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2849 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2851 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2852 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2854 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2855 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2856 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2858 log.u_bbr.pkt_epoch = rsm->r_start; in rack_log_rtt_upd()
2859 log.u_bbr.lost = rsm->r_end; in rack_log_rtt_upd()
2860 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; in rack_log_rtt_upd()
2862 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; in rack_log_rtt_upd()
2865 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2871 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2873 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2875 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2877 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2879 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2881 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2883 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2885 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2886 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2887 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2888 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2889 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2890 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2891 log.u_bbr.bw_inuse = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2894 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); in rack_log_rtt_upd()
2896 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2897 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2913 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2920 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2923 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2924 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2925 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2926 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2933 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2935 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2939 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2942 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2943 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2945 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2946 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2947 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2948 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2957 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2969 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2971 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2972 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2973 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2974 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
2984 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
2996 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
2998 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
2999 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
3000 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
3001 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
3009 rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick, int event, int lin… in rack_log_progress_event() argument
3011 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
3016 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
3019 log.u_bbr.flex3 = tp->t_maxunacktime; in rack_log_progress_event()
3020 log.u_bbr.flex4 = tp->t_acktime; in rack_log_progress_event()
3021 log.u_bbr.flex8 = event; in rack_log_progress_event()
3023 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
3024 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
3025 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
3026 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
3027 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
3029 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
3031 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
3032 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
3041 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
3045 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
3047 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
3050 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
3051 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
3053 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
3054 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
3056 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
3057 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
3058 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
3059 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
3060 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
3061 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
3062 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
3071 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
3079 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
3080 if (rack->rack_no_prr) in rack_log_doseg_done()
3083 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
3085 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
3086 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
3088 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
3090 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
3091 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
3092 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
3094 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
3095 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
3097 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
3098 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
3099 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
3100 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
3101 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
3103 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
3104 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
3105 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
3106 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
3107 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
3108 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
3109 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
3110 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
3119 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
3124 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
3125 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
3128 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
3132 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
3133 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3134 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3135 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3136 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3137 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, in rack_log_type_pacing_sizes()
3138 &tptosocket(tp)->so_snd, in rack_log_type_pacing_sizes()
3147 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3152 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3154 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3156 if (rack->rack_no_prr) in rack_log_type_just_return()
3159 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3161 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3164 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3165 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3166 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3167 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3168 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3169 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3171 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3172 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3173 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3174 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3184 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3188 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3190 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3193 if (rack->rack_no_prr) in rack_log_to_cancel()
3196 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3197 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3200 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3202 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3203 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3204 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3205 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3206 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3208 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3209 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3210 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3211 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3224 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3242 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3243 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3244 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3253 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3260 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3261 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3263 if (rack->rack_no_prr) in rack_log_to_processing()
3266 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3267 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3268 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3269 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3271 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3272 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3273 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3274 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3283 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3288 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3289 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3290 if (rack->rack_no_prr) in rack_log_to_prr()
3293 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3294 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3295 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3296 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3301 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3302 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3304 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3305 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3306 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3307 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3385 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3386 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3387 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3389 rack->rc_free_cnt--; in rack_alloc()
3399 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3407 if (rack->rc_free_cnt) { in rack_alloc()
3409 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3410 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3411 rack->rc_free_cnt--; in rack_alloc()
3421 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3423 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3424 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3440 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3441 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3443 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3444 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3454 rsm->r_limit_type = limit_type; in rack_alloc_limit()
3455 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3469 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3470 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3471 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3472 rack->rc_free_cnt--; in rack_free_trim()
3473 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
3481 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_free()
3482 KASSERT((rack->r_ctl.rc_app_limited_cnt > 0), in rack_free()
3483 ("app_cnt %u, rsm %p", rack->r_ctl.rc_app_limited_cnt, rsm)); in rack_free()
3484 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3486 if (rsm->r_limit_type) { in rack_free()
3488 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3490 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3491 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; in rack_free()
3492 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3493 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3494 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3496 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3498 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3499 rack->r_ctl.rc_resend = NULL; in rack_free()
3500 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3501 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3502 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3503 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3504 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3505 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3508 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3511 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3512 rack->rc_free_cnt++; in rack_free()
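rack_alloc(), rack_alloc_limit(), rack_free_trim(), and rack_free() above maintain a small per-connection cache of freed sendmap entries: frees push onto rc_free until RACK_FREE_CNT_MAX, allocations pop from it, and the trim pass hands entries above rack_free_cache back to the zone allocator. A minimal user-space sketch of that free-list pattern, with malloc()/free() standing in for the kernel zone:

#include <stdlib.h>

struct entry {
	struct entry *next;
	/* ... payload ... */
};

struct fcache {
	struct entry *head;	/* singly-linked free list */
	int cnt;		/* entries currently cached */
	int max;		/* stand-in for RACK_FREE_CNT_MAX */
};

static struct entry *
fcache_alloc(struct fcache *c)
{
	struct entry *e;

	if (c->head != NULL) {		/* reuse a cached entry */
		e = c->head;
		c->head = e->next;
		c->cnt--;
		return (e);
	}
	return (malloc(sizeof(*e)));	/* fall back to the allocator */
}

static void
fcache_free(struct fcache *c, struct entry *e)
{
	if (c->cnt + 1 > c->max) {	/* cache full: really free it */
		free(e);
		return;
	}
	e->next = c->head;		/* else push onto the free list */
	c->head = e;
	c->cnt++;
}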
3521 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3523 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3559 srtt = (uint64_t)tp->t_srtt; in rack_get_measure_window()
3601 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_enough_for_measurement()
3605 if ((tp->snd_max == tp->snd_una) || in rack_enough_for_measurement()
3606 (th_ack == tp->snd_max)){ in rack_enough_for_measurement()
3620 if (SEQ_GEQ(th_ack, tp->gput_ack)) { in rack_enough_for_measurement()
3630 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3631 if (SEQ_LT(th_ack, tp->gput_ack) && in rack_enough_for_measurement()
3632 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3636 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3637 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3646 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3647 tim = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3648 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
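rack_enough_for_measurement() above accepts a goodput sample once the cumulative ack covers the measurement window (gput_ack), rejects it while less than max(initial window, MIN_GP_WIN segments) has been acked, and otherwise requires at least rack_min_srtts smoothed RTTs to have elapsed outside recovery. A condensed sketch of those tests; plain comparisons stand in for the kernel's wrap-safe SEQ_* macros:

#include <stdint.h>

static int
enough_for_measurement(uint32_t th_ack, uint32_t gput_seq, uint32_t gput_ack,
    uint32_t init_win, uint32_t min_win, uint64_t elapsed_usec,
    uint64_t srtt_usec, uint32_t min_srtts, int in_recovery)
{
	if (th_ack >= gput_ack)
		return (1);	/* covered the whole measurement window */
	if ((th_ack - gput_seq) < (init_win > min_win ? init_win : min_win))
		return (0);	/* too little data acked yet */
	if (!in_recovery && elapsed_usec >= srtt_usec * min_srtts)
		return (1);	/* enough wall time has passed */
	return (0);
}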
3667 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3673 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3675 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3677 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3679 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3680 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3681 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3682 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3683 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3684 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3691 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3693 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3694 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3695 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3696 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3698 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3700 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3702 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3703 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3704 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3705 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3706 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3793 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3794 /* This is unlikely; we usually do not touch recovery */ in rack_validate_multipliers_at_or_above100()
3795 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3797 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3798 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3800 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3801 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3808 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3809 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3811 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3812 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3823 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3830 * to a new-reno flow. in rack_increase_bw_mul()
3835 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3836 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3843 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3848 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3850 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3851 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3853 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3855 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3859 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3860 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3861 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3862 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3863 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3865 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3866 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3868 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3870 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3874 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3875 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3876 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3877 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3878 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3880 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3882 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3884 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3887 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3888 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3889 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3890 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3891 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3895 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3897 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3898 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3900 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3902 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3903 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
3912 /*- in rack_get_decrease()
3914 * new_per = curper * (1 - B * norm_grad) in rack_get_decrease()
3917 * rtt_diff = input var current rtt-diff in rack_get_decrease()
3930 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_get_decrease()
3933 * reduce_by = (1000000 - inverse); in rack_get_decrease()
3939 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_get_decrease()
3942 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3947 perf = curper - 1; in rack_get_decrease()
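rack_get_decrease() above implements new_per = curper * (1 - B * norm_grad) in 1000000-scaled integer arithmetic, where norm_grad is the rtt difference normalized by the filtered minimum rtt and B is rack_gp_decrease_per. The same formula restated in floating point, as a sketch rather than the kernel's fixed-point version:

#include <stdint.h>

static uint32_t
timely_decrease(uint32_t curper, int32_t rtt_diff, uint32_t min_rtt, double B)
{
	double norm_grad = (double)rtt_diff / (double)min_rtt;
	double new_per = (double)curper * (1.0 - B * norm_grad);

	if (new_per > (double)curper)	/* mirrors the kernel's TSNH guard */
		return (curper - 1);
	return ((uint32_t)new_per);
}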
3957 * result = curper * (1 - (B * (1 - highrttthresh / gp_srtt))) in rack_decrease_highrtt()
3966 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3968 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_decrease_highrtt()
3969 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - in rack_decrease_highrtt()
3972 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
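rack_decrease_highrtt() above applies result = curper * (1 - B * (1 - highrttthresh / rtt)), with highrttthresh the filtered minimum rtt scaled by rack_gp_rtt_maxmul, again in 1000000-scaled integers. A floating-point sketch; the caller is assumed to invoke it only when rtt exceeds the threshold:

#include <stdint.h>

static uint32_t
highrtt_decrease(uint32_t curper, uint64_t rtt, uint64_t min_rtt,
    uint64_t maxmul, double B)
{
	double thresh = (double)(min_rtt * maxmul);
	double result = (double)curper * (1.0 - B * (1.0 - thresh / (double)rtt));

	return ((uint32_t)result);
}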
3993 uint32_t logged, new_per, ss_red, ca_red, rec_red, alt, val; in rack_decrease_bw_mul() local
3995 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
3997 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
3999 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
4000 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
4006 rtt_diff *= -1; in rack_decrease_bw_mul()
4009 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
4012 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
4013 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4014 if (alt < new_per) in rack_decrease_bw_mul()
4015 val = alt; in rack_decrease_bw_mul()
4019 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
4020 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
4021 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
4022 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
4024 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4027 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
4028 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
4031 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
4034 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
4035 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4036 if (alt < new_per) in rack_decrease_bw_mul()
4037 val = alt; in rack_decrease_bw_mul()
4041 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
4042 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
4043 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
4044 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
4047 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4050 logvar |= alt; in rack_decrease_bw_mul()
4056 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4061 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
4062 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
4064 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
4067 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
4068 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4069 if (alt < new_per) in rack_decrease_bw_mul()
4070 val = alt; in rack_decrease_bw_mul()
4074 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
4075 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
4076 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
4077 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
4079 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4083 logvar |= alt; in rack_decrease_bw_mul()
4089 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
4094 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
4095 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
4098 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
4099 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
4101 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
4102 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
4115 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_shrinks()
4121 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_shrinks()
4122 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4123 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_rtt_shrinks()
4125 log.u_bbr.flex6 = rack->rc_highly_buffered; in rack_log_rtt_shrinks()
4127 log.u_bbr.flex6 |= rack->forced_ack; in rack_log_rtt_shrinks()
4129 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; in rack_log_rtt_shrinks()
4131 log.u_bbr.flex6 |= rack->in_probe_rtt; in rack_log_rtt_shrinks()
4133 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; in rack_log_rtt_shrinks()
4134 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; in rack_log_rtt_shrinks()
4135 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; in rack_log_rtt_shrinks()
4136 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; in rack_log_rtt_shrinks()
4140 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; in rack_log_rtt_shrinks()
4142 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; in rack_log_rtt_shrinks()
4143 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_shrinks()
4144 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_rtt_shrinks()
4145 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_shrinks()
4146 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_shrinks()
4147 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_rtt_shrinks()
4148 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4149 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_shrinks()
4150 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_log_rtt_shrinks()
4153 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; in rack_log_rtt_shrinks()
4154 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_shrinks()
4155 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_shrinks()
4156 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_shrinks()
4158 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_rtt_shrinks()
4170 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); in rack_set_prtt_target()
4171 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { in rack_set_prtt_target()
4177 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); in rack_set_prtt_target()
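/*
 * Editor's sketch of the probe-rtt flight target above (illustrative;
 * the elided lines are assumed to compute a bandwidth-delay product
 * with bw in bytes/sec and rtt in microseconds): the target is the BDP
 * rounded up to a whole segment, floored at rack_timely_min_segs
 * segments.
 */
#include <stdint.h>

static uint32_t
prtt_target_sketch(uint64_t bw_bps, uint32_t rtt_us, uint32_t segsiz,
    uint32_t min_segs)
{
	uint64_t bwdp;
	uint32_t target;

	bwdp = (bw_bps * (uint64_t)rtt_us) / 1000000;	/* bytes in flight */
	target = (((uint32_t)bwdp + segsiz - 1) / segsiz) * segsiz;
	if (target < segsiz * min_segs)
		target = segsiz * min_segs;		/* enforce the floor */
	return (target);
}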
4198 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_enter_probertt()
4199 if (rack->rc_gp_dyn_mul == 0) in rack_enter_probertt()
4202 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { in rack_enter_probertt()
4206 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_enter_probertt()
4207 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_enter_probertt()
4215 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_enter_probertt()
4216 rack->rc_tp->snd_una, __LINE__, in rack_enter_probertt()
4219 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_enter_probertt()
4220 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_enter_probertt()
4221 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_enter_probertt()
4222 rack->r_ctl.rc_pace_min_segs); in rack_enter_probertt()
4223 rack->in_probe_rtt = 1; in rack_enter_probertt()
4224 rack->measure_saw_probe_rtt = 1; in rack_enter_probertt()
4225 rack->r_ctl.rc_time_probertt_starts = 0; in rack_enter_probertt()
4226 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; in rack_enter_probertt()
4228 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_enter_probertt()
4230 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); in rack_enter_probertt()
4231 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_enter_probertt()
4241 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_exit_probertt()
4242 rack->r_ctl.rc_pace_min_segs); in rack_exit_probertt()
4243 rack->in_probe_rtt = 0; in rack_exit_probertt()
4244 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_exit_probertt()
4245 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_exit_probertt()
4253 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_exit_probertt()
4254 rack->rc_tp->snd_una, __LINE__, in rack_exit_probertt()
4256 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in rack_exit_probertt()
4260 * probe-rtt. We probably are not interested in in rack_exit_probertt()
4263 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; in rack_exit_probertt()
4269 * We need to mark these as app-limited so we in rack_exit_probertt()
4272 rsm = tqhash_max(rack->r_ctl.tqh); in rack_exit_probertt()
4273 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_exit_probertt()
4274 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_exit_probertt()
4275 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_exit_probertt()
4282 if (rack->r_ctl.rc_end_appl) in rack_exit_probertt()
4283 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_exit_probertt()
4284 rack->r_ctl.rc_end_appl = rsm; in rack_exit_probertt()
4286 rsm->r_flags |= RACK_APP_LIMITED; in rack_exit_probertt()
4287 rack->r_ctl.rc_app_limited_cnt++; in rack_exit_probertt()
4299 rack->rc_gp_incr = 0; in rack_exit_probertt()
4300 rack->rc_gp_bwred = 0; in rack_exit_probertt()
4301 rack->rc_gp_timely_inc_cnt = 0; in rack_exit_probertt()
4302 rack->rc_gp_timely_dec_cnt = 0; in rack_exit_probertt()
4305 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { in rack_exit_probertt()
4306 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; in rack_exit_probertt()
4307 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; in rack_exit_probertt()
4309 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { in rack_exit_probertt()
4310 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; in rack_exit_probertt()
4311 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; in rack_exit_probertt()
4317 rack->r_ctl.rc_rtt_diff = 0; in rack_exit_probertt()
4320 rack->rc_tp->t_bytes_acked = 0; in rack_exit_probertt()
4321 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_exit_probertt()
4334 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_exit_probertt()
4338 rack->r_ctl.rc_gp_srtt); in rack_exit_probertt()
4342 rack->r_ctl.rc_entry_gp_rtt); in rack_exit_probertt()
4347 sum = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4349 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); in rack_exit_probertt()
4357 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4364 setval = rack->r_ctl.rc_gp_srtt; in rack_exit_probertt()
4365 if (setval > rack->r_ctl.rc_entry_gp_rtt) in rack_exit_probertt()
4366 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4373 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_exit_probertt()
4380 ebdp = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4383 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; in rack_exit_probertt()
4385 setto = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4386 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); in rack_exit_probertt()
4387 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { in rack_exit_probertt()
4389 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; in rack_exit_probertt()
4392 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); in rack_exit_probertt()
4395 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_exit_probertt()
4398 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; in rack_exit_probertt()
4399 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_exit_probertt()
4400 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_probertt()
4401 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_exit_probertt()
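/*
 * Editor's sketch of the exit-cwnd setting above (illustrative; the
 * lines computing the optional "extra" head room from ebdp are partly
 * elided, so that term is an assumption): cwnd is pinned near the
 * probe-rtt flight target, rounded up to a whole segment with a
 * minimum-segment floor, and ssthresh is set one byte below cwnd so
 * the connection resumes in congestion avoidance, not slow start.
 */
#include <stdint.h>

static void
prtt_exit_cwnd_sketch(uint32_t target, uint32_t extra, uint32_t segsiz,
    uint32_t min_segs, uint32_t *cwnd, uint32_t *ssthresh)
{
	uint32_t setto;

	setto = target + extra;			/* optional head room */
	*cwnd = ((setto + segsiz - 1) / segsiz) * segsiz;
	if (*cwnd < segsiz * min_segs)
		*cwnd = segsiz * min_segs;	/* never below the floor */
	*ssthresh = *cwnd - 1;			/* resume in CA */
}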
4407 /* Check in on probe-rtt */ in rack_check_probe_rtt()
4409 if (rack->rc_gp_filled == 0) { in rack_check_probe_rtt()
4410 /* We do not do p-rtt unless we have gp measurements */ in rack_check_probe_rtt()
4413 if (rack->in_probe_rtt) { in rack_check_probe_rtt()
4417 if (rack->r_ctl.rc_went_idle_time && in rack_check_probe_rtt()
4418 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { in rack_check_probe_rtt()
4424 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && in rack_check_probe_rtt()
4425 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { in rack_check_probe_rtt()
4430 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4435 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); in rack_check_probe_rtt()
4436 if (rack->rc_highly_buffered) in rack_check_probe_rtt()
4437 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); in rack_check_probe_rtt()
4439 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); in rack_check_probe_rtt()
4440 …if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) … in rack_check_probe_rtt()
4445 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) in rack_check_probe_rtt()
4446 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; in rack_check_probe_rtt()
4449 calc /= max(rack->r_ctl.rc_gp_srtt, 1); in rack_check_probe_rtt()
4454 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4456 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; in rack_check_probe_rtt()
4458 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) in rack_check_probe_rtt()
4459 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4464 if (rack->r_ctl.rc_time_probertt_starts == 0) { in rack_check_probe_rtt()
4466 rack->rc_highly_buffered) || in rack_check_probe_rtt()
4467 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > in rack_check_probe_rtt()
4468 rack->r_ctl.rc_target_probertt_flight)) { in rack_check_probe_rtt()
4473 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4475 rack->r_ctl.rc_time_probertt_starts = us_cts; in rack_check_probe_rtt()
4476 if (rack->r_ctl.rc_time_probertt_starts == 0) in rack_check_probe_rtt()
4477 rack->r_ctl.rc_time_probertt_starts = 1; in rack_check_probe_rtt()
4479 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_check_probe_rtt()
4484 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * in rack_check_probe_rtt()
4491 endtime += rack->r_ctl.rc_time_probertt_starts; in rack_check_probe_rtt()
4497 } else if ((rack->rc_skip_timely == 0) && in rack_check_probe_rtt()
4498 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && in rack_check_probe_rtt()
4499 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { in rack_check_probe_rtt()
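/*
 * Editor's sketch of the probe-rtt pacing ramp above (illustrative;
 * the line scaling the per-srtt step is elided from this excerpt, so
 * step_per_srtt is an assumption from context): the pacing percentage
 * decays by a fixed step for every gp_srtt spent in probe-rtt and is
 * clamped at rack_per_of_gp_lowthresh.
 */
#include <stdint.h>

static uint32_t
prtt_percentage_sketch(uint32_t elapsed_us, uint32_t gp_srtt_us,
    uint32_t start_per, uint32_t step_per_srtt, uint32_t low_thresh)
{
	uint32_t calc;

	calc = elapsed_us / (gp_srtt_us ? gp_srtt_us : 1);	/* srtts */
	calc *= step_per_srtt;				/* total reduction */
	if (calc > start_per || (start_per - calc) < low_thresh)
		return (low_thresh);			/* clamp at floor */
	return (start_per - calc);
}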
4512 if ((rack->rc_gp_dyn_mul == 0) || in rack_update_multiplier()
4513 (rack->use_fixed_rate) || in rack_update_multiplier()
4514 (rack->in_probe_rtt) || in rack_update_multiplier()
4515 (rack->rc_always_pace == 0)) { in rack_update_multiplier()
4519 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; in rack_update_multiplier()
4522 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; in rack_update_multiplier()
4524 up_bnd += rack->r_ctl.last_gp_comp_bw; in rack_update_multiplier()
4526 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; in rack_update_multiplier()
4528 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; in rack_update_multiplier()
4529 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { in rack_update_multiplier()
4542 if (rack->r_ctl.rc_no_push_at_mrtt > 1) in rack_update_multiplier()
4561 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4562 if (rack->rc_gp_bwred == 0) { in rack_update_multiplier()
4564 rack->rc_gp_bwred = 1; in rack_update_multiplier()
4565 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4567 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { in rack_update_multiplier()
4573 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || in rack_update_multiplier()
4574 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || in rack_update_multiplier()
4588 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4589 /* We are not incrementing, really a no-count */ in rack_update_multiplier()
4590 rack->rc_gp_incr = 0; in rack_update_multiplier()
4591 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4611 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4612 if (rack->rc_gp_saw_ss && in rack_update_multiplier()
4613 rack->r_ctl.rack_per_upper_bound_ss && in rack_update_multiplier()
4614 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { in rack_update_multiplier()
4621 if (rack->rc_gp_saw_ca && in rack_update_multiplier()
4622 rack->r_ctl.rack_per_upper_bound_ca && in rack_update_multiplier()
4623 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { in rack_update_multiplier()
4630 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4631 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4633 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { in rack_update_multiplier()
4650 rack->rc_gp_incr = 0; in rack_update_multiplier()
4651 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4652 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && in rack_update_multiplier()
4657 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4658 /* We are not incrementing, really a no-count */ in rack_update_multiplier()
4659 rack->rc_gp_incr = 0; in rack_update_multiplier()
4660 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4664 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4665 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
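/*
 * Editor's sketch of the goodput hysteresis band above (illustrative;
 * the "/ 100" scaling lines are elided in this excerpt and are assumed
 * from the multiplier names, with down_pct < 100): a new measurement
 * inside [last * (1 - down%), last * (1 + up%)] is treated as "no real
 * change", which keeps measurement noise from churning the multipliers.
 */
#include <stdint.h>

static int	/* -1 = b/w dropped, 0 = within the band, 1 = b/w rose */
gp_band_sketch(uint64_t last_bw, uint64_t cur_bw, uint64_t up_pct,
    uint64_t down_pct)
{
	uint64_t up_bnd, low_bnd;

	up_bnd = last_bw + (last_bw * up_pct) / 100;
	low_bnd = last_bw - (last_bw * down_pct) / 100;
	if (cur_bw < low_bnd)
		return (-1);
	if (cur_bw > up_bnd)
		return (1);
	return (0);
}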
4680 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * in rack_make_timely_judgement()
4684 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_make_timely_judgement()
4688 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4690 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4691 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4694 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4695 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4701 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4724 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); in rack_make_timely_judgement()
4729 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); in rack_make_timely_judgement()
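/*
 * Editor's sketch of the two RTT gates above (illustrative; the
 * divisor on the "increase" gate is truncated in this excerpt, so
 * mindiv is an assumption): an RTT at or above min_rtt * maxmul votes
 * "decrease", an RTT at or below min_rtt + min_rtt * minmul / mindiv
 * votes "increase", and anything between falls through to the
 * gradient (rtt_diff) logic elided here.
 */
#include <stdint.h>

#define TIMELY_DECREASE	2	/* hypothetical return codes */
#define TIMELY_INCREASE	1
#define TIMELY_GRADIENT	0

static int
timely_judge_sketch(uint32_t rtt, uint32_t min_rtt, uint32_t maxmul,
    uint32_t minmul, uint32_t mindiv)
{
	if ((uint64_t)rtt >= (uint64_t)min_rtt * maxmul)
		return (TIMELY_DECREASE);
	if ((uint64_t)rtt <=
	    (uint64_t)min_rtt + ((uint64_t)min_rtt * minmul) / mindiv)
		return (TIMELY_INCREASE);
	return (TIMELY_GRADIENT);
}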
4738 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4739 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4744 * |----------------| in rack_in_gp_window()
4745 * |-----| <or> in rack_in_gp_window()
4746 * |----| in rack_in_gp_window()
4747 * <or> |---| in rack_in_gp_window()
4750 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4751 SEQ_GT(rsm->r_end, tp->gput_seq)){ in rack_in_gp_window()
4754 * |--------------| in rack_in_gp_window()
4755 * |-------->| in rack_in_gp_window()
4758 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4759 SEQ_LT(rsm->r_start, tp->gput_ack) && in rack_in_gp_window()
4760 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4764 * |--------------| in rack_in_gp_window()
4765 * |-------->| in rack_in_gp_window()
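/*
 * Editor's sketch (illustrative): the three cases above, diagrams
 * included, reduce to an overlap test between the rsm's byte range
 * [r_start, r_end] and the measurement window [gput_seq, gput_ack]
 * done with wrap-safe sequence-space comparisons.
 */
#include <stdint.h>

/* Wrap-safe sequence comparisons, as in FreeBSD's tcp_seq.h. */
#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

static int
in_gp_window_sketch(uint32_t r_start, uint32_t r_end,
    uint32_t gput_seq, uint32_t gput_ack)
{
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LEQ(r_end, gput_ack))
		return (1);	/* wholly inside the window */
	if (SEQ_LT(r_start, gput_seq) && SEQ_GT(r_end, gput_seq))
		return (1);	/* straddles the front edge */
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LT(r_start, gput_ack) &&
	    SEQ_GEQ(r_end, gput_ack))
		return (1);	/* straddles the back edge */
	return (0);
}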
4776 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_mark_in_gp_win()
4784 rsm->r_flags |= RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4786 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4795 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_clear_gp_marks()
4797 rsm = tqhash_min(rack->r_ctl.tqh); in rack_clear_gp_marks()
4800 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ in rack_clear_gp_marks()
4801 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_clear_gp_marks()
4802 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_clear_gp_marks()
4812 if (tp->snd_una == tp->snd_max) { in rack_tend_gp_marks()
4816 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { in rack_tend_gp_marks()
4823 rsm = tqhash_min(rack->r_ctl.tqh); in rack_tend_gp_marks()
4826 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) in rack_tend_gp_marks()
4828 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4836 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_tend_gp_marks()
4844 * *before* we started our measurement. The rsm, if non-null in rack_tend_gp_marks()
4849 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4852 if (SEQ_GT(rsm->r_end, tp->gput_ack)) in rack_tend_gp_marks()
4854 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4861 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_gp_calc()
4873 log.u_bbr.delRate = rack->r_ctl.gp_bw; in rack_log_gp_calc()
4876 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gp_calc()
4877 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gp_calc()
4878 &rack->rc_inp->inp_socket->so_snd, in rack_log_gp_calc()
4880 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_gp_calc()
4894 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
4895 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_goodput_measurement()
4896 if (TSTMP_GEQ(us_cts, tp->gput_ts)) in rack_do_goodput_measurement()
4897 tim = us_cts - tp->gput_ts; in rack_do_goodput_measurement()
4900 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) in rack_do_goodput_measurement()
4901 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; in rack_do_goodput_measurement()
4916 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); in rack_do_goodput_measurement()
4928 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { in rack_do_goodput_measurement()
4962 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; in rack_do_goodput_measurement()
4963 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; in rack_do_goodput_measurement()
4964 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; in rack_do_goodput_measurement()
4965 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_do_goodput_measurement()
4973 bytes = (th_ack - tp->gput_seq); in rack_do_goodput_measurement()
4984 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4993 * IW - 2MSS. in rack_do_goodput_measurement()
4995 reqbytes -= (2 * segsiz); in rack_do_goodput_measurement()
4997 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
4999 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { in rack_do_goodput_measurement()
5001 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5009 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; in rack_do_goodput_measurement()
5010 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5012 rack->r_ctl.rc_rtt_diff = new_rtt_diff; in rack_do_goodput_measurement()
5014 if (rack->measure_saw_probe_rtt == 0) { in rack_do_goodput_measurement()
5021 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); in rack_do_goodput_measurement()
5022 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); in rack_do_goodput_measurement()
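			/*
			 * Editor's note (illustrative): the two lines above are a
			 * standard exponentially weighted moving average with
			 * alpha = 1/8:
			 *
			 *   rc_rtt_diff = (7 * rc_rtt_diff + new_rtt_diff) / 8
			 *
			 * so a single sample moves the smoothed RTT gradient by
			 * only an eighth of its value.
			 */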
5026 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5027 rack->r_ctl.rc_rtt_diff, in rack_do_goodput_measurement()
5028 rack->r_ctl.rc_prev_gp_srtt in rack_do_goodput_measurement()
5032 if (bytes_ps > rack->r_ctl.last_max_bw) { in rack_do_goodput_measurement()
5043 bytes_ps, rack->r_ctl.last_max_bw, 0, in rack_do_goodput_measurement()
5045 bytes_ps = rack->r_ctl.last_max_bw; in rack_do_goodput_measurement()
5048 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
5051 rack->r_ctl.gp_bw = bytes_ps; in rack_do_goodput_measurement()
5052 rack->rc_gp_filled = 1; in rack_do_goodput_measurement()
5053 rack->r_ctl.num_measurements = 1; in rack_do_goodput_measurement()
5054 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
5057 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
5060 if (tcp_in_hpts(rack->rc_tp) && in rack_do_goodput_measurement()
5061 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_goodput_measurement()
5064 * where we transition from un-paced to paced. in rack_do_goodput_measurement()
5070 tcp_hpts_remove(rack->rc_tp); in rack_do_goodput_measurement()
5071 rack->r_ctl.rc_hpts_flags = 0; in rack_do_goodput_measurement()
5072 rack->r_ctl.rc_last_output_to = 0; in rack_do_goodput_measurement()
5075 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { in rack_do_goodput_measurement()
5077 rack->r_ctl.gp_bw += bytes_ps; in rack_do_goodput_measurement()
5078 addpart = rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5079 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5080 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_do_goodput_measurement()
5082 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
5101 if (rack->r_ctl.num_measurements < 0xff) { in rack_do_goodput_measurement()
5102 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
5104 srtt = (uint64_t)tp->t_srtt; in rack_do_goodput_measurement()
5109 if (rack->r_ctl.rc_rack_min_rtt) in rack_do_goodput_measurement()
5110 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_do_goodput_measurement()
5125 * and non-dynamic... but considering lots of folks in rack_do_goodput_measurement()
5130 if (rack->rc_gp_dyn_mul == 0) { in rack_do_goodput_measurement()
5131 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5133 if (subpart < (rack->r_ctl.gp_bw / 2)) { in rack_do_goodput_measurement()
5152 subpart = rack->r_ctl.gp_bw / 2; in rack_do_goodput_measurement()
5157 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5158 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5170 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5181 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; in rack_do_goodput_measurement()
5185 if ((rack->measure_saw_probe_rtt == 0) || in rack_do_goodput_measurement()
5186 (bytes_ps > rack->r_ctl.gp_bw)) { in rack_do_goodput_measurement()
5188 * For probe-rtt we only add it in in rack_do_goodput_measurement()
5194 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5195 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
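			/*
			 * Editor's note (illustrative): the update above is a
			 * time-weighted moving average.  The old estimate gives up
			 * a share (subpart) sized by how long this measurement ran
			 * relative to the srtt, and the new sample contributes the
			 * matching share (addpart):
			 *
			 *   gp_bw = (gp_bw - subpart) + addpart
			 *
			 * During probe-rtt the sample is folded in only when it
			 * raises the estimate, since probe-rtt deliberately
			 * under-drives the path.
			 */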
5202 * or first slow-start that ensues. If we ever needed to watch in rack_do_goodput_measurement()
5206 if ((rack->rc_initial_ss_comp == 0) && in rack_do_goodput_measurement()
5207 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { in rack_do_goodput_measurement()
5211 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5217 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5218 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_do_goodput_measurement()
5220 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5225 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || in rack_do_goodput_measurement()
5226 (rack->r_ctl.last_gpest == 0)) { in rack_do_goodput_measurement()
5233 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5234 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5235 } else if (gp_est >= rack->r_ctl.last_gpest) { in rack_do_goodput_measurement()
5242 gp_est /= rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5243 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { in rack_do_goodput_measurement()
5247 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5253 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5255 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; in rack_do_goodput_measurement()
5257 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5262 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5263 if (rack->r_ctl.use_gp_not_last == 1) in rack_do_goodput_measurement()
5264 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5266 rack->r_ctl.last_gpest = bytes_ps; in rack_do_goodput_measurement()
5270 if ((rack->gp_ready == 0) && in rack_do_goodput_measurement()
5271 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_do_goodput_measurement()
5273 rack->gp_ready = 1; in rack_do_goodput_measurement()
5274 if (rack->dgp_on || in rack_do_goodput_measurement()
5275 rack->rack_hibeta) in rack_do_goodput_measurement()
5277 if (rack->defer_options) in rack_do_goodput_measurement()
5282 /* We do not update any multipliers if we are in or have seen a probe-rtt */ in rack_do_goodput_measurement()
5284 if ((rack->measure_saw_probe_rtt == 0) && in rack_do_goodput_measurement()
5285 rack->rc_gp_rtt_set) { in rack_do_goodput_measurement()
5286 if (rack->rc_skip_timely == 0) { in rack_do_goodput_measurement()
5288 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5289 rack->r_ctl.rc_rtt_diff); in rack_do_goodput_measurement()
5298 rack->r_ctl.gp_bw, /* delRate */ in rack_do_goodput_measurement()
5302 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5304 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; in rack_do_goodput_measurement()
5310 rack->rc_gp_rtt_set = 0; in rack_do_goodput_measurement()
5311 rack->rc_gp_saw_rec = 0; in rack_do_goodput_measurement()
5312 rack->rc_gp_saw_ca = 0; in rack_do_goodput_measurement()
5313 rack->rc_gp_saw_ss = 0; in rack_do_goodput_measurement()
5314 rack->rc_dragged_bottom = 0; in rack_do_goodput_measurement()
5322 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, in rack_do_goodput_measurement()
5329 if (tp->t_stats_gput_prev > 0) in rack_do_goodput_measurement()
5330 stats_voi_update_abs_s32(tp->t_stats, in rack_do_goodput_measurement()
5332 ((gput - tp->t_stats_gput_prev) * 100) / in rack_do_goodput_measurement()
5333 tp->t_stats_gput_prev); in rack_do_goodput_measurement()
5335 tp->t_stats_gput_prev = gput; in rack_do_goodput_measurement()
5337 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5342 * We don't do the other case, i.e. non-applimited, here since in rack_do_goodput_measurement()
5345 if (rack->r_ctl.rc_first_appl && in rack_do_goodput_measurement()
5346 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_goodput_measurement()
5347 rack->r_ctl.rc_app_limited_cnt && in rack_do_goodput_measurement()
5348 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && in rack_do_goodput_measurement()
5349 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > in rack_do_goodput_measurement()
5356 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_do_goodput_measurement()
5357 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_do_goodput_measurement()
5358 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
5359 rack->app_limited_needs_set = 0; in rack_do_goodput_measurement()
5360 tp->gput_seq = th_ack; in rack_do_goodput_measurement()
5361 if (rack->in_probe_rtt) in rack_do_goodput_measurement()
5362 rack->measure_saw_probe_rtt = 1; in rack_do_goodput_measurement()
5363 else if ((rack->measure_saw_probe_rtt) && in rack_do_goodput_measurement()
5364 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_do_goodput_measurement()
5365 rack->measure_saw_probe_rtt = 0; in rack_do_goodput_measurement()
5366 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { in rack_do_goodput_measurement()
5368 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_do_goodput_measurement()
5371 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); in rack_do_goodput_measurement()
5372 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_do_goodput_measurement()
5376 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5377 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_do_goodput_measurement()
5382 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_do_goodput_measurement()
5388 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { in rack_do_goodput_measurement()
5393 tp->t_flags |= TF_GPUTINPROG; in rack_do_goodput_measurement()
5395 * Now we need to find the timestamp of the send at tp->gput_seq in rack_do_goodput_measurement()
5398 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_do_goodput_measurement()
5399 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_do_goodput_measurement()
5401 /* Ok send-based limit is set */ in rack_do_goodput_measurement()
5402 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { in rack_do_goodput_measurement()
5409 tp->gput_seq = rsm->r_start; in rack_do_goodput_measurement()
5411 if (rsm->r_flags & RACK_ACKED) { in rack_do_goodput_measurement()
5414 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; in rack_do_goodput_measurement()
5415 tp->gput_seq = rsm->r_end; in rack_do_goodput_measurement()
5416 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_do_goodput_measurement()
5420 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5423 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5425 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_do_goodput_measurement()
5429 * send-limit set the current time, which in rack_do_goodput_measurement()
5430 * basically disables the send-limit. in rack_do_goodput_measurement()
5435 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_do_goodput_measurement()
5439 tp->gput_seq, in rack_do_goodput_measurement()
5440 tp->gput_ack, in rack_do_goodput_measurement()
5442 tp->gput_ts, in rack_do_goodput_measurement()
5443 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_do_goodput_measurement()
5446 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_do_goodput_measurement()
5468 tp->t_ccv.nsegs = nsegs; in rack_ack_received()
5469 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); in rack_ack_received()
5470 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { in rack_ack_received()
5473 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); in rack_ack_received()
5474 if (tp->t_ccv.bytes_this_ack > max) { in rack_ack_received()
5475 tp->t_ccv.bytes_this_ack = max; in rack_ack_received()
5479 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, in rack_ack_received()
5480 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); in rack_ack_received()
5482 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { in rack_ack_received()
5491 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); in rack_ack_received()
5492 rack->r_ctl.lt_seq = tp->snd_max; in rack_ack_received()
5493 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_ack_received()
5494 if (tmark >= rack->r_ctl.lt_timemark) { in rack_ack_received()
5495 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_ack_received()
5497 rack->r_ctl.lt_timemark = tmark; in rack_ack_received()
5498 rack->lt_bw_up = 0; in rack_ack_received()
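		/*
		 * Editor's note (illustrative): this closes out an active
		 * long-term bandwidth interval -- bytes newly covered by the
		 * cumulative ack go into lt_bw_bytes and the elapsed wall
		 * time into lt_bw_time, so rack_get_lt_bw() can later return
		 * roughly lt_bw_bytes / lt_bw_time.
		 */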
5501 if ((tp->t_flags & TF_GPUTINPROG) && in rack_ack_received()
5507 if (tp->snd_cwnd <= tp->snd_wnd) in rack_ack_received()
5508 tp->t_ccv.flags |= CCF_CWND_LIMITED; in rack_ack_received()
5510 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; in rack_ack_received()
5511 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_ack_received()
5512 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, in rack_ack_received()
5515 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5516 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; in rack_ack_received()
5517 tp->t_ccv.flags |= CCF_ABC_SENTAWND; in rack_ack_received()
5520 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_ack_received()
5521 tp->t_bytes_acked = 0; in rack_ack_received()
5523 prior_cwnd = tp->snd_cwnd; in rack_ack_received()
5524 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || in rack_ack_received()
5525 (rack_client_low_buf && rack->client_bufferlvl && in rack_ack_received()
5526 (rack->client_bufferlvl < rack_client_low_buf))) in rack_ack_received()
5527 labc_to_use = rack->rc_labc; in rack_ack_received()
5530 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_ack_received()
5537 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_ack_received()
5538 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_ack_received()
5539 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_ack_received()
5547 if (CC_ALGO(tp)->ack_received != NULL) { in rack_ack_received()
5549 tp->t_ccv.curack = th_ack; in rack_ack_received()
5550 tp->t_ccv.labc = labc_to_use; in rack_ack_received()
5551 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; in rack_ack_received()
5552 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); in rack_ack_received()
5555 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; in rack_ack_received()
5557 if (rack->r_must_retran) { in rack_ack_received()
5558 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { in rack_ack_received()
5563 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5564 rack->r_must_retran = 0; in rack_ack_received()
5565 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { in rack_ack_received()
5572 if (acked <= rack->r_ctl.rc_out_at_rto){ in rack_ack_received()
5573 rack->r_ctl.rc_out_at_rto -= acked; in rack_ack_received()
5575 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5580 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); in rack_ack_received()
5582 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5583 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; in rack_ack_received()
5585 if ((rack->rc_initial_ss_comp == 0) && in rack_ack_received()
5586 (tp->snd_cwnd >= tp->snd_ssthresh)) { in rack_ack_received()
5591 rack->rc_initial_ss_comp = 1; in rack_ack_received()
5600 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_partialack()
5609 if ((rack->r_ctl.rc_prr_sndcnt > 0) || in tcp_rack_partialack()
5610 rack->rack_no_prr) in tcp_rack_partialack()
5611 rack->r_wanted_output = 1; in tcp_rack_partialack()
5620 EXIT_RECOVERY(tp->t_flags); in rack_exit_recovery()
5629 orig_cwnd = tp->snd_cwnd; in rack_post_recovery()
5631 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_post_recovery()
5633 if (CC_ALGO(tp)->post_recovery != NULL) { in rack_post_recovery()
5634 tp->t_ccv.curack = th_ack; in rack_post_recovery()
5635 CC_ALGO(tp)->post_recovery(&tp->t_ccv); in rack_post_recovery()
5636 if (tp->snd_cwnd < tp->snd_ssthresh) { in rack_post_recovery()
5640 * snd_ssthresh per RFC-6582 (option 2). in rack_post_recovery()
5642 tp->snd_cwnd = tp->snd_ssthresh; in rack_post_recovery()
5645 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_post_recovery()
5652 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_post_recovery()
5653 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_post_recovery()
5654 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_post_recovery()
5658 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_post_recovery()
5663 if ((rack->rack_no_prr == 0) && in rack_post_recovery()
5664 (rack->no_prr_addback == 0) && in rack_post_recovery()
5665 (rack->r_ctl.rc_prr_sndcnt > 0)) { in rack_post_recovery()
5670 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { in rack_post_recovery()
5680 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), in rack_post_recovery()
5681 rack->r_ctl.rc_prr_sndcnt); in rack_post_recovery()
5683 rack->r_ctl.rc_prr_sndcnt = 0; in rack_post_recovery()
5687 tp->snd_recover = tp->snd_una; in rack_post_recovery()
5688 if (rack->r_ctl.dsack_persist) { in rack_post_recovery()
5689 rack->r_ctl.dsack_persist--; in rack_post_recovery()
5690 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_post_recovery()
5691 rack->r_ctl.num_dsack = 0; in rack_post_recovery()
5695 if (rack->rto_from_rec == 1) { in rack_post_recovery()
5696 rack->rto_from_rec = 0; in rack_post_recovery()
5697 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_post_recovery()
5698 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_post_recovery()
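/*
 * Editor's sketch of the PRR add-back above (illustrative): when
 * recovery ends with unspent PRR send credit and there is still data
 * queued, up to rack_prr_addbackmax segments of that credit are folded
 * back into cwnd so the flow does not stall right after exiting
 * recovery.
 */
#include <stdint.h>

static uint32_t
prr_addback_sketch(uint32_t cwnd, uint32_t prr_sndcnt, uint32_t maxseg,
    uint32_t addbackmax_segs)
{
	uint32_t cap;

	cap = maxseg * addbackmax_segs;		/* add-back ceiling */
	return (cwnd + ((prr_sndcnt < cap) ? prr_sndcnt : cap));
}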
5711 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); in rack_cong_signal()
5713 if (IN_RECOVERY(tp->t_flags) == 0) { in rack_cong_signal()
5715 ssthresh_enter = tp->snd_ssthresh; in rack_cong_signal()
5716 cwnd_enter = tp->snd_cwnd; in rack_cong_signal()
5719 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cong_signal()
5722 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5723 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5724 if (!IN_FASTRECOVERY(tp->t_flags)) { in rack_cong_signal()
5725 /* Check if this is the end of the initial start-up, i.e. the initial slow-start */ in rack_cong_signal()
5726 if (rack->rc_initial_ss_comp == 0) { in rack_cong_signal()
5728 rack->rc_initial_ss_comp = 1; in rack_cong_signal()
5730 rack->r_ctl.rc_prr_delivered = 0; in rack_cong_signal()
5731 rack->r_ctl.rc_prr_out = 0; in rack_cong_signal()
5732 rack->r_fast_output = 0; in rack_cong_signal()
5733 if (rack->rack_no_prr == 0) { in rack_cong_signal()
5734 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_cong_signal()
5737 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; in rack_cong_signal()
5738 tp->snd_recover = tp->snd_max; in rack_cong_signal()
5739 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5740 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5744 if (!IN_CONGRECOVERY(tp->t_flags) || in rack_cong_signal()
5749 SEQ_GEQ(ack, tp->snd_recover)) { in rack_cong_signal()
5750 EXIT_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5752 rack->r_fast_output = 0; in rack_cong_signal()
5753 tp->snd_recover = tp->snd_max + 1; in rack_cong_signal()
5754 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5755 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5759 tp->t_dupacks = 0; in rack_cong_signal()
5760 tp->t_bytes_acked = 0; in rack_cong_signal()
5761 rack->r_fast_output = 0; in rack_cong_signal()
5762 if (IN_RECOVERY(tp->t_flags)) in rack_cong_signal()
5764 orig_cwnd = tp->snd_cwnd; in rack_cong_signal()
5766 if (CC_ALGO(tp)->cong_signal == NULL) { in rack_cong_signal()
5768 tp->snd_ssthresh = max(2, in rack_cong_signal()
5769 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / in rack_cong_signal()
5771 tp->snd_cwnd = ctf_fixed_maxseg(tp); in rack_cong_signal()
5773 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5774 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5779 tp->snd_cwnd = tp->snd_cwnd_prev; in rack_cong_signal()
5780 tp->snd_ssthresh = tp->snd_ssthresh_prev; in rack_cong_signal()
5781 tp->snd_recover = tp->snd_recover_prev; in rack_cong_signal()
5782 if (tp->t_flags & TF_WASFRECOVERY) { in rack_cong_signal()
5783 ENTER_FASTRECOVERY(tp->t_flags); in rack_cong_signal()
5784 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5786 if (tp->t_flags & TF_WASCRECOVERY) { in rack_cong_signal()
5787 ENTER_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5788 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5790 tp->snd_nxt = tp->snd_max; in rack_cong_signal()
5791 tp->t_badrxtwin = 0; in rack_cong_signal()
5794 if ((CC_ALGO(tp)->cong_signal != NULL) && in rack_cong_signal()
5796 tp->t_ccv.curack = ack; in rack_cong_signal()
5797 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); in rack_cong_signal()
5799 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { in rack_cong_signal()
5801 rack->r_ctl.dsack_byte_cnt = 0; in rack_cong_signal()
5802 rack->r_ctl.retran_during_recovery = 0; in rack_cong_signal()
5803 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; in rack_cong_signal()
5804 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; in rack_cong_signal()
5805 rack->r_ent_rec_ns = 1; in rack_cong_signal()
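/*
 * Editor's sketch of the default RTO reaction above (illustrative; the
 * multiply back up by maxseg sits on a line elided from this excerpt
 * and is assumed): ssthresh drops to half the effective window,
 * expressed in whole segments with a two-segment floor, while cwnd
 * collapses to a single segment.
 */
#include <stdint.h>

static void
rto_reaction_sketch(uint32_t snd_wnd, uint32_t cwnd_in_use, uint32_t maxseg,
    uint32_t *ssthresh, uint32_t *cwnd)
{
	uint32_t win, segs;

	win = (snd_wnd < cwnd_in_use) ? snd_wnd : cwnd_in_use;
	segs = win / 2 / maxseg;	/* half the window, in segments */
	if (segs < 2)
		segs = 2;		/* two-segment floor */
	*ssthresh = segs * maxseg;
	*cwnd = maxseg;			/* restart from one segment */
}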
5816 if (CC_ALGO(tp)->after_idle != NULL) in rack_cc_after_idle()
5817 CC_ALGO(tp)->after_idle(&tp->t_ccv); in rack_cc_after_idle()
5819 if (tp->snd_cwnd == 1) in rack_cc_after_idle()
5820 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ in rack_cc_after_idle()
5829 if (tp->snd_cwnd < i_cwnd) { in rack_cc_after_idle()
5830 tp->snd_cwnd = i_cwnd; in rack_cc_after_idle()
5837 * - There is no delayed ack timer in progress.
5838 * - Our last ack wasn't a 0-sized window. We never want to delay
5839 * the ack that opens up a 0-sized window.
5840 * - LRO wasn't used for this segment. We make sure by checking that the
5842 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5846 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5847 ((tp->t_flags & TF_DELACK) == 0) && \
5848 (tlen <= tp->t_maxseg) && \
5849 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
5857 * Walk the time-ordered transmitted list looking for an rsm that is in rack_find_lowest_rsm()
5861 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_find_lowest_rsm()
5862 if (rsm->r_flags & RACK_ACKED) { in rack_find_lowest_rsm()
5883 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { in rack_find_high_nonack()
5884 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { in rack_find_high_nonack()
5903 * If reorder-fade is configured, then we track the last time we saw in rack_calc_thresh_rack()
5904 * re-ordering occur. If we reach the point where enough time has in rack_calc_thresh_rack()
5907 * Or if reorder-fade is 0, then once we see reordering we consider in rack_calc_thresh_rack()
5911 * In the end if lro is non-zero we add the extra time for in rack_calc_thresh_rack()
5916 if (rack->r_ctl.rc_reorder_ts) { in rack_calc_thresh_rack()
5917 if (rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5918 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { in rack_calc_thresh_rack()
5919 lro = cts - rack->r_ctl.rc_reorder_ts; in rack_calc_thresh_rack()
5931 if (lro > rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5933 rack->r_ctl.rc_reorder_ts = 0; in rack_calc_thresh_rack()
5943 if (rack->rc_rack_tmr_std_based == 0) { in rack_calc_thresh_rack()
5944 thresh = srtt + rack->r_ctl.rc_pkt_delay; in rack_calc_thresh_rack()
5946 /* Standards based pkt-delay is 1/4 srtt */ in rack_calc_thresh_rack()
5949 if (lro && (rack->rc_rack_tmr_std_based == 0)) { in rack_calc_thresh_rack()
5951 if (rack->r_ctl.rc_reorder_shift) in rack_calc_thresh_rack()
5952 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); in rack_calc_thresh_rack()
5956 if (rack->rc_rack_use_dsack && in rack_calc_thresh_rack()
5958 (rack->r_ctl.num_dsack > 0)) { in rack_calc_thresh_rack()
5963 thresh += rack->r_ctl.num_dsack * (srtt >> 2); in rack_calc_thresh_rack()
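/*
 * Editor's sketch of the RACK reorder threshold assembled above
 * (illustrative; the else-branch for a zero reorder shift and the
 * final min/max clamps are elided, so those details are assumptions):
 * the base is srtt plus a configured packet delay (srtt/4 in the
 * standards-based mode); observed reordering adds srtt >>
 * rc_reorder_shift; and, when DSACK-driven padding is enabled, each
 * recent DSACK adds another srtt/4.
 */
#include <stdint.h>

static uint32_t
rack_thresh_sketch(uint32_t srtt, uint32_t pkt_delay, int std_based,
    int reordering_seen, uint32_t reorder_shift, uint32_t num_dsack)
{
	uint32_t thresh;

	thresh = srtt + (std_based ? (srtt >> 2) : pkt_delay);
	if (reordering_seen && !std_based)
		thresh += (srtt >> (reorder_shift ? reorder_shift : 2));
	thresh += num_dsack * (srtt >> 2);	/* DSACK padding */
	return (thresh);
}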
5990 if (rack->r_ctl.rc_tlp_threshold) in rack_calc_thresh_tlp()
5991 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); in rack_calc_thresh_tlp()
5996 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_calc_thresh_tlp()
5997 len = rsm->r_end - rsm->r_start; in rack_calc_thresh_tlp()
5998 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { in rack_calc_thresh_tlp()
6000 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { in rack_calc_thresh_tlp()
6003 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6009 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { in rack_calc_thresh_tlp()
6015 * possible inter-packet delay (if any). in rack_calc_thresh_tlp()
6020 idx = rsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6021 nidx = prsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
6022 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { in rack_calc_thresh_tlp()
6024 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; in rack_calc_thresh_tlp()
6029 * Possibly compensate for delayed-ack. in rack_calc_thresh_tlp()
6037 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { in rack_calc_thresh_tlp()
6042 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
6050 if (thresh > tp->t_rxtcur) { in rack_calc_thresh_tlp()
6051 thresh = tp->t_rxtcur; in rack_calc_thresh_tlp()
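/*
 * Editor's sketch of the TLP threshold above (illustrative; the exact
 * constants on the elided lines are assumptions from the surrounding
 * code): the probe fires at roughly 2 * srtt plus either the peer's
 * delayed-ack allowance (single segment outstanding) or the observed
 * inter-packet send gap, and the result is capped at the current RTO.
 */
#include <stdint.h>

static uint32_t
tlp_thresh_sketch(uint32_t srtt, uint32_t extra_delay, uint32_t rxtcur)
{
	uint32_t thresh;

	thresh = 2 * srtt + extra_delay;	/* d-ack time or send gap */
	if (thresh > rxtcur)
		thresh = rxtcur;		/* never beyond the RTO */
	return (thresh);
}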
6075 if (rack->rc_rack_rtt) in rack_grab_rtt()
6076 return (rack->rc_rack_rtt); in rack_grab_rtt()
6077 else if (tp->t_srtt == 0) in rack_grab_rtt()
6079 return (tp->t_srtt); in rack_grab_rtt()
6095 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_recovery_mode()
6096 if (tqhash_empty(rack->r_ctl.tqh)) { in rack_check_recovery_mode()
6099 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_check_recovery_mode()
6104 if (rsm->r_flags & RACK_ACKED) { in rack_check_recovery_mode()
6109 idx = rsm->r_rtr_cnt - 1; in rack_check_recovery_mode()
6112 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { in rack_check_recovery_mode()
6115 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { in rack_check_recovery_mode()
6118 /* OK, if we reach here we are overdue and this guy can be sent */ in rack_check_recovery_mode()
6119 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_check_recovery_mode()
6130 t = (tp->t_srtt + (tp->t_rttvar << 2)); in rack_get_persists_timer_val()
6131 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], in rack_get_persists_timer_val()
6132 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); in rack_get_persists_timer_val()
6133 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; in rack_get_persists_timer_val()
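/*
 * Editor's sketch of the persist timeout above (illustrative): it is
 * the classic RTO formula srtt + 4 * rttvar scaled by the exponential
 * backoff table entry for t_rxtshift, then clamped to
 * [rack_persist_min, rack_persist_max] (RACK_TCPT_RANGESET also folds
 * in the timer slop, omitted here).
 */
#include <stdint.h>

static uint32_t
persist_timeout_sketch(uint32_t srtt, uint32_t rttvar, uint32_t backoff,
    uint32_t tmin, uint32_t tmax)
{
	uint64_t tt;

	tt = ((uint64_t)srtt + ((uint64_t)rttvar << 2)) * backoff;
	if (tt < tmin)
		tt = tmin;
	if (tt > tmax)
		tt = tmax;
	return ((uint32_t)tt);
}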
6153 if (rack->t_timers_stopped) { in rack_timer_start()
6157 if (rack->rc_in_persist) { in rack_timer_start()
6161 rack->rc_on_min_to = 0; in rack_timer_start()
6162 if ((tp->t_state < TCPS_ESTABLISHED) || in rack_timer_start()
6163 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_timer_start()
6166 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6171 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6178 * recently that's the discount we want to use (now - timer time). in rack_timer_start()
6180 * we want to use that (now - oldest-packet-last_transmit_time). in rack_timer_start()
6183 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6184 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) in rack_timer_start()
6185 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6187 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6189 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6191 if (SEQ_LT(tp->snd_una, tp->snd_max) || in rack_timer_start()
6192 sbavail(&tptosocket(tp)->so_snd)) { in rack_timer_start()
6193 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; in rack_timer_start()
6194 to = tp->t_rxtcur; in rack_timer_start()
6196 to -= time_since_sent; in rack_timer_start()
6198 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6202 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timer_start()
6207 * of the keep-init timeout. in rack_timer_start()
6212 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { in rack_timer_start()
6213 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); in rack_timer_start()
6215 max_time -= red; in rack_timer_start()
6227 if (rsm->r_flags & RACK_ACKED) { in rack_timer_start()
6235 if ((rsm->r_flags & RACK_SACK_PASSED) || in rack_timer_start()
6236 (rsm->r_flags & RACK_RWND_COLLAPSED) || in rack_timer_start()
6237 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in rack_timer_start()
6238 if ((tp->t_flags & TF_SENTFIN) && in rack_timer_start()
6239 ((tp->snd_max - tp->snd_una) == 1) && in rack_timer_start()
6240 (rsm->r_flags & RACK_HAS_FIN)) { in rack_timer_start()
6247 if ((rack->use_rack_rr == 0) && in rack_timer_start()
6248 (IN_FASTRECOVERY(tp->t_flags)) && in rack_timer_start()
6249 (rack->rack_no_prr == 0) && in rack_timer_start()
6250 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { in rack_timer_start()
6257 * get to use the rack-cheat. in rack_timer_start()
6263 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6264 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; in rack_timer_start()
6266 to = exp - cts; in rack_timer_start()
6267 if (to < rack->r_ctl.rc_min_to) { in rack_timer_start()
6268 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6269 if (rack->r_rr_config == 3) in rack_timer_start()
6270 rack->rc_on_min_to = 1; in rack_timer_start()
6273 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6274 if (rack->r_rr_config == 3) in rack_timer_start()
6275 rack->rc_on_min_to = 1; in rack_timer_start()
6280 if ((rack->rc_tlp_in_progress != 0) && in rack_timer_start()
6281 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { in rack_timer_start()
6288 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_timer_start()
6293 if (rsm->r_flags & RACK_HAS_FIN) { in rack_timer_start()
6298 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6300 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) in rack_timer_start()
6301 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6303 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6305 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6307 if (tp->t_srtt) { in rack_timer_start()
6308 if ((rack->rc_srtt_measure_made == 0) && in rack_timer_start()
6309 (tp->t_srtt == 1)) { in rack_timer_start()
6316 srtt_cur = tp->t_srtt; in rack_timer_start()
6327 tp->t_srtt && in rack_timer_start()
6333 to = thresh - time_since_sent; in rack_timer_start()
6335 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6340 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ in rack_timer_start()
6341 (uint32_t)rsm->r_tim_lastsent[idx], in rack_timer_start()
6357 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; in rack_timer_start()
6359 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; in rack_timer_start()
6369 if (rack->rc_in_persist == 0) { in rack_enter_persist()
6370 if (tp->t_flags & TF_GPUTINPROG) { in rack_enter_persist()
6375 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, in rack_enter_persist()
6379 if (rack->r_ctl.rc_scw) { in rack_enter_persist()
6380 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_enter_persist()
6381 rack->rack_scwnd_is_idle = 1; in rack_enter_persist()
6384 rack->r_ctl.rc_went_idle_time = cts; in rack_enter_persist()
6385 if (rack->r_ctl.rc_went_idle_time == 0) in rack_enter_persist()
6386 rack->r_ctl.rc_went_idle_time = 1; in rack_enter_persist()
6387 if (rack->lt_bw_up) { in rack_enter_persist()
6391 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); in rack_enter_persist()
6392 rack->r_ctl.lt_seq = snd_una; in rack_enter_persist()
6393 tmark = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time); in rack_enter_persist()
6394 if (tmark >= rack->r_ctl.lt_timemark) { in rack_enter_persist()
6395 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_enter_persist()
6397 rack->r_ctl.lt_timemark = tmark; in rack_enter_persist()
6398 rack->lt_bw_up = 0; in rack_enter_persist()
6399 rack->r_persist_lt_bw_off = 1; in rack_enter_persist()
6402 rack->r_ctl.persist_lost_ends = 0; in rack_enter_persist()
6403 rack->probe_not_answered = 0; in rack_enter_persist()
6404 rack->forced_ack = 0; in rack_enter_persist()
6405 tp->t_rxtshift = 0; in rack_enter_persist()
6406 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_enter_persist()
6407 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_enter_persist()
6408 rack->rc_in_persist = 1; in rack_enter_persist()
6415 if (tcp_in_hpts(rack->rc_tp)) { in rack_exit_persist()
6416 tcp_hpts_remove(rack->rc_tp); in rack_exit_persist()
6417 rack->r_ctl.rc_hpts_flags = 0; in rack_exit_persist()
6420 if (rack->r_ctl.rc_scw) { in rack_exit_persist()
6421 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_exit_persist()
6422 rack->rack_scwnd_is_idle = 0; in rack_exit_persist()
6425 if (rack->rc_gp_dyn_mul && in rack_exit_persist()
6426 (rack->use_fixed_rate == 0) && in rack_exit_persist()
6427 (rack->rc_always_pace)) { in rack_exit_persist()
6429 * Do we count this as if a probe-rtt just in rack_exit_persist()
6434 time_idle = cts - rack->r_ctl.rc_went_idle_time; in rack_exit_persist()
6438 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * in rack_exit_persist()
6444 /* Yes, we count it as a probe-rtt. */ in rack_exit_persist()
6448 if (rack->in_probe_rtt == 0) { in rack_exit_persist()
6449 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_persist()
6450 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6451 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6452 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6458 if (rack->r_persist_lt_bw_off) { in rack_exit_persist()
6460 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); in rack_exit_persist()
6461 rack->lt_bw_up = 1; in rack_exit_persist()
6462 rack->r_persist_lt_bw_off = 0; in rack_exit_persist()
6464 rack->rc_in_persist = 0; in rack_exit_persist()
6465 rack->r_ctl.rc_went_idle_time = 0; in rack_exit_persist()
6466 tp->t_rxtshift = 0; in rack_exit_persist()
6467 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_exit_persist()
6468 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_exit_persist()
6469 rack->r_ctl.rc_agg_delayed = 0; in rack_exit_persist()
6470 rack->r_early = 0; in rack_exit_persist()
6471 rack->r_late = 0; in rack_exit_persist()
6472 rack->r_ctl.rc_agg_early = 0; in rack_exit_persist()
6479 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_hpts_diag()
6483 log.u_bbr.flex1 = diag->p_nxt_slot; in rack_log_hpts_diag()
6484 log.u_bbr.flex2 = diag->p_cur_slot; in rack_log_hpts_diag()
6485 log.u_bbr.flex3 = diag->slot_req; in rack_log_hpts_diag()
6486 log.u_bbr.flex4 = diag->inp_hptsslot; in rack_log_hpts_diag()
6487 log.u_bbr.flex5 = diag->slot_remaining; in rack_log_hpts_diag()
6488 log.u_bbr.flex6 = diag->need_new_to; in rack_log_hpts_diag()
6489 log.u_bbr.flex7 = diag->p_hpts_active; in rack_log_hpts_diag()
6490 log.u_bbr.flex8 = diag->p_on_min_sleep; in rack_log_hpts_diag()
6492 log.u_bbr.epoch = diag->have_slept; in rack_log_hpts_diag()
6493 log.u_bbr.lt_epoch = diag->yet_to_sleep; in rack_log_hpts_diag()
6494 log.u_bbr.pkts_out = diag->co_ret; in rack_log_hpts_diag()
6495 log.u_bbr.applimited = diag->hpts_sleep_time; in rack_log_hpts_diag()
6496 log.u_bbr.delivered = diag->p_prev_slot; in rack_log_hpts_diag()
6497 log.u_bbr.inflight = diag->p_runningslot; in rack_log_hpts_diag()
6498 log.u_bbr.bw_inuse = diag->wheel_slot; in rack_log_hpts_diag()
6499 log.u_bbr.rttProp = diag->wheel_cts; in rack_log_hpts_diag()
6501 log.u_bbr.delRate = diag->maxslots; in rack_log_hpts_diag()
6502 log.u_bbr.cur_del_rate = diag->p_curtick; in rack_log_hpts_diag()
6504 log.u_bbr.cur_del_rate |= diag->p_lasttick; in rack_log_hpts_diag()
6505 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hpts_diag()
6506 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hpts_diag()
6507 &rack->rc_inp->inp_socket->so_snd, in rack_log_hpts_diag()
6517 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_wakeup()
6522 log.u_bbr.flex1 = sb->sb_flags; in rack_log_wakeup()
6524 log.u_bbr.flex3 = sb->sb_state; in rack_log_wakeup()
6527 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_wakeup()
6528 &rack->rc_inp->inp_socket->so_rcv, in rack_log_wakeup()
6529 &rack->rc_inp->inp_socket->so_snd, in rack_log_wakeup()
6549 if ((tp->t_state == TCPS_CLOSED) || in rack_start_hpts_timer()
6550 (tp->t_state == TCPS_LISTEN)) { in rack_start_hpts_timer()
6557 stopped = rack->rc_tmr_stopped; in rack_start_hpts_timer()
6558 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_start_hpts_timer()
6559 left = rack->r_ctl.rc_timer_exp - cts; in rack_start_hpts_timer()
6561 rack->r_ctl.rc_timer_exp = 0; in rack_start_hpts_timer()
6562 rack->r_ctl.rc_hpts_flags = 0; in rack_start_hpts_timer()
6566 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { in rack_start_hpts_timer()
6574 * by an ack aka the rc_agg_early (non-paced mode). in rack_start_hpts_timer()
6576 slot += rack->r_ctl.rc_agg_early; in rack_start_hpts_timer()
6577 rack->r_early = 0; in rack_start_hpts_timer()
6578 rack->r_ctl.rc_agg_early = 0; in rack_start_hpts_timer()
6580 if ((rack->r_late) && in rack_start_hpts_timer()
6581 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { in rack_start_hpts_timer()
6588 if (rack->r_ctl.rc_agg_delayed >= slot) { in rack_start_hpts_timer()
6597 rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot); in rack_start_hpts_timer()
6601 rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT); in rack_start_hpts_timer()
6605 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6606 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6609 rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot; in rack_start_hpts_timer()
6612 if (rack->r_ctl.rc_agg_delayed == 0) in rack_start_hpts_timer()
6613 rack->r_late = 0; in rack_start_hpts_timer()
6615 } else if (rack->r_late) { in rack_start_hpts_timer()
6619 max_red = (slot * rack->r_ctl.max_reduction) / 100; in rack_start_hpts_timer()
6620 if (max_red >= rack->r_ctl.rc_agg_delayed) { in rack_start_hpts_timer()
6621 slot -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6622 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6624 slot -= max_red; in rack_start_hpts_timer()
6625 rack->r_ctl.rc_agg_delayed -= max_red; in rack_start_hpts_timer()
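/*
 * Editor's sketch (not in the source; names are hypothetical): the
 * capped catch-up above in condensed form. A pacer that is running
 * late may shave at most max_reduction percent off the next slot;
 * whatever debt remains is carried in agg_delayed for later slots.
 */
static uint32_t
late_catchup_slot(uint32_t slot, uint32_t *agg_delayed, uint32_t max_reduction)
{
	uint32_t max_red = (slot * max_reduction) / 100;

	if (max_red >= *agg_delayed) {
		slot -= *agg_delayed;	/* debt fully repaid this slot */
		*agg_delayed = 0;
	} else {
		slot -= max_red;	/* partial repayment, keep the rest */
		*agg_delayed -= max_red;
	}
	return (slot);
}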
6628 if ((rack->r_use_hpts_min == 1) && in rack_start_hpts_timer()
6630 (rack->dgp_on == 1)) { in rack_start_hpts_timer()
6643 if (tp->t_flags & TF_DELACK) { in rack_start_hpts_timer()
6645 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; in rack_start_hpts_timer()
6651 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_start_hpts_timer()
6654 * wheel, we resort to a keep-alive timer if it's configured. in rack_start_hpts_timer()
6658 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_start_hpts_timer()
6659 (tp->t_state <= TCPS_CLOSING)) { in rack_start_hpts_timer()
6662 * del-ack), we don't have segments being paced. So in rack_start_hpts_timer()
6665 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_start_hpts_timer()
6666 /* Get the established keep-alive time */ in rack_start_hpts_timer()
6670 * Get the initial setup keep-alive time, in rack_start_hpts_timer()
6678 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; in rack_start_hpts_timer()
6679 if (rack->in_probe_rtt) { in rack_start_hpts_timer()
6683 * exit probe-rtt and initiate a keep-alive ack. in rack_start_hpts_timer()
6684 * This will get us out of probe-rtt and update in rack_start_hpts_timer()
6685 * our min-rtt. in rack_start_hpts_timer()
6692 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { in rack_start_hpts_timer()
6698 * keep-alive, delayed_ack we keep track of what was left in rack_start_hpts_timer()
6706 * Hack alert for now we can't time-out over 2,147,483 in rack_start_hpts_timer()
6712 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; in rack_start_hpts_timer()
6715 if ((rack->gp_ready == 0) && in rack_start_hpts_timer()
6716 (rack->use_fixed_rate == 0) && in rack_start_hpts_timer()
6718 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { in rack_start_hpts_timer()
6738 * TF2_MBUF_QUEUE_READY - This flags says that I am busy in rack_start_hpts_timer()
6743 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction in rack_start_hpts_timer()
6758 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); in rack_start_hpts_timer()
6760 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6761 rack->r_ctl.rc_last_output_to = us_cts + slot; in rack_start_hpts_timer()
6770 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; in rack_start_hpts_timer()
6776 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || in rack_start_hpts_timer()
6777 (IN_RECOVERY(tp->t_flags))) { in rack_start_hpts_timer()
6778 if (rack->r_rr_config != 3) in rack_start_hpts_timer()
6779 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6780 else if (rack->rc_pace_dnd) { in rack_start_hpts_timer()
6789 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6792 if (rack->rc_ack_can_sendout_data) { in rack_start_hpts_timer()
6796 * backout the changes (used for non-paced in rack_start_hpts_timer()
6799 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | in rack_start_hpts_timer()
6802 if ((rack->use_rack_rr) && in rack_start_hpts_timer()
6803 (rack->r_rr_config < 2) && in rack_start_hpts_timer()
6807 * t-o if the t-o does not cause a send. in rack_start_hpts_timer()
6828 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6836 if (SEQ_GT(tp->snd_max, tp->snd_una)) { in rack_start_hpts_timer()
6837 panic("tp:%p rack:%p tlts:%d cts:%u slot:%u pto:%u -- no timer started?", in rack_start_hpts_timer()
6842 rack->rc_tmr_stopped = 0; in rack_start_hpts_timer()
6856 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mark_lost()
6857 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { in rack_mark_lost()
6858 /* Got up to all that were marked sack-passed */ in rack_mark_lost()
6861 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_mark_lost()
6862 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_mark_lost()
6865 nrsm->r_flags |= RACK_WAS_LOST; in rack_mark_lost()
6866 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_mark_lost()
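/*
 * Editor's sketch (not in the source): the loss test above reduced to
 * its core. A sack-passed segment is declared lost once the current
 * time reaches its last transmit time plus the reorder threshold.
 */
static int
rsm_deadline_passed(uint32_t lastsent, uint32_t thresh, uint32_t cts)
{
	uint32_t exp = lastsent + thresh;	/* moment it counts as lost */

	return (TSTMP_GEQ(cts, exp));		/* wrap-safe comparison */
}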
6888 * retransmissions; if so we will enter fast-recovery. The output in rack_timeout_rack()
6895 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_rack()
6897 rack->rc_on_min_to = 0; in rack_timeout_rack()
6903 rack->r_ctl.rc_resend = rsm; in rack_timeout_rack()
6904 rack->r_timer_override = 1; in rack_timeout_rack()
6905 if (rack->use_rack_rr) { in rack_timeout_rack()
6909 * over-ride pacing, i.e. rrr takes precedence in rack_timeout_rack()
6914 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timeout_rack()
6917 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; in rack_timeout_rack()
6933 if (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space) { in rack_adjust_orig_mlen()
6940 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), in rack_adjust_orig_mlen()
6942 rsm->m, in rack_adjust_orig_mlen()
6944 (intmax_t)M_TRAILINGROOM(rsm->m), in rack_adjust_orig_mlen()
6945 rsm->orig_t_space, in rack_adjust_orig_mlen()
6946 rsm->orig_m_len, in rack_adjust_orig_mlen()
6947 rsm->m->m_len)); in rack_adjust_orig_mlen()
6948 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); in rack_adjust_orig_mlen()
6949 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_orig_mlen()
6951 if (rsm->m->m_len < rsm->orig_m_len) { in rack_adjust_orig_mlen()
6956 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), in rack_adjust_orig_mlen()
6958 rsm->m, rsm->m->m_len, in rack_adjust_orig_mlen()
6959 rsm, rsm->orig_m_len, in rack_adjust_orig_mlen()
6960 rsm->soff)); in rack_adjust_orig_mlen()
6961 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) in rack_adjust_orig_mlen()
6962 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); in rack_adjust_orig_mlen()
6964 rsm->soff = 0; in rack_adjust_orig_mlen()
6965 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_orig_mlen()
6967 } else if (rsm->m->m_len > rsm->orig_m_len) { in rack_adjust_orig_mlen()
6969 rsm, rsm->m); in rack_adjust_orig_mlen()
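/*
 * Editor's note (illustrative): two distinct events are reconciled
 * above. If M_TRAILINGROOM() shrank, sbcompress() appended new bytes
 * into this mbuf's cluster, so the recorded orig_m_len grows by
 * exactly the trailing room consumed. If m_len shrank instead, the
 * front of the mbuf was trimmed (e.g. by sbdrop()), so soff must drop
 * by the same number of bytes to keep naming the same payload byte.
 */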
6980 if (src_rsm->m && in rack_setup_offset_for_rsm()
6981 ((src_rsm->orig_m_len != src_rsm->m->m_len) || in rack_setup_offset_for_rsm()
6982 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { in rack_setup_offset_for_rsm()
6986 m = src_rsm->m; in rack_setup_offset_for_rsm()
6987 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); in rack_setup_offset_for_rsm()
6988 while (soff >= m->m_len) { in rack_setup_offset_for_rsm()
6990 soff -= m->m_len; in rack_setup_offset_for_rsm()
6991 m = m->m_next; in rack_setup_offset_for_rsm()
6997 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
6998 (src_rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
6999 &src_rsm->soff); in rack_setup_offset_for_rsm()
7000 src_rsm->orig_m_len = src_rsm->m->m_len; in rack_setup_offset_for_rsm()
7001 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); in rack_setup_offset_for_rsm()
7002 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
7003 (rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
7004 &rsm->soff); in rack_setup_offset_for_rsm()
7005 rsm->orig_m_len = rsm->m->m_len; in rack_setup_offset_for_rsm()
7006 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
7010 rsm->m = m; in rack_setup_offset_for_rsm()
7011 rsm->soff = soff; in rack_setup_offset_for_rsm()
7012 rsm->orig_m_len = m->m_len; in rack_setup_offset_for_rsm()
7013 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
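/*
 * Editor's sketch (hypothetical helper, not in the source): the walk
 * above in isolation. Given a starting mbuf and a byte offset into
 * the chain, advance until the offset lands inside the current mbuf.
 */
static struct mbuf *
advance_to_offset(struct mbuf *m, uint32_t *soff)
{
	while (m != NULL && *soff >= m->m_len) {
		*soff -= m->m_len;	/* consume this mbuf's bytes */
		m = m->m_next;
	}
	return (m);
}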
7022 nrsm->r_start = start; in rack_clone_rsm()
7023 nrsm->r_end = rsm->r_end; in rack_clone_rsm()
7024 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; in rack_clone_rsm()
7025 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; in rack_clone_rsm()
7026 nrsm->r_flags = rsm->r_flags; in rack_clone_rsm()
7027 nrsm->r_dupack = rsm->r_dupack; in rack_clone_rsm()
7028 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; in rack_clone_rsm()
7029 nrsm->r_rtr_bytes = 0; in rack_clone_rsm()
7030 nrsm->r_fas = rsm->r_fas; in rack_clone_rsm()
7031 nrsm->r_bas = rsm->r_bas; in rack_clone_rsm()
7032 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); in rack_clone_rsm()
7033 nrsm->r_just_ret = rsm->r_just_ret; in rack_clone_rsm()
7034 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { in rack_clone_rsm()
7035 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; in rack_clone_rsm()
7038 if (nrsm->r_flags & RACK_HAS_SYN) in rack_clone_rsm()
7039 nrsm->r_flags &= ~RACK_HAS_SYN; in rack_clone_rsm()
7041 if (rsm->r_flags & RACK_HAS_FIN) in rack_clone_rsm()
7042 rsm->r_flags &= ~RACK_HAS_FIN; in rack_clone_rsm()
7044 if (rsm->r_flags & RACK_HAD_PUSH) in rack_clone_rsm()
7045 rsm->r_flags &= ~RACK_HAD_PUSH; in rack_clone_rsm()
7047 if (nrsm->r_flags & RACK_APP_LIMITED) in rack_clone_rsm()
7048 rack->r_ctl.rc_app_limited_cnt++; in rack_clone_rsm()
7050 nrsm->r_hw_tls = rsm->r_hw_tls; in rack_clone_rsm()
7058 KASSERT(((rsm->m != NULL) || in rack_clone_rsm()
7059 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), in rack_clone_rsm()
7060 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); in rack_clone_rsm()
7061 if (rsm->m) in rack_clone_rsm()
7080 rack_log_map_chg(rack->rc_tp, rack, NULL, in rack_merge_rsm()
7081 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); in rack_merge_rsm()
7082 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); in rack_merge_rsm()
7083 if (l_rsm->r_dupack < r_rsm->r_dupack) in rack_merge_rsm()
7084 l_rsm->r_dupack = r_rsm->r_dupack; in rack_merge_rsm()
7085 if (r_rsm->r_rtr_bytes) in rack_merge_rsm()
7086 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; in rack_merge_rsm()
7087 if (r_rsm->r_in_tmap) { in rack_merge_rsm()
7089 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); in rack_merge_rsm()
7090 r_rsm->r_in_tmap = 0; in rack_merge_rsm()
7094 if (r_rsm->r_flags & RACK_HAS_FIN) in rack_merge_rsm()
7095 l_rsm->r_flags |= RACK_HAS_FIN; in rack_merge_rsm()
7096 if (r_rsm->r_flags & RACK_TLP) in rack_merge_rsm()
7097 l_rsm->r_flags |= RACK_TLP; in rack_merge_rsm()
7098 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) in rack_merge_rsm()
7099 l_rsm->r_flags |= RACK_RWND_COLLAPSED; in rack_merge_rsm()
7100 if ((r_rsm->r_flags & RACK_APP_LIMITED) && in rack_merge_rsm()
7101 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_merge_rsm()
7103 * If both are app-limited then let the in rack_merge_rsm()
7107 l_rsm->r_flags |= RACK_APP_LIMITED; in rack_merge_rsm()
7108 r_rsm->r_flags &= ~RACK_APP_LIMITED; in rack_merge_rsm()
7109 if (r_rsm == rack->r_ctl.rc_first_appl) in rack_merge_rsm()
7110 rack->r_ctl.rc_first_appl = l_rsm; in rack_merge_rsm()
7112 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); in rack_merge_rsm()
7127 if (l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < in rack_merge_rsm()
7128 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { in rack_merge_rsm()
7129 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; in rack_merge_rsm()
7136 if (l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) in rack_merge_rsm()
7137 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; in rack_merge_rsm()
7139 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { in rack_merge_rsm()
7141 r_rsm->r_limit_type = l_rsm->r_limit_type; in rack_merge_rsm()
7142 l_rsm->r_limit_type = 0; in rack_merge_rsm()
7145 l_rsm->r_flags |= RACK_MERGED; in rack_merge_rsm()
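/*
 * Editor's worked example (illustrative): merging adjacent entries
 * [1000,2000) and [2000,3000) yields one entry [1000,3000). As above,
 * the survivor keeps the larger dupack count, the later last-sent and
 * ack-arrival times, the summed retransmit bytes, and the union of
 * the FIN/TLP/collapse flags, so no per-range state is lost.
 */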
7170 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_timeout_tlp()
7176 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_tlp()
7183 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_tlp()
7184 rack->r_might_revert = 0; in rack_timeout_tlp()
7185 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_tlp()
7187 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_tlp()
7189 avail = sbavail(&so->so_snd); in rack_timeout_tlp()
7190 out = tp->snd_max - tp->snd_una; in rack_timeout_tlp()
7191 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { in rack_timeout_tlp()
7196 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { in rack_timeout_tlp()
7197 rack->r_ctl.dsack_persist--; in rack_timeout_tlp()
7198 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_tlp()
7199 rack->r_ctl.num_dsack = 0; in rack_timeout_tlp()
7203 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_tlp()
7204 (rack->r_ctl.rc_tlp_cnt_out == 1)) { in rack_timeout_tlp()
7213 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_tlp()
7214 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_tlp()
7215 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_tlp()
7216 tp->gput_seq, in rack_timeout_tlp()
7223 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) in rack_timeout_tlp()
7228 amm = avail - out; in rack_timeout_tlp()
7231 if ((amm + out) > tp->snd_wnd) { in rack_timeout_tlp()
7239 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_timeout_tlp()
7241 if (rack->rack_no_prr == 0) { in rack_timeout_tlp()
7242 if (out + amm <= tp->snd_wnd) { in rack_timeout_tlp()
7243 rack->r_ctl.rc_prr_sndcnt = amm; in rack_timeout_tlp()
7244 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7250 /* Set the send-new override */ in rack_timeout_tlp()
7251 if (out + amm <= tp->snd_wnd) in rack_timeout_tlp()
7252 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7256 rack->r_ctl.rc_tlpsend = NULL; in rack_timeout_tlp()
7262 * Ok we need to arrange the last un-acked segment to be re-sent, or in rack_timeout_tlp()
7263 * optionally the first un-acked segment. in rack_timeout_tlp()
7267 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timeout_tlp()
7269 rsm = tqhash_max(rack->r_ctl.tqh); in rack_timeout_tlp()
7270 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { in rack_timeout_tlp()
7285 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) in rack_timeout_tlp()
7286 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); in rack_timeout_tlp()
7288 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_tlp()
7295 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { in rack_timeout_tlp()
7310 (rsm->r_end - ctf_fixed_maxseg(tp))); in rack_timeout_tlp()
7313 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_timeout_tlp()
7315 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_timeout_tlp()
7320 if (rsm->r_in_tmap) { in rack_timeout_tlp()
7321 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_timeout_tlp()
7322 nrsm->r_in_tmap = 1; in rack_timeout_tlp()
7326 rack->r_ctl.rc_tlpsend = rsm; in rack_timeout_tlp()
7330 rack->r_timer_override = 1; in rack_timeout_tlp()
7331 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7334 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7351 tp->t_flags &= ~TF_DELACK; in rack_timeout_delack()
7352 tp->t_flags |= TF_ACKNOW; in rack_timeout_delack()
7354 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_timeout_delack()
7363 t_template = tcpip_maketemplate(rack->rc_inp); in rack_send_ack_challange()
7365 if (rack->forced_ack == 0) { in rack_send_ack_challange()
7366 rack->forced_ack = 1; in rack_send_ack_challange()
7367 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); in rack_send_ack_challange()
7369 rack->probe_not_answered = 1; in rack_send_ack_challange()
7371 tcp_respond(rack->rc_tp, t_template->tt_ipgen, in rack_send_ack_challange()
7372 &t_template->tt_t, (struct mbuf *)NULL, in rack_send_ack_challange()
7373 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); in rack_send_ack_challange()
7375 /* This does send an ack so kill any D-ack timer */ in rack_send_ack_challange()
7376 if (rack->rc_tp->t_flags & TF_DELACK) in rack_send_ack_challange()
7377 rack->rc_tp->t_flags &= ~TF_DELACK; in rack_send_ack_challange()
7397 if (rack->rc_in_persist == 0) in rack_timeout_persist()
7402 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7403 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_persist()
7416 if (tp->t_rxtshift >= V_tcp_retries && in rack_timeout_persist()
7417 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || in rack_timeout_persist()
7418 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { in rack_timeout_persist()
7421 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7422 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7425 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && in rack_timeout_persist()
7426 tp->snd_una == tp->snd_max) in rack_timeout_persist()
7428 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; in rack_timeout_persist()
7433 if (tp->t_state > TCPS_CLOSE_WAIT && in rack_timeout_persist()
7434 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { in rack_timeout_persist()
7437 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7438 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7443 if (rack->probe_not_answered) { in rack_timeout_persist()
7445 rack->r_ctl.persist_lost_ends++; in rack_timeout_persist()
7450 if (tp->t_rxtshift < V_tcp_retries) in rack_timeout_persist()
7451 tp->t_rxtshift++; in rack_timeout_persist()
7470 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; in rack_timeout_keepalive()
7473 * Keep-alive timer went off; send something or drop connection if in rack_timeout_keepalive()
7477 if (tp->t_state < TCPS_ESTABLISHED) in rack_timeout_keepalive()
7479 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timeout_keepalive()
7480 tp->t_state <= TCPS_CLOSING) { in rack_timeout_keepalive()
7481 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) in rack_timeout_keepalive()
7488 * number tp->snd_una-1 causes the transmitted zero-length in rack_timeout_keepalive()
7501 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_keepalive()
7513 * un-acked. in rack_remxt_tmr()
7518 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_remxt_tmr()
7521 rack->r_timer_override = 1; in rack_remxt_tmr()
7522 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_remxt_tmr()
7523 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; in rack_remxt_tmr()
7524 rack->r_late = 0; in rack_remxt_tmr()
7525 rack->r_early = 0; in rack_remxt_tmr()
7526 rack->r_ctl.rc_agg_delayed = 0; in rack_remxt_tmr()
7527 rack->r_ctl.rc_agg_early = 0; in rack_remxt_tmr()
7528 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_remxt_tmr()
7530 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { in rack_remxt_tmr()
7533 * more than rack_rxt_scoreboard_clear_thresh time-outs. in rack_remxt_tmr()
7535 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7536 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7537 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7543 * mark SACK-PASS on anything not acked here. in rack_remxt_tmr()
7552 * sacks that come floating in will "re-ack" the data. in rack_remxt_tmr()
7557 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7559 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_remxt_tmr()
7560 rsm->r_dupack = 0; in rack_remxt_tmr()
7563 /* We must re-add it back to the tlist */ in rack_remxt_tmr()
7565 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_remxt_tmr()
7567 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); in rack_remxt_tmr()
7569 rsm->r_in_tmap = 1; in rack_remxt_tmr()
7571 if (rsm->r_flags & RACK_ACKED) in rack_remxt_tmr()
7572 rsm->r_flags |= RACK_WAS_ACKED; in rack_remxt_tmr()
7573 …rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_W… in rack_remxt_tmr()
7574 rsm->r_flags |= RACK_MUST_RXT; in rack_remxt_tmr()
7577 rack->r_ctl.rc_considered_lost = 0; in rack_remxt_tmr()
7578 /* Clear the count (we just un-acked them) */ in rack_remxt_tmr()
7579 rack->r_ctl.rc_sacked = 0; in rack_remxt_tmr()
7580 rack->r_ctl.rc_sacklast = NULL; in rack_remxt_tmr()
7582 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7583 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7584 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7585 rack->r_ctl.rc_prr_sndcnt = 0; in rack_remxt_tmr()
7587 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7588 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7589 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7590 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && in rack_remxt_tmr()
7591 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_remxt_tmr()
7593 * For non-sack customers new data in rack_remxt_tmr()
7597 rack->r_must_retran = 1; in rack_remxt_tmr()
7598 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, in rack_remxt_tmr()
7599 rack->r_ctl.rc_sacked); in rack_remxt_tmr()
7607 tp->t_rxtcur = RACK_REXMTVAL(tp); in rack_convert_rtts()
7608 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_convert_rtts()
7609 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); in rack_convert_rtts()
7611 if (tp->t_rxtcur > rack_rto_max) { in rack_convert_rtts()
7612 tp->t_rxtcur = rack_rto_max; in rack_convert_rtts()
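/*
 * Editor's sketch (illustrative): the retransmit timeout recomputed
 * above is the classic RTO = srtt + 4 * rttvar (RACK_REXMTVAL),
 * floored at rack_rto_min and capped at rack_rto_max, in microseconds.
 */
static uint32_t
rack_rto_sketch(uint32_t srtt, uint32_t rttvar, uint32_t lo, uint32_t hi)
{
	uint32_t rto = srtt + (rttvar << 2);	/* srtt + 4*rttvar */

	if (rto < lo)
		rto = lo;
	if (rto > hi)
		rto = hi;
	return (rto);
}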
7622 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cc_conn_init()
7623 srtt = tp->t_srtt; in rack_cc_conn_init()
7629 if ((srtt == 0) && (tp->t_srtt != 0)) in rack_cc_conn_init()
7637 if (tp->snd_ssthresh < tp->snd_wnd) { in rack_cc_conn_init()
7638 tp->snd_ssthresh = tp->snd_wnd; in rack_cc_conn_init()
7644 if (rc_init_window(rack) < tp->snd_cwnd) in rack_cc_conn_init()
7645 tp->snd_cwnd = rc_init_window(rack); in rack_cc_conn_init()
7649 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7660 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_rxt()
7661 (tp->t_rxtshift)) { in rack_timeout_rxt()
7668 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_rxt()
7669 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_rxt()
7670 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_rxt()
7671 tp->gput_seq, in rack_timeout_rxt()
7677 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_rxt()
7679 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; in rack_timeout_rxt()
7680 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_rxt()
7681 rack->rc_ack_required = 1; in rack_timeout_rxt()
7682 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_rxt()
7683 if (IN_RECOVERY(tp->t_flags) && in rack_timeout_rxt()
7684 (rack->rto_from_rec == 0)) { in rack_timeout_rxt()
7691 rack->rto_from_rec = 1; in rack_timeout_rxt()
7692 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; in rack_timeout_rxt()
7694 if (IN_FASTRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7695 tp->t_flags |= TF_WASFRECOVERY; in rack_timeout_rxt()
7697 tp->t_flags &= ~TF_WASFRECOVERY; in rack_timeout_rxt()
7698 if (IN_CONGRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7699 tp->t_flags |= TF_WASCRECOVERY; in rack_timeout_rxt()
7701 tp->t_flags &= ~TF_WASCRECOVERY; in rack_timeout_rxt()
7702 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_timeout_rxt()
7703 (tp->snd_una == tp->snd_max)) { in rack_timeout_rxt()
7707 if (rack->r_ctl.dsack_persist) { in rack_timeout_rxt()
7708 rack->r_ctl.dsack_persist--; in rack_timeout_rxt()
7709 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_rxt()
7710 rack->r_ctl.num_dsack = 0; in rack_timeout_rxt()
7722 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timeout_rxt()
7726 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_rxt()
7729 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && in rack_timeout_rxt()
7730 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { in rack_timeout_rxt()
7742 if ((rack->r_ctl.rc_resend == NULL) || in rack_timeout_rxt()
7743 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_timeout_rxt()
7750 tp->t_rxtshift++; in rack_timeout_rxt()
7753 if (tp->t_rxtshift > V_tcp_retries) { in rack_timeout_rxt()
7756 tp->t_rxtshift = V_tcp_retries; in rack_timeout_rxt()
7759 MPASS(tp->t_softerror >= 0); in rack_timeout_rxt()
7760 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; in rack_timeout_rxt()
7763 if (tp->t_state == TCPS_SYN_SENT) { in rack_timeout_rxt()
7768 tp->snd_cwnd = 1; in rack_timeout_rxt()
7769 } else if (tp->t_rxtshift == 1) { in rack_timeout_rxt()
7776 * End-to-End Network Path Properties" by Allman and Paxson in rack_timeout_rxt()
7779 tp->snd_cwnd_prev = tp->snd_cwnd; in rack_timeout_rxt()
7780 tp->snd_ssthresh_prev = tp->snd_ssthresh; in rack_timeout_rxt()
7781 tp->snd_recover_prev = tp->snd_recover; in rack_timeout_rxt()
7782 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); in rack_timeout_rxt()
7783 tp->t_flags |= TF_PREVVALID; in rack_timeout_rxt()
7784 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) in rack_timeout_rxt()
7785 tp->t_flags &= ~TF_PREVVALID; in rack_timeout_rxt()
7787 if ((tp->t_state == TCPS_SYN_SENT) || in rack_timeout_rxt()
7788 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_timeout_rxt()
7789 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7791 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7793 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, in rack_timeout_rxt()
7794 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); in rack_timeout_rxt()
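/*
 * Editor's sketch (approximation, not the source table): tcp_backoff[]
 * is essentially powers of two with a cap, so each successive timeout
 * roughly doubles the base RTO before RACK_TCPT_RANGESET() clamps the
 * result into [rack_rto_min, rack_rto_max] with the timer slop.
 */
static uint64_t
backed_off_rto(uint64_t base_rto, int rxtshift, int max_shift)
{
	if (rxtshift > max_shift)
		rxtshift = max_shift;	/* the table stops growing here */
	return (base_rto << rxtshift);	/* base * 2^rxtshift */
}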
7803 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; in rack_timeout_rxt()
7810 ((tp->t_state == TCPS_ESTABLISHED) || in rack_timeout_rxt()
7811 (tp->t_state == TCPS_FIN_WAIT_1))) { in rack_timeout_rxt()
7814 * 1448 -> 1188 -> 524) should be given 2 chances to recover in rack_timeout_rxt()
7815 * before further clamping down. 'tp->t_rxtshift % 2 == 0' in rack_timeout_rxt()
7818 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == in rack_timeout_rxt()
7820 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && in rack_timeout_rxt()
7821 tp->t_rxtshift % 2 == 0)) { in rack_timeout_rxt()
7823 * Enter Path MTU Black-hole Detection mechanism: - in rack_timeout_rxt()
7824 * Disable Path MTU Discovery (IP "DF" bit). - in rack_timeout_rxt()
7828 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { in rack_timeout_rxt()
7830 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7832 tp->t_pmtud_saved_maxseg = tp->t_maxseg; in rack_timeout_rxt()
7841 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { in rack_timeout_rxt()
7843 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; in rack_timeout_rxt()
7847 tp->t_maxseg = V_tcp_v6mssdflt; in rack_timeout_rxt()
7852 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7860 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { in rack_timeout_rxt()
7862 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; in rack_timeout_rxt()
7866 tp->t_maxseg = V_tcp_mssdflt; in rack_timeout_rxt()
7871 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7884 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && in rack_timeout_rxt()
7885 (tp->t_rxtshift >= 6)) { in rack_timeout_rxt()
7886 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7887 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7888 tp->t_maxseg = tp->t_pmtud_saved_maxseg; in rack_timeout_rxt()
7889 if (tp->t_maxseg < V_tcp_mssdflt) { in rack_timeout_rxt()
7895 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7897 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
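/*
 * Editor's worked example (assuming stock sysctl defaults, e.g.
 * net.inet.tcp.pmtud_blackhole_mss of 1200 and net.inet.tcp.mssdflt
 * of 536): an IPv4 flow stalled at a 1448-byte MSS first steps down
 * to 1200, then to 536 on a later even rxtshift; if rxtshift reaches
 * 6 with no progress, the code above restores t_pmtud_saved_maxseg
 * and re-enables PMTU discovery, treating the stall as plain loss.
 */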
7905 * our third SYN to work around some broken terminal servers in rack_timeout_rxt()
7908 * unknown-to-them TCP options. in rack_timeout_rxt()
7910 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && in rack_timeout_rxt()
7911 (tp->t_rxtshift == 3)) in rack_timeout_rxt()
7912 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); in rack_timeout_rxt()
7919 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { in rack_timeout_rxt()
7921 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_timeout_rxt()
7926 tp->t_rttvar += tp->t_srtt; in rack_timeout_rxt()
7927 tp->t_srtt = 0; in rack_timeout_rxt()
7929 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_timeout_rxt()
7930 tp->snd_recover = tp->snd_max; in rack_timeout_rxt()
7931 tp->t_flags |= TF_ACKNOW; in rack_timeout_rxt()
7932 tp->t_rtttime = 0; in rack_timeout_rxt()
7933 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); in rack_timeout_rxt()
7942 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); in rack_process_timers()
7944 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_timers()
7945 (tp->t_flags & TF_GPUTINPROG)) { in rack_process_timers()
7954 bytes = tp->gput_ack - tp->gput_seq; in rack_process_timers()
7955 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_process_timers()
7956 bytes += tp->gput_seq - tp->snd_una; in rack_process_timers()
7957 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_process_timers()
7963 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_process_timers()
7964 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_process_timers()
7965 tp->gput_seq, in rack_process_timers()
7967 tp->t_flags &= ~TF_GPUTINPROG; in rack_process_timers()
7973 if (tp->t_state == TCPS_LISTEN) { in rack_process_timers()
7975 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) in rack_process_timers()
7980 rack->rc_on_min_to) { in rack_process_timers()
7983 * are on a min-timeout (which means rrr_conf = 3) in rack_process_timers()
7988 * If its on a normal rack timer (non-min) then in rack_process_timers()
7993 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_process_timers()
7996 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_process_timers()
7997 ret = -1; in rack_process_timers()
8008 ret = -2; in rack_process_timers()
8015 * no-sack wakeup on since we no longer have a PKT_OUTPUT in rack_process_timers()
8018 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; in rack_process_timers()
8019 ret = -3; in rack_process_timers()
8020 left = rack->r_ctl.rc_timer_exp - cts; in rack_process_timers()
8026 rack->rc_tmr_stopped = 0; in rack_process_timers()
8027 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; in rack_process_timers()
8031 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8032 rack->r_fast_output = 0; in rack_process_timers()
8035 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8036 rack->r_fast_output = 0; in rack_process_timers()
8039 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
8040 rack->r_fast_output = 0; in rack_process_timers()
8058 flags_on_entry = rack->r_ctl.rc_hpts_flags; in rack_timer_cancel()
8060 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_timer_cancel()
8061 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || in rack_timer_cancel()
8062 ((tp->snd_max - tp->snd_una) == 0))) { in rack_timer_cancel()
8063 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8066 if ((tp->snd_max - tp->snd_una) == 0) in rack_timer_cancel()
8067 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_cancel()
8070 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_timer_cancel()
8071 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_cancel()
8072 if (tcp_in_hpts(rack->rc_tp) && in rack_timer_cancel()
8073 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { in rack_timer_cancel()
8079 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
8082 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); in rack_timer_cancel()
8093 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_stopall()
8094 rack->t_timers_stopped = 1; in rack_stopall()
8109 rack->rc_in_persist = 1; in rack_stop_all_timers()
8111 if (tcp_in_hpts(rack->rc_tp)) { in rack_stop_all_timers()
8112 tcp_hpts_remove(rack->rc_tp); in rack_stop_all_timers()
8122 rsm->r_rtr_cnt++; in rack_update_rsm()
8123 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { in rack_update_rsm()
8124 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; in rack_update_rsm()
8125 rsm->r_flags |= RACK_OVERMAX; in rack_update_rsm()
8127 rsm->r_act_rxt_cnt++; in rack_update_rsm()
8130 rsm->r_dupack = 0; in rack_update_rsm()
8131 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { in rack_update_rsm()
8132 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8133 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); in rack_update_rsm()
8135 if (rsm->r_flags & RACK_WAS_LOST) { in rack_update_rsm()
8141 rsm->r_flags &= ~RACK_WAS_LOST; in rack_update_rsm()
8142 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_update_rsm()
8144 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_update_rsm()
8145 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_update_rsm()
8147 rack->r_ctl.rc_considered_lost = 0; in rack_update_rsm()
8149 idx = rsm->r_rtr_cnt - 1; in rack_update_rsm()
8150 rsm->r_tim_lastsent[idx] = ts; in rack_update_rsm()
8153 * in snduna <-> snd_max. in rack_update_rsm()
8155 rsm->r_fas = ctf_flight_size(rack->rc_tp, in rack_update_rsm()
8156 rack->r_ctl.rc_sacked); in rack_update_rsm()
8157 if (rsm->r_flags & RACK_ACKED) { in rack_update_rsm()
8159 rsm->r_flags &= ~RACK_ACKED; in rack_update_rsm()
8160 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8162 if (rsm->r_in_tmap) { in rack_update_rsm()
8163 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8164 rsm->r_in_tmap = 0; in rack_update_rsm()
8168 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8169 rsm->r_in_tmap = 1; in rack_update_rsm()
8170 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); in rack_update_rsm()
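/*
 * Editor's note: (len + segsiz - 1) / segsiz is ceiling division, so
 * r_bas records how many segments the block spans; e.g. 3000 bytes at
 * a 1448-byte segsiz gives r_bas = 3.
 */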
8172 if (rsm->r_flags & RACK_MUST_RXT) { in rack_update_rsm()
8173 if (rack->r_must_retran) in rack_update_rsm()
8174 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8175 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_update_rsm()
8180 rack->r_must_retran = 0; in rack_update_rsm()
8181 rack->r_ctl.rc_out_at_rto = 0; in rack_update_rsm()
8183 rsm->r_flags &= ~RACK_MUST_RXT; in rack_update_rsm()
8186 rsm->r_flags &= ~RACK_RWND_COLLAPSED; in rack_update_rsm()
8187 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_update_rsm()
8189 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_update_rsm()
8190 rsm->r_flags |= RACK_WAS_SACKPASS; in rack_update_rsm()
8199 * We (re-)transmitted starting at rsm->r_start for some length in rack_update_entry()
8208 c_end = rsm->r_start + len; in rack_update_entry()
8209 if (SEQ_GEQ(c_end, rsm->r_end)) { in rack_update_entry()
8215 if (c_end == rsm->r_end) { in rack_update_entry()
8222 act_len = rsm->r_end - rsm->r_start; in rack_update_entry()
8223 *lenp = (len - act_len); in rack_update_entry()
8224 return (rsm->r_end); in rack_update_entry()
8248 nrsm->r_dupack = 0; in rack_update_entry()
8251 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_update_entry()
8253 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_update_entry()
8258 if (rsm->r_in_tmap) { in rack_update_entry()
8259 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_update_entry()
8260 nrsm->r_in_tmap = 1; in rack_update_entry()
8262 rsm->r_flags &= (~RACK_HAS_FIN); in rack_update_entry()
8300 * -- i.e. return if err != 0 or should we pretend we sent it? -- in rack_log_output()
8306 * We don't log errors -- we could but snd_max does not in rack_log_output()
8318 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_output()
8319 snd_una = tp->snd_una; in rack_log_output()
8320 snd_max = tp->snd_max; in rack_log_output()
8328 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) in rack_log_output()
8334 /* Are we sending an old segment to induce an ack (keep-alive)? */ in rack_log_output()
8344 len = end - seq_out; in rack_log_output()
8352 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_log_output()
8353 rack->r_ctl.rc_prr_out += len; in rack_log_output()
8369 rsm->r_flags = RACK_HAS_FIN|add_flag; in rack_log_output()
8371 rsm->r_flags = add_flag; in rack_log_output()
8374 rsm->r_hw_tls = 1; in rack_log_output()
8375 rsm->r_tim_lastsent[0] = cts; in rack_log_output()
8376 rsm->r_rtr_cnt = 1; in rack_log_output()
8377 rsm->r_act_rxt_cnt = 0; in rack_log_output()
8378 rsm->r_rtr_bytes = 0; in rack_log_output()
8381 rsm->r_flags |= RACK_HAS_SYN; in rack_log_output()
8383 rsm->r_start = seq_out; in rack_log_output()
8384 rsm->r_end = rsm->r_start + len; in rack_log_output()
8386 rsm->r_dupack = 0; in rack_log_output()
8392 rsm->m = s_mb; in rack_log_output()
8393 rsm->soff = s_moff; in rack_log_output()
8396 * reflected in snduna <-> snd_max in rack_log_output()
8398 rsm->r_fas = (ctf_flight_size(rack->rc_tp, in rack_log_output()
8399 rack->r_ctl.rc_sacked) + in rack_log_output()
8400 (rsm->r_end - rsm->r_start)); in rack_log_output()
8401 if ((rack->rc_initial_ss_comp == 0) && in rack_log_output()
8402 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { in rack_log_output()
8403 rack->r_ctl.ss_hi_fs = rsm->r_fas; in rack_log_output()
8405 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ in rack_log_output()
8406 if (rsm->m) { in rack_log_output()
8407 if (rsm->m->m_len <= rsm->soff) { in rack_log_output()
8413 * within rsm->m. But if the sbsndptr was in rack_log_output()
8419 lm = rsm->m; in rack_log_output()
8420 while (lm->m_len <= rsm->soff) { in rack_log_output()
8421 rsm->soff -= lm->m_len; in rack_log_output()
8422 lm = lm->m_next; in rack_log_output()
8423 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", in rack_log_output()
8424 __func__, rack, s_moff, s_mb, rsm->soff)); in rack_log_output()
8426 rsm->m = lm; in rack_log_output()
8428 rsm->orig_m_len = rsm->m->m_len; in rack_log_output()
8429 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_log_output()
8431 rsm->orig_m_len = 0; in rack_log_output()
8432 rsm->orig_t_space = 0; in rack_log_output()
8434 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); in rack_log_output()
8439 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_log_output()
8441 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_log_output()
8446 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_log_output()
8447 rsm->r_in_tmap = 1; in rack_log_output()
8448 if (rsm->r_flags & RACK_IS_PCM) { in rack_log_output()
8449 rack->r_ctl.pcm_i.send_time = cts; in rack_log_output()
8450 rack->r_ctl.pcm_i.eseq = rsm->r_end; in rack_log_output()
8452 if (rack->pcm_in_progress == 0) in rack_log_output()
8453 rack->r_ctl.pcm_i.sseq = rsm->r_start; in rack_log_output()
8461 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_log_output()
8462 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { in rack_log_output()
8465 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_log_output()
8467 prsm->r_one_out_nr = 1; in rack_log_output()
8475 if (hintrsm && (hintrsm->r_start == seq_out)) { in rack_log_output()
8482 if ((rsm) && (rsm->r_start == seq_out)) { in rack_log_output()
8492 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); in rack_log_output()
8494 if (rsm->r_start == seq_out) { in rack_log_output()
8502 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { in rack_log_output()
8520 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_log_output()
8522 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_log_output()
8527 if (rsm->r_in_tmap) { in rack_log_output()
8528 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_log_output()
8529 nrsm->r_in_tmap = 1; in rack_log_output()
8531 rsm->r_flags &= (~RACK_HAS_FIN); in rack_log_output()
8543 if (seq_out == tp->snd_max) { in rack_log_output()
8545 } else if (SEQ_LT(seq_out, tp->snd_max)) { in rack_log_output()
8547 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", in rack_log_output()
8548 seq_out, len, tp->snd_una, tp->snd_max); in rack_log_output()
8550 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_log_output()
8552 rsm, rsm->r_start, rsm->r_end); in rack_log_output()
8561 * Hmm beyond sndmax? (only if we are using the new rtt-pack in rack_log_output()
8565 seq_out, len, tp->snd_max, tp); in rack_log_output()
8579 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8580 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { in tcp_rack_xmit_timer()
8581 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; in tcp_rack_xmit_timer()
8583 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8584 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { in tcp_rack_xmit_timer()
8585 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; in tcp_rack_xmit_timer()
8587 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in tcp_rack_xmit_timer()
8588 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) in tcp_rack_xmit_timer()
8589 rack->r_ctl.rc_gp_lowrtt = us_rtt; in tcp_rack_xmit_timer()
8590 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) in tcp_rack_xmit_timer()
8591 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in tcp_rack_xmit_timer()
8595 (rsm->r_just_ret) || in tcp_rack_xmit_timer()
8596 (rsm->r_one_out_nr && in tcp_rack_xmit_timer()
8597 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { in tcp_rack_xmit_timer()
8604 * the r_one_out_nr. If it was a CUM-ACK and in tcp_rack_xmit_timer()
8611 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8612 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { in tcp_rack_xmit_timer()
8613 if (rack->r_ctl.rack_rs.confidence == 0) { in tcp_rack_xmit_timer()
8618 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8619 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8620 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8629 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8630 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8631 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8634 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); in tcp_rack_xmit_timer()
8635 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; in tcp_rack_xmit_timer()
8636 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; in tcp_rack_xmit_timer()
8637 rack->r_ctl.rack_rs.rs_rtt_cnt++; in tcp_rack_xmit_timer()
8641 * Collect new round-trip time estimate
8650 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) in tcp_rack_xmit_timer_commit()
8653 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { in tcp_rack_xmit_timer_commit()
8655 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; in tcp_rack_xmit_timer_commit()
8656 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { in tcp_rack_xmit_timer_commit()
8658 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; in tcp_rack_xmit_timer_commit()
8659 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { in tcp_rack_xmit_timer_commit()
8661 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / in tcp_rack_xmit_timer_commit()
8662 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); in tcp_rack_xmit_timer_commit()
8665 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); in tcp_rack_xmit_timer_commit()
8671 if (rack->rc_gp_rtt_set == 0) { in tcp_rack_xmit_timer_commit()
8676 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8677 rack->rc_gp_rtt_set = 1; in tcp_rack_xmit_timer_commit()
8678 } else if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8680 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); in tcp_rack_xmit_timer_commit()
8681 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; in tcp_rack_xmit_timer_commit()
8683 if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8688 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8689 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8691 if (rack->rc_highly_buffered == 0) { in tcp_rack_xmit_timer_commit()
8697 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { in tcp_rack_xmit_timer_commit()
8698 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, in tcp_rack_xmit_timer_commit()
8699 rack->r_ctl.rc_highest_us_rtt, in tcp_rack_xmit_timer_commit()
8700 rack->r_ctl.rc_lowest_us_rtt, in tcp_rack_xmit_timer_commit()
8702 rack->rc_highly_buffered = 1; in tcp_rack_xmit_timer_commit()
8706 if ((rack->r_ctl.rack_rs.confidence) || in tcp_rack_xmit_timer_commit()
8707 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { in tcp_rack_xmit_timer_commit()
8712 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8714 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8715 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8716 if (rack->r_ctl.rc_lowest_us_rtt == 0) in tcp_rack_xmit_timer_commit()
8717 rack->r_ctl.rc_lowest_us_rtt = 1; in tcp_rack_xmit_timer_commit()
8720 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_xmit_timer_commit()
8721 if (tp->t_srtt != 0) { in tcp_rack_xmit_timer_commit()
8730 delta = tp->t_srtt - rtt; in tcp_rack_xmit_timer_commit()
8732 tp->t_srtt -= (tp->t_srtt >> 3); in tcp_rack_xmit_timer_commit()
8734 tp->t_srtt += (rtt >> 3); in tcp_rack_xmit_timer_commit()
8735 if (tp->t_srtt <= 0) in tcp_rack_xmit_timer_commit()
8736 tp->t_srtt = 1; in tcp_rack_xmit_timer_commit()
8739 delta = -delta; in tcp_rack_xmit_timer_commit()
8741 tp->t_rttvar -= (tp->t_rttvar >> 3); in tcp_rack_xmit_timer_commit()
8743 tp->t_rttvar += (delta >> 3); in tcp_rack_xmit_timer_commit()
8744 if (tp->t_rttvar <= 0) in tcp_rack_xmit_timer_commit()
8745 tp->t_rttvar = 1; in tcp_rack_xmit_timer_commit()
8748 * No rtt measurement yet - use the unsmoothed rtt. Set the in tcp_rack_xmit_timer_commit()
8752 tp->t_srtt = rtt; in tcp_rack_xmit_timer_commit()
8753 tp->t_rttvar = rtt >> 1; in tcp_rack_xmit_timer_commit()
8755 rack->rc_srtt_measure_made = 1; in tcp_rack_xmit_timer_commit()
8757 if (tp->t_rttupdated < UCHAR_MAX) in tcp_rack_xmit_timer_commit()
8758 tp->t_rttupdated++; in tcp_rack_xmit_timer_commit()
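/*
 * Editor's note (illustrative): the shift arithmetic above implements,
 * in microsecond units with a 1/8 gain on both estimators,
 *   srtt   <- srtt   + (rtt - srtt_old) / 8
 *   rttvar <- rttvar + (|srtt_old - rtt| - rttvar) / 8
 * with each result floored at 1 so neither estimator reaches zero.
 */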
8762 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); in tcp_rack_xmit_timer_commit()
8768 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8769 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8775 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8776 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8779 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8781 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8783 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in tcp_rack_xmit_timer_commit()
8788 * tick of rounding and 1 extra tick because of +-1/2 tick in tcp_rack_xmit_timer_commit()
8794 tp->t_rxtshift = 0; in tcp_rack_xmit_timer_commit()
8795 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in tcp_rack_xmit_timer_commit()
8796 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); in tcp_rack_xmit_timer_commit()
8798 tp->t_softerror = 0; in tcp_rack_xmit_timer_commit()
8806 * Apply the inbound us-rtt to the filter at us_cts. in rack_apply_updated_usrtt()
8810 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_apply_updated_usrtt()
8811 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, in rack_apply_updated_usrtt()
8821 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { in rack_apply_updated_usrtt()
8823 rack->rc_gp_dyn_mul && in rack_apply_updated_usrtt()
8824 (rack->use_fixed_rate == 0) && in rack_apply_updated_usrtt()
8825 (rack->rc_always_pace)) { in rack_apply_updated_usrtt()
8828 * to the time that we would have entered probe-rtt. in rack_apply_updated_usrtt()
8830 * has entered probe-rtt. Lets go in now too. in rack_apply_updated_usrtt()
8836 if ((rack->in_probe_rtt == 0) && in rack_apply_updated_usrtt()
8837 (rack->rc_skip_timely == 0) && in rack_apply_updated_usrtt()
8838 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { in rack_apply_updated_usrtt()
8842 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_apply_updated_usrtt()
8855 if ((rsm->r_flags & RACK_ACKED) || in rack_update_rtt()
8856 (rsm->r_flags & RACK_WAS_ACKED)) in rack_update_rtt()
8859 if (rsm->r_no_rtt_allowed) { in rack_update_rtt()
8864 if (SEQ_GT(th_ack, rsm->r_end)) { in rack_update_rtt()
8865 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8868 len_acked = th_ack - rsm->r_start; in rack_update_rtt()
8872 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8875 if (rsm->r_rtr_cnt == 1) { in rack_update_rtt()
8877 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8880 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
8881 tp->t_rttlow = t; in rack_update_rtt()
8882 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8883 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8884 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8885 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8886 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8889 …if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)… in rack_update_rtt()
8890 …us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr… in rack_update_rtt()
8892 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; in rack_update_rtt()
8895 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8897 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8899 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_update_rtt()
8901 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); in rack_update_rtt()
8902 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8915 * When we are not app-limited then we see if in rack_update_rtt()
8932 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_update_rtt()
8937 } else if (rack->app_limited_needs_set == 0) { in rack_update_rtt()
8942 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); in rack_update_rtt()
8944 calc_conf, rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8946 if ((rsm->r_flags & RACK_TLP) && in rack_update_rtt()
8947 (!IN_FASTRECOVERY(tp->t_flags))) { in rack_update_rtt()
8949 if (rack->r_ctl.rc_tlp_cwnd_reduce) { in rack_update_rtt()
8953 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8954 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8955 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
8957 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8958 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8959 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8960 rack->rc_rack_rtt = t; in rack_update_rtt()
8969 tp->t_rxtshift = 0; in rack_update_rtt()
8970 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_update_rtt()
8971 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_update_rtt()
8972 tp->t_softerror = 0; in rack_update_rtt()
8973 if (to && (to->to_flags & TOF_TS) && in rack_update_rtt()
8975 (to->to_tsecr) && in rack_update_rtt()
8976 ((rsm->r_flags & RACK_OVERMAX) == 0)) { in rack_update_rtt()
8981 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
8982 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { in rack_update_rtt()
8983 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8986 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8994 if (TSTMP_GT(tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) in rack_update_rtt()
8995 us_rtt = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8997 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8998 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
9000 if ((i + 1) < rsm->r_rtr_cnt) { in rack_update_rtt()
9012 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
9013 tp->t_rttlow = t; in rack_update_rtt()
9014 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9015 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9016 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9017 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9020 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9021 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9022 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
9024 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
9025 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9026 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9027 rack->rc_rack_rtt = t; in rack_update_rtt()
9029 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); in rack_update_rtt()
9031 rsm->r_rtr_cnt); in rack_update_rtt()
9036 if (tcp_bblogging_on(rack->rc_tp)) { in rack_update_rtt()
9037 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
9038 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); in rack_update_rtt()
9046 * time-stamp since it's not there or the time the peer last in rack_update_rtt()
9047 * received a segment that moved forward its cum-ack point. in rack_update_rtt()
9050 i = rsm->r_rtr_cnt - 1; in rack_update_rtt()
9051 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9054 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9059 * 6.2 Step 2 point 2 in the rack-draft so we in rack_update_rtt()
9065 } else if (rack->r_ctl.rc_rack_min_rtt) { in rack_update_rtt()
9070 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
9071 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
9072 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
9073 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
9074 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
9077 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
9078 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
9079 (uint32_t)rsm->r_tim_lastsent[i]))) { in rack_update_rtt()
9081 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
9082 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
9083 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
9084 rack->rc_rack_rtt = t; in rack_update_rtt()
9106 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, in rack_log_sack_passed()
9112 if (nrsm->r_flags & RACK_ACKED) { in rack_log_sack_passed()
9120 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { in rack_log_sack_passed()
9128 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_log_sack_passed()
9131 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_log_sack_passed()
9134 nrsm->r_flags |= RACK_WAS_LOST; in rack_log_sack_passed()
9135 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_log_sack_passed()
9138 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_log_sack_passed()
9146 nrsm->r_flags |= RACK_SACK_PASSED; in rack_log_sack_passed()
9147 nrsm->r_flags &= ~RACK_WAS_SACKPASS; in rack_log_sack_passed()
9161 if ((tp->t_flags & TF_GPUTINPROG) && in rack_need_set_test()
9162 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9172 if (rsm->r_rtr_cnt > 1) { in rack_need_set_test()
9185 seq = tp->gput_seq; in rack_need_set_test()
9186 ts = tp->gput_ts; in rack_need_set_test()
9187 rack->app_limited_needs_set = 0; in rack_need_set_test()
9188 tp->gput_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_need_set_test()
9191 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { in rack_need_set_test()
9199 tp->gput_seq = rsm->r_start; in rack_need_set_test()
9202 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9214 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9220 * way up to where this ack cum-ack moves in rack_need_set_test()
9223 if (SEQ_GT(th_ack, rsm->r_end)) in rack_need_set_test()
9224 tp->gput_seq = th_ack; in rack_need_set_test()
9226 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9228 if (SEQ_LT(tp->gput_seq, tp->snd_max)) in rack_need_set_test()
9229 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_need_set_test()
9243 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; in rack_need_set_test()
9245 /* If we hit here we have to have *not* sent tp->gput_seq */ in rack_need_set_test()
9246 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_need_set_test()
9248 rack->app_limited_needs_set = 1; in rack_need_set_test()
9250 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { in rack_need_set_test()
9252 * We moved beyond this guy's range, re-calculate in rack_need_set_test()
9255 if (rack->rc_gp_filled == 0) { in rack_need_set_test()
9256 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); in rack_need_set_test()
9258 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_need_set_test()
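/*
 * Illustrative sketch (editorial): when the goodput filter is not yet
 * seeded, the measurement window above is floored at the larger of the
 * initial window and a minimum number of segments.  The segment floor
 * below is an assumption for the sketch, not the stack's MIN_GP_WIN.
 */
#include <stdint.h>

static uint32_t
sketch_min_gp_window(uint32_t init_window, uint32_t maxseg)
{
	const uint32_t min_gp_segs = 6;		/* assumed floor */
	uint32_t floor_bytes = min_gp_segs * maxseg;

	return ((init_window > floor_bytes) ? init_window : floor_bytes);
}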
9265 if ((rack->in_probe_rtt == 0) && in rack_need_set_test()
9266 (rack->measure_saw_probe_rtt) && in rack_need_set_test()
9267 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_need_set_test()
9268 rack->measure_saw_probe_rtt = 0; in rack_need_set_test()
9269 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, in rack_need_set_test()
9270 seq, tp->gput_seq, in rack_need_set_test()
9271 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9272 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9274 if (rack->rc_gp_filled && in rack_need_set_test()
9275 ((tp->gput_ack - tp->gput_seq) < in rack_need_set_test()
9281 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { in rack_need_set_test()
9288 tp->t_flags &= ~TF_GPUTINPROG; in rack_need_set_test()
9289 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_need_set_test()
9291 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9292 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9298 tp->gput_ack = tp->gput_seq + ideal_amount; in rack_need_set_test()
9302 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); in rack_need_set_test()
9309 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { in is_rsm_inside_declared_tlp_block()
9313 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { in is_rsm_inside_declared_tlp_block()
9317 /* It has to be a sub-part of the original TLP recorded */ in is_rsm_inside_declared_tlp_block()
9333 start = sack->start; in rack_proc_sack_blk()
9334 end = sack->end; in rack_proc_sack_blk()
9339 (SEQ_LT(end, rsm->r_start)) || in rack_proc_sack_blk()
9340 (SEQ_GEQ(start, rsm->r_end)) || in rack_proc_sack_blk()
9341 (SEQ_LT(start, rsm->r_start))) { in rack_proc_sack_blk()
9347 rsm = tqhash_find(rack->r_ctl.tqh, start); in rack_proc_sack_blk()
9354 if (rsm->r_start != start) { in rack_proc_sack_blk()
9355 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9360 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9361 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9366 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9374 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9378 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9379 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9380 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9381 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9383 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9384 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9385 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9386 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9389 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9390 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9391 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9392 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9393 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9400 * rsm |--------------| in rack_proc_sack_blk()
9401 * sackblk |-------> in rack_proc_sack_blk()
9403 * rsm |---| in rack_proc_sack_blk()
9405 * nrsm |----------| in rack_proc_sack_blk()
9417 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9419 (rsm->bindex == next->bindex) && in rack_proc_sack_blk()
9420 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9421 ((next->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9422 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9423 ((next->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9424 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9425 (next->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9430 (next->r_flags & RACK_ACKED) && in rack_proc_sack_blk()
9431 SEQ_GEQ(end, next->r_start)) { in rack_proc_sack_blk()
9438 * rsm |------------| (not-acked) in rack_proc_sack_blk()
9439 * next |-----------| (acked) in rack_proc_sack_blk()
9440 * sackblk |--------> in rack_proc_sack_blk()
9442 * rsm |------| (not-acked) in rack_proc_sack_blk()
9443 * next |-----------------| (acked) in rack_proc_sack_blk()
9444 * nrsm |-----| in rack_proc_sack_blk()
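/*
 * Illustrative sketch (editorial): the case diagrammed above needs no
 * new map entry; the newly sacked bytes move to the already-acked
 * right-hand neighbor by sliding the shared boundary.
 */
#include <stdint.h>

struct sketch_seg {
	uint32_t r_start;	/* first sequence, inclusive */
	uint32_t r_end;		/* last sequence + 1, exclusive */
};

static void
sketch_absorb_left(struct sketch_seg *rsm, struct sketch_seg *next,
    uint32_t start)
{
	rsm->r_end = start;	/* [r_start, start) stays un-acked */
	next->r_start = start;	/* [start, next->r_end) is now acked */
}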
9452 tqhash_update_end(rack->r_ctl.tqh, rsm, start); in rack_proc_sack_blk()
9453 next->r_start = start; in rack_proc_sack_blk()
9454 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9455 next->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9456 /* Now we must adjust back where next->m is */ in rack_proc_sack_blk()
9476 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9477 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) in rack_proc_sack_blk()
9478 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9482 if (next->r_ack_arrival < in rack_proc_sack_blk()
9483 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9484 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9489 rsm->r_dupack = 0; in rack_proc_sack_blk()
9490 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9493 nrsm->r_start = start; in rack_proc_sack_blk()
9496 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9497 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9498 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9499 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9500 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9503 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9504 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9506 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9507 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9509 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9511 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9512 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9513 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9514 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9518 * one left un-acked) to the next one in rack_proc_sack_blk()
9521 * sack-passed on rsm (The one passed in in rack_proc_sack_blk()
9526 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9532 if (nrsm && nrsm->r_in_tmap) in rack_proc_sack_blk()
9536 if (SEQ_LT(end, next->r_end) || in rack_proc_sack_blk()
9537 (end == next->r_end)) { in rack_proc_sack_blk()
9544 start = next->r_end; in rack_proc_sack_blk()
9545 rsm = tqhash_next(rack->r_ctl.tqh, next); in rack_proc_sack_blk()
9553 * rsm |--------| in rack_proc_sack_blk()
9554 * sackblk |-----> in rack_proc_sack_blk()
9559 * rsm |----| in rack_proc_sack_blk()
9560 * sackblk |-----> in rack_proc_sack_blk()
9561 * nrsm |---| in rack_proc_sack_blk()
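/*
 * Illustrative sketch (editorial): the general split diagrammed above
 * clones the entry and divides its range at the SACK edge, with the
 * clone inheriting the send history (retransmit counts, timestamps).
 */
#include <stdint.h>
#include <string.h>

struct sketch_map {
	uint32_t r_start;
	uint32_t r_end;
};

static void
sketch_split_at(struct sketch_map *rsm, struct sketch_map *nrsm,
    uint32_t start)
{
	memcpy(nrsm, rsm, sizeof(*nrsm));	/* clone the history */
	nrsm->r_start = start;			/* clone keeps the tail */
	rsm->r_end = start;			/* original keeps the head */
}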
9576 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9578 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9580 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9585 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9586 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9587 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9590 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9597 if (end == rsm->r_end) { in rack_proc_sack_blk()
9599 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9601 } else if (SEQ_LT(end, rsm->r_end)) { in rack_proc_sack_blk()
9603 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9611 start = rsm->r_end; in rack_proc_sack_blk()
9612 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9618 if (SEQ_GEQ(end, rsm->r_end)) { in rack_proc_sack_blk()
9622 * rsm --- |-----| in rack_proc_sack_blk()
9623 * end |-----| in rack_proc_sack_blk()
9625 * end |---------| in rack_proc_sack_blk()
9627 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9631 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9632 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9637 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9644 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9648 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9649 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9650 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9651 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9653 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9654 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9655 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9656 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9659 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9660 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9661 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9662 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9663 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9667 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9669 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9672 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9673 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9674 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9676 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9677 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9679 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9681 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9682 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9685 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9686 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9687 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9688 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9689 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9691 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9692 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9693 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9694 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9695 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9696 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9697 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9698 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
9704 if (end == rsm->r_end) { in rack_proc_sack_blk()
9705 /* This block only - done, set up for next */ in rack_proc_sack_blk()
9712 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9713 start = rsm->r_end; in rack_proc_sack_blk()
9722 * rsm --- |-----| in rack_proc_sack_blk()
9723 * end |--| in rack_proc_sack_blk()
9725 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9729 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9730 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9735 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9742 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9746 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9747 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9748 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9749 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9751 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9752 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9753 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9754 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9757 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9758 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9759 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9760 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9761 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9769 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9771 (rsm->bindex == prev->bindex) && in rack_proc_sack_blk()
9772 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9773 ((prev->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9774 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9775 ((prev->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9776 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9777 (prev->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9782 (prev->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
9785 * in place and span from (rsm->r_start = end) to rsm->r_end. in rack_proc_sack_blk()
9787 * to prev->r_end <- end. in rack_proc_sack_blk()
9789 * prev |--------| (acked) in rack_proc_sack_blk()
9790 * rsm |-------| (non-acked) in rack_proc_sack_blk()
9791 * sackblk |-| in rack_proc_sack_blk()
9793 * prev |----------| (acked) in rack_proc_sack_blk()
9794 * rsm |-----| (non-acked) in rack_proc_sack_blk()
9795 * nrsm |-| (temporary) in rack_proc_sack_blk()
9802 tqhash_update_end(rack->r_ctl.tqh, prev, end); in rack_proc_sack_blk()
9803 rsm->r_start = end; in rack_proc_sack_blk()
9804 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9805 prev->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9810 nrsm->r_end = end; in rack_proc_sack_blk()
9811 rsm->r_dupack = 0; in rack_proc_sack_blk()
9830 if (prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9831 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { in rack_proc_sack_blk()
9832 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9838 if (prev->r_ack_arrival < in rack_proc_sack_blk()
9839 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9840 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9855 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9856 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9857 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9858 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9859 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9862 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9863 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9865 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9866 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9868 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9870 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9871 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9872 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9873 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9889 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9890 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9895 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9902 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9906 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9907 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9908 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9909 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9911 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9912 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9913 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9914 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9917 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9918 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9919 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9920 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9921 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9926 * nrsm->r_start = end; in rack_proc_sack_blk()
9927 * nrsm->r_end = rsm->r_end; in rack_proc_sack_blk()
9928 * which is un-acked. in rack_proc_sack_blk()
9930 * rsm->r_end = nrsm->r_start; in rack_proc_sack_blk()
9931 * i.e. the remaining un-acked in rack_proc_sack_blk()
9936 * rsm |----------| (not acked) in rack_proc_sack_blk()
9937 * sackblk |---| in rack_proc_sack_blk()
9939 * rsm |---| (acked) in rack_proc_sack_blk()
9940 * nrsm |------| (not acked) in rack_proc_sack_blk()
9944 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9945 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9947 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9949 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9954 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9955 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9956 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9958 nrsm->r_dupack = 0; in rack_proc_sack_blk()
9961 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9962 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9965 my_chg = (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9966 rsm->r_flags &= ~RACK_WAS_LOST; in rack_proc_sack_blk()
9967 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9969 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9970 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9972 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9974 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9976 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9979 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9980 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9981 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9982 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9983 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9985 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9986 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9987 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9988 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9989 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9991 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9992 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9993 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
10004 ((rsm->r_flags & RACK_TLP) == 0) && in rack_proc_sack_blk()
10005 (rsm->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
10011 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10013 if (next->r_flags & RACK_TLP) in rack_proc_sack_blk()
10016 if ((next->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10017 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10020 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10021 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10024 if (rsm->bindex != next->bindex) in rack_proc_sack_blk()
10026 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10028 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10030 if (next->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10032 if (next->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10034 if (next->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10037 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10042 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10044 if (prev->r_flags & RACK_TLP) in rack_proc_sack_blk()
10047 if ((prev->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10048 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10051 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
10052 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
10055 if (rsm->bindex != prev->bindex) in rack_proc_sack_blk()
10057 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10059 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10061 if (prev->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
10063 if (prev->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
10065 if (prev->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
10068 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
10079 nrsm = tqhash_find(rack->r_ctl.tqh, end); in rack_proc_sack_blk()
10080 *prsm = rack->r_ctl.rc_sacklast = nrsm; in rack_proc_sack_blk()
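/*
 * Illustrative sketch (editorial): the neighbor-collapse checks above
 * reduce to one predicate -- two adjacent entries may merge only when
 * both are acked, share a buffer index, and neither straddles a
 * boundary nor is a PCM measurement entry (the TLP and goodput-window
 * guards are elided here).  Flag encodings are assumed for the sketch.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_ACKED	0x01	/* assumed flag encodings */
#define SK_STRADDLE	0x02
#define SK_IS_PCM	0x04

struct sketch_ent {
	uint32_t bindex;
	uint32_t flags;
};

static bool
sketch_can_collapse(const struct sketch_ent *a, const struct sketch_ent *b)
{
	return ((a->bindex == b->bindex) &&
	    ((a->flags & SK_STRADDLE) == 0) &&
	    ((b->flags & SK_STRADDLE) == 0) &&
	    ((a->flags & SK_IS_PCM) == 0) &&
	    ((b->flags & SK_IS_PCM) == 0) &&
	    ((a->flags & SK_ACKED) != 0) &&
	    ((b->flags & SK_ACKED) != 0));
}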
10090 while (rsm && (rsm->r_flags & RACK_ACKED)) { in rack_peer_reneges()
10092 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_peer_reneges()
10094 if (rsm->r_in_tmap) { in rack_peer_reneges()
10096 rack, rsm, rsm->r_flags); in rack_peer_reneges()
10099 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); in rack_peer_reneges()
10102 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_peer_reneges()
10105 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); in rack_peer_reneges()
10108 tmap->r_in_tmap = 1; in rack_peer_reneges()
10109 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_peer_reneges()
10115 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); in rack_peer_reneges()
10160 * The cum-ack is being advanced upon the sendmap. in rack_rsm_sender_update()
10166 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_rsm_sender_update()
10173 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { in rack_rsm_sender_update()
10174 tp->gput_ack = rsm->r_end; in rack_rsm_sender_update()
10183 if (rack->app_limited_needs_set) in rack_rsm_sender_update()
10201 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= in rack_rsm_sender_update()
10202 rack->r_ctl.rc_gp_cumack_ts) in rack_rsm_sender_update()
10205 rack->r_ctl.rc_gp_cumack_ts = ts; in rack_rsm_sender_update()
10206 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, in rack_rsm_sender_update()
10221 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { in rack_process_to_cumack()
10226 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); in rack_process_to_cumack()
10228 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_process_to_cumack()
10230 rack->r_ctl.cleared_app_ack = 0; in rack_process_to_cumack()
10232 rack->r_wanted_output = 1; in rack_process_to_cumack()
10233 if (SEQ_GT(th_ack, tp->snd_una)) in rack_process_to_cumack()
10234 rack->r_ctl.last_cumack_advance = acktime; in rack_process_to_cumack()
10237 if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10238 (rack->rc_last_tlp_past_cumack == 1) && in rack_process_to_cumack()
10239 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { in rack_process_to_cumack()
10242 * tlp retransmit sequence is ahead of the cum-ack. in rack_process_to_cumack()
10243 * This can only happen when the cum-ack moves all in rack_process_to_cumack()
10248 * Note since sacks also turn on this event we have in rack_process_to_cumack()
10250 * the cum-ack is by the TLP before checking which is in rack_process_to_cumack()
10254 rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10255 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10256 rack->rc_last_tlp_acked_set = 0; in rack_process_to_cumack()
10257 rack->rc_last_tlp_past_cumack = 0; in rack_process_to_cumack()
10258 } else if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10259 (rack->rc_last_tlp_past_cumack == 0) && in rack_process_to_cumack()
10260 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { in rack_process_to_cumack()
10264 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10267 if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10268 (rack->rc_last_sent_tlp_past_cumack == 1) && in rack_process_to_cumack()
10269 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { in rack_process_to_cumack()
10271 rack->r_ctl.last_sent_tlp_seq, in rack_process_to_cumack()
10272 (rack->r_ctl.last_sent_tlp_seq + in rack_process_to_cumack()
10273 rack->r_ctl.last_sent_tlp_len)); in rack_process_to_cumack()
10274 rack->rc_last_sent_tlp_seq_valid = 0; in rack_process_to_cumack()
10275 rack->rc_last_sent_tlp_past_cumack = 0; in rack_process_to_cumack()
10276 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10277 (rack->rc_last_sent_tlp_past_cumack == 0) && in rack_process_to_cumack()
10278 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { in rack_process_to_cumack()
10282 rack->rc_last_sent_tlp_past_cumack = 1; in rack_process_to_cumack()
10285 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10287 if ((th_ack - 1) == tp->iss) { in rack_process_to_cumack()
10296 if (tp->t_flags & TF_SENTFIN) { in rack_process_to_cumack()
10303 tp->t_state, th_ack, rack, in rack_process_to_cumack()
10304 tp->snd_una, tp->snd_max); in rack_process_to_cumack()
10308 if (SEQ_LT(th_ack, rsm->r_start)) { in rack_process_to_cumack()
10312 rsm->r_start, in rack_process_to_cumack()
10313 th_ack, tp->t_state, rack->r_state); in rack_process_to_cumack()
10320 if ((rsm->r_flags & RACK_TLP) && in rack_process_to_cumack()
10321 (rsm->r_rtr_cnt > 1)) { in rack_process_to_cumack()
10331 if (rack->rc_last_tlp_acked_set && in rack_process_to_cumack()
10338 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10342 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_process_to_cumack()
10343 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10344 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10345 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10347 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_process_to_cumack()
10348 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10349 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10350 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10353 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10354 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10355 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10356 rack->rc_last_tlp_acked_set = 1; in rack_process_to_cumack()
10357 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10361 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_process_to_cumack()
10362 if (SEQ_GEQ(th_ack, rsm->r_end)) { in rack_process_to_cumack()
10367 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10373 rsm->r_flags &= ~RACK_WAS_LOST; in rack_process_to_cumack()
10374 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_process_to_cumack()
10376 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_process_to_cumack()
10377 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_process_to_cumack()
10379 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10381 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); in rack_process_to_cumack()
10382 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; in rack_process_to_cumack()
10383 rsm->r_rtr_bytes = 0; in rack_process_to_cumack()
10389 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_process_to_cumack()
10390 if (rsm->r_in_tmap) { in rack_process_to_cumack()
10391 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_process_to_cumack()
10392 rsm->r_in_tmap = 0; in rack_process_to_cumack()
10395 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10397 * It was acked on the scoreboard -- remove in rack_process_to_cumack()
10400 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_process_to_cumack()
10402 } else if (rsm->r_flags & RACK_SACK_PASSED) { in rack_process_to_cumack()
10408 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_process_to_cumack()
10409 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_process_to_cumack()
10410 rsm->r_flags |= RACK_ACKED; in rack_process_to_cumack()
10411 rack->r_ctl.rc_reorder_ts = cts; in rack_process_to_cumack()
10412 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_to_cumack()
10413 rack->r_ctl.rc_reorder_ts = 1; in rack_process_to_cumack()
10414 if (rack->r_ent_rec_ns) { in rack_process_to_cumack()
10419 rack->r_might_revert = 1; in rack_process_to_cumack()
10421 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10423 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10425 if ((rsm->r_flags & RACK_TO_REXT) && in rack_process_to_cumack()
10426 (tp->t_flags & TF_RCVD_TSTMP) && in rack_process_to_cumack()
10427 (to->to_flags & TOF_TS) && in rack_process_to_cumack()
10428 (to->to_tsecr != 0) && in rack_process_to_cumack()
10429 (tp->t_flags & TF_PREVVALID)) { in rack_process_to_cumack()
10435 tp->t_flags &= ~TF_PREVVALID; in rack_process_to_cumack()
10436 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { in rack_process_to_cumack()
10441 left = th_ack - rsm->r_end; in rack_process_to_cumack()
10442 if (rack->app_limited_needs_set && newly_acked) in rack_process_to_cumack()
10450 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10451 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { in rack_process_to_cumack()
10459 * given us snd_una up to (rsm->r_end). in rack_process_to_cumack()
10463 * our rsm->r_start in case we get an old ack in rack_process_to_cumack()
10470 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10472 * It was acked on the scoreboard -- remove it from in rack_process_to_cumack()
10473 * total for the part being cum-acked. in rack_process_to_cumack()
10475 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); in rack_process_to_cumack()
10477 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); in rack_process_to_cumack()
10480 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10487 KASSERT((rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)), in rack_process_to_cumack()
10489 if (rack->r_ctl.rc_considered_lost >= (th_ack - rsm->r_start)) in rack_process_to_cumack()
10490 rack->r_ctl.rc_considered_lost -= th_ack - rsm->r_start; in rack_process_to_cumack()
10492 rack->r_ctl.rc_considered_lost = 0; in rack_process_to_cumack()
10498 rsm->r_dupack = 0; in rack_process_to_cumack()
10500 if (rsm->r_rtr_bytes) { in rack_process_to_cumack()
10507 ack_am = (th_ack - rsm->r_start); in rack_process_to_cumack()
10508 if (ack_am >= rsm->r_rtr_bytes) { in rack_process_to_cumack()
10509 rack->r_ctl.rc_holes_rxt -= ack_am; in rack_process_to_cumack()
10510 rsm->r_rtr_bytes -= ack_am; in rack_process_to_cumack()
10520 if (rsm->m && in rack_process_to_cumack()
10521 ((rsm->orig_m_len != rsm->m->m_len) || in rack_process_to_cumack()
10522 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_process_to_cumack()
10526 rsm->soff += (th_ack - rsm->r_start); in rack_process_to_cumack()
10529 tqhash_trim(rack->r_ctl.tqh, th_ack); in rack_process_to_cumack()
10535 m = rsm->m; in rack_process_to_cumack()
10536 soff = rsm->soff; in rack_process_to_cumack()
10538 while (soff >= m->m_len) { in rack_process_to_cumack()
10539 soff -= m->m_len; in rack_process_to_cumack()
10540 KASSERT((m->m_next != NULL), in rack_process_to_cumack()
10542 rsm, rsm->soff, soff, m)); in rack_process_to_cumack()
10543 m = m->m_next; in rack_process_to_cumack()
10546 * This is a fall-back that prevents a panic. In reality in rack_process_to_cumack()
10549 * but tqhash_trim did update rsm->r_start so the offset calculation in rack_process_to_cumack()
10554 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_process_to_cumack()
10555 (rsm->r_start - tp->snd_una), in rack_process_to_cumack()
10563 rsm->m = m; in rack_process_to_cumack()
10564 rsm->soff = soff; in rack_process_to_cumack()
10565 rsm->orig_m_len = rsm->m->m_len; in rack_process_to_cumack()
10566 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_process_to_cumack()
10569 if (rack->app_limited_needs_set && in rack_process_to_cumack()
10570 SEQ_GEQ(th_ack, tp->gput_seq)) in rack_process_to_cumack()
10571 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); in rack_process_to_cumack()
10580 if (rack->r_might_revert) { in rack_handle_might_revert()
10591 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_handle_might_revert()
10592 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_handle_might_revert()
10604 rack->r_ent_rec_ns = 0; in rack_handle_might_revert()
10605 orig_cwnd = tp->snd_cwnd; in rack_handle_might_revert()
10606 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; in rack_handle_might_revert()
10607 tp->snd_recover = tp->snd_una; in rack_handle_might_revert()
10609 if (IN_RECOVERY(tp->t_flags)) { in rack_handle_might_revert()
10611 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0)) { in rack_handle_might_revert()
10614 * and then re-entered recovery (more sack's arrived) in rack_handle_might_revert()
10616 * the first recovery. We want to be able to slow-start in rack_handle_might_revert()
10620 * so we get no slow-start after our RTO. in rack_handle_might_revert()
10622 rack->rto_from_rec = 0; in rack_handle_might_revert()
10623 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_handle_might_revert()
10624 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_handle_might_revert()
10628 rack->r_might_revert = 0; in rack_handle_might_revert()
10641 am = end - start; in rack_note_dsack()
10644 if ((rack->rc_last_tlp_acked_set) && in rack_note_dsack()
10645 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && in rack_note_dsack()
10646 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { in rack_note_dsack()
10657 if (rack->rc_last_sent_tlp_seq_valid) { in rack_note_dsack()
10658 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; in rack_note_dsack()
10659 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && in rack_note_dsack()
10670 if (rack->rc_dsack_round_seen == 0) { in rack_note_dsack()
10671 rack->rc_dsack_round_seen = 1; in rack_note_dsack()
10672 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; in rack_note_dsack()
10673 rack->r_ctl.num_dsack++; in rack_note_dsack()
10674 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ in rack_note_dsack()
10682 rack->r_ctl.dsack_byte_cnt += am; in rack_note_dsack()
10683 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_note_dsack()
10684 rack->r_ctl.retran_during_recovery && in rack_note_dsack()
10685 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { in rack_note_dsack()
10690 rack->r_might_revert = 1; in rack_note_dsack()
10691 rack_handle_might_revert(rack->rc_tp, rack); in rack_note_dsack()
10692 rack->r_might_revert = 0; in rack_note_dsack()
10693 rack->r_ctl.retran_during_recovery = 0; in rack_note_dsack()
10694 rack->r_ctl.dsack_byte_cnt = 0; in rack_note_dsack()
10702 return (((tp->snd_max - snd_una) - in do_rack_compute_pipe()
10703 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); in do_rack_compute_pipe()
10710 (struct tcp_rack *)tp->t_fb_ptr, in rack_compute_pipe()
10711 tp->snd_una)); in rack_compute_pipe()
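/*
 * Illustrative worked example (editorial) of the pipe computation
 * above, per the RFC 6675 notion of data in flight:
 *   pipe = (snd_max - snd_una) - (sacked + considered_lost) + holes_rxt
 */
#include <stdint.h>

static uint32_t
sketch_pipe(uint32_t snd_max, uint32_t snd_una, uint32_t sacked,
    uint32_t considered_lost, uint32_t holes_rxt)
{
	return (((snd_max - snd_una) - (sacked + considered_lost)) +
	    holes_rxt);
}
/*
 * e.g. 100000 bytes outstanding, 30000 sacked, 10000 considered lost,
 * 5000 retransmitted into holes -> pipe = 60000 + 5000 = 65000 bytes.
 */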
10720 rack->r_ctl.rc_prr_delivered += changed; in rack_update_prr()
10722 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { in rack_update_prr()
10726 * Note we use tp->snd_una here and not th_ack because in rack_update_prr()
10729 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10733 if (SEQ_GT(tp->snd_una, th_ack)) { in rack_update_prr()
10734 snd_una = tp->snd_una; in rack_update_prr()
10739 if (pipe > tp->snd_ssthresh) { in rack_update_prr()
10742 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; in rack_update_prr()
10743 if (rack->r_ctl.rc_prr_recovery_fs > 0) in rack_update_prr()
10744 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; in rack_update_prr()
10746 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10751 if (sndcnt > (long)rack->r_ctl.rc_prr_out) in rack_update_prr()
10752 sndcnt -= rack->r_ctl.rc_prr_out; in rack_update_prr()
10755 rack->r_ctl.rc_prr_sndcnt = sndcnt; in rack_update_prr()
10760 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) in rack_update_prr()
10761 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); in rack_update_prr()
10767 if (tp->snd_ssthresh > pipe) { in rack_update_prr()
10768 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); in rack_update_prr()
10771 rack->r_ctl.rc_prr_sndcnt = min(0, limit); in rack_update_prr()
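/*
 * Illustrative sketch (editorial) of the RFC 6937 proportional rate
 * reduction arithmetic used above when pipe exceeds ssthresh:
 *   sndcnt = (prr_delivered * ssthresh) / RecoverFS - prr_out
 */
#include <stdint.h>

static long
sketch_prr_sndcnt(long delivered, long ssthresh, long recover_fs,
    long prr_out)
{
	long sndcnt = 0;

	if (recover_fs > 0)
		sndcnt = (delivered * ssthresh) / recover_fs;
	if (sndcnt > prr_out)
		sndcnt -= prr_out;
	else
		sndcnt = 0;
	return (sndcnt);
}
/*
 * e.g. delivered=20000, ssthresh=50000, RecoverFS=100000, prr_out=8000
 * -> sndcnt = 10000 - 8000 = 2000 bytes may be sent.
 */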
10798 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_ack()
10800 rsm = tqhash_min(rack->r_ctl.tqh); in rack_log_ack()
10802 th_ack = th->th_ack; in rack_log_ack()
10803 segsiz = ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10808 * credit for larger cum-ack moves). in rack_log_ack()
10812 ac = BYTES_THIS_ACK(tp, th) / ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10815 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_log_ack()
10817 tp->t_acktime = ticks; in rack_log_ack()
10819 if (rsm && SEQ_GT(th_ack, rsm->r_start)) in rack_log_ack()
10820 changed = th_ack - rsm->r_start; in rack_log_ack()
10823 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_log_ack()
10825 if ((to->to_flags & TOF_SACK) == 0) { in rack_log_ack()
10829 * For cases where we struck a dup-ack in rack_log_ack()
10834 changed += ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10839 if (SEQ_GT(th_ack, tp->snd_una)) in rack_log_ack()
10842 ack_point = tp->snd_una; in rack_log_ack()
10843 for (i = 0; i < to->to_nsacks; i++) { in rack_log_ack()
10844 bcopy((to->to_sacks + i * TCPOLEN_SACK), in rack_log_ack()
10850 SEQ_LT(sack.start, tp->snd_max) && in rack_log_ack()
10852 SEQ_LEQ(sack.end, tp->snd_max)) { in rack_log_ack()
10863 * Its a D-SACK block. in rack_log_ack()
10868 if (rack->rc_dsack_round_seen) { in rack_log_ack()
10870 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { in rack_log_ack()
10872 rack->rc_dsack_round_seen = 0; in rack_log_ack()
10880 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, in rack_log_ack()
10881 num_sack_blks, th->th_ack); in rack_log_ack()
10882 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); in rack_log_ack()
10929 * Now collapse out the dup-sack and in rack_log_ack()
10937 num_sack_blks--; in rack_log_ack()
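/*
 * Illustrative sketch (editorial): after the blocks are sorted, a
 * duplicate is squeezed out by shifting the tail of the array down,
 * as the num_sack_blks-- above implies.
 */
#include <stdint.h>

struct sketch_blk {
	uint32_t start;
	uint32_t end;
};

static int
sketch_dedup_blocks(struct sketch_blk *blk, int n)
{
	int i, j;

	for (i = 0; i < n - 1; i++) {
		if ((blk[i].start == blk[i + 1].start) &&
		    (blk[i].end == blk[i + 1].end)) {
			for (j = i + 1; j < n - 1; j++)
				blk[j] = blk[j + 1];
			n--;
			i--;	/* re-test the slot just shifted in */
		}
	}
	return (n);
}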
10949 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_log_ack()
10951 SEQ_GT(sack_blocks[0].end, rsm->r_start) && in rack_log_ack()
10952 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { in rack_log_ack()
10959 rack->r_wanted_output = 1; in rack_log_ack()
10967 * i.e. the sack-filter pushes down in rack_log_ack()
10973 counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp))); in rack_log_ack()
10985 rsm = rack->r_ctl.rc_sacklast; in rack_log_ack()
10989 rack->r_wanted_output = 1; in rack_log_ack()
10997 * you have more than one sack-blk, this in rack_log_ack()
10999 * and the sack-filter is still working, or in rack_log_ack()
11008 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_log_ack()
11012 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_log_ack()
11014 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { in rack_log_ack()
11022 if (rack->rack_no_prr == 0) { in rack_log_ack()
11023 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_log_ack()
11026 rack->r_timer_override = 1; in rack_log_ack()
11027 rack->r_early = 0; in rack_log_ack()
11028 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11029 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11031 (rack->r_rr_config == 3)) { in rack_log_ack()
11036 rack->r_timer_override = 1; in rack_log_ack()
11037 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_log_ack()
11038 rack->r_ctl.rc_resend = rsm; in rack_log_ack()
11040 if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
11041 (rack->rack_no_prr == 0) && in rack_log_ack()
11044 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && in rack_log_ack()
11045 ((tcp_in_hpts(rack->rc_tp) == 0) && in rack_log_ack()
11046 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { in rack_log_ack()
11051 rack->r_early = 0; in rack_log_ack()
11052 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
11053 rack->r_timer_override = 1; in rack_log_ack()
11063 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_strike_dupack()
11069 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in rack_strike_dupack()
11070 (rsm->r_flags & RACK_MUST_RXT)) { in rack_strike_dupack()
11076 if (rsm && (rsm->r_dupack < 0xff)) { in rack_strike_dupack()
11077 rsm->r_dupack++; in rack_strike_dupack()
11078 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { in rack_strike_dupack()
11084 * we will get a return of the rsm. For a non-sack in rack_strike_dupack()
11089 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); in rack_strike_dupack()
11090 if (rack->r_ctl.rc_resend != NULL) { in rack_strike_dupack()
11091 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { in rack_strike_dupack()
11092 rack_cong_signal(rack->rc_tp, CC_NDUPACK, in rack_strike_dupack()
11095 rack->r_wanted_output = 1; in rack_strike_dupack()
11096 rack->r_timer_override = 1; in rack_strike_dupack()
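/*
 * Illustrative sketch (editorial): the strike counter above is the
 * classic saturating dup-ack counter; DUP_ACK_THRESHOLD is assumed to
 * be the standard 3 for this sketch.
 */
#include <stdint.h>
#include <stdbool.h>

#define SK_DUP_ACK_THRESHOLD	3	/* assumed */

static bool
sketch_strike_dupack(uint8_t *r_dupack)
{
	if (*r_dupack < 0xff)		/* saturate, do not wrap */
		(*r_dupack)++;
	return (*r_dupack >= SK_DUP_ACK_THRESHOLD);
}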
11120 * gauge the inter-ack times). If that occurs we have a real problem in rack_check_bottom_drag()
11133 if (tp->snd_max == tp->snd_una) { in rack_check_bottom_drag()
11145 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); in rack_check_bottom_drag()
11147 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11149 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && in rack_check_bottom_drag()
11150 (rack->dis_lt_bw == 0) && in rack_check_bottom_drag()
11151 (rack->use_lesser_lt_bw == 0) && in rack_check_bottom_drag()
11154 * Lets use the long-term b/w we have in rack_check_bottom_drag()
11157 if (rack->rc_gp_filled == 0) { in rack_check_bottom_drag()
11169 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11170 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11171 rack->rc_gp_filled = 1; in rack_check_bottom_drag()
11172 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11173 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11174 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11175 } else if (lt_bw > rack->r_ctl.gp_bw) { in rack_check_bottom_drag()
11176 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
11177 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
11178 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
11179 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
11180 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
11182 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11183 if ((rack->gp_ready == 0) && in rack_check_bottom_drag()
11184 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_check_bottom_drag()
11186 rack->gp_ready = 1; in rack_check_bottom_drag()
11187 if (rack->dgp_on || in rack_check_bottom_drag()
11188 rack->rack_hibeta) in rack_check_bottom_drag()
11190 if (rack->defer_options) in rack_check_bottom_drag()
11197 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11199 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_check_bottom_drag()
11200 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), in rack_check_bottom_drag()
11202 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11203 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
11204 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= in rack_check_bottom_drag()
11215 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11216 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
11227 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid()
11229 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0) in rack_log_hybrid()
11250 log.u_bbr.flex2 = cur->start_seq; in rack_log_hybrid()
11251 log.u_bbr.flex3 = cur->end_seq; in rack_log_hybrid()
11252 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11253 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid()
11254 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid()
11255 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid()
11256 log.u_bbr.rttProp = cur->timestamp; in rack_log_hybrid()
11257 log.u_bbr.cur_del_rate = cur->cspr; in rack_log_hybrid()
11258 log.u_bbr.bw_inuse = cur->start; in rack_log_hybrid()
11259 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid()
11260 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11261 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); in rack_log_hybrid()
11262 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
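/*
 * Illustrative sketch (editorial): the 64-bit request fields above are
 * logged split across pairs of 32-bit BB-log slots; a log reader
 * reassembles them as below.
 */
#include <stdint.h>

static uint64_t
sketch_unsplit(uint32_t hi32, uint32_t lo32)
{
	return (((uint64_t)hi32 << 32) | (uint64_t)lo32);
}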
11265 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid()
11274 log.u_bbr.flex7 = rack->rc_catch_up; in rack_log_hybrid()
11276 log.u_bbr.flex7 |= rack->rc_hybrid_mode; in rack_log_hybrid()
11278 log.u_bbr.flex7 |= rack->dgp_on; in rack_log_hybrid()
11286 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid()
11288 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid()
11290 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid()
11292 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid()
11294 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; in rack_log_hybrid()
11295 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; in rack_log_hybrid()
11296 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_hybrid()
11297 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; in rack_log_hybrid()
11298 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; in rack_log_hybrid()
11299 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; in rack_log_hybrid()
11300 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid()
11301 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid()
11302 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid()
11317 orig_ent = rack->r_ctl.rc_last_sft; in rack_set_dgp_hybrid_mode()
11318 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); in rack_set_dgp_hybrid_mode()
11321 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11323 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); in rack_set_dgp_hybrid_mode()
11330 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11331 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11332 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11333 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11334 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11336 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11338 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11339 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11341 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11342 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11346 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { in rack_set_dgp_hybrid_mode()
11348 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11349 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11350 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11351 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11353 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11354 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11356 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_set_dgp_hybrid_mode()
11357 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_set_dgp_hybrid_mode()
11358 rc_cur->first_send = cts; in rack_set_dgp_hybrid_mode()
11359 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11360 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11371 tp = rack->rc_tp; in rack_set_dgp_hybrid_mode()
11372 if ((rack->r_ctl.rc_last_sft != NULL) && in rack_set_dgp_hybrid_mode()
11373 (rack->r_ctl.rc_last_sft == rc_cur)) { in rack_set_dgp_hybrid_mode()
11375 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11379 if (rack->rc_hybrid_mode == 0) { in rack_set_dgp_hybrid_mode()
11380 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11382 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11383 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11384 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11389 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) { in rack_set_dgp_hybrid_mode()
11391 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11392 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11394 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11396 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11397 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11398 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11400 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11403 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) in rack_set_dgp_hybrid_mode()
11404 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; in rack_set_dgp_hybrid_mode()
11406 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11407 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { in rack_set_dgp_hybrid_mode()
11411 * sendtime not arrival time for catch-up mode. in rack_set_dgp_hybrid_mode()
11413 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; in rack_set_dgp_hybrid_mode()
11415 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && in rack_set_dgp_hybrid_mode()
11416 (rc_cur->cspr > 0)) { in rack_set_dgp_hybrid_mode()
11419 rack->rc_catch_up = 1; in rack_set_dgp_hybrid_mode()
11424 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { in rack_set_dgp_hybrid_mode()
11430 rc_cur->deadline = cts; in rack_set_dgp_hybrid_mode()
11436 rc_cur->deadline = rc_cur->localtime; in rack_set_dgp_hybrid_mode()
11442 len = rc_cur->end - rc_cur->start; in rack_set_dgp_hybrid_mode()
11443 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { in rack_set_dgp_hybrid_mode()
11448 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); in rack_set_dgp_hybrid_mode()
11458 len /= rc_cur->cspr; in rack_set_dgp_hybrid_mode()
11459 rc_cur->deadline += len; in rack_set_dgp_hybrid_mode()
11461 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11462 rc_cur->deadline = 0; in rack_set_dgp_hybrid_mode()
11464 if (rack->r_ctl.client_suggested_maxseg != 0) { in rack_set_dgp_hybrid_mode()
11472 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11473 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11474 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11478 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11479 rack->r_ctl.last_tm_mark = rc_cur->timestamp; in rack_set_dgp_hybrid_mode()
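/*
 * Illustrative sketch (editorial, units assumed): the catch-up deadline
 * set above is the request's reference time plus how long the remaining
 * bytes take at the client-suggested pacing rate, taken here to be in
 * bytes per second.
 */
#include <stdint.h>

static uint64_t
sketch_catchup_deadline(uint64_t start_usec, uint64_t bytes,
    uint64_t rate_bytes_per_sec)
{
	if (rate_bytes_per_sec == 0)
		return (0);	/* no rate -> no catch-up deadline */
	return (start_usec + (bytes * 1000000ULL) / rate_bytes_per_sec);
}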
11489 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11491 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || in rack_chk_req_and_hybrid_on_out()
11492 (SEQ_GEQ(seq, ent->end_seq))) { in rack_chk_req_and_hybrid_on_out()
11495 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11501 if (SEQ_LT(ent->end_seq, (seq + len))) { in rack_chk_req_and_hybrid_on_out()
11512 ent->end_seq = (seq + len); in rack_chk_req_and_hybrid_on_out()
11513 if (rack->rc_hybrid_mode) in rack_chk_req_and_hybrid_on_out()
11517 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_chk_req_and_hybrid_on_out()
11518 ent->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_chk_req_and_hybrid_on_out()
11519 ent->first_send = cts; in rack_chk_req_and_hybrid_on_out()
11520 ent->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_chk_req_and_hybrid_on_out()
11521 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_chk_req_and_hybrid_on_out()
11550 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; in rack_gain_for_fastoutput()
11551 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), in rack_gain_for_fastoutput()
11552 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11556 rack->r_ctl.fsb.left_to_send = new_total; in rack_gain_for_fastoutput()
11557 …KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_ma… in rack_gain_for_fastoutput()
11559 rack, rack->r_ctl.fsb.left_to_send, in rack_gain_for_fastoutput()
11560 sbavail(&rack->rc_inp->inp_socket->so_snd), in rack_gain_for_fastoutput()
11561 (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
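/*
 * Illustrative sketch (editorial): the gating above lets the fast-send
 * credit grow by newly acked bytes, but never beyond what both the
 * socket buffer and the peer's window allow past current in-flight
 * data.
 */
#include <stdint.h>

static uint32_t
sketch_fsb_left(uint32_t left, uint32_t acked, uint32_t sbavail,
    uint32_t wnd, uint32_t inflight)
{
	uint32_t new_total = left + acked;
	uint32_t gate_sb = (sbavail > inflight) ? (sbavail - inflight) : 0;
	uint32_t gate_wnd = (wnd > inflight) ? (wnd - inflight) : 0;
	uint32_t gate = (gate_sb < gate_wnd) ? gate_sb : gate_wnd;

	return ((new_total <= gate) ? new_total : gate);
}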
/* rack_adjust_sendmap_head() -- excerpt */
    snd_una = rack->rc_tp->snd_una;
    /* ... */
    m = sb->sb_mb;
    rsm = tqhash_min(rack->r_ctl.tqh);
    /* ... */
    KASSERT((rsm->m == m),
        ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb",
        /* ... */));
    while (rsm->m && (rsm->m == m)) {
        /* ... */
        tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff);
        if ((rsm->orig_m_len != m->m_len) ||
            (rsm->orig_t_space != M_TRAILINGROOM(m))) {
            /* ... */
            KASSERT((rsm->soff == 0),
                ("Rack:%p rsm:%p -- rsm at head but soff not zero",
                /* ... */));
            /* ... */
            if ((rsm->soff != soff) || (rsm->m != tm)) {
                /* ... */
                rsm->m = tm;
                rsm->soff = soff;
                if (tm) {
                    rsm->orig_m_len = rsm->m->m_len;
                    rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
                } else {
                    rsm->orig_m_len = 0;
                    rsm->orig_t_space = 0;
                }
            } else {
                rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff);
                if (rsm->m) {
                    rsm->orig_m_len = rsm->m->m_len;
                    rsm->orig_t_space = M_TRAILINGROOM(rsm->m);
                } else {
                    rsm->orig_m_len = 0;
                    rsm->orig_t_space = 0;
                }
            }
        }
        rsm = tqhash_next(rack->r_ctl.tqh, rsm);
    }
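    /*
     * Editor's note: once acked data has been trimmed from the head of
     * the socket buffer, every sendmap entry that referenced the old
     * head mbuf must be re-anchored: its mbuf pointer and byte offset
     * are recomputed, and the cached m_len/trailing-room snapshots used
     * to detect later chain changes are refreshed.
     */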
/* rack_req_check_for_comp() -- excerpt */
    if ((rack->rc_hybrid_mode == 0) &&
        (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) {
        /* ... */
        tcp_req_check_for_comp(rack->rc_tp, th_ack);
        /* ... */
    }
    ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
    /* ... */
    data = ent->end - ent->start;
    laa = tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time);
    if (ent->flags & TCP_TRK_TRACK_FLG_FSND) {
        if (ent->first_send > ent->localtime)
            ftim = ent->first_send;
        else
            ftim = ent->localtime;
    } else {
        /* ... */
        ftim = ent->localtime;
    }
    /* ... */
    if (laa > ent->localtime)
        tim = laa - ftim;
    /* ... */
    if (ent == rack->r_ctl.rc_last_sft) {
        rack->r_ctl.rc_last_sft = NULL;
        if (rack->rc_hybrid_mode) {
            rack->rc_catch_up = 0;
            if (rack->cspr_is_fcc == 0)
                rack->r_ctl.bw_rate_cap = 0;
            else
                rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap;
            rack->r_ctl.client_suggested_maxseg = 0;
        }
    }
    /* ... */
    tcp_req_log_req_info(rack->rc_tp, ent,
        /* ... */);
    tcp_req_free_a_slot(rack->rc_tp, ent);
    ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i);
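    /*
     * Editor's note: each tracked request that this cumulative ACK
     * completes is timed from its first send (or its arrival time if it
     * was never sent) to the ACK arrival, logged, and its slot freed;
     * if it was the entry currently driving hybrid pacing, the catch-up
     * state, rate caps and client-suggested segment size are reset too.
     */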
/*
 * For ret_val: if it is 0 the TCP is locked; if it is non-zero
 * it is unlocked and probably unsafe to touch the TCB.
 */
/* rack_process_ack() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) {
        /* ... */
        tp->t_flags2 |= TF2_NO_ISS_CHECK;
    }
    /* ... */
    if (tp->t_flags2 & TF2_NO_ISS_CHECK) {
        /* ... */
        seq_min = tp->snd_una - tp->max_sndwnd;
    } else {
        if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) {
            /* ... */
            seq_min = tp->iss + 1;
        } else {
            /* ... */
            seq_min = tp->snd_una - tp->max_sndwnd;
        }
    }
    if (SEQ_LT(th->th_ack, seq_min)) {
        /* ... */
        rack->r_wanted_output = 1;
    }
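    /*
     * Editor's summary: an ACK is considered ancient and dropped when
     * it falls more than one maximum send window behind snd_una; until
     * snd_una has advanced at least 65535 << snd_scale past the ISS,
     * the floor is additionally clamped to iss + 1.
     */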
    if (SEQ_GT(th->th_ack, tp->snd_max)) {
        /* ... */
        rack->r_wanted_output = 1;
        /* ... */
    }
    if (rack->gp_ready &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
        /* ... */
    }
    if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) {
        /* ... */
        in_rec = IN_FASTRECOVERY(tp->t_flags);
        if (rack->rc_in_persist) {
            tp->t_rxtshift = 0;
            RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
                rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
        }
        if ((th->th_ack == tp->snd_una) &&
            (tiwin == tp->snd_wnd) &&
            /* ... */
            ((to->to_flags & TOF_SACK) == 0)) {
            rack_strike_dupack(rack, th->th_ack);
            /* ... */
        }
        rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)),
            /* ... */);
    }
    if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
        /* ... */
        if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) {
            rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time);
            if (rack->r_ctl.rc_reorder_ts == 0)
                rack->r_ctl.rc_reorder_ts = 1;
        }
        /* ... */
    }
    if (tp->t_flags & TF_NEEDSYN) {
        /*
         * T/TCP: Connection was half-synchronized, and our SYN has
         * been ACK'd (so connection is now fully synchronized).  Go
         * to non-starred state, increment snd_una for ACK of SYN,
         * and check if we can do window scaling.
         */
        tp->t_flags &= ~TF_NEEDSYN;
        tp->snd_una++;
        /* Do window scaling? */
        if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
            (TF_RCVD_SCALE | TF_REQ_SCALE)) {
            tp->rcv_scale = tp->request_r_scale;
        }
    }
    nsegs = max(1, m->m_pkthdr.lro_nsegs);
    /* ... */
    /*
     * Any time we move the cum-ack forward, clear the keep-alive
     * tied probe-not-answered state. ...
     */
    rack->probe_not_answered = 0;
    /* ... */
    if ((tp->t_flags & TF_PREVVALID) &&
        ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
        tp->t_flags &= ~TF_PREVVALID;
        if (tp->t_rxtshift == 1 &&
            (int)(ticks - tp->t_badrxtwin) < 0)
            rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
    }
    /* ... */
    tp->t_rxtshift = 0;
    RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
        rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
    rack->rc_tlp_in_progress = 0;
    rack->r_ctl.rc_tlp_cnt_out = 0;
    /* ... */
    if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
    /* ... */
    rack_req_check_for_comp(rack, th->th_ack);
    /* ... */
    /*
     * If all outstanding data is acked, stop the retransmit timer;
     * otherwise restart the timer using the current (possibly
     * backed-off) value.
     */
    /* ... */
    if (IN_RECOVERY(tp->t_flags)) {
        if (SEQ_LT(th->th_ack, tp->snd_recover) &&
            (SEQ_LT(th->th_ack, tp->snd_max))) {
            /* ... */
        } else {
            rack_post_recovery(tp, th->th_ack);
            /* ... */
            p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
            /* ... */
            p_cwnd += tp->snd_cwnd;
        }
    } else if ((rack->rto_from_rec == 1) &&
        SEQ_GEQ(th->th_ack, tp->snd_recover)) {
        /*
         * ... and never re-entered recovery. The timeout(s)
         * ...
         */
        rack->rto_from_rec = 0;
    }
    /* ... */
    rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery);
    if (post_recovery &&
        (tp->snd_cwnd > p_cwnd)) {
        /* Must be non-newreno (cubic) getting too ahead of itself */
        tp->snd_cwnd = p_cwnd;
    }
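    /*
     * Editor's note: p_cwnd caps cwnd right after recovery exit: it is
     * the post-recovery cwnd plus a one-segment-sized allowance (an
     * elided line may scale that allowance), so a congestion control
     * such as cubic that grows cwnd during the exit ACK cannot burst
     * ahead of where recovery left it.
     */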
    acked_amount = min(acked, (int)sbavail(&so->so_snd));
    tp->snd_wnd -= acked_amount;
    mfree = sbcut_locked(&so->so_snd, acked_amount);
    if ((sbused(&so->so_snd) == 0) &&
        /* ... */
        (tp->t_state >= TCPS_FIN_WAIT_1) &&
        (tp->t_flags & TF_SENTFIN)) {
        /* ... */
    }
    /* ... */
    tp->snd_una = th->th_ack;
    /* ... */
    if (acked_amount && sbavail(&so->so_snd))
        rack_adjust_sendmap_head(rack, &so->so_snd);
    rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
    /* ... */
    if (SEQ_GT(tp->snd_una, tp->snd_recover))
        tp->snd_recover = tp->snd_una;
    /* ... */
    if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
        tp->snd_nxt = tp->snd_max;
        /* ... */
    }
    if (/* ... */
        (rack->use_fixed_rate == 0) &&
        (rack->in_probe_rtt == 0) &&
        rack->rc_gp_dyn_mul &&
        rack->rc_always_pace) {
        /* ... */
    }
    if (tp->snd_una == tp->snd_max) {
        /* ... */
        tp->t_flags &= ~TF_PREVVALID;
        if (rack->r_ctl.rc_went_idle_time == 0)
            rack->r_ctl.rc_went_idle_time = 1;
        rack->r_ctl.retran_during_recovery = 0;
        rack->r_ctl.dsack_byte_cnt = 0;
        /* ... */
        if (sbavail(&tptosocket(tp)->so_snd) == 0)
            tp->t_acktime = 0;
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
        rack->rc_suspicious = 0;
        /* ... */
        rack->r_wanted_output = 1;
        sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
        if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
            (sbavail(&so->so_snd) == 0) &&
            (tp->t_flags2 & TF2_DROP_AF_DATA)) {
            /* ... */
            /* tcp_close will kill the inp pre-log the Reset */
            /* ... */
        }
    }
/* rack_log_collapse() -- excerpt */
    if (tcp_bblogging_on(rack->rc_tp)) {
        /* ... */
        log.u_bbr.flex5 = rack->r_must_retran;
        /* ... */
        log.u_bbr.flex7 = rack->rc_has_collapsed;
        /* ... */
        log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
        TCP_LOG_EVENTP(rack->rc_tp, NULL,
            &rack->rc_inp->inp_socket->so_rcv,
            &rack->rc_inp->inp_socket->so_snd,
            /* ... */);
    }
/* rack_collapsed_window() -- excerpt */
    tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
    if ((rack->rc_has_collapsed == 0) ||
        (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd)))
        /* ... */;
    rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd;
    rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max;
    rack->rc_has_collapsed = 1;
    rack->r_collapse_point_valid = 1;
    rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL);
/* rack_un_collapse_window() -- excerpt */
    tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND);
    rack->rc_has_collapsed = 0;
    rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
    /* ... */
    rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
    /* ... */
    if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) {
        rack_log_collapse(rack, rsm->r_start, rsm->r_end,
            rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm);
        /* ... */
        rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point);
#ifndef INVARIANTS
        (void)tqhash_insert(rack->r_ctl.tqh, nrsm);
#else
        if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) {
            /* ... */
        }
#endif
        rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT,
            rack->r_ctl.last_collapse_point, __LINE__);
        if (rsm->r_in_tmap) {
            TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
            nrsm->r_in_tmap = 1;
        }
        /* ... */
    }
    TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) {
        /* ... */
        nrsm->r_flags |= RACK_RWND_COLLAPSED;
        rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm);
        /* ... */
    }
    /* ... */
    rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL);
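    /*
     * Editor's note: when the peer's window opens back up, the sendmap
     * entry straddling the old collapse point is split there, and every
     * entry at or beyond that point is flagged RACK_RWND_COLLAPSED so
     * the data that was sent beyond the shrunken window can be treated
     * specially later.
     */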
/* rack_handle_delayed_ack() -- excerpt */
        /* ... */
            rack->r_ctl.rc_rcvtime, __LINE__);
        tp->t_flags |= TF_DELACK;
    } else {
        rack->r_wanted_output = 1;
        tp->t_flags |= TF_ACKNOW;
    }
/* rack_validate_fo_sendwin_up() -- excerpt */
    if (rack->r_fast_output) {
        /* ... */
        if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) {
            /* ... */
            if (out >= tp->snd_wnd) {
                /* ... */
                rack->r_fast_output = 0;
            } else {
                /* ... */
                rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out;
                if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) {
                    /* ... */
                    rack->r_fast_output = 0;
                }
            }
        }
    }
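    /*
     * Editor's note: if the advertised window no longer covers the data
     * queued for fast output, the cached send is trimmed to the window
     * that remains, or abandoned entirely when the window is already
     * filled or less than one full segment would be left to send.
     */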
/* rack_process_data() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    nsegs = max(1, m->m_pkthdr.lro_nsegs);
    if (/* ... */
        (SEQ_LT(tp->snd_wl1, th->th_seq) ||
        (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
        (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
        /* ... */
        if (/* ... */
            tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
            /* ... */;
        tp->snd_wnd = tiwin;
        /* ... */
        tp->snd_wl1 = th->th_seq;
        tp->snd_wl2 = th->th_ack;
        if (tp->snd_wnd > tp->max_sndwnd)
            tp->max_sndwnd = tp->snd_wnd;
        rack->r_wanted_output = 1;
    }
    /* ... */
    if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) {
        tp->snd_wnd = tiwin;
        /* ... */
        tp->snd_wl1 = th->th_seq;
        tp->snd_wl2 = th->th_ack;
    }
    /* ... */
    if (tp->snd_wnd < ctf_outstanding(tp))
        /* ... */
        rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
    else if (rack->rc_has_collapsed)
        /* ... */;
    if ((rack->r_collapse_point_valid) &&
        (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point)))
        rack->r_collapse_point_valid = 0;
    /* ... */
    if ((rack->rc_in_persist != 0) &&
        (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
        rack->r_ctl.rc_pace_min_segs))) {
        rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime);
        tp->snd_nxt = tp->snd_max;
        /* ... */
        rack->r_wanted_output = 1;
    }
    /* ... */
    if ((rack->rc_in_persist == 0) &&
        (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
        TCPS_HAVEESTABLISHED(tp->t_state) &&
        ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
        sbavail(&tptosocket(tp)->so_snd) &&
        (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
        /* ... */
        rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
    }
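    /*
     * Editor's note: persist state is entered only when the peer's
     * window has shrunk below min(half the largest window seen, one
     * pacing segment), the connection is established, nothing is in
     * flight (or the window collapsed on us), and queued data exceeds
     * the window; it is exited symmetrically once the window grows back
     * past that same threshold.
     */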
    if (tp->t_flags2 & TF2_DROP_AF_DATA) {
        /* ... */
    }
    /* ... */
    tp->rcv_up = tp->rcv_nxt;
    /* ... */
    /*
     * Process the segment text, merging it into the TCP sequencing
     * queue, and arranging for acknowledgment of receipt if necessary.
     * This process logically involves adjusting tp->rcv_wnd as data is
     * presented to the user (this happens in tcp_usrreq.c, case
     * PRU_RCVD). ...
     */
    tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) &&
        (tp->t_flags & TF_FASTOPEN));
    if (/* ... */
        TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        tcp_seq save_start = th->th_seq;
        tcp_seq save_rnxt = tp->rcv_nxt;
        /* ... */
        if (th->th_seq == tp->rcv_nxt &&
            /* ... */
            (TCPS_HAVEESTABLISHED(tp->t_state) ||
            tfo_syn)) {
            /* ... */
            if (so->so_rcv.sb_shlim) {
                /* ... */
                if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
                    /* ... */))
                    /* ... */;
            }
            /* ... */
            tp->rcv_nxt += tlen;
            if (/* ... */
                ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
                (tp->t_fbyte_in == 0)) {
                tp->t_fbyte_in = ticks;
                if (tp->t_fbyte_in == 0)
                    tp->t_fbyte_in = 1;
                if (tp->t_fbyte_out && tp->t_fbyte_in)
                    tp->t_flags2 |= TF2_FBYTES_COMPLETE;
            }
            /* ... */
            if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
                /* ... */
            } else {
                /* ... */
                so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
                /* ... */
                    sbappendstream_locked(&so->so_rcv, m, 0);
            }
            /* ... */
            rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
            /* ... */
            if (so->so_rcv.sb_shlim && appended != mcnt)
                counter_fo_release(so->so_rcv.sb_shlim,
                    mcnt - appended);
        } else {
            /* ... */
            tp->t_flags |= TF_ACKNOW;
            if (tp->t_flags & TF_WAKESOR) {
                tp->t_flags &= ~TF_WAKESOR;
                /* ... */
            }
        }
        if ((tp->t_flags & TF_SACK_PERMIT) &&
            /* ... */
            TCPS_HAVEESTABLISHED(tp->t_state)) {
            /* ... */
            } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) {
                if ((tp->rcv_numsacks >= 1) &&
                    (tp->sackblks[0].end == save_start)) {
                    /* ... */
                        tp->sackblks[0].start,
                        tp->sackblks[0].end);
                    /* ... */
                }
            }
        }
    }
    /* ... */
    if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
        /* ... */
        /*
         * If connection is half-synchronized (ie NEEDSYN
         * flag on) then delay ACK, so it may be piggybacked
         * when SYN is sent. Otherwise, since we received a
         * FIN then no more input can be expected, send ACK
         * now.
         */
        if (tp->t_flags & TF_NEEDSYN) {
            rack_timer_cancel(tp, rack,
                rack->r_ctl.rc_rcvtime, __LINE__);
            tp->t_flags |= TF_DELACK;
        } else {
            tp->t_flags |= TF_ACKNOW;
        }
        tp->rcv_nxt++;
        /* ... */
        switch (tp->t_state) {
        /* ... */
            tp->t_starttime = ticks;
            /* ... */
            rack_timer_cancel(tp, rack,
                rack->r_ctl.rc_rcvtime, __LINE__);
            /* ... */
            rack_timer_cancel(tp, rack,
                rack->r_ctl.rc_rcvtime, __LINE__);
            /* ... */
            /*
             * ... starting the time-wait timer, turning off the
             * other standard timers. ...
             */
            rack_timer_cancel(tp, rack,
                rack->r_ctl.rc_rcvtime, __LINE__);
            /* ... */
        }
    }
    /* ... */
    if ((tp->t_flags & TF_ACKNOW) ||
        (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) {
        rack->r_wanted_output = 1;
    }
/*
 * Here nothing is really faster, it's just that we
 * have broken out the fast-data path also just like
 * the fast-ack.
 */
/* rack_do_fastnewdata() -- excerpt */
    if (__predict_false(th->th_seq != tp->rcv_nxt)) {
        /* ... */
    }
    if (tiwin && tiwin != tp->snd_wnd) {
        /* ... */
    }
    if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) {
        /* ... */
    }
    if (__predict_false((to->to_flags & TOF_TS) &&
        (TSTMP_LT(to->to_tsval, tp->ts_recent)))) {
        /* ... */
    }
    if (__predict_false((th->th_ack != tp->snd_una))) {
        /* ... */
    }
    if (__predict_false(tlen > sbspace(&so->so_rcv))) {
        /* ... */
    }
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /*
     * This is a pure, in-sequence data packet with nothing on the
     * reassembly queue and we have enough buffer space to take it.
     */
    nsegs = max(1, m->m_pkthdr.lro_nsegs);
    /* ... */
    if (so->so_rcv.sb_shlim) {
        /* ... */
        if (counter_fo_get(so->so_rcv.sb_shlim, mcnt,
            /* ... */))
            /* ... */;
    }
    /* ... */
    if (tp->rcv_numsacks)
        /* ... */;
    tp->rcv_nxt += tlen;
    if (/* ... */
        ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
        (tp->t_fbyte_in == 0)) {
        tp->t_fbyte_in = ticks;
        if (tp->t_fbyte_in == 0)
            tp->t_fbyte_in = 1;
        if (tp->t_fbyte_out && tp->t_fbyte_in)
            tp->t_flags2 |= TF2_FBYTES_COMPLETE;
    }
    /* ... */
    tp->snd_wl1 = th->th_seq;
    /* ... */
    tp->rcv_up = tp->rcv_nxt;
    /* ... */
    if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
        /* ... */
    } else {
        /* ... */
        so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
        /* ... */
            sbappendstream_locked(&so->so_rcv, m, 0);
    }
    /* ... */
    rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1);
    /* ... */
    if (so->so_rcv.sb_shlim && mcnt != appended)
        counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended);
    /* ... */
    if (tp->snd_una == tp->snd_max)
        sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
/*
 * This subfunction is used to try to highly optimize the
 * fast path. We again allow window updates that are
 * in sequence to remain in the fast-path. We also add
 * in the __predict's to attempt to help the compiler.
 * Note that if we return a 0, then we can *not* process
 * it and the caller should push the packet into the
 * slow-path.
 */
/* rack_fastack() -- excerpt */
    if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) {
        /* ... */
    }
    if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) {
        /* ... */
    }
    /* ... */
    if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) {
        /* ... */
    }
    if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) {
        /* ... */
    }
    if (__predict_false(IN_RECOVERY(tp->t_flags))) {
        /* ... */
    }
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->r_ctl.rc_sacked) {
        /* ... */
    }
    /* Ok if we reach here, we can process a fast-ack */
    if (rack->gp_ready &&
        (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) {
        /* ... */
    }
    nsegs = max(1, m->m_pkthdr.lro_nsegs);
    /* ... */
    if (tiwin != tp->snd_wnd) {
        tp->snd_wnd = tiwin;
        /* ... */
        tp->snd_wl1 = th->th_seq;
        if (tp->snd_wnd > tp->max_sndwnd)
            tp->max_sndwnd = tp->snd_wnd;
    }
    /* ... */
    if ((rack->rc_in_persist != 0) &&
        (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2),
        rack->r_ctl.rc_pace_min_segs))) {
        /* ... */
    }
    /* ... */
    if ((rack->rc_in_persist == 0) &&
        (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) &&
        TCPS_HAVEESTABLISHED(tp->t_state) &&
        ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) &&
        sbavail(&tptosocket(tp)->so_snd) &&
        (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) {
        /* ... */
        rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack);
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /* ... */
    if ((tp->t_flags & TF_PREVVALID) &&
        ((tp->t_flags & TF_RCVD_TSTMP) == 0)) {
        tp->t_flags &= ~TF_PREVVALID;
        if (tp->t_rxtshift == 1 &&
            (int)(ticks - tp->t_badrxtwin) < 0)
            rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__);
    }
    /* ... */
    rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0);
    /* ... */
    mfree = sbcut_locked(&so->so_snd, acked);
    tp->snd_una = th->th_ack;
    /* ... */
    rack_adjust_sendmap_head(rack, &so->so_snd);
    /* ... */
    rack_log_wakeup(tp, rack, &so->so_snd, acked, 2);
    /* ... */
    tp->t_rxtshift = 0;
    RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
        rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop);
    rack->rc_tlp_in_progress = 0;
    rack->r_ctl.rc_tlp_cnt_out = 0;
    /* ... */
    if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT)
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
    /* ... */
    rack_req_check_for_comp(rack, th->th_ack);
    /* ... */
    if (tp->snd_wnd < ctf_outstanding(tp)) {
        /* ... */
        rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__);
    } else if (rack->rc_has_collapsed)
        /* ... */;
    if ((rack->r_collapse_point_valid) &&
        (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point)))
        rack->r_collapse_point_valid = 0;
    /* ... */
    tp->snd_wl2 = th->th_ack;
    tp->t_dupacks = 0;
    /* ... */
    /*
     * If all outstanding data is acked, stop the retransmit timer,
     * otherwise restart timer using current (possibly backed-off)
     * value. ...
     */
    if (/* ... */
        (rack->use_fixed_rate == 0) &&
        (rack->in_probe_rtt == 0) &&
        rack->rc_gp_dyn_mul &&
        rack->rc_always_pace) {
        /* ... */
    }
    if (tp->snd_una == tp->snd_max) {
        tp->t_flags &= ~TF_PREVVALID;
        rack->r_ctl.retran_during_recovery = 0;
        rack->rc_suspicious = 0;
        rack->r_ctl.dsack_byte_cnt = 0;
        rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL);
        if (rack->r_ctl.rc_went_idle_time == 0)
            rack->r_ctl.rc_went_idle_time = 1;
        /* ... */
        if (sbavail(&tptosocket(tp)->so_snd) == 0)
            tp->t_acktime = 0;
        rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__);
    }
    /* ... */
    if (acked && rack->r_fast_output)
        /* ... */;
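    /*
     * Editor's note (hedged): the elided statement above presumably
     * credits the newly acked bytes back to the cached fast-output
     * state, in the spirit of rack_gain_for_fastoutput() shown earlier.
     */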
    if (sbavail(&so->so_snd)) {
        rack->r_wanted_output = 1;
    }
/* rack_do_syn_sent() -- excerpt */
    /*
     * If the state is SYN_SENT: if seg contains an ACK, but not for our
     * SYN, drop the input. if seg contains a RST, then drop the
     * connection. if seg does not contain SYN, then drop it. Otherwise
     * this is an acceptable SYN segment: initialize tp->rcv_nxt and
     * tp->irs; if seg contains ack then advance tp->snd_una; if seg
     * ...
     */
    if ((thflags & TH_ACK) &&
        (SEQ_LEQ(th->th_ack, tp->iss) ||
        SEQ_GT(th->th_ack, tp->snd_max))) {
        /* ... */
    }
    /* ... */
    tp->irs = th->th_seq;
    /* ... */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
        (TF_RCVD_SCALE | TF_REQ_SCALE)) {
        tp->rcv_scale = tp->request_r_scale;
    }
    /* ... */
    tp->rcv_adv += min(tp->rcv_wnd,
        TCP_MAXWIN << tp->rcv_scale);
    /* ... */
    if ((tp->t_flags & TF_FASTOPEN) &&
        (tp->snd_una != tp->snd_max)) {
        /* ... */
        if (SEQ_LT(th->th_ack, tp->snd_max))
            /* ... */;
    }
    /* ... */
        rack->r_ctl.rc_rcvtime, __LINE__);
        tp->t_flags |= TF_DELACK;
    } else {
        rack->r_wanted_output = 1;
        tp->t_flags |= TF_ACKNOW;
    }
    /* ... */
    if (SEQ_GT(th->th_ack, tp->snd_una)) {
        /*
         * ... ack-processing since the
         * data stream in our send-map ...
         */
        tp->snd_una++;
        if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) {
            /* ... */
            rsm = tqhash_min(rack->r_ctl.tqh);
            /* ... */
            if (rsm->r_flags & RACK_HAS_SYN) {
                rsm->r_flags &= ~RACK_HAS_SYN;
                rsm->r_start++;
            }
            rack->r_ctl.rc_resend = rsm;
        }
    }
    /* ... */
    /*
     * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1
     */
    tp->t_starttime = ticks;
    if (tp->t_flags & TF_NEEDFIN) {
        /* ... */
        tp->t_flags &= ~TF_NEEDFIN;
        /* ... */
    }
    /* ... */
    /*
     * Received initial SYN in SYN-SENT[*] state => simultaneous
     * open.  If segment contains CC option and there is a
     * cached CC, apply TAO test.  If it succeeds, connection is
     * half-synchronized.  Otherwise, do 3-way handshake:
     *        SYN-SENT -> SYN-RECEIVED
     *        SYN-SENT* -> SYN-RECEIVED*
     * If there was no CC option, clear cached CC value.
     */
    tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN);
    /* ... */
    /*
     * Advance th->th_seq to correspond to first data byte. If data,
     * trim to stay within window, dropping FIN if necessary.
     */
    th->th_seq++;
    if (tlen > tp->rcv_wnd) {
        todrop = tlen - tp->rcv_wnd;
        m_adj(m, -todrop);
        tlen = tp->rcv_wnd;
        /* ... */
    }
    /* ... */
    tp->snd_wl1 = th->th_seq - 1;
    tp->rcv_up = th->th_seq;
    /* ... */
    /* For syn-sent we need to possibly update the rtt */
    if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
        /* ... */
        t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
        if (!tp->t_rttlow || tp->t_rttlow > t)
            tp->t_rttlow = t;
        rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4);
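        /*
         * Editor's note: with the timestamp option, the echoed value
         * (to_tsecr) is in millisecond ticks, so the handshake RTT is
         * (now - tsecr) scaled by HPTS_USEC_IN_MSEC into microseconds;
         * t_rttlow tracks the lowest RTT observed so far.
         */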
        /* ... */
    }
    /* ... */
    if (tp->t_state == TCPS_FIN_WAIT_1) {
        /* ... */
    }
    /* ... */
    if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
        /* ... */
    }
/* rack_do_syn_recv() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    if ((thflags & TH_ACK) &&
        (SEQ_LEQ(th->th_ack, tp->snd_una) ||
        SEQ_GT(th->th_ack, tp->snd_max))) {
        /* ... */
    }
    if (tp->t_flags & TF_FASTOPEN) {
        /* ... */
    }
    /* ... */
    /* non-initial SYN is ignored */
    if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) ||
        (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) ||
        (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /*
     * In the SYN-RECEIVED state, validate that the packet belongs to
     * this connection before trimming the data to fit the receive
     * window. ...
     */
    if (SEQ_LT(th->th_seq, tp->irs)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    tp->snd_wnd = tiwin;
    /* ... */
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    /* ... */
    if (tp->t_flags & TF_FASTOPEN) {
        /* ... */
    }
    /* ... */
    if (tp->t_flags & TF_SONOTCONN) {
        tp->t_flags &= ~TF_SONOTCONN;
        /* ... */
    }
    /* ... */
    if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
        (TF_RCVD_SCALE | TF_REQ_SCALE)) {
        tp->rcv_scale = tp->request_r_scale;
    }
    /*
     * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* ->
     * FIN-WAIT-1
     */
    tp->t_starttime = ticks;
    if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) {
        tcp_fastopen_decrement_counter(tp->t_tfo_pending);
        tp->t_tfo_pending = NULL;
    }
    if (tp->t_flags & TF_NEEDFIN) {
        /* ... */
        tp->t_flags &= ~TF_NEEDFIN;
        /* ... */
    }
    /* ... */
    if (!(tp->t_flags & TF_FASTOPEN))
        /* ... */;
    /* ... */
    if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN))
        tp->snd_una++;
    /* ... */
    if (tp->t_flags & TF_WAKESOR) {
        tp->t_flags &= ~TF_WAKESOR;
        /* ... */
    }
    /* ... */
    tp->snd_wl1 = th->th_seq - 1;
    /* For syn-recv we need to possibly update the rtt */
    if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) {
        /* ... */
        t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC;
        if (!tp->t_rttlow || tp->t_rttlow > t)
            tp->t_rttlow = t;
        rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5);
        /* ... */
    }
    /* ... */
    if (tp->t_state == TCPS_FIN_WAIT_1) {
        /* ... */
    }
    /* ... */
    if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
        /* ... */
    }
/* rack_do_established() -- excerpt */
    /*
     * Header prediction: check for the two common cases of a
     * uni-directional data xfer. If the packet has no control flags,
     * is in-sequence, the window didn't change and we're not
     * retransmitting, it's a candidate. If the length is zero and the
     * ack moved forward, we're the sender side of the xfer. Just free
     * the data acked & wake any higher level process that was blocked
     * waiting for space. If the length is non-zero and the ack didn't
     * move, we're the receiver side. If we're getting packets in-order
     * (the reassembly queue is empty), add the data to the socket
     * buffer and note that we need a delayed ack. Make sure that the
     * hidden state-flags are also off. Since we check for
     * TCPS_ESTABLISHED first, it can only be TH_NEEDSYN.
     */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (__predict_true(((to->to_flags & TOF_SACK) == 0)) &&
        /* ... */
        __predict_true(th->th_seq == tp->rcv_nxt)) {
        /* ... */
            tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) {
            /* ... */
        }
    }
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
    }
/* rack_do_close_wait() -- excerpt */
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                /* ... */);
        /* ... */
    }
/* rack_check_data_after_close() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    if (rack->rc_allow_data_af_clo == 0) {
        /* ... */
        /* tcp_close will kill the inp pre-log the Reset */
        /* ... */
    }
    /* ... */
    if (sbavail(&so->so_snd) == 0)
        /* ... */;
    /* ... */
    tp->rcv_nxt = th->th_seq + *tlen;
    tp->t_flags2 |= TF2_DROP_AF_DATA;
    rack->r_wanted_output = 1;
/* rack_do_fin_wait_1() -- excerpt */
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((tp->t_flags & TF_CLOSED) && tlen &&
        /* ... */) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
        /* ... */
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                /* ... */);
        /* ... */
    }
/* rack_do_closing() -- excerpt */
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                /* ... */);
        /* ... */
    }
/* rack_do_lastack() -- excerpt */
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                /* ... */);
        /* ... */
    }
/* rack_do_fin_wait_2() -- excerpt */
    /* ... */
    if ((thflags & TH_RST) ||
        (tp->t_fin_is_rst && (thflags & TH_FIN)))
        /* ... */;
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent &&
        TSTMP_LT(to->to_tsval, tp->ts_recent)) {
        /* ... */
    }
    /* ... */
    if ((tp->t_flags & TF_CLOSED) && tlen &&
        /* ... */) {
        /* ... */
    }
    /* ... */
    if ((to->to_flags & TOF_TS) != 0 &&
        SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
        SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
        /* ... */)) {
        tp->ts_recent_age = tcp_ts_getticks();
        tp->ts_recent = to->to_tsval;
    }
    /*
     * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag
     * is on (half-synchronized state), then queue data for later
     * processing; else drop segment and return.
     */
    if ((thflags & TH_ACK) == 0) {
        if (tp->t_flags & TF_NEEDSYN) {
            /* ... */
        } else if (tp->t_flags & TF_ACKNOW) {
            /* ... */
            ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1;
            /* ... */
        } else {
            /* ... */
        }
    }
    /* ... */
    if (sbavail(&so->so_snd)) {
        /* ... */
            rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr,
                /* ... */);
        /* ... */
    }
/* rack_clear_rate_sample() */
    rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY;
    rack->r_ctl.rack_rs.rs_rtt_cnt = 0;
    rack->r_ctl.rack_rs.rs_rtt_tot = 0;
/* rack_set_pace_segments() -- excerpt */
    if (rack->rc_hybrid_mode &&
        (rack->r_ctl.rc_pace_max_segs != 0) &&
        /* ... */
        (rack->r_ctl.rc_last_sft != NULL)) {
        rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS;
        /* ... */
    }
    orig_min = rack->r_ctl.rc_pace_min_segs;
    orig_max = rack->r_ctl.rc_pace_max_segs;
    user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs;
    if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs)
        /* ... */;
    rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
    if (rack->use_fixed_rate || rack->rc_force_max_seg) {
        if (user_max != rack->r_ctl.rc_pace_max_segs)
            /* ... */;
    }
    if (rack->rc_force_max_seg) {
        rack->r_ctl.rc_pace_max_segs = user_max;
    } else if (rack->use_fixed_rate) {
        /* ... */
        if ((rack->r_ctl.crte == NULL) ||
            (bw_est != rack->r_ctl.crte->rate)) {
            rack->r_ctl.rc_pace_max_segs = user_max;
        } else {
            /* ... */
            if (/* ... */
                (rack->r_ctl.rc_user_set_min_segs == 1))
                /* ... */
                    rack->r_ctl.rc_pace_min_segs);
            /* ... */
            rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(
                /* ... */
                rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor);
        }
    } else if (rack->rc_always_pace) {
        if (rack->r_ctl.gp_bw ||
            rack->r_ctl.init_rate) {
            /* ... */
            orig = rack->r_ctl.rc_pace_max_segs;
            /* ... */
                rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack,
                    /* ... */
                    ctf_fixed_maxseg(rack->rc_tp));
            /* ... */
                rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs;
            if (orig != rack->r_ctl.rc_pace_max_segs)
                /* ... */;
        }
    } else if ((rack->r_ctl.gp_bw == 0) &&
        (rack->r_ctl.rc_pace_max_segs == 0)) {
        /* ... */
        rack->r_ctl.rc_pace_max_segs = rc_init_window(rack);
    }
    /* ... */
    if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) {
        /* ... */
        rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES;
    }
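    /*
     * Editor's note: pace-size precedence as visible above: a forced
     * max segment count wins, then fixed-rate pacing (the burst size is
     * recomputed from the hardware rate entry unless the bandwidth
     * estimate changed), then dynamic pacing from the goodput estimate,
     * and with no estimate at all the initial window is used; the
     * result is always capped at PACE_MAX_IP_BYTES.
     */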
/* rack_init_fsb_block() -- excerpt */
    if (rack->r_is_v6) {
        rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
        ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
        if (tp->t_port) {
            rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
            /* ... */
            udp->uh_sport = htons(V_tcp_udp_tunneling_port);
            udp->uh_dport = tp->t_port;
            rack->r_ctl.fsb.udp = udp;
            rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
        } else {
            /* ... */
            rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1);
            rack->r_ctl.fsb.udp = NULL;
        }
        /* ... */
        tcpip_fillheaders(rack->rc_inp,
            tp->t_port,
            ip6, rack->r_ctl.fsb.th);
        rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL);
    }
    /* ... */
        rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr);
        ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
        if (tp->t_port) {
            rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr);
            /* ... */
            udp->uh_sport = htons(V_tcp_udp_tunneling_port);
            udp->uh_dport = tp->t_port;
            rack->r_ctl.fsb.udp = udp;
            rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1);
        } else {
            /* ... */
            rack->r_ctl.fsb.udp = NULL;
            rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1);
        }
        /* ... */
        tcpip_fillheaders(rack->rc_inp,
            tp->t_port,
            ip, rack->r_ctl.fsb.th);
        rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl;
    /* ... */
    rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0),
        (long)TCP_MAXWIN << tp->rcv_scale);
    rack->r_fsb_inited = 1;
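    /*
     * Editor's note: the fsb appears to be a pre-built packet header
     * template (IPv6 or IPv4, with a UDP header spliced in when the
     * connection is UDP-tunneled via tp->t_port), so the fast output
     * path can stamp headers from the template instead of rebuilding
     * them on every send; the cached receive window is seeded here too.
     */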
/* rack_init_fsb() -- excerpt */
    /* ... */
    rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct udphdr);
    /* ... */
    rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr);
    /* ... */
    rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len,
        M_TCPFSB, /* ... */);
    if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) {
        /* ... */
        rack->r_fsb_inited = 0;
    }
/* rack_log_hystart_event() -- excerpt */
    /*
     * ...
     * 20 - Initial round setup
     * 21 - Rack declares a new round.
     */
    tp = rack->rc_tp;
    /* ... */
    log.u_bbr.flex1 = rack->r_ctl.current_round;
    log.u_bbr.flex2 = rack->r_ctl.roundends;
    /* ... */
    log.u_bbr.flex4 = tp->snd_max;
    /* ... */
    log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes;
    log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes;
    /* ... */
        &tptosocket(tp)->so_rcv,
        &tptosocket(tp)->so_snd,
    /* ... */
/* rack_deferred_init() -- excerpt */
    rack->rack_deferred_inited = 1;
    rack->r_ctl.roundends = tp->snd_max;
    rack->r_ctl.rc_high_rwnd = tp->snd_wnd;
    rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
/* rack_init_retransmit_value() -- excerpt */
    /*
     * 1 - Use full sized retransmits i.e. limit ...
     * ...
     * 2 - Use pacer min granularity as a guide to ...
     * ...
     * 0 - The rack default 1 MSS (anything not 0/1/2 ...)
     */
    /* ... */
        rack->full_size_rxt = 1;
        rack->shape_rxt_to_pacing_min = 0;
    /* ... */
        rack->full_size_rxt = 0;
        rack->shape_rxt_to_pacing_min = 1;
    /* ... */
        rack->full_size_rxt = 0;
        rack->shape_rxt_to_pacing_min = 0;
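    /*
     * Editor's note (hedged): the elided conditions presumably select
     * on the configured control value, mapping mode 1 to full-sized
     * retransmits, mode 2 to retransmits shaped by the pacer's minimum
     * granularity, and everything else to the RACK default of one MSS;
     * the two flags are set mutually exclusively in every branch shown.
     */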
/* rack_log_chg_info() -- excerpt */
    if (tcp_bblogging_on(rack->rc_tp)) {
        /* ... */
    }
/* rack_chg_query() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    switch (reqr->req) {
    /* ... */
        if ((reqr->req_param == tp->snd_max) ||
            (tp->snd_max == tp->snd_una)) {
            /* ... */
        }
        rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param);
        /* ... */
            /* Can't find that seq -- unlikely */
        /* ... */
        reqr->sendmap_start = rsm->r_start;
        reqr->sendmap_end = rsm->r_end;
        reqr->sendmap_send_cnt = rsm->r_rtr_cnt;
        reqr->sendmap_fas = rsm->r_fas;
        if (reqr->sendmap_send_cnt > SNDMAP_NRTX)
            reqr->sendmap_send_cnt = SNDMAP_NRTX;
        for (i = 0; i < reqr->sendmap_send_cnt; i++)
            reqr->sendmap_time[i] = rsm->r_tim_lastsent[i];
        reqr->sendmap_ack_arrival = rsm->r_ack_arrival;
        reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK;
        reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes;
        reqr->sendmap_dupacks = rsm->r_dupack;
        /* ... */
            rsm->r_start,
            rsm->r_end,
            rsm->r_flags);
        /* ... */
        if (rack->r_ctl.rc_hpts_flags == 0) {
            /* ... */
        }
        reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags;
        if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
            reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to;
        }
        if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
            reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp;
        }
        /* ... */
            rack->r_ctl.rc_hpts_flags,
            rack->r_ctl.rc_last_output_to,
            rack->r_ctl.rc_timer_exp);
        /* ... */
        reqr->rack_num_dsacks = rack->r_ctl.num_dsack;
        reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts;
        /* ... */
        reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time;
        reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt;
        reqr->rack_rtt = rack->rc_rack_rtt;
        reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time;
        reqr->rack_srtt_measured = rack->rc_srtt_measure_made;
        /* ... */
        reqr->rack_sacked = rack->r_ctl.rc_sacked;
        reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt;
        reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered;
        reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs;
        reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt;
        reqr->rack_prr_out = rack->r_ctl.rc_prr_out;
        /* ... */
        reqr->rack_tlp_out = rack->rc_tlp_in_progress;
        reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out;
        if (rack->rc_in_persist) {
            reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time;
            reqr->rack_in_persist = 1;
        } else {
            reqr->rack_time_went_idle = 0;
            reqr->rack_in_persist = 0;
        }
        if (rack->r_wanted_output)
            reqr->rack_wanted_output = 1;
        else
            reqr->rack_wanted_output = 0;
        /* ... */
    default:
        return (-EINVAL);
    }
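/*
 * Editor's note: rack_chg_query is the stack's tfb_chg_query hook; it
 * lets a stack taking over the connection pull RACK's internal state
 * (sendmap entries by sequence number, pending pacer/timer deadlines,
 * and the RACK/PRR/TLP timing counters) before the switch completes,
 * returning -EINVAL for query types it does not understand.
 */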
/* rack_switch_failed() -- excerpt */
    rack = (struct tcp_rack *)tp->t_fb_ptr;
    /* ... */
    if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
        tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
    else
        tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
    if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
        tp->t_flags2 |= TF2_MBUF_ACKCMP;
    if (tp->t_in_hpts > IHPTS_NONE) {
        /* ... */
    }
    /* ... */
    if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
        if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
            toval = rack->r_ctl.rc_last_output_to - cts;
        } else {
            /* ... */
        }
    } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
        if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
            toval = rack->r_ctl.rc_timer_exp - cts;
        } else {
            /* ... */
        }
    }
    /* ... */
14394 * to not refer to tp->t_fb_ptr. This has the old rack in rack_init_outstanding()
14400 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init_outstanding()
14408 rsm->r_no_rtt_allowed = 1; in rack_init_outstanding()
14409 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14410 rsm->r_rtr_cnt = 1; in rack_init_outstanding()
14411 rsm->r_rtr_bytes = 0; in rack_init_outstanding()
14412 if (tp->t_flags & TF_SENTFIN) in rack_init_outstanding()
14413 rsm->r_flags |= RACK_HAS_FIN; in rack_init_outstanding()
14414 rsm->r_end = tp->snd_max; in rack_init_outstanding()
14415 if (tp->snd_una == tp->iss) { in rack_init_outstanding()
14417 rsm->r_flags |= RACK_HAS_SYN; in rack_init_outstanding()
14418 rsm->r_start = tp->iss; in rack_init_outstanding()
14419 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); in rack_init_outstanding()
14421 rsm->r_start = tp->snd_una; in rack_init_outstanding()
14422 rsm->r_dupack = 0; in rack_init_outstanding()
14423 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14424 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14425 if (rsm->m) { in rack_init_outstanding()
14426 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14427 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14429 rsm->orig_m_len = 0; in rack_init_outstanding()
14430 rsm->orig_t_space = 0; in rack_init_outstanding()
14434 * This can happen if we have a stand-alone FIN or in rack_init_outstanding()
14437 rsm->m = NULL; in rack_init_outstanding()
14438 rsm->orig_m_len = 0; in rack_init_outstanding()
14439 rsm->orig_t_space = 0; in rack_init_outstanding()
14440 rsm->soff = 0; in rack_init_outstanding()
14443 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14448 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14450 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14451 rsm->r_in_tmap = 1; in rack_init_outstanding()
14458 at = tp->snd_una; in rack_init_outstanding()
14459 while (at != tp->snd_max) { in rack_init_outstanding()
14463 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) in rack_init_outstanding()
14475 rsm->r_dupack = qr.sendmap_dupacks; in rack_init_outstanding()
14476 rsm->r_start = qr.sendmap_start; in rack_init_outstanding()
14477 rsm->r_end = qr.sendmap_end; in rack_init_outstanding()
14479 rsm->r_fas = qr.sendmap_end; in rack_init_outstanding()
14481 rsm->r_fas = rsm->r_start - tp->snd_una; in rack_init_outstanding()
14487 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; in rack_init_outstanding()
14488 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; in rack_init_outstanding()
14489 rsm->r_rtr_cnt = qr.sendmap_send_cnt; in rack_init_outstanding()
14490 rsm->r_ack_arrival = qr.sendmap_ack_arrival; in rack_init_outstanding()
14491 for (i=0 ; i<rsm->r_rtr_cnt; i++) in rack_init_outstanding()
14492 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; in rack_init_outstanding()
14493 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14494 (rsm->r_start - tp->snd_una), &rsm->soff); in rack_init_outstanding()
14495 if (rsm->m) { in rack_init_outstanding()
14496 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14497 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14499 rsm->orig_m_len = 0; in rack_init_outstanding()
14500 rsm->orig_t_space = 0; in rack_init_outstanding()
14503 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14508 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14510 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_init_outstanding()
14511 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_init_outstanding()
14512 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > in rack_init_outstanding()
14513 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { in rack_init_outstanding()
14520 rsm->r_in_tmap = 1; in rack_init_outstanding()
14525 if (rsm->r_in_tmap == 0) { in rack_init_outstanding()
14529 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14530 rsm->r_in_tmap = 1; in rack_init_outstanding()
14533 if ((rack->r_ctl.rc_sacklast == NULL) || in rack_init_outstanding()
14534 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { in rack_init_outstanding()
14535 rack->r_ctl.rc_sacklast = rsm; in rack_init_outstanding()
14539 rsm->r_start, in rack_init_outstanding()
14540 rsm->r_end, in rack_init_outstanding()
14541 rsm->r_flags); in rack_init_outstanding()
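/*
 * A minimal stand-alone sketch of the rebuild loop above, assuming a
 * hypothetical query callback in place of the old stack's
 * tfb_chg_query hook.  The names query_fn, map_entry and
 * rebuild_outstanding are illustrative, not part of rack.c, and
 * sequence-space wrap is ignored for brevity.
 */
#include <stddef.h>
#include <stdint.h>

struct map_entry {
	uint32_t start;			/* first sequence covered */
	uint32_t end;			/* one past the last covered byte */
};

/* Returns nonzero and fills *out when an entry covers 'seq'. */
typedef int (*query_fn)(uint32_t seq, struct map_entry *out);

static int
rebuild_outstanding(uint32_t snd_una, uint32_t snd_max, query_fn q,
    struct map_entry *dst, size_t dst_len)
{
	uint32_t at = snd_una;
	size_t n = 0;

	/* Walk [snd_una, snd_max) one entry at a time, as above. */
	while (at != snd_max && n < dst_len) {
		if ((*q)(at, &dst[n]) == 0)
			break;		/* old stack has nothing more */
		at = dst[n].end;	/* resume after this entry */
		n++;
	}
	return ((int)n);
}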
14562 * will be tp->t_fb_ptr. If it's a stack switch that in rack_init()
14566 if (ptr == &tp->t_fb_ptr) in rack_init()
14582 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); in rack_init()
14583 if (rack->r_ctl.tqh == NULL) { in rack_init()
14587 tqhash_init(rack->r_ctl.tqh); in rack_init()
14588 TAILQ_INIT(&rack->r_ctl.rc_free); in rack_init()
14589 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_init()
14590 rack->rc_tp = tp; in rack_init()
14591 rack->rc_inp = inp; in rack_init()
14593 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; in rack_init()
14610 rack->rc_new_rnd_needed = 1; in rack_init()
14611 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; in rack_init()
14614 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; in rack_init()
14615 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; in rack_init()
14616 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; in rack_init()
14618 rack->rc_pace_to_cwnd = 1; in rack_init()
14620 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; in rack_init()
14622 rack->use_rack_rr = 1; in rack_init()
14624 rack->rc_pace_dnd = 1; in rack_init()
14627 tp->t_delayed_ack = 1; in rack_init()
14629 tp->t_delayed_ack = 0; in rack_init()
14632 tp->t_flags2 |= TF2_TCP_ACCOUNTING; in rack_init()
14635 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; in rack_init()
14636 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); in rack_init()
14637 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); in rack_init()
14638 if (rack->r_ctl.pcm_s == NULL) { in rack_init()
14639 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_init()
14642 rack->r_ctl.side_chan_dis_mask = tcp_sidechannel_disable_mask; in rack_init()
14644 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; in rack_init()
14645 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; in rack_init()
14647 rack->rack_enable_scwnd = 1; in rack_init()
14648 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_init()
14649 rack->rc_user_set_max_segs = rack_hptsi_segments; in rack_init()
14650 rack->r_ctl.max_reduction = rack_max_reduce; in rack_init()
14651 rack->rc_force_max_seg = 0; in rack_init()
14652 TAILQ_INIT(&rack->r_ctl.opt_list); in rack_init()
14653 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; in rack_init()
14654 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; in rack_init()
14656 rack->rack_hibeta = 1; in rack_init()
14659 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; in rack_init()
14660 rack->r_ctl.saved_hibeta = rack_hibeta_setting; in rack_init()
14663 rack->r_ctl.saved_hibeta = 50; in rack_init()
14668 * will never have all 1's in ms :-) in rack_init()
14670 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; in rack_init()
14671 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; in rack_init()
14672 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; in rack_init()
14673 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; in rack_init()
14674 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; in rack_init()
14675 rack->r_ctl.rc_highest_us_rtt = 0; in rack_init()
14676 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; in rack_init()
14677 rack->pcm_enabled = rack_pcm_is_enabled; in rack_init()
14679 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_init()
14680 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); in rack_init()
14682 rack->r_use_cmp_ack = 1; in rack_init()
14684 rack->rack_no_prr = 1; in rack_init()
14686 rack->rc_gp_no_rec_chg = 1; in rack_init()
14688 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_init()
14689 rack->rc_always_pace = 1; in rack_init()
14690 if (rack->rack_hibeta) in rack_init()
14693 rack->rc_always_pace = 0; in rack_init()
14694 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) in rack_init()
14695 rack->r_mbuf_queue = 1; in rack_init()
14697 rack->r_mbuf_queue = 0; in rack_init()
14700 rack->r_limit_scw = 1; in rack_init()
14702 rack->r_limit_scw = 0; in rack_init()
14704 rack->rc_labc = V_tcp_abc_l_var; in rack_init()
14706 rack->r_use_hpts_min = 1; in rack_init()
14707 if (tp->snd_una != 0) { in rack_init()
14708 rack->rc_sendvars_notset = 0; in rack_init()
14716 * syn-cache. This means none of the in rack_init()
14720 rack->rc_sendvars_notset = 1; in rack_init()
14723 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; in rack_init()
14724 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; in rack_init()
14725 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; in rack_init()
14726 rack->r_ctl.rc_min_to = rack_min_to; in rack_init()
14727 microuptime(&rack->r_ctl.act_rcv_time); in rack_init()
14728 rack->r_ctl.rc_last_time_decay = rack->r_ctl.act_rcv_time; in rack_init()
14729 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; in rack_init()
14731 rack->r_up_only = 1; in rack_init()
14734 rack->rc_gp_dyn_mul = 1; in rack_init()
14736 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_init()
14738 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_init()
14739 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; in rack_init()
14741 rack->rc_skip_timely = 1; in rack_init()
14743 if (rack->rc_skip_timely) { in rack_init()
14744 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_init()
14745 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_init()
14746 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_init()
14748 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_init()
14749 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14750 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_mssectick(&rack->r_ctl.act_rcv_time); in rack_init()
14752 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, in rack_init()
14754 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_init()
14755 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_init()
14756 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_init()
14757 rack->r_ctl.rc_went_idle_time = us_cts; in rack_init()
14758 rack->r_ctl.rc_time_probertt_starts = 0; in rack_init()
14760 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; in rack_init()
14762 rack->r_ctl.gate_to_fs = 1; in rack_init()
14763 rack->r_ctl.gp_gain_req = rack_gp_gain_req; in rack_init()
14769 rack->rc_rack_tmr_std_based = 1; in rack_init()
14773 rack->rc_rack_use_dsack = 1; in rack_init()
14777 rack->r_ctl.req_measurements = rack_req_measurements; in rack_init()
14779 rack->r_ctl.req_measurements = 1; in rack_init()
14781 rack->rack_hdw_pace_ena = 1; in rack_init()
14783 rack->r_rack_hw_rate_caps = 1; in rack_init()
14785 rack->rack_rec_nonrxt_use_cr = 1; in rack_init()
14794 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_init()
14796 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_init()
14798 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_init()
14806 tp->t_flags &= ~TF_GPUTINPROG; in rack_init()
14807 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14808 (tp->t_state != TCPS_TIME_WAIT)) { in rack_init()
14813 if (SEQ_GT(tp->snd_max, tp->iss)) in rack_init()
14814 snt = tp->snd_max - tp->iss; in rack_init()
14825 if (tp->snd_cwnd < iwin) in rack_init()
14826 tp->snd_cwnd = iwin; in rack_init()
14847 tp->snd_ssthresh = 0xffffffff; in rack_init()
14858 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14859 (tp->t_state != TCPS_TIME_WAIT) && in rack_init()
14861 (tp->snd_una != tp->snd_max)) { in rack_init()
14870 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_init()
14871 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_init()
14873 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_init()
14874 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_init()
14875 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_init()
14881 * they are non-zero. They are kept with a 5 in rack_init()
14886 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); in rack_init()
14887 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { in rack_init()
14889 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init()
14899 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14901 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; in rack_init()
14902 rack->r_ctl.num_dsack = qr.rack_num_dsacks; in rack_init()
14903 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; in rack_init()
14904 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; in rack_init()
14905 rack->rc_rack_rtt = qr.rack_rtt; in rack_init()
14906 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; in rack_init()
14907 rack->r_ctl.rc_sacked = qr.rack_sacked; in rack_init()
14908 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; in rack_init()
14909 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; in rack_init()
14910 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; in rack_init()
14911 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; in rack_init()
14912 rack->r_ctl.rc_prr_out = qr.rack_prr_out; in rack_init()
14914 rack->rc_tlp_in_progress = 1; in rack_init()
14915 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; in rack_init()
14917 rack->rc_tlp_in_progress = 0; in rack_init()
14918 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_init()
14921 rack->rc_srtt_measure_made = 1; in rack_init()
14923 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; in rack_init()
14925 if (rack->r_ctl.rc_scw) { in rack_init()
14926 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_init()
14927 rack->rack_scwnd_is_idle = 1; in rack_init()
14930 rack->r_ctl.persist_lost_ends = 0; in rack_init()
14931 rack->probe_not_answered = 0; in rack_init()
14932 rack->forced_ack = 0; in rack_init()
14933 tp->t_rxtshift = 0; in rack_init()
14934 rack->rc_in_persist = 1; in rack_init()
14935 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_init()
14936 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_init()
14939 rack->r_wanted_output = 1; in rack_init()
14948 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14951 * non-zero return means we have a timer('s) in rack_init()
14957 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; in rack_init()
14959 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; in rack_init()
14961 tov = qr.timer_pacing_to - us_cts; in rack_init()
14966 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; in rack_init()
14969 tov = qr.timer_timer_exp - us_cts; in rack_init()
14975 rack->r_ctl.rc_hpts_flags, in rack_init()
14976 rack->r_ctl.rc_last_output_to, in rack_init()
14977 rack->r_ctl.rc_timer_exp); in rack_init()
14983 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); in rack_init()
14987 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, in rack_init()
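/*
 * A small sketch of the timer-restoration arithmetic above: the old
 * stack hands over an absolute expiry, and the remaining timeout is
 * its distance from "now", clamped to a tiny positive value when the
 * deadline already passed so the timer still fires promptly.  The
 * names and the 1-usec floor are illustrative only.
 */
#include <stdint.h>

static uint32_t
remaining_timeout(uint32_t expiry_us, uint32_t now_us)
{
	/* Wrap-safe TSTMP_GT-style compare on 32-bit microsecond ticks. */
	if ((int32_t)(expiry_us - now_us) > 0)
		return (expiry_us - now_us);
	return (1);	/* already due: fire as soon as possible */
}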
14996 if ((tp->t_state == TCPS_CLOSED) || in rack_handoff_ok()
14997 (tp->t_state == TCPS_LISTEN)) { in rack_handoff_ok()
15001 if ((tp->t_state == TCPS_SYN_SENT) || in rack_handoff_ok()
15002 (tp->t_state == TCPS_SYN_RECEIVED)) { in rack_handoff_ok()
15009 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { in rack_handoff_ok()
15022 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ in rack_handoff_ok()
15036 if (tp->t_fb_ptr) { in rack_fini()
15042 tp->t_flags &= ~TF_FORCEDATA; in rack_fini()
15043 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fini()
15052 if (rack->r_ctl.rc_scw) { in rack_fini()
15055 if (rack->r_limit_scw) in rack_fini()
15056 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); in rack_fini()
15059 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, in rack_fini()
15060 rack->r_ctl.rc_scw_index, in rack_fini()
15062 rack->r_ctl.rc_scw = NULL; in rack_fini()
15065 if (rack->r_ctl.fsb.tcp_ip_hdr) { in rack_fini()
15066 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); in rack_fini()
15067 rack->r_ctl.fsb.tcp_ip_hdr = NULL; in rack_fini()
15068 rack->r_ctl.fsb.th = NULL; in rack_fini()
15070 if (rack->rc_always_pace == 1) { in rack_fini()
15074 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { in rack_fini()
15077 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); in rack_fini()
15078 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_fini()
15082 if (rack->r_ctl.crte != NULL) { in rack_fini()
15083 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_fini()
15084 rack->rack_hdrw_pacing = 0; in rack_fini()
15085 rack->r_ctl.crte = NULL; in rack_fini()
15092 * get each one and free it like a cum-ack would and in rack_fini()
15095 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15097 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_fini()
15098 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15100 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15102 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15104 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_fini()
15105 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
15106 rack->rc_free_cnt--; in rack_fini()
15109 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15111 if (rack->r_ctl.pcm_s != NULL) { in rack_fini()
15112 free(rack->r_ctl.pcm_s, M_TCPPCM); in rack_fini()
15113 rack->r_ctl.pcm_s = NULL; in rack_fini()
15114 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_fini()
15115 rack->r_ctl.pcm_i.cnt = 0; in rack_fini()
15117 if ((rack->r_ctl.rc_num_maps_alloced > 0) && in rack_fini()
15124 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; in rack_fini()
15125 log.u_bbr.flex2 = rack->rc_free_cnt; in rack_fini()
15127 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fini()
15128 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
15130 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
15137 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), in rack_fini()
15140 rack->r_ctl.rc_num_maps_alloced)); in rack_fini()
15141 rack->rc_free_cnt = 0; in rack_fini()
15142 free(rack->r_ctl.tqh, M_TCPFSB); in rack_fini()
15143 rack->r_ctl.tqh = NULL; in rack_fini()
15144 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); in rack_fini()
15145 tp->t_fb_ptr = NULL; in rack_fini()
15148 tp->snd_nxt = tp->snd_max; in rack_fini()
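/*
 * A sketch of the teardown pattern in rack_fini() above: drain the
 * ordered map with a remove-minimum loop, then the free list, keeping
 * the allocation counter in step so a leak shows up at the end (the
 * real code KASSERTs rc_num_maps_alloced == 0).  The singly linked
 * node type is a stand-in for the tqhash/rc_free structures.
 */
#include <assert.h>
#include <stdlib.h>

struct node { struct node *next; };

static void
drain_list(struct node **head, int *alloced)
{
	struct node *n;

	while ((n = *head) != NULL) {
		*head = n->next;
		free(n);
		(*alloced)--;
	}
	assert(*alloced >= 0);	/* mirrors the KASSERT on the map count */
}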
15154 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { in rack_set_state()
15155 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; in rack_set_state()
15157 switch (tp->t_state) { in rack_set_state()
15159 rack->r_state = TCPS_SYN_SENT; in rack_set_state()
15160 rack->r_substate = rack_do_syn_sent; in rack_set_state()
15163 rack->r_state = TCPS_SYN_RECEIVED; in rack_set_state()
15164 rack->r_substate = rack_do_syn_recv; in rack_set_state()
15168 rack->r_state = TCPS_ESTABLISHED; in rack_set_state()
15169 rack->r_substate = rack_do_established; in rack_set_state()
15172 rack->r_state = TCPS_CLOSE_WAIT; in rack_set_state()
15173 rack->r_substate = rack_do_close_wait; in rack_set_state()
15177 rack->r_state = TCPS_FIN_WAIT_1; in rack_set_state()
15178 rack->r_substate = rack_do_fin_wait_1; in rack_set_state()
15182 rack->r_state = TCPS_CLOSING; in rack_set_state()
15183 rack->r_substate = rack_do_closing; in rack_set_state()
15187 rack->r_state = TCPS_LAST_ACK; in rack_set_state()
15188 rack->r_substate = rack_do_lastack; in rack_set_state()
15191 rack->r_state = TCPS_FIN_WAIT_2; in rack_set_state()
15192 rack->r_substate = rack_do_fin_wait_2; in rack_set_state()
15200 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_set_state()
15201 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_state()
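/*
 * rack_set_state() above pairs each TCP state with an input handler
 * kept in rack->r_substate.  The same pairing can be expressed as a
 * table of function pointers; this sketch uses hypothetical handler
 * names, not the real rack_do_* functions.
 */
enum tcp_state_ix { IX_SYN_SENT, IX_SYN_RECEIVED, IX_ESTABLISHED, IX_NSTATES };

typedef int (*substate_fn)(void *segment_ctx);

static int handle_syn_sent(void *ctx)    { (void)ctx; return (0); }
static int handle_syn_recv(void *ctx)    { (void)ctx; return (0); }
static int handle_established(void *ctx) { (void)ctx; return (0); }

static const substate_fn substates[IX_NSTATES] = {
	[IX_SYN_SENT]     = handle_syn_sent,
	[IX_SYN_RECEIVED] = handle_syn_recv,
	[IX_ESTABLISHED]  = handle_established,
};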
15217 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_audit()
15218 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_timer_audit()
15224 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15228 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) in rack_timer_audit()
15230 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_audit()
15231 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && in rack_timer_audit()
15238 if (tp->t_flags & TF_DELACK) { in rack_timer_audit()
15243 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timer_audit()
15244 (tp->t_state <= TCPS_CLOSING)) && in rack_timer_audit()
15246 (tp->snd_max == tp->snd_una)) { in rack_timer_audit()
15251 if (SEQ_GT(tp->snd_max, tp->snd_una) && in rack_timer_audit()
15275 if (tcp_in_hpts(rack->rc_tp)) { in rack_timer_audit()
15276 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_timer_audit()
15280 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_timer_audit()
15281 rack->r_early = 1; in rack_timer_audit()
15282 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_timer_audit()
15284 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_audit()
15286 tcp_hpts_remove(rack->rc_tp); in rack_timer_audit()
15288 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
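/*
 * A sketch of the early-wakeup accounting above: when a scheduled
 * pacer deadline is torn down before it fires, the unexpired time is
 * banked so a later send can compensate for having left early.  The
 * function name is illustrative.
 */
#include <stdint.h>

static void
bank_early_time(uint32_t deadline_us, uint32_t now_us,
    int *early_flag, uint64_t *agg_early)
{
	if ((int32_t)(deadline_us - now_us) > 0) {	/* TSTMP_GT */
		*early_flag = 1;
		*agg_early += (deadline_us - now_us);
	}
}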
15296 if ((SEQ_LT(tp->snd_wl1, seq) || in rack_do_win_updates()
15297 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || in rack_do_win_updates()
15298 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { in rack_do_win_updates()
15300 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) in rack_do_win_updates()
15302 tp->snd_wnd = tiwin; in rack_do_win_updates()
15304 tp->snd_wl1 = seq; in rack_do_win_updates()
15305 tp->snd_wl2 = ack; in rack_do_win_updates()
15306 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15307 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15308 rack->r_wanted_output = 1; in rack_do_win_updates()
15309 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { in rack_do_win_updates()
15310 tp->snd_wnd = tiwin; in rack_do_win_updates()
15312 tp->snd_wl1 = seq; in rack_do_win_updates()
15313 tp->snd_wl2 = ack; in rack_do_win_updates()
15318 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15319 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15321 if ((rack->rc_in_persist != 0) && in rack_do_win_updates()
15322 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_do_win_updates()
15323 rack->r_ctl.rc_pace_min_segs))) { in rack_do_win_updates()
15327 if ((rack->rc_in_persist == 0) && in rack_do_win_updates()
15328 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_do_win_updates()
15329 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_win_updates()
15330 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_do_win_updates()
15331 sbavail(&tptosocket(tp)->so_snd) && in rack_do_win_updates()
15332 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_do_win_updates()
15339 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); in rack_do_win_updates()
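/*
 * The test at the top of rack_do_win_updates() is the classic "is this
 * newer information?" check: accept the advertised window when the
 * segment is strictly newer by sequence, equally new but acking more,
 * or identical but advertising a larger window.  A stand-alone sketch
 * of just that predicate (SEQ_LT re-declared here for completeness):
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)

static bool
win_update_ok(uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd,
    uint32_t seq, uint32_t ack, uint32_t tiwin)
{
	return (SKETCH_SEQ_LT(snd_wl1, seq) ||
	    (snd_wl1 == seq && (SKETCH_SEQ_LT(snd_wl2, ack) ||
	    (snd_wl2 == ack && tiwin > snd_wnd))));
}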
15347 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_input_packet()
15360 if (SEQ_GT(ae->ack, tp->snd_una)) { in rack_log_input_packet()
15361 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); in rack_log_input_packet()
15363 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); in rack_log_input_packet()
15367 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_input_packet()
15368 if (rack->rack_no_prr == 0) in rack_log_input_packet()
15369 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_log_input_packet()
15372 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_input_packet()
15374 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_input_packet()
15375 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_log_input_packet()
15376 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_log_input_packet()
15377 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_log_input_packet()
15378 log.u_bbr.pkts_out = tp->t_maxseg; in rack_log_input_packet()
15379 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_input_packet()
15381 log.u_bbr.lost = ae->flags; in rack_log_input_packet()
15384 if (ae->flags & TSTMP_HDWR) { in rack_log_input_packet()
15387 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15388 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15392 } else if (ae->flags & TSTMP_LRO) { in rack_log_input_packet()
15395 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15396 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15403 log.u_bbr.delRate = ae->timestamp; in rack_log_input_packet()
15405 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_log_input_packet()
15407 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_log_input_packet()
15409 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_log_input_packet()
15413 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_log_input_packet()
15415 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_log_input_packet()
15416 log.u_bbr.rttProp = tcp_req->timestamp; in rack_log_input_packet()
15417 log.u_bbr.cur_del_rate = tcp_req->start; in rack_log_input_packet()
15418 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_log_input_packet()
15422 log.u_bbr.bw_inuse = tcp_req->end; in rack_log_input_packet()
15424 log.u_bbr.flex6 = tcp_req->start_seq; in rack_log_input_packet()
15425 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_log_input_packet()
15427 log.u_bbr.epoch = tcp_req->end_seq; in rack_log_input_packet()
15433 th->th_seq = ae->seq; in rack_log_input_packet()
15434 th->th_ack = ae->ack; in rack_log_input_packet()
15435 th->th_win = ae->win; in rack_log_input_packet()
15437 th->th_sport = inp->inp_fport; in rack_log_input_packet()
15438 th->th_dport = inp->inp_lport; in rack_log_input_packet()
15439 tcp_set_flags(th, ae->flags); in rack_log_input_packet()
15441 if (ae->flags & HAS_TSTMP) { in rack_log_input_packet()
15445 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); in rack_log_input_packet()
15455 val = htonl(ae->ts_value); in rack_log_input_packet()
15458 val = htonl(ae->ts_echo); in rack_log_input_packet()
15462 th->th_off = (sizeof(struct tcphdr) >> 2); in rack_log_input_packet()
15471 * snd_una was advanced and then un-advanced so that the in rack_log_input_packet()
15474 if (tp->snd_una != high_seq) { in rack_log_input_packet()
15475 orig_snd_una = tp->snd_una; in rack_log_input_packet()
15476 tp->snd_una = high_seq; in rack_log_input_packet()
15481 &tptosocket(tp)->so_rcv, in rack_log_input_packet()
15482 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, in rack_log_input_packet()
15485 tp->snd_una = orig_snd_una; in rack_log_input_packet()
15496 * A persist or keep-alive was forced out, update our in rack_handle_probe_response()
15498 * When a subsequent keep-alive or persist times out in rack_handle_probe_response()
15504 * will clear the probe_not_answered flag i.e. cum-ack in rack_handle_probe_response()
15508 rack->forced_ack = 0; in rack_handle_probe_response()
15509 rack->rc_tp->t_rxtshift = 0; in rack_handle_probe_response()
15510 if ((rack->rc_in_persist && in rack_handle_probe_response()
15511 (tiwin == rack->rc_tp->snd_wnd)) || in rack_handle_probe_response()
15512 (rack->rc_in_persist == 0)) { in rack_handle_probe_response()
15527 if (rack->rc_in_persist) in rack_handle_probe_response()
15529 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; in rack_handle_probe_response()
15532 if (rack->probe_not_answered == 0) { in rack_handle_probe_response()
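/*
 * A sketch of the probe-response RTT logic above: a persist or
 * keep-alive probe records its send time (forced_ack_ts); when the
 * peer answers, the elapsed microseconds form an RTT sample, but only
 * if no earlier probe went unanswered, since then the response could
 * belong to an older probe.  (The real code applies further checks
 * before using the sample.)
 */
#include <stdint.h>

static int
probe_rtt_sample(uint32_t now_us, uint32_t forced_ack_ts,
    int probe_not_answered, uint32_t *rtt_out)
{
	if (probe_not_answered)
		return (0);	/* ambiguous: may span several probes */
	*rtt_out = now_us - forced_ack_ts;
	return (1);
}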
15554 rack->r_ctl.roundends = tp->snd_max; in rack_new_round_starts()
15555 rack->rc_new_rnd_needed = 0; in rack_new_round_starts()
15556 rack_log_hystart_event(rack, tp->snd_max, 4); in rack_new_round_starts()
15564 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pcm()
15571 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pcm()
15577 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; in rack_log_pcm()
15578 log.u_bbr.bbr_substate = rack->pcm_needed; in rack_log_pcm()
15580 log.u_bbr.bbr_substate |= rack->pcm_in_progress; in rack_log_pcm()
15582 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ in rack_log_pcm()
15583 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, in rack_log_pcm()
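/*
 * A sketch of the NIE bit-packing used for the log substate above:
 * Needed, In-progress and Enabled land in bits 2..0 of a single field,
 * so one value in the trace decodes to all three PCM flags.
 */
#include <stdint.h>

static uint8_t
pack_nie(uint8_t needed, uint8_t in_progress, uint8_t enabled)
{
	uint8_t v;

	v = (uint8_t)(needed & 1);
	v = (uint8_t)((v << 1) | (in_progress & 1));
	v = (uint8_t)((v << 1) | (enabled & 1));
	return (v);		/* 0b<N><I><E> */
}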
15598 rack->r_ctl.current_round++; in rack_new_round_setup()
15600 rack->rc_new_rnd_needed = 1; in rack_new_round_setup()
15601 if ((rack->pcm_enabled == 1) && in rack_new_round_setup()
15602 (rack->pcm_needed == 0) && in rack_new_round_setup()
15603 (rack->pcm_in_progress == 0)) { in rack_new_round_setup()
15611 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_new_round_setup()
15612 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_new_round_setup()
15613 rack->pcm_needed = 1; in rack_new_round_setup()
15614 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15616 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15619 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_new_round_setup()
15621 if (CC_ALGO(tp)->newround != NULL) { in rack_new_round_setup()
15622 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); in rack_new_round_setup()
15627 * that we are not just pushing on slow-start and just in rack_new_round_setup()
15629 * boost in b/w during the initial slow-start. in rack_new_round_setup()
15631 if (rack->dgp_on && in rack_new_round_setup()
15632 (rack->rc_initial_ss_comp == 0) && in rack_new_round_setup()
15633 (tp->snd_cwnd < tp->snd_ssthresh) && in rack_new_round_setup()
15634 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && in rack_new_round_setup()
15635 (rack->r_ctl.gp_rnd_thresh > 0) && in rack_new_round_setup()
15636 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { in rack_new_round_setup()
15646 rack->rc_initial_ss_comp = 1; in rack_new_round_setup()
15648 if (tcp_bblogging_on(rack->rc_tp)) { in rack_new_round_setup()
15654 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_new_round_setup()
15655 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_new_round_setup()
15656 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; in rack_new_round_setup()
15657 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; in rack_new_round_setup()
15658 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15663 if ((rack->r_ctl.gate_to_fs == 1) && in rack_new_round_setup()
15664 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { in rack_new_round_setup()
15665 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15667 tp->snd_ssthresh = tp->snd_cwnd - 1; in rack_new_round_setup()
15669 rack->r_fast_output = 0; in rack_new_round_setup()
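/*
 * A sketch of the "measure every N rounds" gate in
 * rack_new_round_setup() above: a path-capacity measurement is flagged
 * once enough rounds, plus any credited idle rounds, have elapsed
 * since the last one.  Parameter names are illustrative.
 */
#include <stdint.h>

static int
pcm_due(uint32_t current_round, uint32_t last_pcm_round,
    uint32_t idle_rounds, uint32_t every_n)
{
	uint32_t rnds = current_round - last_pcm_round;

	return ((rnds + idle_rounds) >= every_n);
}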
15680 * A) It moves the cum-ack forward in rack_do_compressed_ack_processing()
15681 * B) It is behind the cum-ack. in rack_do_compressed_ack_processing()
15682 * C) It is a window-update ack. in rack_do_compressed_ack_processing()
15683 * D) It is a dup-ack. in rack_do_compressed_ack_processing()
15685 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES in rack_do_compressed_ack_processing()
15710 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_compressed_ack_processing()
15711 if (rack->gp_ready && in rack_do_compressed_ack_processing()
15712 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) in rack_do_compressed_ack_processing()
15715 if (rack->r_state != tp->t_state) in rack_do_compressed_ack_processing()
15717 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15718 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_compressed_ack_processing()
15727 bytes = tp->gput_ack - tp->gput_seq; in rack_do_compressed_ack_processing()
15728 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_compressed_ack_processing()
15729 bytes += tp->gput_seq - tp->snd_una; in rack_do_compressed_ack_processing()
15730 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_compressed_ack_processing()
15736 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_compressed_ack_processing()
15737 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_compressed_ack_processing()
15738 tp->gput_seq, in rack_do_compressed_ack_processing()
15740 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_compressed_ack_processing()
15744 to->to_flags = 0; in rack_do_compressed_ack_processing()
15745 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), in rack_do_compressed_ack_processing()
15746 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); in rack_do_compressed_ack_processing()
15747 cnt = m->m_len / sizeof(struct tcp_ackent); in rack_do_compressed_ack_processing()
15749 high_seq = tp->snd_una; in rack_do_compressed_ack_processing()
15750 the_win = tp->snd_wnd; in rack_do_compressed_ack_processing()
15751 win_seq = tp->snd_wl1; in rack_do_compressed_ack_processing()
15752 win_upd_ack = tp->snd_wl2; in rack_do_compressed_ack_processing()
15755 rack->r_ctl.rc_rcvtime = cts; in rack_do_compressed_ack_processing()
15757 if ((rack->rc_gp_dyn_mul) && in rack_do_compressed_ack_processing()
15758 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15759 (rack->rc_always_pace)) { in rack_do_compressed_ack_processing()
15769 if (ae->flags & TH_FIN) in rack_do_compressed_ack_processing()
15778 tiwin = ae->win << tp->snd_scale; in rack_do_compressed_ack_processing()
15779 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_compressed_ack_processing()
15780 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_compressed_ack_processing()
15782 if (SEQ_LT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15784 ae->ack_val_set = ACK_BEHIND; in rack_do_compressed_ack_processing()
15785 } else if (SEQ_GT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15787 ae->ack_val_set = ACK_CUMACK; in rack_do_compressed_ack_processing()
15788 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ in rack_do_compressed_ack_processing()
15790 ae->ack_val_set = ACK_DUPACK; in rack_do_compressed_ack_processing()
15793 ae->ack_val_set = ACK_RWND; in rack_do_compressed_ack_processing()
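/*
 * The if/else chain above condensed into a stand-alone helper: each
 * compressed-ack entry either sits behind the current cum-ack point,
 * advances it, duplicates it with an unchanged window (a dup-ack), or
 * carries only new window information.  Macro and function names here
 * are sketch-local, not the kernel's.
 */
#include <stdint.h>

enum ack_class { ACKC_BEHIND, ACKC_CUMACK, ACKC_DUPACK, ACKC_RWND };

#define SK_SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SK_SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)

static enum ack_class
classify_ack(uint32_t ack, uint32_t high_seq, uint32_t tiwin,
    uint32_t the_win, int in_persist)
{
	if (SK_SEQ_LT(ack, high_seq))
		return (ACKC_BEHIND);
	if (SK_SEQ_GT(ack, high_seq))
		return (ACKC_CUMACK);
	if ((tiwin == the_win) && (in_persist == 0))
		return (ACKC_DUPACK);
	return (ACKC_RWND);
}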
15796 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); in rack_do_compressed_ack_processing()
15798 if (ae->flags & HAS_TSTMP) { in rack_do_compressed_ack_processing()
15800 to->to_flags = TOF_TS; in rack_do_compressed_ack_processing()
15801 ae->ts_echo -= tp->ts_offset; in rack_do_compressed_ack_processing()
15802 to->to_tsecr = ae->ts_echo; in rack_do_compressed_ack_processing()
15803 to->to_tsval = ae->ts_value; in rack_do_compressed_ack_processing()
15809 if (TSTMP_GT(ae->ts_echo, ms_cts)) in rack_do_compressed_ack_processing()
15810 to->to_tsecr = 0; in rack_do_compressed_ack_processing()
15811 if (tp->ts_recent && in rack_do_compressed_ack_processing()
15812 TSTMP_LT(ae->ts_value, tp->ts_recent)) { in rack_do_compressed_ack_processing()
15813 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { in rack_do_compressed_ack_processing()
15817 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15818 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15825 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && in rack_do_compressed_ack_processing()
15826 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { in rack_do_compressed_ack_processing()
15827 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_compressed_ack_processing()
15828 tp->ts_recent = ae->ts_value; in rack_do_compressed_ack_processing()
15832 to->to_flags = 0; in rack_do_compressed_ack_processing()
15835 if (tp->t_idle_reduce && in rack_do_compressed_ack_processing()
15836 (tp->snd_max == tp->snd_una) && in rack_do_compressed_ack_processing()
15837 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_compressed_ack_processing()
15841 tp->t_rcvtime = ticks; in rack_do_compressed_ack_processing()
15843 if (tcp_ecn_input_segment(tp, ae->flags, 0, in rack_do_compressed_ack_processing()
15844 tcp_packets_this_ack(tp, ae->ack), in rack_do_compressed_ack_processing()
15845 ae->codepoint)) in rack_do_compressed_ack_processing()
15846 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); in rack_do_compressed_ack_processing()
15849 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15850 tp->tcp_cnt_counters[ae->ack_val_set]++; in rack_do_compressed_ack_processing()
15857 * The non-compressed path through the code has this in rack_do_compressed_ack_processing()
15864 if (ae->ack_val_set == ACK_BEHIND) { in rack_do_compressed_ack_processing()
15867 * or it could be a keep-alive or persists in rack_do_compressed_ack_processing()
15869 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { in rack_do_compressed_ack_processing()
15870 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_compressed_ack_processing()
15871 if (rack->r_ctl.rc_reorder_ts == 0) in rack_do_compressed_ack_processing()
15872 rack->r_ctl.rc_reorder_ts = 1; in rack_do_compressed_ack_processing()
15874 } else if (ae->ack_val_set == ACK_DUPACK) { in rack_do_compressed_ack_processing()
15876 rack_strike_dupack(rack, ae->ack); in rack_do_compressed_ack_processing()
15877 } else if (ae->ack_val_set == ACK_RWND) { in rack_do_compressed_ack_processing()
15879 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15880 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15881 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15882 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15883 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15885 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15887 if (rack->forced_ack) { in rack_do_compressed_ack_processing()
15889 tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15894 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15895 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15900 if (SEQ_GT(ae->ack, tp->snd_max)) { in rack_do_compressed_ack_processing()
15905 if ((tp->t_flags & TF_ACKNOW) == 0) { in rack_do_compressed_ack_processing()
15907 if (tp->t_flags & TF_ACKNOW) in rack_do_compressed_ack_processing()
15908 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15913 if (tiwin != tp->snd_wnd) { in rack_do_compressed_ack_processing()
15914 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15915 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15921 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15922 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); in rack_do_compressed_ack_processing()
15925 high_seq = ae->ack; in rack_do_compressed_ack_processing()
15927 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15928 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15929 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15930 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15931 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15933 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15935 rack_process_to_cumack(tp, rack, ae->ack, cts, to, in rack_do_compressed_ack_processing()
15936 tcp_tv_to_lusectick(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15940 if (rack->rc_dsack_round_seen) { in rack_do_compressed_ack_processing()
15942 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { in rack_do_compressed_ack_processing()
15944 rack->rc_dsack_round_seen = 0; in rack_do_compressed_ack_processing()
15955 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15956 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15957 if (ae->ack_val_set == ACK_CUMACK) in rack_do_compressed_ack_processing()
15958 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15967 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { in rack_do_compressed_ack_processing()
15969 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); in rack_do_compressed_ack_processing()
15970 } else if (rack->rc_has_collapsed) in rack_do_compressed_ack_processing()
15972 if ((rack->r_collapse_point_valid) && in rack_do_compressed_ack_processing()
15973 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) in rack_do_compressed_ack_processing()
15974 rack->r_collapse_point_valid = 0; in rack_do_compressed_ack_processing()
15975 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
15988 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && in rack_do_compressed_ack_processing()
15989 (rack->rc_new_rnd_needed == 0) && in rack_do_compressed_ack_processing()
15999 * since cum-ack moved forward. in rack_do_compressed_ack_processing()
16001 rack->probe_not_answered = 0; in rack_do_compressed_ack_processing()
16002 if (tp->t_flags & TF_NEEDSYN) { in rack_do_compressed_ack_processing()
16004 * T/TCP: Connection was half-synchronized, and our SYN has in rack_do_compressed_ack_processing()
16006 * to non-starred state, increment snd_una for ACK of SYN, in rack_do_compressed_ack_processing()
16009 tp->t_flags &= ~TF_NEEDSYN; in rack_do_compressed_ack_processing()
16010 tp->snd_una++; in rack_do_compressed_ack_processing()
16011 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
16013 if (acked > sbavail(&so->so_snd)) in rack_do_compressed_ack_processing()
16014 acked_amount = sbavail(&so->so_snd); in rack_do_compressed_ack_processing()
16015 if (IN_FASTRECOVERY(tp->t_flags) && in rack_do_compressed_ack_processing()
16016 (rack->rack_no_prr == 0)) in rack_do_compressed_ack_processing()
16018 if (IN_RECOVERY(tp->t_flags)) { in rack_do_compressed_ack_processing()
16019 if (SEQ_LT(high_seq, tp->snd_recover) && in rack_do_compressed_ack_processing()
16020 (SEQ_LT(high_seq, tp->snd_max))) { in rack_do_compressed_ack_processing()
16026 } else if ((rack->rto_from_rec == 1) && in rack_do_compressed_ack_processing()
16027 SEQ_GEQ(high_seq, tp->snd_recover)) { in rack_do_compressed_ack_processing()
16030 * and never re-entered recovery. The timeout(s) in rack_do_compressed_ack_processing()
16034 rack->rto_from_rec = 0; in rack_do_compressed_ack_processing()
16036 /* Handle the rack-log-ack part (sendmap) */ in rack_do_compressed_ack_processing()
16037 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16039 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16040 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
16053 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16056 if ((tp->t_flags & TF_PREVVALID) && in rack_do_compressed_ack_processing()
16057 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_do_compressed_ack_processing()
16058 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
16059 if (tp->t_rxtshift == 1 && in rack_do_compressed_ack_processing()
16060 (int)(ticks - tp->t_badrxtwin) < 0) in rack_do_compressed_ack_processing()
16076 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_compressed_ack_processing()
16078 p_cwnd += tp->snd_cwnd; in rack_do_compressed_ack_processing()
16081 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { in rack_do_compressed_ack_processing()
16082 /* Must be non-newreno (cubic) getting too ahead of itself */ in rack_do_compressed_ack_processing()
16083 tp->snd_cwnd = p_cwnd; in rack_do_compressed_ack_processing()
16086 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_do_compressed_ack_processing()
16087 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
16089 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_do_compressed_ack_processing()
16091 rack_log_wakeup(tp,rack, &so->so_snd, acked, 2); in rack_do_compressed_ack_processing()
16096 tp->t_acktime = ticks; in rack_do_compressed_ack_processing()
16097 rack_log_progress_event(rack, tp, tp->t_acktime, in rack_do_compressed_ack_processing()
16100 tp->t_rxtshift = 0; in rack_do_compressed_ack_processing()
16101 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_do_compressed_ack_processing()
16102 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_do_compressed_ack_processing()
16103 rack->rc_tlp_in_progress = 0; in rack_do_compressed_ack_processing()
16104 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_do_compressed_ack_processing()
16106 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_do_compressed_ack_processing()
16107 tp->snd_recover = tp->snd_una; in rack_do_compressed_ack_processing()
16108 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) in rack_do_compressed_ack_processing()
16109 tp->snd_nxt = tp->snd_max; in rack_do_compressed_ack_processing()
16114 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_do_compressed_ack_processing()
16115 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16116 tp->snd_wl2 = high_seq; in rack_do_compressed_ack_processing()
16117 tp->t_dupacks = 0; in rack_do_compressed_ack_processing()
16119 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
16120 (rack->in_probe_rtt == 0) && in rack_do_compressed_ack_processing()
16121 rack->rc_gp_dyn_mul && in rack_do_compressed_ack_processing()
16122 rack->rc_always_pace) { in rack_do_compressed_ack_processing()
16126 if (tp->snd_una == tp->snd_max) { in rack_do_compressed_ack_processing()
16127 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
16128 rack->r_ctl.retran_during_recovery = 0; in rack_do_compressed_ack_processing()
16129 rack->rc_suspicious = 0; in rack_do_compressed_ack_processing()
16130 rack->r_ctl.dsack_byte_cnt = 0; in rack_do_compressed_ack_processing()
16131 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_do_compressed_ack_processing()
16132 if (rack->r_ctl.rc_went_idle_time == 0) in rack_do_compressed_ack_processing()
16133 rack->r_ctl.rc_went_idle_time = 1; in rack_do_compressed_ack_processing()
16135 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_do_compressed_ack_processing()
16136 tp->t_acktime = 0; in rack_do_compressed_ack_processing()
16138 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16139 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16140 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_compressed_ack_processing()
16141 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16142 (sbavail(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16143 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_do_compressed_ack_processing()
16149 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
16150 /* tcp_close will kill the inp pre-log the Reset */ in rack_do_compressed_ack_processing()
16155 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16156 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16157 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16170 * We would normally do drop-with-reset which would in rack_do_compressed_ack_processing()
16181 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
16182 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
16183 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
16191 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
16200 * We don't change to fin-wait-2 if we have our fin acked in rack_do_compressed_ack_processing()
16208 if (sbavail(&so->so_snd)) { in rack_do_compressed_ack_processing()
16209 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
16211 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_compressed_ack_processing()
16220 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16221 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16222 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16233 switch(tp->t_state) { in rack_do_compressed_ack_processing()
16238 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16239 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16240 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16253 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16254 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16255 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16268 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16269 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16270 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16274 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
16287 if (rack->r_fast_output) { in rack_do_compressed_ack_processing()
16296 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16297 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16298 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16305 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16306 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16323 if ((rack->r_wanted_output != 0) || in rack_do_compressed_ack_processing()
16324 (rack->r_fast_output != 0) || in rack_do_compressed_ack_processing()
16325 (tp->t_flags & TF_ACKNOW )) { in rack_do_compressed_ack_processing()
16335 if (tp->t_flags2 & TF2_HPTS_CALLS) in rack_do_compressed_ack_processing()
16336 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_compressed_ack_processing()
16341 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_compressed_ack_processing()
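/*
 * A sketch of the hardware/LRO timestamp conversion used twice in the
 * function above: the 64-bit nanosecond arrival stamp is split into
 * seconds and nanoseconds, then narrowed to the microsecond-resolution
 * receive time the stack keeps.  'struct tval' stands in for the
 * kernel's timeval.
 */
#include <stdint.h>

struct tval { int64_t tv_sec; int64_t tv_usec; };

static struct tval
ns_stamp_to_tv(uint64_t ns_stamp)
{
	struct tval tv;

	tv.tv_sec = (int64_t)(ns_stamp / 1000000000ULL);
	tv.tv_usec = (int64_t)((ns_stamp % 1000000000ULL) / 1000);
	return (tv);
}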
16363 * cts - is the current time from tv (caller gets ts) in microseconds. in rack_do_segment_nounlock()
16364 * ms_cts - is the current time from tv in milliseconds. in rack_do_segment_nounlock()
16365 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. in rack_do_segment_nounlock()
16388 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_segment_nounlock()
16389 if (rack->rack_deferred_inited == 0) { in rack_do_segment_nounlock()
16400 * can happen in the non-LRO path where we are pacing and in rack_do_segment_nounlock()
16405 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16410 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16414 if ((rack->rc_always_pace == 1) && in rack_do_segment_nounlock()
16415 (rack->rc_ack_can_sendout_data == 0) && in rack_do_segment_nounlock()
16416 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_do_segment_nounlock()
16417 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { in rack_do_segment_nounlock()
16424 slot_remaining = rack->r_ctl.rc_last_output_to - us_cts; in rack_do_segment_nounlock()
16425 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { in rack_do_segment_nounlock()
16437 optlen = (th->th_off << 2) - sizeof(struct tcphdr); in rack_do_segment_nounlock()
16463 rack->r_ctl.gp_bw, in rack_do_segment_nounlock()
16469 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16474 nsegs = m->m_pkthdr.lro_nsegs; in rack_do_segment_nounlock()
16481 if ((m->m_flags & M_TSTMP) || in rack_do_segment_nounlock()
16482 (m->m_flags & M_TSTMP_LRO)) { in rack_do_segment_nounlock()
16484 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_segment_nounlock()
16485 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_segment_nounlock()
16487 rack->r_ctl.act_rcv_time = *tv; in rack_do_segment_nounlock()
16491 * Unscale the window into a 32-bit value. For the SYN_SENT state in rack_do_segment_nounlock()
16494 tiwin = th->th_win << tp->snd_scale; in rack_do_segment_nounlock()
16512 * time. This is ok since it's a rare event. in rack_do_segment_nounlock()
16523 (th->th_off << 2) - sizeof(struct tcphdr), in rack_do_segment_nounlock()
16525 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", in rack_do_segment_nounlock()
16527 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", in rack_do_segment_nounlock()
16529 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { in rack_do_segment_nounlock()
16537 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_segment_nounlock()
16538 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_segment_nounlock()
16547 bytes = tp->gput_ack - tp->gput_seq; in rack_do_segment_nounlock()
16548 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_segment_nounlock()
16549 bytes += tp->gput_seq - tp->snd_una; in rack_do_segment_nounlock()
16550 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_segment_nounlock()
16556 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_segment_nounlock()
16557 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_segment_nounlock()
16558 tp->gput_seq, in rack_do_segment_nounlock()
16560 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_segment_nounlock()
16563 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_segment_nounlock()
16569 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_segment_nounlock()
16570 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); in rack_do_segment_nounlock()
16572 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); in rack_do_segment_nounlock()
16576 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_do_segment_nounlock()
16577 if (rack->rack_no_prr == 0) in rack_do_segment_nounlock()
16578 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_do_segment_nounlock()
16581 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_do_segment_nounlock()
16583 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_do_segment_nounlock()
16584 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_do_segment_nounlock()
16585 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_do_segment_nounlock()
16586 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_do_segment_nounlock()
16587 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_do_segment_nounlock()
16588 log.u_bbr.flex3 = m->m_flags; in rack_do_segment_nounlock()
16589 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_do_segment_nounlock()
16596 if (m->m_flags & M_TSTMP) { in rack_do_segment_nounlock()
16602 } else if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment_nounlock()
16611 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; in rack_do_segment_nounlock()
16613 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_do_segment_nounlock()
16615 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_do_segment_nounlock()
16617 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_do_segment_nounlock()
16621 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16623 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16624 log.u_bbr.rttProp = tcp_req->timestamp; in rack_do_segment_nounlock()
16625 log.u_bbr.cur_del_rate = tcp_req->start; in rack_do_segment_nounlock()
16626 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_do_segment_nounlock()
16630 log.u_bbr.bw_inuse = tcp_req->end; in rack_do_segment_nounlock()
16632 log.u_bbr.flex6 = tcp_req->start_seq; in rack_do_segment_nounlock()
16633 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_do_segment_nounlock()
16635 log.u_bbr.epoch = tcp_req->end_seq; in rack_do_segment_nounlock()
16639 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, in rack_do_segment_nounlock()
16644 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16653 * If a segment with the ACK-bit set arrives in the SYN-SENT state in rack_do_segment_nounlock()
16656 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && in rack_do_segment_nounlock()
16657 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_segment_nounlock()
16672 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && in rack_do_segment_nounlock()
16680 * Segment received on connection. Reset idle time and keep-alive in rack_do_segment_nounlock()
16684 if (tp->t_idle_reduce && in rack_do_segment_nounlock()
16685 (tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16686 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_segment_nounlock()
16690 tp->t_rcvtime = ticks; in rack_do_segment_nounlock()
16692 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); in rack_do_segment_nounlock()
16694 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_segment_nounlock()
16695 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_segment_nounlock()
16701 tcp_packets_this_ack(tp, th->th_ack), in rack_do_segment_nounlock()
16703 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); in rack_do_segment_nounlock()
16711 to.to_tsecr -= tp->ts_offset; in rack_do_segment_nounlock()
16715 if ((rack->r_rcvpath_rtt_up == 1) && in rack_do_segment_nounlock()
16717 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { in rack_do_segment_nounlock()
16728 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) in rack_do_segment_nounlock()
16729 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); in rack_do_segment_nounlock()
16730 rack->r_rcvpath_rtt_up = 0; in rack_do_segment_nounlock()
16741 if (rack->r_state == 0) { in rack_do_segment_nounlock()
16743 KASSERT(rack->rc_inp != NULL, in rack_do_segment_nounlock()
16744 ("%s: rack->rc_inp unexpectedly NULL", __func__)); in rack_do_segment_nounlock()
16745 if (rack->rc_inp == NULL) { in rack_do_segment_nounlock()
16746 rack->rc_inp = inp; in rack_do_segment_nounlock()
16756 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { in rack_do_segment_nounlock()
16760 (tp->t_flags & TF_REQ_SCALE)) { in rack_do_segment_nounlock()
16761 tp->t_flags |= TF_RCVD_SCALE; in rack_do_segment_nounlock()
16762 tp->snd_scale = to.to_wscale; in rack_do_segment_nounlock()
16764 tp->t_flags &= ~TF_REQ_SCALE; in rack_do_segment_nounlock()
16769 tp->snd_wnd = th->th_win; in rack_do_segment_nounlock()
16772 (tp->t_flags & TF_REQ_TSTMP)) { in rack_do_segment_nounlock()
16773 tp->t_flags |= TF_RCVD_TSTMP; in rack_do_segment_nounlock()
16774 tp->ts_recent = to.to_tsval; in rack_do_segment_nounlock()
16775 tp->ts_recent_age = cts; in rack_do_segment_nounlock()
16777 tp->t_flags &= ~TF_REQ_TSTMP; in rack_do_segment_nounlock()
16781 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_do_segment_nounlock()
16783 tp->t_flags &= ~TF_SACK_PERMIT; in rack_do_segment_nounlock()
16784 if (tp->t_flags & TF_FASTOPEN) { in rack_do_segment_nounlock()
16791 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_do_segment_nounlock()
16804 * TF_SACK_PERMIT is set and the sack-not-required is clear. in rack_do_segment_nounlock()
16805 * The code now does do dup-ack counting so if you don't in rack_do_segment_nounlock()
16811 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_do_segment_nounlock()
16813 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, in rack_do_segment_nounlock()
16821 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); in rack_do_segment_nounlock()
16825 us_cts = tcp_tv_to_usectick(&rack->r_ctl.act_rcv_time); in rack_do_segment_nounlock()
16826 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16827 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16828 (rack->rc_always_pace)) { in rack_do_segment_nounlock()
16833 if ((rack->forced_ack) && in rack_do_segment_nounlock()
16839 * always. All other times (timers etc) we must have a rack-state in rack_do_segment_nounlock()
16842 rack->r_ctl.rc_rcvtime = cts; in rack_do_segment_nounlock()
16843 if (rack->r_state != tp->t_state) in rack_do_segment_nounlock()
16845 if (SEQ_GT(th->th_ack, tp->snd_una) && in rack_do_segment_nounlock()
16846 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) in rack_do_segment_nounlock()
16848 prev_state = rack->r_state; in rack_do_segment_nounlock()
16850 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && in rack_do_segment_nounlock()
16851 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || in rack_do_segment_nounlock()
16852 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { in rack_do_segment_nounlock()
16854 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); in rack_do_segment_nounlock()
16856 retval = (*rack->r_substate) (m, th, so, in rack_do_segment_nounlock()
16865 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16866 (rack->rc_always_pace) && in rack_do_segment_nounlock()
16867 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16868 rack->in_probe_rtt && in rack_do_segment_nounlock()
16869 (rack->r_ctl.rc_time_probertt_starts == 0)) { in rack_do_segment_nounlock()
16876 if (rack->set_pacing_done_a_iw == 0) { in rack_do_segment_nounlock()
16878 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { in rack_do_segment_nounlock()
16880 rack->set_pacing_done_a_iw = 1; in rack_do_segment_nounlock()
16891 * use of 0xf here since we only have 11 counters (0 - 0xa) and in rack_do_segment_nounlock()
16899 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_segment_nounlock()
16900 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); in rack_do_segment_nounlock()
16905 if ((rack->r_wanted_output != 0) || in rack_do_segment_nounlock()
16906 (tp->t_flags & TF_ACKNOW) || in rack_do_segment_nounlock()
16907 (rack->r_fast_output != 0)) { in rack_do_segment_nounlock()
16920 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { in rack_do_segment_nounlock()
16924 (tcp_in_hpts(rack->rc_tp) == 0)) { in rack_do_segment_nounlock()
16934 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) in rack_do_segment_nounlock()
16935 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_segment_nounlock()
16946 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && in rack_do_segment_nounlock()
16947 (rack->rc_new_rnd_needed == 0) && in rack_do_segment_nounlock()
16953 rack_new_round_setup(tp, rack, tp->snd_una); in rack_do_segment_nounlock()
16956 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && in rack_do_segment_nounlock()
16957 (SEQ_GT(tp->snd_max, tp->snd_una) || in rack_do_segment_nounlock()
16958 (tp->t_flags & TF_DELACK) || in rack_do_segment_nounlock()
16959 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_do_segment_nounlock()
16960 (tp->t_state <= TCPS_CLOSING)))) { in rack_do_segment_nounlock()
16962 if ((tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16963 ((tp->t_flags & TF_DELACK) == 0) && in rack_do_segment_nounlock()
16964 (tcp_in_hpts(rack->rc_tp)) && in rack_do_segment_nounlock()
16965 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_segment_nounlock()
16971 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_do_segment_nounlock()
16973 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_do_segment_nounlock()
16974 rack->r_early = 1; in rack_do_segment_nounlock()
16975 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_do_segment_nounlock()
16978 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_do_segment_nounlock()
16995 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_segment_nounlock()
17001 rack->r_wanted_output = 0; in rack_do_segment_nounlock()
17017 if (!STAILQ_EMPTY(&tp->t_inqueue)) { in rack_do_segment()
17023 if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment()
17042 /* Return the next guy to be re-transmitted */ in tcp_rack_output()
17043 if (tqhash_empty(rack->r_ctl.tqh)) { in tcp_rack_output()
17046 if (tp->t_flags & TF_SENTFIN) { in tcp_rack_output()
17051 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in tcp_rack_output()
17052 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { in tcp_rack_output()
17055 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { in tcp_rack_output()
17063 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && in tcp_rack_output()
17064 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17071 if (rsm->r_flags & RACK_ACKED) { in tcp_rack_output()
17074 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && in tcp_rack_output()
17075 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { in tcp_rack_output()
17080 idx = rsm->r_rtr_cnt - 1; in tcp_rack_output()
17081 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; in tcp_rack_output()
17088 if ((tsused - ts_low) < thresh) { in tcp_rack_output()
17092 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in tcp_rack_output()
17093 ((rsm->r_flags & RACK_SACK_PASSED))) { in tcp_rack_output()
17095 * We have passed the dup-ack threshold <or> in tcp_rack_output()
17098 * it is only the dup-ack threshold that in tcp_rack_output()
17102 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); in tcp_rack_output()
17103 rack->r_fast_output = 0; in tcp_rack_output()
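/*
 * Hedged sketch of the eligibility test tcp_rack_output() applies to the
 * oldest unacked segment, as visible above: nothing is retransmitted
 * before the RACK time threshold has elapsed since the segment's last
 * (re)transmission, and even then a loss indication -- the dup-ack
 * threshold or a SACK having passed the segment -- is still required.
 * The helper and its parameter names are illustrative, not the stack's own.
 */
#include <stdint.h>
#define DUPACK_THRESH	3	/* stands in for DUP_ACK_THRESHOLD */

static int
rack_rxt_eligible(uint32_t tsused, uint32_t ts_last_sent, uint32_t thresh,
    int dupacks, int sack_passed)
{
	if ((tsused - ts_last_sent) < thresh)
		return (0);	/* too soon per the RACK time threshold */
	/* Past the time threshold; still need a loss indication. */
	return ((dupacks >= DUPACK_THRESH) || sack_passed);
}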
17114 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pacing_delay_calc()
17135 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; in rack_log_pacing_delay_calc()
17136 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; in rack_log_pacing_delay_calc()
17137 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_pacing_delay_calc()
17138 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_pacing_delay_calc()
17139 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; in rack_log_pacing_delay_calc()
17141 log.u_bbr.use_lt_bw |= rack->r_late; in rack_log_pacing_delay_calc()
17143 log.u_bbr.use_lt_bw |= rack->r_early; in rack_log_pacing_delay_calc()
17145 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_pacing_delay_calc()
17147 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_pacing_delay_calc()
17149 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_pacing_delay_calc()
17151 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_pacing_delay_calc()
17153 log.u_bbr.use_lt_bw |= rack->gp_ready; in rack_log_pacing_delay_calc()
17155 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; in rack_log_pacing_delay_calc()
17156 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; in rack_log_pacing_delay_calc()
17157 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; in rack_log_pacing_delay_calc()
17160 if (rack->r_ctl.gp_bw == 0) in rack_log_pacing_delay_calc()
17165 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; in rack_log_pacing_delay_calc()
17166 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; in rack_log_pacing_delay_calc()
17168 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { in rack_log_pacing_delay_calc()
17177 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pacing_delay_calc()
17178 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; in rack_log_pacing_delay_calc()
17180 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_pacing_delay_calc()
17182 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_pacing_delay_calc()
17184 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; in rack_log_pacing_delay_calc()
17186 log.u_bbr.cwnd_gain |= rack->rc_always_pace; in rack_log_pacing_delay_calc()
17188 log.u_bbr.cwnd_gain |= rack->gp_ready; in rack_log_pacing_delay_calc()
17190 log.u_bbr.bbr_state = rack->dgp_on; in rack_log_pacing_delay_calc()
17192 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; in rack_log_pacing_delay_calc()
17194 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_pacing_delay_calc()
17195 &rack->rc_inp->inp_socket->so_rcv, in rack_log_pacing_delay_calc()
17196 &rack->rc_inp->inp_socket->so_snd, in rack_log_pacing_delay_calc()
17207 user_max = rack->rc_user_set_max_segs * mss; in rack_get_pacing_len()
17208 if (rack->rc_force_max_seg) { in rack_get_pacing_len()
17211 if (rack->use_fixed_rate && in rack_get_pacing_len()
17212 ((rack->r_ctl.crte == NULL) || in rack_get_pacing_len()
17213 (bw != rack->r_ctl.crte->rate))) { in rack_get_pacing_len()
17218 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_len()
17223 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, in rack_get_pacing_len()
17224 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_len()
17227 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { in rack_get_pacing_len()
17228 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) in rack_get_pacing_len()
17229 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; in rack_get_pacing_len()
17231 if (rack->r_ctl.rc_user_set_min_segs && in rack_get_pacing_len()
17232 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) in rack_get_pacing_len()
17233 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; in rack_get_pacing_len()
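/*
 * Illustrative sketch of the clamping rack_get_pacing_len() layers on
 * top of the burst-size computation above: raise the result to any
 * client-suggested or user-set minimum, then honor the user maximum.
 * All names here are assumptions standing in for the rack fields.
 */
#include <stdint.h>

static uint32_t
clamp_pacing_len(uint32_t new_tso, uint32_t mss, uint32_t suggested_segs,
    uint32_t user_min_segs, uint32_t user_max_segs)
{
	if (suggested_segs && ((suggested_segs * mss) > new_tso))
		new_tso = suggested_segs * mss;	/* hybrid client hint */
	if (user_min_segs && ((user_min_segs * mss) > new_tso))
		new_tso = user_min_segs * mss;	/* user-set floor */
	if (user_max_segs && (new_tso > (user_max_segs * mss)))
		new_tso = user_max_segs * mss;	/* user-set ceiling */
	return (new_tso);
}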
17246 * nearly zero, maybe because of a time-out? in rack_arrive_at_discounted_rate()
17247 * Lets drop back to the lt-bw. in rack_arrive_at_discounted_rate()
17253 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { in rack_arrive_at_discounted_rate()
17258 if (rack->rack_hibeta == 0) { in rack_arrive_at_discounted_rate()
17262 reduced_win = window_input * rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17264 gain = rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17296 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17297 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) in pace_to_fill_cwnd()
17299 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17301 if (rack->r_ctl.rc_last_us_rtt == 0) in pace_to_fill_cwnd()
17303 if (rack->rc_pace_fill_if_rttin_range && in pace_to_fill_cwnd()
17304 (rack->r_ctl.rc_last_us_rtt >= in pace_to_fill_cwnd()
17305 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { in pace_to_fill_cwnd()
17309 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17312 * first lets calculate the b/w based on the last us-rtt in pace_to_fill_cwnd()
17315 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in pace_to_fill_cwnd()
17316 if (rack->rc_fillcw_apply_discount) { in pace_to_fill_cwnd()
17325 if (fill_bw > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17326 fill_bw = rack->rc_tp->snd_wnd; in pace_to_fill_cwnd()
17329 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in pace_to_fill_cwnd()
17331 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17332 fill_bw = rack->r_ctl.fillcw_cap; in pace_to_fill_cwnd()
17337 * We want to limit fill-cw to some multiplier in pace_to_fill_cwnd()
17351 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in pace_to_fill_cwnd()
17364 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, in pace_to_fill_cwnd()
17377 rack->r_via_fill_cw = 1; in pace_to_fill_cwnd()
17378 if (rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17379 (rack->r_ctl.crte != NULL)) { in pace_to_fill_cwnd()
17382 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in pace_to_fill_cwnd()
17387 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17396 } else if ((rack->r_ctl.crte == NULL) && in pace_to_fill_cwnd()
17397 (rack->rack_hdrw_pacing == 0) && in pace_to_fill_cwnd()
17398 (rack->rack_hdw_pace_ena) && in pace_to_fill_cwnd()
17399 rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17400 (rack->rack_attempt_hdwr_pace == 0) && in pace_to_fill_cwnd()
17401 (rack->rc_inp->inp_route.ro_nh != NULL) && in pace_to_fill_cwnd()
17402 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in pace_to_fill_cwnd()
17409 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in pace_to_fill_cwnd()
17418 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { in pace_to_fill_cwnd()
17419 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in pace_to_fill_cwnd()
17421 fill_bw = rack->r_ctl.bw_rate_cap; in pace_to_fill_cwnd()
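/*
 * Sketch of the fill-cw rate derivation above: take the smaller of the
 * two cwnd views, never exceed the peer's receive window, convert that
 * window-per-RTT into bytes per second, and apply the configured cap.
 * Assumes (as the early return above guarantees) a non-zero RTT.
 */
#include <stdint.h>
#define USECS_PER_SECOND 1000000ULL

static uint64_t
fill_cw_rate(uint64_t cwnd, uint64_t cwnd_to_use, uint64_t snd_wnd,
    uint64_t last_us_rtt, uint64_t fillcw_cap)
{
	uint64_t fill_bw;

	fill_bw = (cwnd < cwnd_to_use) ? cwnd : cwnd_to_use;
	if (fill_bw > snd_wnd)
		fill_bw = snd_wnd;		/* bounded by the peer */
	fill_bw *= USECS_PER_SECOND;		/* window/RTT -> bytes/sec */
	fill_bw /= last_us_rtt;
	if (fillcw_cap && (fill_bw >= fillcw_cap))
		fill_bw = fillcw_cap;		/* configured hard cap */
	return (fill_bw);
}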
17449 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_delay()
17453 if (rack->rc_always_pace == 0) { in rack_get_pacing_delay()
17469 if (rack->r_ctl.rc_rack_min_rtt) in rack_get_pacing_delay()
17470 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_get_pacing_delay()
17472 srtt = max(tp->t_srtt, 1); in rack_get_pacing_delay()
17473 if (rack->r_ctl.rc_rack_largest_cwnd) in rack_get_pacing_delay()
17474 cwnd = rack->r_ctl.rc_rack_largest_cwnd; in rack_get_pacing_delay()
17476 cwnd = rack->r_ctl.cwnd_to_use; in rack_get_pacing_delay()
17496 slot -= reduce; in rack_get_pacing_delay()
17502 if (rack->rc_pace_to_cwnd) { in rack_get_pacing_delay()
17506 rack->rc_ack_can_sendout_data = 1; in rack_get_pacing_delay()
17511 /* RRS: We insert a non-paced call to stats here for len */ in rack_get_pacing_delay()
17519 if ((rack->r_rr_config == 1) && rsm) { in rack_get_pacing_delay()
17520 return (rack->r_ctl.rc_min_to); in rack_get_pacing_delay()
17522 if (rack->use_fixed_rate) { in rack_get_pacing_delay()
17524 } else if ((rack->r_ctl.init_rate == 0) && in rack_get_pacing_delay()
17525 (rack->r_ctl.gp_bw == 0)) { in rack_get_pacing_delay()
17528 } else if (rack->dgp_on) { in rack_get_pacing_delay()
17534 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in rack_get_pacing_delay()
17537 if (rate_wanted > rack->rc_tp->snd_wnd) in rack_get_pacing_delay()
17538 rate_wanted = rack->rc_tp->snd_wnd; in rack_get_pacing_delay()
17541 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in rack_get_pacing_delay()
17544 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, in rack_get_pacing_delay()
17545 rack->r_ctl.cwnd_to_use, in rack_get_pacing_delay()
17547 rack->r_ctl.rc_last_us_rtt, in rack_get_pacing_delay()
17550 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && in rack_get_pacing_delay()
17551 (rack->use_fixed_rate == 0)) { in rack_get_pacing_delay()
17560 segs = (len + segsiz - 1) / segsiz; in rack_get_pacing_delay()
17562 * We need the diff between 1514 bytes (e-mtu with e-hdr) in rack_get_pacing_delay()
17568 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); in rack_get_pacing_delay()
17569 if (rack->r_is_v6) { in rack_get_pacing_delay()
17587 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17592 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17593 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17594 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17595 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17598 if (rack->r_ctl.crte && in rack_get_pacing_delay()
17599 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17605 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17612 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17613 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17614 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17615 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17618 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { in rack_get_pacing_delay()
17623 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17624 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17625 /* Lets re-allow attempting to set up pacing */ in rack_get_pacing_delay()
17626 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17627 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17632 prev_fill = rack->r_via_fill_cw; in rack_get_pacing_delay()
17633 if ((rack->rc_pace_to_cwnd) && in rack_get_pacing_delay()
17635 (rack->dgp_on == 1) && in rack_get_pacing_delay()
17636 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17637 (rack->in_probe_rtt == 0) && in rack_get_pacing_delay()
17638 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_get_pacing_delay()
17644 /* Re-check to make sure we are not exceeding our max b/w */ in rack_get_pacing_delay()
17645 if ((rack->r_ctl.crte != NULL) && in rack_get_pacing_delay()
17646 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17652 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17659 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17660 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17661 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17662 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17663 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_get_pacing_delay()
17667 if ((rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_pacing_delay()
17668 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_pacing_delay()
17669 if ((rack->rack_hdw_pace_ena) && in rack_get_pacing_delay()
17671 (rack->rack_hdrw_pacing == 0) && in rack_get_pacing_delay()
17672 (rack->rack_attempt_hdwr_pace == 0)) { in rack_get_pacing_delay()
17677 rack->rack_attempt_hdwr_pace = 1; in rack_get_pacing_delay()
17678 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, in rack_get_pacing_delay()
17679 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17682 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17683 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17684 rack->rack_hdrw_pacing = 1; in rack_get_pacing_delay()
17685 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, in rack_get_pacing_delay()
17686 pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17687 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17689 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17691 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17695 } else if (rack->rack_hdrw_pacing && in rack_get_pacing_delay()
17696 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { in rack_get_pacing_delay()
17700 if (rack->r_up_only && in rack_get_pacing_delay()
17701 (rate_wanted < rack->r_ctl.crte->rate)) { in rack_get_pacing_delay()
17706 * previous | this-time in rack_get_pacing_delay()
17707 * A) 0 | 0 -- fill_cw not in the picture in rack_get_pacing_delay()
17708 * B) 1 | 0 -- we were doing a fill-cw but now are not in rack_get_pacing_delay()
17709 * C) 1 | 1 -- all rates from fill_cw in rack_get_pacing_delay()
17710 * D) 0 | 1 -- we were doing non-fill and now we are filling in rack_get_pacing_delay()
17717 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) in rack_get_pacing_delay()
17720 if ((rate_wanted > rack->r_ctl.crte->rate) || in rack_get_pacing_delay()
17721 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { in rack_get_pacing_delay()
17729 bw_est, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17731 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17732 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17733 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17734 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17735 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17738 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, in rack_get_pacing_delay()
17739 rack->rc_tp, in rack_get_pacing_delay()
17740 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17743 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17749 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17750 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17754 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17756 } else if (nrte != rack->r_ctl.crte) { in rack_get_pacing_delay()
17757 rack->r_ctl.crte = nrte; in rack_get_pacing_delay()
17758 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, in rack_get_pacing_delay()
17759 segsiz, pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17760 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17762 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17764 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17768 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17770 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17772 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17778 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17779 (rack->rack_hdrw_pacing == 0)) { in rack_get_pacing_delay()
17790 if (rack->rc_tp->t_srtt) in rack_get_pacing_delay()
17791 srtt = rack->rc_tp->t_srtt; in rack_get_pacing_delay()
17804 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { in rack_get_pacing_delay()
17808 * of gas or we are mis-estimating the time in rack_get_pacing_delay()
17814 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; in rack_get_pacing_delay()
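/*
 * The value this function ultimately returns is a pacing delay ("slot")
 * in microseconds. A minimal sketch of that conversion, with the rate
 * expressed in bytes per second (an assumption for the example):
 */
#include <stdint.h>

static uint64_t
pacing_slot_usec(uint64_t len, uint64_t rate_bytes_per_sec)
{
	if (rate_bytes_per_sec == 0)
		return (0);	/* caller falls back to other timing */
	return ((len * 1000000ULL) / rate_bytes_per_sec);
}
/*
 * For example, a 43800-byte burst at 12500000 bytes/sec (100 Mbit/s)
 * yields a slot of 3504 usec before any early/late adjustments.
 */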
17830 if (tp->t_state < TCPS_ESTABLISHED) { in rack_start_gp_measurement()
17837 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_start_gp_measurement()
17844 if (sbavail(&tptosocket(tp)->so_snd) < in rack_start_gp_measurement()
17851 tp->t_flags |= TF_GPUTINPROG; in rack_start_gp_measurement()
17852 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17853 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_start_gp_measurement()
17854 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_start_gp_measurement()
17855 tp->gput_seq = startseq; in rack_start_gp_measurement()
17856 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17857 if (rack->in_probe_rtt) in rack_start_gp_measurement()
17858 rack->measure_saw_probe_rtt = 1; in rack_start_gp_measurement()
17859 else if ((rack->measure_saw_probe_rtt) && in rack_start_gp_measurement()
17860 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_start_gp_measurement()
17861 rack->measure_saw_probe_rtt = 0; in rack_start_gp_measurement()
17862 if (rack->rc_gp_filled) in rack_start_gp_measurement()
17863 tp->gput_ts = rack->r_ctl.last_cumack_advance; in rack_start_gp_measurement()
17868 tp->gput_ts = tcp_get_usecs(&tv); in rack_start_gp_measurement()
17869 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17875 * initial-window's worth of data to in rack_start_gp_measurement()
17879 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { in rack_start_gp_measurement()
17880 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17881 tp->gput_ack = startseq + max(rc_init_window(rack), in rack_start_gp_measurement()
17884 tp->gput_seq, in rack_start_gp_measurement()
17885 tp->gput_ack, in rack_start_gp_measurement()
17887 tp->gput_ts, in rack_start_gp_measurement()
17888 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17892 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17901 if (rack->r_ctl.rc_app_limited_cnt == 0) { in rack_start_gp_measurement()
17904 * the tp->gput_ts is correctly set based on in rack_start_gp_measurement()
17908 my_rsm = tqhash_min(rack->r_ctl.tqh); in rack_start_gp_measurement()
17910 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17915 if (rack->r_ctl.rc_first_appl == NULL) { in rack_start_gp_measurement()
17930 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17934 * after that (after the app-limited). in rack_start_gp_measurement()
17936 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); in rack_start_gp_measurement()
17938 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) in rack_start_gp_measurement()
17940 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17943 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); in rack_start_gp_measurement()
17948 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17951 * the last is the app-limited one. in rack_start_gp_measurement()
17956 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
17958 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
17964 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
17965 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17970 tp->gput_seq = my_rsm->r_end; in rack_start_gp_measurement()
17975 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17986 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17987 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17988 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17989 if ((rack->r_ctl.cleared_app_ack == 1) && in rack_start_gp_measurement()
17990 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.cleared_app_ack_seq))) { in rack_start_gp_measurement()
17996 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17997 rack->r_ctl.cleared_app_ack = 0; in rack_start_gp_measurement()
18000 tp->gput_seq, in rack_start_gp_measurement()
18001 tp->gput_ack, in rack_start_gp_measurement()
18003 tp->gput_ts, in rack_start_gp_measurement()
18004 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18009 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
18016 * idle or if this is the first-send. Lets in rack_start_gp_measurement()
18021 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
18022 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
18023 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
18025 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); in rack_start_gp_measurement()
18027 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
18028 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
18033 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
18034 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
18036 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { in rack_start_gp_measurement()
18038 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
18042 * TSNH unless we have some send-map limit, in rack_start_gp_measurement()
18049 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
18053 tp->gput_seq, in rack_start_gp_measurement()
18054 tp->gput_ack, in rack_start_gp_measurement()
18056 tp->gput_ts, in rack_start_gp_measurement()
18057 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
18059 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
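/*
 * Sketch of the goodput bookkeeping this function arms. The stack also
 * tracks app-limited state and probe-RTT interactions; this shows only
 * the bytes-over-time core, with illustrative names and a sample taken
 * once the cumulative ACK reaches the measurement target.
 */
#include <stdint.h>

struct gp_meas {
	uint32_t seq_start;	/* analogue of tp->gput_seq */
	uint32_t seq_target;	/* analogue of tp->gput_ack */
	uint64_t ts_output;	/* usec clock when the data left */
};

static uint64_t
gp_sample_bytes_per_sec(const struct gp_meas *gm, uint32_t cum_ack,
    uint64_t now)
{
	if ((cum_ack < gm->seq_target) || (now <= gm->ts_output))
		return (0);	/* measurement still in progress */
	return (((uint64_t)(cum_ack - gm->seq_start) * 1000000ULL) /
	    (now - gm->ts_output));
}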
18069 if (tp->snd_wnd > cwnd_to_use) in rack_what_can_we_send()
18072 sendwin = tp->snd_wnd; in rack_what_can_we_send()
18073 if (ctf_outstanding(tp) >= tp->snd_wnd) { in rack_what_can_we_send()
18074 /* We never want to go over our peer's rcv-window */ in rack_what_can_we_send()
18079 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_what_can_we_send()
18084 * >= tp->snd_wnd). in rack_what_can_we_send()
18088 len = sendwin - flight; in rack_what_can_we_send()
18089 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { in rack_what_can_we_send()
18091 len = tp->snd_wnd - ctf_outstanding(tp); in rack_what_can_we_send()
18098 len = avail - sb_offset; in rack_what_can_we_send()
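/*
 * Condensed sketch of rack_what_can_we_send() as visible above: the
 * derived send window (the smaller of cwnd-in-use and snd_wnd) is
 * charged for bytes in flight, then the result is trimmed so neither
 * the peer's receive window nor the data actually queued is exceeded.
 */
#include <stdint.h>

static uint32_t
what_can_we_send(uint32_t sendwin, uint32_t flight, uint32_t outstanding,
    uint32_t snd_wnd, uint32_t avail, uint32_t sb_offset)
{
	uint32_t len;

	if ((outstanding >= snd_wnd) || (flight >= sendwin))
		return (0);		/* no room at all */
	len = sendwin - flight;
	if ((len + outstanding) > snd_wnd)
		len = snd_wnd - outstanding;	/* peer-window trim */
	if (len > (avail - sb_offset))
		len = avail - sb_offset;	/* only send queued data */
	return (len);
}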
18109 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_fsb()
18114 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_fsb()
18119 log.u_bbr.flex5 = tp->rcv_numsacks; in rack_log_fsb()
18120 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_log_fsb()
18122 log.u_bbr.flex8 = rack->r_fsb_inited; in rack_log_fsb()
18123 log.u_bbr.applimited = rack->r_fast_output; in rack_log_fsb()
18131 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_fsb()
18132 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, in rack_log_fsb()
18162 if (hw_tls && (m->m_flags & M_EXTPG)) in rack_fo_base_copym()
18163 tls = m->m_epg_tls; in rack_fo_base_copym()
18177 if (m->m_flags & M_EXTPG) in rack_fo_base_copym()
18178 ntls = m->m_epg_tls; in rack_fo_base_copym()
18194 mlen = min(len, m->m_len - off); in rack_fo_base_copym()
18204 if (m->m_flags & M_EXTPG) { in rack_fo_base_copym()
18225 mlen = (seglimit - frags - 1) * fragsize; in rack_fo_base_copym()
18232 seglimit -= frags; in rack_fo_base_copym()
18236 n = m_get(M_NOWAIT, m->m_type); in rack_fo_base_copym()
18240 n->m_len = mlen; in rack_fo_base_copym()
18242 len_cp += n->m_len; in rack_fo_base_copym()
18243 if (m->m_flags & (M_EXT | M_EXTPG)) { in rack_fo_base_copym()
18244 n->m_data = m->m_data + off; in rack_fo_base_copym()
18248 (u_int)n->m_len); in rack_fo_base_copym()
18250 len -= n->m_len; in rack_fo_base_copym()
18252 m = m->m_next; in rack_fo_base_copym()
18253 np = &n->m_next; in rack_fo_base_copym()
18254 if (len || (soff == smb->m_len)) { in rack_fo_base_copym()
18266 fsb->m = smb; in rack_fo_base_copym()
18267 fsb->off = soff; in rack_fo_base_copym()
18275 fsb->o_m_len = smb->m_len; in rack_fo_base_copym()
18276 fsb->o_t_len = M_TRAILINGROOM(smb); in rack_fo_base_copym()
18286 fsb->o_m_len = 0; in rack_fo_base_copym()
18287 fsb->o_t_len = 0; in rack_fo_base_copym()
18309 m = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18310 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { in rack_fo_m_copym()
18317 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), in rack_fo_m_copym()
18322 rack->r_ctl.fsb.o_t_len, in rack_fo_m_copym()
18323 rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18324 m->m_len)); in rack_fo_m_copym()
18325 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); in rack_fo_m_copym()
18326 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); in rack_fo_m_copym()
18328 if (m->m_len < rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18333 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), in rack_fo_m_copym()
18335 m, m->m_len, in rack_fo_m_copym()
18336 rack, rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18337 rack->r_ctl.fsb.off)); in rack_fo_m_copym()
18339 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len- m->m_len)) in rack_fo_m_copym()
18340 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); in rack_fo_m_copym()
18342 rack->r_ctl.fsb.off = 0; in rack_fo_m_copym()
18343 rack->r_ctl.fsb.o_m_len = m->m_len; in rack_fo_m_copym()
18345 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18350 soff = rack->r_ctl.fsb.off; in rack_fo_m_copym()
18353 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", in rack_fo_m_copym()
18355 rack, *plen, m, m->m_len)); in rack_fo_m_copym()
18358 *s_mb = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18360 &rack->r_ctl.fsb, in rack_fo_m_copym()
18361 seglimit, segsize, rack->r_ctl.fsb.hw_tls); in rack_fo_m_copym()
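/*
 * Hypothetical helper restating the cache fix-up rack_fo_m_copym() does
 * above: when the head sendbuf mbuf lost bytes from its front (sbdrop
 * after an ACK), slide the cached offset back by the trimmed amount and
 * re-sync the cached length. The caller has verified m_len shrank.
 */
#include <stdint.h>

static void
fsb_fixup_shrunk_head(uint32_t *off, uint32_t *o_m_len, uint32_t m_len)
{
	uint32_t trimmed = *o_m_len - m_len;

	if (*off >= trimmed)
		*off -= trimmed;	/* offset survives the trim */
	else
		*off = 0;		/* trim consumed the cached offset */
	*o_m_len = m_len;		/* cached length now current */
}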
18375 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_log_queue_level()
18376 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_log_queue_level()
18379 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_queue_level()
18382 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_log_queue_level()
18383 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_log_queue_level()
18384 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_log_queue_level()
18388 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_log_queue_level()
18390 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_queue_level()
18406 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_check_queue_level()
18412 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_check_queue_level()
18433 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); in rack_check_queue_level()
18438 /* TSNH -- KASSERT? */ in rack_check_queue_level()
18444 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_check_queue_level()
18447 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_check_queue_level()
18448 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_check_queue_level()
18449 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_check_queue_level()
18453 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_check_queue_level()
18456 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_check_queue_level()
18499 if (rack->r_is_v6) { in rack_fast_rsm_output()
18500 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18505 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18508 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_rsm_output()
18513 rsm->r_flags |= RACK_TLP; in rack_fast_rsm_output()
18516 rsm->r_flags &= ~RACK_TLP; in rack_fast_rsm_output()
18518 startseq = rsm->r_start; in rack_fast_rsm_output()
18519 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_rsm_output()
18520 inp = rack->rc_inp; in rack_fast_rsm_output()
18522 flags = tcp_outflags[tp->t_state]; in rack_fast_rsm_output()
18526 if (rsm->r_flags & RACK_HAS_FIN) { in rack_fast_rsm_output()
18534 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_rsm_output()
18535 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_rsm_output()
18536 to.to_tsecr = tp->ts_recent; in rack_fast_rsm_output()
18540 /* TCP-MD5 (RFC2385). */ in rack_fast_rsm_output()
18541 if (tp->t_flags & TF_SIGNATURE) in rack_fast_rsm_output()
18546 udp = rack->r_ctl.fsb.udp; in rack_fast_rsm_output()
18549 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_rsm_output()
18550 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18551 else if (rack->rc_user_set_max_segs) in rack_fast_rsm_output()
18552 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_rsm_output()
18555 if ((tp->t_flags & TF_TSO) && in rack_fast_rsm_output()
18558 (tp->t_port == 0)) in rack_fast_rsm_output()
18568 m->m_data += max_linkhdr; in rack_fast_rsm_output()
18569 m->m_len = hdrlen; in rack_fast_rsm_output()
18570 th = rack->r_ctl.fsb.th; in rack_fast_rsm_output()
18579 if_hw_tsomax = tp->t_tsomax; in rack_fast_rsm_output()
18580 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_rsm_output()
18581 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_rsm_output()
18588 max_len = (if_hw_tsomax - hdrlen - in rack_fast_rsm_output()
18610 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_rsm_output()
18613 th->th_seq = htonl(rsm->r_start); in rack_fast_rsm_output()
18614 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_rsm_output()
18622 if ((rsm->r_flags & RACK_HAD_PUSH) && in rack_fast_rsm_output()
18623 (len == (rsm->r_end - rsm->r_start))) in rack_fast_rsm_output()
18625 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_rsm_output()
18626 if (th->th_win == 0) { in rack_fast_rsm_output()
18627 tp->t_sndzerowin++; in rack_fast_rsm_output()
18628 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_rsm_output()
18630 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_rsm_output()
18631 if (rsm->r_flags & RACK_TLP) { in rack_fast_rsm_output()
18639 tp->t_sndrexmitpack++; in rack_fast_rsm_output()
18644 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_fast_rsm_output()
18647 if (rsm->m == NULL) in rack_fast_rsm_output()
18649 if (rsm->m && in rack_fast_rsm_output()
18650 ((rsm->orig_m_len != rsm->m->m_len) || in rack_fast_rsm_output()
18651 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_fast_rsm_output()
18655 …m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxse… in rack_fast_rsm_output()
18665 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_rsm_output()
18669 if (rack->r_is_v6) in rack_fast_rsm_output()
18670 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_rsm_output()
18672 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_rsm_output()
18673 udp->uh_ulen = htons(ulen); in rack_fast_rsm_output()
18675 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_rsm_output()
18676 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_rsm_output()
18677 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_rsm_output()
18679 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_rsm_output()
18680 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_rsm_output()
18681 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_rsm_output()
18683 if (rack->r_is_v6) { in rack_fast_rsm_output()
18684 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_rsm_output()
18685 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_rsm_output()
18690 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_rsm_output()
18691 ip->ip_tos |= ect; in rack_fast_rsm_output()
18694 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18702 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */ in rack_fast_rsm_output()
18712 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_rsm_output()
18722 if (rack->r_is_v6) { in rack_fast_rsm_output()
18723 if (tp->t_port) { in rack_fast_rsm_output()
18724 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_rsm_output()
18725 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18726 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_rsm_output()
18727 th->th_sum = htons(0); in rack_fast_rsm_output()
18730 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_rsm_output()
18731 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18732 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_rsm_output()
18743 if (tp->t_port) { in rack_fast_rsm_output()
18744 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_rsm_output()
18745 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18746 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18747 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_rsm_output()
18748 th->th_sum = htons(0); in rack_fast_rsm_output()
18751 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_rsm_output()
18752 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18753 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18754 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_rsm_output()
18758 KASSERT(ip->ip_v == IPVERSION, in rack_fast_rsm_output()
18759 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_rsm_output()
18766 * via either fast-path). in rack_fast_rsm_output()
18770 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_rsm_output()
18771 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_rsm_output()
18774 if (rack->r_is_v6) { in rack_fast_rsm_output()
18775 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18776 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_rsm_output()
18777 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_rsm_output()
18778 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18780 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18788 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_rsm_output()
18789 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18790 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_rsm_output()
18791 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18792 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_rsm_output()
18793 ip->ip_off |= htons(IP_DF); in rack_fast_rsm_output()
18796 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18802 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18805 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_rsm_output()
18807 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18810 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18815 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18816 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18819 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_rsm_output()
18821 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_rsm_output()
18823 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18826 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_fast_rsm_output()
18827 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18829 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_fast_rsm_output()
18832 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18833 if (rack->rack_no_prr) in rack_fast_rsm_output()
18836 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18837 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18838 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18841 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18842 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18844 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18851 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_rsm_output()
18853 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18854 if (rsm->r_rtr_cnt > 0) { in rack_fast_rsm_output()
18859 log.u_bbr.flex5 = rsm->r_fas; in rack_fast_rsm_output()
18860 log.u_bbr.bbr_substate = rsm->r_bas; in rack_fast_rsm_output()
18867 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_rsm_output()
18869 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18872 log.u_bbr.delRate = rsm->r_flags; in rack_fast_rsm_output()
18874 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18882 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18887 if (rack->r_is_v6) { in rack_fast_rsm_output()
18888 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_rsm_output()
18889 &inp->inp_route6, in rack_fast_rsm_output()
18897 &inp->inp_route, in rack_fast_rsm_output()
18903 lgb->tlb_errno = error; in rack_fast_rsm_output()
18907 tp->snd_nxt = tp->snd_max; in rack_fast_rsm_output()
18910 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18911 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18912 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18913 rack->r_early = 0; in rack_fast_rsm_output()
18914 rack->r_late = 0; in rack_fast_rsm_output()
18915 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18917 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), in rack_fast_rsm_output()
18918 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); in rack_fast_rsm_output()
18920 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18921 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18925 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); in rack_fast_rsm_output()
18927 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18928 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18929 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18930 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18932 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18933 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18935 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
18937 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_rsm_output()
18938 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18939 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_fast_rsm_output()
18940 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18946 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_rsm_output()
18950 if (tp->t_rtttime == 0) { in rack_fast_rsm_output()
18951 tp->t_rtttime = ticks; in rack_fast_rsm_output()
18952 tp->t_rtseq = startseq; in rack_fast_rsm_output()
18957 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18958 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18959 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18962 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18963 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18964 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18965 rack->rc_enobuf++; in rack_fast_rsm_output()
18968 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18970 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
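/*
 * Sketch of the ENOBUFS backoff visible above: every consecutive buffer
 * failure adds another millisecond to the pacing slot, with the counter
 * saturating at 0x7f; a later successful send resets it (not shown).
 */
#include <stdint.h>
#define USEC_PER_MSEC 1000

static uint32_t
enobuf_backoff_usec(uint8_t *enobuf_cnt)
{
	uint32_t slot = (1 + (uint32_t)*enobuf_cnt) * USEC_PER_MSEC;

	if (*enobuf_cnt < 0x7f)
		(*enobuf_cnt)++;	/* grow the next backoff */
	return (slot);
}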
18979 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
18980 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_rsm_output()
18981 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_rsm_output()
18982 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
18990 return (-1); in rack_fast_rsm_output()
19001 * delay (e.g. trans-continental/oceanic links). Setting the in rack_sndbuf_autoscale()
19023 tp = rack->rc_tp; in rack_sndbuf_autoscale()
19024 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
19025 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
19026 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { in rack_sndbuf_autoscale()
19027 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && in rack_sndbuf_autoscale()
19028 sbused(&so->so_snd) >= in rack_sndbuf_autoscale()
19029 (so->so_snd.sb_hiwat / 8 * 7) && in rack_sndbuf_autoscale()
19030 sbused(&so->so_snd) < V_tcp_autosndbuf_max && in rack_sndbuf_autoscale()
19031 sendwin >= (sbused(&so->so_snd) - in rack_sndbuf_autoscale()
19032 (tp->snd_max - tp->snd_una))) { in rack_sndbuf_autoscale()
19034 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; in rack_sndbuf_autoscale()
19039 scaleup += so->so_snd.sb_hiwat; in rack_sndbuf_autoscale()
19043 so->so_snd.sb_flags &= ~SB_AUTOSIZE; in rack_sndbuf_autoscale()
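/*
 * The autoscale trigger above, restated as a standalone predicate using
 * the same proportions: grow only when the peer's window could cover
 * more than the buffer holds (5/4 rule), the buffer is nearly full (7/8
 * rule), the hard cap is unreached, and the window is draining data.
 */
#include <stdint.h>

static int
should_grow_sndbuf(uint32_t snd_wnd, uint32_t sb_hiwat, uint32_t sb_used,
    uint32_t unacked, uint32_t sendwin, uint32_t autosndbuf_max)
{
	return (((snd_wnd / 4 * 5) >= sb_hiwat) &&
	    (sb_used >= (sb_hiwat / 8 * 7)) &&
	    (sb_used < autosndbuf_max) &&
	    (sendwin >= (sb_used - unacked)));
}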
19058 * the max-burst). We have how much to send and all the info we in rack_fast_output()
19088 if (rack->r_is_v6) { in rack_fast_output()
19089 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19095 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
19099 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_output()
19103 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
19104 startseq = tp->snd_max; in rack_fast_output()
19105 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
19106 inp = rack->rc_inp; in rack_fast_output()
19107 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19109 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
19110 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_output()
19111 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_output()
19112 to.to_tsecr = tp->ts_recent; in rack_fast_output()
19116 /* TCP-MD5 (RFC2385). */ in rack_fast_output()
19117 if (tp->t_flags & TF_SIGNATURE) in rack_fast_output()
19122 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
19125 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
19126 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19127 else if (rack->rc_user_set_max_segs) in rack_fast_output()
19128 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
19131 if ((tp->t_flags & TF_TSO) && in rack_fast_output()
19134 (tp->t_port == 0)) in rack_fast_output()
19145 m->m_data += max_linkhdr; in rack_fast_output()
19146 m->m_len = hdrlen; in rack_fast_output()
19147 th = rack->r_ctl.fsb.th; in rack_fast_output()
19156 if_hw_tsomax = tp->t_tsomax; in rack_fast_output()
19157 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_output()
19158 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_output()
19165 max_len = (if_hw_tsomax - hdrlen - in rack_fast_output()
19187 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_output()
19190 sb_offset = tp->snd_max - tp->snd_una; in rack_fast_output()
19191 th->th_seq = htonl(tp->snd_max); in rack_fast_output()
19192 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_output()
19193 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
19194 if (th->th_win == 0) { in rack_fast_output()
19195 tp->t_sndzerowin++; in rack_fast_output()
19196 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_output()
19198 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_output()
19199 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_fast_output()
19203 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_fast_output()
19206 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
19210 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19221 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19222 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19226 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_output()
19230 if (rack->r_is_v6) in rack_fast_output()
19231 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_output()
19233 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_output()
19234 udp->uh_ulen = htons(ulen); in rack_fast_output()
19236 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_output()
19237 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_output()
19238 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_output()
19240 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_output()
19241 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_output()
19242 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_output()
19244 if (rack->r_is_v6) { in rack_fast_output()
19245 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_output()
19246 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_output()
19252 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_output()
19253 ip->ip_tos |= ect; in rack_fast_output()
19258 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */ in rack_fast_output()
19268 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_output()
19278 if (rack->r_is_v6) { in rack_fast_output()
19279 if (tp->t_port) { in rack_fast_output()
19280 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_output()
19281 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19282 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_output()
19283 th->th_sum = htons(0); in rack_fast_output()
19286 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_output()
19287 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19288 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_output()
19299 if (tp->t_port) { in rack_fast_output()
19300 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_output()
19301 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19302 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19303 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_output()
19304 th->th_sum = htons(0); in rack_fast_output()
19307 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_output()
19308 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19309 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19310 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_output()
19314 KASSERT(ip->ip_v == IPVERSION, in rack_fast_output()
19315 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_output()
19322 * via either fast-path). in rack_fast_output()
19326 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_output()
19327 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_output()
19330 if (rack->r_is_v6) { in rack_fast_output()
19331 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19332 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_output()
19333 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_output()
19334 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19336 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19344 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_output()
19345 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19346 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_output()
19347 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19348 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_output()
19349 ip->ip_off |= htons(IP_DF); in rack_fast_output()
19352 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19356 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_output()
19358 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19361 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19365 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19366 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19369 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_output()
19371 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_output()
19373 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19377 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19381 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19382 if (rack->rack_no_prr) in rack_fast_output()
19385 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19386 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19387 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19390 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19391 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19393 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19397 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_output()
19399 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19401 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19402 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19404 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19409 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_output()
19415 if (rack->r_is_v6) { in rack_fast_output()
19416 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_output()
19417 &inp->inp_route6, in rack_fast_output()
19427 &inp->inp_route, in rack_fast_output()
19432 lgb->tlb_errno = error; in rack_fast_output()
19439 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19440 rack->rc_hw_nobuf = 0; in rack_fast_output()
19441 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19442 rack->r_early = 0; in rack_fast_output()
19443 rack->r_late = 0; in rack_fast_output()
19444 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19446 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19448 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(tv); in rack_fast_output()
19449 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19450 rack->lt_bw_up = 1; in rack_fast_output()
19452 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19460 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19461 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19463 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19464 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19465 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
19468 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), in rack_fast_output()
19469 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); in rack_fast_output()
19470 if (tp->snd_una == tp->snd_max) { in rack_fast_output()
19471 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_fast_output()
19473 tp->t_acktime = ticks; in rack_fast_output()
19476 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); in rack_fast_output()
19478 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_output()
19480 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_fast_output()
19481 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); in rack_fast_output()
19482 tp->snd_max += len; in rack_fast_output()
19483 tp->snd_nxt = tp->snd_max; in rack_fast_output()
19484 if (rack->rc_new_rnd_needed) { in rack_fast_output()
19485 rack_new_round_starts(tp, rack, tp->snd_max); in rack_fast_output()
19492 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_output()
19496 if (len <= rack->r_ctl.fsb.left_to_send) in rack_fast_output()
19497 rack->r_ctl.fsb.left_to_send -= len; in rack_fast_output()
19499 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19500 if (rack->r_ctl.fsb.left_to_send < segsiz) { in rack_fast_output()
19501 rack->r_fast_output = 0; in rack_fast_output()
19502 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19504 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19506 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19508 if (tp->t_rtttime == 0) { in rack_fast_output()
19509 tp->t_rtttime = ticks; in rack_fast_output()
19510 tp->t_rtseq = startseq; in rack_fast_output()
19513 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && in rack_fast_output()
19515 (*tot_len < rack->r_ctl.rc_pace_max_segs) && in rack_fast_output()
19517 max_val -= len; in rack_fast_output()
19519 th = rack->r_ctl.fsb.th; in rack_fast_output()
19525 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_output()
19531 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19532 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_output()
19533 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_output()
19534 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); in rack_fast_output()
19542 rack->r_fast_output = 0; in rack_fast_output()
19543 return (-1); in rack_fast_output()
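/*
 * Editor's sketch (assumption about intent, not the kernel API): the
 * "fsb" fast-send block behaves like a cached header template -- the
 * IP+TCP header image is prebuilt once, memcpy()'d in front of every
 * burst, and only the volatile fields are patched, which is what the
 * memcpy(cpto, ...) above does. A minimal userspace analogue:
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct hdr_tmpl {
	uint8_t image[64];	/* prebuilt header bytes */
	size_t  len;		/* valid length of image[] */
	size_t  seq_off;	/* byte offset of the TCP sequence field */
};

/* Stamp the cached header onto a packet and patch the sequence number. */
static void
stamp_hdr(const struct hdr_tmpl *t, uint8_t *pkt, uint32_t seq)
{
	uint32_t nseq = htonl(seq);

	memcpy(pkt, t->image, t->len);
	memcpy(pkt + t->seq_off, &nseq, sizeof(nseq));
}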
19553 rack->r_fast_output = 1; in rack_setup_fast_output()
19554 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_setup_fast_output()
19555 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_setup_fast_output()
19556 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_setup_fast_output()
19557 rack->r_ctl.fsb.tcp_flags = flags; in rack_setup_fast_output()
19558 rack->r_ctl.fsb.left_to_send = orig_len - len; in rack_setup_fast_output()
19559 if (rack->r_ctl.fsb.left_to_send < pace_max_seg) { in rack_setup_fast_output()
19561 rack->r_fast_output = 0; in rack_setup_fast_output()
19565 rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg); in rack_setup_fast_output()
19568 rack->r_ctl.fsb.hw_tls = 1; in rack_setup_fast_output()
19570 rack->r_ctl.fsb.hw_tls = 0; in rack_setup_fast_output()
19571 KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))), in rack_setup_fast_output()
19573 rack, rack->r_ctl.fsb.left_to_send, sbavail(sb), in rack_setup_fast_output()
19574 (tp->snd_max - tp->snd_una))); in rack_setup_fast_output()
19575 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_setup_fast_output()
19576 rack->r_fast_output = 0; in rack_setup_fast_output()
19578 if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una))) in rack_setup_fast_output()
19579 rack->r_ctl.fsb.rfo_apply_push = 1; in rack_setup_fast_output()
19581 rack->r_ctl.fsb.rfo_apply_push = 0; in rack_setup_fast_output()
19592 maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC); in rack_get_hpts_pacing_min_for_bw()
19604 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_check_collapsed()
19605 if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_check_collapsed()
19607 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19611 if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) { in rack_check_collapsed()
19618 if (rsm->r_flags & RACK_ACKED) { in rack_check_collapsed()
19623 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_check_collapsed()
19625 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_check_collapsed()
19626 rack->r_ctl.high_collapse_point)) { in rack_check_collapsed()
19627 rack->r_collapse_point_valid = 0; in rack_check_collapsed()
19633 thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1); in rack_check_collapsed()
19634 if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) { in rack_check_collapsed()
19635 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19636 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19637 thresh, __LINE__, 6, rsm->r_flags, rsm); in rack_check_collapsed()
19641 rack_log_collapse(rack, rsm->r_start, in rack_check_collapsed()
19642 (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])), in rack_check_collapsed()
19643 thresh, __LINE__, 7, rsm->r_flags, rsm); in rack_check_collapsed()
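/*
 * Editor's sketch: the collapse check above is a plain staleness test --
 * a collapsed segment is retransmitted only once its last-send time is
 * older than the RACK reorder threshold (values in usec here).
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
collapse_rxt_due(uint32_t now, uint32_t last_sent, uint32_t thresh)
{
	return ((uint32_t)(now - last_sent) > thresh); /* wrap-safe */
}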
19650 if ((rack->full_size_rxt == 0) && in rack_validate_sizes()
19651 (rack->shape_rxt_to_pacing_min == 0) && in rack_validate_sizes()
19654 } else if (rack->shape_rxt_to_pacing_min && in rack_validate_sizes()
19655 rack->gp_ready) { in rack_validate_sizes()
19744 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_output()
19749 hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS); in rack_output()
19750 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_output()
19752 if (tp->t_flags & TF_TOE) { in rack_output()
19759 if (rack->rack_deferred_inited == 0) { in rack_output()
19772 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19773 (tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
19774 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN|ACK sent */ in rack_output()
19775 (rack->r_ctl.rc_resend == NULL)) { /* not a retransmit */ in rack_output()
19782 if (rack->r_state) { in rack_output()
19784 isipv6 = rack->r_is_v6; in rack_output()
19786 isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0; in rack_output()
19792 if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) && in rack_output()
19793 tcp_in_hpts(rack->rc_tp)) { in rack_output()
19801 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19802 TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) { in rack_output()
19804 delayed = cts - rack->r_ctl.rc_last_output_to; in rack_output()
19809 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_output()
19826 if (rack->rc_in_persist) { in rack_output()
19827 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19836 if ((rack->rc_ack_required == 1) && in rack_output()
19837 (rack->r_timer_override == 0)){ in rack_output()
19839 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_output()
19848 if ((rack->r_timer_override) || in rack_output()
19849 (rack->rc_ack_can_sendout_data) || in rack_output()
19851 (tp->t_state < TCPS_ESTABLISHED)) { in rack_output()
19852 rack->rc_ack_can_sendout_data = 0; in rack_output()
19853 if (tcp_in_hpts(rack->rc_tp)) in rack_output()
19854 tcp_hpts_remove(rack->rc_tp); in rack_output()
19855 } else if (tcp_in_hpts(rack->rc_tp)) { in rack_output()
19862 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
19863 tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val); in rack_output()
19864 tp->tcp_cnt_counters[SND_BLOCKED]++; in rack_output()
19872 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_output()
19873 TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_output()
19874 early = rack->r_ctl.rc_last_output_to - cts; in rack_output()
19877 if (delayed && (rack->rc_always_pace == 1)) { in rack_output()
19878 rack->r_ctl.rc_agg_delayed += delayed; in rack_output()
19879 rack->r_late = 1; in rack_output()
19880 } else if (early && (rack->rc_always_pace == 1)) { in rack_output()
19881 rack->r_ctl.rc_agg_early += early; in rack_output()
19882 rack->r_early = 1; in rack_output()
19883 } else if (rack->rc_always_pace == 0) { in rack_output()
19884 /* Non-paced we are not late */ in rack_output()
19885 rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0; in rack_output()
19886 rack->r_early = rack->r_late = 0; in rack_output()
19889 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_output()
19890 rack->r_wanted_output = 0; in rack_output()
19891 rack->r_timer_override = 0; in rack_output()
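/*
 * Editor's sketch of the early/late accounting above: compare the
 * actual wakeup time with the scheduled pacing deadline and accumulate
 * the error on the matching side so later slots can compensate.
 */
#include <stdint.h>

struct pace_err { uint32_t agg_early, agg_delayed; };

static inline void
pace_account(struct pace_err *pe, uint32_t now, uint32_t sched)
{
	if ((int32_t)(now - sched) >= 0)
		pe->agg_delayed += now - sched;	/* woke up late */
	else
		pe->agg_early += sched - now;	/* woke up early */
}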
19892 if ((tp->t_state != rack->r_state) && in rack_output()
19893 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
19896 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_output()
19898 if (rack->r_ctl.rc_pace_max_segs == 0) in rack_output()
19899 pace_max_seg = rack->rc_user_set_max_segs * segsiz; in rack_output()
19901 pace_max_seg = rack->r_ctl.rc_pace_max_segs; in rack_output()
19902 if ((rack->r_fast_output) && in rack_output()
19904 (tp->rcv_numsacks == 0)) { in rack_output()
19912 inp = rack->rc_inp; in rack_output()
19913 so = inp->inp_socket; in rack_output()
19914 sb = &so->so_snd; in rack_output()
19921 /* We need to re-pin since fast_output un-pinned */ in rack_output()
19928 inp = rack->rc_inp; in rack_output()
19934 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
19935 ((tp->t_state == TCPS_SYN_RECEIVED) || in rack_output()
19936 (tp->t_state == TCPS_SYN_SENT)) && in rack_output()
19937 SEQ_GT(tp->snd_max, tp->snd_una) && /* initial SYN or SYN|ACK sent */ in rack_output()
19938 (tp->t_rxtshift == 0)) { /* not a retransmit */ in rack_output()
19951 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una); in rack_output()
19952 if (tp->t_idle_reduce) { in rack_output()
19953 if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) in rack_output()
19956 tp->t_flags &= ~TF_LASTIDLE; in rack_output()
19958 if (tp->t_flags & TF_MORETOCOME) { in rack_output()
19959 tp->t_flags |= TF_LASTIDLE; in rack_output()
19963 if ((tp->snd_una == tp->snd_max) && in rack_output()
19964 rack->r_ctl.rc_went_idle_time && in rack_output()
19965 (cts > rack->r_ctl.rc_went_idle_time)) { in rack_output()
19966 tot_idle = (cts - rack->r_ctl.rc_went_idle_time); in rack_output()
19969 if (rack->in_probe_rtt == 0) { in rack_output()
19970 rack->r_ctl.rc_lower_rtt_us_cts = cts; in rack_output()
19971 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19972 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19973 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_output()
19981 (rack->r_ctl.fsb.tcp_ip_hdr) && in rack_output()
19982 (rack->r_fsb_inited == 0) && in rack_output()
19983 (rack->r_state != TCPS_CLOSED)) in rack_output()
19984 rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]); in rack_output()
19985 if (rack->rc_sendvars_notset == 1) { in rack_output()
19986 rack->rc_sendvars_notset = 0; in rack_output()
19988 * Make sure any TCP timers (keep-alive) is not running. in rack_output()
19992 if ((rack->rack_no_prr == 1) && in rack_output()
19993 (rack->rc_always_pace == 0)) { in rack_output()
19996 * no-pacing enabled and prr is turned off that in rack_output()
20004 rack->rack_no_prr = 0; in rack_output()
20006 if ((rack->pcm_enabled == 1) && in rack_output()
20007 (rack->pcm_needed == 0) && in rack_output()
20015 if (tp->t_srtt) in rack_output()
20016 rtts_idle = tot_idle / tp->t_srtt; in rack_output()
20019 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_output()
20020 rack->r_ctl.pcm_idle_rounds += rtts_idle; in rack_output()
20021 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_output()
20022 rack->pcm_needed = 1; in rack_output()
20023 rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round ); in rack_output()
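/*
 * Editor's sketch of the PCM cadence above: a path-capacity measurement
 * is armed when the rounds elapsed since the last one -- counting idle
 * time as roughly idle/srtt "virtual" rounds -- reach the period.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
pcm_due(uint32_t cur_round, uint32_t last_round,
    uint32_t idle_us, uint32_t srtt_us, uint32_t period)
{
	uint32_t idle_rnds = srtt_us ? (idle_us / srtt_us) : 0;

	return (((cur_round - last_round) + idle_rnds) >= period);
}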
20032 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20033 (rack->r_ctl.pcm_max_seg == 0)) { in rack_output()
20039 rack->r_ctl.pcm_max_seg = rc_init_window(rack); in rack_output()
20040 if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) { in rack_output()
20044 rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10; in rack_output()
20047 if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) { in rack_output()
20050 if (tp->snd_wnd > ctf_outstanding(tp)) in rack_output()
20051 rw_avail = tp->snd_wnd - ctf_outstanding(tp); in rack_output()
20054 if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked)) in rack_output()
20055 cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
20058 if ((cwa >= rack->r_ctl.pcm_max_seg) && in rack_output()
20059 (rw_avail > rack->r_ctl.pcm_max_seg)) { in rack_output()
20061 pace_max_seg = rack->r_ctl.pcm_max_seg; in rack_output()
20063 rack->r_fast_output = 0; in rack_output()
20067 cwa, rack->r_ctl.pcm_max_seg, rw_avail); in rack_output()
20070 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20071 cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_output()
20072 flags = tcp_outflags[tp->t_state]; in rack_output()
20073 while (rack->rc_free_cnt < rack_free_cache) { in rack_output()
20079 so = inp->inp_socket; in rack_output()
20080 sb = &so->so_snd; in rack_output()
20083 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_output()
20084 rack->rc_free_cnt++; in rack_output()
20091 SOCK_SENDBUF_LOCK(inp->inp_socket); in rack_output()
20092 so = inp->inp_socket; in rack_output()
20093 sb = &so->so_snd; in rack_output()
20096 if (rack->r_ctl.rc_resend) { in rack_output()
20098 rsm = rack->r_ctl.rc_resend; in rack_output()
20099 rack->r_ctl.rc_resend = NULL; in rack_output()
20100 len = rsm->r_end - rsm->r_start; in rack_output()
20103 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20106 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20107 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20109 } else if (rack->r_collapse_point_valid && in rack_output()
20116 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT); in rack_output()
20117 rack->r_ctl.last_collapse_point = rsm->r_end; in rack_output()
20119 if (SEQ_GEQ(rack->r_ctl.last_collapse_point, in rack_output()
20120 rack->r_ctl.high_collapse_point)) in rack_output()
20121 rack->r_collapse_point_valid = 0; in rack_output()
20125 len = rsm->r_end - rsm->r_start; in rack_output()
20126 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20131 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_output()
20132 ((rsm->r_flags & RACK_MUST_RXT) == 0) && in rack_output()
20133 ((tp->t_flags & TF_WASFRECOVERY) == 0)) { in rack_output()
20134 /* Enter recovery if not induced by a time-out */ in rack_output()
20135 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_output()
20138 if (SEQ_LT(rsm->r_start, tp->snd_una)) { in rack_output()
20140 tp, rack, rsm, rsm->r_start, tp->snd_una); in rack_output()
20143 len = rsm->r_end - rsm->r_start; in rack_output()
20144 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20147 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20148 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20157 } else if (rack->r_ctl.rc_tlpsend) { in rack_output()
20168 rsm = rack->r_ctl.rc_tlpsend; in rack_output()
20170 rsm->r_flags |= RACK_TLP; in rack_output()
20171 rack->r_ctl.rc_tlpsend = NULL; in rack_output()
20173 tlen = rsm->r_end - rsm->r_start; in rack_output()
20176 KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start), in rack_output()
20179 rsm->r_start, tp->snd_una, tp, rack, rsm)); in rack_output()
20180 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20181 cwin = min(tp->snd_wnd, tlen); in rack_output()
20184 if (rack->r_must_retran && in rack_output()
20186 (SEQ_GT(tp->snd_max, tp->snd_una)) && in rack_output()
20191 * a) This is a non-sack connection, we had a time-out in rack_output()
20205 sendwin = min(tp->snd_wnd, tp->snd_cwnd); in rack_output()
20206 flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto); in rack_output()
20211 so = inp->inp_socket; in rack_output()
20212 sb = &so->so_snd; in rack_output()
20217 * outstanding/not-acked should be marked. in rack_output()
20220 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_output()
20223 rack->r_must_retran = 0; in rack_output()
20224 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20225 so = inp->inp_socket; in rack_output()
20226 sb = &so->so_snd; in rack_output()
20229 if ((rsm->r_flags & RACK_MUST_RXT) == 0) { in rack_output()
20234 rack->r_must_retran = 0; in rack_output()
20235 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
20240 len = rsm->r_end - rsm->r_start; in rack_output()
20241 sb_offset = rsm->r_start - tp->snd_una; in rack_output()
20243 if ((rack->full_size_rxt == 0) && in rack_output()
20244 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20247 else if (rack->shape_rxt_to_pacing_min && in rack_output()
20248 rack->gp_ready) { in rack_output()
20269 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_output()
20271 if (!rack->alloc_limit_reported) { in rack_output()
20272 rack->alloc_limit_reported = 1; in rack_output()
20275 so = inp->inp_socket; in rack_output()
20276 sb = &so->so_snd; in rack_output()
20279 if (rsm && (rsm->r_flags & RACK_HAS_FIN)) { in rack_output()
20281 len--; in rack_output()
20290 if (rsm && rack->r_fsb_inited && in rack_output()
20292 ((rsm->r_flags & RACK_HAS_FIN) == 0)) { in rack_output()
20299 so = inp->inp_socket; in rack_output()
20300 sb = &so->so_snd; in rack_output()
20306 if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) && in rack_output()
20307 rack->rack_enable_scwnd) { in rack_output()
20309 if (rack->gp_ready && in rack_output()
20310 (rack->rack_attempted_scwnd == 0) && in rack_output()
20311 (rack->r_ctl.rc_scw == NULL) && in rack_output()
20312 tp->t_lib) { in rack_output()
20315 rack->rack_attempted_scwnd = 1; in rack_output()
20316 rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp, in rack_output()
20317 &rack->r_ctl.rc_scw_index, in rack_output()
20320 if (rack->r_ctl.rc_scw && in rack_output()
20321 (rack->rack_scwnd_is_idle == 1) && in rack_output()
20322 sbavail(&so->so_snd)) { in rack_output()
20324 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
20325 rack->rack_scwnd_is_idle = 0; in rack_output()
20327 if (rack->r_ctl.rc_scw) { in rack_output()
20329 rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw, in rack_output()
20330 rack->r_ctl.rc_scw_index, in rack_output()
20331 tp->snd_cwnd, tp->snd_wnd, segsiz); in rack_output()
20339 if (tp->t_flags & TF_NEEDFIN) in rack_output()
20341 if (tp->t_flags & TF_NEEDSYN) in rack_output()
20345 end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_output()
20352 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_output()
20353 (tp->t_flags & TF_FASTOPEN))) { in rack_output()
20363 if (SEQ_GT(tp->snd_max, tp->snd_una) && avail) in rack_output()
20364 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
20367 if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) { in rack_output()
20368 if (rack->r_ctl.rc_tlp_new_data) { in rack_output()
20370 if (rack->r_ctl.rc_tlp_new_data > (uint32_t) (avail - sb_offset)) { in rack_output()
20371 rack->r_ctl.rc_tlp_new_data = (uint32_t) (avail - sb_offset); in rack_output()
20373 if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) { in rack_output()
20374 if (tp->snd_wnd > sb_offset) in rack_output()
20375 len = tp->snd_wnd - sb_offset; in rack_output()
20379 len = rack->r_ctl.rc_tlp_new_data; in rack_output()
20381 rack->r_ctl.rc_tlp_new_data = 0; in rack_output()
20385 if ((rack->r_ctl.crte == NULL) && in rack_output()
20386 IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20387 (rack->full_size_rxt == 0) && in rack_output()
20388 (rack->shape_rxt_to_pacing_min == 0) && in rack_output()
20398 } else if (rack->shape_rxt_to_pacing_min && in rack_output()
20399 rack->gp_ready) { in rack_output()
20417 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20418 if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) { in rack_output()
20419 if (tp->snd_wnd > outstanding) { in rack_output()
20420 len = tp->snd_wnd - outstanding; in rack_output()
20425 len = avail - sb_offset; in rack_output()
20433 len = avail - sb_offset; in rack_output()
20438 if (len > rack->r_ctl.rc_prr_sndcnt) { in rack_output()
20439 len = rack->r_ctl.rc_prr_sndcnt; in rack_output()
20451 * let us send a lot as well :-) in rack_output()
20453 if (rack->r_ctl.rc_prr_sendalot == 0) { in rack_output()
20465 leftinsb = sbavail(sb) - sb_offset; in rack_output()
20472 } else if (!TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_output()
20479 !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20491 * SYN-SENT state and if segment contains data and if we don't know in rack_output()
20495 SEQ_GT(tp->snd_max, tp->snd_una) && in rack_output()
20497 (tp->t_rxtshift == 0))) { in rack_output()
20502 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20503 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_output()
20511 if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) { in rack_output()
20518 * - When retransmitting SYN|ACK on a passively-created socket in rack_output()
20520 * - When retransmitting SYN on an actively created socket in rack_output()
20522 * - When sending a zero-length cookie (cookie request) on an in rack_output()
20525 * - When the socket is in the CLOSED state (RST is being sent) in rack_output()
20527 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
20528 (((flags & TH_SYN) && (tp->t_rxtshift > 0)) || in rack_output()
20529 ((tp->t_state == TCPS_SYN_SENT) && in rack_output()
20530 (tp->t_tfo_client_cookie_len == 0)) || in rack_output()
20535 /* Without fast-open there should never be data sent on a SYN */ in rack_output()
20536 if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) { in rack_output()
20550 if ((tp->snd_wnd == 0) && in rack_output()
20551 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20552 (tp->snd_una == tp->snd_max) && in rack_output()
20554 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20564 if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20565 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
20567 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20577 if (tp->snd_max == tp->snd_una) { in rack_output()
20582 rack_enter_persist(tp, rack, cts, tp->snd_una); in rack_output()
20585 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20586 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20599 } else if (((tp->snd_wnd - ctf_outstanding(tp)) < in rack_output()
20600 min((rack->r_ctl.rc_high_rwnd/2), minseg)) && in rack_output()
20601 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) && in rack_output()
20602 (len < (int)(sbavail(sb) - sb_offset)) && in rack_output()
20603 (TCPS_HAVEESTABLISHED(tp->t_state))) { in rack_output()
20613 } else if ((rack->r_ctl.crte != NULL) && in rack_output()
20614 (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) && in rack_output()
20616 (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) && in rack_output()
20617 (len < (int)(sbavail(sb) - sb_offset))) { in rack_output()
20637 * defeats the point of hw-pacing (i.e. to help us get in rack_output()
20652 * presence of TCP-MD5, SACK retransmits, SACK advertisements and IP in rack_output()
20666 * Pre-calculate here as we save another lookup into the darknesses in rack_output()
20685 if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz && in rack_output()
20686 (tp->t_port == 0) && in rack_output()
20687 ((tp->t_flags & TF_SIGNATURE) == 0) && in rack_output()
20694 outstanding = tp->snd_max - tp->snd_una; in rack_output()
20695 if (tp->t_flags & TF_SENTFIN) { in rack_output()
20700 outstanding--; in rack_output()
20703 if ((rsm->r_flags & RACK_HAS_FIN) == 0) in rack_output()
20707 recwin = lmin(lmax(sbspace(&so->so_rcv), 0), in rack_output()
20708 (long)TCP_MAXWIN << tp->rcv_scale); in rack_output()
20712 * conditions when len is non-zero: in rack_output()
20714 * - We have a full segment (or more with TSO) - This is the last in rack_output()
20716 * NODELAY - we've timed out (e.g. persist timer) - we have more in rack_output()
20718 * limited the window size) - we need to retransmit in rack_output()
20730 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */ in rack_output()
20731 (idle || (tp->t_flags & TF_NODELAY)) && in rack_output()
20733 (tp->t_flags & TF_NOPUSH) == 0) { in rack_output()
20737 if ((tp->snd_una == tp->snd_max) && len) { /* Nothing outstanding */ in rack_output()
20741 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { in rack_output()
20749 if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) && in rack_output()
20786 * pending (it will get piggy-backed on it) or the remote side in rack_output()
20787 * already has done a half-close and won't send more data. Skip in rack_output()
20788 * this if the connection is in T/TCP half-open state. in rack_output()
20790 if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) && in rack_output()
20791 !(tp->t_flags & TF_DELACK) && in rack_output()
20792 !TCPS_HAVERCVDFIN(tp->t_state)) { in rack_output()
20796 * tp->rcv_scale. in rack_output()
20802 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { in rack_output()
20803 oldwin = (tp->rcv_adv - tp->rcv_nxt); in rack_output()
20805 adv -= oldwin; in rack_output()
20818 if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale) in rack_output()
20822 (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) || in rack_output()
20823 recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) || in rack_output()
20824 so->so_rcv.sb_hiwat <= 8 * segsiz)) { in rack_output()
20828 if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) { in rack_output()
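/*
 * Editor's sketch of the pure window-update policy above (the classic
 * tcp_output() heuristic; the 2*segsiz precondition is elided from the
 * listing but assumed here): volunteer an ACK only when the advertised
 * window would grow by a meaningful amount.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
want_win_update(int32_t adv, uint32_t hiwat, int32_t recwin, uint32_t segsiz)
{
	if (adv >= (int32_t)(2 * segsiz) &&
	    (adv >= (int32_t)(hiwat / 4) ||
	     recwin <= (int32_t)(hiwat / 8) ||
	     hiwat <= 8 * segsiz))
		return (true);
	if (2 * adv >= (int32_t)hiwat)
		return (true);
	return (false);
}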
20837 * is also a catch-all for the retransmit timer timeout case. in rack_output()
20839 if (tp->t_flags & TF_ACKNOW) { in rack_output()
20843 if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) { in rack_output()
20852 (tp->snd_max == tp->snd_una)) { in rack_output()
20865 if ((tp->t_flags & TF_FASTOPEN) == 0 && in rack_output()
20868 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
20869 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
20878 * the peer wait for the delayed-ack timer to run off in rack_output()
20884 rack->r_ctl.fsb.recwin = recwin; in rack_output()
20890 rack->r_fsb_inited && in rack_output()
20891 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
20892 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
20894 (rack->r_must_retran == 0) && in rack_output()
20895 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
20898 ((orig_len - len) >= segsiz) && in rack_output()
20905 rack->r_fast_output = 0; in rack_output()
20910 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
20911 tp->snd_nxt = tp->snd_max; in rack_output()
20914 uint32_t seq = tp->gput_ack; in rack_output()
20916 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
20919 * Mark the last sent that we just-returned (hinting in rack_output()
20922 rsm->r_just_ret = 1; in rack_output()
20925 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
20926 rack->r_early = 0; in rack_output()
20927 rack->r_late = 0; in rack_output()
20928 rack->r_ctl.rc_agg_early = 0; in rack_output()
20930 min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)), in rack_output()
20931 minseg)) >= tp->snd_wnd) { in rack_output()
20934 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20935 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20937 /* We are limited by what's available -- app limited */ in rack_output()
20939 if (IN_FASTRECOVERY(tp->t_flags)) in rack_output()
20940 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
20942 ((tp->t_flags & TF_NODELAY) == 0) && in rack_output()
20949 * don't send. Another app-limited case. in rack_output()
20952 } else if (tp->t_flags & TF_NOPUSH) { in rack_output()
20963 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_output()
20964 (rack->rack_no_prr == 0) && in rack_output()
20965 (rack->r_ctl.rc_prr_sndcnt < segsiz)) { in rack_output()
21020 if ((tp->t_flags & TF_GPUTINPROG) && in rack_output()
21021 SEQ_GT(tp->gput_ack, tp->snd_max)) { in rack_output()
21022 tp->gput_ack = tp->snd_max; in rack_output()
21023 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_output()
21027 tp->t_flags &= ~TF_GPUTINPROG; in rack_output()
21028 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_output()
21029 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_output()
21030 tp->gput_seq, in rack_output()
21036 rsm = tqhash_max(rack->r_ctl.tqh); in rack_output()
21037 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_output()
21038 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_output()
21039 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_output()
21046 if (rack->r_ctl.rc_end_appl) in rack_output()
21047 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_output()
21048 rack->r_ctl.rc_end_appl = rsm; in rack_output()
21050 rsm->r_flags |= RACK_APP_LIMITED; in rack_output()
21051 rack->r_ctl.rc_app_limited_cnt++; in rack_output()
21055 rack->r_ctl.rc_app_limited_cnt, seq, in rack_output()
21056 tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0); in rack_output()
21060 if ((tp->snd_max == tp->snd_una) && in rack_output()
21061 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21063 (sbavail(sb) > tp->snd_wnd) && in rack_output()
21064 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) { in rack_output()
21065 /* Yes lets make sure to move to persist before timer-start */ in rack_output()
21066 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_output()
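/*
 * Editor's sketch of the persist-entry test above: with nothing in
 * flight, data queued beyond the peer's window, and that window smaller
 * than a useful send, park the connection in persist mode instead of
 * pacing into a near-zero window.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
should_enter_persist(uint32_t snd_max, uint32_t snd_una, uint32_t sb_avail,
    uint32_t snd_wnd, uint32_t high_rwnd, uint32_t minseg)
{
	uint32_t useful = high_rwnd / 2;

	if (useful > minseg)
		useful = minseg;	/* min(high_rwnd/2, minseg) */
	return ((snd_max == snd_una) && (sb_avail > snd_wnd) &&
	    (snd_wnd < useful));
}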
21073 rack->r_ctl.rc_scw) { in rack_output()
21074 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_output()
21075 rack->rack_scwnd_is_idle = 1; in rack_output()
21081 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21082 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
21083 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_output()
21084 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
21088 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21089 tp->tcp_cnt_counters[SND_LIMITED]++; in rack_output()
21090 tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val); in rack_output()
21098 if ((rack->r_ctl.crte != NULL) && in rack_output()
21100 ((rack->rc_hw_nobuf == 1) || in rack_output()
21110 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
21111 rack->r_ctl.rc_agg_early = 0; in rack_output()
21112 rack->r_early = 0; in rack_output()
21113 rack->r_late = 0; in rack_output()
21131 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21132 (sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21133 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21142 * the peer wait for the delayed-ack timer to run off in rack_output()
21155 (rack->pcm_in_progress == 0) && in rack_output()
21156 (rack->r_ctl.pcm_max_seg > 0) && in rack_output()
21157 (len >= rack->r_ctl.pcm_max_seg)) { in rack_output()
21160 rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21162 rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
21168 tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT; in rack_output()
21170 tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT; in rack_output()
21192 * be snd_max-1 else it's snd_max. in rack_output()
21196 rack_seq = tp->iss; in rack_output()
21198 (tp->t_flags & TF_SENTFIN)) in rack_output()
21199 rack_seq = tp->snd_max - 1; in rack_output()
21201 rack_seq = tp->snd_max; in rack_output()
21203 rack_seq = rsm->r_start; in rack_output()
21207 * established connection segments. Options for SYN-ACK segments in rack_output()
21211 if ((tp->t_flags & TF_NOOPT) == 0) { in rack_output()
21214 to.to_mss = tcp_mssopt(&inp->inp_inc); in rack_output()
21215 if (tp->t_port) in rack_output()
21216 to.to_mss -= V_tcp_udp_tunneling_overhead; in rack_output()
21226 if ((tp->t_flags & TF_FASTOPEN) && in rack_output()
21227 (tp->t_rxtshift == 0)) { in rack_output()
21228 if (tp->t_state == TCPS_SYN_RECEIVED) { in rack_output()
21231 (u_int8_t *)&tp->t_tfo_cookie.server; in rack_output()
21234 } else if (tp->t_state == TCPS_SYN_SENT) { in rack_output()
21236 tp->t_tfo_client_cookie_len; in rack_output()
21238 tp->t_tfo_cookie.client; in rack_output()
21253 if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) { in rack_output()
21254 to.to_wscale = tp->request_r_scale; in rack_output()
21258 if ((tp->t_flags & TF_RCVD_TSTMP) || in rack_output()
21259 ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) { in rack_output()
21262 if ((rack->r_rcvpath_rtt_up == 1) && in rack_output()
21263 (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) { in rack_output()
21271 * our ack-probe. in rack_output()
21277 to.to_tsval = ts_to_use + tp->ts_offset; in rack_output()
21278 to.to_tsecr = tp->ts_recent; in rack_output()
21281 (TCPS_HAVEESTABLISHED(tp->t_state)) && in rack_output()
21282 ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) && in rack_output()
21283 (tp->snd_una == tp->snd_max) && in rack_output()
21286 (rack->r_ctl.current_round != 0) && in rack_output()
21288 (rack->r_rcvpath_rtt_up == 0)) { in rack_output()
21289 rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts; in rack_output()
21290 rack->r_ctl.last_time_of_arm_rcv = cts; in rack_output()
21291 rack->r_rcvpath_rtt_up = 1; in rack_output()
21293 rack_seq--; in rack_output()
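/*
 * Editor's note (interpretation, not source text): by sending one
 * sequence number below snd_max (the rack_seq-- above) the peer is
 * induced to reply at once with a duplicate ACK that echoes our
 * timestamp, letting rack time the receive path on an otherwise idle
 * connection.
 */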
21297 if (tp->rfbuf_ts == 0 && in rack_output()
21298 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { in rack_output()
21299 tp->rfbuf_ts = ms_cts; in rack_output()
21302 if (tp->t_flags & TF_SACK_PERMIT) { in rack_output()
21305 else if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
21306 tp->rcv_numsacks > 0) { in rack_output()
21308 to.to_nsacks = tp->rcv_numsacks; in rack_output()
21309 to.to_sacks = (u_char *)tp->sackblks; in rack_output()
21313 /* TCP-MD5 (RFC2385). */ in rack_output()
21314 if (tp->t_flags & TF_SIGNATURE) in rack_output()
21324 if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie && in rack_output()
21328 if (tp->t_port) { in rack_output()
21334 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
21335 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
21336 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
21349 if (inp->inp_options) in rack_output()
21350 ipoptlen = inp->inp_options->m_len - in rack_output()
21363 if (len + optlen + ipoptlen > tp->t_maxseg) { in rack_output()
21370 if_hw_tsomax = tp->t_tsomax; in rack_output()
21371 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_output()
21372 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_output()
21382 max_len = (if_hw_tsomax - hdrlen - in rack_output()
21397 max_len = (tp->t_maxseg - optlen); in rack_output()
21402 len -= moff; in rack_output()
21419 if (tp->t_flags & TF_NEEDFIN) { in rack_output()
21424 if (optlen + ipoptlen >= tp->t_maxseg) { in rack_output()
21438 len = tp->t_maxseg - optlen - ipoptlen; in rack_output()
21470 if ((sbused(sb) == (tp->snd_max - tp->snd_una)) && in rack_output()
21471 ((tp->snd_max - tp->snd_una) <= segsiz)) { in rack_output()
21480 * the peer wait for the delayed-ack timer to run off in rack_output()
21492 hw_tls = tp->t_nic_ktls_xmit != 0; in rack_output()
21521 m->m_data += max_linkhdr; in rack_output()
21522 m->m_len = hdrlen; in rack_output()
21531 if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) { in rack_output()
21541 m->m_len += len; in rack_output()
21556 m->m_next = tcp_m_copym( in rack_output()
21564 if (len <= (tp->t_maxseg - optlen)) { in rack_output()
21573 if (m->m_next == NULL) { in rack_output()
21582 if (rsm && (rsm->r_flags & RACK_TLP)) { in rack_output()
21590 tp->t_sndrexmitpack++; in rack_output()
21595 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_output()
21602 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_output()
21620 if (tp->t_flags & TF_ACKNOW) in rack_output()
21639 m->m_data += max_linkhdr; in rack_output()
21640 m->m_len = hdrlen; in rack_output()
21643 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_output()
21647 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21650 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21654 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_output()
21656 th = rack->r_ctl.fsb.th; in rack_output()
21657 udp = rack->r_ctl.fsb.udp; in rack_output()
21661 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21664 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21665 udp->uh_ulen = htons(ulen); in rack_output()
21671 if (tp->t_port) { in rack_output()
21673 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21674 udp->uh_dport = tp->t_port; in rack_output()
21675 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_output()
21676 udp->uh_ulen = htons(ulen); in rack_output()
21680 tcpip_fillheaders(inp, tp->t_port, ip6, th); in rack_output()
21686 if (tp->t_port) { in rack_output()
21688 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_output()
21689 udp->uh_dport = tp->t_port; in rack_output()
21690 ulen = hdrlen + len - sizeof(struct ip); in rack_output()
21691 udp->uh_ulen = htons(ulen); in rack_output()
21695 tcpip_fillheaders(inp, tp->t_port, ip, th); in rack_output()
21704 if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) { in rack_output()
21708 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_output()
21709 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_output()
21711 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_output()
21712 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_output()
21713 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_output()
21716 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_output()
21717 ip6->ip6_flow |= htonl(ect << 20); in rack_output()
21723 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_output()
21724 ip->ip_tos |= ect; in rack_output()
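/*
 * Editor's sketch of the ECN marking above: the ECT codepoint occupies
 * the low two bits of the IPv4 TOS byte, and bits 20-21 of the IPv6
 * flow word (the traffic class), hence the "<< 20" shift.
 */
#include <stdint.h>

#define ECN_BITS 0x3u

static inline uint8_t
ipv4_set_ect(uint8_t tos, uint8_t ect)
{
	return ((uint8_t)((tos & ~ECN_BITS) | (ect & ECN_BITS)));
}

/* flow is in host byte order here; the kernel applies htonl() around it. */
static inline uint32_t
ipv6_set_ect(uint32_t flow, uint8_t ect)
{
	return ((flow & ~(ECN_BITS << 20)) |
	    ((uint32_t)(ect & ECN_BITS) << 20));
}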
21728 th->th_seq = htonl(rack_seq); in rack_output()
21729 th->th_ack = htonl(tp->rcv_nxt); in rack_output()
21739 if (recwin < (long)(so->so_rcv.sb_hiwat / 4) && in rack_output()
21743 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) && in rack_output()
21744 recwin < (long)(tp->rcv_adv - tp->rcv_nxt)) in rack_output()
21745 recwin = (long)(tp->rcv_adv - tp->rcv_nxt); in rack_output()
21754 th->th_win = htons((u_short) in rack_output()
21755 (min(sbspace(&so->so_rcv), TCP_MAXWIN))); in rack_output()
21758 recwin = roundup2(recwin, 1 << tp->rcv_scale); in rack_output()
21759 th->th_win = htons((u_short)(recwin >> tp->rcv_scale)); in rack_output()
21762 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0 in rack_output()
21769 if (th->th_win == 0) { in rack_output()
21770 tp->t_sndzerowin++; in rack_output()
21771 tp->t_flags |= TF_RXWIN0SENT; in rack_output()
21773 tp->t_flags &= ~TF_RXWIN0SENT; in rack_output()
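/*
 * Editor's sketch of the advertised-window encoding above: the 16-bit
 * th_win field carries recwin >> rcv_scale, so recwin is first rounded
 * up to a multiple of (1 << rcv_scale) to avoid under-advertising.
 */
#include <stdint.h>

static inline uint16_t
encode_win(uint32_t recwin, uint8_t rcv_scale)
{
	uint32_t unit = 1u << rcv_scale;

	recwin = (recwin + unit - 1) & ~(unit - 1);	/* roundup2() */
	return ((uint16_t)(recwin >> rcv_scale));
}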
21774 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_output()
21776 if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) { in rack_output()
21780 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_output()
21800 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21803 udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_output()
21807 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_output()
21813 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_output()
21823 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_output()
21838 if (tp->t_port) { in rack_output()
21839 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_output()
21840 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21841 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_output()
21842 th->th_sum = htons(0); in rack_output()
21845 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_output()
21846 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21847 th->th_sum = in6_cksum_pseudo(ip6, in rack_output()
21858 if (tp->t_port) { in rack_output()
21859 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_output()
21860 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_output()
21861 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21862 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_output()
21863 th->th_sum = htons(0); in rack_output()
21866 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_output()
21867 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_output()
21868 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_output()
21869 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_output()
21873 KASSERT(ip->ip_v == IPVERSION, in rack_output()
21874 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_output()
21887 KASSERT(len > tp->t_maxseg - optlen, in rack_output()
21889 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_output()
21890 m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen; in rack_output()
21900 if ((rack->r_ctl.crte != NULL) && in rack_output()
21901 (rack->rc_hw_nobuf == 0) && in rack_output()
21906 if (tcp_bblogging_on(rack->rc_tp)) { in rack_output()
21910 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_output()
21911 if (rack->rack_no_prr) in rack_output()
21914 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_output()
21915 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_output()
21916 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_output()
21919 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_output()
21920 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_output()
21922 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_output()
21925 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_output()
21926 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_output()
21928 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_output()
21942 log.u_bbr.pkts_out = tp->t_maxseg; in rack_output()
21944 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_output()
21945 if (rsm && (rsm->r_rtr_cnt > 0)) { in rack_output()
21950 log.u_bbr.flex5 = rsm->r_fas; in rack_output()
21951 log.u_bbr.bbr_substate = rsm->r_bas; in rack_output()
21959 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_output()
21966 log.u_bbr.delRate = rsm->r_flags; in rack_output()
21968 log.u_bbr.delRate |= rack->r_must_retran; in rack_output()
21972 log.u_bbr.delRate = rack->r_must_retran; in rack_output()
21976 lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK, in rack_output()
21987 * m->m_pkthdr.len should have been set before cksum calculation, in rack_output()
21998 rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL); in rack_output()
22005 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_output()
22007 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_output()
22008 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22010 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22012 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22018 inp->in6p_outputopts, in rack_output()
22019 &inp->inp_route6, in rack_output()
22023 if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL) in rack_output()
22024 mtu = inp->inp_route6.ro_nh->nh_mtu; in rack_output()
22032 ip->ip_len = htons(m->m_pkthdr.len); in rack_output()
22034 if (inp->inp_vflag & INP_IPV6PROTO) in rack_output()
22035 ip->ip_ttl = in6_selecthlim(inp, NULL); in rack_output()
22037 rack->r_ctl.fsb.hoplimit = ip->ip_ttl; in rack_output()
22048 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_output()
22049 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_output()
22050 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_output()
22051 ip->ip_off |= htons(IP_DF); in rack_output()
22054 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_output()
22057 if (tp->t_state == TCPS_SYN_SENT) in rack_output()
22064 inp->inp_options, in rack_output()
22068 &inp->inp_route, in rack_output()
22071 if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL) in rack_output()
22072 mtu = inp->inp_route.ro_nh->nh_mtu; in rack_output()
22076 lgb->tlb_errno = error; in rack_output()
22094 rack->pcm_in_progress = 1; in rack_output()
22095 rack->pcm_needed = 0; in rack_output()
22096 rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag); in rack_output()
22099 if (rack->lt_bw_up == 0) { in rack_output()
22100 rack->r_ctl.lt_timemark = tcp_tv_to_lusectick(&tv); in rack_output()
22101 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22102 rack->lt_bw_up = 1; in rack_output()
22103 } else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) { in rack_output()
22110 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_output()
22111 rack->r_ctl.lt_seq = tp->snd_una; in rack_output()
22113 if (tmark > rack->r_ctl.lt_timemark) { in rack_output()
22114 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_output()
22115 rack->r_ctl.lt_timemark = tmark; in rack_output()
22119 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_output()
22123 rack->rc_last_sent_tlp_past_cumack = 0; in rack_output()
22124 rack->rc_last_sent_tlp_seq_valid = 1; in rack_output()
22125 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_output()
22126 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_output()
22128 if (rack->rc_hw_nobuf) { in rack_output()
22129 rack->rc_hw_nobuf = 0; in rack_output()
22130 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22131 rack->r_early = 0; in rack_output()
22132 rack->r_late = 0; in rack_output()
22133 rack->r_ctl.rc_agg_early = 0; in rack_output()
22137 rack->rc_gp_saw_rec = 1; in rack_output()
22139 if (cwnd_to_use > tp->snd_ssthresh) { in rack_output()
22141 rack->rc_gp_saw_ca = 1; in rack_output()
22144 rack->rc_gp_saw_ss = 1; in rack_output()
22147 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22148 (tp->t_flags & TF_SACK_PERMIT) && in rack_output()
22149 tp->rcv_numsacks > 0) in rack_output()
22159 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_output()
22164 if ((rack->rack_no_prr == 0) && in rack_output()
22167 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_output()
22168 rack->r_ctl.rc_prr_sndcnt -= len; in rack_output()
22170 rack->r_ctl.rc_prr_sndcnt = 0; in rack_output()
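/*
 * Editor's sketch of the PRR bookkeeping above: the bytes just sent are
 * charged against the proportional-rate-reduction budget, saturating at
 * zero rather than wrapping.
 */
#include <stdint.h>

static inline uint32_t
prr_charge(uint32_t prr_sndcnt, uint32_t sent)
{
	return ((prr_sndcnt >= sent) ? (prr_sndcnt - sent) : 0);
}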
22176 rsm->r_flags |= RACK_TLP; in rack_output()
22179 rsm->r_flags &= ~RACK_TLP; in rack_output()
22183 (tp->snd_una == tp->snd_max)) in rack_output()
22184 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_output()
22191 tcp_seq startseq = tp->snd_max; in rack_output()
22195 rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start; in rack_output()
22206 rack->rc_tlp_in_progress = 0; in rack_output()
22207 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_output()
22215 rack->rc_tlp_in_progress = 1; in rack_output()
22216 rack->r_ctl.rc_tlp_cnt_out++; in rack_output()
22224 if ((tp->snd_una == tp->snd_max) && (len > 0)) { in rack_output()
22230 tp->t_acktime = ticks; in rack_output()
22237 ((tp->t_flags & TF_SENTSYN) == 0)) { in rack_output()
22238 tp->snd_max++; in rack_output()
22239 tp->t_flags |= TF_SENTSYN; in rack_output()
22242 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_output()
22243 tp->snd_max++; in rack_output()
22244 tp->t_flags |= TF_SENTFIN; in rack_output()
22247 tp->snd_max += len; in rack_output()
22248 if (rack->rc_new_rnd_needed) { in rack_output()
22249 rack_new_round_starts(tp, rack, tp->snd_max); in rack_output()
22257 if (tp->t_rtttime == 0) { in rack_output()
22258 tp->t_rtttime = ticks; in rack_output()
22259 tp->t_rtseq = startseq; in rack_output()
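/*
 * Editor's sketch: the t_rtttime/t_rtseq pair above is the classic
 * single-sample RTT timer -- start the clock only when no sample is in
 * progress and remember which sequence it times.
 */
#include <stdint.h>

struct rtt_timer { uint32_t start_ticks; uint32_t seq; };

static inline void
rtt_maybe_start(struct rtt_timer *rt, uint32_t now_ticks, uint32_t startseq)
{
	if (rt->start_ticks == 0) {	/* no sample in flight */
		rt->start_ticks = now_ticks;
		rt->seq = startseq;
	}
}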
22263 ((tp->t_flags & TF_GPUTINPROG) == 0)) in rack_output()
22274 if (rack->r_fast_output && len) { in rack_output()
22275 if (rack->r_ctl.fsb.left_to_send > len) in rack_output()
22276 rack->r_ctl.fsb.left_to_send -= len; in rack_output()
22278 rack->r_ctl.fsb.left_to_send = 0; in rack_output()
22279 if (rack->r_ctl.fsb.left_to_send < segsiz) in rack_output()
22280 rack->r_fast_output = 0; in rack_output()
22281 if (rack->r_fast_output) { in rack_output()
22282 rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off); in rack_output()
22283 rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len; in rack_output()
22284 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m); in rack_output()
22291 ((pace_max_seg - len) > segsiz)) { in rack_output()
22299 n_len = (orig_len - len); in rack_output()
22300 orig_len -= len; in rack_output()
22301 pace_max_seg -= len; in rack_output()
22303 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22304 /* Re-lock for the next spin */ in rack_output()
22311 ((orig_len - len) > segsiz)) { in rack_output()
22319 n_len = (orig_len - len); in rack_output()
22320 orig_len -= len; in rack_output()
22322 sb_offset = tp->snd_max - tp->snd_una; in rack_output()
22323 /* Re-lock for the next spin */ in rack_output()
22331 rack->r_ctl.rc_agg_delayed = 0; in rack_output()
22332 rack->r_early = 0; in rack_output()
22333 rack->r_late = 0; in rack_output()
22334 rack->r_ctl.rc_agg_early = 0; in rack_output()
22349 tp->t_softerror = error; in rack_output()
22352 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22353 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22354 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22364 if (rack->r_ctl.crte != NULL) { in rack_output()
22365 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_output()
22366 if (tcp_bblogging_on(rack->rc_tp)) in rack_output()
22369 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_output()
22370 slot = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_output()
22371 if (rack->rc_enobuf < 0x7f) in rack_output()
22372 rack->rc_enobuf++; in rack_output()
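/*
 * Editor's sketch of the ENOBUFS backoff above: each consecutive
 * buffer-exhaustion error stretches the next pacing slot by another
 * millisecond (HPTS_USEC_IN_MSEC == 1000), capped by the counter.
 */
#include <stdint.h>

static inline uint32_t
enobuf_slot_us(uint8_t enobuf_cnt)
{
	return ((1u + enobuf_cnt) * 1000u);
}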
22375 if (rack->r_ctl.crte != NULL) { in rack_output()
22377 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_output()
22391 tp->t_flags &= ~TF_TSO; in rack_output()
22395 saved_mtu = tp->t_maxseg; in rack_output()
22396 tcp_mss_update(tp, -1, mtu, NULL, NULL); in rack_output()
22397 if (saved_mtu > tp->t_maxseg) { in rack_output()
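/*
 * Editor's note: on EMSGSIZE the route's updated MTU is folded in via
 * tcp_mss_update(); only if t_maxseg actually shrank is an immediate
 * resend warranted, since the segment just built was oversized.
 */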
22405 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22406 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22407 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22418 if (TCPS_HAVERCVDSYN(tp->t_state)) { in rack_output()
22419 tp->t_softerror = error; in rack_output()
22428 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22429 tp->tcp_cnt_counters[SND_OUT_FAIL]++; in rack_output()
22430 tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val); in rack_output()
22437 rack->rc_enobuf = 0; in rack_output()
22438 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_output()
22439 rack->r_ctl.retran_during_recovery += len; in rack_output()
22448 if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv)) in rack_output()
22449 tp->rcv_adv = tp->rcv_nxt + recwin; in rack_output()
22451 tp->last_ack_sent = tp->rcv_nxt; in rack_output()
22452 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_output()
22482 rack->r_ent_rec_ns = 0; in rack_output()
22483 if (rack->r_must_retran) { in rack_output()
22485 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_output()
22486 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22490 rack->r_must_retran = 0; in rack_output()
22491 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22493 } else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22498 rack->r_must_retran = 0; in rack_output()
22499 rack->r_ctl.rc_out_at_rto = 0; in rack_output()
22502 rack->r_ctl.fsb.recwin = recwin; in rack_output()
22503 if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) && in rack_output()
22504 SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) { in rack_output()
22509 tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY); in rack_output()
22519 rack->r_fsb_inited && in rack_output()
22520 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22521 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22522 (rack->r_must_retran == 0) && in rack_output()
22523 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22526 ((orig_len - len) >= segsiz) && in rack_output()
22533 rack->r_fast_output = 0; in rack_output()
22547 (rack->r_must_retran == 0) && in rack_output()
22548 rack->r_fsb_inited && in rack_output()
22549 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_output()
22550 ((IN_RECOVERY(tp->t_flags)) == 0) && in rack_output()
22551 ((tp->t_flags & TF_NEEDFIN) == 0) && in rack_output()
22554 ((orig_len - len) >= segsiz) && in rack_output()
22560 if (rack->r_fast_output) { in rack_output()
22574 if (SEQ_GT(tp->snd_max, tp->snd_nxt)) in rack_output()
22575 tp->snd_nxt = tp->snd_max; in rack_output()
22578 crtsc = get_cyclecount() - ts_val; in rack_output()
22580 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22581 tp->tcp_cnt_counters[SND_OUT_DATA]++; in rack_output()
22582 tp->tcp_proc_time[SND_OUT_DATA] += crtsc; in rack_output()
22583 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz); in rack_output()
22586 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_output()
22587 tp->tcp_cnt_counters[SND_OUT_ACK]++; in rack_output()
22588 tp->tcp_proc_time[SND_OUT_ACK] += crtsc; in rack_output()
22603 orig_val = rack->r_ctl.rc_pace_max_segs; in rack_update_seg()
22604 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_update_seg()
22605 if (orig_val != rack->r_ctl.rc_pace_max_segs) in rack_update_seg()
22618 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_mtu_change()
22619 if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) { in rack_mtu_change()
22628 rack->r_fast_output = 0; in rack_mtu_change()
22629 rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp, in rack_mtu_change()
22630 rack->r_ctl.rc_sacked); in rack_mtu_change()
22631 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_mtu_change()
22632 rack->r_must_retran = 1; in rack_mtu_change()
22634 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mtu_change()
22635 rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG); in rack_mtu_change()
22638 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_mtu_change()
22640 tp->snd_nxt = tp->snd_max; in rack_mtu_change()
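/*
 * Editor's sketch of the marking above: after a PMTU change every
 * outstanding map entry is flagged for mandatory retransmission via a
 * plain tailq walk (illustrative types, not the kernel's):
 */
#include <stdint.h>
#include <sys/queue.h>

#define SEG_MUST_RXT 0x1u

struct seg {
	TAILQ_ENTRY(seg) link;
	uint32_t flags;
};
TAILQ_HEAD(seg_head, seg);

static void
mark_all_must_rxt(struct seg_head *h)
{
	struct seg *s;

	TAILQ_FOREACH(s, h, link)
		s->flags |= SEG_MUST_RXT;
}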
22646 if (rack->dgp_on == 1) in rack_set_dgp()
22648 if ((rack->use_fixed_rate == 1) && in rack_set_dgp()
22649 (rack->rc_always_pace == 1)) { in rack_set_dgp()
22656 if (rack->rc_always_pace == 1) { in rack_set_dgp()
22661 rack->r_ctl.pacing_method |= RACK_DGP_PACING; in rack_set_dgp()
22662 rack->rc_fillcw_apply_discount = 0; in rack_set_dgp()
22663 rack->dgp_on = 1; in rack_set_dgp()
22664 rack->rc_always_pace = 1; in rack_set_dgp()
22665 rack->rc_pace_dnd = 1; in rack_set_dgp()
22666 rack->use_fixed_rate = 0; in rack_set_dgp()
22667 if (rack->gp_ready) in rack_set_dgp()
22669 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_dgp()
22670 rack->rack_attempt_hdwr_pace = 0; in rack_set_dgp()
22672 rack->full_size_rxt = 1; in rack_set_dgp()
22673 rack->shape_rxt_to_pacing_min = 0; in rack_set_dgp()
22675 rack->r_use_cmp_ack = 1; in rack_set_dgp()
22676 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) && in rack_set_dgp()
22677 rack->r_use_cmp_ack) in rack_set_dgp()
22678 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_dgp()
22680 rack->rack_enable_scwnd = 1; in rack_set_dgp()
22682 rack->rc_gp_dyn_mul = 1; in rack_set_dgp()
22684 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_set_dgp()
22686 rack->r_rr_config = 3; in rack_set_dgp()
22688 rack->r_ctl.rc_no_push_at_mrtt = 2; in rack_set_dgp()
22690 rack->rc_pace_to_cwnd = 1; in rack_set_dgp()
22691 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_dgp()
22692 rack->rtt_limit_mul = 0; in rack_set_dgp()
22694 rack->rack_no_prr = 1; in rack_set_dgp()
22696 rack->r_limit_scw = 1; in rack_set_dgp()
22698 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_set_dgp()
22720 * fill-cw the same settings that profile5 does in rack_set_profile()
22721 * to replace DGP. It then takes max(dgp-rate, discounted fill-cw rate). in rack_set_profile()
22723 rack->rc_fillcw_apply_discount = 1; in rack_set_profile()
22726 if (rack->rc_always_pace == 1) { in rack_set_profile()
22730 rack->dgp_on = 0; in rack_set_profile()
22731 rack->rc_hybrid_mode = 0; in rack_set_profile()
22732 rack->use_fixed_rate = 0; in rack_set_profile()
22736 rack->rc_pace_to_cwnd = 1; in rack_set_profile()
22738 rack->rc_pace_to_cwnd = 0; in rack_set_profile()
22741 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_set_profile()
22742 rack->rc_always_pace = 1; in rack_set_profile()
22743 if (rack->rack_hibeta) in rack_set_profile()
22746 rack->rc_always_pace = 0; in rack_set_profile()
22749 rack->rc_rack_tmr_std_based = 1; in rack_set_profile()
22753 rack->rc_rack_use_dsack = 1; in rack_set_profile()
22756 rack->r_use_cmp_ack = 1; in rack_set_profile()
22758 rack->r_use_cmp_ack = 0; in rack_set_profile()
22760 rack->rack_no_prr = 1; in rack_set_profile()
22762 rack->rack_no_prr = 0; in rack_set_profile()
22764 rack->rc_gp_no_rec_chg = 1; in rack_set_profile()
22766 rack->rc_gp_no_rec_chg = 0; in rack_set_profile()
22767 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) { in rack_set_profile()
22768 rack->r_mbuf_queue = 1; in rack_set_profile()
22769 if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state)) in rack_set_profile()
22770 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_profile()
22771 rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22773 rack->r_mbuf_queue = 0; in rack_set_profile()
22774 rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_set_profile()
22777 rack->rack_enable_scwnd = 1; in rack_set_profile()
22779 rack->rack_enable_scwnd = 0; in rack_set_profile()
22782 rack->rc_gp_dyn_mul = 1; in rack_set_profile()
22784 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_set_profile()
22786 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_set_profile()
22787 rack->rc_gp_dyn_mul = 0; in rack_set_profile()
22789 rack->r_rr_config = 0; in rack_set_profile()
22790 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_set_profile()
22791 rack->rc_pace_fill_if_rttin_range = 0; in rack_set_profile()
22792 rack->rtt_limit_mul = 0; in rack_set_profile()
22795 rack->rack_hdw_pace_ena = 1; in rack_set_profile()
22797 rack->rack_hdw_pace_ena = 0; in rack_set_profile()
22799 rack->rack_no_prr = 1; in rack_set_profile()
22801 rack->rack_no_prr = 0; in rack_set_profile()
22803 rack->r_limit_scw = 1; in rack_set_profile()
22805 rack->r_limit_scw = 0; in rack_set_profile()
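Editor's note: the profile code above toggles many knobs as one bundle; from user space the bundle is selected with a single setsockopt() call. A hedged sketch: TCP_RACK_PROFILE is the FreeBSD option name for this, but which numeric profile maps to which behavior (DGP, fill-cw with discount, and so on) varies by version, so treat the value passed below as an assumption:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <err.h>

static void
set_rack_profile(int fd, int profile)
{
	/* One call flips the whole bundle of pacing/queueing settings. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PROFILE,
	    &profile, sizeof(profile)) == -1)
		err(1, "setsockopt(TCP_RACK_PROFILE)");
}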
22821 * No space available -- fail out. in rack_add_deferred_option()
22825 dol->optname = sopt_name; in rack_add_deferred_option()
22826 dol->optval = loptval; in rack_add_deferred_option()
22827 TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next); in rack_add_deferred_option()
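Editor's note: rack_add_deferred_option() queues a (name, value) pair on a TAILQ when an option cannot be applied yet; rack_apply_deferred_options() further down drains that list in FIFO order with the _SAFE iterator because it frees entries while walking. A self-contained user-space sketch of the same pattern (struct and function names are mine):

#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct def_opt {
	TAILQ_ENTRY(def_opt) next;
	int		optname;
	uint64_t	optval;
};

TAILQ_HEAD(def_opt_head, def_opt);

static int
defer_option(struct def_opt_head *head, int optname, uint64_t optval)
{
	struct def_opt *dol;

	dol = malloc(sizeof(*dol));
	if (dol == NULL)
		return (-1);	/* no space -- fail out */
	dol->optname = optname;
	dol->optval = optval;
	TAILQ_INSERT_TAIL(head, dol, next);
	return (0);
}

static void
apply_deferred(struct def_opt_head *head)
{
	struct def_opt *dol, *sdol;

	/* _SAFE variant: entries are freed while the list is walked. */
	TAILQ_FOREACH_SAFE(dol, head, next, sdol) {
		TAILQ_REMOVE(head, dol, next);
		printf("applying opt %d = %llu\n", dol->optname,
		    (unsigned long long)dol->optval);
		free(dol);
	}
}

int
main(void)
{
	struct def_opt_head head = TAILQ_HEAD_INITIALIZER(head);

	defer_option(&head, 1, 100);
	defer_option(&head, 2, 200);
	apply_deferred(&head);	/* applied in insertion order */
	return (0);
}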
22843 rack->use_fixed_rate = 0; in process_hybrid_pacing()
22844 rack->r_ctl.rc_fixed_pacing_rate_rec = 0; in process_hybrid_pacing()
22845 rack->r_ctl.rc_fixed_pacing_rate_ca = 0; in process_hybrid_pacing()
22846 rack->r_ctl.rc_fixed_pacing_rate_ss = 0; in process_hybrid_pacing()
22848 sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusectick(&tv), 0); in process_hybrid_pacing()
22850 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22852 seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc; in process_hybrid_pacing()
22857 hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK; in process_hybrid_pacing()
22859 seq = sft->start_seq; in process_hybrid_pacing()
22860 if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) { in process_hybrid_pacing()
22862 if (rack->rc_hybrid_mode) { in process_hybrid_pacing()
22864 rack->rc_tp->tcp_hybrid_stop++; in process_hybrid_pacing()
22869 if (rack->dgp_on == 0) { in process_hybrid_pacing()
22877 rack->rc_tp->tcp_hybrid_error++; in process_hybrid_pacing()
22886 if (rack->rc_hybrid_mode == 0) { in process_hybrid_pacing()
22889 rack->r_ctl.pacing_method |= RACK_REG_PACING; in process_hybrid_pacing()
22890 rack->rc_hybrid_mode = 1; in process_hybrid_pacing()
22894 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) { in process_hybrid_pacing()
22899 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in process_hybrid_pacing()
22903 sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET; in process_hybrid_pacing()
22904 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR) in process_hybrid_pacing()
22905 sft->cspr = hybrid->cspr; in process_hybrid_pacing()
22907 sft->cspr = 0; in process_hybrid_pacing()
22908 if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS) in process_hybrid_pacing()
22909 sft->hint_maxseg = hybrid->hint_maxseg; in process_hybrid_pacing()
22911 sft->hint_maxseg = 0; in process_hybrid_pacing()
22912 rack->rc_tp->tcp_hybrid_start++; in process_hybrid_pacing()
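Editor's note: the hybrid handler keeps only the flag bits a user may set (TCP_HYBRID_PACING_USER_MASK) and honors cspr and hint_maxseg only when the matching flag is present, zeroing them otherwise. A sketch of that flags-gated copy; the structs below merely mirror the fields visible above and are not the kernel definitions, and the flag values are placeholders:

#include <stdint.h>

#define HP_CSPR		0x0001	/* placeholder flag values, for illustration */
#define HP_H_MS		0x0002
#define HP_USER_MASK	(HP_CSPR | HP_H_MS)

struct hybrid_req {	/* stand-in for the user's request */
	uint32_t	hybrid_flags;
	uint64_t	cspr;
	uint32_t	hint_maxseg;
};

struct hybrid_state {	/* stand-in for the per-range state (sft) */
	uint32_t	hybrid_flags;
	uint64_t	cspr;
	uint32_t	hint_maxseg;
};

static void
hybrid_apply(struct hybrid_state *sft, struct hybrid_req *req)
{
	/* Strip anything outside the user-settable mask. */
	req->hybrid_flags &= HP_USER_MASK;
	sft->hybrid_flags = req->hybrid_flags;
	/* Optional fields count only when their flag is present. */
	sft->cspr = (req->hybrid_flags & HP_CSPR) ? req->cspr : 0;
	sft->hint_maxseg = (req->hybrid_flags & HP_H_MS) ? req->hint_maxseg : 0;
}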
22924 si->bytes_transmitted = tp->t_sndbytes; in rack_stack_information()
22925 si->bytes_retransmitted = tp->t_snd_rxt_bytes; in rack_stack_information()
22956 rack->rc_rack_tmr_std_based = 1; in rack_process_option()
22958 rack->rc_rack_tmr_std_based = 0; in rack_process_option()
22961 rack->rc_rack_use_dsack = 1; in rack_process_option()
22963 rack->rc_rack_use_dsack = 0; in rack_process_option()
22970 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_process_option()
22973 rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR; in rack_process_option()
22975 rack->r_ctl.pace_len_divisor = optval; in rack_process_option()
22981 rack->rack_hibeta = 1; in rack_process_option()
22987 rack->r_ctl.saved_hibeta = optval; in rack_process_option()
22988 if (rack->rc_pacing_cc_set) in rack_process_option()
22990 rack->r_ctl.rc_saved_beta = optval; in rack_process_option()
22992 if (rack->rc_pacing_cc_set == 0) in rack_process_option()
22995 rack->rack_hibeta = 0; in rack_process_option()
22996 if (rack->rc_pacing_cc_set) in rack_process_option()
23005 rack->r_ctl.timer_slop = optval; in rack_process_option()
23006 if (rack->rc_tp->t_srtt) { in rack_process_option()
23011 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_option()
23013 rack->r_ctl.timer_slop); in rack_process_option()
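Editor's note: when the timer slop changes and an SRTT estimate already exists, t_rxtcur is reseeded immediately from RACK_REXMTVAL() plus the new slop instead of waiting for the next RTT sample. A sketch of the arithmetic I assume RACK_TCPT_RANGESET() performs -- clamp the classic srtt + 4*rttvar value, then add the slop; the exact macro may differ:

#include <stdint.h>

static uint32_t
rack_rto(uint32_t srtt, uint32_t rttvar, uint32_t rto_min, uint32_t rto_max,
    uint32_t slop)
{
	uint32_t rto = srtt + (rttvar << 2);	/* srtt + 4 * rttvar */

	if (rto < rto_min)
		rto = rto_min;
	if (rto > rto_max)
		rto = rto_max;
	return (rto + slop);
}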
23018 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_process_option()
23023 if (rack->rc_pacing_cc_set) { in rack_process_option()
23032 if (CC_ALGO(tp)->ctl_output != NULL) in rack_process_option()
23033 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_process_option()
23041 rack->r_ctl.rc_saved_beta_ecn = optval; in rack_process_option()
23047 if (rack->gp_ready) { in rack_process_option()
23052 rack->defer_options = 1; in rack_process_option()
23054 rack->defer_options = 0; in rack_process_option()
23059 rack->r_ctl.req_measurements = optval; in rack_process_option()
23066 rack->r_use_labc_for_rec = 1; in rack_process_option()
23068 rack->r_use_labc_for_rec = 0; in rack_process_option()
23073 rack->rc_labc = optval; in rack_process_option()
23080 rack->r_up_only = 1; in rack_process_option()
23082 rack->r_up_only = 0; in rack_process_option()
23086 rack->r_ctl.fillcw_cap = loptval; in rack_process_option()
23090 if ((rack->dgp_on == 1) && in rack_process_option()
23091 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23103 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23105 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23107 rack->r_ctl.bw_rate_cap = loptval; in rack_process_option()
23114 if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) { in rack_process_option()
23122 rack->r_ctl.side_chan_dis_mask = optval; in rack_process_option()
23124 rack->r_ctl.side_chan_dis_mask = 0; in rack_process_option()
23132 if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) { in rack_process_option()
23135 } else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) { in rack_process_option()
23136 rack->r_use_cmp_ack = 1; in rack_process_option()
23137 rack->r_mbuf_queue = 1; in rack_process_option()
23138 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23140 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_process_option()
23141 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_process_option()
23146 rack->r_limit_scw = 1; in rack_process_option()
23148 rack->r_limit_scw = 0; in rack_process_option()
23156 rack->rc_pace_to_cwnd = 0; in rack_process_option()
23158 rack->rc_pace_to_cwnd = 1; in rack_process_option()
23163 rack->rc_pace_fill_if_rttin_range = 1; in rack_process_option()
23164 rack->rtt_limit_mul = optval; in rack_process_option()
23166 rack->rc_pace_fill_if_rttin_range = 0; in rack_process_option()
23167 rack->rtt_limit_mul = 0; in rack_process_option()
23173 rack->r_ctl.rc_no_push_at_mrtt = 0; in rack_process_option()
23175 rack->r_ctl.rc_no_push_at_mrtt = optval; in rack_process_option()
23182 rack->rack_enable_scwnd = 0; in rack_process_option()
23184 rack->rack_enable_scwnd = 1; in rack_process_option()
23187 /* Now do we use the LRO mbuf-queue feature */ in rack_process_option()
23189 if (optval || rack->r_use_cmp_ack) in rack_process_option()
23190 rack->r_mbuf_queue = 1; in rack_process_option()
23192 rack->r_mbuf_queue = 0; in rack_process_option()
23193 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23194 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23196 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23201 rack->rack_rec_nonrxt_use_cr = 0; in rack_process_option()
23203 rack->rack_rec_nonrxt_use_cr = 1; in rack_process_option()
23208 rack->rack_no_prr = 0; in rack_process_option()
23210 rack->rack_no_prr = 1; in rack_process_option()
23212 rack->no_prr_addback = 1; in rack_process_option()
23218 rack->cspr_is_fcc = 1; in rack_process_option()
23220 rack->cspr_is_fcc = 0; in rack_process_option()
23225 rack->rc_gp_dyn_mul = 0; in rack_process_option()
23227 rack->rc_gp_dyn_mul = 1; in rack_process_option()
23233 rack->r_ctl.rack_per_of_gp_ca = optval; in rack_process_option()
23246 rack->rack_tlp_threshold_use = optval; in rack_process_option()
23251 rack->r_ctl.rc_tlp_cwnd_reduce = optval; in rack_process_option()
23260 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23265 if (rack->rc_always_pace) { in rack_process_option()
23269 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23270 rack->rc_always_pace = 1; in rack_process_option()
23271 if (rack->rack_hibeta) in rack_process_option()
23279 if (rack->rc_always_pace == 1) { in rack_process_option()
23283 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_process_option()
23284 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_process_option()
23286 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_process_option()
23296 rack->r_ctl.init_rate = val; in rack_process_option()
23297 if (rack->rc_always_pace) in rack_process_option()
23306 rack->rc_force_max_seg = 1; in rack_process_option()
23308 rack->rc_force_max_seg = 0; in rack_process_option()
23312 rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval); in rack_process_option()
23318 if ((rack->dgp_on == 1) && in rack_process_option()
23319 (rack->r_ctl.pacing_method & RACK_DGP_PACING)) { in rack_process_option()
23321 * If we set a max-seg and are doing DGP then in rack_process_option()
23332 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_process_option()
23334 rack->r_ctl.pacing_method &= ~RACK_DGP_PACING; in rack_process_option()
23337 rack->rc_user_set_max_segs = optval; in rack_process_option()
23339 rack->rc_user_set_max_segs = MAX_USER_SET_SEG; in rack_process_option()
23345 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23349 if (rack->dgp_on) { in rack_process_option()
23357 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23358 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23359 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23360 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23361 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23362 rack->use_fixed_rate = 1; in rack_process_option()
23363 if (rack->rack_hibeta) in rack_process_option()
23366 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23367 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23368 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23375 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23379 if (rack->dgp_on) { in rack_process_option()
23387 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23388 if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0) in rack_process_option()
23389 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23390 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23391 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23392 rack->use_fixed_rate = 1; in rack_process_option()
23393 if (rack->rack_hibeta) in rack_process_option()
23396 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23397 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23398 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23405 if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) { in rack_process_option()
23409 if (rack->dgp_on) { in rack_process_option()
23417 rack->r_ctl.rc_fixed_pacing_rate_ca = optval; in rack_process_option()
23418 if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0) in rack_process_option()
23419 rack->r_ctl.rc_fixed_pacing_rate_ss = optval; in rack_process_option()
23420 if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0) in rack_process_option()
23421 rack->r_ctl.rc_fixed_pacing_rate_rec = optval; in rack_process_option()
23422 rack->use_fixed_rate = 1; in rack_process_option()
23423 if (rack->rack_hibeta) in rack_process_option()
23426 rack->r_ctl.rc_fixed_pacing_rate_ss, in rack_process_option()
23427 rack->r_ctl.rc_fixed_pacing_rate_ca, in rack_process_option()
23428 rack->r_ctl.rc_fixed_pacing_rate_rec, 0, 0, 8, in rack_process_option()
23433 rack->r_ctl.rack_per_of_gp_rec = optval; in rack_process_option()
23435 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23436 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23437 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23451 rack->r_ctl.rack_per_of_gp_ca = ca; in rack_process_option()
23453 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23454 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23455 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23469 rack->r_ctl.rack_per_of_gp_ss = ss; in rack_process_option()
23471 rack->r_ctl.rack_per_of_gp_ss, in rack_process_option()
23472 rack->r_ctl.rack_per_of_gp_ca, in rack_process_option()
23473 rack->r_ctl.rack_per_of_gp_rec, 0, 0, 1, in rack_process_option()
23479 rack->r_rr_config = optval; in rack_process_option()
23481 rack->r_rr_config = 0; in rack_process_option()
23485 rack->rc_pace_dnd = 1; in rack_process_option()
23487 rack->rc_pace_dnd = 0; in rack_process_option()
23492 if (rack->r_rack_hw_rate_caps == 0) in rack_process_option()
23493 rack->r_rack_hw_rate_caps = 1; in rack_process_option()
23497 rack->r_rack_hw_rate_caps = 0; in rack_process_option()
23504 rack->r_ctl.rack_per_upper_bound_ca = val; in rack_process_option()
23506 rack->r_ctl.rack_per_upper_bound_ss = val; in rack_process_option()
23511 rack->r_ctl.gp_rnd_thresh = optval & 0x0ff; in rack_process_option()
23513 rack->r_ctl.gate_to_fs = 1; in rack_process_option()
23515 rack->r_ctl.gate_to_fs = 0; in rack_process_option()
23518 rack->r_ctl.use_gp_not_last = 1; in rack_process_option()
23520 rack->r_ctl.use_gp_not_last = 0; in rack_process_option()
23527 rack->r_ctl.gp_gain_req = v; in rack_process_option()
23531 rack->rc_initial_ss_comp = 1; in rack_process_option()
23532 rack->r_ctl.gp_rnd_thresh = 0; in rack_process_option()
23537 rack->r_ctl.rc_split_limit = optval; in rack_process_option()
23542 if (rack->rack_hdrw_pacing == 0) { in rack_process_option()
23543 rack->rack_hdw_pace_ena = 1; in rack_process_option()
23544 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23548 rack->rack_hdw_pace_ena = 0; in rack_process_option()
23550 if (rack->r_ctl.crte != NULL) { in rack_process_option()
23551 rack->rack_hdrw_pacing = 0; in rack_process_option()
23552 rack->rack_attempt_hdwr_pace = 0; in rack_process_option()
23553 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_process_option()
23554 rack->r_ctl.crte = NULL; in rack_process_option()
23563 rack->r_ctl.rc_prr_sendalot = optval; in rack_process_option()
23566 /* Minimum time between rack t-o's in ms */ in rack_process_option()
23568 rack->r_ctl.rc_min_to = optval; in rack_process_option()
23573 rack->r_ctl.rc_early_recovery_segs = optval; in rack_process_option()
23578 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_process_option()
23580 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_process_option()
23582 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_process_option()
23584 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_process_option()
23592 rack->r_ctl.rc_reorder_shift = optval; in rack_process_option()
23599 rack->r_ctl.rc_reorder_fade = optval; in rack_process_option()
23605 rack->r_ctl.rc_tlp_threshold = optval; in rack_process_option()
23612 rack->use_rack_rr = 1; in rack_process_option()
23614 rack->use_rack_rr = 0; in rack_process_option()
23617 /* RACK added ms i.e. rack-rtt + reord + N */ in rack_process_option()
23619 rack->r_ctl.rc_pkt_delay = optval; in rack_process_option()
23624 tp->t_delayed_ack = 0; in rack_process_option()
23626 tp->t_delayed_ack = 1; in rack_process_option()
23627 if (tp->t_flags & TF_DELACK) { in rack_process_option()
23628 tp->t_flags &= ~TF_DELACK; in rack_process_option()
23629 tp->t_flags |= TF_ACKNOW; in rack_process_option()
23643 rack->r_ctl.rc_rate_sample_method = optval; in rack_process_option()
23648 rack->r_use_hpts_min = 1; in rack_process_option()
23650 * Must be between 2% and 80% to be a reduction, else in rack_process_option()
23654 rack->r_ctl.max_reduction = optval; in rack_process_option()
23657 rack->r_use_hpts_min = 0; in rack_process_option()
23662 rack->rc_gp_no_rec_chg = 1; in rack_process_option()
23664 rack->rc_gp_no_rec_chg = 0; in rack_process_option()
23669 rack->rc_skip_timely = 1; in rack_process_option()
23670 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_process_option()
23671 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_process_option()
23672 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_process_option()
23674 rack->rc_skip_timely = 0; in rack_process_option()
23679 rack->use_lesser_lt_bw = 0; in rack_process_option()
23680 rack->dis_lt_bw = 1; in rack_process_option()
23682 rack->use_lesser_lt_bw = 1; in rack_process_option()
23683 rack->dis_lt_bw = 0; in rack_process_option()
23685 rack->use_lesser_lt_bw = 0; in rack_process_option()
23686 rack->dis_lt_bw = 0; in rack_process_option()
23692 rack->rc_allow_data_af_clo = 1; in rack_process_option()
23694 rack->rc_allow_data_af_clo = 0; in rack_process_option()
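Editor's note: every case above is reached through setsockopt() on a connection already attached to this stack. A user-space sketch: TCP_FUNCTION_BLK and struct tcp_function_set are the standard FreeBSD interface for selecting a stack by name, and TCP_RACK_PACE_ALWAYS is one of the options handled above; error handling is trimmed to the essentials:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <err.h>

static void
use_rack_with_pacing(int fd)
{
	struct tcp_function_set fs;
	int one = 1;

	/* Move this connection onto the rack stack... */
	memset(&fs, 0, sizeof(fs));
	strlcpy(fs.function_set_name, "rack", sizeof(fs.function_set_name));
	if (setsockopt(fd, IPPROTO_TCP, TCP_FUNCTION_BLK, &fs,
	    sizeof(fs)) == -1)
		err(1, "setsockopt(TCP_FUNCTION_BLK)");

	/* ...then flip one of the rack-specific knobs handled above. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_RACK_PACE_ALWAYS,
	    &one, sizeof(one)) == -1)
		err(1, "setsockopt(TCP_RACK_PACE_ALWAYS)");
}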
23709 * apply a read-lock to the parent (we are already in rack_inherit()
23720 if (par->t_fb != tp->t_fb) { in rack_inherit()
23726 dest = (struct tcp_rack *)tp->t_fb_ptr; in rack_inherit()
23727 src = (struct tcp_rack *)par->t_fb_ptr; in rack_inherit()
23733 /* Now copy out anything we wish to inherit i.e. things in socket-options */ in rack_inherit()
23735 if ((src->dgp_on) && (dest->dgp_on == 0)) { in rack_inherit()
23741 if (dest->full_size_rxt != src->full_size_rxt) { in rack_inherit()
23742 dest->full_size_rxt = src->full_size_rxt; in rack_inherit()
23745 if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) { in rack_inherit()
23746 dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min; in rack_inherit()
23750 if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) { in rack_inherit()
23751 dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based; in rack_inherit()
23754 if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) { in rack_inherit()
23755 dest->rc_rack_use_dsack = src->rc_rack_use_dsack; in rack_inherit()
23759 if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) { in rack_inherit()
23760 dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor; in rack_inherit()
23764 if (src->rack_hibeta != dest->rack_hibeta) { in rack_inherit()
23766 if (src->rack_hibeta) { in rack_inherit()
23767 dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta; in rack_inherit()
23768 dest->rack_hibeta = 1; in rack_inherit()
23770 dest->rack_hibeta = 0; in rack_inherit()
23774 if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) { in rack_inherit()
23775 dest->r_ctl.timer_slop = src->r_ctl.timer_slop; in rack_inherit()
23779 if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) { in rack_inherit()
23780 dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn; in rack_inherit()
23785 if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) { in rack_inherit()
23786 dest->r_ctl.req_measurements = src->r_ctl.req_measurements; in rack_inherit()
23790 if (dest->r_up_only != src->r_up_only) { in rack_inherit()
23791 dest->r_up_only = src->r_up_only; in rack_inherit()
23795 if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) { in rack_inherit()
23796 dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap; in rack_inherit()
23800 if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) { in rack_inherit()
23801 dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap; in rack_inherit()
23806 if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) { in rack_inherit()
23807 dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask; in rack_inherit()
23811 if (dest->r_limit_scw != src->r_limit_scw) { in rack_inherit()
23812 dest->r_limit_scw = src->r_limit_scw; in rack_inherit()
23816 if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) { in rack_inherit()
23817 dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd; in rack_inherit()
23820 if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) { in rack_inherit()
23821 dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range; in rack_inherit()
23824 if (dest->rtt_limit_mul != src->rtt_limit_mul) { in rack_inherit()
23825 dest->rtt_limit_mul = src->rtt_limit_mul; in rack_inherit()
23829 if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) { in rack_inherit()
23830 dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt; in rack_inherit()
23834 if (dest->rack_enable_scwnd != src->rack_enable_scwnd) { in rack_inherit()
23835 dest->rack_enable_scwnd = src->rack_enable_scwnd; in rack_inherit()
23839 if (dest->r_use_cmp_ack != src->r_use_cmp_ack) { in rack_inherit()
23840 dest->r_use_cmp_ack = src->r_use_cmp_ack; in rack_inherit()
23844 if (dest->r_mbuf_queue != src->r_mbuf_queue) { in rack_inherit()
23845 dest->r_mbuf_queue = src->r_mbuf_queue; in rack_inherit()
23853 if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) { in rack_inherit()
23854 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_inherit()
23856 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_inherit()
23858 if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_inherit()
23859 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_inherit()
23862 if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) { in rack_inherit()
23863 dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr; in rack_inherit()
23867 if (dest->rack_no_prr != src->rack_no_prr) { in rack_inherit()
23868 dest->rack_no_prr = src->rack_no_prr; in rack_inherit()
23871 if (dest->no_prr_addback != src->no_prr_addback) { in rack_inherit()
23872 dest->no_prr_addback = src->no_prr_addback; in rack_inherit()
23876 if (dest->cspr_is_fcc != src->cspr_is_fcc) { in rack_inherit()
23877 dest->cspr_is_fcc = src->cspr_is_fcc; in rack_inherit()
23881 if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) { in rack_inherit()
23882 dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul; in rack_inherit()
23885 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23886 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23890 if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) { in rack_inherit()
23891 dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use; in rack_inherit()
23896 if (dest->r_ctl.init_rate != src->r_ctl.init_rate) { in rack_inherit()
23897 dest->r_ctl.init_rate = src->r_ctl.init_rate; in rack_inherit()
23901 if (dest->rc_force_max_seg != src->rc_force_max_seg) { in rack_inherit()
23902 dest->rc_force_max_seg = src->rc_force_max_seg; in rack_inherit()
23906 if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) { in rack_inherit()
23907 dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs; in rack_inherit()
23912 if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) { in rack_inherit()
23913 dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca; in rack_inherit()
23916 if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) { in rack_inherit()
23917 dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss; in rack_inherit()
23920 if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) { in rack_inherit()
23921 dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec; in rack_inherit()
23925 if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) { in rack_inherit()
23926 dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec; in rack_inherit()
23929 if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) { in rack_inherit()
23930 dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca; in rack_inherit()
23934 if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) { in rack_inherit()
23935 dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss; in rack_inherit()
23939 if (dest->r_rr_config != src->r_rr_config) { in rack_inherit()
23940 dest->r_rr_config = src->r_rr_config; in rack_inherit()
23944 if (dest->rc_pace_dnd != src->rc_pace_dnd) { in rack_inherit()
23945 dest->rc_pace_dnd = src->rc_pace_dnd; in rack_inherit()
23949 if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) { in rack_inherit()
23950 dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps; in rack_inherit()
23954 if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) { in rack_inherit()
23955 dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca; in rack_inherit()
23958 if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) { in rack_inherit()
23959 dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss; in rack_inherit()
23963 if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) { in rack_inherit()
23964 dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh; in rack_inherit()
23967 if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) { in rack_inherit()
23968 dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs; in rack_inherit()
23971 if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) { in rack_inherit()
23972 dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last; in rack_inherit()
23975 if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) { in rack_inherit()
23976 dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req; in rack_inherit()
23980 if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) { in rack_inherit()
23981 dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena; in rack_inherit()
23984 if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) { in rack_inherit()
23985 dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace; in rack_inherit()
23989 if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) { in rack_inherit()
23990 dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot; in rack_inherit()
23994 if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) { in rack_inherit()
23995 dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to; in rack_inherit()
23999 if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) { in rack_inherit()
24000 dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs; in rack_inherit()
24004 if (par->t_ccv.flags != tp->t_ccv.flags) { in rack_inherit()
24006 if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_inherit()
24007 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_inherit()
24009 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_inherit()
24011 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_inherit()
24013 tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH); in rack_inherit()
24017 if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) { in rack_inherit()
24018 dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift; in rack_inherit()
24022 if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) { in rack_inherit()
24023 dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade; in rack_inherit()
24027 if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) { in rack_inherit()
24028 dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold; in rack_inherit()
24032 if (dest->use_rack_rr != src->use_rack_rr) { in rack_inherit()
24033 dest->use_rack_rr = src->use_rack_rr; in rack_inherit()
24037 if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) { in rack_inherit()
24038 dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay; in rack_inherit()
24043 if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) { in rack_inherit()
24044 dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method; in rack_inherit()
24048 if (dest->r_use_hpts_min != src->r_use_hpts_min) { in rack_inherit()
24049 dest->r_use_hpts_min = src->r_use_hpts_min; in rack_inherit()
24052 if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) { in rack_inherit()
24053 dest->r_ctl.max_reduction = src->r_ctl.max_reduction; in rack_inherit()
24057 if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) { in rack_inherit()
24058 dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg; in rack_inherit()
24061 if (dest->rc_skip_timely != src->rc_skip_timely) { in rack_inherit()
24062 dest->rc_skip_timely = src->rc_skip_timely; in rack_inherit()
24066 if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) { in rack_inherit()
24067 dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo; in rack_inherit()
24071 if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) { in rack_inherit()
24072 dest->use_lesser_lt_bw = src->use_lesser_lt_bw; in rack_inherit()
24075 if (dest->dis_lt_bw != src->dis_lt_bw) { in rack_inherit()
24076 dest->dis_lt_bw = src->dis_lt_bw; in rack_inherit()
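Editor's note: rack_inherit() runs when a connection is accepted off a listener, copying each setting that differs so tuning done once on the listening socket carries over to every accepted connection. In practice that means setting options before accept(); which ones propagate is exactly the list of checks above. A hedged sketch using TCP_RACK_MIN_TO, corresponding to the inherited rc_min_to field above -- confirm the option name against your tcp.h:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <err.h>

static void
tune_listener(int lfd)
{
	int ms = 1;

	/* Inherited by each accepted connection via the stack's inherit hook. */
	if (setsockopt(lfd, IPPROTO_TCP, TCP_RACK_MIN_TO, &ms,
	    sizeof(ms)) == -1)
		err(1, "setsockopt(TCP_RACK_MIN_TO)");
}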
24089 TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) { in rack_apply_deferred_options()
24090 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_apply_deferred_options()
24092 s_optval = (uint32_t)dol->optval; in rack_apply_deferred_options()
24093 (void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL); in rack_apply_deferred_options()
24104 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_hw_tls_change()
24106 rack->r_ctl.fsb.hw_tls = 1; in rack_hw_tls_change()
24108 rack->r_ctl.fsb.hw_tls = 0; in rack_hw_tls_change()
24126 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_wake_check()
24127 if (rack->r_ctl.rc_hpts_flags) { in rack_wake_check()
24129 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT){ in rack_wake_check()
24133 if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) in rack_wake_check()
24135 } else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) { in rack_wake_check()
24139 if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) in rack_wake_check()
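Editor's note: rack_wake_check() compares microsecond timestamps with TSTMP_GEQ(), which has to stay correct when the 32-bit clock wraps. The standard serial-number trick is to subtract and test the sign of the difference; the macro below is my restatement of that well-known idiom, not a quote of the kernel header:

#include <assert.h>
#include <stdint.h>

/* a >= b in modulo-2^32 time: the signed difference is non-negative. */
#define TSTMP_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

int
main(void)
{
	uint32_t before_wrap = 0xfffffff0u;
	uint32_t after_wrap = 0x00000010u;	/* later, despite the smaller value */

	assert(TSTMP_GEQ(after_wrap, before_wrap));
	assert(!TSTMP_GEQ(before_wrap, after_wrap));
	return (0);
}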
24173 * socket option arguments. When it re-acquires the lock after the copy, it
24189 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_set_sockopt()
24195 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_set_sockopt()
24198 switch (sopt->sopt_level) { in rack_set_sockopt()
24201 MPASS(inp->inp_vflag & INP_IPV6PROTO); in rack_set_sockopt()
24202 switch (sopt->sopt_name) { in rack_set_sockopt()
24212 switch (sopt->sopt_name) { in rack_set_sockopt()
24217 ip->ip_tos = rack->rc_inp->inp_ip_tos; in rack_set_sockopt()
24223 ip->ip_ttl = rack->rc_inp->inp_ip_ttl; in rack_set_sockopt()
24231 switch (sopt->sopt_name) { in rack_set_sockopt()
24232 case SO_PEERPRIO: /* SC-URL:bs */ in rack_set_sockopt()
24234 if (inp->inp_socket) { in rack_set_sockopt()
24235 rack->client_bufferlvl = inp->inp_socket->so_peerprio; in rack_set_sockopt()
24243 switch (sopt->sopt_name) { in rack_set_sockopt()
24260 case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */ in rack_set_sockopt()
24261 case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */ in rack_set_sockopt()
24321 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_set_sockopt()
24322 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) { in rack_set_sockopt()
24325 * We truncate it down to 32 bits for the socket-option trace this in rack_set_sockopt()
24329 } else if (sopt->sopt_name == TCP_HYBRID_PACING) { in rack_set_sockopt()
24339 if (tp->t_fb != &__tcp_rack) { in rack_set_sockopt()
24343 if (rack->defer_options && (rack->gp_ready == 0) && in rack_set_sockopt()
24344 (sopt->sopt_name != TCP_DEFER_OPTIONS) && in rack_set_sockopt()
24345 (sopt->sopt_name != TCP_HYBRID_PACING) && in rack_set_sockopt()
24346 (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) && in rack_set_sockopt()
24347 (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) && in rack_set_sockopt()
24348 (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) { in rack_set_sockopt()
24350 if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) { in rack_set_sockopt()
24359 error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid); in rack_set_sockopt()
24371 ti->tcpi_state = tp->t_state; in rack_fill_info()
24372 if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP)) in rack_fill_info()
24373 ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; in rack_fill_info()
24374 if (tp->t_flags & TF_SACK_PERMIT) in rack_fill_info()
24375 ti->tcpi_options |= TCPI_OPT_SACK; in rack_fill_info()
24376 if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) { in rack_fill_info()
24377 ti->tcpi_options |= TCPI_OPT_WSCALE; in rack_fill_info()
24378 ti->tcpi_snd_wscale = tp->snd_scale; in rack_fill_info()
24379 ti->tcpi_rcv_wscale = tp->rcv_scale; in rack_fill_info()
24381 if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT)) in rack_fill_info()
24382 ti->tcpi_options |= TCPI_OPT_ECN; in rack_fill_info()
24383 if (tp->t_flags & TF_FASTOPEN) in rack_fill_info()
24384 ti->tcpi_options |= TCPI_OPT_TFO; in rack_fill_info()
24386 ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick; in rack_fill_info()
24388 ti->tcpi_rtt = tp->t_srtt; in rack_fill_info()
24389 ti->tcpi_rttvar = tp->t_rttvar; in rack_fill_info()
24390 ti->tcpi_rto = tp->t_rxtcur; in rack_fill_info()
24391 ti->tcpi_snd_ssthresh = tp->snd_ssthresh; in rack_fill_info()
24392 ti->tcpi_snd_cwnd = tp->snd_cwnd; in rack_fill_info()
24394 * FreeBSD-specific extension fields for tcp_info. in rack_fill_info()
24396 ti->tcpi_rcv_space = tp->rcv_wnd; in rack_fill_info()
24397 ti->tcpi_rcv_nxt = tp->rcv_nxt; in rack_fill_info()
24398 ti->tcpi_snd_wnd = tp->snd_wnd; in rack_fill_info()
24399 ti->tcpi_snd_bwnd = 0; /* Unused, kept for compat. */ in rack_fill_info()
24400 ti->tcpi_snd_nxt = tp->snd_nxt; in rack_fill_info()
24401 ti->tcpi_snd_mss = tp->t_maxseg; in rack_fill_info()
24402 ti->tcpi_rcv_mss = tp->t_maxseg; in rack_fill_info()
24403 ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack; in rack_fill_info()
24404 ti->tcpi_rcv_ooopack = tp->t_rcvoopack; in rack_fill_info()
24405 ti->tcpi_snd_zerowin = tp->t_sndzerowin; in rack_fill_info()
24406 ti->tcpi_total_tlp = tp->t_sndtlppack; in rack_fill_info()
24407 ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte; in rack_fill_info()
24408 ti->tcpi_rttmin = tp->t_rttlow; in rack_fill_info()
24410 memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo)); in rack_fill_info()
24413 if (tp->t_flags & TF_TOE) { in rack_fill_info()
24414 ti->tcpi_options |= TCPI_OPT_TOE; in rack_fill_info()
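Editor's note: everything rack_fill_info() exports is read back with the standard TCP_INFO getsockopt. A minimal reader; note that this stack copies tcpi_rtt straight from its internal SRTT, so I make no claim about the units here:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <err.h>

static void
dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == -1)
		err(1, "getsockopt(TCP_INFO)");
	printf("state=%u rtt=%u rttvar=%u cwnd=%u ssthresh=%u\n",
	    ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
	    ti.tcpi_snd_cwnd, ti.tcpi_snd_ssthresh);
}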
24435 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_get_sockopt()
24440 switch (sopt->sopt_name) { in rack_get_sockopt()
24455 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) in rack_get_sockopt()
24457 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24458 optval = rack->r_ctl.rc_saved_beta; in rack_get_sockopt()
24465 if (tp->t_ccv.cc_data) in rack_get_sockopt()
24466 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta; in rack_get_sockopt()
24479 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) in rack_get_sockopt()
24481 else if (rack->rc_pacing_cc_set == 0) in rack_get_sockopt()
24482 optval = rack->r_ctl.rc_saved_beta_ecn; in rack_get_sockopt()
24489 if (tp->t_ccv.cc_data) in rack_get_sockopt()
24490 optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn; in rack_get_sockopt()
24497 if (rack->rc_rack_tmr_std_based) { in rack_get_sockopt()
24500 if (rack->rc_rack_use_dsack) { in rack_get_sockopt()
24506 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_get_sockopt()
24508 if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) in rack_get_sockopt()
24510 if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) in rack_get_sockopt()
24521 optval = rack->rack_hibeta; in rack_get_sockopt()
24524 optval = rack->defer_options; in rack_get_sockopt()
24527 optval = rack->r_ctl.req_measurements; in rack_get_sockopt()
24530 optval = rack->r_use_labc_for_rec; in rack_get_sockopt()
24533 optval = rack->rc_labc; in rack_get_sockopt()
24536 optval = rack->r_up_only; in rack_get_sockopt()
24539 loptval = rack->r_ctl.fillcw_cap; in rack_get_sockopt()
24542 loptval = rack->r_ctl.bw_rate_cap; in rack_get_sockopt()
24549 optval = rack->r_ctl.side_chan_dis_mask; in rack_get_sockopt()
24556 optval = rack->r_use_cmp_ack; in rack_get_sockopt()
24559 optval = rack->rc_pace_to_cwnd; in rack_get_sockopt()
24562 optval = rack->r_ctl.rc_no_push_at_mrtt; in rack_get_sockopt()
24565 optval = rack->rack_enable_scwnd; in rack_get_sockopt()
24568 optval = rack->rack_rec_nonrxt_use_cr; in rack_get_sockopt()
24571 if (rack->rack_no_prr == 1) in rack_get_sockopt()
24573 else if (rack->no_prr_addback == 1) in rack_get_sockopt()
24579 if (rack->dis_lt_bw) { in rack_get_sockopt()
24582 } else if (rack->use_lesser_lt_bw) { in rack_get_sockopt()
24594 /* Now do we use the LRO mbuf-queue feature */ in rack_get_sockopt()
24595 optval = rack->r_mbuf_queue; in rack_get_sockopt()
24598 optval = rack->cspr_is_fcc; in rack_get_sockopt()
24601 optval = rack->rc_gp_dyn_mul; in rack_get_sockopt()
24608 optval = rack->r_ctl.rc_tlp_cwnd_reduce; in rack_get_sockopt()
24611 val = rack->r_ctl.init_rate; in rack_get_sockopt()
24618 optval = rack->rc_force_max_seg; in rack_get_sockopt()
24621 optval = rack->r_ctl.rc_user_set_min_segs; in rack_get_sockopt()
24625 optval = rack->rc_user_set_max_segs; in rack_get_sockopt()
24629 optval = rack->rc_always_pace; in rack_get_sockopt()
24633 optval = rack->r_ctl.rc_prr_sendalot; in rack_get_sockopt()
24636 /* Minimum time between rack t-o's in ms */ in rack_get_sockopt()
24637 optval = rack->r_ctl.rc_min_to; in rack_get_sockopt()
24640 optval = rack->r_ctl.rc_split_limit; in rack_get_sockopt()
24644 optval = rack->r_ctl.rc_early_recovery_segs; in rack_get_sockopt()
24648 optval = rack->r_ctl.rc_reorder_shift; in rack_get_sockopt()
24651 if (rack->r_ctl.gp_rnd_thresh) { in rack_get_sockopt()
24654 v = rack->r_ctl.gp_gain_req; in rack_get_sockopt()
24656 optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff); in rack_get_sockopt()
24657 if (rack->r_ctl.gate_to_fs == 1) in rack_get_sockopt()
24664 optval = rack->r_ctl.rc_reorder_fade; in rack_get_sockopt()
24668 optval = rack->use_rack_rr; in rack_get_sockopt()
24671 optval = rack->r_rr_config; in rack_get_sockopt()
24674 optval = rack->r_rack_hw_rate_caps; in rack_get_sockopt()
24677 optval = rack->rack_hdw_pace_ena; in rack_get_sockopt()
24681 optval = rack->r_ctl.rc_tlp_threshold; in rack_get_sockopt()
24684 /* RACK added ms i.e. rack-rtt + reord + N */ in rack_get_sockopt()
24685 optval = rack->r_ctl.rc_pkt_delay; in rack_get_sockopt()
24688 optval = rack->rack_tlp_threshold_use; in rack_get_sockopt()
24691 optval = rack->rc_pace_dnd; in rack_get_sockopt()
24694 optval = rack->r_ctl.rc_fixed_pacing_rate_ca; in rack_get_sockopt()
24697 optval = rack->r_ctl.rc_fixed_pacing_rate_ss; in rack_get_sockopt()
24700 optval = rack->r_ctl.rc_fixed_pacing_rate_rec; in rack_get_sockopt()
24703 optval = rack->r_ctl.rack_per_upper_bound_ss; in rack_get_sockopt()
24705 optval |= rack->r_ctl.rack_per_upper_bound_ca; in rack_get_sockopt()
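Editor's note: this getter returns both upper bounds packed into a single 32-bit optval; the shift between the two visible lines is elided by this listing, so the 16/16 split below is an assumption. A round-trip sketch of the packing:

#include <assert.h>
#include <stdint.h>

static uint32_t
pack_bounds(uint16_t ss, uint16_t ca)
{
	return (((uint32_t)ss << 16) | ca);
}

int
main(void)
{
	uint32_t v = pack_bounds(250, 200);

	assert((v >> 16) == 250);	/* slow-start bound in the high half */
	assert((v & 0xffff) == 200);	/* cong-avoid bound in the low half */
	return (0);
}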
24708 optval = rack->r_ctl.rack_per_of_gp_ca; in rack_get_sockopt()
24711 optval = rack->r_ctl.rack_per_of_gp_ss; in rack_get_sockopt()
24714 optval = rack->r_ctl.pace_len_divisor; in rack_get_sockopt()
24717 optval = rack->r_ctl.rc_rate_sample_method; in rack_get_sockopt()
24720 optval = tp->t_delayed_ack; in rack_get_sockopt()
24723 optval = rack->rc_allow_data_af_clo; in rack_get_sockopt()
24726 optval = rack->r_limit_scw; in rack_get_sockopt()
24729 if (rack->r_use_hpts_min) in rack_get_sockopt()
24730 optval = rack->r_ctl.max_reduction; in rack_get_sockopt()
24735 optval = rack->rc_gp_no_rec_chg; in rack_get_sockopt()
24738 optval = rack->rc_skip_timely; in rack_get_sockopt()
24741 optval = rack->r_ctl.timer_slop; in rack_get_sockopt()
24749 if ((sopt->sopt_name == TCP_PACING_RATE_CAP) || in rack_get_sockopt()
24750 (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) in rack_get_sockopt()
24761 if (sopt->sopt_dir == SOPT_SET) { in rack_ctloutput()
24763 } else if (sopt->sopt_dir == SOPT_GET) { in rack_ctloutput()
24766 panic("%s: sopt_dir %d", __func__, sopt->sopt_dir); in rack_ctloutput()
24837 printf("Failed to register rack module -- err:%d\n", err); in tcp_addrack()