Lines Matching +full:gated-fixed-clock

1 /*-
2 * Copyright (c) 2016-2020 Netflix, Inc.
159 * - Matt Mathis's Rate Halving, which slowly drops
160 * the congestion window so that the ack clock can
162 * - Yuchung Cheng's RACK TCP (for which it is named) that
165 * - Reorder Detection of RFC4737 and the Tail-Loss probe draft
183 * TCP output is also overwritten with a new version since it
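As background for the mechanisms this header comment names, a minimal sketch of the RACK time-based loss rule per RFC 8985 (illustration only, not this stack's code; all names are hypothetical):

/*
 * A segment sent before the most recently delivered one is marked
 * lost once it has been outstanding longer than rtt + reorder window.
 */
static int
rack_seg_is_lost(uint64_t now, uint64_t seg_xmit_ts,
    uint64_t rack_rtt, uint64_t reorder_wnd)
{
	return (now >= seg_xmit_ts + rack_rtt + reorder_wnd);
}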
188 static int32_t rack_tlp_limit = 2; /* No more than 2 TLPs without new data */
191 static int32_t rack_reorder_fade = 60000000; /* 0 - never fade, def 60,000,000
192 * - 60 seconds */
196 static uint8_t rack_ssthresh_rest_rto_rec = 0; /* Do we restore ssthresh when we have rec -> rto ->…
209 static int32_t rack_hw_rate_cap_per = 0; /* 0 -- off */
244 static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the configured rate (ss/c…
249 static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
256 static int32_t rack_hw_check_queue = 0; /* Do we always pre-check queue depth of a hw queue */
286 static uint16_t rack_per_of_gp_ss = 250; /* 250 % slow-start */
287 static uint16_t rack_per_of_gp_ca = 200; /* 200 % congestion-avoidance */
302 static uint32_t rack_probe_rtt_safety_val = 2000000; /* No more than 2 sec in probe-rtt */
304 static uint32_t rack_probertt_gpsrtt_cnt_mul = 0; /* How many srtt periods does probe-rtt last top …
305 static uint32_t rack_probertt_gpsrtt_cnt_div = 0; /* How many srtt periods does probe-rtt last bott…
324 * the way fill-cw interacts with timely and caps how much
325 * timely can boost the fill-cw b/w.
331 * probeRTT as well as fixed-rate-pacing.
411 #define RACK_REXMTVAL(tp) max(rack_rto_min, ((tp)->t_srtt + ((tp)->t_rttvar << 2)))
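A worked example of this RTO macro with hypothetical values (this stack keeps srtt, rttvar, and rack_rto_min in microseconds):

/*
 *   t_srtt   = 40000  (40 ms smoothed RTT)
 *   t_rttvar =  5000  ( 5 ms RTT variance)
 *   t_srtt + (t_rttvar << 2) = 40000 + 20000 = 60000
 *   RACK_REXMTVAL = max(rack_rto_min, 60000) = 60000 us
 * i.e. the classic srtt + 4 * rttvar RTO, floored at rack_rto_min.
 */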
582 tim = rack->r_ctl.lt_bw_time; in rack_get_lt_bw()
583 bytes = rack->r_ctl.lt_bw_bytes; in rack_get_lt_bw()
584 if (rack->lt_bw_up) { in rack_get_lt_bw()
587 bytes += (rack->rc_tp->snd_una - rack->r_ctl.lt_seq); in rack_get_lt_bw()
588 tim += (tcp_tv_to_lusec(&tv) - rack->r_ctl.lt_timemark); in rack_get_lt_bw()
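The two matched lines accumulate the bytes and elapsed time of the in-progress long-term b/w sample; a sketch of how such a sample reduces to bytes per second (assuming tim is in microseconds; not the exact code):

static uint64_t
lt_bw_from_sample(uint64_t bytes, uint64_t tim_usec)
{
	if (tim_usec == 0)
		return (0);	/* no elapsed time yet; avoid divide by zero */
	return ((bytes * (uint64_t)1000000) / tim_usec);
}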
606 tp = rack->rc_tp; in rack_swap_beta_values()
607 if (tp->t_cc == NULL) { in rack_swap_beta_values()
611 rack->rc_pacing_cc_set = 1; in rack_swap_beta_values()
612 if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) { in rack_swap_beta_values()
613 /* Not new-reno; we can't play games with beta! */ in rack_swap_beta_values()
618 if (CC_ALGO(tp)->ctl_output == NULL) { in rack_swap_beta_values()
619 /* Huh, not using new-reno so no swaps? */ in rack_swap_beta_values()
627 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
634 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
644 opt.val = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
645 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
651 opt.val = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
652 error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt); in rack_swap_beta_values()
658 rack->r_ctl.rc_saved_beta = old_beta; in rack_swap_beta_values()
659 rack->r_ctl.rc_saved_beta_ecn = old_beta_ecn; in rack_swap_beta_values()
661 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_swap_beta_values()
666 ptr = ((struct newreno *)tp->t_ccv.cc_data); in rack_swap_beta_values()
669 log.u_bbr.flex1 = ptr->beta; in rack_swap_beta_values()
670 log.u_bbr.flex2 = ptr->beta_ecn; in rack_swap_beta_values()
671 log.u_bbr.flex3 = ptr->newreno_flags; in rack_swap_beta_values()
672 log.u_bbr.flex4 = rack->r_ctl.rc_saved_beta; in rack_swap_beta_values()
673 log.u_bbr.flex5 = rack->r_ctl.rc_saved_beta_ecn; in rack_swap_beta_values()
675 log.u_bbr.flex7 = rack->gp_ready; in rack_swap_beta_values()
677 log.u_bbr.flex7 |= rack->use_fixed_rate; in rack_swap_beta_values()
679 log.u_bbr.flex7 |= rack->rc_pacing_cc_set; in rack_swap_beta_values()
680 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_swap_beta_values()
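The ctl_output calls matched above read and write the NewReno beta values through the CC module's control interface; a simplified, illustrative shape of that get/set sequence (error handling and the ECN-beta twin omitted; not the full code):

int error;
struct sockopt sopt;
struct cc_newreno_opts opt;

sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
sopt.sopt_dir = SOPT_GET;	/* 1) read the current beta */
opt.name = CC_NEWRENO_BETA;
error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
/* 2) remember opt.val, then repeat with sopt.sopt_dir = SOPT_SET
 *    and opt.val = rack->r_ctl.rc_saved_beta to install the swap. */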
690 if (rack->rc_pacing_cc_set) in rack_set_cc_pacing()
696 rack->rc_pacing_cc_set = 1; in rack_set_cc_pacing()
703 if (rack->rc_pacing_cc_set == 0) in rack_undo_cc_pacing()
709 rack->rc_pacing_cc_set = 0; in rack_undo_cc_pacing()
716 if (rack->rc_pacing_cc_set) in rack_remove_pacing()
718 if (rack->r_ctl.pacing_method & RACK_REG_PACING) in rack_remove_pacing()
720 if (rack->r_ctl.pacing_method & RACK_DGP_PACING) in rack_remove_pacing()
722 rack->rc_always_pace = 0; in rack_remove_pacing()
723 rack->r_ctl.pacing_method = RACK_PACING_NONE; in rack_remove_pacing()
724 rack->dgp_on = 0; in rack_remove_pacing()
725 rack->rc_hybrid_mode = 0; in rack_remove_pacing()
726 rack->use_fixed_rate = 0; in rack_remove_pacing()
733 if (tcp_bblogging_on(rack->rc_tp) && (rack_verbose_logging != 0)) { in rack_log_gpset()
739 log.u_bbr.flex2 = rack->rc_tp->gput_seq; in rack_log_gpset()
741 log.u_bbr.flex4 = rack->rc_tp->gput_ts; in rack_log_gpset()
743 log.u_bbr.flex6 = rack->rc_tp->gput_ack; in rack_log_gpset()
746 log.u_bbr.rttProp = rack->r_ctl.rc_gp_cumack_ts; in rack_log_gpset()
747 log.u_bbr.delRate = rack->r_ctl.rc_gp_output_ts; in rack_log_gpset()
749 log.u_bbr.cwnd_gain = rack->app_limited_needs_set; in rack_log_gpset()
750 log.u_bbr.pkt_epoch = rack->r_ctl.rc_app_limited_cnt; in rack_log_gpset()
751 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_gpset()
752 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_gpset()
754 log.u_bbr.applimited = rsm->r_start; in rack_log_gpset()
755 log.u_bbr.delivered = rsm->r_end; in rack_log_gpset()
756 log.u_bbr.epoch = rsm->r_flags; in rack_log_gpset()
759 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gpset()
760 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gpset()
761 &rack->rc_inp->inp_socket->so_snd, in rack_log_gpset()
774 if (error || req->newptr == NULL) in sysctl_rack_clear()
878 "What percentage above goodput do we clamp CA/SS to at exit on high-BDP path 110%"); in rack_init_sysctls()
883 "What percentage above goodput do we clamp CA/SS to at exit on a non high-BDP path 100%"); in rack_init_sysctls()
928 "Should we use the min-rtt to calculate the goal rtt (else gp_srtt) at entry"); in rack_init_sysctls()
933 "How to set cwnd at exit, 0 - dynamic, 1 - use min-rtt, 2 - use curgprtt, 3 - entry gp-rtt"); in rack_init_sysctls()
958 "If the rtt goes lower within this percentage of the time, go into probe-rtt"); in rack_init_sysctls()
968 "Do we clear I/S counts on exiting probe-rtt"); in rack_init_sysctls()
978 "We are highly buffered if min_rtt_seen / max_rtt_seen > this-threshold"); in rack_init_sysctls()
1152 "If we fall below this rate, dis-engage hw pacing?"); in rack_init_sysctls()
1293 "What method do we do for TLP time calc 0=no-de-ack-comp, 1=ID, 2=2.1, 3=2.2"); in rack_init_sysctls()
1313 "Should we always send the oldest TLP and RACK-TLP"); in rack_init_sysctls()
1351 "When doing recovery -> rto -> recovery do we reset SSthresh?"); in rack_init_sysctls()
1386 "Minimum RTO in microseconds -- set with caution below 1000 due to TLP"); in rack_init_sysctls()
1391 "Maximum RTO in microseconds -- should be at least as large as min_rto"); in rack_init_sysctls()
1413 "Does a cwnd just-return end the measurement window (app limited)"); in rack_init_sysctls()
1418 "Does an rwnd just-return end the measurement window (app limited -- not persists)"); in rack_init_sysctls()
1475 "Should RACK use mbuf queuing for non-paced connections"); in rack_init_sysctls()
1514 … "When a persist or keep-alive probe is not answered do we calculate rtt on subsequent answers?"); in rack_init_sysctls()
1660 "Total number of times a sends returned enobuf for non-hdwr paced connections"); in rack_init_sysctls()
1845 return (tcp_compute_initwnd(tcp_maxseg(rack->rc_tp))); in rc_init_window()
1852 if (IN_FASTRECOVERY(rack->rc_tp->t_flags)) in rack_get_fixed_pacing_bw()
1853 return (rack->r_ctl.rc_fixed_pacing_rate_rec); in rack_get_fixed_pacing_bw()
1854 else if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_fixed_pacing_bw()
1855 return (rack->r_ctl.rc_fixed_pacing_rate_ss); in rack_get_fixed_pacing_bw()
1857 return (rack->r_ctl.rc_fixed_pacing_rate_ca); in rack_get_fixed_pacing_bw()
1879 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
1887 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid_bw()
1889 do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING); in rack_log_hybrid_bw()
1914 cur = rack->r_ctl.rc_last_sft; in rack_log_hybrid_bw()
1916 if (rack->r_ctl.rack_rs.rs_flags != RACK_RTT_EMPTY) in rack_log_hybrid_bw()
1917 log.u_bbr.inflight = rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_hybrid_bw()
1919 /* Use the last known rtt, i.e. the rack-rtt */ in rack_log_hybrid_bw()
1920 log.u_bbr.inflight = rack->rc_rack_rtt; in rack_log_hybrid_bw()
1925 log.u_bbr.cur_del_rate = cur->deadline; in rack_log_hybrid_bw()
1928 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
1929 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
1930 log.u_bbr.flex6 = cur->start_seq; in rack_log_hybrid_bw()
1931 log.u_bbr.pkts_out = cur->end_seq; in rack_log_hybrid_bw()
1934 log.u_bbr.pkt_epoch = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_bw()
1935 log.u_bbr.lost = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
1937 log.u_bbr.flex6 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_bw()
1938 log.u_bbr.pkts_out = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
1941 log.u_bbr.epoch = (uint32_t)(cur->first_send & 0x00000000ffffffff); in rack_log_hybrid_bw()
1942 log.u_bbr.lt_epoch = (uint32_t)((cur->first_send >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
1944 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_bw()
1945 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_bw()
1947 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_bw()
1951 log.u_bbr.flex4 = (uint32_t)(rack->rc_tp->t_sndbytes - cur->sent_at_fs); in rack_log_hybrid_bw()
1952 log.u_bbr.flex5 = (uint32_t)(rack->rc_tp->t_snd_rxt_bytes - cur->rxt_at_fs); in rack_log_hybrid_bw()
1953 log.u_bbr.flex7 = (uint16_t)cur->hybrid_flags; in rack_log_hybrid_bw()
1965 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_bw()
1967 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_bw()
1969 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_bw()
1971 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_bw()
1973 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_bw()
1974 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_bw()
1975 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_bw()
1987 if (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) { in rack_log_hybrid_sends()
1996 log.u_bbr.delRate = cur->sent_at_fs; in rack_log_hybrid_sends()
1998 if ((cur->flags & TCP_TRK_TRACK_FLG_LSND) == 0) { in rack_log_hybrid_sends()
2004 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hybrid_sends()
2005 log.u_bbr.rttProp = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hybrid_sends()
2011 log.u_bbr.cur_del_rate = cur->sent_at_ls; in rack_log_hybrid_sends()
2012 log.u_bbr.rttProp = cur->rxt_at_ls; in rack_log_hybrid_sends()
2014 log.u_bbr.bw_inuse = cur->rxt_at_fs; in rack_log_hybrid_sends()
2016 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid_sends()
2019 log.u_bbr.flex2 = (uint32_t)(cur->start & 0x00000000ffffffff); in rack_log_hybrid_sends()
2020 log.u_bbr.flex1 = (uint32_t)((cur->start >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2022 log.u_bbr.flex4 = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid_sends()
2023 log.u_bbr.flex3 = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2026 log.u_bbr.applimited = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid_sends()
2027 log.u_bbr.delivered = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2029 log.u_bbr.epoch = (uint32_t)(cur->timestamp & 0x00000000ffffffff); in rack_log_hybrid_sends()
2030 log.u_bbr.lt_epoch = (uint32_t)((cur->timestamp >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2032 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid_sends()
2033 log.u_bbr.lost = cur->playout_ms; in rack_log_hybrid_sends()
2034 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid_sends()
2037 * where a false retransmit occurred so first_send <-> lastsend may in rack_log_hybrid_sends()
2040 log.u_bbr.pkt_epoch = (uint32_t)(rack->r_ctl.last_tmit_time_acked & 0x00000000ffffffff); in rack_log_hybrid_sends()
2041 log.u_bbr.flex5 = (uint32_t)((rack->r_ctl.last_tmit_time_acked >> 32) & 0x00000000ffffffff); in rack_log_hybrid_sends()
2049 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid_sends()
2051 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid_sends()
2053 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid_sends()
2055 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid_sends()
2058 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid_sends()
2059 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid_sends()
2060 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid_sends()
2073 ether = rack->rc_tp->t_maxseg + sizeof(struct tcphdr); in rack_compensate_for_linerate()
2074 if (rack->r_is_v6){ in rack_compensate_for_linerate()
2085 u_segsiz = (uint64_t)min(ctf_fixed_maxseg(rack->rc_tp), rack->r_ctl.rc_pace_min_segs); in rack_compensate_for_linerate()
2100 if (rack->r_ctl.bw_rate_cap == 0) in rack_rate_cap_bw()
2103 if (rack->rc_catch_up && rack->rc_hybrid_mode && in rack_rate_cap_bw()
2104 (rack->r_ctl.rc_last_sft != NULL)) { in rack_rate_cap_bw()
2112 ent = rack->r_ctl.rc_last_sft; in rack_rate_cap_bw()
2115 if (timenow >= ent->deadline) { in rack_rate_cap_bw()
2117 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2119 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2123 timeleft = rack->r_ctl.rc_last_sft->deadline - timenow; in rack_rate_cap_bw()
2126 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2128 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2137 if (ent->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_rate_cap_bw()
2138 if (SEQ_GT(ent->end_seq, rack->rc_tp->snd_una)) in rack_rate_cap_bw()
2139 lenleft = ent->end_seq - rack->rc_tp->snd_una; in rack_rate_cap_bw()
2142 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2144 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2153 if (SEQ_GT(rack->rc_tp->snd_una, ent->start_seq)) in rack_rate_cap_bw()
2154 lengone = rack->rc_tp->snd_una - ent->start_seq; in rack_rate_cap_bw()
2157 if (lengone < (ent->end - ent->start)) in rack_rate_cap_bw()
2158 lenleft = (ent->end - ent->start) - lengone; in rack_rate_cap_bw()
2161 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2163 rack->r_ctl.bw_rate_cap = 0; in rack_rate_cap_bw()
2169 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2171 if (rack->r_ctl.bw_rate_cap) in rack_rate_cap_bw()
2181 rack->r_ctl.bw_rate_cap = calcbw; in rack_rate_cap_bw()
2182 if ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2184 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2185 /* Let's set a smaller mss here, possibly, to match our rate-cap */ in rack_rate_cap_bw()
2188 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2189 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2190 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, calcbw, ctf_fixed_maxseg(rack->rc_tp)); in rack_rate_cap_bw()
2191 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2193 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2196 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
2197 *bw, ent->deadline, lenleft, HYBRID_LOG_RATE_CAP, 0, ent, __LINE__); in rack_rate_cap_bw()
2205 if ((rack->r_ctl.bw_rate_cap > 0) && (*bw > rack->r_ctl.bw_rate_cap)) { in rack_rate_cap_bw()
2207 if (rack->rc_hybrid_mode && in rack_rate_cap_bw()
2208 rack->rc_catch_up && in rack_rate_cap_bw()
2209 (rack->r_ctl.rc_last_sft != NULL) && in rack_rate_cap_bw()
2210 (rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_S_MSS) && in rack_rate_cap_bw()
2212 ((rack->r_ctl.rc_last_sft->hybrid_flags & TCP_HYBRID_PACING_SETMSS) == 0)) { in rack_rate_cap_bw()
2213 /* Let's set a smaller mss here, possibly, to match our rate-cap */ in rack_rate_cap_bw()
2216 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_rate_cap_bw()
2217 rack->r_ctl.rc_last_sft->hybrid_flags |= TCP_HYBRID_PACING_SETMSS; in rack_rate_cap_bw()
2218 …rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, rack->r_ctl.bw_rate_cap, ctf_fixed_maxseg… in rack_rate_cap_bw()
2219 …rack_log_type_pacing_sizes(rack->rc_tp, rack, rack->r_ctl.client_suggested_maxseg, orig_max, __LIN… in rack_rate_cap_bw()
2223 *bw = rack->r_ctl.bw_rate_cap; in rack_rate_cap_bw()
2224 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in rack_rate_cap_bw()
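In catch-up mode the cap is recomputed from the bytes still owed (lenleft) and the time remaining to the deadline (timeleft); a sketch of that calculation (assuming timeleft is in microseconds; not the exact code):

static uint64_t
catchup_bw(uint64_t lenleft, uint64_t timeleft_usec)
{
	if (timeleft_usec == 0)
		return (0);	/* deadline reached; the cap gets cleared */
	return ((lenleft * (uint64_t)1000000) / timeleft_usec);	/* bytes/sec */
}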
2235 if (rack->rc_gp_filled == 0) { in rack_get_gp_est()
2249 if (rack->dis_lt_bw == 1) in rack_get_gp_est()
2255 * No goodput bw but a long-term b/w does exist in rack_get_gp_est()
2261 if (rack->r_ctl.init_rate) in rack_get_gp_est()
2262 return (rack->r_ctl.init_rate); in rack_get_gp_est()
2265 if (rack->rc_tp->t_srtt == 0) { in rack_get_gp_est()
2273 bw = tcp_compute_initwnd(tcp_maxseg(rack->rc_tp)); in rack_get_gp_est()
2274 srtt = (uint64_t)rack->rc_tp->t_srtt; in rack_get_gp_est()
2281 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_get_gp_est()
2283 bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2286 bw = rack->r_ctl.gp_bw / max(rack->r_ctl.num_measurements, 1); in rack_get_gp_est()
2288 if (rack->dis_lt_bw) { in rack_get_gp_est()
2289 /* We are not using lt-bw */ in rack_get_gp_est()
2296 lt_bw = rack->r_ctl.gp_bw; in rack_get_gp_est()
2298 if (rack->use_lesser_lt_bw) { in rack_get_gp_est()
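The averaging visible above suggests gp_bw holds a running sum of samples until RACK_REQ_AVG measurements exist, after which it is the settled estimate; a sketch under that assumption:

static uint64_t
gp_est(uint64_t gp_bw, uint32_t num_measurements, uint32_t req_avg)
{
	if (num_measurements >= req_avg)
		return (gp_bw);		/* settled estimate */
	/* still averaging: divide the sum by the sample count */
	return (gp_bw / (num_measurements ? num_measurements : 1));
}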
2330 if (rack->use_fixed_rate) { in rack_get_bw()
2331 /* Return the fixed pacing rate */ in rack_get_bw()
2341 if (rack->use_fixed_rate) { in rack_get_output_gain()
2343 } else if (rack->in_probe_rtt && (rsm == NULL)) in rack_get_output_gain()
2344 return (rack->r_ctl.rack_per_of_gp_probertt); in rack_get_output_gain()
2345 else if ((IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_get_output_gain()
2346 rack->r_ctl.rack_per_of_gp_rec)) { in rack_get_output_gain()
2349 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2350 } else if (rack->rack_rec_nonrxt_use_cr) { in rack_get_output_gain()
2353 } else if (rack->rack_no_prr && in rack_get_output_gain()
2354 (rack->r_ctl.rack_per_of_gp_rec > 100)) { in rack_get_output_gain()
2359 * Here we may have a non-retransmit but we in rack_get_output_gain()
2363 return (rack->r_ctl.rack_per_of_gp_rec); in rack_get_output_gain()
2368 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) in rack_get_output_gain()
2369 return (rack->r_ctl.rack_per_of_gp_ss); in rack_get_output_gain()
2371 return (rack->r_ctl.rack_per_of_gp_ca); in rack_get_output_gain()
2379 * 1 = dsack_persists reduced by 1 via T-O or fast recovery exit. in rack_log_dsack_event()
2386 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_dsack_event()
2391 log.u_bbr.flex1 = rack->rc_rack_tmr_std_based; in rack_log_dsack_event()
2393 log.u_bbr.flex1 |= rack->rc_rack_use_dsack; in rack_log_dsack_event()
2395 log.u_bbr.flex1 |= rack->rc_dsack_round_seen; in rack_log_dsack_event()
2396 log.u_bbr.flex2 = rack->r_ctl.dsack_round_end; in rack_log_dsack_event()
2397 log.u_bbr.flex3 = rack->r_ctl.num_dsack; in rack_log_dsack_event()
2401 log.u_bbr.flex7 = rack->r_ctl.dsack_persist; in rack_log_dsack_event()
2404 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_dsack_event()
2405 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_dsack_event()
2406 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_dsack_event()
2407 &rack->rc_inp->inp_socket->so_rcv, in rack_log_dsack_event()
2408 &rack->rc_inp->inp_socket->so_snd, in rack_log_dsack_event()
2419 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_hdwr_pacing()
2428 if (rack->r_ctl.crte) { in rack_log_hdwr_pacing()
2429 ifp = rack->r_ctl.crte->ptbl->rs_ifp; in rack_log_hdwr_pacing()
2430 } else if (rack->rc_inp->inp_route.ro_nh && in rack_log_hdwr_pacing()
2431 rack->rc_inp->inp_route.ro_nh->nh_ifp) { in rack_log_hdwr_pacing()
2432 ifp = rack->rc_inp->inp_route.ro_nh->nh_ifp; in rack_log_hdwr_pacing()
2445 log.u_bbr.applimited = rack->r_ctl.rc_pace_max_segs; in rack_log_hdwr_pacing()
2446 log.u_bbr.flex8 = rack->use_fixed_rate; in rack_log_hdwr_pacing()
2448 log.u_bbr.flex8 |= rack->rack_hdrw_pacing; in rack_log_hdwr_pacing()
2449 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_log_hdwr_pacing()
2450 log.u_bbr.delRate = rack->r_ctl.crte_prev_rate; in rack_log_hdwr_pacing()
2451 if (rack->r_ctl.crte) in rack_log_hdwr_pacing()
2452 log.u_bbr.cur_del_rate = rack->r_ctl.crte->rate; in rack_log_hdwr_pacing()
2455 log.u_bbr.rttProp = rack->r_ctl.last_hw_bw_req; in rack_log_hdwr_pacing()
2456 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_hdwr_pacing()
2457 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_hdwr_pacing()
2458 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hdwr_pacing()
2459 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hdwr_pacing()
2460 &rack->rc_inp->inp_socket->so_snd, in rack_log_hdwr_pacing()
2481 if (rack->r_rack_hw_rate_caps) { in rack_get_output_bw()
2483 if (rack->r_ctl.crte != NULL) { in rack_get_output_bw()
2485 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in rack_get_output_bw()
2490 rack->r_rack_hw_rate_caps = 0; in rack_get_output_bw()
2500 } else if ((rack->rack_hdrw_pacing == 0) && in rack_get_output_bw()
2501 (rack->rack_hdw_pace_ena) && in rack_get_output_bw()
2502 (rack->rack_attempt_hdwr_pace == 0) && in rack_get_output_bw()
2503 (rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_output_bw()
2504 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_output_bw()
2512 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in rack_get_output_bw()
2530 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_retran_reason()
2537 * 1 - We are retransmitting and this tells the reason. in rack_log_retran_reason()
2538 * 2 - We are clearing a dup-ack count. in rack_log_retran_reason()
2539 * 3 - We are incrementing a dup-ack count. in rack_log_retran_reason()
2549 log.u_bbr.flex3 = rsm->r_flags; in rack_log_retran_reason()
2550 log.u_bbr.flex4 = rsm->r_dupack; in rack_log_retran_reason()
2551 log.u_bbr.flex5 = rsm->r_start; in rack_log_retran_reason()
2552 log.u_bbr.flex6 = rsm->r_end; in rack_log_retran_reason()
2554 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_retran_reason()
2556 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_retran_reason()
2557 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_retran_reason()
2558 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_retran_reason()
2559 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_retran_reason()
2560 log.u_bbr.epoch = rack->r_ctl.current_round; in rack_log_retran_reason()
2561 log.u_bbr.lt_epoch = rack->r_ctl.rc_considered_lost; in rack_log_retran_reason()
2562 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_retran_reason()
2563 &rack->rc_inp->inp_socket->so_rcv, in rack_log_retran_reason()
2564 &rack->rc_inp->inp_socket->so_snd, in rack_log_retran_reason()
2573 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_start()
2578 log.u_bbr.flex1 = rack->rc_tp->t_srtt; in rack_log_to_start()
2580 log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags; in rack_log_to_start()
2582 log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot; in rack_log_to_start()
2583 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_start()
2584 log.u_bbr.flex7 = rack->rc_in_persist; in rack_log_to_start()
2586 if (rack->rack_no_prr) in rack_log_to_start()
2589 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_start()
2590 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_start()
2592 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_start()
2593 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_start()
2594 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_start()
2595 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_start()
2596 log.u_bbr.cwnd_gain = rack->rack_deferred_inited; in rack_log_to_start()
2597 log.u_bbr.pkt_epoch = rack->rc_has_collapsed; in rack_log_to_start()
2598 log.u_bbr.lt_epoch = rack->rc_tp->t_rxtshift; in rack_log_to_start()
2600 log.u_bbr.epoch = rack->r_ctl.roundends; in rack_log_to_start()
2601 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_start()
2603 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_start()
2604 log.u_bbr.applimited = rack->rc_tp->t_flags2; in rack_log_to_start()
2605 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_start()
2606 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_start()
2607 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_start()
2616 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_event()
2621 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_event()
2623 log.u_bbr.flex1 = rack->r_ctl.rc_rack_min_rtt; in rack_log_to_event()
2624 log.u_bbr.flex2 = rack->rc_rack_rtt; in rack_log_to_event()
2628 log.u_bbr.flex3 = rsm->r_end - rsm->r_start; in rack_log_to_event()
2629 if (rack->rack_no_prr) in rack_log_to_event()
2632 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_event()
2634 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_event()
2635 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_event()
2636 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_event()
2637 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_event()
2638 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_event()
2640 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_event()
2641 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_event()
2642 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_event()
2643 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_event()
2656 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_map_chg()
2662 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_map_chg()
2668 log.u_bbr.flex1 = prev->r_start; in rack_log_map_chg()
2669 log.u_bbr.flex2 = prev->r_end; in rack_log_map_chg()
2673 log.u_bbr.flex3 = rsm->r_start; in rack_log_map_chg()
2674 log.u_bbr.flex4 = rsm->r_end; in rack_log_map_chg()
2678 log.u_bbr.flex5 = next->r_start; in rack_log_map_chg()
2679 log.u_bbr.flex6 = next->r_end; in rack_log_map_chg()
2685 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_map_chg()
2686 if (rack->rack_no_prr) in rack_log_map_chg()
2689 log.u_bbr.lost = rack->r_ctl.rc_prr_sndcnt; in rack_log_map_chg()
2690 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_map_chg()
2692 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_map_chg()
2693 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_map_chg()
2694 &rack->rc_inp->inp_socket->so_rcv, in rack_log_map_chg()
2695 &rack->rc_inp->inp_socket->so_snd, in rack_log_map_chg()
2709 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_rtt_upd()
2712 log.u_bbr.flex3 = rack->r_ctl.rc_rack_min_rtt; in rack_log_rtt_upd()
2713 log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest; in rack_log_rtt_upd()
2714 log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest; in rack_log_rtt_upd()
2715 log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2717 log.u_bbr.rttProp = (uint64_t)rack->r_ctl.rack_rs.rs_rtt_tot; in rack_log_rtt_upd()
2718 log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method; in rack_log_rtt_upd()
2720 log.u_bbr.delivered = rack->r_ctl.rack_rs.rs_us_rtrcnt; in rack_log_rtt_upd()
2721 log.u_bbr.pkts_out = rack->r_ctl.rack_rs.rs_flags; in rack_log_rtt_upd()
2722 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_upd()
2724 log.u_bbr.pkt_epoch = rsm->r_start; in rack_log_rtt_upd()
2725 log.u_bbr.lost = rsm->r_end; in rack_log_rtt_upd()
2726 log.u_bbr.cwnd_gain = rsm->r_rtr_cnt; in rack_log_rtt_upd()
2728 log.u_bbr.pacing_gain = (uint16_t)rsm->r_flags; in rack_log_rtt_upd()
2731 log.u_bbr.pkt_epoch = rack->rc_tp->iss; in rack_log_rtt_upd()
2737 log.u_bbr.use_lt_bw = rack->rc_highly_buffered; in rack_log_rtt_upd()
2739 log.u_bbr.use_lt_bw |= rack->forced_ack; in rack_log_rtt_upd()
2741 log.u_bbr.use_lt_bw |= rack->rc_gp_dyn_mul; in rack_log_rtt_upd()
2743 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_rtt_upd()
2745 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_rtt_upd()
2747 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_rtt_upd()
2749 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_rtt_upd()
2751 log.u_bbr.use_lt_bw |= rack->rc_dragged_bottom; in rack_log_rtt_upd()
2752 log.u_bbr.applimited = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_upd()
2753 log.u_bbr.epoch = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_upd()
2754 log.u_bbr.lt_epoch = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_upd()
2755 log.u_bbr.cur_del_rate = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_upd()
2756 log.u_bbr.delRate = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_upd()
2757 log.u_bbr.bw_inuse = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_log_rtt_upd()
2760 log.u_bbr.bw_inuse |= ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]); in rack_log_rtt_upd()
2762 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_upd()
2763 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_upd()
2779 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample()
2786 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_rtt_sample()
2789 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_sample()
2790 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_rtt_sample()
2791 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_rtt_sample()
2792 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_rtt_sample()
2799 log.u_bbr.delRate = rack->r_ctl.rack_rs.confidence; in rack_log_rtt_sample()
2801 log.u_bbr.delRate |= rack->r_ctl.rack_rs.rs_us_rtt; in rack_log_rtt_sample()
2805 log.u_bbr.lt_epoch = rack->r_ctl.timer_slop; in rack_log_rtt_sample()
2808 log.u_bbr.rttProp = RACK_REXMTVAL(rack->rc_tp); in rack_log_rtt_sample()
2809 log.u_bbr.bw_inuse = rack->r_ctl.act_rcv_time.tv_sec; in rack_log_rtt_sample()
2811 log.u_bbr.bw_inuse += rack->r_ctl.act_rcv_time.tv_usec; in rack_log_rtt_sample()
2812 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample()
2813 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample()
2814 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample()
2823 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sample_calc()
2835 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sample_calc()
2837 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sample_calc()
2838 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sample_calc()
2839 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sample_calc()
2840 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sample_calc()
2850 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_sendmap()
2862 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_rtt_sendmap()
2864 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_rtt_sendmap()
2865 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_sendmap()
2866 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_sendmap()
2867 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_sendmap()
2877 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_progress_event()
2882 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_progress_event()
2885 log.u_bbr.flex3 = tp->t_maxunacktime; in rack_log_progress_event()
2886 log.u_bbr.flex4 = tp->t_acktime; in rack_log_progress_event()
2889 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_progress_event()
2890 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_progress_event()
2891 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_progress_event()
2892 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_progress_event()
2893 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_progress_event()
2895 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_progress_event()
2897 &rack->rc_inp->inp_socket->so_rcv, in rack_log_progress_event()
2898 &rack->rc_inp->inp_socket->so_snd, in rack_log_progress_event()
2907 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_bbrsnd()
2911 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_bbrsnd()
2913 if (rack->rack_no_prr) in rack_log_type_bbrsnd()
2916 log.u_bbr.flex2 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_bbrsnd()
2917 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_type_bbrsnd()
2919 log.u_bbr.flex7 = (0x0000ffff & rack->r_ctl.rc_hpts_flags); in rack_log_type_bbrsnd()
2920 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_bbrsnd()
2922 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_bbrsnd()
2923 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_bbrsnd()
2924 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_bbrsnd()
2925 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_bbrsnd()
2926 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_bbrsnd()
2927 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_bbrsnd()
2928 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_bbrsnd()
2937 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_doseg_done()
2945 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_doseg_done()
2946 if (rack->rack_no_prr) in rack_log_doseg_done()
2949 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_doseg_done()
2951 log.u_bbr.applimited = rack->r_ctl.rc_pace_min_segs; in rack_log_doseg_done()
2952 log.u_bbr.flex7 = rack->rc_ack_can_sendout_data; /* Do we have ack-can-send set */ in rack_log_doseg_done()
2954 log.u_bbr.flex7 |= rack->r_fast_output; /* is fast output primed */ in rack_log_doseg_done()
2956 log.u_bbr.flex7 |= rack->r_wanted_output; /* Do we want output */ in rack_log_doseg_done()
2957 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_doseg_done()
2958 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_doseg_done()
2960 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_doseg_done()
2961 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_doseg_done()
2963 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_doseg_done()
2964 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_doseg_done()
2965 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_doseg_done()
2966 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_doseg_done()
2967 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_doseg_done()
2969 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_doseg_done()
2970 log.u_bbr.epoch = rack->rc_inp->inp_socket->so_snd.sb_hiwat; in rack_log_doseg_done()
2971 log.u_bbr.lt_epoch = rack->rc_inp->inp_socket->so_rcv.sb_hiwat; in rack_log_doseg_done()
2972 log.u_bbr.lost = rack->rc_tp->t_srtt; in rack_log_doseg_done()
2973 log.u_bbr.pkt_epoch = rack->rc_tp->rfbuf_cnt; in rack_log_doseg_done()
2974 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_doseg_done()
2975 &rack->rc_inp->inp_socket->so_rcv, in rack_log_doseg_done()
2976 &rack->rc_inp->inp_socket->so_snd, in rack_log_doseg_done()
2985 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_pacing_sizes()
2990 log.u_bbr.flex1 = rack->r_ctl.rc_pace_min_segs; in rack_log_type_pacing_sizes()
2991 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_log_type_pacing_sizes()
2994 log.u_bbr.flex7 = rack->r_ctl.rc_user_set_min_segs; in rack_log_type_pacing_sizes()
2998 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_pacing_sizes()
2999 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_pacing_sizes()
3000 log.u_bbr.applimited = rack->r_ctl.rc_sacked; in rack_log_type_pacing_sizes()
3001 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_pacing_sizes()
3002 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_pacing_sizes()
3003 TCP_LOG_EVENTP(tp, NULL, &tptosocket(tp)->so_rcv, in rack_log_type_pacing_sizes()
3004 &tptosocket(tp)->so_snd, in rack_log_type_pacing_sizes()
3013 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_type_just_return()
3018 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_type_just_return()
3020 log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags; in rack_log_type_just_return()
3022 if (rack->rack_no_prr) in rack_log_type_just_return()
3025 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_type_just_return()
3027 log.u_bbr.flex8 = rack->rc_in_persist; in rack_log_type_just_return()
3030 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_type_just_return()
3031 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_type_just_return()
3032 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_type_just_return()
3033 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_type_just_return()
3034 log.u_bbr.cwnd_gain = rack->rc_has_collapsed; in rack_log_type_just_return()
3035 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_type_just_return()
3037 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_type_just_return()
3038 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_type_just_return()
3039 &rack->rc_inp->inp_socket->so_rcv, in rack_log_type_just_return()
3040 &rack->rc_inp->inp_socket->so_snd, in rack_log_type_just_return()
3050 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_cancel()
3054 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_to_cancel()
3056 log.u_bbr.flex2 = rack->r_ctl.rc_last_output_to; in rack_log_to_cancel()
3059 if (rack->rack_no_prr) in rack_log_to_cancel()
3062 log.u_bbr.flex5 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_cancel()
3063 log.u_bbr.flex6 = rack->rc_tp->t_rxtcur; in rack_log_to_cancel()
3066 log.u_bbr.applimited = rack->r_ctl.rc_hpts_flags; in rack_log_to_cancel()
3068 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_cancel()
3069 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_cancel()
3070 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_cancel()
3071 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_cancel()
3072 log.u_bbr.bw_inuse = rack->r_ctl.current_round; in rack_log_to_cancel()
3074 log.u_bbr.bw_inuse |= rack->r_ctl.rc_considered_lost; in rack_log_to_cancel()
3075 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_cancel()
3076 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_cancel()
3077 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_cancel()
3090 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_alt_to_to_cancel()
3108 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_alt_to_to_cancel()
3109 &rack->rc_inp->inp_socket->so_rcv, in rack_log_alt_to_to_cancel()
3110 &rack->rc_inp->inp_socket->so_snd, in rack_log_alt_to_to_cancel()
3119 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_processing()
3126 log.u_bbr.flex3 = rack->r_ctl.rc_timer_exp; in rack_log_to_processing()
3127 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_to_processing()
3129 if (rack->rack_no_prr) in rack_log_to_processing()
3132 log.u_bbr.flex6 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_processing()
3133 log.u_bbr.pkts_out = rack->r_ctl.rc_out_at_rto; in rack_log_to_processing()
3134 log.u_bbr.delivered = rack->r_ctl.rc_snd_max_at_rto; in rack_log_to_processing()
3135 log.u_bbr.pacing_gain = rack->r_must_retran; in rack_log_to_processing()
3137 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_processing()
3138 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_processing()
3139 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_processing()
3140 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_processing()
3149 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_to_prr()
3154 log.u_bbr.flex1 = rack->r_ctl.rc_prr_out; in rack_log_to_prr()
3155 log.u_bbr.flex2 = rack->r_ctl.rc_prr_recovery_fs; in rack_log_to_prr()
3156 if (rack->rack_no_prr) in rack_log_to_prr()
3159 log.u_bbr.flex3 = rack->r_ctl.rc_prr_sndcnt; in rack_log_to_prr()
3160 log.u_bbr.flex4 = rack->r_ctl.rc_prr_delivered; in rack_log_to_prr()
3161 log.u_bbr.flex5 = rack->r_ctl.rc_sacked; in rack_log_to_prr()
3162 log.u_bbr.flex6 = rack->r_ctl.rc_holes_rxt; in rack_log_to_prr()
3167 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_to_prr()
3168 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_to_prr()
3170 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_to_prr()
3171 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_to_prr()
3172 &rack->rc_inp->inp_socket->so_rcv, in rack_log_to_prr()
3173 &rack->rc_inp->inp_socket->so_snd, in rack_log_to_prr()
3239 if (rack->rc_free_cnt > rack_free_cache) { in rack_alloc()
3240 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3241 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3243 rack->rc_free_cnt--; in rack_alloc()
3253 rack->r_ctl.rc_num_maps_alloced++; in rack_alloc()
3261 if (rack->rc_free_cnt) { in rack_alloc()
3263 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_alloc()
3264 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_alloc()
3265 rack->rc_free_cnt--; in rack_alloc()
3275 (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) { in rack_alloc_full_limit()
3277 if (!rack->alloc_limit_reported) { in rack_alloc_full_limit()
3278 rack->alloc_limit_reported = 1; in rack_alloc_full_limit()
3294 if (rack->r_ctl.rc_split_limit > 0 && in rack_alloc_limit()
3295 rack->r_ctl.rc_num_split_allocs >= rack->r_ctl.rc_split_limit) { in rack_alloc_limit()
3297 if (!rack->alloc_limit_reported) { in rack_alloc_limit()
3298 rack->alloc_limit_reported = 1; in rack_alloc_limit()
3308 rsm->r_limit_type = limit_type; in rack_alloc_limit()
3309 rack->r_ctl.rc_num_split_allocs++; in rack_alloc_limit()
3323 while (rack->rc_free_cnt > rack_free_cache) { in rack_free_trim()
3324 rsm = TAILQ_LAST(&rack->r_ctl.rc_free, rack_head); in rack_free_trim()
3325 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free_trim()
3326 rack->rc_free_cnt--; in rack_free_trim()
3327 rack->r_ctl.rc_num_maps_alloced--; in rack_free_trim()
3335 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_free()
3336 KASSERT((rack->r_ctl.rc_app_limited_cnt > 0), in rack_free()
3337 ("app_cnt %u, rsm %p", rack->r_ctl.rc_app_limited_cnt, rsm)); in rack_free()
3338 rack->r_ctl.rc_app_limited_cnt--; in rack_free()
3340 if (rsm->r_limit_type) { in rack_free()
3342 rack->r_ctl.rc_num_split_allocs--; in rack_free()
3344 if (rsm == rack->r_ctl.rc_first_appl) { in rack_free()
3345 rack->r_ctl.cleared_app_ack_seq = rsm->r_end; in rack_free()
3346 rack->r_ctl.cleared_app_ack = 1; in rack_free()
3347 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_free()
3348 rack->r_ctl.rc_first_appl = NULL; in rack_free()
3350 rack->r_ctl.rc_first_appl = tqhash_find(rack->r_ctl.tqh, rsm->r_nseq_appl); in rack_free()
3352 if (rsm == rack->r_ctl.rc_resend) in rack_free()
3353 rack->r_ctl.rc_resend = NULL; in rack_free()
3354 if (rsm == rack->r_ctl.rc_end_appl) in rack_free()
3355 rack->r_ctl.rc_end_appl = NULL; in rack_free()
3356 if (rack->r_ctl.rc_tlpsend == rsm) in rack_free()
3357 rack->r_ctl.rc_tlpsend = NULL; in rack_free()
3358 if (rack->r_ctl.rc_sacklast == rsm) in rack_free()
3359 rack->r_ctl.rc_sacklast = NULL; in rack_free()
3362 if ((rack->rc_free_cnt + 1) > RACK_FREE_CNT_MAX) { in rack_free()
3365 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_free()
3366 rack->rc_free_cnt++; in rack_free()
3375 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_get_measure_window()
3377 if (rack->rc_gp_filled == 0) { in rack_get_measure_window()
3413 srtt = (uint64_t)tp->t_srtt; in rack_get_measure_window()
3455 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_enough_for_measurement()
3459 if ((tp->snd_max == tp->snd_una) || in rack_enough_for_measurement()
3460 (th_ack == tp->snd_max)){ in rack_enough_for_measurement()
3474 if (SEQ_GEQ(th_ack, tp->gput_ack)) { in rack_enough_for_measurement()
3484 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_enough_for_measurement()
3485 if (SEQ_LT(th_ack, tp->gput_ack) && in rack_enough_for_measurement()
3486 ((th_ack - tp->gput_seq) < max(rc_init_window(rack), (MIN_GP_WIN * segsiz)))) { in rack_enough_for_measurement()
3490 if (rack->r_ctl.rc_first_appl && in rack_enough_for_measurement()
3491 (SEQ_GEQ(th_ack, rack->r_ctl.rc_first_appl->r_end))) { in rack_enough_for_measurement()
3500 srtts = (rack->r_ctl.rc_gp_srtt * rack_min_srtts); in rack_enough_for_measurement()
3501 tim = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - tp->gput_ts; in rack_enough_for_measurement()
3502 if ((tim >= srtts) && (IN_RECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_enough_for_measurement()
3521 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_timely()
3527 log.u_bbr.flex2 = rack->rc_gp_timely_inc_cnt; in rack_log_timely()
3529 log.u_bbr.flex2 |= rack->rc_gp_timely_dec_cnt; in rack_log_timely()
3531 log.u_bbr.flex2 |= rack->rc_gp_incr; in rack_log_timely()
3533 log.u_bbr.flex2 |= rack->rc_gp_bwred; in rack_log_timely()
3534 log.u_bbr.flex3 = rack->rc_gp_incr; in rack_log_timely()
3535 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_timely()
3536 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_timely()
3537 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_rec; in rack_log_timely()
3538 log.u_bbr.flex7 = rack->rc_gp_bwred; in rack_log_timely()
3545 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_timely()
3547 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_timely()
3548 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_timely()
3549 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_timely()
3550 log.u_bbr.cwnd_gain = rack->rc_dragged_bottom; in rack_log_timely()
3552 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_rec; in rack_log_timely()
3554 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_timely()
3556 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_timely()
3557 log.u_bbr.lost = rack->r_ctl.rc_loss_count; in rack_log_timely()
3558 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_timely()
3559 &rack->rc_inp->inp_socket->so_rcv, in rack_log_timely()
3560 &rack->rc_inp->inp_socket->so_snd, in rack_log_timely()
3647 if (rack->r_ctl.rack_per_of_gp_rec < 100) { in rack_validate_multipliers_at_or_above100()
3649 rack->r_ctl.rack_per_of_gp_rec = 100; in rack_validate_multipliers_at_or_above100()
3651 if (rack->r_ctl.rack_per_of_gp_ca < 100) { in rack_validate_multipliers_at_or_above100()
3652 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_above100()
3654 if (rack->r_ctl.rack_per_of_gp_ss < 100) { in rack_validate_multipliers_at_or_above100()
3655 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_above100()
3662 if (rack->r_ctl.rack_per_of_gp_ca > 100) { in rack_validate_multipliers_at_or_below_100()
3663 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_validate_multipliers_at_or_below_100()
3665 if (rack->r_ctl.rack_per_of_gp_ss > 100) { in rack_validate_multipliers_at_or_below_100()
3666 rack->r_ctl.rack_per_of_gp_ss = 100; in rack_validate_multipliers_at_or_below_100()
3677 if (rack->rc_skip_timely) in rack_increase_bw_mul()
3684 * to a new-reno flow. in rack_increase_bw_mul()
3689 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3690 ((rack->rc_gp_timely_inc_cnt + 1) >= RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3697 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3702 ((rack->r_ctl.rc_rtt_diff <= 0) || (timely_says <= 0))) in rack_increase_bw_mul()
3704 if (rack->rc_gp_saw_rec && in rack_increase_bw_mul()
3705 (rack->rc_gp_no_rec_chg == 0) && in rack_increase_bw_mul()
3707 rack->r_ctl.rack_per_of_gp_rec)) { in rack_increase_bw_mul()
3709 calc = rack->r_ctl.rack_per_of_gp_rec + plus; in rack_increase_bw_mul()
3713 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)calc; in rack_increase_bw_mul()
3714 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3715 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3716 (rack->r_ctl.rack_per_of_gp_rec > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3717 rack->r_ctl.rack_per_of_gp_rec = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3719 if (rack->rc_gp_saw_ca && in rack_increase_bw_mul()
3720 (rack->rc_gp_saw_ss == 0) && in rack_increase_bw_mul()
3722 rack->r_ctl.rack_per_of_gp_ca)) { in rack_increase_bw_mul()
3724 calc = rack->r_ctl.rack_per_of_gp_ca + plus; in rack_increase_bw_mul()
3728 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)calc; in rack_increase_bw_mul()
3729 if (rack->r_ctl.rack_per_upper_bound_ca && in rack_increase_bw_mul()
3730 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3731 (rack->r_ctl.rack_per_of_gp_ca > rack->r_ctl.rack_per_upper_bound_ca)) in rack_increase_bw_mul()
3732 rack->r_ctl.rack_per_of_gp_ca = rack->r_ctl.rack_per_upper_bound_ca; in rack_increase_bw_mul()
3734 if (rack->rc_gp_saw_ss && in rack_increase_bw_mul()
3736 rack->r_ctl.rack_per_of_gp_ss)) { in rack_increase_bw_mul()
3738 calc = rack->r_ctl.rack_per_of_gp_ss + plus; in rack_increase_bw_mul()
3741 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)calc; in rack_increase_bw_mul()
3742 if (rack->r_ctl.rack_per_upper_bound_ss && in rack_increase_bw_mul()
3743 (rack->rc_dragged_bottom == 0) && in rack_increase_bw_mul()
3744 (rack->r_ctl.rack_per_of_gp_ss > rack->r_ctl.rack_per_upper_bound_ss)) in rack_increase_bw_mul()
3745 rack->r_ctl.rack_per_of_gp_ss = rack->r_ctl.rack_per_upper_bound_ss; in rack_increase_bw_mul()
3749 (rack->rc_gp_incr == 0)){ in rack_increase_bw_mul()
3751 rack->rc_gp_incr = 1; in rack_increase_bw_mul()
3752 rack->rc_gp_timely_inc_cnt = 0; in rack_increase_bw_mul()
3754 if (rack->rc_gp_incr && in rack_increase_bw_mul()
3756 (rack->rc_gp_timely_inc_cnt < RACK_TIMELY_CNT_BOOST)) { in rack_increase_bw_mul()
3757 rack->rc_gp_timely_inc_cnt++; in rack_increase_bw_mul()
3766 /*- in rack_get_decrease()
3768 * new_per = curper * (1 - B * norm_grad) in rack_get_decrease()
3771 * rtt_dif = input var current rtt-diff in rack_get_decrease()
3784 * (uint64_t)get_filter_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_get_decrease()
3787 * reduce_by = (1000000 - inverse); in rack_get_decrease()
3793 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_get_decrease()
3796 (uint64_t)get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)))/ in rack_get_decrease()
3801 perf = curper - 1; in rack_get_decrease()
3811 * result = curper * (1 - (B * (1 - highrttthresh / gp_srtt))) in rack_decrease_highrtt()
3820 highrttthresh = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_decrease_highrtt()
3822 perf = (((uint64_t)curper * ((uint64_t)1000000 - in rack_decrease_highrtt()
3823 ((uint64_t)rack_gp_decrease_per * ((uint64_t)1000000 - in rack_decrease_highrtt()
3826 if (tcp_bblogging_on(rack->rc_tp)) { in rack_decrease_highrtt()
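A worked example of the high-RTT reduction above, with hypothetical values:

/*
 *   curper        = 200      (current pacing gain, percent)
 *   highrttthresh = 40000 us (min_rtt * rack_gp_rtt_maxmul)
 *   rtt (gp_srtt) = 80000 us
 *   B             = 0.20     (rack_gp_decrease_per / 100)
 *   new_per = 200 * (1 - 0.20 * (1 - 40000/80000))
 *           = 200 * (1 - 0.10) = 180
 */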
3849 if (rack->rc_skip_timely) in rack_decrease_bw_mul()
3851 if (rack->rc_gp_incr) { in rack_decrease_bw_mul()
3853 rack->rc_gp_incr = 0; in rack_decrease_bw_mul()
3854 rack->rc_gp_timely_inc_cnt = 0; in rack_decrease_bw_mul()
3860 rtt_diff *= -1; in rack_decrease_bw_mul()
3863 if (rack->rc_gp_saw_rec && (rack->rc_gp_no_rec_chg == 0)) { in rack_decrease_bw_mul()
3866 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_rec, rtt); in rack_decrease_bw_mul()
3867 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
3873 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_rec, rtt_diff); in rack_decrease_bw_mul()
3874 if (rack->r_ctl.rack_per_of_gp_rec > val) { in rack_decrease_bw_mul()
3875 rec_red = (rack->r_ctl.rack_per_of_gp_rec - val); in rack_decrease_bw_mul()
3876 rack->r_ctl.rack_per_of_gp_rec = (uint16_t)val; in rack_decrease_bw_mul()
3878 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
3881 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_rec) in rack_decrease_bw_mul()
3882 rack->r_ctl.rack_per_of_gp_rec = rack_per_lower_bound; in rack_decrease_bw_mul()
3885 if (rack->rc_gp_saw_ss) { in rack_decrease_bw_mul()
3888 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ss, rtt); in rack_decrease_bw_mul()
3889 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
3895 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ss, rtt_diff); in rack_decrease_bw_mul()
3896 if (rack->r_ctl.rack_per_of_gp_ss > new_per) { in rack_decrease_bw_mul()
3897 ss_red = rack->r_ctl.rack_per_of_gp_ss - val; in rack_decrease_bw_mul()
3898 rack->r_ctl.rack_per_of_gp_ss = (uint16_t)val; in rack_decrease_bw_mul()
3901 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
3910 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
3915 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ss) in rack_decrease_bw_mul()
3916 rack->r_ctl.rack_per_of_gp_ss = rack_per_lower_bound; in rack_decrease_bw_mul()
3918 } else if (rack->rc_gp_saw_ca) { in rack_decrease_bw_mul()
3921 new_per = rack_decrease_highrtt(rack, rack->r_ctl.rack_per_of_gp_ca, rtt); in rack_decrease_bw_mul()
3922 alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
3928 val = new_per = alt = rack_get_decrease(rack, rack->r_ctl.rack_per_of_gp_ca, rtt_diff); in rack_decrease_bw_mul()
3929 if (rack->r_ctl.rack_per_of_gp_ca > val) { in rack_decrease_bw_mul()
3930 ca_red = rack->r_ctl.rack_per_of_gp_ca - val; in rack_decrease_bw_mul()
3931 rack->r_ctl.rack_per_of_gp_ca = (uint16_t)val; in rack_decrease_bw_mul()
3933 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
3943 logvar3 |= get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_decrease_bw_mul()
3948 if (rack_per_lower_bound > rack->r_ctl.rack_per_of_gp_ca) in rack_decrease_bw_mul()
3949 rack->r_ctl.rack_per_of_gp_ca = rack_per_lower_bound; in rack_decrease_bw_mul()
3952 if (rack->rc_gp_timely_dec_cnt < 0x7) { in rack_decrease_bw_mul()
3953 rack->rc_gp_timely_dec_cnt++; in rack_decrease_bw_mul()
3955 (rack->rc_gp_timely_dec_cnt == rack_timely_dec_clear)) in rack_decrease_bw_mul()
3956 rack->rc_gp_timely_dec_cnt = 0; in rack_decrease_bw_mul()
3969 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_rtt_shrinks()
3975 log.u_bbr.flex2 = rack->r_ctl.rc_time_probertt_starts; in rack_log_rtt_shrinks()
3976 log.u_bbr.flex3 = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
3977 log.u_bbr.flex4 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_rtt_shrinks()
3979 log.u_bbr.flex6 = rack->rc_highly_buffered; in rack_log_rtt_shrinks()
3981 log.u_bbr.flex6 |= rack->forced_ack; in rack_log_rtt_shrinks()
3983 log.u_bbr.flex6 |= rack->rc_gp_dyn_mul; in rack_log_rtt_shrinks()
3985 log.u_bbr.flex6 |= rack->in_probe_rtt; in rack_log_rtt_shrinks()
3987 log.u_bbr.flex6 |= rack->measure_saw_probe_rtt; in rack_log_rtt_shrinks()
3988 log.u_bbr.flex7 = rack->r_ctl.rack_per_of_gp_probertt; in rack_log_rtt_shrinks()
3989 log.u_bbr.pacing_gain = rack->r_ctl.rack_per_of_gp_ca; in rack_log_rtt_shrinks()
3990 log.u_bbr.cwnd_gain = rack->r_ctl.rack_per_of_gp_rec; in rack_log_rtt_shrinks()
3994 log.u_bbr.cur_del_rate = rack->r_ctl.rc_highest_us_rtt; in rack_log_rtt_shrinks()
3996 log.u_bbr.cur_del_rate |= rack->r_ctl.rc_lowest_us_rtt; in rack_log_rtt_shrinks()
3997 log.u_bbr.applimited = rack->r_ctl.rc_time_probertt_entered; in rack_log_rtt_shrinks()
3998 log.u_bbr.pkts_out = rack->r_ctl.rc_rtt_diff; in rack_log_rtt_shrinks()
3999 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_rtt_shrinks()
4000 log.u_bbr.epoch = rack->r_ctl.rc_gp_srtt; in rack_log_rtt_shrinks()
4001 log.u_bbr.lt_epoch = rack->r_ctl.rc_prev_gp_srtt; in rack_log_rtt_shrinks()
4002 log.u_bbr.pkt_epoch = rack->r_ctl.rc_lower_rtt_us_cts; in rack_log_rtt_shrinks()
4003 log.u_bbr.delivered = rack->r_ctl.rc_target_probertt_flight; in rack_log_rtt_shrinks()
4004 log.u_bbr.lost = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_log_rtt_shrinks()
4007 log.u_bbr.rttProp |= rack->r_ctl.rc_entry_gp_rtt; in rack_log_rtt_shrinks()
4008 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_rtt_shrinks()
4009 &rack->rc_inp->inp_socket->so_rcv, in rack_log_rtt_shrinks()
4010 &rack->rc_inp->inp_socket->so_snd, in rack_log_rtt_shrinks()
4012 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_rtt_shrinks()
4024 rack->r_ctl.rc_target_probertt_flight = roundup((uint32_t)bwdp, segsiz); in rack_set_prtt_target()
4025 if (rack->r_ctl.rc_target_probertt_flight < (segsiz * rack_timely_min_segs)) { in rack_set_prtt_target()
4031 rack->r_ctl.rc_target_probertt_flight = (segsiz * rack_timely_min_segs); in rack_set_prtt_target()
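/*
 * Sketch of the probe-rtt flight target set above, assuming bwdp is
 * the bandwidth-delay product in bytes (bytes/sec times min-RTT in
 * usec, scaled down by 1e6): round the BDP up to a segment boundary
 * and floor it at rack_timely_min_segs segments. Illustrative names;
 * segsiz is assumed non-zero.
 */
#include <stdint.h>

static uint32_t
prtt_target_flight(uint64_t bw_bps, uint32_t min_rtt_us, uint32_t segsiz,
    uint32_t min_segs)
{
	uint64_t bwdp;
	uint32_t target;

	bwdp = (bw_bps * (uint64_t)min_rtt_us) / 1000000;
	target = (uint32_t)(((bwdp + segsiz - 1) / segsiz) * segsiz);
	if (target < (segsiz * min_segs))
		target = segsiz * min_segs;
	return (target);
}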
4052 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_enter_probertt()
4053 if (rack->rc_gp_dyn_mul == 0) in rack_enter_probertt()
4056 if (rack->rc_tp->snd_max == rack->rc_tp->snd_una) { in rack_enter_probertt()
4060 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_enter_probertt()
4061 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_enter_probertt()
4069 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_enter_probertt()
4070 rack->rc_tp->snd_una, __LINE__, in rack_enter_probertt()
4073 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_enter_probertt()
4074 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_enter_probertt()
4075 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_enter_probertt()
4076 rack->r_ctl.rc_pace_min_segs); in rack_enter_probertt()
4077 rack->in_probe_rtt = 1; in rack_enter_probertt()
4078 rack->measure_saw_probe_rtt = 1; in rack_enter_probertt()
4079 rack->r_ctl.rc_time_probertt_starts = 0; in rack_enter_probertt()
4080 rack->r_ctl.rc_entry_gp_rtt = rack->r_ctl.rc_gp_srtt; in rack_enter_probertt()
4082 rack_set_prtt_target(rack, segsiz, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_enter_probertt()
4084 rack_set_prtt_target(rack, segsiz, rack->r_ctl.rc_gp_srtt); in rack_enter_probertt()
4085 rack_log_rtt_shrinks(rack, us_cts, get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_enter_probertt()
4095 segsiz = min(ctf_fixed_maxseg(rack->rc_tp), in rack_exit_probertt()
4096 rack->r_ctl.rc_pace_min_segs); in rack_exit_probertt()
4097 rack->in_probe_rtt = 0; in rack_exit_probertt()
4098 if ((rack->rc_tp->t_flags & TF_GPUTINPROG) && in rack_exit_probertt()
4099 SEQ_GT(rack->rc_tp->snd_una, rack->rc_tp->gput_seq)) { in rack_exit_probertt()
4107 rack_do_goodput_measurement(rack->rc_tp, rack, in rack_exit_probertt()
4108 rack->rc_tp->snd_una, __LINE__, in rack_exit_probertt()
4110 } else if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in rack_exit_probertt()
4114 * probe-rtt. We probably are not interested in in rack_exit_probertt()
4117 rack->rc_tp->t_flags &= ~TF_GPUTINPROG; in rack_exit_probertt()
4123 * We need to mark these as app-limited so we in rack_exit_probertt()
4126 rsm = tqhash_max(rack->r_ctl.tqh); in rack_exit_probertt()
4127 if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_exit_probertt()
4128 if (rack->r_ctl.rc_app_limited_cnt == 0) in rack_exit_probertt()
4129 rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm; in rack_exit_probertt()
4136 if (rack->r_ctl.rc_end_appl) in rack_exit_probertt()
4137 rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start; in rack_exit_probertt()
4138 rack->r_ctl.rc_end_appl = rsm; in rack_exit_probertt()
4140 rsm->r_flags |= RACK_APP_LIMITED; in rack_exit_probertt()
4141 rack->r_ctl.rc_app_limited_cnt++; in rack_exit_probertt()
4153 rack->rc_gp_incr = 0; in rack_exit_probertt()
4154 rack->rc_gp_bwred = 0; in rack_exit_probertt()
4155 rack->rc_gp_timely_inc_cnt = 0; in rack_exit_probertt()
4156 rack->rc_gp_timely_dec_cnt = 0; in rack_exit_probertt()
4159 if (rack->rc_highly_buffered && rack_atexit_prtt_hbp) { in rack_exit_probertt()
4160 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt_hbp; in rack_exit_probertt()
4161 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt_hbp; in rack_exit_probertt()
4163 if ((rack->rc_highly_buffered == 0) && rack_atexit_prtt) { in rack_exit_probertt()
4164 rack->r_ctl.rack_per_of_gp_ca = rack_atexit_prtt; in rack_exit_probertt()
4165 rack->r_ctl.rack_per_of_gp_ss = rack_atexit_prtt; in rack_exit_probertt()
4171 rack->r_ctl.rc_rtt_diff = 0; in rack_exit_probertt()
4174 rack->rc_tp->t_bytes_acked = 0; in rack_exit_probertt()
4175 rack->rc_tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_exit_probertt()
4188 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt)); in rack_exit_probertt()
4192 rack->r_ctl.rc_gp_srtt); in rack_exit_probertt()
4196 rack->r_ctl.rc_entry_gp_rtt); in rack_exit_probertt()
4201 sum = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4203 sum /= (uint64_t)(max(1, rack->r_ctl.rc_gp_srtt)); in rack_exit_probertt()
4211 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4218 setval = rack->r_ctl.rc_gp_srtt; in rack_exit_probertt()
4219 if (setval > rack->r_ctl.rc_entry_gp_rtt) in rack_exit_probertt()
4220 setval = rack->r_ctl.rc_entry_gp_rtt; in rack_exit_probertt()
4227 setval = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_exit_probertt()
4234 ebdp = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4237 setto = rack->r_ctl.rc_target_probertt_flight + ebdp; in rack_exit_probertt()
4239 setto = rack->r_ctl.rc_target_probertt_flight; in rack_exit_probertt()
4240 rack->rc_tp->snd_cwnd = roundup(setto, segsiz); in rack_exit_probertt()
4241 if (rack->rc_tp->snd_cwnd < (segsiz * rack_timely_min_segs)) { in rack_exit_probertt()
4243 rack->rc_tp->snd_cwnd = segsiz * rack_timely_min_segs; in rack_exit_probertt()
4246 rack->rc_tp->snd_ssthresh = (rack->rc_tp->snd_cwnd - 1); in rack_exit_probertt()
4249 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_exit_probertt()
4252 rack->r_ctl.rc_probertt_sndmax_atexit = rack->rc_tp->snd_max; in rack_exit_probertt()
4253 rack->r_ctl.rc_time_probertt_entered = us_cts; in rack_exit_probertt()
4254 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_probertt()
4255 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_exit_probertt()
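/*
 * Sketch of the cwnd restore done at probe-rtt exit above: start from
 * the target flight, optionally add an extra percentage of it (the
 * ebdp term), round up to a segment, floor at the minimum flight, and
 * pin ssthresh one byte below so congestion avoidance resumes at
 * once. The extra_pct parameter is an illustrative stand-in for the
 * tunable involved here.
 */
#include <stdint.h>

static uint32_t
prtt_exit_cwnd(uint32_t target_flight, uint32_t extra_pct, uint32_t segsiz,
    uint32_t min_segs, uint32_t *ssthresh)
{
	uint64_t ebdp;
	uint32_t setto;

	ebdp = ((uint64_t)target_flight * extra_pct) / 100;
	setto = target_flight + (uint32_t)ebdp;
	setto = ((setto + segsiz - 1) / segsiz) * segsiz;	/* roundup */
	if (setto < (segsiz * min_segs))
		setto = segsiz * min_segs;
	*ssthresh = setto - 1;	/* leave slow start immediately */
	return (setto);
}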
4261 /* Check in on probe-rtt */ in rack_check_probe_rtt()
4263 if (rack->rc_gp_filled == 0) { in rack_check_probe_rtt()
4264 /* We do not do p-rtt unless we have gp measurements */ in rack_check_probe_rtt()
4267 if (rack->in_probe_rtt) { in rack_check_probe_rtt()
4271 if (rack->r_ctl.rc_went_idle_time && in rack_check_probe_rtt()
4272 ((us_cts - rack->r_ctl.rc_went_idle_time) > rack_min_probertt_hold)) { in rack_check_probe_rtt()
4278 TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered) && in rack_check_probe_rtt()
4279 ((us_cts - rack->r_ctl.rc_time_probertt_entered) > rack_probe_rtt_safety_val)) { in rack_check_probe_rtt()
4284 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4289 endtime = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_max_drain_wait); in rack_check_probe_rtt()
4290 if (rack->rc_highly_buffered) in rack_check_probe_rtt()
4291 endtime += (rack->r_ctl.rc_gp_srtt * rack_max_drain_hbp); in rack_check_probe_rtt()
4293 must_stay = rack->r_ctl.rc_time_probertt_entered + (rack->r_ctl.rc_gp_srtt * rack_must_drain); in rack_check_probe_rtt()
4294 …if ((ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.rc_target_probertt_flight) … in rack_check_probe_rtt()
4299 if (TSTMP_GT(us_cts, rack->r_ctl.rc_time_probertt_entered)) in rack_check_probe_rtt()
4300 calc = us_cts - rack->r_ctl.rc_time_probertt_entered; in rack_check_probe_rtt()
4303 calc /= max(rack->r_ctl.rc_gp_srtt, 1); in rack_check_probe_rtt()
4308 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4310 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt - calc; in rack_check_probe_rtt()
4312 if (rack->r_ctl.rack_per_of_gp_probertt < rack_per_of_gp_lowthresh) in rack_check_probe_rtt()
4313 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_lowthresh; in rack_check_probe_rtt()
4318 if (rack->r_ctl.rc_time_probertt_starts == 0) { in rack_check_probe_rtt()
4320 rack->rc_highly_buffered) || in rack_check_probe_rtt()
4321 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > in rack_check_probe_rtt()
4322 rack->r_ctl.rc_target_probertt_flight)) { in rack_check_probe_rtt()
4327 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_check_probe_rtt()
4329 rack->r_ctl.rc_time_probertt_starts = us_cts; in rack_check_probe_rtt()
4330 if (rack->r_ctl.rc_time_probertt_starts == 0) in rack_check_probe_rtt()
4331 rack->r_ctl.rc_time_probertt_starts = 1; in rack_check_probe_rtt()
4333 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_check_probe_rtt()
4338 no_overflow = ((uint64_t)rack->r_ctl.rc_gp_srtt * in rack_check_probe_rtt()
4345 endtime += rack->r_ctl.rc_time_probertt_starts; in rack_check_probe_rtt()
4351 } else if ((rack->rc_skip_timely == 0) && in rack_check_probe_rtt()
4352 (TSTMP_GT(us_cts, rack->r_ctl.rc_lower_rtt_us_cts)) && in rack_check_probe_rtt()
4353 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= rack_time_between_probertt)) { in rack_check_probe_rtt()
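/*
 * Sketch of the gain drain performed above while probe-rtt waits for
 * flight to fall: each elapsed srtt since entry shaves one percentage
 * point off the probe-rtt multiplier, never dropping below the low
 * threshold. Standalone illustrative form, times in microseconds.
 */
#include <stdint.h>

static uint16_t
prtt_gain_drain(uint32_t now_us, uint32_t entered_us, uint32_t gp_srtt_us,
    uint16_t start_per, uint16_t low_thresh)
{
	uint32_t elapsed, steps;

	elapsed = (now_us > entered_us) ? (now_us - entered_us) : 0;
	steps = elapsed / (gp_srtt_us ? gp_srtt_us : 1);
	if ((low_thresh >= start_per) ||
	    (steps >= (uint32_t)(start_per - low_thresh)))
		return (low_thresh);
	return (start_per - (uint16_t)steps);
}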
4366 if ((rack->rc_gp_dyn_mul == 0) || in rack_update_multiplier()
4367 (rack->use_fixed_rate) || in rack_update_multiplier()
4368 (rack->in_probe_rtt) || in rack_update_multiplier()
4369 (rack->rc_always_pace == 0)) { in rack_update_multiplier()
4373 losses = rack->r_ctl.rc_loss_count - rack->r_ctl.rc_loss_at_start; in rack_update_multiplier()
4376 up_bnd = rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_up; in rack_update_multiplier()
4378 up_bnd += rack->r_ctl.last_gp_comp_bw; in rack_update_multiplier()
4380 subfr = (uint64_t)rack->r_ctl.last_gp_comp_bw * (uint64_t)rack_gp_per_bw_mul_down; in rack_update_multiplier()
4382 low_bnd = rack->r_ctl.last_gp_comp_bw - subfr; in rack_update_multiplier()
4383 if ((timely_says == 2) && (rack->r_ctl.rc_no_push_at_mrtt)) { in rack_update_multiplier()
4396 if (rack->r_ctl.rc_no_push_at_mrtt > 1) in rack_update_multiplier()
4415 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4416 if (rack->rc_gp_bwred == 0) { in rack_update_multiplier()
4418 rack->rc_gp_bwred = 1; in rack_update_multiplier()
4419 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4421 if (rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) { in rack_update_multiplier()
4427 if ((rack->rc_gp_saw_ca && rack->r_ctl.rack_per_of_gp_ca <= rack_down_raise_thresh) || in rack_update_multiplier()
4428 (rack->rc_gp_saw_ss && rack->r_ctl.rack_per_of_gp_ss <= rack_down_raise_thresh) || in rack_update_multiplier()
4442 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4443 /* We are not really incrementing, so this is a no-count */ in rack_update_multiplier()
4444 rack->rc_gp_incr = 0; in rack_update_multiplier()
4445 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4465 rack->r_ctl.last_gp_comp_bw = cur_bw; in rack_update_multiplier()
4466 if (rack->rc_gp_saw_ss && in rack_update_multiplier()
4467 rack->r_ctl.rack_per_upper_bound_ss && in rack_update_multiplier()
4468 (rack->r_ctl.rack_per_of_gp_ss == rack->r_ctl.rack_per_upper_bound_ss)) { in rack_update_multiplier()
4475 if (rack->rc_gp_saw_ca && in rack_update_multiplier()
4476 rack->r_ctl.rack_per_upper_bound_ca && in rack_update_multiplier()
4477 (rack->r_ctl.rack_per_of_gp_ca == rack->r_ctl.rack_per_upper_bound_ca)) { in rack_update_multiplier()
4484 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4485 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
4487 if ((rack->rc_gp_incr < rack_timely_max_push_rise) || (timely_says == 0)) { in rack_update_multiplier()
4504 rack->rc_gp_incr = 0; in rack_update_multiplier()
4505 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4506 if ((rack->rc_gp_timely_dec_cnt < rack_timely_max_push_drop) && in rack_update_multiplier()
4511 rack->rc_gp_timely_dec_cnt++; in rack_update_multiplier()
4512 /* We are not really incrementing, so this is a no-count */ in rack_update_multiplier()
4513 rack->rc_gp_incr = 0; in rack_update_multiplier()
4514 rack->rc_gp_timely_inc_cnt = 0; in rack_update_multiplier()
4518 rack->rc_gp_bwred = 0; in rack_update_multiplier()
4519 rack->rc_gp_timely_dec_cnt = 0; in rack_update_multiplier()
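/*
 * Sketch of the hysteresis band used above: a goodput sample only
 * counts as a real rise or drop when it escapes a band around the
 * last measurement, whose edges are last + mul_up% and last -
 * mul_down% (both tunables are percentages). Illustrative form.
 */
#include <stdint.h>

static int
gp_bw_band_compare(uint64_t cur_bw, uint64_t last_bw, uint64_t mul_up_pct,
    uint64_t mul_down_pct)
{
	uint64_t up_bnd, low_bnd;

	up_bnd = last_bw + (last_bw * mul_up_pct) / 100;
	low_bnd = last_bw - (last_bw * mul_down_pct) / 100;
	if (cur_bw > up_bnd)
		return (1);	/* meaningful rise */
	if (cur_bw < low_bnd)
		return (-1);	/* meaningful drop */
	return (0);		/* inside the noise band */
}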
4534 if (rtt >= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * in rack_make_timely_judgement()
4538 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_maxmul; in rack_make_timely_judgement()
4542 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4544 } else if (rtt <= (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4545 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4548 log_mult = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) + in rack_make_timely_judgement()
4549 ((get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack_gp_rtt_minmul) / in rack_make_timely_judgement()
4555 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), in rack_make_timely_judgement()
4578 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 6); in rack_make_timely_judgement()
4583 get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt), log_rtt_a_diff, __LINE__, 7); in rack_make_timely_judgement()
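/*
 * Sketch of the RTT judgement above: an RTT at or beyond min_rtt *
 * maxmul demands a decrease, an RTT within minmul percent of min_rtt
 * permits an increase, and anything in between is left to the RTT
 * trend logic. Return codes and names are illustrative.
 */
#include <stdint.h>

#define TIMELY_SAYS_DEC	2
#define TIMELY_SAYS_INC	1
#define TIMELY_SAYS_MID	0

static int
timely_judge(uint32_t rtt, uint32_t min_rtt, uint32_t maxmul,
    uint32_t minmul_pct)
{
	if (rtt >= (min_rtt * maxmul))
		return (TIMELY_SAYS_DEC);
	if (rtt <= (min_rtt + (min_rtt * minmul_pct) / 100))
		return (TIMELY_SAYS_INC);
	return (TIMELY_SAYS_MID);
}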
4592 if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4593 SEQ_LEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4598 * |----------------| in rack_in_gp_window()
4599 * |-----| <or> in rack_in_gp_window()
4600 * |----| in rack_in_gp_window()
4601 * <or> |---| in rack_in_gp_window()
4604 } else if (SEQ_LT(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4605 SEQ_GT(rsm->r_end, tp->gput_seq)){ in rack_in_gp_window()
4608 * |--------------| in rack_in_gp_window()
4609 * |-------->| in rack_in_gp_window()
4612 } else if (SEQ_GEQ(rsm->r_start, tp->gput_seq) && in rack_in_gp_window()
4613 SEQ_LT(rsm->r_start, tp->gput_ack) && in rack_in_gp_window()
4614 SEQ_GEQ(rsm->r_end, tp->gput_ack)) { in rack_in_gp_window()
4618 * |--------------| in rack_in_gp_window()
4619 * |-------->| in rack_in_gp_window()
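/*
 * Standalone sketch of the wraparound-safe overlap test drawn above:
 * signed 32-bit differences (the classic SEQ_LT family) decide whether
 * the map entry [r_start, r_end) touches the measurement window
 * [gput_seq, gput_ack). Macros redefined here for illustration only.
 */
#include <stdint.h>

#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)
#define SEQ_GT(a, b)	((int32_t)((a) - (b)) > 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

static int
range_in_gp_win(uint32_t r_start, uint32_t r_end, uint32_t gput_seq,
    uint32_t gput_ack)
{
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LEQ(r_end, gput_ack))
		return (1);	/* wholly inside the window */
	if (SEQ_LT(r_start, gput_seq) && SEQ_GT(r_end, gput_seq))
		return (1);	/* straddles the front edge */
	if (SEQ_GEQ(r_start, gput_seq) && SEQ_LT(r_start, gput_ack) &&
	    SEQ_GEQ(r_end, gput_ack))
		return (1);	/* straddles the back edge */
	return (0);
}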
4630 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_mark_in_gp_win()
4638 rsm->r_flags |= RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4640 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_mark_in_gp_win()
4649 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_clear_gp_marks()
4651 rsm = tqhash_min(rack->r_ctl.tqh); in rack_clear_gp_marks()
4654 while ((rsm != NULL) && (SEQ_GEQ(tp->gput_ack, rsm->r_start))){ in rack_clear_gp_marks()
4655 rsm->r_flags &= ~RACK_IN_GP_WIN; in rack_clear_gp_marks()
4656 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_clear_gp_marks()
4666 if (tp->snd_una == tp->snd_max) { in rack_tend_gp_marks()
4670 if (SEQ_GT(tp->gput_seq, tp->snd_una)) { in rack_tend_gp_marks()
4677 rsm = tqhash_min(rack->r_ctl.tqh); in rack_tend_gp_marks()
4680 if (SEQ_GEQ(rsm->r_end, tp->gput_seq)) in rack_tend_gp_marks()
4682 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4690 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_tend_gp_marks()
4698 * *before* we started our measurement. The rsm, if non-null in rack_tend_gp_marks()
4703 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4706 if (SEQ_GT(rsm->r_end, tp->gput_ack)) in rack_tend_gp_marks()
4708 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_tend_gp_marks()
4715 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_gp_calc()
4727 log.u_bbr.delRate = rack->r_ctl.gp_bw; in rack_log_gp_calc()
4730 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_gp_calc()
4731 &rack->rc_inp->inp_socket->so_rcv, in rack_log_gp_calc()
4732 &rack->rc_inp->inp_socket->so_snd, in rack_log_gp_calc()
4734 0, &log, false, &rack->r_ctl.act_rcv_time); in rack_log_gp_calc()
4748 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
4749 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_goodput_measurement()
4750 if (TSTMP_GEQ(us_cts, tp->gput_ts)) in rack_do_goodput_measurement()
4751 tim = us_cts - tp->gput_ts; in rack_do_goodput_measurement()
4754 if (rack->r_ctl.rc_gp_cumack_ts > rack->r_ctl.rc_gp_output_ts) in rack_do_goodput_measurement()
4755 stim = rack->r_ctl.rc_gp_cumack_ts - rack->r_ctl.rc_gp_output_ts; in rack_do_goodput_measurement()
4770 rack_log_gpset(rack, th_ack, us_cts, rack->r_ctl.rc_gp_cumack_ts, __LINE__, 3, NULL); in rack_do_goodput_measurement()
4782 if (rack->r_ctl.rc_gp_lowrtt == 0xffffffff) { in rack_do_goodput_measurement()
4809 * the connection could possibly do. This is gated on in rack_do_goodput_measurement()
4816 rack->r_ctl.last_max_bw = rack->r_ctl.rc_gp_high_rwnd; in rack_do_goodput_measurement()
4817 rack->r_ctl.last_max_bw *= HPTS_USEC_IN_SEC; in rack_do_goodput_measurement()
4818 rack->r_ctl.last_max_bw /= rack->r_ctl.rc_gp_lowrtt; in rack_do_goodput_measurement()
4819 if (SEQ_LT(th_ack, tp->gput_seq)) { in rack_do_goodput_measurement()
4827 bytes = (th_ack - tp->gput_seq); in rack_do_goodput_measurement()
4838 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4847 * IW - 2MSS. in rack_do_goodput_measurement()
4849 reqbytes -= (2 * segsiz); in rack_do_goodput_measurement()
4851 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
4853 if ((bytes_ps < reqbytes) || rack->app_limited_needs_set) { in rack_do_goodput_measurement()
4855 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
4863 new_rtt_diff = (int32_t)rack->r_ctl.rc_gp_srtt - (int32_t)rack->r_ctl.rc_prev_gp_srtt; in rack_do_goodput_measurement()
4864 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4866 rack->r_ctl.rc_rtt_diff = new_rtt_diff; in rack_do_goodput_measurement()
4868 if (rack->measure_saw_probe_rtt == 0) { in rack_do_goodput_measurement()
4875 rack->r_ctl.rc_rtt_diff -= (rack->r_ctl.rc_rtt_diff / 8); in rack_do_goodput_measurement()
4876 rack->r_ctl.rc_rtt_diff += (new_rtt_diff / 8); in rack_do_goodput_measurement()
4880 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
4881 rack->r_ctl.rc_rtt_diff, in rack_do_goodput_measurement()
4882 rack->r_ctl.rc_prev_gp_srtt in rack_do_goodput_measurement()
4886 if (bytes_ps > rack->r_ctl.last_max_bw) { in rack_do_goodput_measurement()
4897 bytes_ps, rack->r_ctl.last_max_bw, 0, in rack_do_goodput_measurement()
4899 bytes_ps = rack->r_ctl.last_max_bw; in rack_do_goodput_measurement()
4902 if (rack->rc_gp_filled == 0) { in rack_do_goodput_measurement()
4905 rack->r_ctl.gp_bw = bytes_ps; in rack_do_goodput_measurement()
4906 rack->rc_gp_filled = 1; in rack_do_goodput_measurement()
4907 rack->r_ctl.num_measurements = 1; in rack_do_goodput_measurement()
4908 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_do_goodput_measurement()
4911 rack->r_ctl.rc_app_limited_cnt, in rack_do_goodput_measurement()
4914 if (tcp_in_hpts(rack->rc_tp) && in rack_do_goodput_measurement()
4915 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_goodput_measurement()
4918 * where we transition from un-paced to paced. in rack_do_goodput_measurement()
4924 tcp_hpts_remove(rack->rc_tp); in rack_do_goodput_measurement()
4925 rack->r_ctl.rc_hpts_flags = 0; in rack_do_goodput_measurement()
4926 rack->r_ctl.rc_last_output_to = 0; in rack_do_goodput_measurement()
4929 } else if (rack->r_ctl.num_measurements < RACK_REQ_AVG) { in rack_do_goodput_measurement()
4931 rack->r_ctl.gp_bw += bytes_ps; in rack_do_goodput_measurement()
4932 addpart = rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
4933 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
4934 if (rack->r_ctl.num_measurements >= RACK_REQ_AVG) { in rack_do_goodput_measurement()
4936 rack->r_ctl.gp_bw /= (uint64_t)rack->r_ctl.num_measurements; in rack_do_goodput_measurement()
4955 if (rack->r_ctl.num_measurements < 0xff) { in rack_do_goodput_measurement()
4956 rack->r_ctl.num_measurements++; in rack_do_goodput_measurement()
4958 srtt = (uint64_t)tp->t_srtt; in rack_do_goodput_measurement()
4963 if (rack->r_ctl.rc_rack_min_rtt) in rack_do_goodput_measurement()
4964 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_do_goodput_measurement()
4979 * and non-dynamic... but considering lots of folks in rack_do_goodput_measurement()
4984 if (rack->rc_gp_dyn_mul == 0) { in rack_do_goodput_measurement()
4985 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
4987 if (subpart < (rack->r_ctl.gp_bw / 2)) { in rack_do_goodput_measurement()
5006 subpart = rack->r_ctl.gp_bw / 2; in rack_do_goodput_measurement()
5011 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5012 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5024 subpart = rack->r_ctl.gp_bw * utim; in rack_do_goodput_measurement()
5035 subpart = rack->r_ctl.gp_bw / rack_wma_divisor; in rack_do_goodput_measurement()
5039 if ((rack->measure_saw_probe_rtt == 0) || in rack_do_goodput_measurement()
5040 (bytes_ps > rack->r_ctl.gp_bw)) { in rack_do_goodput_measurement()
5042 * For probe-rtt we only add it in in rack_do_goodput_measurement()
5048 resid_bw = rack->r_ctl.gp_bw - subpart; in rack_do_goodput_measurement()
5049 rack->r_ctl.gp_bw = resid_bw + addpart; in rack_do_goodput_measurement()
5056 * or first-slowstart that ensues. If we ever needed to watch in rack_do_goodput_measurement()
5060 if ((rack->rc_initial_ss_comp == 0) && in rack_do_goodput_measurement()
5061 (rack->r_ctl.num_measurements >= RACK_REQ_AVG)) { in rack_do_goodput_measurement()
5065 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5071 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5072 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_do_goodput_measurement()
5074 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5079 if ((rack->r_ctl.num_measurements == RACK_REQ_AVG) || in rack_do_goodput_measurement()
5080 (rack->r_ctl.last_gpest == 0)) { in rack_do_goodput_measurement()
5087 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5088 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5089 } else if (gp_est >= rack->r_ctl.last_gpest) { in rack_do_goodput_measurement()
5096 gp_est /= rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5097 if ((uint32_t)gp_est > rack->r_ctl.gp_gain_req) { in rack_do_goodput_measurement()
5101 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_goodput_measurement()
5107 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5109 log.u_bbr.flex3 = rack->r_ctl.gp_gain_req; in rack_do_goodput_measurement()
5111 log.u_bbr.cur_del_rate = rack->r_ctl.last_gpest; in rack_do_goodput_measurement()
5116 rack->r_ctl.last_rnd_of_gp_rise = rack->r_ctl.current_round; in rack_do_goodput_measurement()
5117 if (rack->r_ctl.use_gp_not_last == 1) in rack_do_goodput_measurement()
5118 rack->r_ctl.last_gpest = rack->r_ctl.gp_bw; in rack_do_goodput_measurement()
5120 rack->r_ctl.last_gpest = bytes_ps; in rack_do_goodput_measurement()
5124 if ((rack->gp_ready == 0) && in rack_do_goodput_measurement()
5125 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_do_goodput_measurement()
5127 rack->gp_ready = 1; in rack_do_goodput_measurement()
5128 if (rack->dgp_on || in rack_do_goodput_measurement()
5129 rack->rack_hibeta) in rack_do_goodput_measurement()
5131 if (rack->defer_options) in rack_do_goodput_measurement()
5136 /* We do not update any multipliers if we are in or have seen a probe-rtt */ in rack_do_goodput_measurement()
5138 if ((rack->measure_saw_probe_rtt == 0) && in rack_do_goodput_measurement()
5139 rack->rc_gp_rtt_set) { in rack_do_goodput_measurement()
5140 if (rack->rc_skip_timely == 0) { in rack_do_goodput_measurement()
5142 rack->r_ctl.rc_gp_srtt, in rack_do_goodput_measurement()
5143 rack->r_ctl.rc_rtt_diff); in rack_do_goodput_measurement()
5152 rack->r_ctl.gp_bw, /* delRate */ in rack_do_goodput_measurement()
5156 rack->r_ctl.rc_prev_gp_srtt = rack->r_ctl.rc_gp_srtt; in rack_do_goodput_measurement()
5158 rack->r_ctl.rc_loss_at_start = rack->r_ctl.rc_loss_count; in rack_do_goodput_measurement()
5164 rack->rc_gp_rtt_set = 0; in rack_do_goodput_measurement()
5165 rack->rc_gp_saw_rec = 0; in rack_do_goodput_measurement()
5166 rack->rc_gp_saw_ca = 0; in rack_do_goodput_measurement()
5167 rack->rc_gp_saw_ss = 0; in rack_do_goodput_measurement()
5168 rack->rc_dragged_bottom = 0; in rack_do_goodput_measurement()
5176 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_GPUT, in rack_do_goodput_measurement()
5183 if (tp->t_stats_gput_prev > 0) in rack_do_goodput_measurement()
5184 stats_voi_update_abs_s32(tp->t_stats, in rack_do_goodput_measurement()
5186 ((gput - tp->t_stats_gput_prev) * 100) / in rack_do_goodput_measurement()
5187 tp->t_stats_gput_prev); in rack_do_goodput_measurement()
5189 tp->t_stats_gput_prev = gput; in rack_do_goodput_measurement()
5191 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5196 * We don't do the other case i.e. non-app-limited here since in rack_do_goodput_measurement()
5199 if (rack->r_ctl.rc_first_appl && in rack_do_goodput_measurement()
5200 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_goodput_measurement()
5201 rack->r_ctl.rc_app_limited_cnt && in rack_do_goodput_measurement()
5202 (SEQ_GT(rack->r_ctl.rc_first_appl->r_start, th_ack)) && in rack_do_goodput_measurement()
5203 ((rack->r_ctl.rc_first_appl->r_end - th_ack) > in rack_do_goodput_measurement()
5210 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_do_goodput_measurement()
5211 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_do_goodput_measurement()
5212 tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_do_goodput_measurement()
5213 rack->app_limited_needs_set = 0; in rack_do_goodput_measurement()
5214 tp->gput_seq = th_ack; in rack_do_goodput_measurement()
5215 if (rack->in_probe_rtt) in rack_do_goodput_measurement()
5216 rack->measure_saw_probe_rtt = 1; in rack_do_goodput_measurement()
5217 else if ((rack->measure_saw_probe_rtt) && in rack_do_goodput_measurement()
5218 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_do_goodput_measurement()
5219 rack->measure_saw_probe_rtt = 0; in rack_do_goodput_measurement()
5220 if ((rack->r_ctl.rc_first_appl->r_end - th_ack) >= rack_get_measure_window(tp, rack)) { in rack_do_goodput_measurement()
5222 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_do_goodput_measurement()
5225 tp->gput_ack = tp->gput_seq + (rack->r_ctl.rc_first_appl->r_end - th_ack); in rack_do_goodput_measurement()
5226 if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) { in rack_do_goodput_measurement()
5230 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_goodput_measurement()
5231 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_do_goodput_measurement()
5236 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_do_goodput_measurement()
5242 if (sbavail(&tptosocket(tp)->so_snd) < (tp->gput_ack - tp->gput_seq)) { in rack_do_goodput_measurement()
5247 tp->t_flags |= TF_GPUTINPROG; in rack_do_goodput_measurement()
5249 * Now we need to find the timestamp of the send at tp->gput_seq in rack_do_goodput_measurement()
5252 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_do_goodput_measurement()
5253 rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_do_goodput_measurement()
5255 /* Ok send-based limit is set */ in rack_do_goodput_measurement()
5256 if (SEQ_LT(rsm->r_start, tp->gput_seq)) { in rack_do_goodput_measurement()
5263 tp->gput_seq = rsm->r_start; in rack_do_goodput_measurement()
5265 if (rsm->r_flags & RACK_ACKED) { in rack_do_goodput_measurement()
5268 tp->gput_ts = (uint32_t)rsm->r_ack_arrival; in rack_do_goodput_measurement()
5269 tp->gput_seq = rsm->r_end; in rack_do_goodput_measurement()
5270 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_do_goodput_measurement()
5274 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5277 rack->app_limited_needs_set = 1; in rack_do_goodput_measurement()
5279 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_do_goodput_measurement()
5283 * send-limit set the current time, which in rack_do_goodput_measurement()
5284 * basically disables the send-limit. in rack_do_goodput_measurement()
5289 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_do_goodput_measurement()
5293 tp->gput_seq, in rack_do_goodput_measurement()
5294 tp->gput_ack, in rack_do_goodput_measurement()
5296 tp->gput_ts, in rack_do_goodput_measurement()
5297 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_do_goodput_measurement()
5300 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_do_goodput_measurement()
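/*
 * Sketch of the time-weighted average kept in gp_bw above: a sample
 * covering utim usec replaces the matching fraction of an 8*srtt
 * history window, and one sample is never allowed to displace more
 * than half of the old estimate. Standalone illustrative form.
 */
#include <stdint.h>

static uint64_t
gp_bw_wma(uint64_t gp_bw, uint64_t sample_bps, uint64_t utim_us,
    uint64_t srtt_us)
{
	uint64_t window, subpart, addpart;

	window = (srtt_us ? srtt_us : 1) * 8;
	subpart = (gp_bw * utim_us) / window;
	addpart = (sample_bps * utim_us) / window;
	if (subpart > (gp_bw / 2)) {
		/* Cap the sample's weight at half the history. */
		subpart = gp_bw / 2;
		addpart = sample_bps / 2;
	}
	return (gp_bw - subpart + addpart);
}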
5322 tp->t_ccv.nsegs = nsegs; in rack_ack_received()
5323 acked = tp->t_ccv.bytes_this_ack = (th_ack - tp->snd_una); in rack_ack_received()
5324 if ((post_recovery) && (rack->r_ctl.rc_early_recovery_segs)) { in rack_ack_received()
5327 max = rack->r_ctl.rc_early_recovery_segs * ctf_fixed_maxseg(tp); in rack_ack_received()
5328 if (tp->t_ccv.bytes_this_ack > max) { in rack_ack_received()
5329 tp->t_ccv.bytes_this_ack = max; in rack_ack_received()
5333 stats_voi_update_abs_s32(tp->t_stats, VOI_TCP_CALCFRWINDIFF, in rack_ack_received()
5334 ((int32_t)rack->r_ctl.cwnd_to_use) - tp->snd_wnd); in rack_ack_received()
5336 if ((th_ack == tp->snd_max) && rack->lt_bw_up) { in rack_ack_received()
5345 rack->r_ctl.lt_bw_bytes += (tp->snd_max - rack->r_ctl.lt_seq); in rack_ack_received()
5346 rack->r_ctl.lt_seq = tp->snd_max; in rack_ack_received()
5347 tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); in rack_ack_received()
5348 if (tmark >= rack->r_ctl.lt_timemark) { in rack_ack_received()
5349 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_ack_received()
5351 rack->r_ctl.lt_timemark = tmark; in rack_ack_received()
5352 rack->lt_bw_up = 0; in rack_ack_received()
5355 if ((tp->t_flags & TF_GPUTINPROG) && in rack_ack_received()
5361 if (tp->snd_cwnd <= tp->snd_wnd) in rack_ack_received()
5362 tp->t_ccv.flags |= CCF_CWND_LIMITED; in rack_ack_received()
5364 tp->t_ccv.flags &= ~CCF_CWND_LIMITED; in rack_ack_received()
5365 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_ack_received()
5366 tp->t_bytes_acked += min(tp->t_ccv.bytes_this_ack, in rack_ack_received()
5369 if (tp->t_bytes_acked >= rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5370 tp->t_bytes_acked -= rack->r_ctl.cwnd_to_use; in rack_ack_received()
5371 tp->t_ccv.flags |= CCF_ABC_SENTAWND; in rack_ack_received()
5374 tp->t_ccv.flags &= ~CCF_ABC_SENTAWND; in rack_ack_received()
5375 tp->t_bytes_acked = 0; in rack_ack_received()
5377 prior_cwnd = tp->snd_cwnd; in rack_ack_received()
5378 if ((post_recovery == 0) || (rack_max_abc_post_recovery == 0) || rack->r_use_labc_for_rec || in rack_ack_received()
5379 (rack_client_low_buf && rack->client_bufferlvl && in rack_ack_received()
5380 (rack->client_bufferlvl < rack_client_low_buf))) in rack_ack_received()
5381 labc_to_use = rack->rc_labc; in rack_ack_received()
5384 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_ack_received()
5391 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_ack_received()
5392 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_ack_received()
5393 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_ack_received()
5401 if (CC_ALGO(tp)->ack_received != NULL) { in rack_ack_received()
5403 tp->t_ccv.curack = th_ack; in rack_ack_received()
5404 tp->t_ccv.labc = labc_to_use; in rack_ack_received()
5405 tp->t_ccv.flags |= CCF_USE_LOCAL_ABC; in rack_ack_received()
5406 CC_ALGO(tp)->ack_received(&tp->t_ccv, type); in rack_ack_received()
5409 lgb->tlb_stackinfo.u_bbr.flex6 = tp->snd_cwnd; in rack_ack_received()
5411 if (rack->r_must_retran) { in rack_ack_received()
5412 if (SEQ_GEQ(th_ack, rack->r_ctl.rc_snd_max_at_rto)) { in rack_ack_received()
5417 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5418 rack->r_must_retran = 0; in rack_ack_received()
5419 } else if ((prior_cwnd + ctf_fixed_maxseg(tp)) <= tp->snd_cwnd) { in rack_ack_received()
5426 if (acked <= rack->r_ctl.rc_out_at_rto){ in rack_ack_received()
5427 rack->r_ctl.rc_out_at_rto -= acked; in rack_ack_received()
5429 rack->r_ctl.rc_out_at_rto = 0; in rack_ack_received()
5434 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_LCWIN, rack->r_ctl.cwnd_to_use); in rack_ack_received()
5436 if (rack->r_ctl.rc_rack_largest_cwnd < rack->r_ctl.cwnd_to_use) { in rack_ack_received()
5437 rack->r_ctl.rc_rack_largest_cwnd = rack->r_ctl.cwnd_to_use; in rack_ack_received()
5439 if ((rack->rc_initial_ss_comp == 0) && in rack_ack_received()
5440 (tp->snd_cwnd >= tp->snd_ssthresh)) { in rack_ack_received()
5445 rack->rc_initial_ss_comp = 1; in rack_ack_received()
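/*
 * Sketch of the lt_bw bookkeeping done above when the send window
 * empties: the bytes newly covered by the cumulative ack and the time
 * the sampler has been running are folded into running totals and the
 * sampler pauses until new data goes out. Illustrative structure.
 */
#include <stdint.h>

struct lt_bw_acc {
	uint64_t bytes;		/* accumulated payload bytes */
	uint64_t time_us;	/* accumulated active time */
	uint64_t timemark;	/* start of the current active span */
	uint32_t seq;		/* sequence covered so far */
	int	 up;		/* sampler currently running? */
};

static void
lt_bw_pause(struct lt_bw_acc *a, uint32_t snd_max, uint64_t now_us)
{
	if (!a->up)
		return;
	a->bytes += (uint32_t)(snd_max - a->seq);
	a->seq = snd_max;
	if (now_us >= a->timemark)
		a->time_us += now_us - a->timemark;
	a->timemark = now_us;
	a->up = 0;
}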
5454 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_partialack()
5463 if ((rack->r_ctl.rc_prr_sndcnt > 0) || in tcp_rack_partialack()
5464 rack->rack_no_prr) in tcp_rack_partialack()
5465 rack->r_wanted_output = 1; in tcp_rack_partialack()
5474 EXIT_RECOVERY(tp->t_flags); in rack_exit_recovery()
5483 orig_cwnd = tp->snd_cwnd; in rack_post_recovery()
5485 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_post_recovery()
5487 if (CC_ALGO(tp)->post_recovery != NULL) { in rack_post_recovery()
5488 tp->t_ccv.curack = th_ack; in rack_post_recovery()
5489 CC_ALGO(tp)->post_recovery(&tp->t_ccv); in rack_post_recovery()
5490 if (tp->snd_cwnd < tp->snd_ssthresh) { in rack_post_recovery()
5494 * snd_ssthresh per RFC-6582 (option 2). in rack_post_recovery()
5496 tp->snd_cwnd = tp->snd_ssthresh; in rack_post_recovery()
5499 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_post_recovery()
5506 log.u_bbr.flex2 = tp->t_ccv.flags; in rack_post_recovery()
5507 log.u_bbr.flex3 = tp->t_ccv.bytes_this_ack; in rack_post_recovery()
5508 log.u_bbr.flex4 = tp->t_ccv.nsegs; in rack_post_recovery()
5512 log.u_bbr.pkts_out = rack->r_ctl.rc_prr_sndcnt; in rack_post_recovery()
5517 if ((rack->rack_no_prr == 0) && in rack_post_recovery()
5518 (rack->no_prr_addback == 0) && in rack_post_recovery()
5519 (rack->r_ctl.rc_prr_sndcnt > 0)) { in rack_post_recovery()
5524 if (ctf_outstanding(tp) <= sbavail(&tptosocket(tp)->so_snd)) { in rack_post_recovery()
5534 tp->snd_cwnd += min((ctf_fixed_maxseg(tp) * rack_prr_addbackmax), in rack_post_recovery()
5535 rack->r_ctl.rc_prr_sndcnt); in rack_post_recovery()
5537 rack->r_ctl.rc_prr_sndcnt = 0; in rack_post_recovery()
5541 tp->snd_recover = tp->snd_una; in rack_post_recovery()
5542 if (rack->r_ctl.dsack_persist) { in rack_post_recovery()
5543 rack->r_ctl.dsack_persist--; in rack_post_recovery()
5544 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_post_recovery()
5545 rack->r_ctl.num_dsack = 0; in rack_post_recovery()
5549 if (rack->rto_from_rec == 1) { in rack_post_recovery()
5550 rack->rto_from_rec = 0; in rack_post_recovery()
5551 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_post_recovery()
5552 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_post_recovery()
5565 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_CSIG, type); in rack_cong_signal()
5567 if (IN_RECOVERY(tp->t_flags) == 0) { in rack_cong_signal()
5569 ssthresh_enter = tp->snd_ssthresh; in rack_cong_signal()
5570 cwnd_enter = tp->snd_cwnd; in rack_cong_signal()
5573 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cong_signal()
5576 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5577 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5578 if (!IN_FASTRECOVERY(tp->t_flags)) { in rack_cong_signal()
5579 /* Check if this is the end of the initial Start-up i.e. initial slow-start */ in rack_cong_signal()
5580 if (rack->rc_initial_ss_comp == 0) { in rack_cong_signal()
5582 rack->rc_initial_ss_comp = 1; in rack_cong_signal()
5584 rack->r_ctl.rc_prr_delivered = 0; in rack_cong_signal()
5585 rack->r_ctl.rc_prr_out = 0; in rack_cong_signal()
5586 rack->r_fast_output = 0; in rack_cong_signal()
5587 if (rack->rack_no_prr == 0) { in rack_cong_signal()
5588 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_cong_signal()
5591 rack->r_ctl.rc_prr_recovery_fs = tp->snd_max - tp->snd_una; in rack_cong_signal()
5592 tp->snd_recover = tp->snd_max; in rack_cong_signal()
5593 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5594 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5598 if (!IN_CONGRECOVERY(tp->t_flags) || in rack_cong_signal()
5603 SEQ_GEQ(ack, tp->snd_recover)) { in rack_cong_signal()
5604 EXIT_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5606 rack->r_fast_output = 0; in rack_cong_signal()
5607 tp->snd_recover = tp->snd_max + 1; in rack_cong_signal()
5608 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5609 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5613 tp->t_dupacks = 0; in rack_cong_signal()
5614 tp->t_bytes_acked = 0; in rack_cong_signal()
5615 rack->r_fast_output = 0; in rack_cong_signal()
5616 if (IN_RECOVERY(tp->t_flags)) in rack_cong_signal()
5618 orig_cwnd = tp->snd_cwnd; in rack_cong_signal()
5620 if (CC_ALGO(tp)->cong_signal == NULL) { in rack_cong_signal()
5622 tp->snd_ssthresh = max(2, in rack_cong_signal()
5623 min(tp->snd_wnd, rack->r_ctl.cwnd_to_use) / 2 / in rack_cong_signal()
5625 tp->snd_cwnd = ctf_fixed_maxseg(tp); in rack_cong_signal()
5627 if (tp->t_flags2 & TF2_ECN_PERMIT) in rack_cong_signal()
5628 tp->t_flags2 |= TF2_ECN_SND_CWR; in rack_cong_signal()
5633 tp->snd_cwnd = tp->snd_cwnd_prev; in rack_cong_signal()
5634 tp->snd_ssthresh = tp->snd_ssthresh_prev; in rack_cong_signal()
5635 tp->snd_recover = tp->snd_recover_prev; in rack_cong_signal()
5636 if (tp->t_flags & TF_WASFRECOVERY) { in rack_cong_signal()
5637 ENTER_FASTRECOVERY(tp->t_flags); in rack_cong_signal()
5638 tp->t_flags &= ~TF_WASFRECOVERY; in rack_cong_signal()
5640 if (tp->t_flags & TF_WASCRECOVERY) { in rack_cong_signal()
5641 ENTER_CONGRECOVERY(tp->t_flags); in rack_cong_signal()
5642 tp->t_flags &= ~TF_WASCRECOVERY; in rack_cong_signal()
5644 tp->snd_nxt = tp->snd_max; in rack_cong_signal()
5645 tp->t_badrxtwin = 0; in rack_cong_signal()
5648 if ((CC_ALGO(tp)->cong_signal != NULL) && in rack_cong_signal()
5650 tp->t_ccv.curack = ack; in rack_cong_signal()
5651 CC_ALGO(tp)->cong_signal(&tp->t_ccv, type); in rack_cong_signal()
5653 if ((in_rec_at_entry == 0) && IN_RECOVERY(tp->t_flags)) { in rack_cong_signal()
5655 rack->r_ctl.dsack_byte_cnt = 0; in rack_cong_signal()
5656 rack->r_ctl.retran_during_recovery = 0; in rack_cong_signal()
5657 rack->r_ctl.rc_cwnd_at_erec = cwnd_enter; in rack_cong_signal()
5658 rack->r_ctl.rc_ssthresh_at_erec = ssthresh_enter; in rack_cong_signal()
5659 rack->r_ent_rec_ns = 1; in rack_cong_signal()
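/*
 * Sketch of the fallback ssthresh cut made above when the CC module
 * has no cong_signal method: half of min(snd_wnd, cwnd), expressed in
 * whole segments and floored at two segments. segsiz assumed non-zero.
 */
#include <stdint.h>

static uint32_t
ssthresh_on_loss(uint32_t snd_wnd, uint32_t cwnd, uint32_t segsiz)
{
	uint32_t segs;

	segs = ((snd_wnd < cwnd) ? snd_wnd : cwnd) / 2 / segsiz;
	if (segs < 2)
		segs = 2;	/* never below two segments */
	return (segs * segsiz);
}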
5670 if (CC_ALGO(tp)->after_idle != NULL) in rack_cc_after_idle()
5671 CC_ALGO(tp)->after_idle(&tp->t_ccv); in rack_cc_after_idle()
5673 if (tp->snd_cwnd == 1) in rack_cc_after_idle()
5674 i_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ in rack_cc_after_idle()
5683 if (tp->snd_cwnd < i_cwnd) { in rack_cc_after_idle()
5684 tp->snd_cwnd = i_cwnd; in rack_cc_after_idle()
5691 * - There is no delayed ack timer in progress.
5692 * - Our last ack wasn't a 0-sized window. We never want to delay
5693 * the ack that opens up a 0-sized window.
5694 * - LRO wasn't used for this segment. We make sure by checking that the
5696 * - Delayed acks are enabled or this is a half-synchronized T/TCP
5700 (((tp->t_flags & TF_RXWIN0SENT) == 0) && \
5701 ((tp->t_flags & TF_DELACK) == 0) && \
5702 (tlen <= tp->t_maxseg) && \
5703 (tp->t_delayed_ack || (tp->t_flags & TF_NEEDSYN)))
5711 * Walk the time-ordered transmitted list looking for an rsm that is in rack_find_lowest_rsm()
5715 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_find_lowest_rsm()
5716 if (rsm->r_flags & RACK_ACKED) { in rack_find_lowest_rsm()
5737 TQHASH_FOREACH_REVERSE_FROM(prsm, rack->r_ctl.tqh) { in rack_find_high_nonack()
5738 if (prsm->r_flags & (RACK_ACKED | RACK_HAS_FIN)) { in rack_find_high_nonack()
5757 * If reorder-fade is configured, then we track the last time we saw in rack_calc_thresh_rack()
5758 * re-ordering occur. If we reach the point where enough time has in rack_calc_thresh_rack()
5761 * Or if reorder-fade is 0, then once we see reordering we consider in rack_calc_thresh_rack()
5765 * In the end if lro is non-zero we add the extra time for in rack_calc_thresh_rack()
5770 if (rack->r_ctl.rc_reorder_ts) { in rack_calc_thresh_rack()
5771 if (rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5772 if (SEQ_GEQ(cts, rack->r_ctl.rc_reorder_ts)) { in rack_calc_thresh_rack()
5773 lro = cts - rack->r_ctl.rc_reorder_ts; in rack_calc_thresh_rack()
5785 if (lro > rack->r_ctl.rc_reorder_fade) { in rack_calc_thresh_rack()
5787 rack->r_ctl.rc_reorder_ts = 0; in rack_calc_thresh_rack()
5797 if (rack->rc_rack_tmr_std_based == 0) { in rack_calc_thresh_rack()
5798 thresh = srtt + rack->r_ctl.rc_pkt_delay; in rack_calc_thresh_rack()
5800 /* Standards based pkt-delay is 1/4 srtt */ in rack_calc_thresh_rack()
5803 if (lro && (rack->rc_rack_tmr_std_based == 0)) { in rack_calc_thresh_rack()
5805 if (rack->r_ctl.rc_reorder_shift) in rack_calc_thresh_rack()
5806 thresh += (srtt >> rack->r_ctl.rc_reorder_shift); in rack_calc_thresh_rack()
5810 if (rack->rc_rack_use_dsack && in rack_calc_thresh_rack()
5812 (rack->r_ctl.num_dsack > 0)) { in rack_calc_thresh_rack()
5817 thresh += rack->r_ctl.num_dsack * (srtt >> 2); in rack_calc_thresh_rack()
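/*
 * Sketch of the reorder threshold assembled above: one srtt plus the
 * per-packet delay (std-based mode uses srtt/4 instead), plus
 * srtt >> reorder_shift while reordering is considered live (srtt/4
 * when no shift is set), plus a quarter srtt per recent DSACK.
 * Illustrative, standalone form; times in microseconds.
 */
#include <stdint.h>

static uint32_t
rack_thresh_sketch(uint32_t srtt, uint32_t pkt_delay, int reordering_live,
    uint32_t reorder_shift, uint32_t num_dsack)
{
	uint32_t thresh;

	thresh = srtt + pkt_delay;
	if (reordering_live)
		thresh += (reorder_shift ? (srtt >> reorder_shift) :
		    (srtt >> 2));
	thresh += num_dsack * (srtt >> 2);
	return (thresh);
}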
5844 if (rack->r_ctl.rc_tlp_threshold) in rack_calc_thresh_tlp()
5845 thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold); in rack_calc_thresh_tlp()
5850 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_calc_thresh_tlp()
5851 len = rsm->r_end - rsm->r_start; in rack_calc_thresh_tlp()
5852 if (rack->rack_tlp_threshold_use == TLP_USE_ID) { in rack_calc_thresh_tlp()
5854 if (((tp->snd_max - tp->snd_una) - rack->r_ctl.rc_sacked + rack->r_ctl.rc_holes_rxt) <= segsiz) { in rack_calc_thresh_tlp()
5857 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
5863 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_ONE) { in rack_calc_thresh_tlp()
5869 * possible inter-packet delay (if any). in rack_calc_thresh_tlp()
5874 idx = rsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
5875 nidx = prsm->r_rtr_cnt - 1; in rack_calc_thresh_tlp()
5876 if (rsm->r_tim_lastsent[nidx] >= prsm->r_tim_lastsent[idx]) { in rack_calc_thresh_tlp()
5878 inter_gap = rsm->r_tim_lastsent[idx] - prsm->r_tim_lastsent[nidx]; in rack_calc_thresh_tlp()
5883 * Possibly compensate for delayed-ack. in rack_calc_thresh_tlp()
5891 } else if (rack->rack_tlp_threshold_use == TLP_USE_TWO_TWO) { in rack_calc_thresh_tlp()
5896 * Compensate for delayed-ack with the d-ack time. in rack_calc_thresh_tlp()
5904 if (thresh > tp->t_rxtcur) { in rack_calc_thresh_tlp()
5905 thresh = tp->t_rxtcur; in rack_calc_thresh_tlp()
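/*
 * Sketch of the shape of the TLP threshold above: however the base is
 * chosen (a fraction of srtt, twice srtt, an observed inter-packet
 * gap, or delayed-ack padding, depending on the configured mode), the
 * result is capped at t_rxtcur, since past that point the full RTO is
 * the better recovery. Illustrative simplification of the modes.
 */
#include <stdint.h>

static uint32_t
tlp_thresh_sketch(uint32_t srtt, uint32_t divisor, uint32_t dack_us,
    uint32_t t_rxtcur)
{
	uint32_t thresh;

	if (divisor)
		thresh = srtt + (srtt / divisor);	/* e.g. 1.5 * srtt */
	else
		thresh = srtt * 2;
	thresh += dack_us;		/* compensate for a delayed ack */
	if (thresh > t_rxtcur)
		thresh = t_rxtcur;	/* never schedule past the RTO */
	return (thresh);
}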
5929 if (rack->rc_rack_rtt) in rack_grab_rtt()
5930 return (rack->rc_rack_rtt); in rack_grab_rtt()
5931 else if (tp->t_srtt == 0) in rack_grab_rtt()
5933 return (tp->t_srtt); in rack_grab_rtt()
5949 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_recovery_mode()
5950 if (tqhash_empty(rack->r_ctl.tqh)) { in rack_check_recovery_mode()
5953 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_check_recovery_mode()
5958 if (rsm->r_flags & RACK_ACKED) { in rack_check_recovery_mode()
5963 idx = rsm->r_rtr_cnt - 1; in rack_check_recovery_mode()
5966 if (TSTMP_LT(tsused, ((uint32_t)rsm->r_tim_lastsent[idx]))) { in rack_check_recovery_mode()
5969 if ((tsused - ((uint32_t)rsm->r_tim_lastsent[idx])) < thresh) { in rack_check_recovery_mode()
5972 /* Ok if we reach here we are overdue and this guy can be sent */ in rack_check_recovery_mode()
5973 rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__); in rack_check_recovery_mode()
5984 t = (tp->t_srtt + (tp->t_rttvar << 2)); in rack_get_persists_timer_val()
5985 RACK_TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], in rack_get_persists_timer_val()
5986 rack_persist_min, rack_persist_max, rack->r_ctl.timer_slop); in rack_get_persists_timer_val()
5987 rack->r_ctl.rc_hpts_flags |= PACE_TMR_PERSIT; in rack_get_persists_timer_val()
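/*
 * Sketch of the persist timer value above: the classic rexmt base
 * (srtt + 4 * rttvar) scaled by the shared tcp_backoff[] entry for
 * the current shift, then clamped into [rack_persist_min,
 * rack_persist_max] the way RACK_TCPT_RANGESET does. Illustrative.
 */
#include <stdint.h>

static uint32_t
persist_timer_sketch(uint32_t srtt, uint32_t rttvar, uint32_t backoff,
    uint32_t tmin, uint32_t tmax)
{
	uint64_t tt;

	tt = (uint64_t)(srtt + (rttvar << 2)) * backoff;
	if (tt < tmin)
		tt = tmin;
	if (tt > tmax)
		tt = tmax;
	return ((uint32_t)tt);
}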
6007 if (rack->t_timers_stopped) { in rack_timer_start()
6011 if (rack->rc_in_persist) { in rack_timer_start()
6015 rack->rc_on_min_to = 0; in rack_timer_start()
6016 if ((tp->t_state < TCPS_ESTABLISHED) || in rack_timer_start()
6017 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_timer_start()
6020 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6025 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_start()
6032 * recently that's the discount we want to use (now - timer time). in rack_timer_start()
6034 * we want to use that (now - oldest-packet-last_transmit_time). in rack_timer_start()
6037 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6038 if (TSTMP_GEQ(rack->r_ctl.rc_tlp_rxt_last_time, ((uint32_t)rsm->r_tim_lastsent[idx]))) in rack_timer_start()
6039 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6041 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6043 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6045 if (SEQ_LT(tp->snd_una, tp->snd_max) || in rack_timer_start()
6046 sbavail(&tptosocket(tp)->so_snd)) { in rack_timer_start()
6047 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RXT; in rack_timer_start()
6048 to = tp->t_rxtcur; in rack_timer_start()
6050 to -= time_since_sent; in rack_timer_start()
6052 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6056 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timer_start()
6061 * of the keep-init timeout. in rack_timer_start()
6066 if (TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) { in rack_timer_start()
6067 red = (cts - (uint32_t)rsm->r_tim_lastsent[0]); in rack_timer_start()
6069 max_time -= red; in rack_timer_start()
6081 if (rsm->r_flags & RACK_ACKED) { in rack_timer_start()
6089 if ((rsm->r_flags & RACK_SACK_PASSED) || in rack_timer_start()
6090 (rsm->r_flags & RACK_RWND_COLLAPSED) || in rack_timer_start()
6091 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in rack_timer_start()
6092 if ((tp->t_flags & TF_SENTFIN) && in rack_timer_start()
6093 ((tp->snd_max - tp->snd_una) == 1) && in rack_timer_start()
6094 (rsm->r_flags & RACK_HAS_FIN)) { in rack_timer_start()
6101 if ((rack->use_rack_rr == 0) && in rack_timer_start()
6102 (IN_FASTRECOVERY(tp->t_flags)) && in rack_timer_start()
6103 (rack->rack_no_prr == 0) && in rack_timer_start()
6104 (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) { in rack_timer_start()
6111 * get to use the rack-cheat. in rack_timer_start()
6117 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6118 exp = ((uint32_t)rsm->r_tim_lastsent[idx]) + thresh; in rack_timer_start()
6120 to = exp - cts; in rack_timer_start()
6121 if (to < rack->r_ctl.rc_min_to) { in rack_timer_start()
6122 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6123 if (rack->r_rr_config == 3) in rack_timer_start()
6124 rack->rc_on_min_to = 1; in rack_timer_start()
6127 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6128 if (rack->r_rr_config == 3) in rack_timer_start()
6129 rack->rc_on_min_to = 1; in rack_timer_start()
6134 if ((rack->rc_tlp_in_progress != 0) && in rack_timer_start()
6135 (rack->r_ctl.rc_tlp_cnt_out >= rack_tlp_limit)) { in rack_timer_start()
6142 rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext); in rack_timer_start()
6147 if (rsm->r_flags & RACK_HAS_FIN) { in rack_timer_start()
6152 idx = rsm->r_rtr_cnt - 1; in rack_timer_start()
6154 if (TSTMP_GEQ(((uint32_t)rsm->r_tim_lastsent[idx]), rack->r_ctl.rc_tlp_rxt_last_time)) in rack_timer_start()
6155 tstmp_touse = (uint32_t)rsm->r_tim_lastsent[idx]; in rack_timer_start()
6157 tstmp_touse = (uint32_t)rack->r_ctl.rc_tlp_rxt_last_time; in rack_timer_start()
6159 time_since_sent = cts - tstmp_touse; in rack_timer_start()
6161 if (tp->t_srtt) { in rack_timer_start()
6162 if ((rack->rc_srtt_measure_made == 0) && in rack_timer_start()
6163 (tp->t_srtt == 1)) { in rack_timer_start()
6170 srtt_cur = tp->t_srtt; in rack_timer_start()
6181 tp->t_srtt && in rack_timer_start()
6187 to = thresh - time_since_sent; in rack_timer_start()
6189 to = rack->r_ctl.rc_min_to; in rack_timer_start()
6194 rack->r_ctl.rc_tlp_rxt_last_time, /* flex4 */ in rack_timer_start()
6195 (uint32_t)rsm->r_tim_lastsent[idx], in rack_timer_start()
6211 rack->r_ctl.rc_hpts_flags |= PACE_TMR_RACK; in rack_timer_start()
6213 rack->r_ctl.rc_hpts_flags |= PACE_TMR_TLP; in rack_timer_start()
6223 if (rack->rc_in_persist == 0) { in rack_enter_persist()
6224 if (tp->t_flags & TF_GPUTINPROG) { in rack_enter_persist()
6229 rack_do_goodput_measurement(tp, rack, tp->snd_una, __LINE__, in rack_enter_persist()
6233 if (rack->r_ctl.rc_scw) { in rack_enter_persist()
6234 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_enter_persist()
6235 rack->rack_scwnd_is_idle = 1; in rack_enter_persist()
6238 rack->r_ctl.rc_went_idle_time = cts; in rack_enter_persist()
6239 if (rack->r_ctl.rc_went_idle_time == 0) in rack_enter_persist()
6240 rack->r_ctl.rc_went_idle_time = 1; in rack_enter_persist()
6241 if (rack->lt_bw_up) { in rack_enter_persist()
6245 rack->r_ctl.lt_bw_bytes += (snd_una - rack->r_ctl.lt_seq); in rack_enter_persist()
6246 rack->r_ctl.lt_seq = snd_una; in rack_enter_persist()
6247 tmark = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); in rack_enter_persist()
6248 if (tmark >= rack->r_ctl.lt_timemark) { in rack_enter_persist()
6249 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_enter_persist()
6251 rack->r_ctl.lt_timemark = tmark; in rack_enter_persist()
6252 rack->lt_bw_up = 0; in rack_enter_persist()
6253 rack->r_persist_lt_bw_off = 1; in rack_enter_persist()
6256 rack->r_ctl.persist_lost_ends = 0; in rack_enter_persist()
6257 rack->probe_not_answered = 0; in rack_enter_persist()
6258 rack->forced_ack = 0; in rack_enter_persist()
6259 tp->t_rxtshift = 0; in rack_enter_persist()
6260 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_enter_persist()
6261 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_enter_persist()
6262 rack->rc_in_persist = 1; in rack_enter_persist()
6269 if (tcp_in_hpts(rack->rc_tp)) { in rack_exit_persist()
6270 tcp_hpts_remove(rack->rc_tp); in rack_exit_persist()
6271 rack->r_ctl.rc_hpts_flags = 0; in rack_exit_persist()
6274 if (rack->r_ctl.rc_scw) { in rack_exit_persist()
6275 tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_exit_persist()
6276 rack->rack_scwnd_is_idle = 0; in rack_exit_persist()
6279 if (rack->rc_gp_dyn_mul && in rack_exit_persist()
6280 (rack->use_fixed_rate == 0) && in rack_exit_persist()
6281 (rack->rc_always_pace)) { in rack_exit_persist()
6283 * Do we count this as if a probe-rtt just in rack_exit_persist()
6288 time_idle = cts - rack->r_ctl.rc_went_idle_time; in rack_exit_persist()
6292 extra = (uint64_t)rack->r_ctl.rc_gp_srtt * in rack_exit_persist()
6298 /* Yes, we count it as a probe-rtt. */ in rack_exit_persist()
6302 if (rack->in_probe_rtt == 0) { in rack_exit_persist()
6303 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_exit_persist()
6304 rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6305 rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6306 rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts; in rack_exit_persist()
6312 if (rack->r_persist_lt_bw_off) { in rack_exit_persist()
6314 rack->r_ctl.lt_timemark = tcp_get_u64_usecs(NULL); in rack_exit_persist()
6315 rack->lt_bw_up = 1; in rack_exit_persist()
6316 rack->r_persist_lt_bw_off = 0; in rack_exit_persist()
6318 rack->rc_in_persist = 0; in rack_exit_persist()
6319 rack->r_ctl.rc_went_idle_time = 0; in rack_exit_persist()
6320 tp->t_rxtshift = 0; in rack_exit_persist()
6321 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_exit_persist()
6322 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_exit_persist()
6323 rack->r_ctl.rc_agg_delayed = 0; in rack_exit_persist()
6324 rack->r_early = 0; in rack_exit_persist()
6325 rack->r_late = 0; in rack_exit_persist()
6326 rack->r_ctl.rc_agg_early = 0; in rack_exit_persist()
6333 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_hpts_diag()
6337 log.u_bbr.flex1 = diag->p_nxt_slot; in rack_log_hpts_diag()
6338 log.u_bbr.flex2 = diag->p_cur_slot; in rack_log_hpts_diag()
6339 log.u_bbr.flex3 = diag->slot_req; in rack_log_hpts_diag()
6340 log.u_bbr.flex4 = diag->inp_hptsslot; in rack_log_hpts_diag()
6341 log.u_bbr.flex5 = diag->time_remaining; in rack_log_hpts_diag()
6342 log.u_bbr.flex6 = diag->need_new_to; in rack_log_hpts_diag()
6343 log.u_bbr.flex7 = diag->p_hpts_active; in rack_log_hpts_diag()
6344 log.u_bbr.flex8 = diag->p_on_min_sleep; in rack_log_hpts_diag()
6346 log.u_bbr.epoch = diag->have_slept; in rack_log_hpts_diag()
6347 log.u_bbr.lt_epoch = diag->yet_to_sleep; in rack_log_hpts_diag()
6348 log.u_bbr.pkts_out = diag->co_ret; in rack_log_hpts_diag()
6349 log.u_bbr.applimited = diag->hpts_sleep_time; in rack_log_hpts_diag()
6350 log.u_bbr.delivered = diag->p_prev_slot; in rack_log_hpts_diag()
6351 log.u_bbr.inflight = diag->p_runningslot; in rack_log_hpts_diag()
6352 log.u_bbr.bw_inuse = diag->wheel_slot; in rack_log_hpts_diag()
6353 log.u_bbr.rttProp = diag->wheel_cts; in rack_log_hpts_diag()
6355 log.u_bbr.delRate = diag->maxslots; in rack_log_hpts_diag()
6356 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_hpts_diag()
6357 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hpts_diag()
6358 &rack->rc_inp->inp_socket->so_snd, in rack_log_hpts_diag()
6368 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_wakeup()
6373 log.u_bbr.flex1 = sb->sb_flags; in rack_log_wakeup()
6375 log.u_bbr.flex3 = sb->sb_state; in rack_log_wakeup()
6378 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_wakeup()
6379 &rack->rc_inp->inp_socket->so_rcv, in rack_log_wakeup()
6380 &rack->rc_inp->inp_socket->so_snd, in rack_log_wakeup()
6400 if ((tp->t_state == TCPS_CLOSED) || in rack_start_hpts_timer()
6401 (tp->t_state == TCPS_LISTEN)) { in rack_start_hpts_timer()
6408 stopped = rack->rc_tmr_stopped; in rack_start_hpts_timer()
6409 if (stopped && TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_start_hpts_timer()
6410 left = rack->r_ctl.rc_timer_exp - cts; in rack_start_hpts_timer()
6412 rack->r_ctl.rc_timer_exp = 0; in rack_start_hpts_timer()
6413 rack->r_ctl.rc_hpts_flags = 0; in rack_start_hpts_timer()
6417 if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) { in rack_start_hpts_timer()
6425 * by an ack aka the rc_agg_early (non-paced mode). in rack_start_hpts_timer()
6427 usecs += rack->r_ctl.rc_agg_early; in rack_start_hpts_timer()
6428 rack->r_early = 0; in rack_start_hpts_timer()
6429 rack->r_ctl.rc_agg_early = 0; in rack_start_hpts_timer()
6431 if ((rack->r_late) && in rack_start_hpts_timer()
6432 ((rack->r_use_hpts_min == 0) || (rack->dgp_on == 0))) { in rack_start_hpts_timer()
6439 if (rack->r_ctl.rc_agg_delayed >= usecs) { in rack_start_hpts_timer()
6443 * on the clock. We always have a min in rack_start_hpts_timer()
6448 rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - usecs); in rack_start_hpts_timer()
6452 rack->r_ctl.rc_agg_delayed -= (usecs - HPTS_USECS_PER_SLOT); in rack_start_hpts_timer()
6456 usecs -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6457 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6460 rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - usecs; in rack_start_hpts_timer()
6463 if (rack->r_ctl.rc_agg_delayed == 0) in rack_start_hpts_timer()
6464 rack->r_late = 0; in rack_start_hpts_timer()
6466 } else if (rack->r_late) { in rack_start_hpts_timer()
6470 max_red = (usecs * rack->r_ctl.max_reduction) / 100; in rack_start_hpts_timer()
6471 if (max_red >= rack->r_ctl.rc_agg_delayed) { in rack_start_hpts_timer()
6472 usecs -= rack->r_ctl.rc_agg_delayed; in rack_start_hpts_timer()
6473 rack->r_ctl.rc_agg_delayed = 0; in rack_start_hpts_timer()
6475 usecs -= max_red; in rack_start_hpts_timer()
6476 rack->r_ctl.rc_agg_delayed -= max_red; in rack_start_hpts_timer()
6479 if ((rack->r_use_hpts_min == 1) && in rack_start_hpts_timer()
6481 (rack->dgp_on == 1)) { in rack_start_hpts_timer()
6494 if (tp->t_flags & TF_DELACK) { in rack_start_hpts_timer()
6496 rack->r_ctl.rc_hpts_flags |= PACE_TMR_DELACK; in rack_start_hpts_timer()
6502 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_start_hpts_timer()
6505 * wheel, we resort to a keep-alive timer if it's configured. in rack_start_hpts_timer()
6509 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_start_hpts_timer()
6510 (tp->t_state <= TCPS_CLOSING)) { in rack_start_hpts_timer()
6513 * del-ack), we don't have segments being paced. So in rack_start_hpts_timer()
6516 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_start_hpts_timer()
6517 /* Get the established keep-alive time */ in rack_start_hpts_timer()
6521 * Get the initial setup keep-alive time, in rack_start_hpts_timer()
6529 rack->r_ctl.rc_hpts_flags |= PACE_TMR_KEEP; in rack_start_hpts_timer()
6530 if (rack->in_probe_rtt) { in rack_start_hpts_timer()
6534 * exit probe-rtt and initiate a keep-alive ack. in rack_start_hpts_timer()
6535 * This will get us out of probe-rtt and update in rack_start_hpts_timer()
6536 * our min-rtt. in rack_start_hpts_timer()
6543 (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK)) { in rack_start_hpts_timer()
6549 * keep-alive, delayed_ack we keep track of what was left in rack_start_hpts_timer()
6557 * Hack alert for now we can't time-out over 2,147,483 in rack_start_hpts_timer()
6563 rack->r_ctl.rc_timer_exp = cts + hpts_timeout; in rack_start_hpts_timer()
6566 if ((rack->gp_ready == 0) && in rack_start_hpts_timer()
6567 (rack->use_fixed_rate == 0) && in rack_start_hpts_timer()
6569 (rack->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) { in rack_start_hpts_timer()
6589 * TF2_MBUF_QUEUE_READY - This flags says that I am busy in rack_start_hpts_timer()
6594 * TF2_DONT_SACK_QUEUE - This flag is used in conjunction in rack_start_hpts_timer()
6609 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE|TF2_MBUF_QUEUE_READY); in rack_start_hpts_timer()
6611 rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6612 rack->r_ctl.rc_last_output_to = us_cts + usecs; in rack_start_hpts_timer()
6621 tp->t_flags2 |= TF2_MBUF_QUEUE_READY; in rack_start_hpts_timer()
6627 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK) || in rack_start_hpts_timer()
6628 (IN_RECOVERY(tp->t_flags))) { in rack_start_hpts_timer()
6629 if (rack->r_rr_config != 3) in rack_start_hpts_timer()
6630 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6631 else if (rack->rc_pace_dnd) { in rack_start_hpts_timer()
6640 tp->t_flags2 |= TF2_DONT_SACK_QUEUE; in rack_start_hpts_timer()
6643 if (rack->rc_ack_can_sendout_data) { in rack_start_hpts_timer()
6647 * backout the changes (used for non-paced in rack_start_hpts_timer()
6650 tp->t_flags2 &= ~(TF2_DONT_SACK_QUEUE | in rack_start_hpts_timer()
6653 if ((rack->use_rack_rr) && in rack_start_hpts_timer()
6654 (rack->r_rr_config < 2) && in rack_start_hpts_timer()
6658 * timeout if the timeout does not cause a send. in rack_start_hpts_timer()
6677 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_start_hpts_timer()
6684 if (SEQ_GT(tp->snd_max, tp->snd_una)) { in rack_start_hpts_timer()
6685 panic("tp:%p rack:%p tlts:%d cts:%u usecs:%u pto:%u -- no timer started?", in rack_start_hpts_timer()
6690 rack->rc_tmr_stopped = 0; in rack_start_hpts_timer()
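/*
 * Editor's illustrative sketch (not part of rack.c): the early/late
 * slack bookkeeping above, in miniature.  Waking early simply folds
 * the surplus into the next sleep; waking late repays the debt out of
 * the next sleep, but never more than max_reduction percent of it, so
 * a large accumulated debt cannot collapse pacing entirely.  All names
 * here are hypothetical.
 */
#include <stdint.h>

struct pace_slack {
	uint32_t agg_early;	/* usecs of accumulated earliness */
	uint32_t agg_delayed;	/* usecs of accumulated lateness */
	uint32_t max_reduction;	/* percent of one sleep we may claw back */
};

static uint32_t
pace_next_sleep(struct pace_slack *ps, uint32_t usecs)
{
	uint32_t max_red;

	usecs += ps->agg_early;			/* early: sleep longer */
	ps->agg_early = 0;
	max_red = (usecs * ps->max_reduction) / 100;
	if (max_red >= ps->agg_delayed) {	/* whole debt fits the cap */
		usecs -= ps->agg_delayed;
		ps->agg_delayed = 0;
	} else {				/* repay only the capped part */
		usecs -= max_red;
		ps->agg_delayed -= max_red;
	}
	return (usecs);
}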
6704 TAILQ_FOREACH_FROM(nrsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_mark_lost()
6705 if ((nrsm->r_flags & RACK_SACK_PASSED) == 0) { in rack_mark_lost()
6706 /* Got up to all that were marked sack-passed */ in rack_mark_lost()
6709 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_mark_lost()
6710 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_mark_lost()
6713 nrsm->r_flags |= RACK_WAS_LOST; in rack_mark_lost()
6714 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_mark_lost()
6726 KASSERT((rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)), in rack_mark_nolonger_lost()
6728 rsm->r_flags &= ~RACK_WAS_LOST; in rack_mark_nolonger_lost()
6729 if (rack->r_ctl.rc_considered_lost >= (rsm->r_end - rsm->r_start)) in rack_mark_nolonger_lost()
6730 rack->r_ctl.rc_considered_lost -= rsm->r_end - rsm->r_start; in rack_mark_nolonger_lost()
6732 rack->r_ctl.rc_considered_lost = 0; in rack_mark_nolonger_lost()
6748 * retransmissions, if so we will enter fast-recovery. The output in rack_timeout_rack()
6755 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_rack()
6757 rack->rc_on_min_to = 0; in rack_timeout_rack()
6763 rack->r_ctl.rc_resend = rsm; in rack_timeout_rack()
6764 rack->r_timer_override = 1; in rack_timeout_rack()
6765 if (rack->use_rack_rr) { in rack_timeout_rack()
6769 * over-ride pacing i.e. rrr takes precedence in rack_timeout_rack()
6774 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timeout_rack()
6777 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RACK; in rack_timeout_rack()
6793 if ((M_TRAILINGROOM(rsm->m) != rsm->orig_t_space)) { in rack_adjust_orig_mlen()
6800 KASSERT((rsm->orig_t_space > M_TRAILINGROOM(rsm->m)), in rack_adjust_orig_mlen()
6802 rsm->m, in rack_adjust_orig_mlen()
6804 (intmax_t)M_TRAILINGROOM(rsm->m), in rack_adjust_orig_mlen()
6805 rsm->orig_t_space, in rack_adjust_orig_mlen()
6806 rsm->orig_m_len, in rack_adjust_orig_mlen()
6807 rsm->m->m_len)); in rack_adjust_orig_mlen()
6808 rsm->orig_m_len += (rsm->orig_t_space - M_TRAILINGROOM(rsm->m)); in rack_adjust_orig_mlen()
6809 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_orig_mlen()
6811 if (rsm->m->m_len < rsm->orig_m_len) { in rack_adjust_orig_mlen()
6816 KASSERT((rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)), in rack_adjust_orig_mlen()
6818 rsm->m, rsm->m->m_len, in rack_adjust_orig_mlen()
6819 rsm, rsm->orig_m_len, in rack_adjust_orig_mlen()
6820 rsm->soff)); in rack_adjust_orig_mlen()
6821 if (rsm->soff >= (rsm->orig_m_len - rsm->m->m_len)) in rack_adjust_orig_mlen()
6822 rsm->soff -= (rsm->orig_m_len - rsm->m->m_len); in rack_adjust_orig_mlen()
6824 rsm->soff = 0; in rack_adjust_orig_mlen()
6825 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_orig_mlen()
6827 } else if (rsm->m->m_len > rsm->orig_m_len) { in rack_adjust_orig_mlen()
6829 rsm, rsm->m); in rack_adjust_orig_mlen()
6840 if (src_rsm->m && in rack_setup_offset_for_rsm()
6841 ((src_rsm->orig_m_len != src_rsm->m->m_len) || in rack_setup_offset_for_rsm()
6842 (M_TRAILINGROOM(src_rsm->m) != src_rsm->orig_t_space))) { in rack_setup_offset_for_rsm()
6846 m = src_rsm->m; in rack_setup_offset_for_rsm()
6847 soff = src_rsm->soff + (src_rsm->r_end - src_rsm->r_start); in rack_setup_offset_for_rsm()
6848 while (soff >= m->m_len) { in rack_setup_offset_for_rsm()
6850 soff -= m->m_len; in rack_setup_offset_for_rsm()
6851 m = m->m_next; in rack_setup_offset_for_rsm()
6857 src_rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
6858 (src_rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
6859 &src_rsm->soff); in rack_setup_offset_for_rsm()
6860 src_rsm->orig_m_len = src_rsm->m->m_len; in rack_setup_offset_for_rsm()
6861 src_rsm->orig_t_space = M_TRAILINGROOM(src_rsm->m); in rack_setup_offset_for_rsm()
6862 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_setup_offset_for_rsm()
6863 (rsm->r_start - rack->rc_tp->snd_una), in rack_setup_offset_for_rsm()
6864 &rsm->soff); in rack_setup_offset_for_rsm()
6865 rsm->orig_m_len = rsm->m->m_len; in rack_setup_offset_for_rsm()
6866 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
6870 rsm->m = m; in rack_setup_offset_for_rsm()
6871 rsm->soff = soff; in rack_setup_offset_for_rsm()
6872 rsm->orig_m_len = m->m_len; in rack_setup_offset_for_rsm()
6873 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_setup_offset_for_rsm()
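/*
 * Editor's illustrative sketch (not part of rack.c): the (m, soff)
 * walk used above, on a plain singly linked buffer chain.  Given a
 * byte offset into the chain, step forward until the buffer that holds
 * that byte is found and return the residual offset inside it; this is
 * the same motion rack_setup_offset_for_rsm() performs over mbufs.
 * The struct and names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

struct buf {
	struct buf *next;
	uint32_t len;		/* valid bytes in this buffer */
};

static struct buf *
chain_seek(struct buf *m, uint32_t soff, uint32_t *residual)
{
	while (m != NULL && soff >= m->len) {
		soff -= m->len;
		m = m->next;
	}
	assert(m != NULL);	/* the offset must lie inside the chain */
	*residual = soff;
	return (m);
}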
6882 nrsm->r_start = start; in rack_clone_rsm()
6883 nrsm->r_end = rsm->r_end; in rack_clone_rsm()
6884 nrsm->r_rtr_cnt = rsm->r_rtr_cnt; in rack_clone_rsm()
6885 nrsm->r_act_rxt_cnt = rsm->r_act_rxt_cnt; in rack_clone_rsm()
6886 nrsm->r_flags = rsm->r_flags; in rack_clone_rsm()
6887 nrsm->r_dupack = rsm->r_dupack; in rack_clone_rsm()
6888 nrsm->r_no_rtt_allowed = rsm->r_no_rtt_allowed; in rack_clone_rsm()
6889 nrsm->r_rtr_bytes = 0; in rack_clone_rsm()
6890 nrsm->r_fas = rsm->r_fas; in rack_clone_rsm()
6891 nrsm->r_bas = rsm->r_bas; in rack_clone_rsm()
6892 tqhash_update_end(rack->r_ctl.tqh, rsm, nrsm->r_start); in rack_clone_rsm()
6893 nrsm->r_just_ret = rsm->r_just_ret; in rack_clone_rsm()
6894 for (idx = 0; idx < nrsm->r_rtr_cnt; idx++) { in rack_clone_rsm()
6895 nrsm->r_tim_lastsent[idx] = rsm->r_tim_lastsent[idx]; in rack_clone_rsm()
6898 if (nrsm->r_flags & RACK_HAS_SYN) in rack_clone_rsm()
6899 nrsm->r_flags &= ~RACK_HAS_SYN; in rack_clone_rsm()
6901 if (rsm->r_flags & RACK_HAS_FIN) in rack_clone_rsm()
6902 rsm->r_flags &= ~RACK_HAS_FIN; in rack_clone_rsm()
6904 if (rsm->r_flags & RACK_HAD_PUSH) in rack_clone_rsm()
6905 rsm->r_flags &= ~RACK_HAD_PUSH; in rack_clone_rsm()
6907 if (nrsm->r_flags & RACK_APP_LIMITED) in rack_clone_rsm()
6908 rack->r_ctl.rc_app_limited_cnt++; in rack_clone_rsm()
6910 nrsm->r_hw_tls = rsm->r_hw_tls; in rack_clone_rsm()
6918 KASSERT(((rsm->m != NULL) || in rack_clone_rsm()
6919 (rsm->r_flags & (RACK_HAS_SYN|RACK_HAS_FIN))), in rack_clone_rsm()
6920 ("rsm:%p nrsm:%p rack:%p -- rsm->m is NULL?", rsm, nrsm, rack)); in rack_clone_rsm()
6921 if (rsm->m) in rack_clone_rsm()
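/*
 * Editor's illustrative sketch (not part of rack.c): the core of the
 * clone operation above.  The clone takes over [start, end) together
 * with the per-send timestamp history, and the original shrinks to
 * [old_start, start).  rack_clone_rsm() additionally migrates SYN/FIN/
 * PUSH flags and updates the hash's notion of the left block's end.
 * Minimal hypothetical types.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_NUM_RETRANS 3

struct seg {
	uint32_t r_start, r_end;
	uint8_t r_rtr_cnt;
	uint64_t r_tim_lastsent[SKETCH_NUM_RETRANS];
};

static void
seg_split(struct seg *left, struct seg *clone, uint32_t start)
{
	clone->r_start = start;
	clone->r_end = left->r_end;
	clone->r_rtr_cnt = left->r_rtr_cnt;
	memcpy(clone->r_tim_lastsent, left->r_tim_lastsent,
	    sizeof(left->r_tim_lastsent));
	left->r_end = start;	/* original now covers [r_start, start) */
}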
6940 rack_log_map_chg(rack->rc_tp, rack, NULL, in rack_merge_rsm()
6941 l_rsm, r_rsm, MAP_MERGE, r_rsm->r_end, __LINE__); in rack_merge_rsm()
6942 tqhash_update_end(rack->r_ctl.tqh, l_rsm, r_rsm->r_end); in rack_merge_rsm()
6943 if (l_rsm->r_dupack < r_rsm->r_dupack) in rack_merge_rsm()
6944 l_rsm->r_dupack = r_rsm->r_dupack; in rack_merge_rsm()
6945 if (r_rsm->r_rtr_bytes) in rack_merge_rsm()
6946 l_rsm->r_rtr_bytes += r_rsm->r_rtr_bytes; in rack_merge_rsm()
6947 if (r_rsm->r_in_tmap) { in rack_merge_rsm()
6949 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, r_rsm, r_tnext); in rack_merge_rsm()
6950 r_rsm->r_in_tmap = 0; in rack_merge_rsm()
6954 if (r_rsm->r_flags & RACK_HAS_FIN) in rack_merge_rsm()
6955 l_rsm->r_flags |= RACK_HAS_FIN; in rack_merge_rsm()
6956 if (r_rsm->r_flags & RACK_TLP) in rack_merge_rsm()
6957 l_rsm->r_flags |= RACK_TLP; in rack_merge_rsm()
6958 if (r_rsm->r_flags & RACK_RWND_COLLAPSED) in rack_merge_rsm()
6959 l_rsm->r_flags |= RACK_RWND_COLLAPSED; in rack_merge_rsm()
6960 if ((r_rsm->r_flags & RACK_APP_LIMITED) && in rack_merge_rsm()
6961 ((l_rsm->r_flags & RACK_APP_LIMITED) == 0)) { in rack_merge_rsm()
6963 * If both are app-limited then let the in rack_merge_rsm()
6967 l_rsm->r_flags |= RACK_APP_LIMITED; in rack_merge_rsm()
6968 r_rsm->r_flags &= ~RACK_APP_LIMITED; in rack_merge_rsm()
6969 if (r_rsm == rack->r_ctl.rc_first_appl) in rack_merge_rsm()
6970 rack->r_ctl.rc_first_appl = l_rsm; in rack_merge_rsm()
6972 tqhash_remove(rack->r_ctl.tqh, r_rsm, REMOVE_TYPE_MERGE); in rack_merge_rsm()
6987 if(l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] < in rack_merge_rsm()
6988 r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]) { in rack_merge_rsm()
6989 l_rsm->r_tim_lastsent[(l_rsm->r_rtr_cnt-1)] = r_rsm->r_tim_lastsent[(r_rsm->r_rtr_cnt-1)]; in rack_merge_rsm()
6996 if(l_rsm->r_ack_arrival < r_rsm->r_ack_arrival) in rack_merge_rsm()
6997 l_rsm->r_ack_arrival = r_rsm->r_ack_arrival; in rack_merge_rsm()
6999 if ((r_rsm->r_limit_type == 0) && (l_rsm->r_limit_type != 0)) { in rack_merge_rsm()
7001 r_rsm->r_limit_type = l_rsm->r_limit_type; in rack_merge_rsm()
7002 l_rsm->r_limit_type = 0; in rack_merge_rsm()
7005 l_rsm->r_flags |= RACK_MERGED; in rack_merge_rsm()
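/*
 * Editor's illustrative sketch (not part of rack.c): what merging two
 * adjacent blocks must preserve.  The newest transmit time and the
 * newest ack-arrival time win, because both feed RACK's time-based
 * loss detection; keeping the older values would make the merged block
 * look stale and ripe for a spurious retransmit.  Hypothetical types.
 */
#include <stdint.h>

struct blk {
	uint32_t r_start, r_end;
	uint64_t last_sent;	/* newest transmit time of any byte */
	uint32_t ack_arrival;	/* newest time any part was sacked */
};

static void
blk_merge(struct blk *l, const struct blk *r)
{
	/* the caller guarantees l->r_end == r->r_start */
	l->r_end = r->r_end;
	if (l->last_sent < r->last_sent)
		l->last_sent = r->last_sent;
	if (l->ack_arrival < r->ack_arrival)
		l->ack_arrival = r->ack_arrival;
}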
7030 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_timeout_tlp()
7036 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_tlp()
7043 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_tlp()
7044 rack->r_might_revert = 0; in rack_timeout_tlp()
7045 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_tlp()
7047 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_timeout_tlp()
7049 avail = sbavail(&so->so_snd); in rack_timeout_tlp()
7050 out = tp->snd_max - tp->snd_una; in rack_timeout_tlp()
7051 if ((out > tp->snd_wnd) || rack->rc_has_collapsed) { in rack_timeout_tlp()
7056 if (rack->r_ctl.dsack_persist && (rack->r_ctl.rc_tlp_cnt_out >= 1)) { in rack_timeout_tlp()
7057 rack->r_ctl.dsack_persist--; in rack_timeout_tlp()
7058 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_tlp()
7059 rack->r_ctl.num_dsack = 0; in rack_timeout_tlp()
7063 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_tlp()
7064 (rack->r_ctl.rc_tlp_cnt_out == 1)) { in rack_timeout_tlp()
7073 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_tlp()
7074 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_tlp()
7075 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_tlp()
7076 tp->gput_seq, in rack_timeout_tlp()
7083 if (rack_always_send_oldest && (TAILQ_EMPTY(&rack->r_ctl.rc_tmap) == 0)) in rack_timeout_tlp()
7088 amm = avail - out; in rack_timeout_tlp()
7091 if ((amm + out) > tp->snd_wnd) { in rack_timeout_tlp()
7099 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_timeout_tlp()
7101 if (rack->rack_no_prr == 0) { in rack_timeout_tlp()
7102 if (out + amm <= tp->snd_wnd) { in rack_timeout_tlp()
7103 rack->r_ctl.rc_prr_sndcnt = amm; in rack_timeout_tlp()
7104 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7110 /* Set the send-new override */ in rack_timeout_tlp()
7111 if (out + amm <= tp->snd_wnd) in rack_timeout_tlp()
7112 rack->r_ctl.rc_tlp_new_data = amm; in rack_timeout_tlp()
7116 rack->r_ctl.rc_tlpsend = NULL; in rack_timeout_tlp()
7122 * Ok we need to arrange the last un-acked segment to be re-sent, or in rack_timeout_tlp()
7123 * optionally the first un-acked segment. in rack_timeout_tlp()
7127 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timeout_tlp()
7129 rsm = tqhash_max(rack->r_ctl.tqh); in rack_timeout_tlp()
7130 if (rsm && (rsm->r_flags & (RACK_ACKED | RACK_HAS_FIN))) { in rack_timeout_tlp()
7145 if (SEQ_GT((rack->r_ctl.last_collapse_point - 1), rack->rc_tp->snd_una)) in rack_timeout_tlp()
7146 rsm = tqhash_find(rack->r_ctl.tqh, (rack->r_ctl.last_collapse_point - 1)); in rack_timeout_tlp()
7148 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_tlp()
7155 if ((rsm->r_end - rsm->r_start) > ctf_fixed_maxseg(tp)) { in rack_timeout_tlp()
7170 (rsm->r_end - ctf_fixed_maxseg(tp))); in rack_timeout_tlp()
7173 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_timeout_tlp()
7175 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_timeout_tlp()
7180 if (rsm->r_in_tmap) { in rack_timeout_tlp()
7181 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_timeout_tlp()
7182 nrsm->r_in_tmap = 1; in rack_timeout_tlp()
7186 rack->r_ctl.rc_tlpsend = rsm; in rack_timeout_tlp()
7190 rack->r_timer_override = 1; in rack_timeout_tlp()
7191 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
7194 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_TLP; in rack_timeout_tlp()
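/*
 * Editor's illustrative sketch (not part of rack.c): the TLP decision
 * above, in outline.  A probe prefers new data, clipped to one segment
 * and to the peer's window; if no new data fits, it falls back to
 * retransmitting the last (or, optionally, the oldest) unacked
 * segment.  All names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

struct tlp_in {
	uint32_t avail;		/* bytes queued in the send buffer */
	uint32_t out;		/* snd_max - snd_una */
	uint32_t snd_wnd;	/* peer's receive window */
	uint32_t maxseg;
};

/* Returns true and sets *new_len when the probe can carry new data. */
static bool
tlp_probe_with_new_data(const struct tlp_in *in, uint32_t *new_len)
{
	uint32_t amm;

	if (in->avail <= in->out)
		return (false);		/* nothing new queued */
	amm = in->avail - in->out;
	if (amm > in->maxseg)
		amm = in->maxseg;	/* a probe is at most one segment */
	if (in->out + amm > in->snd_wnd)
		return (false);		/* window shut: retransmit instead */
	*new_len = amm;
	return (true);
}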
7211 tp->t_flags &= ~TF_DELACK; in rack_timeout_delack()
7212 tp->t_flags |= TF_ACKNOW; in rack_timeout_delack()
7214 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK; in rack_timeout_delack()
7223 t_template = tcpip_maketemplate(rack->rc_inp); in rack_send_ack_challange()
7225 if (rack->forced_ack == 0) { in rack_send_ack_challange()
7226 rack->forced_ack = 1; in rack_send_ack_challange()
7227 rack->r_ctl.forced_ack_ts = tcp_get_usecs(NULL); in rack_send_ack_challange()
7229 rack->probe_not_answered = 1; in rack_send_ack_challange()
7231 tcp_respond(rack->rc_tp, t_template->tt_ipgen, in rack_send_ack_challange()
7232 &t_template->tt_t, (struct mbuf *)NULL, in rack_send_ack_challange()
7233 rack->rc_tp->rcv_nxt, rack->rc_tp->snd_una - 1, 0); in rack_send_ack_challange()
7235 /* This does send an ack so kill any D-ack timer */ in rack_send_ack_challange()
7236 if (rack->rc_tp->t_flags & TF_DELACK) in rack_send_ack_challange()
7237 rack->rc_tp->t_flags &= ~TF_DELACK; in rack_send_ack_challange()
7257 if (rack->rc_in_persist == 0) in rack_timeout_persist()
7262 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7263 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_persist()
7276 if (tp->t_rxtshift >= V_tcp_retries && in rack_timeout_persist()
7277 (ticks - tp->t_rcvtime >= tcp_maxpersistidle || in rack_timeout_persist()
7278 TICKS_2_USEC(ticks - tp->t_rcvtime) >= RACK_REXMTVAL(tp) * tcp_totbackoff)) { in rack_timeout_persist()
7281 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7282 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7285 if ((sbavail(&rack->rc_inp->inp_socket->so_snd) == 0) && in rack_timeout_persist()
7286 tp->snd_una == tp->snd_max) in rack_timeout_persist()
7288 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_PERSIT; in rack_timeout_persist()
7293 if (tp->t_state > TCPS_CLOSE_WAIT && in rack_timeout_persist()
7294 (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) { in rack_timeout_persist()
7297 counter_u64_add(rack_persists_lost_ends, rack->r_ctl.persist_lost_ends); in rack_timeout_persist()
7298 retval = -ETIMEDOUT; /* tcp_drop() */ in rack_timeout_persist()
7303 if (rack->probe_not_answered) { in rack_timeout_persist()
7305 rack->r_ctl.persist_lost_ends++; in rack_timeout_persist()
7310 if (tp->t_rxtshift < V_tcp_retries) in rack_timeout_persist()
7311 tp->t_rxtshift++; in rack_timeout_persist()
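/*
 * Editor's illustrative sketch (not part of rack.c): the persist
 * probe's schedule.  The probe itself is a zero-length segment sent at
 * snd_una - 1 (already-acked data the peer must ACK but never absorb),
 * and each unanswered probe backs the next one off.  The kernel scales
 * through a tcp_backoff[] table; the doubling below is a hypothetical
 * simplification of that.
 */
#include <stdint.h>

static uint32_t
persist_next_timeout(uint32_t rto_base, uint32_t shift,
    uint32_t floor_us, uint32_t ceil_us)
{
	uint64_t t = (uint64_t)rto_base << shift;	/* exponential backoff */

	if (t < floor_us)
		t = floor_us;
	if (t > ceil_us)
		t = ceil_us;
	return ((uint32_t)t);
}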
7330 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_KEEP; in rack_timeout_keepalive()
7333 * Keep-alive timer went off; send something or drop connection if in rack_timeout_keepalive()
7337 if (tp->t_state < TCPS_ESTABLISHED) in rack_timeout_keepalive()
7339 if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timeout_keepalive()
7340 tp->t_state <= TCPS_CLOSING) { in rack_timeout_keepalive()
7341 if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp)) in rack_timeout_keepalive()
7348 * number tp->snd_una-1 causes the transmitted zero-length in rack_timeout_keepalive()
7361 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_keepalive()
7373 * un-acked. in rack_remxt_tmr()
7378 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_remxt_tmr()
7381 rack->r_timer_override = 1; in rack_remxt_tmr()
7382 rack->r_ctl.rc_snd_max_at_rto = tp->snd_max; in rack_remxt_tmr()
7383 rack->r_ctl.rc_last_timeout_snduna = tp->snd_una; in rack_remxt_tmr()
7384 rack->r_late = 0; in rack_remxt_tmr()
7385 rack->r_early = 0; in rack_remxt_tmr()
7386 rack->r_ctl.rc_agg_delayed = 0; in rack_remxt_tmr()
7387 rack->r_ctl.rc_agg_early = 0; in rack_remxt_tmr()
7388 if (rack->r_state && (rack->r_state != tp->t_state)) in rack_remxt_tmr()
7390 if (tp->t_rxtshift <= rack_rxt_scoreboard_clear_thresh) { in rack_remxt_tmr()
7393 * more than rack_rxt_scoreboard_clear_thresh time-outs. in rack_remxt_tmr()
7395 rack->r_ctl.rc_resend = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7396 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7397 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7403 * mark SACK-PASS on anything not acked here. in rack_remxt_tmr()
7412 * sacks that come floating in will "re-ack" the data. in rack_remxt_tmr()
7417 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_remxt_tmr()
7419 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_remxt_tmr()
7420 rsm->r_dupack = 0; in rack_remxt_tmr()
7423 /* We must re-add it back to the tlist */ in rack_remxt_tmr()
7425 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_remxt_tmr()
7427 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, trsm, rsm, r_tnext); in rack_remxt_tmr()
7429 rsm->r_in_tmap = 1; in rack_remxt_tmr()
7431 if (rsm->r_flags & RACK_ACKED) in rack_remxt_tmr()
7432 rsm->r_flags |= RACK_WAS_ACKED; in rack_remxt_tmr()
7433 …rsm->r_flags &= ~(RACK_ACKED | RACK_SACK_PASSED | RACK_WAS_SACKPASS | RACK_RWND_COLLAPSED | RACK_W… in rack_remxt_tmr()
7434 rsm->r_flags |= RACK_MUST_RXT; in rack_remxt_tmr()
7437 rack->r_ctl.rc_considered_lost = 0; in rack_remxt_tmr()
7438 /* Clear the count (we just un-acked them) */ in rack_remxt_tmr()
7439 rack->r_ctl.rc_sacked = 0; in rack_remxt_tmr()
7440 rack->r_ctl.rc_sacklast = NULL; in rack_remxt_tmr()
7442 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7443 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7444 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7445 rack->r_ctl.rc_prr_sndcnt = 0; in rack_remxt_tmr()
7447 rack->r_ctl.rc_resend = tqhash_min(rack->r_ctl.tqh); in rack_remxt_tmr()
7448 if (rack->r_ctl.rc_resend != NULL) in rack_remxt_tmr()
7449 rack->r_ctl.rc_resend->r_flags |= RACK_TO_REXT; in rack_remxt_tmr()
7450 if (((tp->t_flags & TF_SACK_PERMIT) == 0) && in rack_remxt_tmr()
7451 ((tp->t_flags & TF_SENTFIN) == 0)) { in rack_remxt_tmr()
7453 * For non-sack customers new data in rack_remxt_tmr()
7457 rack->r_must_retran = 1; in rack_remxt_tmr()
7458 rack->r_ctl.rc_out_at_rto = ctf_flight_size(rack->rc_tp, in rack_remxt_tmr()
7459 rack->r_ctl.rc_sacked); in rack_remxt_tmr()
7467 tp->t_rxtcur = RACK_REXMTVAL(tp); in rack_convert_rtts()
7468 if (TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_convert_rtts()
7469 tp->t_rxtcur += TICKS_2_USEC(tcp_rexmit_slop); in rack_convert_rtts()
7471 if (tp->t_rxtcur > rack_rto_max) { in rack_convert_rtts()
7472 tp->t_rxtcur = rack_rto_max; in rack_convert_rtts()
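/*
 * Editor's illustrative sketch (not part of rack.c): the RTO value the
 * conversion above recomputes, per RFC 6298: srtt plus four times the
 * RTT variance, floored at the minimum RTO and ceilinged at the
 * maximum.  Units are microseconds, as throughout rack.
 */
#include <stdint.h>

static uint32_t
rexmt_val(uint32_t srtt, uint32_t rttvar, uint32_t rto_min, uint32_t rto_max)
{
	uint32_t rto = srtt + (rttvar << 2);	/* srtt + 4 * rttvar */

	if (rto < rto_min)
		rto = rto_min;
	if (rto > rto_max)
		rto = rto_max;
	return (rto);
}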
7482 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_cc_conn_init()
7483 srtt = tp->t_srtt; in rack_cc_conn_init()
7489 if ((srtt == 0) && (tp->t_srtt != 0)) in rack_cc_conn_init()
7497 if (tp->snd_ssthresh < tp->snd_wnd) { in rack_cc_conn_init()
7498 tp->snd_ssthresh = tp->snd_wnd; in rack_cc_conn_init()
7504 if (rc_init_window(rack) < tp->snd_cwnd) in rack_cc_conn_init()
7505 tp->snd_cwnd = rc_init_window(rack); in rack_cc_conn_init()
7509 * Re-transmit timeout! If we drop the PCB we will return 1, otherwise
7520 if ((tp->t_flags & TF_GPUTINPROG) && in rack_timeout_rxt()
7521 (tp->t_rxtshift)) { in rack_timeout_rxt()
7528 tp->t_flags &= ~TF_GPUTINPROG; in rack_timeout_rxt()
7529 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_timeout_rxt()
7530 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_timeout_rxt()
7531 tp->gput_seq, in rack_timeout_rxt()
7537 return (-ETIMEDOUT); /* tcp_drop() */ in rack_timeout_rxt()
7539 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_RXT; in rack_timeout_rxt()
7540 rack->r_ctl.retran_during_recovery = 0; in rack_timeout_rxt()
7541 rack->rc_ack_required = 1; in rack_timeout_rxt()
7542 rack->r_ctl.dsack_byte_cnt = 0; in rack_timeout_rxt()
7543 if (IN_RECOVERY(tp->t_flags) && in rack_timeout_rxt()
7544 (rack->rto_from_rec == 0)) { in rack_timeout_rxt()
7551 rack->rto_from_rec = 1; in rack_timeout_rxt()
7552 rack->r_ctl.rto_ssthresh = tp->snd_ssthresh; in rack_timeout_rxt()
7554 if (IN_FASTRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7555 tp->t_flags |= TF_WASFRECOVERY; in rack_timeout_rxt()
7557 tp->t_flags &= ~TF_WASFRECOVERY; in rack_timeout_rxt()
7558 if (IN_CONGRECOVERY(tp->t_flags)) in rack_timeout_rxt()
7559 tp->t_flags |= TF_WASCRECOVERY; in rack_timeout_rxt()
7561 tp->t_flags &= ~TF_WASCRECOVERY; in rack_timeout_rxt()
7562 if (TCPS_HAVEESTABLISHED(tp->t_state) && in rack_timeout_rxt()
7563 (tp->snd_una == tp->snd_max)) { in rack_timeout_rxt()
7567 if (rack->r_ctl.dsack_persist) { in rack_timeout_rxt()
7568 rack->r_ctl.dsack_persist--; in rack_timeout_rxt()
7569 if (rack->r_ctl.num_dsack && (rack->r_ctl.dsack_persist == 0)) { in rack_timeout_rxt()
7570 rack->r_ctl.num_dsack = 0; in rack_timeout_rxt()
7582 if ((TCPS_HAVEESTABLISHED(tp->t_state) == 0) && in rack_timeout_rxt()
7586 rsm = tqhash_min(rack->r_ctl.tqh); in rack_timeout_rxt()
7589 if ((TSTMP_GT(cts, (uint32_t)rsm->r_tim_lastsent[0])) && in rack_timeout_rxt()
7590 ((cts - (uint32_t)rsm->r_tim_lastsent[0]) >= TICKS_2_USEC(TP_KEEPINIT(tp)))) { in rack_timeout_rxt()
7602 if ((rack->r_ctl.rc_resend == NULL) || in rack_timeout_rxt()
7603 ((rack->r_ctl.rc_resend->r_flags & RACK_RWND_COLLAPSED) == 0)) { in rack_timeout_rxt()
7610 tp->t_rxtshift++; in rack_timeout_rxt()
7613 if (tp->t_rxtshift > V_tcp_retries) { in rack_timeout_rxt()
7616 tp->t_rxtshift = V_tcp_retries; in rack_timeout_rxt()
7619 MPASS(tp->t_softerror >= 0); in rack_timeout_rxt()
7620 retval = tp->t_softerror ? -tp->t_softerror : -ETIMEDOUT; in rack_timeout_rxt()
7623 if (tp->t_state == TCPS_SYN_SENT) { in rack_timeout_rxt()
7628 tp->snd_cwnd = 1; in rack_timeout_rxt()
7629 } else if (tp->t_rxtshift == 1) { in rack_timeout_rxt()
7636 * End-to-End Network Path Properties" by Allman and Paxson in rack_timeout_rxt()
7639 tp->snd_cwnd_prev = tp->snd_cwnd; in rack_timeout_rxt()
7640 tp->snd_ssthresh_prev = tp->snd_ssthresh; in rack_timeout_rxt()
7641 tp->snd_recover_prev = tp->snd_recover; in rack_timeout_rxt()
7642 tp->t_badrxtwin = ticks + (USEC_2_TICKS(tp->t_srtt)/2); in rack_timeout_rxt()
7643 tp->t_flags |= TF_PREVVALID; in rack_timeout_rxt()
7644 } else if ((tp->t_flags & TF_RCVD_TSTMP) == 0) in rack_timeout_rxt()
7645 tp->t_flags &= ~TF_PREVVALID; in rack_timeout_rxt()
7647 if ((tp->t_state == TCPS_SYN_SENT) || in rack_timeout_rxt()
7648 (tp->t_state == TCPS_SYN_RECEIVED)) in rack_timeout_rxt()
7649 rexmt = RACK_INITIAL_RTO * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7651 rexmt = max(rack_rto_min, (tp->t_srtt + (tp->t_rttvar << 2))) * tcp_backoff[tp->t_rxtshift]; in rack_timeout_rxt()
7653 RACK_TCPT_RANGESET(tp->t_rxtcur, rexmt, in rack_timeout_rxt()
7654 max(rack_rto_min, rexmt), rack_rto_max, rack->r_ctl.timer_slop); in rack_timeout_rxt()
7663 isipv6 = (inp->inp_vflag & INP_IPV6) ? true : false; in rack_timeout_rxt()
7670 ((tp->t_state == TCPS_ESTABLISHED) || in rack_timeout_rxt()
7671 (tp->t_state == TCPS_FIN_WAIT_1))) { in rack_timeout_rxt()
7674 * 1448 -> 1188 -> 524) should be given 2 chances to recover in rack_timeout_rxt()
7675 * before further clamping down. 'tp->t_rxtshift % 2 == 0' in rack_timeout_rxt()
7678 if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD | TF2_PLPMTU_MAXSEGSNT)) == in rack_timeout_rxt()
7680 (tp->t_rxtshift >= 2 && tp->t_rxtshift < 6 && in rack_timeout_rxt()
7681 tp->t_rxtshift % 2 == 0)) { in rack_timeout_rxt()
7683 * Enter Path MTU Black-hole Detection mechanism: - in rack_timeout_rxt()
7684 * Disable Path MTU Discovery (IP "DF" bit). - in rack_timeout_rxt()
7688 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) { in rack_timeout_rxt()
7690 tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7692 tp->t_pmtud_saved_maxseg = tp->t_maxseg; in rack_timeout_rxt()
7701 tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss) { in rack_timeout_rxt()
7703 tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss; in rack_timeout_rxt()
7707 tp->t_maxseg = V_tcp_v6mssdflt; in rack_timeout_rxt()
7712 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7720 if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss) { in rack_timeout_rxt()
7722 tp->t_maxseg = V_tcp_pmtud_blackhole_mss; in rack_timeout_rxt()
7726 tp->t_maxseg = V_tcp_mssdflt; in rack_timeout_rxt()
7731 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7744 if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) && in rack_timeout_rxt()
7745 (tp->t_rxtshift >= 6)) { in rack_timeout_rxt()
7746 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_timeout_rxt()
7747 tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE; in rack_timeout_rxt()
7748 tp->t_maxseg = tp->t_pmtud_saved_maxseg; in rack_timeout_rxt()
7749 if (tp->t_maxseg < V_tcp_mssdflt) { in rack_timeout_rxt()
7755 tp->t_flags2 |= TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7757 tp->t_flags2 &= ~TF2_PROC_SACK_PROHIBIT; in rack_timeout_rxt()
7765 * our third SYN to work-around some broken terminal servers in rack_timeout_rxt()
7768 * unknown-to-them TCP options. in rack_timeout_rxt()
7770 if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) && in rack_timeout_rxt()
7771 (tp->t_rxtshift == 3)) in rack_timeout_rxt()
7772 tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT); in rack_timeout_rxt()
7779 if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { in rack_timeout_rxt()
7781 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_timeout_rxt()
7786 tp->t_rttvar += tp->t_srtt; in rack_timeout_rxt()
7787 tp->t_srtt = 0; in rack_timeout_rxt()
7789 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_timeout_rxt()
7790 tp->snd_recover = tp->snd_max; in rack_timeout_rxt()
7791 tp->t_flags |= TF_ACKNOW; in rack_timeout_rxt()
7792 tp->t_rtttime = 0; in rack_timeout_rxt()
7793 rack_cong_signal(tp, CC_RTO, tp->snd_una, __LINE__); in rack_timeout_rxt()
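/*
 * Editor's illustrative sketch (not part of rack.c): the PMTU
 * black-hole dance above, in outline.  On even retransmit shifts in
 * [2, 6) the MSS is stepped down (first to a value that clears common
 * tunnel overhead, then to the protocol minimum); if retransmits still
 * fail by shift 6, the original MSS is restored, since MSS evidently
 * was not the problem.  The constants here are illustrative stand-ins
 * for the kernel's sysctls.
 */
#include <stdbool.h>
#include <stdint.h>

struct bh_state {
	uint32_t maxseg;
	uint32_t saved_maxseg;
	bool blackhole;		/* currently probing with a smaller MSS */
};

static void
blackhole_step(struct bh_state *s, uint32_t rxtshift,
    uint32_t probe_mss /* e.g. 1200 */, uint32_t min_mss /* e.g. 536 */)
{
	if (rxtshift >= 2 && rxtshift < 6 && (rxtshift % 2) == 0) {
		if (!s->blackhole) {
			s->blackhole = true;
			s->saved_maxseg = s->maxseg;
		}
		if (s->maxseg > probe_mss)
			s->maxseg = probe_mss;	/* first clamp */
		else
			s->maxseg = min_mss;	/* final clamp */
	} else if (s->blackhole && rxtshift >= 6) {
		s->blackhole = false;	/* give up: restore the saved MSS */
		s->maxseg = s->saved_maxseg;
	}
}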
7802 int32_t timers = (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK); in rack_process_timers()
7804 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_timers()
7805 (tp->t_flags & TF_GPUTINPROG)) { in rack_process_timers()
7814 bytes = tp->gput_ack - tp->gput_seq; in rack_process_timers()
7815 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_process_timers()
7816 bytes += tp->gput_seq - tp->snd_una; in rack_process_timers()
7817 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_process_timers()
7823 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_process_timers()
7824 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_process_timers()
7825 tp->gput_seq, in rack_process_timers()
7827 tp->t_flags &= ~TF_GPUTINPROG; in rack_process_timers()
7833 if (tp->t_state == TCPS_LISTEN) { in rack_process_timers()
7835 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) in rack_process_timers()
7840 rack->rc_on_min_to) { in rack_process_timers()
7843 * are on a min-timeout (which means rrr_conf = 3) in rack_process_timers()
7848 * If it's on a normal rack timer (non-min) then in rack_process_timers()
7853 if (TSTMP_LT(cts, rack->r_ctl.rc_timer_exp)) { in rack_process_timers()
7856 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_process_timers()
7857 ret = -1; in rack_process_timers()
7868 ret = -2; in rack_process_timers()
7875 * no-sack wakeup on since we no longer have a PKT_OUTPUT in rack_process_timers()
7878 rack->rc_tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE; in rack_process_timers()
7879 ret = -3; in rack_process_timers()
7880 left = rack->r_ctl.rc_timer_exp - cts; in rack_process_timers()
7886 rack->rc_tmr_stopped = 0; in rack_process_timers()
7887 rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_MASK; in rack_process_timers()
7891 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
7892 rack->r_fast_output = 0; in rack_process_timers()
7895 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
7896 rack->r_fast_output = 0; in rack_process_timers()
7899 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_process_timers()
7900 rack->r_fast_output = 0; in rack_process_timers()
7918 flags_on_entry = rack->r_ctl.rc_hpts_flags; in rack_timer_cancel()
7920 if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_timer_cancel()
7921 ((TSTMP_GEQ(us_cts, rack->r_ctl.rc_last_output_to)) || in rack_timer_cancel()
7922 ((tp->snd_max - tp->snd_una) == 0))) { in rack_timer_cancel()
7923 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
7926 if ((tp->snd_max - tp->snd_una) == 0) in rack_timer_cancel()
7927 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_cancel()
7930 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_timer_cancel()
7931 rack->rc_tmr_stopped = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_cancel()
7932 if (tcp_in_hpts(rack->rc_tp) && in rack_timer_cancel()
7933 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)) { in rack_timer_cancel()
7939 tcp_hpts_remove(rack->rc_tp); in rack_timer_cancel()
7942 rack->r_ctl.rc_hpts_flags &= ~(PACE_TMR_MASK); in rack_timer_cancel()
7953 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_stopall()
7954 rack->t_timers_stopped = 1; in rack_stopall()
7969 rack->rc_in_persist = 1; in rack_stop_all_timers()
7971 if (tcp_in_hpts(rack->rc_tp)) { in rack_stop_all_timers()
7972 tcp_hpts_remove(rack->rc_tp); in rack_stop_all_timers()
7982 rsm->r_rtr_cnt++; in rack_update_rsm()
7983 if (rsm->r_rtr_cnt > RACK_NUM_OF_RETRANS) { in rack_update_rsm()
7984 rsm->r_rtr_cnt = RACK_NUM_OF_RETRANS; in rack_update_rsm()
7985 rsm->r_flags |= RACK_OVERMAX; in rack_update_rsm()
7987 rsm->r_act_rxt_cnt++; in rack_update_rsm()
7990 rsm->r_dupack = 0; in rack_update_rsm()
7991 if ((rsm->r_rtr_cnt > 1) && ((rsm->r_flags & RACK_TLP) == 0)) { in rack_update_rsm()
7992 rack->r_ctl.rc_holes_rxt += (rsm->r_end - rsm->r_start); in rack_update_rsm()
7993 rsm->r_rtr_bytes += (rsm->r_end - rsm->r_start); in rack_update_rsm()
7995 if (rsm->r_flags & RACK_WAS_LOST) { in rack_update_rsm()
8003 idx = rsm->r_rtr_cnt - 1; in rack_update_rsm()
8004 rsm->r_tim_lastsent[idx] = ts; in rack_update_rsm()
8007 * in snd_una <-> snd_max. in rack_update_rsm()
8009 rsm->r_fas = ctf_flight_size(rack->rc_tp, in rack_update_rsm()
8010 rack->r_ctl.rc_sacked); in rack_update_rsm()
8011 if (rsm->r_flags & RACK_ACKED) { in rack_update_rsm()
8013 rsm->r_flags &= ~RACK_ACKED; in rack_update_rsm()
8014 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8016 if (rsm->r_in_tmap) { in rack_update_rsm()
8017 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8018 rsm->r_in_tmap = 0; in rack_update_rsm()
8022 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_update_rsm()
8023 rsm->r_in_tmap = 1; in rack_update_rsm()
8024 rsm->r_bas = (uint8_t)(((rsm->r_end - rsm->r_start) + segsiz - 1) / segsiz); in rack_update_rsm()
8026 if (rsm->r_flags & RACK_MUST_RXT) { in rack_update_rsm()
8027 if (rack->r_must_retran) in rack_update_rsm()
8028 rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start); in rack_update_rsm()
8029 if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) { in rack_update_rsm()
8034 rack->r_must_retran = 0; in rack_update_rsm()
8035 rack->r_ctl.rc_out_at_rto = 0; in rack_update_rsm()
8037 rsm->r_flags &= ~RACK_MUST_RXT; in rack_update_rsm()
8040 rsm->r_flags &= ~RACK_RWND_COLLAPSED; in rack_update_rsm()
8041 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_update_rsm()
8043 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_update_rsm()
8044 rsm->r_flags |= RACK_WAS_SACKPASS; in rack_update_rsm()
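/*
 * Editor's illustrative sketch (not part of rack.c): why a
 * retransmitted block is requeued at the tail above.  The transmit map
 * is kept ordered by most-recent send time so RACK can scan from the
 * head for the oldest outstanding data; re-sending a block makes it
 * the newest, so it must move to the tail.  Hypothetical types over
 * the real <sys/queue.h> TAILQ macros.
 */
#include <sys/queue.h>
#include <stdint.h>

struct txblk {
	TAILQ_ENTRY(txblk) t_next;
	uint32_t start, end;
	uint64_t last_sent;
};
TAILQ_HEAD(txmap, txblk);

static void
txmap_mark_resent(struct txmap *map, struct txblk *b, uint64_t now)
{
	b->last_sent = now;
	TAILQ_REMOVE(map, b, t_next);		/* out of its old position */
	TAILQ_INSERT_TAIL(map, b, t_next);	/* newest send goes last */
}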
8053 * We (re-)transmitted starting at rsm->r_start for some length in rack_update_entry()
8062 c_end = rsm->r_start + len; in rack_update_entry()
8063 if (SEQ_GEQ(c_end, rsm->r_end)) { in rack_update_entry()
8069 if (c_end == rsm->r_end) { in rack_update_entry()
8076 act_len = rsm->r_end - rsm->r_start; in rack_update_entry()
8077 *lenp = (len - act_len); in rack_update_entry()
8078 return (rsm->r_end); in rack_update_entry()
8102 nrsm->r_dupack = 0; in rack_update_entry()
8105 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_update_entry()
8107 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_update_entry()
8112 if (rsm->r_in_tmap) { in rack_update_entry()
8113 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_update_entry()
8114 nrsm->r_in_tmap = 1; in rack_update_entry()
8116 rsm->r_flags &= (~RACK_HAS_FIN); in rack_update_entry()
8154 * -- i.e. return if err != 0 or should we pretend we sent it? -- in rack_log_output()
8160 * We don't log errors -- we could but snd_max does not in rack_log_output()
8172 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_output()
8173 snd_una = tp->snd_una; in rack_log_output()
8174 snd_max = tp->snd_max; in rack_log_output()
8182 if ((th_flags & TH_SYN) && (seq_out == tp->iss)) in rack_log_output()
8188 /* Are we sending an old segment to induce an ack (keep-alive)? */ in rack_log_output()
8198 len = end - seq_out; in rack_log_output()
8206 if (IN_FASTRECOVERY(tp->t_flags)) { in rack_log_output()
8207 rack->r_ctl.rc_prr_out += len; in rack_log_output()
8223 rsm->r_flags = RACK_HAS_FIN|add_flag; in rack_log_output()
8225 rsm->r_flags = add_flag; in rack_log_output()
8228 rsm->r_hw_tls = 1; in rack_log_output()
8229 rsm->r_tim_lastsent[0] = cts; in rack_log_output()
8230 rsm->r_rtr_cnt = 1; in rack_log_output()
8231 rsm->r_act_rxt_cnt = 0; in rack_log_output()
8232 rsm->r_rtr_bytes = 0; in rack_log_output()
8235 rsm->r_flags |= RACK_HAS_SYN; in rack_log_output()
8237 rsm->r_start = seq_out; in rack_log_output()
8238 rsm->r_end = rsm->r_start + len; in rack_log_output()
8240 rsm->r_dupack = 0; in rack_log_output()
8246 rsm->m = s_mb; in rack_log_output()
8247 rsm->soff = s_moff; in rack_log_output()
8250 * reflected in snd_una <-> snd_max in rack_log_output()
8252 rsm->r_fas = (ctf_flight_size(rack->rc_tp, in rack_log_output()
8253 rack->r_ctl.rc_sacked) + in rack_log_output()
8254 (rsm->r_end - rsm->r_start)); in rack_log_output()
8255 if ((rack->rc_initial_ss_comp == 0) && in rack_log_output()
8256 (rack->r_ctl.ss_hi_fs < rsm->r_fas)) { in rack_log_output()
8257 rack->r_ctl.ss_hi_fs = rsm->r_fas; in rack_log_output()
8259 /* rsm->m will be NULL if RACK_HAS_SYN or RACK_HAS_FIN is set */ in rack_log_output()
8260 if (rsm->m) { in rack_log_output()
8261 if (rsm->m->m_len <= rsm->soff) { in rack_log_output()
8267 * within rsm->m. But if the sbsndptr was in rack_log_output()
8273 lm = rsm->m; in rack_log_output()
8274 while (lm->m_len <= rsm->soff) { in rack_log_output()
8275 rsm->soff -= lm->m_len; in rack_log_output()
8276 lm = lm->m_next; in rack_log_output()
8277 KASSERT(lm != NULL, ("%s rack:%p lm goes null orig_off:%u origmb:%p rsm->soff:%u", in rack_log_output()
8278 __func__, rack, s_moff, s_mb, rsm->soff)); in rack_log_output()
8280 rsm->m = lm; in rack_log_output()
8282 rsm->orig_m_len = rsm->m->m_len; in rack_log_output()
8283 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_log_output()
8285 rsm->orig_m_len = 0; in rack_log_output()
8286 rsm->orig_t_space = 0; in rack_log_output()
8288 rsm->r_bas = (uint8_t)((len + segsiz - 1) / segsiz); in rack_log_output()
8293 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_log_output()
8295 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_log_output()
8300 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_log_output()
8301 rsm->r_in_tmap = 1; in rack_log_output()
8302 if (rsm->r_flags & RACK_IS_PCM) { in rack_log_output()
8303 rack->r_ctl.pcm_i.send_time = cts; in rack_log_output()
8304 rack->r_ctl.pcm_i.eseq = rsm->r_end; in rack_log_output()
8306 if (rack->pcm_in_progress == 0) in rack_log_output()
8307 rack->r_ctl.pcm_i.sseq = rsm->r_start; in rack_log_output()
8315 if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_log_output()
8316 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) == ctf_fixed_maxseg(tp))) { in rack_log_output()
8319 prsm = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_log_output()
8321 prsm->r_one_out_nr = 1; in rack_log_output()
8329 if (hintrsm && (hintrsm->r_start == seq_out)) { in rack_log_output()
8336 if ((rsm) && (rsm->r_start == seq_out)) { in rack_log_output()
8346 rsm = tqhash_find(rack->r_ctl.tqh, seq_out); in rack_log_output()
8348 if (rsm->r_start == seq_out) { in rack_log_output()
8356 if (SEQ_GEQ(seq_out, rsm->r_start) && SEQ_LT(seq_out, rsm->r_end)) { in rack_log_output()
8374 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_log_output()
8376 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_log_output()
8381 if (rsm->r_in_tmap) { in rack_log_output()
8382 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_log_output()
8383 nrsm->r_in_tmap = 1; in rack_log_output()
8385 rsm->r_flags &= (~RACK_HAS_FIN); in rack_log_output()
8397 if (seq_out == tp->snd_max) { in rack_log_output()
8399 } else if (SEQ_LT(seq_out, tp->snd_max)) { in rack_log_output()
8401 printf("seq_out:%u len:%d snd_una:%u snd_max:%u -- but rsm not found?\n", in rack_log_output()
8402 seq_out, len, tp->snd_una, tp->snd_max); in rack_log_output()
8404 TQHASH_FOREACH(rsm, rack->r_ctl.tqh) { in rack_log_output()
8406 rsm, rsm->r_start, rsm->r_end); in rack_log_output()
8415 * Hmm beyond sndmax? (only if we are using the new rtt-pack in rack_log_output()
8419 seq_out, len, tp->snd_max, tp); in rack_log_output()
8433 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8434 (rack->r_ctl.rack_rs.rs_rtt_lowest > rtt)) { in tcp_rack_xmit_timer()
8435 rack->r_ctl.rack_rs.rs_rtt_lowest = rtt; in tcp_rack_xmit_timer()
8437 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8438 (rack->r_ctl.rack_rs.rs_rtt_highest < rtt)) { in tcp_rack_xmit_timer()
8439 rack->r_ctl.rack_rs.rs_rtt_highest = rtt; in tcp_rack_xmit_timer()
8441 if (rack->rc_tp->t_flags & TF_GPUTINPROG) { in tcp_rack_xmit_timer()
8442 if (us_rtt < rack->r_ctl.rc_gp_lowrtt) in tcp_rack_xmit_timer()
8443 rack->r_ctl.rc_gp_lowrtt = us_rtt; in tcp_rack_xmit_timer()
8444 if (rack->rc_tp->snd_wnd > rack->r_ctl.rc_gp_high_rwnd) in tcp_rack_xmit_timer()
8445 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in tcp_rack_xmit_timer()
8449 (rsm->r_just_ret) || in tcp_rack_xmit_timer()
8450 (rsm->r_one_out_nr && in tcp_rack_xmit_timer()
8451 len < (ctf_fixed_maxseg(rack->rc_tp) * 2)))) { in tcp_rack_xmit_timer()
8458 * the r_one_out_nr. If it was a CUM-ACK and in tcp_rack_xmit_timer()
8465 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) || in tcp_rack_xmit_timer()
8466 (rack->r_ctl.rack_rs.rs_us_rtt > us_rtt)) { in tcp_rack_xmit_timer()
8467 if (rack->r_ctl.rack_rs.confidence == 0) { in tcp_rack_xmit_timer()
8472 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8473 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8474 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8483 rack->r_ctl.rack_rs.rs_us_rtt = us_rtt; in tcp_rack_xmit_timer()
8484 rack->r_ctl.rack_rs.confidence = confidence; in tcp_rack_xmit_timer()
8485 rack->r_ctl.rack_rs.rs_us_rtrcnt = rtrcnt; in tcp_rack_xmit_timer()
8488 rack_log_rtt_upd(rack->rc_tp, rack, us_rtt, len, rsm, confidence); in tcp_rack_xmit_timer()
8489 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_VALID; in tcp_rack_xmit_timer()
8490 rack->r_ctl.rack_rs.rs_rtt_tot += rtt; in tcp_rack_xmit_timer()
8491 rack->r_ctl.rack_rs.rs_rtt_cnt++; in tcp_rack_xmit_timer()
8495 * Collect new round-trip time estimate
8504 if (rack->r_ctl.rack_rs.rs_flags & RACK_RTT_EMPTY) in tcp_rack_xmit_timer_commit()
8507 if (rack->r_ctl.rc_rate_sample_method == USE_RTT_LOW) { in tcp_rack_xmit_timer_commit()
8509 rtt = rack->r_ctl.rack_rs.rs_rtt_lowest; in tcp_rack_xmit_timer_commit()
8510 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_HIGH) { in tcp_rack_xmit_timer_commit()
8512 rtt = rack->r_ctl.rack_rs.rs_rtt_highest; in tcp_rack_xmit_timer_commit()
8513 } else if (rack->r_ctl.rc_rate_sample_method == USE_RTT_AVG) { in tcp_rack_xmit_timer_commit()
8515 rtt = (int32_t)(rack->r_ctl.rack_rs.rs_rtt_tot / in tcp_rack_xmit_timer_commit()
8516 (uint64_t)rack->r_ctl.rack_rs.rs_rtt_cnt); in tcp_rack_xmit_timer_commit()
8519 panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method); in tcp_rack_xmit_timer_commit()
8525 if (rack->rc_gp_rtt_set == 0) { in tcp_rack_xmit_timer_commit()
8530 rack->r_ctl.rc_gp_srtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8531 rack->rc_gp_rtt_set = 1; in tcp_rack_xmit_timer_commit()
8532 } else if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8534 rack->r_ctl.rc_gp_srtt -= (rack->r_ctl.rc_gp_srtt/8); in tcp_rack_xmit_timer_commit()
8535 rack->r_ctl.rc_gp_srtt += rack->r_ctl.rack_rs.rs_us_rtt / 8; in tcp_rack_xmit_timer_commit()
8537 if (rack->r_ctl.rack_rs.confidence) { in tcp_rack_xmit_timer_commit()
8542 if (rack->r_ctl.rc_highest_us_rtt < rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8543 rack->r_ctl.rc_highest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8545 if (rack->rc_highly_buffered == 0) { in tcp_rack_xmit_timer_commit()
8551 if ((rack->r_ctl.rc_highest_us_rtt / rack->r_ctl.rc_lowest_us_rtt) > rack_hbp_thresh) { in tcp_rack_xmit_timer_commit()
8552 rack_log_rtt_shrinks(rack, rack->r_ctl.rack_rs.rs_us_rtt, in tcp_rack_xmit_timer_commit()
8553 rack->r_ctl.rc_highest_us_rtt, in tcp_rack_xmit_timer_commit()
8554 rack->r_ctl.rc_lowest_us_rtt, in tcp_rack_xmit_timer_commit()
8556 rack->rc_highly_buffered = 1; in tcp_rack_xmit_timer_commit()
8560 if ((rack->r_ctl.rack_rs.confidence) || in tcp_rack_xmit_timer_commit()
8561 (rack->r_ctl.rack_rs.rs_us_rtrcnt == 1)) { in tcp_rack_xmit_timer_commit()
8566 rack->r_ctl.rc_last_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8568 if (rack->r_ctl.rc_lowest_us_rtt > rack->r_ctl.rack_rs.rs_us_rtt) { in tcp_rack_xmit_timer_commit()
8569 rack->r_ctl.rc_lowest_us_rtt = rack->r_ctl.rack_rs.rs_us_rtt; in tcp_rack_xmit_timer_commit()
8570 if (rack->r_ctl.rc_lowest_us_rtt == 0) in tcp_rack_xmit_timer_commit()
8571 rack->r_ctl.rc_lowest_us_rtt = 1; in tcp_rack_xmit_timer_commit()
8574 rack = (struct tcp_rack *)tp->t_fb_ptr; in tcp_rack_xmit_timer_commit()
8575 if (tp->t_srtt != 0) { in tcp_rack_xmit_timer_commit()
8584 delta = tp->t_srtt - rtt; in tcp_rack_xmit_timer_commit()
8586 tp->t_srtt -= (tp->t_srtt >> 3); in tcp_rack_xmit_timer_commit()
8588 tp->t_srtt += (rtt >> 3); in tcp_rack_xmit_timer_commit()
8589 if (tp->t_srtt <= 0) in tcp_rack_xmit_timer_commit()
8590 tp->t_srtt = 1; in tcp_rack_xmit_timer_commit()
8593 delta = -delta; in tcp_rack_xmit_timer_commit()
8595 tp->t_rttvar -= (tp->t_rttvar >> 3); in tcp_rack_xmit_timer_commit()
8597 tp->t_rttvar += (delta >> 3); in tcp_rack_xmit_timer_commit()
8598 if (tp->t_rttvar <= 0) in tcp_rack_xmit_timer_commit()
8599 tp->t_rttvar = 1; in tcp_rack_xmit_timer_commit()
8602 * No rtt measurement yet - use the unsmoothed rtt. Set the in tcp_rack_xmit_timer_commit()
8606 tp->t_srtt = rtt; in tcp_rack_xmit_timer_commit()
8607 tp->t_rttvar = rtt >> 1; in tcp_rack_xmit_timer_commit()
8609 rack->rc_srtt_measure_made = 1; in tcp_rack_xmit_timer_commit()
8611 if (tp->t_rttupdated < UCHAR_MAX) in tcp_rack_xmit_timer_commit()
8612 tp->t_rttupdated++; in tcp_rack_xmit_timer_commit()
8616 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rtt)); in tcp_rack_xmit_timer_commit()
8622 ms_rtt = (rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8623 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8629 ms_rtt = (rack->r_ctl.rack_rs.rs_us_rtt + HPTS_USEC_IN_MSEC - 1) / HPTS_USEC_IN_MSEC; in tcp_rack_xmit_timer_commit()
8630 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, ms_rtt)); in tcp_rack_xmit_timer_commit()
8633 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8635 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_PATHRTT, imax(0, rack->r_ctl.rack_rs.rs_us_rtt)); in tcp_rack_xmit_timer_commit()
8637 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); in tcp_rack_xmit_timer_commit()
8642 * tick of rounding and 1 extra tick because of +-1/2 tick in tcp_rack_xmit_timer_commit()
8648 tp->t_rxtshift = 0; in tcp_rack_xmit_timer_commit()
8649 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in tcp_rack_xmit_timer_commit()
8650 max(rack_rto_min, rtt + 2), rack_rto_max, rack->r_ctl.timer_slop); in tcp_rack_xmit_timer_commit()
8652 tp->t_softerror = 0; in tcp_rack_xmit_timer_commit()
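/*
 * Editor's illustrative sketch (not part of rack.c): the smoothing
 * above, isolated.  rack keeps srtt and rttvar directly in
 * microseconds and uses a gain of 1/8 for both estimators, written as
 * subtract-an-eighth / add-an-eighth so no scaled fixed-point
 * representation is needed.
 */
#include <stdint.h>

static void
rtt_ewma(int32_t *srtt, int32_t *rttvar, int32_t rtt)
{
	int32_t delta;

	if (*srtt != 0) {
		delta = *srtt - rtt;		/* error vs. the estimate */
		*srtt -= (*srtt >> 3);		/* srtt = 7/8 srtt + 1/8 rtt */
		*srtt += (rtt >> 3);
		if (*srtt <= 0)
			*srtt = 1;
		if (delta < 0)
			delta = -delta;
		*rttvar -= (*rttvar >> 3);	/* rttvar chases |error| at 1/8 */
		*rttvar += (delta >> 3);
		if (*rttvar <= 0)
			*rttvar = 1;
	} else {
		*srtt = rtt;		/* first sample seeds the estimator */
		*rttvar = rtt >> 1;
	}
}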
8660 * Apply the inbound us-rtt at us_cts to the filter. in rack_apply_updated_usrtt()
8664 old_rtt = get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt); in rack_apply_updated_usrtt()
8665 apply_filter_min_small(&rack->r_ctl.rc_gp_min_rtt, in rack_apply_updated_usrtt()
8675 if ((old_rtt - us_rtt) > rack_min_rtt_movement) { in rack_apply_updated_usrtt()
8677 rack->rc_gp_dyn_mul && in rack_apply_updated_usrtt()
8678 (rack->use_fixed_rate == 0) && in rack_apply_updated_usrtt()
8679 (rack->rc_always_pace)) { in rack_apply_updated_usrtt()
8682 * to the time that we would have entered probe-rtt. in rack_apply_updated_usrtt()
8684 * has entered probe-rtt. Lets go in now too. in rack_apply_updated_usrtt()
8690 if ((rack->in_probe_rtt == 0) && in rack_apply_updated_usrtt()
8691 (rack->rc_skip_timely == 0) && in rack_apply_updated_usrtt()
8692 ((us_cts - rack->r_ctl.rc_lower_rtt_us_cts) >= (rack_time_between_probertt - val))) { in rack_apply_updated_usrtt()
8696 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_apply_updated_usrtt()
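/*
 * Editor's illustrative sketch (not part of rack.c): the reasoning
 * behind pulling probe-rtt forward above.  If a new sample undercuts
 * the tracked minimum by more than a movement threshold, another flow
 * has likely just drained the queue in its own probe-rtt, so we credit
 * elapsed time and enter early rather than waiting out the full
 * interval.  This is a simplified, hypothetical rendering.
 */
#include <stdbool.h>
#include <stdint.h>

struct minrtt {
	uint32_t min_us;	/* tracked minimum RTT */
	uint32_t last_entry;	/* last (virtual) probe-rtt entry time */
};

static bool
should_enter_probertt(struct minrtt *f, uint32_t us_rtt, uint32_t now,
    uint32_t move_thresh, uint32_t interval)
{
	uint32_t credit = 0;

	if (us_rtt < f->min_us) {
		if ((f->min_us - us_rtt) > move_thresh)
			credit = interval / 2;	/* big drop: shorten the wait */
		f->min_us = us_rtt;
	}
	return ((now - f->last_entry) >= (interval - credit));
}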
8709 if ((rsm->r_flags & RACK_ACKED) || in rack_update_rtt()
8710 (rsm->r_flags & RACK_WAS_ACKED)) in rack_update_rtt()
8713 if (rsm->r_no_rtt_allowed) { in rack_update_rtt()
8718 if (SEQ_GT(th_ack, rsm->r_end)) { in rack_update_rtt()
8719 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8722 len_acked = th_ack - rsm->r_start; in rack_update_rtt()
8726 len_acked = rsm->r_end - rsm->r_start; in rack_update_rtt()
8729 if (rsm->r_rtr_cnt == 1) { in rack_update_rtt()
8731 t = cts - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8734 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
8735 tp->t_rttlow = t; in rack_update_rtt()
8736 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8737 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8738 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8739 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8740 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8743 if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) in rack_update_rtt()
8744 …us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt in rack_update_rtt()
8746 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]; in rack_update_rtt()
8749 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8751 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8753 rack_apply_updated_usrtt(rack, us_rtt, tcp_tv_to_usec(&rack->r_ctl.act_rcv_time)); in rack_update_rtt()
8755 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 1); in rack_update_rtt()
8756 tcp_rack_xmit_timer(rack, t + 1, len_acked, us_rtt, 2 , rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8769 * When we are not app-limited then we see if in rack_update_rtt()
8786 if (rsm->r_flags & RACK_APP_LIMITED) { in rack_update_rtt()
8791 } else if (rack->app_limited_needs_set == 0) { in rack_update_rtt()
8796 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)], cts, 2); in rack_update_rtt()
8798 calc_conf, rsm, rsm->r_rtr_cnt); in rack_update_rtt()
8800 if ((rsm->r_flags & RACK_TLP) && in rack_update_rtt()
8801 (!IN_FASTRECOVERY(tp->t_flags))) { in rack_update_rtt()
8803 if (rack->r_ctl.rc_tlp_cwnd_reduce) { in rack_update_rtt()
8807 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8808 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8809 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
8811 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8812 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8813 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8814 rack->rc_rack_rtt = t; in rack_update_rtt()
8823 tp->t_rxtshift = 0; in rack_update_rtt()
8824 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_update_rtt()
8825 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_update_rtt()
8826 tp->t_softerror = 0; in rack_update_rtt()
8827 if (to && (to->to_flags & TOF_TS) && in rack_update_rtt()
8829 (to->to_tsecr) && in rack_update_rtt()
8830 ((rsm->r_flags & RACK_OVERMAX) == 0)) { in rack_update_rtt()
8835 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
8836 if (rack_ts_to_msec(rsm->r_tim_lastsent[i]) == to->to_tsecr) { in rack_update_rtt()
8837 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8840 if (CC_ALGO(tp)->rttsample != NULL) { in rack_update_rtt()
8848 if (TSTMP_GT(tcp_tv_to_usec(&rack->r_ctl.act_rcv_time), rsm->r_tim_lastsent[i])) in rack_update_rtt()
8849 us_rtt = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8851 us_rtt = tcp_get_usecs(NULL) - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8852 CC_ALGO(tp)->rttsample(&tp->t_ccv, us_rtt, 1, rsm->r_fas); in rack_update_rtt()
8854 if ((i + 1) < rsm->r_rtr_cnt) { in rack_update_rtt()
8866 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_update_rtt()
8867 tp->t_rttlow = t; in rack_update_rtt()
8868 if (!rack->r_ctl.rc_rack_min_rtt || SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8869 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8870 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8871 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8874 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8875 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8876 (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]))) { in rack_update_rtt()
8878 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_update_rtt()
8879 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8880 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8881 rack->rc_rack_rtt = t; in rack_update_rtt()
8883 rack_log_rtt_sample_calc(rack, t, (uint32_t)rsm->r_tim_lastsent[i], cts, 3); in rack_update_rtt()
8885 rsm->r_rtr_cnt); in rack_update_rtt()
8890 if (tcp_bblogging_on(rack->rc_tp)) { in rack_update_rtt()
8891 for (i = 0; i < rsm->r_rtr_cnt; i++) { in rack_update_rtt()
8892 rack_log_rtt_sendmap(rack, i, rsm->r_tim_lastsent[i], to->to_tsecr); in rack_update_rtt()
8900 * time-stamp since its not there or the time the peer last in rack_update_rtt()
8901 * received a segment that moved forward its cum-ack point. in rack_update_rtt()
8904 i = rsm->r_rtr_cnt - 1; in rack_update_rtt()
8905 t = cts - (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8908 if (rack->r_ctl.rc_rack_min_rtt && SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8913 * 6.2 Step 2 point 2 in the rack-draft so we in rack_update_rtt()
8919 } else if (rack->r_ctl.rc_rack_min_rtt) { in rack_update_rtt()
8924 if (!rack->r_ctl.rc_rack_min_rtt || in rack_update_rtt()
8925 SEQ_LT(t, rack->r_ctl.rc_rack_min_rtt)) { in rack_update_rtt()
8926 rack->r_ctl.rc_rack_min_rtt = t; in rack_update_rtt()
8927 if (rack->r_ctl.rc_rack_min_rtt == 0) { in rack_update_rtt()
8928 rack->r_ctl.rc_rack_min_rtt = 1; in rack_update_rtt()
8931 if ((rack->r_ctl.rc_rack_tmit_time == 0) || in rack_update_rtt()
8932 (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, in rack_update_rtt()
8933 (uint32_t)rsm->r_tim_lastsent[i]))) { in rack_update_rtt()
8935 rack->r_ctl.rc_rack_tmit_time = (uint32_t)rsm->r_tim_lastsent[i]; in rack_update_rtt()
8936 if (rack->r_ctl.rc_rack_tmit_time == 0) in rack_update_rtt()
8937 rack->r_ctl.rc_rack_tmit_time = 1; in rack_update_rtt()
8938 rack->rc_rack_rtt = t; in rack_update_rtt()
8960 TAILQ_FOREACH_REVERSE_FROM(nrsm, &rack->r_ctl.rc_tmap, in rack_log_sack_passed()
8966 if (nrsm->r_flags & RACK_ACKED) { in rack_log_sack_passed()
8974 if (nrsm->r_flags & RACK_RWND_COLLAPSED) { in rack_log_sack_passed()
8982 if ((nrsm->r_flags & RACK_WAS_LOST) == 0) { in rack_log_sack_passed()
8985 exp = ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]) + thresh; in rack_log_sack_passed()
8988 nrsm->r_flags |= RACK_WAS_LOST; in rack_log_sack_passed()
8989 rack->r_ctl.rc_considered_lost += nrsm->r_end - nrsm->r_start; in rack_log_sack_passed()
8992 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_log_sack_passed()
9000 nrsm->r_flags |= RACK_SACK_PASSED; in rack_log_sack_passed()
9001 nrsm->r_flags &= ~RACK_WAS_SACKPASS; in rack_log_sack_passed()
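/*
 * Editor's illustrative sketch (not part of rack.c): RACK's time-based
 * loss test applied while walking sack-passed segments above.  A
 * segment is declared lost once its last transmit time plus the
 * reorder threshold lies in the past; until then it is only "sack
 * passed".  Wrap-safe comparison, as with the kernel's TSTMP macros.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
rack_seg_is_lost(uint32_t last_sent, uint32_t thresh, uint32_t now)
{
	uint32_t exp = last_sent + thresh;	/* deadline for its ack */

	return ((int32_t)(now - exp) >= 0);	/* exp <= now, mod 2^32 */
}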
9015 if ((tp->t_flags & TF_GPUTINPROG) && in rack_need_set_test()
9016 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9026 if (rsm->r_rtr_cnt > 1) { in rack_need_set_test()
9039 seq = tp->gput_seq; in rack_need_set_test()
9040 ts = tp->gput_ts; in rack_need_set_test()
9041 rack->app_limited_needs_set = 0; in rack_need_set_test()
9042 tp->gput_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_need_set_test()
9045 SEQ_GEQ(rsm->r_start, tp->gput_seq)) { in rack_need_set_test()
9053 tp->gput_seq = rsm->r_start; in rack_need_set_test()
9056 SEQ_GEQ(rsm->r_end, tp->gput_seq)) { in rack_need_set_test()
9068 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9074 * way up to where this ack cum-ack moves in rack_need_set_test()
9077 if (SEQ_GT(th_ack, rsm->r_end)) in rack_need_set_test()
9078 tp->gput_seq = th_ack; in rack_need_set_test()
9080 tp->gput_seq = rsm->r_end; in rack_need_set_test()
9082 if (SEQ_LT(tp->gput_seq, tp->snd_max)) in rack_need_set_test()
9083 s_rsm = tqhash_find(rack->r_ctl.tqh, tp->gput_seq); in rack_need_set_test()
9097 rack->r_ctl.rc_gp_output_ts = s_rsm->r_tim_lastsent[0]; in rack_need_set_test()
9099 /* If we hit here we have to have *not* sent tp->gput_seq */ in rack_need_set_test()
9100 rack->r_ctl.rc_gp_output_ts = rsm->r_tim_lastsent[0]; in rack_need_set_test()
9102 rack->app_limited_needs_set = 1; in rack_need_set_test()
9104 if (SEQ_GT(tp->gput_seq, tp->gput_ack)) { in rack_need_set_test()
9106 * We moved beyond this guy's range, re-calculate in rack_need_set_test()
9109 if (rack->rc_gp_filled == 0) { in rack_need_set_test()
9110 tp->gput_ack = tp->gput_seq + max(rc_init_window(rack), (MIN_GP_WIN * ctf_fixed_maxseg(tp))); in rack_need_set_test()
9112 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_need_set_test()
9119 if ((rack->in_probe_rtt == 0) && in rack_need_set_test()
9120 (rack->measure_saw_probe_rtt) && in rack_need_set_test()
9121 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_need_set_test()
9122 rack->measure_saw_probe_rtt = 0; in rack_need_set_test()
9123 rack_log_pacing_delay_calc(rack, ts, tp->gput_ts, in rack_need_set_test()
9124 seq, tp->gput_seq, in rack_need_set_test()
9125 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9126 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9128 if (rack->rc_gp_filled && in rack_need_set_test()
9129 ((tp->gput_ack - tp->gput_seq) < in rack_need_set_test()
9135 if (ideal_amount > sbavail(&tptosocket(tp)->so_snd)) { in rack_need_set_test()
9142 tp->t_flags &= ~TF_GPUTINPROG; in rack_need_set_test()
9143 rack_log_pacing_delay_calc(rack, tp->gput_ack, tp->gput_seq, in rack_need_set_test()
9145 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | in rack_need_set_test()
9146 (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_need_set_test()
9152 tp->gput_ack = tp->gput_seq + ideal_amount; in rack_need_set_test()
9156 rack_log_gpset(rack, tp->gput_ack, 0, 0, line, 2, rsm); in rack_need_set_test()
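/*
 * Summary: when a goodput measurement is in progress (TF_GPUTINPROG)
 * and the (s)acked range reaches gput_seq, the measurement window is
 * re-anchored: gput_seq slides forward to the start/end of the rsm
 * (or the cum-ack point), gput_ts is reset to the ack arrival time,
 * and rc_gp_output_ts is taken from the first transmit of the segment
 * that now starts the window. If what remains to measure is smaller
 * than the ideal window and the socket buffer cannot cover it, the
 * measurement is abandoned rather than produce a skewed sample.
 */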
9163 if (SEQ_LT(rsm->r_end, rack->r_ctl.last_tlp_acked_start)) { in is_rsm_inside_declared_tlp_block()
9167 if (SEQ_GT(rsm->r_start, rack->r_ctl.last_tlp_acked_end)) { in is_rsm_inside_declared_tlp_block()
9171 /* It has to be a sub-part of the original TLP recorded */ in is_rsm_inside_declared_tlp_block()
9187 start = sack->start; in rack_proc_sack_blk()
9188 end = sack->end; in rack_proc_sack_blk()
9193 (SEQ_LT(end, rsm->r_start)) || in rack_proc_sack_blk()
9194 (SEQ_GEQ(start, rsm->r_end)) || in rack_proc_sack_blk()
9195 (SEQ_LT(start, rsm->r_start))) { in rack_proc_sack_blk()
9201 rsm = tqhash_find(rack->r_ctl.tqh, start); in rack_proc_sack_blk()
9208 if (rsm->r_start != start) { in rack_proc_sack_blk()
9209 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9214 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9215 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9220 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9228 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9232 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9233 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9234 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9235 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9237 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9238 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9239 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9240 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9243 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9244 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9245 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9246 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9247 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9254 * rsm |--------------| in rack_proc_sack_blk()
9255 * sackblk |-------> in rack_proc_sack_blk()
9257 * rsm |---| in rack_proc_sack_blk()
9259 * nrsm |----------| in rack_proc_sack_blk()
9271 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9273 (rsm->bindex == next->bindex) && in rack_proc_sack_blk()
9274 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9275 ((next->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9276 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9277 ((next->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9278 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9279 (next->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9284 (next->r_flags & RACK_ACKED) && in rack_proc_sack_blk()
9285 SEQ_GEQ(end, next->r_start)) { in rack_proc_sack_blk()
9292 * rsm |------------| (not-acked) in rack_proc_sack_blk()
9293 * next |-----------| (acked) in rack_proc_sack_blk()
9294 * sackblk |--------> in rack_proc_sack_blk()
9296 * rsm |------| (not-acked) in rack_proc_sack_blk()
9297 * next |-----------------| (acked) in rack_proc_sack_blk()
9298 * nrsm |-----| in rack_proc_sack_blk()
9306 tqhash_update_end(rack->r_ctl.tqh, rsm, start); in rack_proc_sack_blk()
9307 next->r_start = start; in rack_proc_sack_blk()
9308 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9309 next->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9310 /* Now we must adjust back where next->m is */ in rack_proc_sack_blk()
9330 if (next->r_tim_lastsent[(next->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9331 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) in rack_proc_sack_blk()
9332 next->r_tim_lastsent[(next->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9336 if (next->r_ack_arrival < in rack_proc_sack_blk()
9337 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9338 next->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9343 rsm->r_dupack = 0; in rack_proc_sack_blk()
9344 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9347 nrsm->r_start = start; in rack_proc_sack_blk()
9350 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9351 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9352 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9353 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9354 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9362 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9363 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9365 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9366 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9368 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9370 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9371 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9372 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9373 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9377 * one left un-acked) to the next one in rack_proc_sack_blk()
9380 * sack-passed on rsm (The one passed in in rack_proc_sack_blk()
9385 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9391 if (nrsm && nrsm->r_in_tmap) in rack_proc_sack_blk()
9395 if (SEQ_LT(end, next->r_end) || in rack_proc_sack_blk()
9396 (end == next->r_end)) { in rack_proc_sack_blk()
9402 start = next->r_end; in rack_proc_sack_blk()
9403 rsm = tqhash_next(rack->r_ctl.tqh, next); in rack_proc_sack_blk()
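/*
 * The path above is the left-edge merge: rather than splitting rsm
 * and allocating a new map entry, the already-ACKed "next" entry is
 * grown downward (next->r_start = start) and rsm is shrunk. The
 * copied-forward r_tim_lastsent and r_ack_arrival updates preserve
 * the newest send/arrival times for RTT bookkeeping, and both
 * entries are tagged RACK_SHUFFLED since their byte ranges no longer
 * match what was originally sent.
 */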
9411 * rsm |--------| in rack_proc_sack_blk()
9412 * sackblk |-----> in rack_proc_sack_blk()
9417 * rsm |----| in rack_proc_sack_blk()
9418 * sackblk |-----> in rack_proc_sack_blk()
9419 * nrsm |---| in rack_proc_sack_blk()
9433 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9435 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9437 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9442 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9443 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9444 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9447 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9453 if (end == rsm->r_end) { in rack_proc_sack_blk()
9455 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9457 } else if (SEQ_LT(end, rsm->r_end)) { in rack_proc_sack_blk()
9459 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9467 start = rsm->r_end; in rack_proc_sack_blk()
9468 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9474 if (SEQ_GEQ(end, rsm->r_end)) { in rack_proc_sack_blk()
9478 * rsm --- |-----| in rack_proc_sack_blk()
9479 * end |-----| in rack_proc_sack_blk()
9481 * end |---------| in rack_proc_sack_blk()
9483 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9487 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9488 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9493 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9500 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9504 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9505 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9506 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9507 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9509 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9510 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9511 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9512 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9515 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9516 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9517 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9518 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9519 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9523 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9525 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9532 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9533 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9536 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9537 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9538 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9539 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9540 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9542 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9543 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9544 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9545 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9546 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9547 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9548 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9549 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
9553 if (end == rsm->r_end) { in rack_proc_sack_blk()
9554 /* This block only - done, set up for next */ in rack_proc_sack_blk()
9561 nrsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9562 start = rsm->r_end; in rack_proc_sack_blk()
9571 * rsm --- |-----| in rack_proc_sack_blk()
9572 * end |--| in rack_proc_sack_blk()
9574 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_proc_sack_blk()
9578 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9579 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9584 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9591 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9595 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9596 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9597 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9598 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9600 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9601 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9602 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9603 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9606 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9607 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9608 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9609 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9610 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9618 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9620 (rsm->bindex == prev->bindex) && in rack_proc_sack_blk()
9621 ((rsm->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9622 ((prev->r_flags & RACK_STRADDLE) == 0) && in rack_proc_sack_blk()
9623 ((rsm->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9624 ((prev->r_flags & RACK_IS_PCM) == 0) && in rack_proc_sack_blk()
9625 (rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9626 (prev->r_flags & RACK_IN_GP_WIN)) in rack_proc_sack_blk()
9631 (prev->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
9634 * in place and span from (rsm->r_start = end) to rsm->r_end. in rack_proc_sack_blk()
9636 * to prev->r_end <- end. in rack_proc_sack_blk()
9638 * prev |--------| (acked) in rack_proc_sack_blk()
9639 * rsm |-------| (non-acked) in rack_proc_sack_blk()
9640 * sackblk |-| in rack_proc_sack_blk()
9642 * prev |----------| (acked) in rack_proc_sack_blk()
9643 * rsm |-----| (non-acked) in rack_proc_sack_blk()
9644 * nrsm |-| (temporary) in rack_proc_sack_blk()
9651 tqhash_update_end(rack->r_ctl.tqh, prev, end); in rack_proc_sack_blk()
9652 rsm->r_start = end; in rack_proc_sack_blk()
9653 rsm->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9654 prev->r_flags |= RACK_SHUFFLED; in rack_proc_sack_blk()
9659 nrsm->r_end = end; in rack_proc_sack_blk()
9660 rsm->r_dupack = 0; in rack_proc_sack_blk()
9679 if (prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] < in rack_proc_sack_blk()
9680 nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]) { in rack_proc_sack_blk()
9681 prev->r_tim_lastsent[(prev->r_rtr_cnt-1)] = nrsm->r_tim_lastsent[(nrsm->r_rtr_cnt-1)]; in rack_proc_sack_blk()
9687 if (prev->r_ack_arrival < in rack_proc_sack_blk()
9688 rack_to_usec_ts(&rack->r_ctl.act_rcv_time)) in rack_proc_sack_blk()
9689 prev->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9704 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9705 rack_need_set_test(tp, rack, nrsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9706 changed += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9707 rack->r_ctl.rc_sacked += (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9708 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9715 my_chg = (nrsm->r_end - nrsm->r_start); in rack_proc_sack_blk()
9716 KASSERT((rack->r_ctl.rc_considered_lost >= my_chg), in rack_proc_sack_blk()
9718 if (my_chg <= rack->r_ctl.rc_considered_lost) in rack_proc_sack_blk()
9719 rack->r_ctl.rc_considered_lost -= my_chg; in rack_proc_sack_blk()
9721 rack->r_ctl.rc_considered_lost = 0; in rack_proc_sack_blk()
9723 if (nrsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9724 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9725 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9726 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
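/*
 * Mirror image of the left-edge case: here the SACK covers only the
 * front of rsm, so the already-ACKed "prev" entry is extended forward
 * (prev->r_end = end) and rsm shrinks to start at end. The temporary
 * nrsm clone exists only so the merged bytes can be credited to
 * changed/rc_sacked and checked for reordering before it goes away.
 */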
9741 if ((rsm->r_flags & RACK_TLP) && in rack_proc_sack_blk()
9742 (rsm->r_rtr_cnt > 1)) { in rack_proc_sack_blk()
9747 if (rack->rc_last_tlp_acked_set && in rack_proc_sack_blk()
9754 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9758 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_proc_sack_blk()
9759 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9760 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9761 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9763 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_proc_sack_blk()
9764 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9765 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_proc_sack_blk()
9766 rack->r_ctl.last_tlp_acked_end); in rack_proc_sack_blk()
9769 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_proc_sack_blk()
9770 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_proc_sack_blk()
9771 rack->rc_last_tlp_acked_set = 1; in rack_proc_sack_blk()
9772 rack->rc_last_tlp_past_cumack = 0; in rack_proc_sack_blk()
9773 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9778 * nrsm->r_start = end; in rack_proc_sack_blk()
9779 * nrsm->r_end = rsm->r_end; in rack_proc_sack_blk()
9780 * which is un-acked. in rack_proc_sack_blk()
9782 * rsm->r_end = nrsm->r_start; in rack_proc_sack_blk()
9783 * i.e. the remaining un-acked in rack_proc_sack_blk()
9788 * rsm |----------| (not acked) in rack_proc_sack_blk()
9789 * sackblk |---| in rack_proc_sack_blk()
9791 * rsm |---| (acked) in rack_proc_sack_blk()
9792 * nrsm |------| (not acked) in rack_proc_sack_blk()
9795 rsm->r_flags &= (~RACK_HAS_FIN); in rack_proc_sack_blk()
9796 rsm->r_just_ret = 0; in rack_proc_sack_blk()
9798 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_proc_sack_blk()
9800 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_proc_sack_blk()
9805 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9806 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_proc_sack_blk()
9807 nrsm->r_in_tmap = 1; in rack_proc_sack_blk()
9809 nrsm->r_dupack = 0; in rack_proc_sack_blk()
9812 changed += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9813 if (rsm->r_flags & RACK_WAS_LOST) { in rack_proc_sack_blk()
9819 rack->r_ctl.rc_sacked += (rsm->r_end - rsm->r_start); in rack_proc_sack_blk()
9821 if (rsm->r_in_tmap) /* should be true */ in rack_proc_sack_blk()
9824 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_proc_sack_blk()
9825 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_proc_sack_blk()
9826 rack->r_ctl.rc_reorder_ts = cts; in rack_proc_sack_blk()
9827 if (rack->r_ctl.rc_reorder_ts == 0) in rack_proc_sack_blk()
9828 rack->r_ctl.rc_reorder_ts = 1; in rack_proc_sack_blk()
9830 if (rack->app_limited_needs_set) in rack_proc_sack_blk()
9831 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_END); in rack_proc_sack_blk()
9832 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_proc_sack_blk()
9833 rsm->r_flags |= RACK_ACKED; in rack_proc_sack_blk()
9834 rack_update_pcm_ack(rack, 0, rsm->r_start, rsm->r_end); in rack_proc_sack_blk()
9836 if (rsm->r_in_tmap) { in rack_proc_sack_blk()
9837 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_proc_sack_blk()
9838 rsm->r_in_tmap = 0; in rack_proc_sack_blk()
9844 ((rsm->r_flags & RACK_TLP) == 0) && in rack_proc_sack_blk()
9845 (rsm->r_flags & RACK_ACKED)) { in rack_proc_sack_blk()
9851 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9853 if (next->r_flags & RACK_TLP) in rack_proc_sack_blk()
9856 if ((next->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9857 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
9860 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9861 ((next->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
9864 if (rsm->bindex != next->bindex) in rack_proc_sack_blk()
9866 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
9868 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
9870 if (next->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
9872 if (next->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
9874 if (next->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
9877 next = tqhash_next(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9882 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9884 if (prev->r_flags & RACK_TLP) in rack_proc_sack_blk()
9887 if ((prev->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9888 ((rsm->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
9891 if ((rsm->r_flags & RACK_IN_GP_WIN) && in rack_proc_sack_blk()
9892 ((prev->r_flags & RACK_IN_GP_WIN) == 0)) { in rack_proc_sack_blk()
9895 if (rsm->bindex != prev->bindex) in rack_proc_sack_blk()
9897 if (rsm->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
9899 if (rsm->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
9901 if (prev->r_flags & RACK_STRADDLE) in rack_proc_sack_blk()
9903 if (prev->r_flags & RACK_IS_PCM) in rack_proc_sack_blk()
9905 if (prev->r_flags & RACK_ACKED) { in rack_proc_sack_blk()
9908 prev = tqhash_prev(rack->r_ctl.tqh, rsm); in rack_proc_sack_blk()
9919 nrsm = tqhash_find(rack->r_ctl.tqh, end); in rack_proc_sack_blk()
9920 *prsm = rack->r_ctl.rc_sacklast = nrsm; in rack_proc_sack_blk()
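/*
 * Tail of rack_proc_sack_blk(): once a block is fully processed we
 * opportunistically coalesce the freshly ACKed rsm with an adjacent
 * ACKed neighbor, but only when it is safe: never across TLP-marked
 * entries, differing hash buckets (bindex), STRADDLE or PCM entries,
 * or the edge of the goodput window (RACK_IN_GP_WIN), since those
 * carry per-range state a merge would destroy. The last rsm touched
 * is cached in rc_sacklast so the next SACK block can resume from it
 * instead of doing a fresh lookup.
 */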
9930 while (rsm && (rsm->r_flags & RACK_ACKED)) { in rack_peer_reneges()
9932 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_peer_reneges()
9934 if (rsm->r_in_tmap) { in rack_peer_reneges()
9936 rack, rsm, rsm->r_flags); in rack_peer_reneges()
9939 rsm->r_flags &= ~(RACK_ACKED|RACK_SACK_PASSED|RACK_WAS_SACKPASS); in rack_peer_reneges()
9942 TAILQ_INSERT_HEAD(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_peer_reneges()
9945 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, tmap, rsm, r_tnext); in rack_peer_reneges()
9948 tmap->r_in_tmap = 1; in rack_peer_reneges()
9949 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_peer_reneges()
9955 sack_filter_clear(&rack->r_ctl.rack_sf, th_ack); in rack_peer_reneges()
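/*
 * Reneging: the peer dropped data it had previously SACKed. Every
 * leading RACK_ACKED entry is stripped of its SACK state, debited
 * from rc_sacked and stitched back into the head of the transmit
 * map (rc_tmap) in order, so it becomes eligible for retransmission
 * again; the sack filter is then reset from the current cum-ack.
 */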
10000 * The cum-ack is being advanced upon the sendmap. in rack_rsm_sender_update()
10006 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_rsm_sender_update()
10013 if (SEQ_GT(rsm->r_end, tp->gput_ack)) { in rack_rsm_sender_update()
10014 tp->gput_ack = rsm->r_end; in rack_rsm_sender_update()
10023 if (rack->app_limited_needs_set) in rack_rsm_sender_update()
10041 if ((ts = rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) <= in rack_rsm_sender_update()
10042 rack->r_ctl.rc_gp_cumack_ts) in rack_rsm_sender_update()
10045 rack->r_ctl.rc_gp_cumack_ts = ts; in rack_rsm_sender_update()
10046 rack_log_gpset(rack, tp->gput_ack, (uint32_t)ts, rsm->r_end, in rack_rsm_sender_update()
10061 if (sack_filter_blks_used(&rack->r_ctl.rack_sf)) { in rack_process_to_cumack()
10066 sack_filter_blks(tp, &rack->r_ctl.rack_sf, NULL, 0, th_ack); in rack_process_to_cumack()
10068 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_process_to_cumack()
10070 rack->r_ctl.cleared_app_ack = 0; in rack_process_to_cumack()
10072 rack->r_wanted_output = 1; in rack_process_to_cumack()
10073 if (SEQ_GT(th_ack, tp->snd_una)) in rack_process_to_cumack()
10074 rack->r_ctl.last_cumack_advance = acktime; in rack_process_to_cumack()
10077 if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10078 (rack->rc_last_tlp_past_cumack == 1) && in rack_process_to_cumack()
10079 (SEQ_GT(rack->r_ctl.last_tlp_acked_start, th_ack))) { in rack_process_to_cumack()
10082 * tlp retransmit sequence is ahead of the cum-ack. in rack_process_to_cumack()
10083 * This can only happen when the cum-ack moves all in rack_process_to_cumack()
10090 * the cum-ack is by the TLP before checking which is in rack_process_to_cumack()
10094 rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10095 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10096 rack->rc_last_tlp_acked_set = 0; in rack_process_to_cumack()
10097 rack->rc_last_tlp_past_cumack = 0; in rack_process_to_cumack()
10098 } else if ((rack->rc_last_tlp_acked_set == 1) && in rack_process_to_cumack()
10099 (rack->rc_last_tlp_past_cumack == 0) && in rack_process_to_cumack()
10100 (SEQ_GEQ(th_ack, rack->r_ctl.last_tlp_acked_end))) { in rack_process_to_cumack()
10104 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10107 if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10108 (rack->rc_last_sent_tlp_past_cumack == 1) && in rack_process_to_cumack()
10109 (SEQ_GT(rack->r_ctl.last_sent_tlp_seq, th_ack))) { in rack_process_to_cumack()
10111 rack->r_ctl.last_sent_tlp_seq, in rack_process_to_cumack()
10112 (rack->r_ctl.last_sent_tlp_seq + in rack_process_to_cumack()
10113 rack->r_ctl.last_sent_tlp_len)); in rack_process_to_cumack()
10114 rack->rc_last_sent_tlp_seq_valid = 0; in rack_process_to_cumack()
10115 rack->rc_last_sent_tlp_past_cumack = 0; in rack_process_to_cumack()
10116 } else if ((rack->rc_last_sent_tlp_seq_valid == 1) && in rack_process_to_cumack()
10117 (rack->rc_last_sent_tlp_past_cumack == 0) && in rack_process_to_cumack()
10118 (SEQ_GEQ(th_ack, rack->r_ctl.last_sent_tlp_seq))) { in rack_process_to_cumack()
10122 rack->rc_last_sent_tlp_past_cumack = 1; in rack_process_to_cumack()
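/*
 * The flag pairs above track a remembered TLP range relative to the
 * advancing cum-ack: rc_last_tlp_acked_set / rc_last_sent_tlp_seq_valid
 * mean "a TLP range is on record", and the *_past_cumack flags are set
 * once th_ack has advanced to or beyond the range. The record is only
 * discarded when, after that, the range's start appears to be ahead of
 * th_ack again (sequence-space wrap), since until then an arriving
 * DSACK for those bytes must still be attributable to the TLP rather
 * than treated as real duplication.
 */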
10125 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10127 if ((th_ack - 1) == tp->iss) { in rack_process_to_cumack()
10136 if (tp->t_flags & TF_SENTFIN) { in rack_process_to_cumack()
10143 tp->t_state, th_ack, rack, in rack_process_to_cumack()
10144 tp->snd_una, tp->snd_max); in rack_process_to_cumack()
10148 if (SEQ_LT(th_ack, rsm->r_start)) { in rack_process_to_cumack()
10152 rsm->r_start, in rack_process_to_cumack()
10153 th_ack, tp->t_state, rack->r_state); in rack_process_to_cumack()
10160 if ((rsm->r_flags & RACK_TLP) && in rack_process_to_cumack()
10161 (rsm->r_rtr_cnt > 1)) { in rack_process_to_cumack()
10171 if (rack->rc_last_tlp_acked_set && in rack_process_to_cumack()
10178 rack_log_dsack_event(rack, 10, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10182 if (SEQ_LT(rsm->r_start, rack->r_ctl.last_tlp_acked_start)) { in rack_process_to_cumack()
10183 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10184 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10185 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10187 if (SEQ_GT(rsm->r_end, rack->r_ctl.last_tlp_acked_end)) { in rack_process_to_cumack()
10188 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10189 rack_log_dsack_event(rack, 11, __LINE__, rack->r_ctl.last_tlp_acked_start, in rack_process_to_cumack()
10190 rack->r_ctl.last_tlp_acked_end); in rack_process_to_cumack()
10193 rack->rc_last_tlp_past_cumack = 1; in rack_process_to_cumack()
10194 rack->r_ctl.last_tlp_acked_start = rsm->r_start; in rack_process_to_cumack()
10195 rack->r_ctl.last_tlp_acked_end = rsm->r_end; in rack_process_to_cumack()
10196 rack->rc_last_tlp_acked_set = 1; in rack_process_to_cumack()
10197 rack_log_dsack_event(rack, 8, __LINE__, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10201 rack->r_ctl.last_tmit_time_acked = rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)]; in rack_process_to_cumack()
10202 if (SEQ_GEQ(th_ack, rsm->r_end)) { in rack_process_to_cumack()
10207 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10215 rack_log_map_chg(tp, rack, NULL, rsm, NULL, MAP_FREE, rsm->r_end, __LINE__); in rack_process_to_cumack()
10216 rack->r_ctl.rc_holes_rxt -= rsm->r_rtr_bytes; in rack_process_to_cumack()
10217 rsm->r_rtr_bytes = 0; in rack_process_to_cumack()
10223 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_process_to_cumack()
10224 if (rsm->r_in_tmap) { in rack_process_to_cumack()
10225 TAILQ_REMOVE(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_process_to_cumack()
10226 rsm->r_in_tmap = 0; in rack_process_to_cumack()
10229 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10231 * It was acked on the scoreboard -- remove in rack_process_to_cumack()
10234 rack->r_ctl.rc_sacked -= (rsm->r_end - rsm->r_start); in rack_process_to_cumack()
10236 } else if (rsm->r_flags & RACK_SACK_PASSED) { in rack_process_to_cumack()
10242 rsm->r_flags &= ~RACK_SACK_PASSED; in rack_process_to_cumack()
10243 rsm->r_ack_arrival = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_process_to_cumack()
10244 rsm->r_flags |= RACK_ACKED; in rack_process_to_cumack()
10245 rack->r_ctl.rc_reorder_ts = cts; in rack_process_to_cumack()
10246 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_to_cumack()
10247 rack->r_ctl.rc_reorder_ts = 1; in rack_process_to_cumack()
10248 if (rack->r_ent_rec_ns) { in rack_process_to_cumack()
10253 rack->r_might_revert = 1; in rack_process_to_cumack()
10255 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10257 rack_update_pcm_ack(rack, 1, rsm->r_start, rsm->r_end); in rack_process_to_cumack()
10259 if ((rsm->r_flags & RACK_TO_REXT) && in rack_process_to_cumack()
10260 (tp->t_flags & TF_RCVD_TSTMP) && in rack_process_to_cumack()
10261 (to->to_flags & TOF_TS) && in rack_process_to_cumack()
10262 (to->to_tsecr != 0) && in rack_process_to_cumack()
10263 (tp->t_flags & TF_PREVVALID)) { in rack_process_to_cumack()
10269 tp->t_flags &= ~TF_PREVVALID; in rack_process_to_cumack()
10270 if (to->to_tsecr == rack_ts_to_msec(rsm->r_tim_lastsent[0])) { in rack_process_to_cumack()
10275 left = th_ack - rsm->r_end; in rack_process_to_cumack()
10276 if (rack->app_limited_needs_set && newly_acked) in rack_process_to_cumack()
10284 rsm = tqhash_min(rack->r_ctl.tqh); in rack_process_to_cumack()
10285 if (rsm && (rsm->r_flags & RACK_ACKED) && (th_ack == rsm->r_start)) { in rack_process_to_cumack()
10293 * given us snd_una up to (rsm->r_end). in rack_process_to_cumack()
10297 * our rsm->r_start in case we get an old ack in rack_process_to_cumack()
10304 if (rsm->r_flags & RACK_ACKED) { in rack_process_to_cumack()
10306 * It was acked on the scoreboard -- remove it from in rack_process_to_cumack()
10307 * total for the part being cum-acked. in rack_process_to_cumack()
10309 rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start); in rack_process_to_cumack()
10311 rack_update_pcm_ack(rack, 1, rsm->r_start, th_ack); in rack_process_to_cumack()
10314 if (rsm->r_flags & RACK_WAS_LOST) { in rack_process_to_cumack()
10327 rsm->r_dupack = 0; in rack_process_to_cumack()
10329 if (rsm->r_rtr_bytes) { in rack_process_to_cumack()
10336 ack_am = (th_ack - rsm->r_start); in rack_process_to_cumack()
10337 if (ack_am >= rsm->r_rtr_bytes) { in rack_process_to_cumack()
10338 rack->r_ctl.rc_holes_rxt -= ack_am; in rack_process_to_cumack()
10339 rsm->r_rtr_bytes -= ack_am; in rack_process_to_cumack()
10349 if (rsm->m && in rack_process_to_cumack()
10350 ((rsm->orig_m_len != rsm->m->m_len) || in rack_process_to_cumack()
10351 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_process_to_cumack()
10355 rsm->soff += (th_ack - rsm->r_start); in rack_process_to_cumack()
10358 tqhash_trim(rack->r_ctl.tqh, th_ack); in rack_process_to_cumack()
10364 m = rsm->m; in rack_process_to_cumack()
10365 soff = rsm->soff; in rack_process_to_cumack()
10367 while (soff >= m->m_len) { in rack_process_to_cumack()
10368 soff -= m->m_len; in rack_process_to_cumack()
10369 KASSERT((m->m_next != NULL), in rack_process_to_cumack()
10371 rsm, rsm->soff, soff, m)); in rack_process_to_cumack()
10372 m = m->m_next; in rack_process_to_cumack()
10375 * This is a fall-back that prevents a panic. In reality in rack_process_to_cumack()
10378 * but tqhash_trim did update rsm->r_start so the offset calculation in rack_process_to_cumack()
10383 m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_process_to_cumack()
10384 (rsm->r_start - tp->snd_una), in rack_process_to_cumack()
10392 rsm->m = m; in rack_process_to_cumack()
10393 rsm->soff = soff; in rack_process_to_cumack()
10394 rsm->orig_m_len = rsm->m->m_len; in rack_process_to_cumack()
10395 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_process_to_cumack()
10398 if (rack->app_limited_needs_set && in rack_process_to_cumack()
10399 SEQ_GEQ(th_ack, tp->gput_seq)) in rack_process_to_cumack()
10400 rack_need_set_test(tp, rack, rsm, tp->snd_una, __LINE__, RACK_USE_BEG); in rack_process_to_cumack()
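/*
 * Partial-ack trimming above: when th_ack lands inside an rsm the
 * entry is not freed but trimmed via tqhash_trim(), its retransmit
 * byte credit (r_rtr_bytes / rc_holes_rxt) is reduced by the acked
 * span, and rsm->m / rsm->soff are re-walked so they keep pointing
 * at the first un-acked byte of the socket buffer mbuf chain. The
 * sbsndmbuf() path is only a fall-back for the case where the mbuf
 * geometry changed underneath us.
 */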
10409 if (rack->r_might_revert) { in rack_handle_might_revert()
10420 TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_handle_might_revert()
10421 if (rsm->r_flags & RACK_SACK_PASSED) { in rack_handle_might_revert()
10433 rack->r_ent_rec_ns = 0; in rack_handle_might_revert()
10434 orig_cwnd = tp->snd_cwnd; in rack_handle_might_revert()
10435 tp->snd_ssthresh = rack->r_ctl.rc_ssthresh_at_erec; in rack_handle_might_revert()
10436 tp->snd_recover = tp->snd_una; in rack_handle_might_revert()
10438 if (IN_RECOVERY(tp->t_flags)) { in rack_handle_might_revert()
10440 if ((rack->rto_from_rec == 1) && (rack_ssthresh_rest_rto_rec != 0)) { in rack_handle_might_revert()
10443 * and then re-entered recovery (more sack's arrived) in rack_handle_might_revert()
10445 * the first recovery. We want to be able to slow-start in rack_handle_might_revert()
10449 * so we get no slow-start after our RTO. in rack_handle_might_revert()
10451 rack->rto_from_rec = 0; in rack_handle_might_revert()
10452 if (rack->r_ctl.rto_ssthresh > tp->snd_ssthresh) in rack_handle_might_revert()
10453 tp->snd_ssthresh = rack->r_ctl.rto_ssthresh; in rack_handle_might_revert()
10457 rack->r_might_revert = 0; in rack_handle_might_revert()
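/*
 * Revert logic: if every SACK that pushed us into recovery turns out
 * to have been a DSACK echo of our own (spurious) retransmissions,
 * the congestion response is undone - ssthresh is restored from
 * rc_ssthresh_at_erec, snd_recover is pulled back to snd_una, and,
 * when the recovery followed an RTO, the pre-RTO ssthresh can be
 * restored as well (rack_ssthresh_rest_rto_rec) so the connection
 * is not left without its slow-start credit.
 */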
10470 am = end - start; in rack_note_dsack()
10473 if ((rack->rc_last_tlp_acked_set) && in rack_note_dsack()
10474 (SEQ_GEQ(start, rack->r_ctl.last_tlp_acked_start)) && in rack_note_dsack()
10475 (SEQ_LEQ(end, rack->r_ctl.last_tlp_acked_end))) { in rack_note_dsack()
10486 if (rack->rc_last_sent_tlp_seq_valid) { in rack_note_dsack()
10487 l_end = rack->r_ctl.last_sent_tlp_seq + rack->r_ctl.last_sent_tlp_len; in rack_note_dsack()
10488 if (SEQ_GEQ(start, rack->r_ctl.last_sent_tlp_seq) && in rack_note_dsack()
10499 if (rack->rc_dsack_round_seen == 0) { in rack_note_dsack()
10500 rack->rc_dsack_round_seen = 1; in rack_note_dsack()
10501 rack->r_ctl.dsack_round_end = rack->rc_tp->snd_max; in rack_note_dsack()
10502 rack->r_ctl.num_dsack++; in rack_note_dsack()
10503 rack->r_ctl.dsack_persist = 16; /* 16 is from the standard */ in rack_note_dsack()
10511 rack->r_ctl.dsack_byte_cnt += am; in rack_note_dsack()
10512 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags) && in rack_note_dsack()
10513 rack->r_ctl.retran_during_recovery && in rack_note_dsack()
10514 (rack->r_ctl.dsack_byte_cnt >= rack->r_ctl.retran_during_recovery)) { in rack_note_dsack()
10519 rack->r_might_revert = 1; in rack_note_dsack()
10520 rack_handle_might_revert(rack->rc_tp, rack); in rack_note_dsack()
10521 rack->r_might_revert = 0; in rack_note_dsack()
10522 rack->r_ctl.retran_during_recovery = 0; in rack_note_dsack()
10523 rack->r_ctl.dsack_byte_cnt = 0; in rack_note_dsack()
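/*
 * DSACK accounting: ranges that match a remembered TLP are not
 * counted (the duplicate is expected). Otherwise a new "DSACK round"
 * is opened at most once per window (dsack_round_end = snd_max),
 * num_dsack is bumped and dsack_persist seeded with 16 rounds per the
 * standard. The byte count feeds the might-revert check above: once
 * DSACKed bytes cover everything retransmitted during recovery, that
 * recovery is treated as spurious.
 */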
10531 return (((tp->snd_max - snd_una) - in do_rack_compute_pipe()
10532 (rack->r_ctl.rc_sacked + rack->r_ctl.rc_considered_lost)) + rack->r_ctl.rc_holes_rxt); in do_rack_compute_pipe()
10539 (struct tcp_rack *)tp->t_fb_ptr, in rack_compute_pipe()
10540 tp->snd_una)); in rack_compute_pipe()
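/*
 * Pipe estimate used by PRR below; e.g. with 100 segments outstanding
 * (snd_max - snd_una), 10 SACKed, 5 considered lost and 3 segments of
 * retransmissions in flight (all in bytes in the real computation):
 *
 *   pipe = (100 - (10 + 5)) + 3 = 88
 *
 * i.e. data believed to still be in the network.
 */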
10549 rack->r_ctl.rc_prr_delivered += changed; in rack_update_prr()
10551 if (sbavail(&rack->rc_inp->inp_socket->so_snd) <= (tp->snd_max - tp->snd_una)) { in rack_update_prr()
10555 * Note we use tp->snd_una here and not th_ack because in rack_update_prr()
10558 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10562 if (SEQ_GT(tp->snd_una, th_ack)) { in rack_update_prr()
10563 snd_una = tp->snd_una; in rack_update_prr()
10568 if (pipe > tp->snd_ssthresh) { in rack_update_prr()
10571 sndcnt = rack->r_ctl.rc_prr_delivered * tp->snd_ssthresh; in rack_update_prr()
10572 if (rack->r_ctl.rc_prr_recovery_fs > 0) in rack_update_prr()
10573 sndcnt /= (long)rack->r_ctl.rc_prr_recovery_fs; in rack_update_prr()
10575 rack->r_ctl.rc_prr_sndcnt = 0; in rack_update_prr()
10580 if (sndcnt > (long)rack->r_ctl.rc_prr_out) in rack_update_prr()
10581 sndcnt -= rack->r_ctl.rc_prr_out; in rack_update_prr()
10584 rack->r_ctl.rc_prr_sndcnt = sndcnt; in rack_update_prr()
10589 if (rack->r_ctl.rc_prr_delivered > rack->r_ctl.rc_prr_out) in rack_update_prr()
10590 limit = (rack->r_ctl.rc_prr_delivered - rack->r_ctl.rc_prr_out); in rack_update_prr()
10596 if (tp->snd_ssthresh > pipe) { in rack_update_prr()
10597 rack->r_ctl.rc_prr_sndcnt = min((tp->snd_ssthresh - pipe), limit); in rack_update_prr()
10600 rack->r_ctl.rc_prr_sndcnt = min(0, limit); in rack_update_prr()
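/*
 * This follows RFC 6937 proportional rate reduction: while
 * pipe > ssthresh, sndcnt = prr_delivered * ssthresh / RecoverFS
 * (rc_prr_recovery_fs) minus what was already sent in recovery
 * (rc_prr_out), so the window drains proportionally toward ssthresh.
 * Once pipe is at or below ssthresh the slow-start-like clause takes
 * over, sndcnt = min(ssthresh - pipe, prr_delivered - prr_out), with
 * the final min(0, limit) clamping the count to zero when pipe has
 * already reached ssthresh exactly.
 */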
10627 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_log_ack()
10629 rsm = tqhash_min(rack->r_ctl.tqh); in rack_log_ack()
10631 th_ack = th->th_ack; in rack_log_ack()
10632 segsiz = ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10633 if (SEQ_GT(th_ack, tp->snd_una)) { in rack_log_ack()
10635 tp->t_acktime = ticks; in rack_log_ack()
10637 if (rsm && SEQ_GT(th_ack, rsm->r_start)) in rack_log_ack()
10638 changed = th_ack - rsm->r_start; in rack_log_ack()
10641 tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time)); in rack_log_ack()
10643 if ((to->to_flags & TOF_SACK) == 0) { in rack_log_ack()
10647 * For cases where we struck a dup-ack in rack_log_ack()
10652 changed += ctf_fixed_maxseg(rack->rc_tp); in rack_log_ack()
10657 if (SEQ_GT(th_ack, tp->snd_una)) in rack_log_ack()
10660 ack_point = tp->snd_una; in rack_log_ack()
10661 for (i = 0; i < to->to_nsacks; i++) { in rack_log_ack()
10662 bcopy((to->to_sacks + i * TCPOLEN_SACK), in rack_log_ack()
10668 SEQ_LT(sack.start, tp->snd_max) && in rack_log_ack()
10670 SEQ_LEQ(sack.end, tp->snd_max)) { in rack_log_ack()
10681 * It's a D-SACK block. in rack_log_ack()
10686 if (rack->rc_dsack_round_seen) { in rack_log_ack()
10688 if (SEQ_GEQ(th_ack, rack->r_ctl.dsack_round_end)) { in rack_log_ack()
10690 rack->rc_dsack_round_seen = 0; in rack_log_ack()
10698 num_sack_blks = sack_filter_blks(tp, &rack->r_ctl.rack_sf, sack_blocks, in rack_log_ack()
10699 num_sack_blks, th->th_ack); in rack_log_ack()
10700 ctf_log_sack_filter(rack->rc_tp, num_sack_blks, sack_blocks); in rack_log_ack()
10747 * Now collapse out the dup-sack and in rack_log_ack()
10755 num_sack_blks--; in rack_log_ack()
10767 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_log_ack()
10769 SEQ_GT(sack_blocks[0].end, rsm->r_start) && in rack_log_ack()
10770 SEQ_LT(sack_blocks[0].start, rsm->r_end)) { in rack_log_ack()
10777 rack->r_wanted_output = 1; in rack_log_ack()
10790 rsm = rack->r_ctl.rc_sacklast; in rack_log_ack()
10794 rack->r_wanted_output = 1; in rack_log_ack()
10801 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_log_ack()
10805 if ((!IN_FASTRECOVERY(tp->t_flags)) && in rack_log_ack()
10807 ((rsm->r_flags & RACK_MUST_RXT) == 0)) { in rack_log_ack()
10815 if (rack->rack_no_prr == 0) { in rack_log_ack()
10816 rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp); in rack_log_ack()
10819 rack->r_timer_override = 1; in rack_log_ack()
10820 rack->r_early = 0; in rack_log_ack()
10821 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
10822 } else if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
10824 (rack->r_rr_config == 3)) { in rack_log_ack()
10829 rack->r_timer_override = 1; in rack_log_ack()
10830 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_log_ack()
10831 rack->r_ctl.rc_resend = rsm; in rack_log_ack()
10833 if (IN_FASTRECOVERY(tp->t_flags) && in rack_log_ack()
10834 (rack->rack_no_prr == 0) && in rack_log_ack()
10837 if ((rsm && (rack->r_ctl.rc_prr_sndcnt >= ctf_fixed_maxseg(tp)) && in rack_log_ack()
10838 ((tcp_in_hpts(rack->rc_tp) == 0) && in rack_log_ack()
10839 ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0)))) { in rack_log_ack()
10844 rack->r_early = 0; in rack_log_ack()
10845 rack->r_ctl.rc_agg_early = 0; in rack_log_ack()
10846 rack->r_timer_override = 1; in rack_log_ack()
10856 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_strike_dupack()
10862 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in rack_strike_dupack()
10863 (rsm->r_flags & RACK_MUST_RXT)) { in rack_strike_dupack()
10869 if (rsm && (rsm->r_dupack < 0xff)) { in rack_strike_dupack()
10870 rsm->r_dupack++; in rack_strike_dupack()
10871 if (rsm->r_dupack >= DUP_ACK_THRESHOLD) { in rack_strike_dupack()
10877 * we will get a return of the rsm. For a non-sack in rack_strike_dupack()
10882 rack->r_ctl.rc_resend = tcp_rack_output(rack->rc_tp, rack, cts); in rack_strike_dupack()
10883 if (rack->r_ctl.rc_resend != NULL) { in rack_strike_dupack()
10884 if (!IN_FASTRECOVERY(rack->rc_tp->t_flags)) { in rack_strike_dupack()
10885 rack_cong_signal(rack->rc_tp, CC_NDUPACK, in rack_strike_dupack()
10888 rack->r_wanted_output = 1; in rack_strike_dupack()
10889 rack->r_timer_override = 1; in rack_strike_dupack()
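/*
 * Dup-ack strikes (non-SACK style): each qualifying duplicate ACK
 * bumps r_dupack on the first eligible entry in the transmit map,
 * skipping segments already at the threshold or marked RACK_MUST_RXT.
 * At DUP_ACK_THRESHOLD the entry is handed to tcp_rack_output() and,
 * if it comes back as retransmittable, fast recovery is entered via
 * CC_NDUPACK and output is forced.
 */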
10913 * gauge the inter-ack times). If that occurs we have a real problem in rack_check_bottom_drag()
10926 if (tp->snd_max == tp->snd_una) { in rack_check_bottom_drag()
10938 tcp_trace_point(rack->rc_tp, TCP_TP_PACED_BOTTOM); in rack_check_bottom_drag()
10940 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
10942 if ((rack->r_ctl.rack_rs.rs_flags & RACK_RTT_VALID) && in rack_check_bottom_drag()
10943 (rack->dis_lt_bw == 0) && in rack_check_bottom_drag()
10944 (rack->use_lesser_lt_bw == 0) && in rack_check_bottom_drag()
10947 * Lets use the long-term b/w we have in rack_check_bottom_drag()
10950 if (rack->rc_gp_filled == 0) { in rack_check_bottom_drag()
10962 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
10963 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
10964 rack->rc_gp_filled = 1; in rack_check_bottom_drag()
10965 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
10966 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
10967 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
10968 } else if (lt_bw > rack->r_ctl.gp_bw) { in rack_check_bottom_drag()
10969 rack->r_ctl.rc_rtt_diff = 0; in rack_check_bottom_drag()
10970 if (rack->r_ctl.num_measurements < RACK_REQ_AVG) in rack_check_bottom_drag()
10971 rack->r_ctl.num_measurements = RACK_REQ_AVG; in rack_check_bottom_drag()
10972 rack->r_ctl.gp_bw = lt_bw; in rack_check_bottom_drag()
10973 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_check_bottom_drag()
10975 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
10976 if ((rack->gp_ready == 0) && in rack_check_bottom_drag()
10977 (rack->r_ctl.num_measurements >= rack->r_ctl.req_measurements)) { in rack_check_bottom_drag()
10979 rack->gp_ready = 1; in rack_check_bottom_drag()
10980 if (rack->dgp_on || in rack_check_bottom_drag()
10981 rack->rack_hibeta) in rack_check_bottom_drag()
10983 if (rack->defer_options) in rack_check_bottom_drag()
10990 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
10992 } else if ((IN_FASTRECOVERY(tp->t_flags) == 0) && in rack_check_bottom_drag()
10993 (sbavail(&so->so_snd) > max((segsiz * (4 + rack_req_segs)), in rack_check_bottom_drag()
10995 (rack->r_ctl.cwnd_to_use > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
10996 (tp->snd_wnd > max((segsiz * (rack_req_segs + 2)), minseg)) && in rack_check_bottom_drag()
10997 (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) <= in rack_check_bottom_drag()
11008 rack->rc_dragged_bottom = 1; in rack_check_bottom_drag()
11009 rack_increase_bw_mul(rack, -1, 0, 0, 1); in rack_check_bottom_drag()
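/*
 * Bottom-drag handling: if the pacer let the connection run dry
 * (snd_max == snd_una) the long-term b/w sample (lt_bw) is used to
 * seed or raise gp_bw so pacing does not stay stuck below the path's
 * real capacity, and the rate multiplier is nudged upward via
 * rack_increase_bw_mul(). The second branch applies the same nudge
 * when we are merely hovering near empty with enough data, cwnd and
 * window available to know the shortfall is self-inflicted.
 */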
11020 do_log = tcp_bblogging_on(rack->rc_tp); in rack_log_hybrid()
11022 if ((do_log = tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING)) == 0) in rack_log_hybrid()
11043 log.u_bbr.flex2 = cur->start_seq; in rack_log_hybrid()
11044 log.u_bbr.flex3 = cur->end_seq; in rack_log_hybrid()
11045 log.u_bbr.flex4 = (uint32_t)((cur->localtime >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11046 log.u_bbr.flex5 = (uint32_t)(cur->localtime & 0x00000000ffffffff); in rack_log_hybrid()
11047 log.u_bbr.flex6 = cur->flags; in rack_log_hybrid()
11048 log.u_bbr.pkts_out = cur->hybrid_flags; in rack_log_hybrid()
11049 log.u_bbr.rttProp = cur->timestamp; in rack_log_hybrid()
11050 log.u_bbr.cur_del_rate = cur->cspr; in rack_log_hybrid()
11051 log.u_bbr.bw_inuse = cur->start; in rack_log_hybrid()
11052 log.u_bbr.applimited = (uint32_t)(cur->end & 0x00000000ffffffff); in rack_log_hybrid()
11053 log.u_bbr.delivered = (uint32_t)((cur->end >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11054 log.u_bbr.epoch = (uint32_t)(cur->deadline & 0x00000000ffffffff); in rack_log_hybrid()
11055 log.u_bbr.lt_epoch = (uint32_t)((cur->deadline >> 32) & 0x00000000ffffffff); in rack_log_hybrid()
11058 off = (uint64_t)(cur) - (uint64_t)(&rack->rc_tp->t_tcpreq_info[0]); in rack_log_hybrid()
11067 log.u_bbr.flex7 = rack->rc_catch_up; in rack_log_hybrid()
11069 log.u_bbr.flex7 |= rack->rc_hybrid_mode; in rack_log_hybrid()
11071 log.u_bbr.flex7 |= rack->dgp_on; in rack_log_hybrid()
11079 log.u_bbr.bbr_state = rack->rc_always_pace; in rack_log_hybrid()
11081 log.u_bbr.bbr_state |= rack->dgp_on; in rack_log_hybrid()
11083 log.u_bbr.bbr_state |= rack->rc_hybrid_mode; in rack_log_hybrid()
11085 log.u_bbr.bbr_state |= rack->use_fixed_rate; in rack_log_hybrid()
11087 log.u_bbr.delRate = rack->r_ctl.bw_rate_cap; in rack_log_hybrid()
11088 log.u_bbr.bbr_substate = rack->r_ctl.client_suggested_maxseg; in rack_log_hybrid()
11089 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_hybrid()
11090 log.u_bbr.pkt_epoch = rack->rc_tp->tcp_hybrid_start; in rack_log_hybrid()
11091 log.u_bbr.lost = rack->rc_tp->tcp_hybrid_error; in rack_log_hybrid()
11092 log.u_bbr.pacing_gain = (uint16_t)rack->rc_tp->tcp_hybrid_stop; in rack_log_hybrid()
11093 tcp_log_event(rack->rc_tp, NULL, in rack_log_hybrid()
11094 &rack->rc_inp->inp_socket->so_rcv, in rack_log_hybrid()
11095 &rack->rc_inp->inp_socket->so_snd, in rack_log_hybrid()
11110 orig_ent = rack->r_ctl.rc_last_sft; in rack_set_dgp_hybrid_mode()
11111 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, seq); in rack_set_dgp_hybrid_mode()
11114 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11116 rc_cur = tcp_req_find_req_for_seq(rack->rc_tp, (seq + len - 1)); in rack_set_dgp_hybrid_mode()
11123 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11124 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11125 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11126 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11127 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11129 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11131 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11132 rack_log_hybrid(rack, (seq + len - 1), NULL, HYBRID_LOG_NO_RANGE, __LINE__, err); in rack_set_dgp_hybrid_mode()
11134 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11135 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11139 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_WASSET) == 0) { in rack_set_dgp_hybrid_mode()
11141 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11142 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11143 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11144 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11146 if (rack->r_ctl.rc_last_sft) { in rack_set_dgp_hybrid_mode()
11147 rack->r_ctl.rc_last_sft = NULL; in rack_set_dgp_hybrid_mode()
11149 if ((rc_cur->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_set_dgp_hybrid_mode()
11150 rc_cur->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_set_dgp_hybrid_mode()
11151 rc_cur->first_send = cts; in rack_set_dgp_hybrid_mode()
11152 rc_cur->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11153 rc_cur->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11164 tp = rack->rc_tp; in rack_set_dgp_hybrid_mode()
11165 if ((rack->r_ctl.rc_last_sft != NULL) && in rack_set_dgp_hybrid_mode()
11166 (rack->r_ctl.rc_last_sft == rc_cur)) { in rack_set_dgp_hybrid_mode()
11168 if (rack->rc_hybrid_mode) in rack_set_dgp_hybrid_mode()
11172 if (rack->rc_hybrid_mode == 0) { in rack_set_dgp_hybrid_mode()
11173 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11175 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11176 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11177 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11182 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CSPR) && rc_cur->cspr) { in rack_set_dgp_hybrid_mode()
11184 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11185 rack->r_ctl.bw_rate_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11187 rack->r_ctl.fillcw_cap = rack_compensate_for_linerate(rack, rc_cur->cspr); in rack_set_dgp_hybrid_mode()
11189 if (rack->rc_hybrid_mode) { in rack_set_dgp_hybrid_mode()
11190 if (rack->cspr_is_fcc == 0) in rack_set_dgp_hybrid_mode()
11191 rack->r_ctl.bw_rate_cap = 0; in rack_set_dgp_hybrid_mode()
11193 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_set_dgp_hybrid_mode()
11196 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_H_MS) in rack_set_dgp_hybrid_mode()
11197 rack->r_ctl.client_suggested_maxseg = rc_cur->hint_maxseg; in rack_set_dgp_hybrid_mode()
11199 rack->r_ctl.client_suggested_maxseg = 0; in rack_set_dgp_hybrid_mode()
11200 if (rc_cur->timestamp == rack->r_ctl.last_tm_mark) { in rack_set_dgp_hybrid_mode()
11204 * sendtime not arrival time for catch-up mode. in rack_set_dgp_hybrid_mode()
11206 rc_cur->hybrid_flags |= TCP_HYBRID_PACING_SENDTIME; in rack_set_dgp_hybrid_mode()
11208 if ((rc_cur->hybrid_flags & TCP_HYBRID_PACING_CU) && in rack_set_dgp_hybrid_mode()
11209 (rc_cur->cspr > 0)) { in rack_set_dgp_hybrid_mode()
11212 rack->rc_catch_up = 1; in rack_set_dgp_hybrid_mode()
11217 if (rc_cur->hybrid_flags & TCP_HYBRID_PACING_SENDTIME) { in rack_set_dgp_hybrid_mode()
11223 rc_cur->deadline = cts; in rack_set_dgp_hybrid_mode()
11229 rc_cur->deadline = rc_cur->localtime; in rack_set_dgp_hybrid_mode()
11235 len = rc_cur->end - rc_cur->start; in rack_set_dgp_hybrid_mode()
11236 if (tp->t_inpcb.inp_socket->so_snd.sb_tls_info) { in rack_set_dgp_hybrid_mode()
11241 len += tcp_estimate_tls_overhead(tp->t_inpcb.inp_socket, len); in rack_set_dgp_hybrid_mode()
11251 len /= rc_cur->cspr; in rack_set_dgp_hybrid_mode()
11252 rc_cur->deadline += len; in rack_set_dgp_hybrid_mode()
11254 rack->rc_catch_up = 0; in rack_set_dgp_hybrid_mode()
11255 rc_cur->deadline = 0; in rack_set_dgp_hybrid_mode()
11257 if (rack->r_ctl.client_suggested_maxseg != 0) { in rack_set_dgp_hybrid_mode()
11265 orig_ent->sent_at_ls = rack->rc_tp->t_sndbytes; in rack_set_dgp_hybrid_mode()
11266 orig_ent->rxt_at_ls = rack->rc_tp->t_snd_rxt_bytes; in rack_set_dgp_hybrid_mode()
11267 orig_ent->flags |= TCP_TRK_TRACK_FLG_LSND; in rack_set_dgp_hybrid_mode()
11271 rack->r_ctl.rc_last_sft = rc_cur; in rack_set_dgp_hybrid_mode()
11272 rack->r_ctl.last_tm_mark = rc_cur->timestamp; in rack_set_dgp_hybrid_mode()
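/*
 * Catch-up deadline sketch: for a request with a client-suggested
 * pacing rate (cspr), the deadline is the start time (arrival time,
 * or first-send time when TCP_HYBRID_PACING_SENDTIME is set) plus
 * roughly
 *
 *   (end - start [+ estimated TLS overhead]) / cspr
 *
 * with the unit scaling done in lines elided above. While the clock
 * is past that deadline the connection runs in catch-up mode and
 * paces above cspr to get the response back on schedule.
 */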
11282 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11284 (ent->flags == TCP_TRK_TRACK_FLG_EMPTY) || in rack_chk_req_and_hybrid_on_out()
11285 (SEQ_GEQ(seq, ent->end_seq))) { in rack_chk_req_and_hybrid_on_out()
11288 ent = rack->r_ctl.rc_last_sft; in rack_chk_req_and_hybrid_on_out()
11294 if (SEQ_LT(ent->end_seq, (seq + len))) { in rack_chk_req_and_hybrid_on_out()
11305 ent->end_seq = (seq + len); in rack_chk_req_and_hybrid_on_out()
11306 if (rack->rc_hybrid_mode) in rack_chk_req_and_hybrid_on_out()
11310 if ((ent->flags & TCP_TRK_TRACK_FLG_FSND) == 0) { in rack_chk_req_and_hybrid_on_out()
11311 ent->flags |= TCP_TRK_TRACK_FLG_FSND; in rack_chk_req_and_hybrid_on_out()
11312 ent->first_send = cts; in rack_chk_req_and_hybrid_on_out()
11313 ent->sent_at_fs = rack->rc_tp->t_sndbytes; in rack_chk_req_and_hybrid_on_out()
11314 ent->rxt_at_fs = rack->rc_tp->t_snd_rxt_bytes; in rack_chk_req_and_hybrid_on_out()
11343 new_total = acked_amount + rack->r_ctl.fsb.left_to_send; in rack_gain_for_fastoutput()
11344 gating_val = min((sbavail(&so->so_snd) - (tp->snd_max - tp->snd_una)), in rack_gain_for_fastoutput()
11345 (tp->snd_wnd - (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11349 rack->r_ctl.fsb.left_to_send = new_total; in rack_gain_for_fastoutput()
11350 …KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(&rack->rc_inp->inp_socket->so_snd) - (tp->snd_ma… in rack_gain_for_fastoutput()
11352 rack, rack->r_ctl.fsb.left_to_send, in rack_gain_for_fastoutput()
11353 sbavail(&rack->rc_inp->inp_socket->so_snd), in rack_gain_for_fastoutput()
11354 (tp->snd_max - tp->snd_una))); in rack_gain_for_fastoutput()
11393 snd_una = rack->rc_tp->snd_una; in rack_adjust_sendmap_head()
11395 m = sb->sb_mb; in rack_adjust_sendmap_head()
11396 rsm = tqhash_min(rack->r_ctl.tqh); in rack_adjust_sendmap_head()
11402 KASSERT((rsm->m == m), in rack_adjust_sendmap_head()
11403 ("Rack:%p sb:%p rsm:%p -- first rsm mbuf not aligned to sb", in rack_adjust_sendmap_head()
11405 while (rsm->m && (rsm->m == m)) { in rack_adjust_sendmap_head()
11411 tm = sbsndmbuf(sb, (rsm->r_start - snd_una), &soff); in rack_adjust_sendmap_head()
11412 if ((rsm->orig_m_len != m->m_len) || in rack_adjust_sendmap_head()
11413 (rsm->orig_t_space != M_TRAILINGROOM(m))){ in rack_adjust_sendmap_head()
11417 KASSERT((rsm->soff == 0), in rack_adjust_sendmap_head()
11418 ("Rack:%p rsm:%p -- rsm at head but soff not zero", in rack_adjust_sendmap_head()
11422 if ((rsm->soff != soff) || (rsm->m != tm)) { in rack_adjust_sendmap_head()
11431 rsm->m = tm; in rack_adjust_sendmap_head()
11432 rsm->soff = soff; in rack_adjust_sendmap_head()
11434 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11435 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11437 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11438 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11441 rsm->m = sbsndmbuf(sb, (rsm->r_start - snd_una), &rsm->soff); in rack_adjust_sendmap_head()
11442 if (rsm->m) { in rack_adjust_sendmap_head()
11443 rsm->orig_m_len = rsm->m->m_len; in rack_adjust_sendmap_head()
11444 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_adjust_sendmap_head()
11446 rsm->orig_m_len = 0; in rack_adjust_sendmap_head()
11447 rsm->orig_t_space = 0; in rack_adjust_sendmap_head()
11450 rsm = tqhash_next(rack->r_ctl.tqh, rsm); in rack_adjust_sendmap_head()
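/*
 * After sbdrop() has released acked data, the head of the send map
 * can point at an mbuf whose length or trailing room changed (or
 * that was freed outright). The loop above re-derives (m, soff) for
 * each affected rsm from the socket buffer via sbsndmbuf() and
 * re-caches orig_m_len / orig_t_space, so later copies out of the
 * map see a consistent view of the chain.
 */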
11463 if ((rack->rc_hybrid_mode == 0) && in rack_req_check_for_comp()
11464 (tcp_bblogging_point_on(rack->rc_tp, TCP_BBPOINT_REQ_LEVEL_LOGGING) == 0)) { in rack_req_check_for_comp()
11469 tcp_req_check_for_comp(rack->rc_tp, th_ack); in rack_req_check_for_comp()
11479 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11493 data = ent->end - ent->start; in rack_req_check_for_comp()
11494 laa = tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time); in rack_req_check_for_comp()
11495 if (ent->flags & TCP_TRK_TRACK_FLG_FSND) { in rack_req_check_for_comp()
11496 if (ent->first_send > ent->localtime) in rack_req_check_for_comp()
11497 ftim = ent->first_send; in rack_req_check_for_comp()
11499 ftim = ent->localtime; in rack_req_check_for_comp()
11502 ftim = ent->localtime; in rack_req_check_for_comp()
11504 if (laa > ent->localtime) in rack_req_check_for_comp()
11505 tim = laa - ftim; in rack_req_check_for_comp()
11519 if (ent == rack->r_ctl.rc_last_sft) { in rack_req_check_for_comp()
11520 rack->r_ctl.rc_last_sft = NULL; in rack_req_check_for_comp()
11521 if (rack->rc_hybrid_mode) { in rack_req_check_for_comp()
11522 rack->rc_catch_up = 0; in rack_req_check_for_comp()
11523 if (rack->cspr_is_fcc == 0) in rack_req_check_for_comp()
11524 rack->r_ctl.bw_rate_cap = 0; in rack_req_check_for_comp()
11526 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_req_check_for_comp()
11527 rack->r_ctl.client_suggested_maxseg = 0; in rack_req_check_for_comp()
11531 tcp_req_log_req_info(rack->rc_tp, ent, in rack_req_check_for_comp()
11534 tcp_req_free_a_slot(rack->rc_tp, ent); in rack_req_check_for_comp()
11535 ent = tcp_req_find_a_req_that_is_completed_by(rack->rc_tp, th_ack, &i); in rack_req_check_for_comp()
11544 * For ret_val, if it is 0 the TCP is locked, if it is non-zero
11564 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_ack()
11565 if (SEQ_GEQ(tp->snd_una, tp->iss + (65535 << tp->snd_scale))) { in rack_process_ack()
11567 tp->t_flags2 |= TF2_NO_ISS_CHECK; in rack_process_ack()
11573 if (tp->t_flags2 & TF2_NO_ISS_CHECK) { in rack_process_ack()
11575 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11578 if (SEQ_GT(tp->iss + 1, tp->snd_una - tp->max_sndwnd)) { in rack_process_ack()
11580 seq_min = tp->iss + 1; in rack_process_ack()
11587 seq_min = tp->snd_una - tp->max_sndwnd; in rack_process_ack()
11591 if (SEQ_LT(th->th_ack, seq_min)) { in rack_process_ack()
11598 rack->r_wanted_output = 1; in rack_process_ack()
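/*
 * Illustrative sketch of the acceptable-ACK floor computed above: an ACK
 * more than one maximum send window behind snd_una is too old, and early
 * in the connection nothing below iss + 1 can be valid.  The compares
 * use the usual 32-bit serial-number wraparound idiom.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool seq_lt_sk(uint32_t a, uint32_t b) { return ((int32_t)(a - b) < 0); }
static inline bool seq_gt_sk(uint32_t a, uint32_t b) { return ((int32_t)(a - b) > 0); }

static bool
ack_below_floor(uint32_t th_ack, uint32_t snd_una, uint32_t iss,
    uint32_t max_sndwnd, bool no_iss_check)
{
	uint32_t seq_min;

	if (!no_iss_check && seq_gt_sk(iss + 1, snd_una - max_sndwnd))
		seq_min = iss + 1;	/* still near the handshake */
	else
		seq_min = snd_una - max_sndwnd;
	return (seq_lt_sk(th_ack, seq_min));
}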
11602 if (SEQ_GT(th->th_ack, tp->snd_max)) { in rack_process_ack()
11604 rack->r_wanted_output = 1; in rack_process_ack()
11607 if (rack->gp_ready && in rack_process_ack()
11608 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_process_ack()
11611 if (SEQ_GEQ(th->th_ack, tp->snd_una) || to->to_nsacks) { in rack_process_ack()
11615 in_rec = IN_FASTRECOVERY(tp->t_flags); in rack_process_ack()
11616 if (rack->rc_in_persist) { in rack_process_ack()
11617 tp->t_rxtshift = 0; in rack_process_ack()
11618 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11619 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11622 if ((th->th_ack == tp->snd_una) && in rack_process_ack()
11623 (tiwin == tp->snd_wnd) && in rack_process_ack()
11625 ((to->to_flags & TOF_SACK) == 0)) { in rack_process_ack()
11626 rack_strike_dupack(rack, th->th_ack); in rack_process_ack()
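/*
 * Illustrative sketch: the dup-ack strike above fires only for a pure
 * duplicate -- same cumulative ACK, unchanged window, no SACK blocks
 * (and, assumed from the elided line, no data).  Field names are
 * stand-ins for the tcpcb members.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
is_pure_dupack(uint32_t th_ack, uint32_t snd_una, uint32_t tiwin,
    uint32_t snd_wnd, int tlen, bool has_sack_blocks)
{
	return (th_ack == snd_una && tiwin == snd_wnd &&
	    tlen == 0 && !has_sack_blocks);
}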
11629 rack_log_ack(tp, to, th, ((in_rec == 0) && IN_FASTRECOVERY(tp->t_flags)), in rack_process_ack()
11633 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_process_ack()
11639 if (SEQ_LT(th->th_ack, tp->snd_una) && (sbspace(&so->so_rcv) > ctf_fixed_maxseg(tp))) { in rack_process_ack()
11640 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_process_ack()
11641 if (rack->r_ctl.rc_reorder_ts == 0) in rack_process_ack()
11642 rack->r_ctl.rc_reorder_ts = 1; in rack_process_ack()
11650 if (tp->t_flags & TF_NEEDSYN) { in rack_process_ack()
11652 * T/TCP: Connection was half-synchronized, and our SYN has in rack_process_ack()
11654 * to non-starred state, increment snd_una for ACK of SYN, in rack_process_ack()
11657 tp->t_flags &= ~TF_NEEDSYN; in rack_process_ack()
11658 tp->snd_una++; in rack_process_ack()
11660 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_process_ack()
11662 tp->rcv_scale = tp->request_r_scale; in rack_process_ack()
11666 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_ack()
11671 * Any time we move the cum-ack forward, clear the in rack_process_ack()
11672 * keep-alive-tied probe-not-answered flag. The in rack_process_ack()
11675 rack->probe_not_answered = 0; in rack_process_ack()
11685 if ((tp->t_flags & TF_PREVVALID) && in rack_process_ack()
11686 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_process_ack()
11687 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
11688 if (tp->t_rxtshift == 1 && in rack_process_ack()
11689 (int)(ticks - tp->t_badrxtwin) < 0) in rack_process_ack()
11690 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_process_ack()
11694 tp->t_rxtshift = 0; in rack_process_ack()
11695 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_process_ack()
11696 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_process_ack()
11697 rack->rc_tlp_in_progress = 0; in rack_process_ack()
11698 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_process_ack()
11703 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_process_ack()
11704 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11706 rack_req_check_for_comp(rack, th->th_ack); in rack_process_ack()
11725 * (possibly backed-off) value. in rack_process_ack()
11732 if (IN_RECOVERY(tp->t_flags)) { in rack_process_ack()
11733 if (SEQ_LT(th->th_ack, tp->snd_recover) && in rack_process_ack()
11734 (SEQ_LT(th->th_ack, tp->snd_max))) { in rack_process_ack()
11737 rack_post_recovery(tp, th->th_ack); in rack_process_ack()
11744 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_process_ack()
11746 p_cwnd += tp->snd_cwnd; in rack_process_ack()
11748 } else if ((rack->rto_from_rec == 1) && in rack_process_ack()
11749 SEQ_GEQ(th->th_ack, tp->snd_recover)) { in rack_process_ack()
11752 * and never re-entered recovery. The timeout(s) in rack_process_ack()
11756 rack->rto_from_rec = 0; in rack_process_ack()
11763 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, post_recovery); in rack_process_ack()
11765 (tp->snd_cwnd > p_cwnd)) { in rack_process_ack()
11766 /* Must be non-newreno (cubic) getting too far ahead of itself */ in rack_process_ack()
11767 tp->snd_cwnd = p_cwnd; in rack_process_ack()
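/*
 * Illustrative sketch: on the ACK that exits recovery above, cwnd may
 * grow by at most one minimum pacer segment; a CC module such as cubic
 * that inflates past that is pulled back to the cap.
 */
#include <stdint.h>

static uint32_t
cap_post_recovery_cwnd(uint32_t cwnd_at_exit, uint32_t cwnd_after_cc,
    uint32_t maxseg, uint32_t pace_min_segs)
{
	uint32_t p_cwnd;

	p_cwnd = (maxseg < pace_min_segs ? maxseg : pace_min_segs);
	p_cwnd += cwnd_at_exit;
	return (cwnd_after_cc > p_cwnd ? p_cwnd : cwnd_after_cc);
}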
11770 acked_amount = min(acked, (int)sbavail(&so->so_snd)); in rack_process_ack()
11771 tp->snd_wnd -= acked_amount; in rack_process_ack()
11772 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_process_ack()
11773 if ((sbused(&so->so_snd) == 0) && in rack_process_ack()
11775 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
11776 (tp->t_flags & TF_SENTFIN)) { in rack_process_ack()
11785 tp->snd_una = th->th_ack; in rack_process_ack()
11787 if (acked_amount && sbavail(&so->so_snd)) in rack_process_ack()
11788 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_process_ack()
11789 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_process_ack()
11793 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_process_ack()
11794 tp->snd_recover = tp->snd_una; in rack_process_ack()
11796 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { in rack_process_ack()
11797 tp->snd_nxt = tp->snd_max; in rack_process_ack()
11800 (rack->use_fixed_rate == 0) && in rack_process_ack()
11801 (rack->in_probe_rtt == 0) && in rack_process_ack()
11802 rack->rc_gp_dyn_mul && in rack_process_ack()
11803 rack->rc_always_pace) { in rack_process_ack()
11807 if (tp->snd_una == tp->snd_max) { in rack_process_ack()
11809 tp->t_flags &= ~TF_PREVVALID; in rack_process_ack()
11810 if (rack->r_ctl.rc_went_idle_time == 0) in rack_process_ack()
11811 rack->r_ctl.rc_went_idle_time = 1; in rack_process_ack()
11812 rack->r_ctl.retran_during_recovery = 0; in rack_process_ack()
11813 rack->r_ctl.dsack_byte_cnt = 0; in rack_process_ack()
11815 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_process_ack()
11816 tp->t_acktime = 0; in rack_process_ack()
11817 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_ack()
11818 rack->rc_suspicious = 0; in rack_process_ack()
11820 rack->r_wanted_output = 1; in rack_process_ack()
11821 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_process_ack()
11822 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_process_ack()
11823 (sbavail(&so->so_snd) == 0) && in rack_process_ack()
11824 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_process_ack()
11831 /* tcp_close will kill the inp; pre-log the Reset */ in rack_process_ack()
11848 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_collapse()
11857 log.u_bbr.flex5 = rack->r_must_retran; in rack_log_collapse()
11859 log.u_bbr.flex7 = rack->rc_has_collapsed; in rack_log_collapse()
11869 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_collapse()
11870 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_collapse()
11871 &rack->rc_inp->inp_socket->so_rcv, in rack_log_collapse()
11872 &rack->rc_inp->inp_socket->so_snd, in rack_log_collapse()
11887 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_collapsed_window()
11888 if ((rack->rc_has_collapsed == 0) || in rack_collapsed_window()
11889 (rack->r_ctl.last_collapse_point != (th_ack + rack->rc_tp->snd_wnd))) in rack_collapsed_window()
11891 rack->r_ctl.last_collapse_point = th_ack + rack->rc_tp->snd_wnd; in rack_collapsed_window()
11892 rack->r_ctl.high_collapse_point = rack->rc_tp->snd_max; in rack_collapsed_window()
11893 rack->rc_has_collapsed = 1; in rack_collapsed_window()
11894 rack->r_collapse_point_valid = 1; in rack_collapsed_window()
11895 rack_log_collapse(rack, 0, th_ack, rack->r_ctl.last_collapse_point, line, 1, 0, NULL); in rack_collapsed_window()
11906 tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_WND); in rack_un_collapse_window()
11907 rack->rc_has_collapsed = 0; in rack_un_collapse_window()
11908 rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
11911 rack_log_collapse(rack, 0, 0, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
11915 if (SEQ_GT(rack->r_ctl.last_collapse_point, rsm->r_start)) { in rack_un_collapse_window()
11916 rack_log_collapse(rack, rsm->r_start, rsm->r_end, in rack_un_collapse_window()
11917 rack->r_ctl.last_collapse_point, line, 3, rsm->r_flags, rsm); in rack_un_collapse_window()
11926 rack_clone_rsm(rack, nrsm, rsm, rack->r_ctl.last_collapse_point); in rack_un_collapse_window()
11928 (void)tqhash_insert(rack->r_ctl.tqh, nrsm); in rack_un_collapse_window()
11930 if ((insret = tqhash_insert(rack->r_ctl.tqh, nrsm)) != 0) { in rack_un_collapse_window()
11935 rack_log_map_chg(rack->rc_tp, rack, NULL, rsm, nrsm, MAP_SPLIT, in rack_un_collapse_window()
11936 rack->r_ctl.last_collapse_point, __LINE__); in rack_un_collapse_window()
11937 if (rsm->r_in_tmap) { in rack_un_collapse_window()
11938 TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext); in rack_un_collapse_window()
11939 nrsm->r_in_tmap = 1; in rack_un_collapse_window()
11949 TQHASH_FOREACH_FROM(nrsm, rack->r_ctl.tqh, rsm) { in rack_un_collapse_window()
11951 nrsm->r_flags |= RACK_RWND_COLLAPSED; in rack_un_collapse_window()
11952 rack_log_collapse(rack, nrsm->r_start, nrsm->r_end, 0, line, 4, nrsm->r_flags, nrsm); in rack_un_collapse_window()
11958 rack_log_collapse(rack, cnt, split, ctf_outstanding(rack->rc_tp), line, 0, 0, NULL); in rack_un_collapse_window()
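/*
 * Illustrative sketch of the un-collapse walk above: every send-map
 * entry at or past the recorded collapse point gets flagged so the
 * output path knows those bytes were sent beyond a window the peer
 * later shrank.  Wraparound-safe compares are omitted for brevity.
 */
#include <stdint.h>

#define RWND_COLLAPSED_SK 0x1

struct seg_sk { uint32_t start, end, flags; struct seg_sk *next; };

static int
mark_collapsed(struct seg_sk *first, uint32_t collapse_point)
{
	int cnt = 0;

	for (struct seg_sk *s = first; s != NULL; s = s->next) {
		if (s->start >= collapse_point) {
			s->flags |= RWND_COLLAPSED_SK;
			cnt++;
		}
	}
	return (cnt);
}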
11967 rack->r_ctl.rc_rcvtime, __LINE__); in rack_handle_delayed_ack()
11968 tp->t_flags |= TF_DELACK; in rack_handle_delayed_ack()
11970 rack->r_wanted_output = 1; in rack_handle_delayed_ack()
11971 tp->t_flags |= TF_ACKNOW; in rack_handle_delayed_ack()
11983 if (rack->r_fast_output) { in rack_validate_fo_sendwin_up()
11991 if ((out + rack->r_ctl.fsb.left_to_send) > tp->snd_wnd) { in rack_validate_fo_sendwin_up()
11993 if (out >= tp->snd_wnd) { in rack_validate_fo_sendwin_up()
11995 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
11998 rack->r_ctl.fsb.left_to_send = tp->snd_wnd - out; in rack_validate_fo_sendwin_up()
11999 if (rack->r_ctl.fsb.left_to_send < ctf_fixed_maxseg(tp)) { in rack_validate_fo_sendwin_up()
12001 rack->r_fast_output = 0; in rack_validate_fo_sendwin_up()
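/*
 * Illustrative sketch: rack_validate_fo_sendwin_up() above keeps the
 * precomputed fast-send budget inside the peer's window.  It returns
 * whether the fast path survives; a remainder under one segment is not
 * worth the fast path.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
clamp_fast_output(uint32_t out, uint32_t snd_wnd, uint32_t maxseg,
    uint32_t *left_to_send)
{
	if (out + *left_to_send > snd_wnd) {
		if (out >= snd_wnd)
			return (false);		/* already at/over the window */
		*left_to_send = snd_wnd - out;
		if (*left_to_send < maxseg)
			return (false);		/* less than a segment left */
	}
	return (true);
}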
12028 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_process_data()
12029 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_process_data()
12031 (SEQ_LT(tp->snd_wl1, th->th_seq) || in rack_process_data()
12032 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || in rack_process_data()
12033 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { in rack_process_data()
12036 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) in rack_process_data()
12038 tp->snd_wnd = tiwin; in rack_process_data()
12040 tp->snd_wl1 = th->th_seq; in rack_process_data()
12041 tp->snd_wl2 = th->th_ack; in rack_process_data()
12042 if (tp->snd_wnd > tp->max_sndwnd) in rack_process_data()
12043 tp->max_sndwnd = tp->snd_wnd; in rack_process_data()
12044 rack->r_wanted_output = 1; in rack_process_data()
12046 if ((tp->snd_wl2 == th->th_ack) && (tiwin < tp->snd_wnd)) { in rack_process_data()
12047 tp->snd_wnd = tiwin; in rack_process_data()
12049 tp->snd_wl1 = th->th_seq; in rack_process_data()
12050 tp->snd_wl2 = th->th_ack; in rack_process_data()
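/*
 * Illustrative sketch of the RFC 793 window-update acceptance test used
 * above: take the advertised window only from a segment that is newer
 * by sequence (wl1), or equally new and newer by ACK (wl2), or
 * identical but advertising more space.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool wl_lt(uint32_t a, uint32_t b) { return ((int32_t)(a - b) < 0); }

static bool
window_update_ok(uint32_t th_seq, uint32_t th_ack, uint32_t tiwin,
    uint32_t snd_wl1, uint32_t snd_wl2, uint32_t snd_wnd)
{
	return (wl_lt(snd_wl1, th_seq) ||
	    (snd_wl1 == th_seq && (wl_lt(snd_wl2, th_ack) ||
	    (snd_wl2 == th_ack && tiwin > snd_wnd))));
}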
12053 if (tp->snd_wnd < ctf_outstanding(tp)) in rack_process_data()
12055 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_process_data()
12056 else if (rack->rc_has_collapsed) in rack_process_data()
12058 if ((rack->r_collapse_point_valid) && in rack_process_data()
12059 (SEQ_GT(th->th_ack, rack->r_ctl.high_collapse_point))) in rack_process_data()
12060 rack->r_collapse_point_valid = 0; in rack_process_data()
12062 if ((rack->rc_in_persist != 0) && in rack_process_data()
12063 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_process_data()
12064 rack->r_ctl.rc_pace_min_segs))) { in rack_process_data()
12065 rack_exit_persist(tp, rack, rack->r_ctl.rc_rcvtime); in rack_process_data()
12066 tp->snd_nxt = tp->snd_max; in rack_process_data()
12068 rack->r_wanted_output = 1; in rack_process_data()
12071 if ((rack->rc_in_persist == 0) && in rack_process_data()
12072 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_process_data()
12073 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_process_data()
12074 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_process_data()
12075 sbavail(&tptosocket(tp)->so_snd) && in rack_process_data()
12076 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_process_data()
12083 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una); in rack_process_data()
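/*
 * Illustrative sketch: both persist checks above compare the offered
 * window against min(highest rwnd seen / 2, pacer minimum burst).
 * Below that threshold, with data queued past the window and nothing
 * in flight (or a collapsed window), the connection parks in persist.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t
persist_threshold(uint32_t high_rwnd, uint32_t pace_min_segs)
{
	uint32_t half = high_rwnd / 2;

	return (half < pace_min_segs ? half : pace_min_segs);
}

static bool
should_enter_persist(uint32_t snd_wnd, uint32_t thresh, bool established,
    bool nothing_in_flight_or_collapsed, uint32_t sb_avail)
{
	return (snd_wnd < thresh && established &&
	    nothing_in_flight_or_collapsed &&
	    sb_avail > 0 && sb_avail > snd_wnd);
}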
12085 if (tp->t_flags2 & TF2_DROP_AF_DATA) { in rack_process_data()
12093 tp->rcv_up = tp->rcv_nxt; in rack_process_data()
12098 * This process logically involves adjusting tp->rcv_wnd as data is in rack_process_data()
12103 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_process_data()
12104 (tp->t_flags & TF_FASTOPEN)); in rack_process_data()
12106 TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12107 tcp_seq save_start = th->th_seq; in rack_process_data()
12108 tcp_seq save_rnxt = tp->rcv_nxt; in rack_process_data()
12123 if (th->th_seq == tp->rcv_nxt && in rack_process_data()
12125 (TCPS_HAVEESTABLISHED(tp->t_state) || in rack_process_data()
12130 if (so->so_rcv.sb_shlim) { in rack_process_data()
12133 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_process_data()
12142 tp->rcv_nxt += tlen; in rack_process_data()
12144 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_process_data()
12145 (tp->t_fbyte_in == 0)) { in rack_process_data()
12146 tp->t_fbyte_in = ticks; in rack_process_data()
12147 if (tp->t_fbyte_in == 0) in rack_process_data()
12148 tp->t_fbyte_in = 1; in rack_process_data()
12149 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_process_data()
12150 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_process_data()
12156 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_process_data()
12165 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_process_data()
12170 sbappendstream_locked(&so->so_rcv, m, 0); in rack_process_data()
12172 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_process_data()
12176 if (so->so_rcv.sb_shlim && appended != mcnt) in rack_process_data()
12177 counter_fo_release(so->so_rcv.sb_shlim, in rack_process_data()
12178 mcnt - appended); in rack_process_data()
12190 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12191 if (tp->t_flags & TF_WAKESOR) { in rack_process_data()
12192 tp->t_flags &= ~TF_WAKESOR; in rack_process_data()
12197 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_process_data()
12199 TCPS_HAVEESTABLISHED(tp->t_state)) { in rack_process_data()
12207 } else if ((tlen > 0) && SEQ_GT(tp->rcv_nxt, save_rnxt)) { in rack_process_data()
12208 if ((tp->rcv_numsacks >= 1) && in rack_process_data()
12209 (tp->sackblks[0].end == save_start)) { in rack_process_data()
12215 tp->sackblks[0].start, in rack_process_data()
12216 tp->sackblks[0].end); in rack_process_data()
12240 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { in rack_process_data()
12244 * If connection is half-synchronized (ie NEEDSYN in rack_process_data()
12250 if (tp->t_flags & TF_NEEDSYN) { in rack_process_data()
12252 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12253 tp->t_flags |= TF_DELACK; in rack_process_data()
12255 tp->t_flags |= TF_ACKNOW; in rack_process_data()
12257 tp->rcv_nxt++; in rack_process_data()
12259 switch (tp->t_state) { in rack_process_data()
12265 tp->t_starttime = ticks; in rack_process_data()
12269 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12279 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12285 * starting the time-wait timer, turning off the in rack_process_data()
12290 rack->r_ctl.rc_rcvtime, __LINE__); in rack_process_data()
12298 if ((tp->t_flags & TF_ACKNOW) || in rack_process_data()
12299 (sbavail(&so->so_snd) > (tp->snd_max - tp->snd_una))) { in rack_process_data()
12300 rack->r_wanted_output = 1; in rack_process_data()
12307 * have broken out the fast-data path as well, just like
12308 * the fast-ack path.
12327 if (__predict_false(th->th_seq != tp->rcv_nxt)) { in rack_do_fastnewdata()
12330 if (tiwin && tiwin != tp->snd_wnd) { in rack_do_fastnewdata()
12333 if (__predict_false((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)))) { in rack_do_fastnewdata()
12336 if (__predict_false((to->to_flags & TOF_TS) && in rack_do_fastnewdata()
12337 (TSTMP_LT(to->to_tsval, tp->ts_recent)))) { in rack_do_fastnewdata()
12340 if (__predict_false((th->th_ack != tp->snd_una))) { in rack_do_fastnewdata()
12343 if (__predict_false(tlen > sbspace(&so->so_rcv))) { in rack_do_fastnewdata()
12346 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fastnewdata()
12347 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_do_fastnewdata()
12348 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fastnewdata()
12349 tp->ts_recent = to->to_tsval; in rack_do_fastnewdata()
12351 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_fastnewdata()
12353 * This is a pure, in-sequence data packet with nothing on the in rack_do_fastnewdata()
12356 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_do_fastnewdata()
12359 if (so->so_rcv.sb_shlim) { in rack_do_fastnewdata()
12362 if (counter_fo_get(so->so_rcv.sb_shlim, mcnt, in rack_do_fastnewdata()
12371 if (tp->rcv_numsacks) in rack_do_fastnewdata()
12374 tp->rcv_nxt += tlen; in rack_do_fastnewdata()
12376 ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) && in rack_do_fastnewdata()
12377 (tp->t_fbyte_in == 0)) { in rack_do_fastnewdata()
12378 tp->t_fbyte_in = ticks; in rack_do_fastnewdata()
12379 if (tp->t_fbyte_in == 0) in rack_do_fastnewdata()
12380 tp->t_fbyte_in = 1; in rack_do_fastnewdata()
12381 if (tp->t_fbyte_out && tp->t_fbyte_in) in rack_do_fastnewdata()
12382 tp->t_flags2 |= TF2_FBYTES_COMPLETE; in rack_do_fastnewdata()
12387 tp->snd_wl1 = th->th_seq; in rack_do_fastnewdata()
12391 tp->rcv_up = tp->rcv_nxt; in rack_do_fastnewdata()
12398 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fastnewdata()
12407 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; in rack_do_fastnewdata()
12412 sbappendstream_locked(&so->so_rcv, m, 0); in rack_do_fastnewdata()
12415 rack_log_wakeup(tp, rack, &so->so_rcv, tlen, 1); in rack_do_fastnewdata()
12419 if (so->so_rcv.sb_shlim && mcnt != appended) in rack_do_fastnewdata()
12420 counter_fo_release(so->so_rcv.sb_shlim, mcnt - appended); in rack_do_fastnewdata()
12423 if (tp->snd_una == tp->snd_max) in rack_do_fastnewdata()
12424 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_fastnewdata()
12431 * in sequence to remain in the fast-path. We also add
12435 * slow-path.
12447 if (__predict_false(SEQ_LEQ(th->th_ack, tp->snd_una))) { in rack_fastack()
12451 if (__predict_false(SEQ_GT(th->th_ack, tp->snd_max))) { in rack_fastack()
12459 if (__predict_false(tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN))) { in rack_fastack()
12463 if ((to->to_flags & TOF_TS) && __predict_false(TSTMP_LT(to->to_tsval, tp->ts_recent))) { in rack_fastack()
12467 if (__predict_false(IN_RECOVERY(tp->t_flags))) { in rack_fastack()
12471 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fastack()
12472 if (rack->r_ctl.rc_sacked) { in rack_fastack()
12476 /* Ok if we reach here, we can process a fast-ack */ in rack_fastack()
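/*
 * Illustrative sketch: the chain of __predict_false() tests above,
 * collapsed into one predicate.  The fast path handles only a strictly
 * advancing, in-window ACK on a quiet connection: no pending SYN/FIN,
 * no stale timestamp, not in recovery, and no SACKed data outstanding.
 */
#include <stdbool.h>
#include <stdint.h>

struct fastack_chk {
	uint32_t th_ack, snd_una, snd_max;
	bool needs_syn_or_fin, ts_stale, in_recovery, has_sacked;
};

static bool
fastack_ok(const struct fastack_chk *c)
{
	if ((int32_t)(c->th_ack - c->snd_una) <= 0)
		return (false);			/* old or duplicate ACK */
	if ((int32_t)(c->th_ack - c->snd_max) > 0)
		return (false);			/* acks unsent data */
	return (!c->needs_syn_or_fin && !c->ts_stale &&
	    !c->in_recovery && !c->has_sacked);
}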
12477 if (rack->gp_ready && in rack_fastack()
12478 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_fastack()
12481 nsegs = max(1, m->m_pkthdr.lro_nsegs); in rack_fastack()
12484 if (tiwin != tp->snd_wnd) { in rack_fastack()
12485 tp->snd_wnd = tiwin; in rack_fastack()
12487 tp->snd_wl1 = th->th_seq; in rack_fastack()
12488 if (tp->snd_wnd > tp->max_sndwnd) in rack_fastack()
12489 tp->max_sndwnd = tp->snd_wnd; in rack_fastack()
12492 if ((rack->rc_in_persist != 0) && in rack_fastack()
12493 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_fastack()
12494 rack->r_ctl.rc_pace_min_segs))) { in rack_fastack()
12498 if ((rack->rc_in_persist == 0) && in rack_fastack()
12499 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_fastack()
12500 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_fastack()
12501 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_fastack()
12502 sbavail(&tptosocket(tp)->so_snd) && in rack_fastack()
12503 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_fastack()
12510 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, th->th_ack); in rack_fastack()
12517 if ((to->to_flags & TOF_TS) != 0 && in rack_fastack()
12518 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { in rack_fastack()
12519 tp->ts_recent_age = tcp_ts_getticks(); in rack_fastack()
12520 tp->ts_recent = to->to_tsval; in rack_fastack()
12530 if ((tp->t_flags & TF_PREVVALID) && in rack_fastack()
12531 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_fastack()
12532 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12533 if (tp->t_rxtshift == 1 && in rack_fastack()
12534 (int)(ticks - tp->t_badrxtwin) < 0) in rack_fastack()
12535 rack_cong_signal(tp, CC_RTO_ERR, th->th_ack, __LINE__); in rack_fastack()
12555 rack_ack_received(tp, rack, th->th_ack, nsegs, CC_ACK, 0); in rack_fastack()
12557 mfree = sbcut_locked(&so->so_snd, acked); in rack_fastack()
12558 tp->snd_una = th->th_ack; in rack_fastack()
12560 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_fastack()
12562 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_fastack()
12565 tp->t_rxtshift = 0; in rack_fastack()
12566 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_fastack()
12567 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_fastack()
12568 rack->rc_tlp_in_progress = 0; in rack_fastack()
12569 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_fastack()
12574 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_fastack()
12575 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12578 rack_req_check_for_comp(rack, th->th_ack); in rack_fastack()
12586 if (tp->snd_wnd < ctf_outstanding(tp)) { in rack_fastack()
12588 rack_collapsed_window(rack, ctf_outstanding(tp), th->th_ack, __LINE__); in rack_fastack()
12589 } else if (rack->rc_has_collapsed) in rack_fastack()
12591 if ((rack->r_collapse_point_valid) && in rack_fastack()
12592 (SEQ_GT(tp->snd_una, rack->r_ctl.high_collapse_point))) in rack_fastack()
12593 rack->r_collapse_point_valid = 0; in rack_fastack()
12597 tp->snd_wl2 = th->th_ack; in rack_fastack()
12598 tp->t_dupacks = 0; in rack_fastack()
12604 * otherwise restart timer using current (possibly backed-off) in rack_fastack()
12610 (rack->use_fixed_rate == 0) && in rack_fastack()
12611 (rack->in_probe_rtt == 0) && in rack_fastack()
12612 rack->rc_gp_dyn_mul && in rack_fastack()
12613 rack->rc_always_pace) { in rack_fastack()
12617 if (tp->snd_una == tp->snd_max) { in rack_fastack()
12618 tp->t_flags &= ~TF_PREVVALID; in rack_fastack()
12619 rack->r_ctl.retran_during_recovery = 0; in rack_fastack()
12620 rack->rc_suspicious = 0; in rack_fastack()
12621 rack->r_ctl.dsack_byte_cnt = 0; in rack_fastack()
12622 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_fastack()
12623 if (rack->r_ctl.rc_went_idle_time == 0) in rack_fastack()
12624 rack->r_ctl.rc_went_idle_time = 1; in rack_fastack()
12626 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_fastack()
12627 tp->t_acktime = 0; in rack_fastack()
12628 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_fastack()
12630 if (acked && rack->r_fast_output) in rack_fastack()
12632 if (sbavail(&so->so_snd)) { in rack_fastack()
12633 rack->r_wanted_output = 1; in rack_fastack()
12661 * this is an acceptable SYN segment; initialize tp->rcv_nxt and in rack_do_syn_sent()
12662 * tp->irs; if seg contains an ack then advance tp->snd_una; if seg in rack_do_syn_sent()
12669 (SEQ_LEQ(th->th_ack, tp->iss) || in rack_do_syn_sent()
12670 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_sent()
12690 tp->irs = th->th_seq; in rack_do_syn_sent()
12692 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_sent()
12702 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_sent()
12704 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_sent()
12706 tp->rcv_adv += min(tp->rcv_wnd, in rack_do_syn_sent()
12707 TCP_MAXWIN << tp->rcv_scale); in rack_do_syn_sent()
12712 if ((tp->t_flags & TF_FASTOPEN) && in rack_do_syn_sent()
12713 (tp->snd_una != tp->snd_max)) { in rack_do_syn_sent()
12715 if (SEQ_LT(th->th_ack, tp->snd_max)) in rack_do_syn_sent()
12724 rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_syn_sent()
12725 tp->t_flags |= TF_DELACK; in rack_do_syn_sent()
12727 rack->r_wanted_output = 1; in rack_do_syn_sent()
12728 tp->t_flags |= TF_ACKNOW; in rack_do_syn_sent()
12733 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_syn_sent()
12739 * ack-processing since the in rack_do_syn_sent()
12740 * data stream in our send-map in rack_do_syn_sent()
12746 tp->snd_una++; in rack_do_syn_sent()
12747 if (tfo_partial && (SEQ_GT(tp->snd_max, tp->snd_una))) { in rack_do_syn_sent()
12756 rsm = tqhash_min(rack->r_ctl.tqh); in rack_do_syn_sent()
12758 if (rsm->r_flags & RACK_HAS_SYN) { in rack_do_syn_sent()
12759 rsm->r_flags &= ~RACK_HAS_SYN; in rack_do_syn_sent()
12760 rsm->r_start++; in rack_do_syn_sent()
12762 rack->r_ctl.rc_resend = rsm; in rack_do_syn_sent()
12768 * SYN_SENT --> ESTABLISHED SYN_SENT* --> FIN_WAIT_1 in rack_do_syn_sent()
12770 tp->t_starttime = ticks; in rack_do_syn_sent()
12771 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_sent()
12773 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_sent()
12783 * Received initial SYN in SYN-SENT[*] state => simultaneous in rack_do_syn_sent()
12786 * half-synchronized. Otherwise, do 3-way handshake: in rack_do_syn_sent()
12787 * SYN-SENT -> SYN-RECEIVED SYN-SENT* -> SYN-RECEIVED* If in rack_do_syn_sent()
12790 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN | TF_SONOTCONN); in rack_do_syn_sent()
12794 * Advance th->th_seq to correspond to first data byte. If data, in rack_do_syn_sent()
12797 th->th_seq++; in rack_do_syn_sent()
12798 if (tlen > tp->rcv_wnd) { in rack_do_syn_sent()
12799 todrop = tlen - tp->rcv_wnd; in rack_do_syn_sent()
12800 m_adj(m, -todrop); in rack_do_syn_sent()
12801 tlen = tp->rcv_wnd; in rack_do_syn_sent()
12806 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_sent()
12807 tp->rcv_up = th->th_seq; in rack_do_syn_sent()
12815 /* For syn-sent we may need to update the rtt */ in rack_do_syn_sent()
12816 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_sent()
12820 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_sent()
12821 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_sent()
12822 tp->t_rttlow = t; in rack_do_syn_sent()
12823 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 4); in rack_do_syn_sent()
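/*
 * Illustrative sketch: the handshake RTT sample above is the delta
 * between the current millisecond timestamp clock and the echoed tsecr,
 * scaled to microseconds (HPTS_USEC_IN_MSEC in the listing; 1000 is
 * assumed here).  The lowest sample seen is also tracked.
 */
#include <stdint.h>

#define USEC_PER_MSEC_SK 1000

static uint32_t
rtt_from_ts_echo(uint32_t now_ms, uint32_t tsecr_ms, uint32_t *rttlow)
{
	uint32_t t = (now_ms - tsecr_ms) * USEC_PER_MSEC_SK;

	if (*rttlow == 0 || *rttlow > t)
		*rttlow = t;
	return (t);
}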
12830 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_sent()
12847 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_sent()
12877 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_syn_recv()
12880 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_syn_recv()
12883 (SEQ_LEQ(th->th_ack, tp->snd_una) || in rack_do_syn_recv()
12884 SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_syn_recv()
12889 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
12902 /* non-initial SYN is ignored */ in rack_do_syn_recv()
12903 if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) || in rack_do_syn_recv()
12904 (rack->r_ctl.rc_hpts_flags & PACE_TMR_TLP) || in rack_do_syn_recv()
12905 (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)) { in rack_do_syn_recv()
12919 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_syn_recv()
12920 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_syn_recv()
12925 * In the SYN-RECEIVED state, validate that the packet belongs to in rack_do_syn_recv()
12931 if (SEQ_LT(th->th_seq, tp->irs)) { in rack_do_syn_recv()
12953 if ((to->to_flags & TOF_TS) != 0 && in rack_do_syn_recv()
12954 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_syn_recv()
12955 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_syn_recv()
12957 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_syn_recv()
12958 tp->ts_recent = to->to_tsval; in rack_do_syn_recv()
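/*
 * Illustrative sketch of the RFC 7323 ts_recent update rule repeated in
 * each handler above: adopt the peer's tsval only when the segment's
 * sequence range covers last_ack_sent.  The listing elides the final
 * length term; syn_fin_len below stands in for it.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool s_leq(uint32_t a, uint32_t b) { return ((int32_t)(a - b) <= 0); }

static void
maybe_update_ts_recent(uint32_t th_seq, int tlen, int syn_fin_len,
    uint32_t last_ack_sent, uint32_t tsval,
    uint32_t *ts_recent, uint32_t *ts_recent_age, uint32_t now_ticks)
{
	if (s_leq(th_seq, last_ack_sent) &&
	    s_leq(last_ack_sent, th_seq + tlen + syn_fin_len)) {
		*ts_recent_age = now_ticks;
		*ts_recent = tsval;
	}
}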
12960 tp->snd_wnd = tiwin; in rack_do_syn_recv()
12963 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_syn_recv()
12964 * is on (half-synchronized state), then queue data for later in rack_do_syn_recv()
12968 if (tp->t_flags & TF_FASTOPEN) { in rack_do_syn_recv()
12975 if (tp->t_flags & TF_SONOTCONN) { in rack_do_syn_recv()
12976 tp->t_flags &= ~TF_SONOTCONN; in rack_do_syn_recv()
12980 if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) == in rack_do_syn_recv()
12982 tp->rcv_scale = tp->request_r_scale; in rack_do_syn_recv()
12985 * Make transitions: SYN-RECEIVED -> ESTABLISHED SYN-RECEIVED* -> in rack_do_syn_recv()
12986 * FIN-WAIT-1 in rack_do_syn_recv()
12988 tp->t_starttime = ticks; in rack_do_syn_recv()
12989 if ((tp->t_flags & TF_FASTOPEN) && tp->t_tfo_pending) { in rack_do_syn_recv()
12990 tcp_fastopen_decrement_counter(tp->t_tfo_pending); in rack_do_syn_recv()
12991 tp->t_tfo_pending = NULL; in rack_do_syn_recv()
12993 if (tp->t_flags & TF_NEEDFIN) { in rack_do_syn_recv()
12995 tp->t_flags &= ~TF_NEEDFIN; in rack_do_syn_recv()
13006 if (!(tp->t_flags & TF_FASTOPEN)) in rack_do_syn_recv()
13014 if (SEQ_GT(th->th_ack, tp->snd_una) && !(tp->t_flags & TF_NEEDSYN)) in rack_do_syn_recv()
13015 tp->snd_una++; in rack_do_syn_recv()
13023 if (tp->t_flags & TF_WAKESOR) { in rack_do_syn_recv()
13024 tp->t_flags &= ~TF_WAKESOR; in rack_do_syn_recv()
13029 tp->snd_wl1 = th->th_seq - 1; in rack_do_syn_recv()
13030 /* For syn-recv we may need to update the rtt */ in rack_do_syn_recv()
13031 if ((to->to_flags & TOF_TS) != 0 && to->to_tsecr) { in rack_do_syn_recv()
13035 t = (mcts - to->to_tsecr) * HPTS_USEC_IN_MSEC; in rack_do_syn_recv()
13036 if (!tp->t_rttlow || tp->t_rttlow > t) in rack_do_syn_recv()
13037 tp->t_rttlow = t; in rack_do_syn_recv()
13038 rack_log_rtt_sample_calc(rack, t, (to->to_tsecr * 1000), (mcts * 1000), 5); in rack_do_syn_recv()
13045 if (tp->t_state == TCPS_FIN_WAIT_1) { in rack_do_syn_recv()
13062 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_syn_recv()
13092 * uni-directional data xfer. If the packet has no control flags, in rack_do_established()
13093 * is in-sequence, the window didn't change and we're not in rack_do_established()
13097 * waiting for space. If the length is non-zero and the ack didn't in rack_do_established()
13098 * move, we're the receiver side. If we're getting packets in-order in rack_do_established()
13101 * hidden state-flags are also off. Since we check for in rack_do_established()
13104 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_established()
13105 if (__predict_true(((to->to_flags & TOF_SACK) == 0)) && in rack_do_established()
13108 __predict_true(th->th_seq == tp->rcv_nxt)) { in rack_do_established()
13111 tiwin, nxt_pkt, rack->r_ctl.rc_rcvtime)) { in rack_do_established()
13124 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_established()
13139 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_established()
13140 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_established()
13161 if ((to->to_flags & TOF_TS) != 0 && in rack_do_established()
13162 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_established()
13163 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_established()
13165 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_established()
13166 tp->ts_recent = to->to_tsval; in rack_do_established()
13169 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_established()
13170 * is on (half-synchronized state), then queue data for later in rack_do_established()
13174 if (tp->t_flags & TF_NEEDSYN) { in rack_do_established()
13178 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_established()
13180 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_established()
13193 if (sbavail(&so->so_snd)) { in rack_do_established()
13220 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_close_wait()
13234 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_close_wait()
13235 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_close_wait()
13256 if ((to->to_flags & TOF_TS) != 0 && in rack_do_close_wait()
13257 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_close_wait()
13258 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_close_wait()
13260 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_close_wait()
13261 tp->ts_recent = to->to_tsval; in rack_do_close_wait()
13264 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_close_wait()
13265 * is on (half-synchronized state), then queue data for later in rack_do_close_wait()
13269 if (tp->t_flags & TF_NEEDSYN) { in rack_do_close_wait()
13273 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_close_wait()
13275 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_close_wait()
13288 if (sbavail(&so->so_snd)) { in rack_do_close_wait()
13290 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_close_wait()
13306 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_check_data_after_close()
13307 if (rack->rc_allow_data_af_clo == 0) { in rack_check_data_after_close()
13310 /* tcp_close will kill the inp; pre-log the Reset */ in rack_check_data_after_close()
13317 if (sbavail(&so->so_snd) == 0) in rack_check_data_after_close()
13321 tp->rcv_nxt = th->th_seq + *tlen; in rack_check_data_after_close()
13322 tp->t_flags2 |= TF2_DROP_AF_DATA; in rack_check_data_after_close()
13323 rack->r_wanted_output = 1; in rack_check_data_after_close()
13345 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_1()
13359 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_1()
13360 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_1()
13371 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_1()
13388 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_1()
13389 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_1()
13390 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_1()
13392 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_1()
13393 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_1()
13396 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_1()
13397 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_1()
13401 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_1()
13404 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_1()
13406 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_1()
13429 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_fin_wait_1()
13438 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_1()
13440 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_1()
13467 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_closing()
13481 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_closing()
13482 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_closing()
13503 if ((to->to_flags & TOF_TS) != 0 && in rack_do_closing()
13504 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_closing()
13505 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_closing()
13507 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_closing()
13508 tp->ts_recent = to->to_tsval; in rack_do_closing()
13511 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_closing()
13512 * is on (half-synchronized state), then queue data for later in rack_do_closing()
13516 if (tp->t_flags & TF_NEEDSYN) { in rack_do_closing()
13519 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_closing()
13521 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_closing()
13539 if (sbavail(&so->so_snd)) { in rack_do_closing()
13541 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_closing()
13568 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_lastack()
13582 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_lastack()
13583 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_lastack()
13605 if ((to->to_flags & TOF_TS) != 0 && in rack_do_lastack()
13606 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_lastack()
13607 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_lastack()
13609 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_lastack()
13610 tp->ts_recent = to->to_tsval; in rack_do_lastack()
13613 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_lastack()
13614 * is on (half-synchronized state), then queue data for later in rack_do_lastack()
13618 if (tp->t_flags & TF_NEEDSYN) { in rack_do_lastack()
13621 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_lastack()
13623 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_lastack()
13641 if (sbavail(&so->so_snd)) { in rack_do_lastack()
13643 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_lastack()
13671 (tp->t_fin_is_rst && (thflags & TH_FIN))) in rack_do_fin_wait_2()
13685 if ((to->to_flags & TOF_TS) != 0 && tp->ts_recent && in rack_do_fin_wait_2()
13686 TSTMP_LT(to->to_tsval, tp->ts_recent)) { in rack_do_fin_wait_2()
13697 if ((tp->t_flags & TF_CLOSED) && tlen && in rack_do_fin_wait_2()
13714 if ((to->to_flags & TOF_TS) != 0 && in rack_do_fin_wait_2()
13715 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && in rack_do_fin_wait_2()
13716 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + in rack_do_fin_wait_2()
13718 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_fin_wait_2()
13719 tp->ts_recent = to->to_tsval; in rack_do_fin_wait_2()
13722 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN flag in rack_do_fin_wait_2()
13723 * is on (half-synchronized state), then queue data for later in rack_do_fin_wait_2()
13727 if (tp->t_flags & TF_NEEDSYN) { in rack_do_fin_wait_2()
13730 } else if (tp->t_flags & TF_ACKNOW) { in rack_do_fin_wait_2()
13732 ((struct tcp_rack *)tp->t_fb_ptr)->r_wanted_output = 1; in rack_do_fin_wait_2()
13745 if (sbavail(&so->so_snd)) { in rack_do_fin_wait_2()
13747 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_fin_wait_2()
13760 rack->r_ctl.rack_rs.rs_flags = RACK_RTT_EMPTY; in rack_clear_rate_sample()
13761 rack->r_ctl.rack_rs.rs_rtt_cnt = 0; in rack_clear_rate_sample()
13762 rack->r_ctl.rack_rs.rs_rtt_tot = 0; in rack_clear_rate_sample()
13773 if (rack->rc_hybrid_mode && in rack_set_pace_segments()
13774 (rack->r_ctl.rc_pace_max_segs != 0) && in rack_set_pace_segments()
13776 (rack->r_ctl.rc_last_sft != NULL)) { in rack_set_pace_segments()
13777 rack->r_ctl.rc_last_sft->hybrid_flags &= ~TCP_HYBRID_PACING_SETMSS; in rack_set_pace_segments()
13781 orig_min = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
13782 orig_max = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
13783 user_max = ctf_fixed_maxseg(tp) * rack->rc_user_set_max_segs; in rack_set_pace_segments()
13784 if (ctf_fixed_maxseg(tp) != rack->r_ctl.rc_pace_min_segs) in rack_set_pace_segments()
13786 rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp); in rack_set_pace_segments()
13787 if (rack->use_fixed_rate || rack->rc_force_max_seg) { in rack_set_pace_segments()
13788 if (user_max != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
13791 if (rack->rc_force_max_seg) { in rack_set_pace_segments()
13792 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
13793 } else if (rack->use_fixed_rate) { in rack_set_pace_segments()
13795 if ((rack->r_ctl.crte == NULL) || in rack_set_pace_segments()
13796 (bw_est != rack->r_ctl.crte->rate)) { in rack_set_pace_segments()
13797 rack->r_ctl.rc_pace_max_segs = user_max; in rack_set_pace_segments()
13803 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_set_pace_segments()
13808 rack->r_ctl.rc_pace_min_segs); in rack_set_pace_segments()
13809 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor( in rack_set_pace_segments()
13811 rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_set_pace_segments()
13813 } else if (rack->rc_always_pace) { in rack_set_pace_segments()
13814 if (rack->r_ctl.gp_bw || in rack_set_pace_segments()
13815 rack->r_ctl.init_rate) { in rack_set_pace_segments()
13820 orig = rack->r_ctl.rc_pace_max_segs; in rack_set_pace_segments()
13827 rack->r_ctl.rc_pace_max_segs = rack_get_pacing_len(rack, in rack_set_pace_segments()
13829 ctf_fixed_maxseg(rack->rc_tp)); in rack_set_pace_segments()
13831 rack->r_ctl.rc_pace_max_segs = rack->r_ctl.rc_pace_min_segs; in rack_set_pace_segments()
13832 if (orig != rack->r_ctl.rc_pace_max_segs) in rack_set_pace_segments()
13834 } else if ((rack->r_ctl.gp_bw == 0) && in rack_set_pace_segments()
13835 (rack->r_ctl.rc_pace_max_segs == 0)) { in rack_set_pace_segments()
13841 rack->r_ctl.rc_pace_max_segs = rc_init_window(rack); in rack_set_pace_segments()
13844 if (rack->r_ctl.rc_pace_max_segs > PACE_MAX_IP_BYTES) { in rack_set_pace_segments()
13846 rack->r_ctl.rc_pace_max_segs = PACE_MAX_IP_BYTES; in rack_set_pace_segments()
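/*
 * Illustrative sketch: the sizing above starts from the user burst
 * (maxseg * rc_user_set_max_segs) and, whichever path chose the value,
 * finally clamps it to what one IP datagram can carry.  65535 stands in
 * for PACE_MAX_IP_BYTES.
 */
#include <stdint.h>

#define PACE_MAX_IP_BYTES_SK 65535u

static uint32_t
pace_burst_bytes(uint32_t maxseg, uint32_t user_segs)
{
	uint64_t want = (uint64_t)maxseg * user_segs;

	if (want > PACE_MAX_IP_BYTES_SK)
		want = PACE_MAX_IP_BYTES_SK;
	return ((uint32_t)want);
}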
13866 if (rack->r_is_v6) { in rack_init_fsb_block()
13867 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); in rack_init_fsb_block()
13868 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
13869 if (tp->t_port) { in rack_init_fsb_block()
13870 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
13872 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
13873 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
13874 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
13875 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
13878 rack->r_ctl.fsb.th = (struct tcphdr *)(ip6 + 1); in rack_init_fsb_block()
13879 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
13881 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
13882 tp->t_port, in rack_init_fsb_block()
13883 ip6, rack->r_ctl.fsb.th); in rack_init_fsb_block()
13884 rack->r_ctl.fsb.hoplimit = in6_selecthlim(rack->rc_inp, NULL); in rack_init_fsb_block()
13889 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr); in rack_init_fsb_block()
13890 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_init_fsb_block()
13891 if (tp->t_port) { in rack_init_fsb_block()
13892 rack->r_ctl.fsb.tcp_ip_hdr_len += sizeof(struct udphdr); in rack_init_fsb_block()
13894 udp->uh_sport = htons(V_tcp_udp_tunneling_port); in rack_init_fsb_block()
13895 udp->uh_dport = tp->t_port; in rack_init_fsb_block()
13896 rack->r_ctl.fsb.udp = udp; in rack_init_fsb_block()
13897 rack->r_ctl.fsb.th = (struct tcphdr *)(udp + 1); in rack_init_fsb_block()
13900 rack->r_ctl.fsb.udp = NULL; in rack_init_fsb_block()
13901 rack->r_ctl.fsb.th = (struct tcphdr *)(ip + 1); in rack_init_fsb_block()
13903 tcpip_fillheaders(rack->rc_inp, in rack_init_fsb_block()
13904 tp->t_port, in rack_init_fsb_block()
13905 ip, rack->r_ctl.fsb.th); in rack_init_fsb_block()
13906 rack->r_ctl.fsb.hoplimit = tptoinpcb(tp)->inp_ip_ttl; in rack_init_fsb_block()
13909 rack->r_ctl.fsb.recwin = lmin(lmax(sbspace(&tptosocket(tp)->so_rcv), 0), in rack_init_fsb_block()
13910 (long)TCP_MAXWIN << tp->rcv_scale); in rack_init_fsb_block()
13911 rack->r_fsb_inited = 1; in rack_init_fsb_block()
13922 …rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + sizeof(struct ud… in rack_init_fsb()
13924 rack->r_ctl.fsb.tcp_ip_hdr_len = sizeof(struct tcpiphdr) + sizeof(struct udphdr); in rack_init_fsb()
13926 rack->r_ctl.fsb.tcp_ip_hdr = malloc(rack->r_ctl.fsb.tcp_ip_hdr_len, in rack_init_fsb()
13928 if (rack->r_ctl.fsb.tcp_ip_hdr == NULL) { in rack_init_fsb()
13931 rack->r_fsb_inited = 0; in rack_init_fsb()
13940 * 20 - Initial round setup in rack_log_hystart_event()
13941 * 21 - Rack declares a new round. in rack_log_hystart_event()
13945 tp = rack->rc_tp; in rack_log_hystart_event()
13951 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_log_hystart_event()
13952 log.u_bbr.flex2 = rack->r_ctl.roundends; in rack_log_hystart_event()
13954 log.u_bbr.flex4 = tp->snd_max; in rack_log_hystart_event()
13957 log.u_bbr.cur_del_rate = rack->rc_tp->t_sndbytes; in rack_log_hystart_event()
13958 log.u_bbr.delRate = rack->rc_tp->t_snd_rxt_bytes; in rack_log_hystart_event()
13960 &tptosocket(tp)->so_rcv, in rack_log_hystart_event()
13961 &tptosocket(tp)->so_snd, in rack_log_hystart_event()
13970 rack->rack_deferred_inited = 1; in rack_deferred_init()
13971 rack->r_ctl.roundends = tp->snd_max; in rack_deferred_init()
13972 rack->r_ctl.rc_high_rwnd = tp->snd_wnd; in rack_deferred_init()
13973 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_deferred_init()
13985 * fixed rate pacing, or just bursting rack. in rack_init_retransmit_value()
13987 * 1 - Use full sized retransmits i.e. limit in rack_init_retransmit_value()
13991 * 2 - Use pacer min granularity as a guide to in rack_init_retransmit_value()
13999 * 0 - The rack default 1 MSS (anything not 0/1/2 in rack_init_retransmit_value()
14004 rack->full_size_rxt = 1; in rack_init_retransmit_value()
14005 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
14007 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14008 rack->shape_rxt_to_pacing_min = 1; in rack_init_retransmit_value()
14010 rack->full_size_rxt = 0; in rack_init_retransmit_value()
14011 rack->shape_rxt_to_pacing_min = 0; in rack_init_retransmit_value()
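/*
 * Illustrative sketch: the three retransmit sizing policies selected
 * above, per the comment's 0/1/2 encoding -- 1 sends full-sized
 * (pacer-limited) retransmits, 2 shapes them to the pacer's minimum
 * granularity, anything else keeps the one-MSS default.
 */
struct rxt_policy_sk { int full_size_rxt; int shape_to_pacing_min; };

static struct rxt_policy_sk
rxt_policy_from_ctl(int ctl)
{
	struct rxt_policy_sk p = { 0, 0 };	/* default: one MSS at a time */

	if (ctl == 1)
		p.full_size_rxt = 1;
	else if (ctl == 2)
		p.shape_to_pacing_min = 1;
	return (p);
}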
14021 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_chg_info()
14044 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_chg_query()
14045 switch (reqr->req) { in rack_chg_query()
14047 if ((reqr->req_param == tp->snd_max) || in rack_chg_query()
14048 (tp->snd_max == tp->snd_una)){ in rack_chg_query()
14052 rsm = tqhash_find(rack->r_ctl.tqh, reqr->req_param); in rack_chg_query()
14054 /* Can't find that seq -- unlikely */ in rack_chg_query()
14057 reqr->sendmap_start = rsm->r_start; in rack_chg_query()
14058 reqr->sendmap_end = rsm->r_end; in rack_chg_query()
14059 reqr->sendmap_send_cnt = rsm->r_rtr_cnt; in rack_chg_query()
14060 reqr->sendmap_fas = rsm->r_fas; in rack_chg_query()
14061 if (reqr->sendmap_send_cnt > SNDMAP_NRTX) in rack_chg_query()
14062 reqr->sendmap_send_cnt = SNDMAP_NRTX; in rack_chg_query()
14063 for(i=0; i<reqr->sendmap_send_cnt; i++) in rack_chg_query()
14064 reqr->sendmap_time[i] = rsm->r_tim_lastsent[i]; in rack_chg_query()
14065 reqr->sendmap_ack_arrival = rsm->r_ack_arrival; in rack_chg_query()
14066 reqr->sendmap_flags = rsm->r_flags & SNDMAP_MASK; in rack_chg_query()
14067 reqr->sendmap_r_rtr_bytes = rsm->r_rtr_bytes; in rack_chg_query()
14068 reqr->sendmap_dupacks = rsm->r_dupack; in rack_chg_query()
14070 rsm->r_start, in rack_chg_query()
14071 rsm->r_end, in rack_chg_query()
14072 rsm->r_flags); in rack_chg_query()
14076 if (rack->r_ctl.rc_hpts_flags == 0) { in rack_chg_query()
14080 reqr->timer_hpts_flags = rack->r_ctl.rc_hpts_flags; in rack_chg_query()
14081 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_chg_query()
14082 reqr->timer_pacing_to = rack->r_ctl.rc_last_output_to; in rack_chg_query()
14084 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_chg_query()
14085 reqr->timer_timer_exp = rack->r_ctl.rc_timer_exp; in rack_chg_query()
14088 rack->r_ctl.rc_hpts_flags, in rack_chg_query()
14089 rack->r_ctl.rc_last_output_to, in rack_chg_query()
14090 rack->r_ctl.rc_timer_exp); in rack_chg_query()
14095 reqr->rack_num_dsacks = rack->r_ctl.num_dsack; in rack_chg_query()
14096 reqr->rack_reorder_ts = rack->r_ctl.rc_reorder_ts; in rack_chg_query()
14098 reqr->rack_rxt_last_time = rack->r_ctl.rc_tlp_rxt_last_time; in rack_chg_query()
14099 reqr->rack_min_rtt = rack->r_ctl.rc_rack_min_rtt; in rack_chg_query()
14100 reqr->rack_rtt = rack->rc_rack_rtt; in rack_chg_query()
14101 reqr->rack_tmit_time = rack->r_ctl.rc_rack_tmit_time; in rack_chg_query()
14102 reqr->rack_srtt_measured = rack->rc_srtt_measure_made; in rack_chg_query()
14104 reqr->rack_sacked = rack->r_ctl.rc_sacked; in rack_chg_query()
14105 reqr->rack_holes_rxt = rack->r_ctl.rc_holes_rxt; in rack_chg_query()
14106 reqr->rack_prr_delivered = rack->r_ctl.rc_prr_delivered; in rack_chg_query()
14107 reqr->rack_prr_recovery_fs = rack->r_ctl.rc_prr_recovery_fs; in rack_chg_query()
14108 reqr->rack_prr_sndcnt = rack->r_ctl.rc_prr_sndcnt; in rack_chg_query()
14109 reqr->rack_prr_out = rack->r_ctl.rc_prr_out; in rack_chg_query()
14111 reqr->rack_tlp_out = rack->rc_tlp_in_progress; in rack_chg_query()
14112 reqr->rack_tlp_cnt_out = rack->r_ctl.rc_tlp_cnt_out; in rack_chg_query()
14113 if (rack->rc_in_persist) { in rack_chg_query()
14114 reqr->rack_time_went_idle = rack->r_ctl.rc_went_idle_time; in rack_chg_query()
14115 reqr->rack_in_persist = 1; in rack_chg_query()
14117 reqr->rack_time_went_idle = 0; in rack_chg_query()
14118 reqr->rack_in_persist = 0; in rack_chg_query()
14120 if (rack->r_wanted_output) in rack_chg_query()
14121 reqr->rack_wanted_output = 1; in rack_chg_query()
14123 reqr->rack_wanted_output = 0; in rack_chg_query()
14127 return (-EINVAL); in rack_chg_query()
14146 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_switch_failed()
14148 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_switch_failed()
14149 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14151 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_switch_failed()
14152 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_switch_failed()
14153 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_switch_failed()
14154 if (tp->t_in_hpts > IHPTS_NONE) { in rack_switch_failed()
14159 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_switch_failed()
14160 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) { in rack_switch_failed()
14161 toval = rack->r_ctl.rc_last_output_to - cts; in rack_switch_failed()
14166 } else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) { in rack_switch_failed()
14167 if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) { in rack_switch_failed()
14168 toval = rack->r_ctl.rc_timer_exp - cts; in rack_switch_failed()
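/*
 * Illustrative sketch: when the stack switch fails above, the absolute
 * pacer/timer deadline is turned back into a relative timeout using a
 * wraparound-safe compare; an already-expired deadline re-arms
 * immediately.
 */
#include <stdint.h>

static uint32_t
remaining_usec(uint32_t deadline, uint32_t now)
{
	if ((int32_t)(deadline - now) > 0)	/* TSTMP_GT-style compare */
		return (deadline - now);
	return (0);				/* overdue: fire at once */
}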
14186 * to not refer to tp->t_fb_ptr. This has the old rack in rack_init_outstanding()
14192 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init_outstanding()
14200 rsm->r_no_rtt_allowed = 1; in rack_init_outstanding()
14201 rsm->r_tim_lastsent[0] = rack_to_usec_ts(&rack->r_ctl.act_rcv_time); in rack_init_outstanding()
14202 rsm->r_rtr_cnt = 1; in rack_init_outstanding()
14203 rsm->r_rtr_bytes = 0; in rack_init_outstanding()
14204 if (tp->t_flags & TF_SENTFIN) in rack_init_outstanding()
14205 rsm->r_flags |= RACK_HAS_FIN; in rack_init_outstanding()
14206 rsm->r_end = tp->snd_max; in rack_init_outstanding()
14207 if (tp->snd_una == tp->iss) { in rack_init_outstanding()
14209 rsm->r_flags |= RACK_HAS_SYN; in rack_init_outstanding()
14210 rsm->r_start = tp->iss; in rack_init_outstanding()
14211 rsm->r_end = rsm->r_start + (tp->snd_max - tp->snd_una); in rack_init_outstanding()
14213 rsm->r_start = tp->snd_una; in rack_init_outstanding()
14214 rsm->r_dupack = 0; in rack_init_outstanding()
14215 if (rack->rc_inp->inp_socket->so_snd.sb_mb != NULL) { in rack_init_outstanding()
14216 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, 0, &rsm->soff); in rack_init_outstanding()
14217 if (rsm->m) { in rack_init_outstanding()
14218 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14219 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14221 rsm->orig_m_len = 0; in rack_init_outstanding()
14222 rsm->orig_t_space = 0; in rack_init_outstanding()
14226 * This can happen if we have a stand-alone FIN or in rack_init_outstanding()
14229 rsm->m = NULL; in rack_init_outstanding()
14230 rsm->orig_m_len = 0; in rack_init_outstanding()
14231 rsm->orig_t_space = 0; in rack_init_outstanding()
14232 rsm->soff = 0; in rack_init_outstanding()
14235 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14240 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14242 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14243 rsm->r_in_tmap = 1; in rack_init_outstanding()
14250 at = tp->snd_una; in rack_init_outstanding()
14251 while (at != tp->snd_max) { in rack_init_outstanding()
14255 if ((*tp->t_fb->tfb_chg_query)(tp, &qr) == 0) in rack_init_outstanding()
14267 rsm->r_dupack = qr.sendmap_dupacks; in rack_init_outstanding()
14268 rsm->r_start = qr.sendmap_start; in rack_init_outstanding()
14269 rsm->r_end = qr.sendmap_end; in rack_init_outstanding()
14271 rsm->r_fas = qr.sendmap_end; in rack_init_outstanding()
14273 rsm->r_fas = rsm->r_start - tp->snd_una; in rack_init_outstanding()
14279 rsm->r_flags = qr.sendmap_flags & SNDMAP_MASK; in rack_init_outstanding()
14280 rsm->r_rtr_bytes = qr.sendmap_r_rtr_bytes; in rack_init_outstanding()
14281 rsm->r_rtr_cnt = qr.sendmap_send_cnt; in rack_init_outstanding()
14282 rsm->r_ack_arrival = qr.sendmap_ack_arrival; in rack_init_outstanding()
14283 for (i=0 ; i<rsm->r_rtr_cnt; i++) in rack_init_outstanding()
14284 rsm->r_tim_lastsent[i] = qr.sendmap_time[i]; in rack_init_outstanding()
14285 rsm->m = sbsndmbuf(&rack->rc_inp->inp_socket->so_snd, in rack_init_outstanding()
14286 (rsm->r_start - tp->snd_una), &rsm->soff); in rack_init_outstanding()
14287 if (rsm->m) { in rack_init_outstanding()
14288 rsm->orig_m_len = rsm->m->m_len; in rack_init_outstanding()
14289 rsm->orig_t_space = M_TRAILINGROOM(rsm->m); in rack_init_outstanding()
14291 rsm->orig_m_len = 0; in rack_init_outstanding()
14292 rsm->orig_t_space = 0; in rack_init_outstanding()
14295 if ((insret = tqhash_insert(rack->r_ctl.tqh, rsm)) != 0) { in rack_init_outstanding()
14300 (void)tqhash_insert(rack->r_ctl.tqh, rsm); in rack_init_outstanding()
14302 if ((rsm->r_flags & RACK_ACKED) == 0) { in rack_init_outstanding()
14303 TAILQ_FOREACH(ersm, &rack->r_ctl.rc_tmap, r_tnext) { in rack_init_outstanding()
14304 if (ersm->r_tim_lastsent[(ersm->r_rtr_cnt-1)] > in rack_init_outstanding()
14305 rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)]) { in rack_init_outstanding()
14312 rsm->r_in_tmap = 1; in rack_init_outstanding()
14317 if (rsm->r_in_tmap == 0) { in rack_init_outstanding()
14321 TAILQ_INSERT_TAIL(&rack->r_ctl.rc_tmap, rsm, r_tnext); in rack_init_outstanding()
14322 rsm->r_in_tmap = 1; in rack_init_outstanding()
14325 if ((rack->r_ctl.rc_sacklast == NULL) || in rack_init_outstanding()
14326 (SEQ_GT(rsm->r_end, rack->r_ctl.rc_sacklast->r_end))) { in rack_init_outstanding()
14327 rack->r_ctl.rc_sacklast = rsm; in rack_init_outstanding()
14331 rsm->r_start, in rack_init_outstanding()
14332 rsm->r_end, in rack_init_outstanding()
14333 rsm->r_flags); in rack_init_outstanding()
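/*
 * Illustrative sketch of the rebuild loop above: starting at snd_una,
 * repeatedly ask the old stack (via its tfb_chg_query hook, abstracted
 * as query() here) for the entry covering 'at', copy it into a fresh
 * send map, and resume from that entry's end until snd_max is covered.
 */
#include <stdint.h>

struct qrange_sk { uint32_t start, end; };

typedef int (*query_fn)(uint32_t at, struct qrange_sk *out);

static int
rebuild_outstanding(uint32_t snd_una, uint32_t snd_max, query_fn query)
{
	struct qrange_sk qr;
	uint32_t at = snd_una;
	int copied = 0;

	while (at != snd_max) {
		if (query(at, &qr) == 0)
			break;		/* old stack has no entry here */
		/* ... allocate an rsm and copy the queried fields ... */
		copied++;
		at = qr.end;
	}
	return (copied);
}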
14354 * will be tp->t_fb_ptr. If it's a stack switch that in rack_init()
14358 if (ptr == &tp->t_fb_ptr) in rack_init()
14374 rack->r_ctl.tqh = malloc(sizeof(struct tailq_hash), M_TCPFSB, M_NOWAIT); in rack_init()
14375 if (rack->r_ctl.tqh == NULL) { in rack_init()
14379 tqhash_init(rack->r_ctl.tqh); in rack_init()
14380 TAILQ_INIT(&rack->r_ctl.rc_free); in rack_init()
14381 TAILQ_INIT(&rack->r_ctl.rc_tmap); in rack_init()
14382 rack->rc_tp = tp; in rack_init()
14383 rack->rc_inp = inp; in rack_init()
14385 rack->r_is_v6 = (inp->inp_vflag & INP_IPV6) != 0; in rack_init()
14391 * reached where pacing is on (gp_ready/fixed enabled). in rack_init()
14393 * is enabled or we enable fixed) then we will set these in rack_init()
14402 rack->rc_new_rnd_needed = 1; in rack_init()
14403 rack->r_ctl.rc_split_limit = V_tcp_map_split_limit; in rack_init()
14406 rack->r_ctl.rc_reorder_fade = rack_reorder_fade; in rack_init()
14407 rack->rc_allow_data_af_clo = rack_ignore_data_after_close; in rack_init()
14408 rack->r_ctl.rc_tlp_threshold = rack_tlp_thresh; in rack_init()
14410 rack->rc_pace_to_cwnd = 1; in rack_init()
14412 rack->r_ctl.rc_user_set_min_segs = rack_pacing_min_seg; in rack_init()
14414 rack->use_rack_rr = 1; in rack_init()
14416 rack->rc_pace_dnd = 1; in rack_init()
14419 tp->t_delayed_ack = 1; in rack_init()
14421 tp->t_delayed_ack = 0; in rack_init()
14424 tp->t_flags2 |= TF2_TCP_ACCOUNTING; in rack_init()
14427 rack->r_ctl.pcm_i.cnt_alloc = RACK_DEFAULT_PCM_ARRAY; in rack_init()
14428 sz = (sizeof(struct rack_pcm_stats) * rack->r_ctl.pcm_i.cnt_alloc); in rack_init()
14429 rack->r_ctl.pcm_s = malloc(sz,M_TCPPCM, M_NOWAIT); in rack_init()
14430 if (rack->r_ctl.pcm_s == NULL) { in rack_init()
14431 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_init()
14433 rack->r_ctl.rack_per_upper_bound_ss = (uint8_t)rack_per_upper_bound_ss; in rack_init()
14434 rack->r_ctl.rack_per_upper_bound_ca = (uint8_t)rack_per_upper_bound_ca; in rack_init()
14436 rack->rack_enable_scwnd = 1; in rack_init()
14437 rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor; in rack_init()
14438 rack->rc_user_set_max_segs = rack_hptsi_segments; in rack_init()
14439 rack->r_ctl.max_reduction = rack_max_reduce; in rack_init()
14440 rack->rc_force_max_seg = 0; in rack_init()
14441 TAILQ_INIT(&rack->r_ctl.opt_list); in rack_init()
14442 rack->r_ctl.rc_saved_beta = V_newreno_beta_ecn; in rack_init()
14443 rack->r_ctl.rc_saved_beta_ecn = V_newreno_beta_ecn; in rack_init()
14445 rack->rack_hibeta = 1; in rack_init()
14448 rack->r_ctl.rc_saved_beta = rack_hibeta_setting; in rack_init()
14449 rack->r_ctl.saved_hibeta = rack_hibeta_setting; in rack_init()
14452 rack->r_ctl.saved_hibeta = 50; in rack_init()
14457 * will never have all 1's in ms :-) in rack_init()
14459 rack->r_ctl.last_tm_mark = 0xffffffffffffffff; in rack_init()
14460 rack->r_ctl.rc_reorder_shift = rack_reorder_thresh; in rack_init()
14461 rack->r_ctl.rc_pkt_delay = rack_pkt_delay; in rack_init()
14462 rack->r_ctl.rc_tlp_cwnd_reduce = rack_lower_cwnd_at_tlp; in rack_init()
14463 rack->r_ctl.rc_lowest_us_rtt = 0xffffffff; in rack_init()
14464 rack->r_ctl.rc_highest_us_rtt = 0; in rack_init()
14465 rack->r_ctl.bw_rate_cap = rack_bw_rate_cap; in rack_init()
14466 rack->pcm_enabled = rack_pcm_is_enabled; in rack_init()
14468 rack->r_ctl.fillcw_cap = rack_fillcw_bw_cap; in rack_init()
14469 rack->r_ctl.timer_slop = TICKS_2_USEC(tcp_rexmit_slop); in rack_init()
14471 rack->r_use_cmp_ack = 1; in rack_init()
14473 rack->rack_no_prr = 1; in rack_init()
14475 rack->rc_gp_no_rec_chg = 1; in rack_init()
14477 rack->r_ctl.pacing_method |= RACK_REG_PACING; in rack_init()
14478 rack->rc_always_pace = 1; in rack_init()
14479 if (rack->rack_hibeta) in rack_init()
14482 rack->rc_always_pace = 0; in rack_init()
14483 if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) in rack_init()
14484 rack->r_mbuf_queue = 1; in rack_init()
14486 rack->r_mbuf_queue = 0; in rack_init()
14489 rack->r_limit_scw = 1; in rack_init()
14491 rack->r_limit_scw = 0; in rack_init()
14493 rack->rc_labc = V_tcp_abc_l_var; in rack_init()
14495 rack->r_use_hpts_min = 1; in rack_init()
14496 if (tp->snd_una != 0) { in rack_init()
14497 rack->rc_sendvars_notset = 0; in rack_init()
14505 * syn-cache. This means none of the in rack_init()
14509 rack->rc_sendvars_notset = 1; in rack_init()
14512 rack->r_ctl.rc_rate_sample_method = rack_rate_sample_method; in rack_init()
14513 rack->rack_tlp_threshold_use = rack_tlp_threshold_use; in rack_init()
14514 rack->r_ctl.rc_prr_sendalot = rack_send_a_lot_in_prr; in rack_init()
14515 rack->r_ctl.rc_min_to = rack_min_to; in rack_init()
14516 microuptime(&rack->r_ctl.act_rcv_time); in rack_init()
14517 rack->r_ctl.rack_per_of_gp_ss = rack_per_of_gp_ss; in rack_init()
14519 rack->r_up_only = 1; in rack_init()
14522 rack->rc_gp_dyn_mul = 1; in rack_init()
14524 rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul; in rack_init()
14526 rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca; in rack_init()
14527 rack->r_ctl.rack_per_of_gp_rec = rack_per_of_gp_rec; in rack_init()
14529 rack->rc_skip_timely = 1; in rack_init()
14531 if (rack->rc_skip_timely) { in rack_init()
14532 rack->r_ctl.rack_per_of_gp_rec = 90; in rack_init()
14533 rack->r_ctl.rack_per_of_gp_ca = 100; in rack_init()
14534 rack->r_ctl.rack_per_of_gp_ss = 250; in rack_init()
14536 rack->r_ctl.rack_per_of_gp_probertt = rack_per_of_gp_probertt; in rack_init()
14537 rack->r_ctl.rc_tlp_rxt_last_time = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); in rack_init()
14538 rack->r_ctl.last_rcv_tstmp_for_rtt = tcp_tv_to_msec(&rack->r_ctl.act_rcv_time); in rack_init()
14540 setup_time_filter_small(&rack->r_ctl.rc_gp_min_rtt, FILTER_TYPE_MIN, in rack_init()
14542 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_init()
14543 rack->r_ctl.rc_lower_rtt_us_cts = us_cts; in rack_init()
14544 rack->r_ctl.rc_time_of_last_probertt = us_cts; in rack_init()
14545 rack->r_ctl.rc_went_idle_time = us_cts; in rack_init()
14546 rack->r_ctl.rc_time_probertt_starts = 0; in rack_init()
14548 rack->r_ctl.gp_rnd_thresh = rack_rnd_cnt_req & 0xff; in rack_init()
14550 rack->r_ctl.gate_to_fs = 1; in rack_init()
14551 rack->r_ctl.gp_gain_req = rack_gp_gain_req; in rack_init()
14557 rack->rc_rack_tmr_std_based = 1; in rack_init()
14561 rack->rc_rack_use_dsack = 1; in rack_init()
14565 rack->r_ctl.req_measurements = rack_req_measurements; in rack_init()
14567 rack->r_ctl.req_measurements = 1; in rack_init()
14569 rack->rack_hdw_pace_ena = 1; in rack_init()
14571 rack->r_rack_hw_rate_caps = 1; in rack_init()
14573 rack->rack_rec_nonrxt_use_cr = 1; in rack_init()
14582 tp->t_ccv.flags |= CCF_HYSTART_ALLOWED; in rack_init()
14584 tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND; in rack_init()
14586 tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH; in rack_init()
14594 tp->t_flags &= ~TF_GPUTINPROG; in rack_init()
14595 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14596 (tp->t_state != TCPS_TIME_WAIT)) { in rack_init()
14601 if (SEQ_GT(tp->snd_max, tp->iss)) in rack_init()
14602 snt = tp->snd_max - tp->iss; in rack_init()
14613 if (tp->snd_cwnd < iwin) in rack_init()
14614 tp->snd_cwnd = iwin; in rack_init()
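/*
 * Note: the initial-window fixup above only ever raises snd_cwnd
 * to iwin; a cwnd that is already larger (e.g. inherited across a
 * stack switch) is left alone.
 */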
14635 tp->snd_ssthresh = 0xffffffff; in rack_init()
14646 if ((tp->t_state != TCPS_CLOSED) && in rack_init()
14647 (tp->t_state != TCPS_TIME_WAIT) && in rack_init()
14649 (tp->snd_una != tp->snd_max)) { in rack_init()
14658 if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack) in rack_init()
14659 tp->t_flags2 |= TF2_SUPPORTS_MBUFQ; in rack_init()
14661 tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ; in rack_init()
14662 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_init()
14663 tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_init()
14669 * they are non-zero. They are kept with a 5 in rack_init()
14674 rack_log_hystart_event(rack, rack->r_ctl.roundends, 20); in rack_init()
14675 if ((tptoinpcb(tp)->inp_flags & INP_DROPPED) == 0) { in rack_init()
14677 if (tp->t_fb->tfb_chg_query == NULL) { in rack_init()
14687 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14689 rack->r_ctl.rc_reorder_ts = qr.rack_reorder_ts; in rack_init()
14690 rack->r_ctl.num_dsack = qr.rack_num_dsacks; in rack_init()
14691 rack->r_ctl.rc_tlp_rxt_last_time = qr.rack_rxt_last_time; in rack_init()
14692 rack->r_ctl.rc_rack_min_rtt = qr.rack_min_rtt; in rack_init()
14693 rack->rc_rack_rtt = qr.rack_rtt; in rack_init()
14694 rack->r_ctl.rc_rack_tmit_time = qr.rack_tmit_time; in rack_init()
14695 rack->r_ctl.rc_sacked = qr.rack_sacked; in rack_init()
14696 rack->r_ctl.rc_holes_rxt = qr.rack_holes_rxt; in rack_init()
14697 rack->r_ctl.rc_prr_delivered = qr.rack_prr_delivered; in rack_init()
14698 rack->r_ctl.rc_prr_recovery_fs = qr.rack_prr_recovery_fs; in rack_init()
14699 rack->r_ctl.rc_prr_sndcnt = qr.rack_prr_sndcnt; in rack_init()
14700 rack->r_ctl.rc_prr_out = qr.rack_prr_out; in rack_init()
14702 rack->rc_tlp_in_progress = 1; in rack_init()
14703 rack->r_ctl.rc_tlp_cnt_out = qr.rack_tlp_cnt_out; in rack_init()
14705 rack->rc_tlp_in_progress = 0; in rack_init()
14706 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_init()
14709 rack->rc_srtt_measure_made = 1; in rack_init()
14711 rack->r_ctl.rc_went_idle_time = qr.rack_time_went_idle; in rack_init()
14713 if (rack->r_ctl.rc_scw) { in rack_init()
14714 tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index); in rack_init()
14715 rack->rack_scwnd_is_idle = 1; in rack_init()
14718 rack->r_ctl.persist_lost_ends = 0; in rack_init()
14719 rack->probe_not_answered = 0; in rack_init()
14720 rack->forced_ack = 0; in rack_init()
14721 tp->t_rxtshift = 0; in rack_init()
14722 rack->rc_in_persist = 1; in rack_init()
14723 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_init()
14724 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_init()
14727 rack->r_wanted_output = 1; in rack_init()
14736 ret = (*tp->t_fb->tfb_chg_query)(tp, &qr); in rack_init()
14739 * non-zero return means we have a timer('s) in rack_init()
14745 rack->r_ctl.rc_hpts_flags = qr.timer_hpts_flags; in rack_init()
14747 rack->r_ctl.rc_last_output_to = qr.timer_pacing_to; in rack_init()
14749 tov = qr.timer_pacing_to - us_cts; in rack_init()
14754 rack->r_ctl.rc_timer_exp = qr.timer_timer_exp; in rack_init()
14757 tov = qr.timer_timer_exp - us_cts; in rack_init()
14763 rack->r_ctl.rc_hpts_flags, in rack_init()
14764 rack->r_ctl.rc_last_output_to, in rack_init()
14765 rack->r_ctl.rc_timer_exp); in rack_init()
14770 rack_log_hpts_diag(rack, us_cts, &diag, &rack->r_ctl.act_rcv_time); in rack_init()
14774 rack_log_rtt_shrinks(rack, us_cts, tp->t_rxtcur, in rack_init()
14783 if ((tp->t_state == TCPS_CLOSED) || in rack_handoff_ok()
14784 (tp->t_state == TCPS_LISTEN)) { in rack_handoff_ok()
14788 if ((tp->t_state == TCPS_SYN_SENT) || in rack_handoff_ok()
14789 (tp->t_state == TCPS_SYN_RECEIVED)) { in rack_handoff_ok()
14796 if ((tp->t_flags & TF_SENTFIN) && ((tp->snd_max - tp->snd_una) > 1)) { in rack_handoff_ok()
14809 if ((tp->t_flags & TF_SACK_PERMIT) || rack_sack_not_required){ in rack_handoff_ok()
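/*
 * Handoff summary: switching to rack is fine from CLOSED/LISTEN and
 * from the SYN states, is refused once a FIN has been sent with more
 * than one byte still unacked, and otherwise requires the peer to
 * have negotiated SACK unless rack_sack_not_required overrides it
 * (rack then falls back to dup-ack counting).
 */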
14823 if (tp->t_fb_ptr) { in rack_fini()
14829 tp->t_flags &= ~TF_FORCEDATA; in rack_fini()
14830 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_fini()
14839 if (rack->r_ctl.rc_scw) { in rack_fini()
14842 if (rack->r_limit_scw) in rack_fini()
14843 limit = max(1, rack->r_ctl.rc_lowest_us_rtt); in rack_fini()
14846 tcp_shared_cwnd_free_full(tp, rack->r_ctl.rc_scw, in rack_fini()
14847 rack->r_ctl.rc_scw_index, in rack_fini()
14849 rack->r_ctl.rc_scw = NULL; in rack_fini()
14852 if (rack->r_ctl.fsb.tcp_ip_hdr) { in rack_fini()
14853 free(rack->r_ctl.fsb.tcp_ip_hdr, M_TCPFSB); in rack_fini()
14854 rack->r_ctl.fsb.tcp_ip_hdr = NULL; in rack_fini()
14855 rack->r_ctl.fsb.th = NULL; in rack_fini()
14857 if (rack->rc_always_pace == 1) { in rack_fini()
14861 while (!TAILQ_EMPTY(&rack->r_ctl.opt_list)) { in rack_fini()
14864 dol = TAILQ_FIRST(&rack->r_ctl.opt_list); in rack_fini()
14865 TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next); in rack_fini()
14869 if (rack->r_ctl.crte != NULL) { in rack_fini()
14870 tcp_rel_pacing_rate(rack->r_ctl.crte, tp); in rack_fini()
14871 rack->rack_hdrw_pacing = 0; in rack_fini()
14872 rack->r_ctl.crte = NULL; in rack_fini()
14879 * get each one and free it like a cum-ack would and in rack_fini()
14882 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
14884 tqhash_remove(rack->r_ctl.tqh, rsm, REMOVE_TYPE_CUMACK); in rack_fini()
14885 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
14887 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
14889 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
14891 TAILQ_REMOVE(&rack->r_ctl.rc_free, rsm, r_tnext); in rack_fini()
14892 rack->r_ctl.rc_num_maps_alloced--; in rack_fini()
14893 rack->rc_free_cnt--; in rack_fini()
14896 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
14898 if (rack->r_ctl.pcm_s != NULL) { in rack_fini()
14899 free(rack->r_ctl.pcm_s, M_TCPPCM); in rack_fini()
14900 rack->r_ctl.pcm_s = NULL; in rack_fini()
14901 rack->r_ctl.pcm_i.cnt_alloc = 0; in rack_fini()
14902 rack->r_ctl.pcm_i.cnt = 0; in rack_fini()
14904 if ((rack->r_ctl.rc_num_maps_alloced > 0) && in rack_fini()
14911 log.u_bbr.flex1 = rack->r_ctl.rc_num_maps_alloced; in rack_fini()
14912 log.u_bbr.flex2 = rack->rc_free_cnt; in rack_fini()
14914 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fini()
14915 rsm = tqhash_min(rack->r_ctl.tqh); in rack_fini()
14917 rsm = TAILQ_FIRST(&rack->r_ctl.rc_free); in rack_fini()
14924 KASSERT((rack->r_ctl.rc_num_maps_alloced == 0), in rack_fini()
14927 rack->r_ctl.rc_num_maps_alloced)); in rack_fini()
14928 rack->rc_free_cnt = 0; in rack_fini()
14929 free(rack->r_ctl.tqh, M_TCPFSB); in rack_fini()
14930 rack->r_ctl.tqh = NULL; in rack_fini()
14931 uma_zfree(rack_pcb_zone, tp->t_fb_ptr); in rack_fini()
14932 tp->t_fb_ptr = NULL; in rack_fini()
14935 tp->snd_nxt = tp->snd_max; in rack_fini()
14941 if ((rack->r_state == TCPS_CLOSED) && (tp->t_state != TCPS_CLOSED)) { in rack_set_state()
14942 rack->r_is_v6 = (tptoinpcb(tp)->inp_vflag & INP_IPV6) != 0; in rack_set_state()
14944 switch (tp->t_state) { in rack_set_state()
14946 rack->r_state = TCPS_SYN_SENT; in rack_set_state()
14947 rack->r_substate = rack_do_syn_sent; in rack_set_state()
14950 rack->r_state = TCPS_SYN_RECEIVED; in rack_set_state()
14951 rack->r_substate = rack_do_syn_recv; in rack_set_state()
14955 rack->r_state = TCPS_ESTABLISHED; in rack_set_state()
14956 rack->r_substate = rack_do_established; in rack_set_state()
14959 rack->r_state = TCPS_CLOSE_WAIT; in rack_set_state()
14960 rack->r_substate = rack_do_close_wait; in rack_set_state()
14964 rack->r_state = TCPS_FIN_WAIT_1; in rack_set_state()
14965 rack->r_substate = rack_do_fin_wait_1; in rack_set_state()
14969 rack->r_state = TCPS_CLOSING; in rack_set_state()
14970 rack->r_substate = rack_do_closing; in rack_set_state()
14974 rack->r_state = TCPS_LAST_ACK; in rack_set_state()
14975 rack->r_substate = rack_do_lastack; in rack_set_state()
14978 rack->r_state = TCPS_FIN_WAIT_2; in rack_set_state()
14979 rack->r_substate = rack_do_fin_wait_2; in rack_set_state()
14987 if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) in rack_set_state()
14988 rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP; in rack_set_state()
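/*
 * rack_set_state() is a plain dispatch table: r_state shadows
 * tp->t_state and r_substate is pointed at the matching input
 * handler (rack_do_syn_sent, rack_do_established, ...), which
 * rack_do_segment_nounlock() later invokes via rack->r_substate.
 */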
15004 tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK; in rack_timer_audit()
15005 if (tcp_in_hpts(rack->rc_tp) == 0) { in rack_timer_audit()
15011 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15015 if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT)) in rack_timer_audit()
15017 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in rack_timer_audit()
15018 if (((rsm == NULL) || (tp->t_state < TCPS_ESTABLISHED)) && in rack_timer_audit()
15025 if (tp->t_flags & TF_DELACK) { in rack_timer_audit()
15030 rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_timer_audit()
15031 (tp->t_state <= TCPS_CLOSING)) && in rack_timer_audit()
15033 (tp->snd_max == tp->snd_una)) { in rack_timer_audit()
15038 if (SEQ_GT(tp->snd_max, tp->snd_una) && in rack_timer_audit()
15062 if (tcp_in_hpts(rack->rc_tp)) { in rack_timer_audit()
15063 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_timer_audit()
15067 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_timer_audit()
15068 rack->r_early = 1; in rack_timer_audit()
15069 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_timer_audit()
15071 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_timer_audit()
15073 tcp_hpts_remove(rack->rc_tp); in rack_timer_audit()
15075 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_timer_audit()
15083 if ((SEQ_LT(tp->snd_wl1, seq) || in rack_do_win_updates()
15084 (tp->snd_wl1 == seq && (SEQ_LT(tp->snd_wl2, ack) || in rack_do_win_updates()
15085 (tp->snd_wl2 == ack && tiwin > tp->snd_wnd))))) { in rack_do_win_updates()
15087 if ((tp->snd_wl2 == ack) && (tiwin > tp->snd_wnd)) in rack_do_win_updates()
15089 tp->snd_wnd = tiwin; in rack_do_win_updates()
15091 tp->snd_wl1 = seq; in rack_do_win_updates()
15092 tp->snd_wl2 = ack; in rack_do_win_updates()
15093 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15094 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15095 rack->r_wanted_output = 1; in rack_do_win_updates()
15096 } else if ((tp->snd_wl2 == ack) && (tiwin < tp->snd_wnd)) { in rack_do_win_updates()
15097 tp->snd_wnd = tiwin; in rack_do_win_updates()
15099 tp->snd_wl1 = seq; in rack_do_win_updates()
15100 tp->snd_wl2 = ack; in rack_do_win_updates()
15105 if (tp->snd_wnd > tp->max_sndwnd) in rack_do_win_updates()
15106 tp->max_sndwnd = tp->snd_wnd; in rack_do_win_updates()
15108 if ((rack->rc_in_persist != 0) && in rack_do_win_updates()
15109 (tp->snd_wnd >= min((rack->r_ctl.rc_high_rwnd/2), in rack_do_win_updates()
15110 rack->r_ctl.rc_pace_min_segs))) { in rack_do_win_updates()
15114 if ((rack->rc_in_persist == 0) && in rack_do_win_updates()
15115 (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), rack->r_ctl.rc_pace_min_segs)) && in rack_do_win_updates()
15116 TCPS_HAVEESTABLISHED(tp->t_state) && in rack_do_win_updates()
15117 ((tp->snd_max == tp->snd_una) || rack->rc_has_collapsed) && in rack_do_win_updates()
15118 sbavail(&tptosocket(tp)->so_snd) && in rack_do_win_updates()
15119 (sbavail(&tptosocket(tp)->so_snd) > tp->snd_wnd)) { in rack_do_win_updates()
15126 rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, ack); in rack_do_win_updates()
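/*
 * The acceptance test above is the SND.WL1/SND.WL2 rule of RFC 793,
 * i.e. take the advertised window only when
 *   SEQ_LT(snd_wl1, seq) ||
 *   (snd_wl1 == seq && SEQ_LT(snd_wl2, ack)) ||
 *   (snd_wl1 == seq && snd_wl2 == ack && tiwin > snd_wnd)
 * with max_sndwnd tracking the peak. Persist is exited once the
 * window recovers to min(rc_high_rwnd/2, rc_pace_min_segs) and
 * entered when it drops below that with more data queued than the
 * window can hold.
 */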
15134 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_input_packet()
15147 if (SEQ_GT(ae->ack, tp->snd_una)) { in rack_log_input_packet()
15148 tcp_req = tcp_req_find_req_for_seq(tp, (ae->ack-1)); in rack_log_input_packet()
15150 tcp_req = tcp_req_find_req_for_seq(tp, ae->ack); in rack_log_input_packet()
15154 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_input_packet()
15155 if (rack->rack_no_prr == 0) in rack_log_input_packet()
15156 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_log_input_packet()
15159 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_log_input_packet()
15161 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_log_input_packet()
15162 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_log_input_packet()
15163 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_log_input_packet()
15164 log.u_bbr.inflight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_log_input_packet()
15165 log.u_bbr.pkts_out = tp->t_maxseg; in rack_log_input_packet()
15166 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_log_input_packet()
15168 log.u_bbr.lost = ae->flags; in rack_log_input_packet()
15171 if (ae->flags & TSTMP_HDWR) { in rack_log_input_packet()
15174 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15175 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15179 } else if (ae->flags & TSTMP_LRO) { in rack_log_input_packet()
15182 ts.tv_sec = ae->timestamp / 1000000000; in rack_log_input_packet()
15183 ts.tv_nsec = ae->timestamp % 1000000000; in rack_log_input_packet()
15190 log.u_bbr.delRate = ae->timestamp; in rack_log_input_packet()
15192 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_log_input_packet()
15194 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_log_input_packet()
15196 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_log_input_packet()
15200 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_log_input_packet()
15202 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_log_input_packet()
15203 log.u_bbr.rttProp = tcp_req->timestamp; in rack_log_input_packet()
15204 log.u_bbr.cur_del_rate = tcp_req->start; in rack_log_input_packet()
15205 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_log_input_packet()
15209 log.u_bbr.bw_inuse = tcp_req->end; in rack_log_input_packet()
15211 log.u_bbr.flex6 = tcp_req->start_seq; in rack_log_input_packet()
15212 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_log_input_packet()
15214 log.u_bbr.epoch = tcp_req->end_seq; in rack_log_input_packet()
15220 th->th_seq = ae->seq; in rack_log_input_packet()
15221 th->th_ack = ae->ack; in rack_log_input_packet()
15222 th->th_win = ae->win; in rack_log_input_packet()
15224 th->th_sport = inp->inp_fport; in rack_log_input_packet()
15225 th->th_dport = inp->inp_lport; in rack_log_input_packet()
15226 tcp_set_flags(th, ae->flags); in rack_log_input_packet()
15228 if (ae->flags & HAS_TSTMP) { in rack_log_input_packet()
15232 th->th_off = ((sizeof(struct tcphdr) + TCPOLEN_TSTAMP_APPA) >> 2); in rack_log_input_packet()
15242 val = htonl(ae->ts_value); in rack_log_input_packet()
15245 val = htonl(ae->ts_echo); in rack_log_input_packet()
15249 th->th_off = (sizeof(struct tcphdr) >> 2); in rack_log_input_packet()
15258 * snd_una was advanced and then un-advancing it so that the in rack_log_input_packet()
15261 if (tp->snd_una != high_seq) { in rack_log_input_packet()
15262 orig_snd_una = tp->snd_una; in rack_log_input_packet()
15263 tp->snd_una = high_seq; in rack_log_input_packet()
15268 &tptosocket(tp)->so_rcv, in rack_log_input_packet()
15269 &tptosocket(tp)->so_snd, TCP_LOG_IN, 0, in rack_log_input_packet()
15272 tp->snd_una = orig_snd_una; in rack_log_input_packet()
15283 * A persist or keep-alive was forced out, update our in rack_handle_probe_response()
15285 * When a subsequent keep-alive or persist times out in rack_handle_probe_response()
15291 * will clear the probe_not_answered flag i.e. cum-ack in rack_handle_probe_response()
15295 rack->forced_ack = 0; in rack_handle_probe_response()
15296 rack->rc_tp->t_rxtshift = 0; in rack_handle_probe_response()
15297 if ((rack->rc_in_persist && in rack_handle_probe_response()
15298 (tiwin == rack->rc_tp->snd_wnd)) || in rack_handle_probe_response()
15299 (rack->rc_in_persist == 0)) { in rack_handle_probe_response()
15314 if (rack->rc_in_persist) in rack_handle_probe_response()
15316 us_rtt = us_cts - rack->r_ctl.forced_ack_ts; in rack_handle_probe_response()
15319 if (rack->probe_not_answered == 0) { in rack_handle_probe_response()
15341 rack->r_ctl.roundends = tp->snd_max; in rack_new_round_starts()
15342 rack->rc_new_rnd_needed = 0; in rack_new_round_starts()
15343 rack_log_hystart_event(rack, tp->snd_max, 4); in rack_new_round_starts()
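/*
 * A "round" is one window of data: roundends is pinned at snd_max
 * when the round starts and the ack paths later declare a new round
 * once the cumulative ack reaches that mark.
 */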
15351 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pcm()
15358 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pcm()
15364 log.u_bbr.flex5 = rack->r_ctl.pcm_idle_rounds; in rack_log_pcm()
15365 log.u_bbr.bbr_substate = rack->pcm_needed; in rack_log_pcm()
15367 log.u_bbr.bbr_substate |= rack->pcm_in_progress; in rack_log_pcm()
15369 log.u_bbr.bbr_substate |= rack->pcm_enabled; /* bits are NIE for Needed, Inprogress, Enabled */ in rack_log_pcm()
15370 (void)tcp_log_event(rack->rc_tp, NULL, NULL, NULL, TCP_PCM_MEASURE, ERRNO_UNK, in rack_log_pcm()
15385 rack->r_ctl.current_round++; in rack_new_round_setup()
15387 rack->rc_new_rnd_needed = 1; in rack_new_round_setup()
15388 if ((rack->pcm_enabled == 1) && in rack_new_round_setup()
15389 (rack->pcm_needed == 0) && in rack_new_round_setup()
15390 (rack->pcm_in_progress == 0)) { in rack_new_round_setup()
15398 rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round; in rack_new_round_setup()
15399 if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) { in rack_new_round_setup()
15400 rack->pcm_needed = 1; in rack_new_round_setup()
15401 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
15403 …rack_log_pcm(rack, 3, rack->r_ctl.last_pcm_round, rack_pcm_every_n_rounds, rack->r_ctl.current_rou… in rack_new_round_setup()
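/*
 * PCM scheduling, roughly: once the rounds elapsed since the last
 * measurement plus pcm_idle_rounds reach rack_pcm_every_n_rounds,
 * pcm_needed is latched so a path-capacity measurement runs on an
 * upcoming round.
 */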
15406 if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) { in rack_new_round_setup()
15408 if (CC_ALGO(tp)->newround != NULL) { in rack_new_round_setup()
15409 CC_ALGO(tp)->newround(&tp->t_ccv, rack->r_ctl.current_round); in rack_new_round_setup()
15414 * that we are not just pushing on slow-start and just in rack_new_round_setup()
15416 * boost in b/w during the initial slow-start. in rack_new_round_setup()
15418 if (rack->dgp_on && in rack_new_round_setup()
15419 (rack->rc_initial_ss_comp == 0) && in rack_new_round_setup()
15420 (tp->snd_cwnd < tp->snd_ssthresh) && in rack_new_round_setup()
15421 (rack->r_ctl.num_measurements >= RACK_REQ_AVG) && in rack_new_round_setup()
15422 (rack->r_ctl.gp_rnd_thresh > 0) && in rack_new_round_setup()
15423 ((rack->r_ctl.current_round - rack->r_ctl.last_rnd_of_gp_rise) >= rack->r_ctl.gp_rnd_thresh)) { in rack_new_round_setup()
15433 rack->rc_initial_ss_comp = 1; in rack_new_round_setup()
15435 if (tcp_bblogging_on(rack->rc_tp)) { in rack_new_round_setup()
15441 log.u_bbr.flex1 = rack->r_ctl.current_round; in rack_new_round_setup()
15442 log.u_bbr.flex2 = rack->r_ctl.last_rnd_of_gp_rise; in rack_new_round_setup()
15443 log.u_bbr.flex3 = rack->r_ctl.gp_rnd_thresh; in rack_new_round_setup()
15444 log.u_bbr.flex4 = rack->r_ctl.gate_to_fs; in rack_new_round_setup()
15445 log.u_bbr.flex5 = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15450 if ((rack->r_ctl.gate_to_fs == 1) && in rack_new_round_setup()
15451 (tp->snd_cwnd > rack->r_ctl.ss_hi_fs)) { in rack_new_round_setup()
15452 tp->snd_cwnd = rack->r_ctl.ss_hi_fs; in rack_new_round_setup()
15454 tp->snd_ssthresh = tp->snd_cwnd - 1; in rack_new_round_setup()
15456 rack->r_fast_output = 0; in rack_new_round_setup()
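/*
 * Pulling snd_ssthresh to snd_cwnd - 1 is the trick that ends the
 * initial slow-start without a loss: the cwnd < ssthresh test can
 * no longer select slow-start, so the connection falls into
 * congestion avoidance, with the optional gate_to_fs clamp first
 * capping cwnd back to ss_hi_fs.
 */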
15467 * A) It moves the cum-ack forward in rack_do_compressed_ack_processing()
15468 * B) It is behind the cum-ack. in rack_do_compressed_ack_processing()
15469 * C) It is a window-update ack. in rack_do_compressed_ack_processing()
15470 * D) It is a dup-ack. in rack_do_compressed_ack_processing()
15472 * Note that we can have between 1 -> TCP_COMP_ACK_ENTRIES in rack_do_compressed_ack_processing()
15497 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_compressed_ack_processing()
15498 if (rack->gp_ready && in rack_do_compressed_ack_processing()
15499 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) in rack_do_compressed_ack_processing()
15502 if (rack->r_state != tp->t_state) in rack_do_compressed_ack_processing()
15504 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15505 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_compressed_ack_processing()
15514 bytes = tp->gput_ack - tp->gput_seq; in rack_do_compressed_ack_processing()
15515 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_compressed_ack_processing()
15516 bytes += tp->gput_seq - tp->snd_una; in rack_do_compressed_ack_processing()
15517 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_compressed_ack_processing()
15523 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_compressed_ack_processing()
15524 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_compressed_ack_processing()
15525 tp->gput_seq, in rack_do_compressed_ack_processing()
15527 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_compressed_ack_processing()
15531 to->to_flags = 0; in rack_do_compressed_ack_processing()
15532 KASSERT((m->m_len >= sizeof(struct tcp_ackent)), in rack_do_compressed_ack_processing()
15533 ("tp:%p m_cmpack:%p with invalid len:%u", tp, m, m->m_len)); in rack_do_compressed_ack_processing()
15534 cnt = m->m_len / sizeof(struct tcp_ackent); in rack_do_compressed_ack_processing()
15536 high_seq = tp->snd_una; in rack_do_compressed_ack_processing()
15537 the_win = tp->snd_wnd; in rack_do_compressed_ack_processing()
15538 win_seq = tp->snd_wl1; in rack_do_compressed_ack_processing()
15539 win_upd_ack = tp->snd_wl2; in rack_do_compressed_ack_processing()
15542 rack->r_ctl.rc_rcvtime = cts; in rack_do_compressed_ack_processing()
15544 if ((rack->rc_gp_dyn_mul) && in rack_do_compressed_ack_processing()
15545 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15546 (rack->rc_always_pace)) { in rack_do_compressed_ack_processing()
15556 if (ae->flags & TH_FIN) in rack_do_compressed_ack_processing()
15565 tiwin = ae->win << tp->snd_scale; in rack_do_compressed_ack_processing()
15566 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_compressed_ack_processing()
15567 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_compressed_ack_processing()
15569 if (SEQ_LT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15571 ae->ack_val_set = ACK_BEHIND; in rack_do_compressed_ack_processing()
15572 } else if (SEQ_GT(ae->ack, high_seq)) { in rack_do_compressed_ack_processing()
15574 ae->ack_val_set = ACK_CUMACK; in rack_do_compressed_ack_processing()
15575 } else if ((tiwin == the_win) && (rack->rc_in_persist == 0)){ in rack_do_compressed_ack_processing()
15577 ae->ack_val_set = ACK_DUPACK; in rack_do_compressed_ack_processing()
15580 ae->ack_val_set = ACK_RWND; in rack_do_compressed_ack_processing()
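/*
 * Classification order above: an ack below high_seq is BEHIND, one
 * above it advances the cum-ack, and one dead-on high_seq is a
 * dup-ack only while the window is unchanged and we are not in
 * persist; otherwise it is treated as a window update.
 */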
15583 rack_log_input_packet(tp, rack, ae, ae->ack_val_set, high_seq); in rack_do_compressed_ack_processing()
15585 if (ae->flags & HAS_TSTMP) { in rack_do_compressed_ack_processing()
15587 to->to_flags = TOF_TS; in rack_do_compressed_ack_processing()
15588 ae->ts_echo -= tp->ts_offset; in rack_do_compressed_ack_processing()
15589 to->to_tsecr = ae->ts_echo; in rack_do_compressed_ack_processing()
15590 to->to_tsval = ae->ts_value; in rack_do_compressed_ack_processing()
15596 if (TSTMP_GT(ae->ts_echo, ms_cts)) in rack_do_compressed_ack_processing()
15597 to->to_tsecr = 0; in rack_do_compressed_ack_processing()
15598 if (tp->ts_recent && in rack_do_compressed_ack_processing()
15599 TSTMP_LT(ae->ts_value, tp->ts_recent)) { in rack_do_compressed_ack_processing()
15600 if (ctf_ts_check_ac(tp, (ae->flags & 0xff))) { in rack_do_compressed_ack_processing()
15604 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15605 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15612 if (SEQ_LEQ(ae->seq, tp->last_ack_sent) && in rack_do_compressed_ack_processing()
15613 SEQ_LEQ(tp->last_ack_sent, ae->seq)) { in rack_do_compressed_ack_processing()
15614 tp->ts_recent_age = tcp_ts_getticks(); in rack_do_compressed_ack_processing()
15615 tp->ts_recent = ae->ts_value; in rack_do_compressed_ack_processing()
15619 to->to_flags = 0; in rack_do_compressed_ack_processing()
15622 if (tp->t_idle_reduce && in rack_do_compressed_ack_processing()
15623 (tp->snd_max == tp->snd_una) && in rack_do_compressed_ack_processing()
15624 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_compressed_ack_processing()
15628 tp->t_rcvtime = ticks; in rack_do_compressed_ack_processing()
15630 if (tcp_ecn_input_segment(tp, ae->flags, 0, in rack_do_compressed_ack_processing()
15631 tcp_packets_this_ack(tp, ae->ack), in rack_do_compressed_ack_processing()
15632 ae->codepoint)) in rack_do_compressed_ack_processing()
15633 rack_cong_signal(tp, CC_ECN, ae->ack, __LINE__); in rack_do_compressed_ack_processing()
15636 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15637 tp->tcp_cnt_counters[ae->ack_val_set]++; in rack_do_compressed_ack_processing()
15644 * The non-compressed path through the code has this in rack_do_compressed_ack_processing()
15651 if (ae->ack_val_set == ACK_BEHIND) { in rack_do_compressed_ack_processing()
15654 * or it could be a keep-alive or persists in rack_do_compressed_ack_processing()
15656 if (SEQ_LT(ae->ack, tp->snd_una) && (sbspace(&so->so_rcv) > segsiz)) { in rack_do_compressed_ack_processing()
15657 rack->r_ctl.rc_reorder_ts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_do_compressed_ack_processing()
15658 if (rack->r_ctl.rc_reorder_ts == 0) in rack_do_compressed_ack_processing()
15659 rack->r_ctl.rc_reorder_ts = 1; in rack_do_compressed_ack_processing()
15661 } else if (ae->ack_val_set == ACK_DUPACK) { in rack_do_compressed_ack_processing()
15663 rack_strike_dupack(rack, ae->ack); in rack_do_compressed_ack_processing()
15664 } else if (ae->ack_val_set == ACK_RWND) { in rack_do_compressed_ack_processing()
15666 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15667 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15668 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15669 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15670 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15672 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15674 if (rack->forced_ack) { in rack_do_compressed_ack_processing()
15676 tcp_tv_to_usec(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15681 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15682 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15687 if (SEQ_GT(ae->ack, tp->snd_max)) { in rack_do_compressed_ack_processing()
15692 if ((tp->t_flags & TF_ACKNOW) == 0) { in rack_do_compressed_ack_processing()
15694 if (tp->t_flags & TF_ACKNOW) in rack_do_compressed_ack_processing()
15695 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15700 if (tiwin != tp->snd_wnd) { in rack_do_compressed_ack_processing()
15701 win_upd_ack = ae->ack; in rack_do_compressed_ack_processing()
15702 win_seq = ae->seq; in rack_do_compressed_ack_processing()
15708 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15709 tp->tcp_cnt_counters[CNT_OF_ACKS_IN] += (((ae->ack - high_seq) + segsiz - 1) / segsiz); in rack_do_compressed_ack_processing()
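/*
 * The accounting above rounds up to whole segments: with
 * segsiz = 1448, an ack advancing 4000 bytes counts as
 * (4000 + 1447) / 1448 = 3 acked segments.
 */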
15712 high_seq = ae->ack; in rack_do_compressed_ack_processing()
15714 if ((ae->flags & TSTMP_LRO) || (ae->flags & TSTMP_HDWR)) { in rack_do_compressed_ack_processing()
15715 ts.tv_sec = ae->timestamp / 1000000000; in rack_do_compressed_ack_processing()
15716 ts.tv_nsec = ae->timestamp % 1000000000; in rack_do_compressed_ack_processing()
15717 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_compressed_ack_processing()
15718 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_compressed_ack_processing()
15720 rack->r_ctl.act_rcv_time = *tv; in rack_do_compressed_ack_processing()
15722 rack_process_to_cumack(tp, rack, ae->ack, cts, to, in rack_do_compressed_ack_processing()
15723 tcp_tv_to_lusec(&rack->r_ctl.act_rcv_time)); in rack_do_compressed_ack_processing()
15727 if (rack->rc_dsack_round_seen) { in rack_do_compressed_ack_processing()
15729 if (SEQ_GEQ(ae->ack, rack->r_ctl.dsack_round_end)) { in rack_do_compressed_ack_processing()
15731 rack->rc_dsack_round_seen = 0; in rack_do_compressed_ack_processing()
15742 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15743 tp->tcp_proc_time[ae->ack_val_set] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15744 if (ae->ack_val_set == ACK_CUMACK) in rack_do_compressed_ack_processing()
15745 tp->tcp_proc_time[CYC_HANDLE_MAP] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15754 if (SEQ_GT(tp->snd_max, high_seq) && (tp->snd_wnd < (tp->snd_max - high_seq))) { in rack_do_compressed_ack_processing()
15756 rack_collapsed_window(rack, (tp->snd_max - high_seq), high_seq, __LINE__); in rack_do_compressed_ack_processing()
15757 } else if (rack->rc_has_collapsed) in rack_do_compressed_ack_processing()
15759 if ((rack->r_collapse_point_valid) && in rack_do_compressed_ack_processing()
15760 (SEQ_GT(high_seq, rack->r_ctl.high_collapse_point))) in rack_do_compressed_ack_processing()
15761 rack->r_collapse_point_valid = 0; in rack_do_compressed_ack_processing()
15762 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
15775 if (SEQ_GEQ(high_seq, rack->r_ctl.roundends) && in rack_do_compressed_ack_processing()
15776 (rack->rc_new_rnd_needed == 0) && in rack_do_compressed_ack_processing()
15786 * since cum-ack moved forward. in rack_do_compressed_ack_processing()
15788 rack->probe_not_answered = 0; in rack_do_compressed_ack_processing()
15789 if (tp->t_flags & TF_NEEDSYN) { in rack_do_compressed_ack_processing()
15791 * T/TCP: Connection was half-synchronized, and our SYN has in rack_do_compressed_ack_processing()
15793 * to non-starred state, increment snd_una for ACK of SYN, in rack_do_compressed_ack_processing()
15796 tp->t_flags &= ~TF_NEEDSYN; in rack_do_compressed_ack_processing()
15797 tp->snd_una++; in rack_do_compressed_ack_processing()
15798 acked_amount = acked = (high_seq - tp->snd_una); in rack_do_compressed_ack_processing()
15800 if (acked > sbavail(&so->so_snd)) in rack_do_compressed_ack_processing()
15801 acked_amount = sbavail(&so->so_snd); in rack_do_compressed_ack_processing()
15802 if (IN_FASTRECOVERY(tp->t_flags) && in rack_do_compressed_ack_processing()
15803 (rack->rack_no_prr == 0)) in rack_do_compressed_ack_processing()
15805 if (IN_RECOVERY(tp->t_flags)) { in rack_do_compressed_ack_processing()
15806 if (SEQ_LT(high_seq, tp->snd_recover) && in rack_do_compressed_ack_processing()
15807 (SEQ_LT(high_seq, tp->snd_max))) { in rack_do_compressed_ack_processing()
15813 } else if ((rack->rto_from_rec == 1) && in rack_do_compressed_ack_processing()
15814 SEQ_GEQ(high_seq, tp->snd_recover)) { in rack_do_compressed_ack_processing()
15817 * and never re-entered recovery. The timeout(s) in rack_do_compressed_ack_processing()
15821 rack->rto_from_rec = 0; in rack_do_compressed_ack_processing()
15823 /* Handle the rack-log-ack part (sendmap) */ in rack_do_compressed_ack_processing()
15824 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
15826 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15827 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
15840 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
15843 if ((tp->t_flags & TF_PREVVALID) && in rack_do_compressed_ack_processing()
15844 ((tp->t_flags & TF_RCVD_TSTMP) == 0)) { in rack_do_compressed_ack_processing()
15845 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
15846 if (tp->t_rxtshift == 1 && in rack_do_compressed_ack_processing()
15847 (int)(ticks - tp->t_badrxtwin) < 0) in rack_do_compressed_ack_processing()
15863 p_cwnd = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_do_compressed_ack_processing()
15865 p_cwnd += tp->snd_cwnd; in rack_do_compressed_ack_processing()
15868 if (post_recovery && (tp->snd_cwnd > p_cwnd)) { in rack_do_compressed_ack_processing()
15869 /* Must be non-newreno (cubic) getting too ahead of itself */ in rack_do_compressed_ack_processing()
15870 tp->snd_cwnd = p_cwnd; in rack_do_compressed_ack_processing()
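/*
 * p_cwnd was snapshotted above as the pre-ack snd_cwnd plus one
 * segment; if exiting recovery let the CC module (cubic being the
 * non-newreno case noted above) inflate cwnd beyond that, it is
 * clamped so cwnd grows at most one segment across the exit.
 */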
15873 mfree = sbcut_locked(&so->so_snd, acked_amount); in rack_do_compressed_ack_processing()
15874 tp->snd_una = high_seq; in rack_do_compressed_ack_processing()
15876 rack_adjust_sendmap_head(rack, &so->so_snd); in rack_do_compressed_ack_processing()
15878 rack_log_wakeup(tp, rack, &so->so_snd, acked, 2); in rack_do_compressed_ack_processing()
15883 tp->t_acktime = ticks; in rack_do_compressed_ack_processing()
15884 rack_log_progress_event(rack, tp, tp->t_acktime, in rack_do_compressed_ack_processing()
15887 tp->t_rxtshift = 0; in rack_do_compressed_ack_processing()
15888 RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp), in rack_do_compressed_ack_processing()
15889 rack_rto_min, rack_rto_max, rack->r_ctl.timer_slop); in rack_do_compressed_ack_processing()
15890 rack->rc_tlp_in_progress = 0; in rack_do_compressed_ack_processing()
15891 rack->r_ctl.rc_tlp_cnt_out = 0; in rack_do_compressed_ack_processing()
15893 if (SEQ_GT(tp->snd_una, tp->snd_recover)) in rack_do_compressed_ack_processing()
15894 tp->snd_recover = tp->snd_una; in rack_do_compressed_ack_processing()
15895 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) in rack_do_compressed_ack_processing()
15896 tp->snd_nxt = tp->snd_max; in rack_do_compressed_ack_processing()
15901 if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RXT) in rack_do_compressed_ack_processing()
15902 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
15903 tp->snd_wl2 = high_seq; in rack_do_compressed_ack_processing()
15904 tp->t_dupacks = 0; in rack_do_compressed_ack_processing()
15906 (rack->use_fixed_rate == 0) && in rack_do_compressed_ack_processing()
15907 (rack->in_probe_rtt == 0) && in rack_do_compressed_ack_processing()
15908 rack->rc_gp_dyn_mul && in rack_do_compressed_ack_processing()
15909 rack->rc_always_pace) { in rack_do_compressed_ack_processing()
15913 if (tp->snd_una == tp->snd_max) { in rack_do_compressed_ack_processing()
15914 tp->t_flags &= ~TF_PREVVALID; in rack_do_compressed_ack_processing()
15915 rack->r_ctl.retran_during_recovery = 0; in rack_do_compressed_ack_processing()
15916 rack->rc_suspicious = 0; in rack_do_compressed_ack_processing()
15917 rack->r_ctl.dsack_byte_cnt = 0; in rack_do_compressed_ack_processing()
15918 rack->r_ctl.rc_went_idle_time = tcp_get_usecs(NULL); in rack_do_compressed_ack_processing()
15919 if (rack->r_ctl.rc_went_idle_time == 0) in rack_do_compressed_ack_processing()
15920 rack->r_ctl.rc_went_idle_time = 1; in rack_do_compressed_ack_processing()
15922 if (sbavail(&tptosocket(tp)->so_snd) == 0) in rack_do_compressed_ack_processing()
15923 tp->t_acktime = 0; in rack_do_compressed_ack_processing()
15925 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15926 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
15927 sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una); in rack_do_compressed_ack_processing()
15928 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15929 (sbavail(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
15930 (tp->t_flags2 & TF2_DROP_AF_DATA)) { in rack_do_compressed_ack_processing()
15936 rack_timer_cancel(tp, rack, rack->r_ctl.rc_rcvtime, __LINE__); in rack_do_compressed_ack_processing()
15937 /* tcp_close will kill the inp pre-log the Reset */ in rack_do_compressed_ack_processing()
15942 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
15943 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15944 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
15957 * We would normally do drop-with-reset which would in rack_do_compressed_ack_processing()
15968 if ((sbused(&so->so_snd) == 0) && in rack_do_compressed_ack_processing()
15969 (tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_compressed_ack_processing()
15970 (tp->t_flags & TF_SENTFIN)) { in rack_do_compressed_ack_processing()
15978 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
15987 * We don't change to fin-wait-2 if we have our fin acked in rack_do_compressed_ack_processing()
15995 if (sbavail(&so->so_snd)) { in rack_do_compressed_ack_processing()
15996 rack->r_wanted_output = 1; in rack_do_compressed_ack_processing()
15998 rack_log_progress_event((struct tcp_rack *)tp->t_fb_ptr, in rack_do_compressed_ack_processing()
16007 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16008 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16009 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16020 switch(tp->t_state) { in rack_do_compressed_ack_processing()
16025 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16026 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16027 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16040 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16041 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16042 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16055 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16056 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16057 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16061 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { in rack_do_compressed_ack_processing()
16074 if (rack->r_fast_output) { in rack_do_compressed_ack_processing()
16083 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16084 tp->tcp_proc_time[ACK_CUMACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16085 tp->tcp_proc_time[CYC_HANDLE_ACK] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16092 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_compressed_ack_processing()
16093 tp->tcp_proc_time[ACK_RWND] += (rdstc - ts_val); in rack_do_compressed_ack_processing()
16110 if ((rack->r_wanted_output != 0) || in rack_do_compressed_ack_processing()
16111 (rack->r_fast_output != 0) || in rack_do_compressed_ack_processing()
16112 (tp->t_flags & TF_ACKNOW)) { in rack_do_compressed_ack_processing()
16122 if (tp->t_flags2 & TF2_HPTS_CALLS) in rack_do_compressed_ack_processing()
16123 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_compressed_ack_processing()
16128 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_compressed_ack_processing()
16150 * cts - is the current time from tv (caller gets ts) in microseconds. in rack_do_segment_nounlock()
16151 * ms_cts - is the current time from tv in milliseconds. in rack_do_segment_nounlock()
16152 * us_cts - is the time that LRO or hardware actually got the packet in microseconds. in rack_do_segment_nounlock()
16175 rack = (struct tcp_rack *)tp->t_fb_ptr; in rack_do_segment_nounlock()
16176 if (rack->rack_deferred_inited == 0) { in rack_do_segment_nounlock()
16187 * can happen in the non-LRO path where we are pacing and in rack_do_segment_nounlock()
16192 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16197 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16201 if ((rack->rc_always_pace == 1) && in rack_do_segment_nounlock()
16202 (rack->rc_ack_can_sendout_data == 0) && in rack_do_segment_nounlock()
16203 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) && in rack_do_segment_nounlock()
16204 (TSTMP_LT(us_cts, rack->r_ctl.rc_last_output_to))) { in rack_do_segment_nounlock()
16211 time_remaining = rack->r_ctl.rc_last_output_to - us_cts; in rack_do_segment_nounlock()
16212 if (rack->rc_tp->t_flags2 & TF2_DONT_SACK_QUEUE) { in rack_do_segment_nounlock()
16224 optlen = (th->th_off << 2) - sizeof(struct tcphdr); in rack_do_segment_nounlock()
16250 rack->r_ctl.gp_bw, in rack_do_segment_nounlock()
16256 if (m->m_flags & M_ACKCMP) { in rack_do_segment_nounlock()
16261 nsegs = m->m_pkthdr.lro_nsegs; in rack_do_segment_nounlock()
16268 if ((m->m_flags & M_TSTMP) || in rack_do_segment_nounlock()
16269 (m->m_flags & M_TSTMP_LRO)) { in rack_do_segment_nounlock()
16271 rack->r_ctl.act_rcv_time.tv_sec = ts.tv_sec; in rack_do_segment_nounlock()
16272 rack->r_ctl.act_rcv_time.tv_usec = ts.tv_nsec/1000; in rack_do_segment_nounlock()
16274 rack->r_ctl.act_rcv_time = *tv; in rack_do_segment_nounlock()
16278 * Unscale the window into a 32-bit value. For the SYN_SENT state in rack_do_segment_nounlock()
16281 tiwin = th->th_win << tp->snd_scale; in rack_do_segment_nounlock()
16297 * priority will bump us ... clock?) we will falsely add in in rack_do_segment_nounlock()
16310 (th->th_off << 2) - sizeof(struct tcphdr), in rack_do_segment_nounlock()
16312 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", in rack_do_segment_nounlock()
16314 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", in rack_do_segment_nounlock()
16316 if (tp->t_flags2 & TF2_PROC_SACK_PROHIBIT) { in rack_do_segment_nounlock()
16324 if ((tp->t_state >= TCPS_FIN_WAIT_1) && in rack_do_segment_nounlock()
16325 (tp->t_flags & TF_GPUTINPROG)) { in rack_do_segment_nounlock()
16334 bytes = tp->gput_ack - tp->gput_seq; in rack_do_segment_nounlock()
16335 if (SEQ_GT(tp->gput_seq, tp->snd_una)) in rack_do_segment_nounlock()
16336 bytes += tp->gput_seq - tp->snd_una; in rack_do_segment_nounlock()
16337 if (bytes > sbavail(&tptosocket(tp)->so_snd)) { in rack_do_segment_nounlock()
16343 rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/, in rack_do_segment_nounlock()
16344 rack->r_ctl.rc_gp_srtt /*flex1*/, in rack_do_segment_nounlock()
16345 tp->gput_seq, in rack_do_segment_nounlock()
16347 tp->t_flags &= ~TF_GPUTINPROG; in rack_do_segment_nounlock()
16350 if (tcp_bblogging_on(rack->rc_tp)) { in rack_do_segment_nounlock()
16356 if (SEQ_GT(th->th_ack, tp->snd_una)) { in rack_do_segment_nounlock()
16357 tcp_req = tcp_req_find_req_for_seq(tp, (th->th_ack-1)); in rack_do_segment_nounlock()
16359 tcp_req = tcp_req_find_req_for_seq(tp, th->th_ack); in rack_do_segment_nounlock()
16363 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_do_segment_nounlock()
16364 if (rack->rack_no_prr == 0) in rack_do_segment_nounlock()
16365 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_do_segment_nounlock()
16368 log.u_bbr.use_lt_bw = rack->r_ent_rec_ns; in rack_do_segment_nounlock()
16370 log.u_bbr.use_lt_bw |= rack->r_might_revert; in rack_do_segment_nounlock()
16371 log.u_bbr.flex2 = rack->r_ctl.rc_num_maps_alloced; in rack_do_segment_nounlock()
16372 log.u_bbr.bbr_state = rack->rc_free_cnt; in rack_do_segment_nounlock()
16373 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_do_segment_nounlock()
16374 log.u_bbr.pkts_out = rack->rc_tp->t_maxseg; in rack_do_segment_nounlock()
16375 log.u_bbr.flex3 = m->m_flags; in rack_do_segment_nounlock()
16376 log.u_bbr.flex4 = rack->r_ctl.rc_hpts_flags; in rack_do_segment_nounlock()
16383 if (m->m_flags & M_TSTMP) { in rack_do_segment_nounlock()
16389 } else if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment_nounlock()
16398 log.u_bbr.delRate = m->m_pkthdr.rcv_tstmp; in rack_do_segment_nounlock()
16400 log.u_bbr.applimited = tp->t_tcpreq_closed; in rack_do_segment_nounlock()
16402 log.u_bbr.applimited |= tp->t_tcpreq_open; in rack_do_segment_nounlock()
16404 log.u_bbr.applimited |= tp->t_tcpreq_req; in rack_do_segment_nounlock()
16408 log.u_bbr.pkt_epoch = (tcp_req->localtime / HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16410 log.u_bbr.delivered = (tcp_req->localtime % HPTS_USEC_IN_SEC); in rack_do_segment_nounlock()
16411 log.u_bbr.rttProp = tcp_req->timestamp; in rack_do_segment_nounlock()
16412 log.u_bbr.cur_del_rate = tcp_req->start; in rack_do_segment_nounlock()
16413 if (tcp_req->flags & TCP_TRK_TRACK_FLG_OPEN) { in rack_do_segment_nounlock()
16417 log.u_bbr.bw_inuse = tcp_req->end; in rack_do_segment_nounlock()
16419 log.u_bbr.flex6 = tcp_req->start_seq; in rack_do_segment_nounlock()
16420 if (tcp_req->flags & TCP_TRK_TRACK_FLG_COMP) { in rack_do_segment_nounlock()
16422 log.u_bbr.epoch = tcp_req->end_seq; in rack_do_segment_nounlock()
16426 TCP_LOG_EVENTP(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_IN, 0, in rack_do_segment_nounlock()
16431 rack->rc_ack_required = 0; in rack_do_segment_nounlock()
16440 * If a segment with the ACK-bit set arrives in the SYN-SENT state in rack_do_segment_nounlock()
16443 if ((tp->t_state == TCPS_SYN_SENT) && (thflags & TH_ACK) && in rack_do_segment_nounlock()
16444 (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { in rack_do_segment_nounlock()
16459 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS) && in rack_do_segment_nounlock()
16467 * Segment received on connection. Reset idle time and keep-alive in rack_do_segment_nounlock()
16471 if (tp->t_idle_reduce && in rack_do_segment_nounlock()
16472 (tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16473 (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur)) { in rack_do_segment_nounlock()
16477 tp->t_rcvtime = ticks; in rack_do_segment_nounlock()
16479 stats_voi_update_abs_ulong(tp->t_stats, VOI_TCP_FRWIN, tiwin); in rack_do_segment_nounlock()
16481 if (tiwin > rack->r_ctl.rc_high_rwnd) in rack_do_segment_nounlock()
16482 rack->r_ctl.rc_high_rwnd = tiwin; in rack_do_segment_nounlock()
16488 tcp_packets_this_ack(tp, th->th_ack), in rack_do_segment_nounlock()
16490 rack_cong_signal(tp, CC_ECN, th->th_ack, __LINE__); in rack_do_segment_nounlock()
16498 to.to_tsecr -= tp->ts_offset; in rack_do_segment_nounlock()
16502 if ((rack->r_rcvpath_rtt_up == 1) && in rack_do_segment_nounlock()
16504 (TSTMP_GEQ(to.to_tsecr, rack->r_ctl.last_rcv_tstmp_for_rtt))) { in rack_do_segment_nounlock()
16515 if (TSTMP_GT(cts, rack->r_ctl.last_time_of_arm_rcv)) in rack_do_segment_nounlock()
16516 rtt = (cts - rack->r_ctl.last_time_of_arm_rcv); in rack_do_segment_nounlock()
16517 rack->r_rcvpath_rtt_up = 0; in rack_do_segment_nounlock()
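/*
 * Receive-path RTT sample: when the echoed timestamp confirms the
 * probe that armed last_time_of_arm_rcv, the sample is simply
 * cts - last_time_of_arm_rcv and the measurement is disarmed.
 */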
16528 if (rack->r_state == 0) { in rack_do_segment_nounlock()
16530 KASSERT(rack->rc_inp != NULL, in rack_do_segment_nounlock()
16531 ("%s: rack->rc_inp unexpectedly NULL", __func__)); in rack_do_segment_nounlock()
16532 if (rack->rc_inp == NULL) { in rack_do_segment_nounlock()
16533 rack->rc_inp = inp; in rack_do_segment_nounlock()
16543 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { in rack_do_segment_nounlock()
16547 (tp->t_flags & TF_REQ_SCALE)) { in rack_do_segment_nounlock()
16548 tp->t_flags |= TF_RCVD_SCALE; in rack_do_segment_nounlock()
16549 tp->snd_scale = to.to_wscale; in rack_do_segment_nounlock()
16551 tp->t_flags &= ~TF_REQ_SCALE; in rack_do_segment_nounlock()
16556 tp->snd_wnd = th->th_win; in rack_do_segment_nounlock()
16559 (tp->t_flags & TF_REQ_TSTMP)) { in rack_do_segment_nounlock()
16560 tp->t_flags |= TF_RCVD_TSTMP; in rack_do_segment_nounlock()
16561 tp->ts_recent = to.to_tsval; in rack_do_segment_nounlock()
16562 tp->ts_recent_age = cts; in rack_do_segment_nounlock()
16564 tp->t_flags &= ~TF_REQ_TSTMP; in rack_do_segment_nounlock()
16568 if ((tp->t_flags & TF_SACK_PERMIT) && in rack_do_segment_nounlock()
16570 tp->t_flags &= ~TF_SACK_PERMIT; in rack_do_segment_nounlock()
16571 if (tp->t_flags & TF_FASTOPEN) { in rack_do_segment_nounlock()
16578 if ((inp->inp_vflag & INP_IPV6) != 0) in rack_do_segment_nounlock()
16591 * TF_SACK_PERMIT is set and the sack-not-required is clear. in rack_do_segment_nounlock()
16592 * The code now does do dup-ack counting so if you don't in rack_do_segment_nounlock()
16598 ((tp->t_flags & TF_SACK_PERMIT) == 0)) { in rack_do_segment_nounlock()
16600 (*tp->t_fb->tfb_tcp_do_segment)(tp, m, th, drop_hdrlen, in rack_do_segment_nounlock()
16608 sack_filter_clear(&rack->r_ctl.rack_sf, th->th_ack); in rack_do_segment_nounlock()
16612 us_cts = tcp_tv_to_usec(&rack->r_ctl.act_rcv_time); in rack_do_segment_nounlock()
16613 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16614 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16615 (rack->rc_always_pace)) { in rack_do_segment_nounlock()
16620 if ((rack->forced_ack) && in rack_do_segment_nounlock()
16626 * always. All other times (timers etc) we must have a rack-state in rack_do_segment_nounlock()
16629 rack->r_ctl.rc_rcvtime = cts; in rack_do_segment_nounlock()
16630 if (rack->r_state != tp->t_state) in rack_do_segment_nounlock()
16632 if (SEQ_GT(th->th_ack, tp->snd_una) && in rack_do_segment_nounlock()
16633 (rsm = tqhash_min(rack->r_ctl.tqh)) != NULL) in rack_do_segment_nounlock()
16635 prev_state = rack->r_state; in rack_do_segment_nounlock()
16637 ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && in rack_do_segment_nounlock()
16638 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || in rack_do_segment_nounlock()
16639 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq))) { in rack_do_segment_nounlock()
16641 tcp_trace_point(rack->rc_tp, TCP_TP_RESET_RCV); in rack_do_segment_nounlock()
16643 retval = (*rack->r_substate) (m, th, so, in rack_do_segment_nounlock()
16652 if ((rack->rc_gp_dyn_mul) && in rack_do_segment_nounlock()
16653 (rack->rc_always_pace) && in rack_do_segment_nounlock()
16654 (rack->use_fixed_rate == 0) && in rack_do_segment_nounlock()
16655 rack->in_probe_rtt && in rack_do_segment_nounlock()
16656 (rack->r_ctl.rc_time_probertt_starts == 0)) { in rack_do_segment_nounlock()
16663 if (rack->set_pacing_done_a_iw == 0) { in rack_do_segment_nounlock()
16665 if ((tp->snd_una - tp->iss) > (ctf_fixed_maxseg(tp) * 10)) { in rack_do_segment_nounlock()
16667 rack->set_pacing_done_a_iw = 1; in rack_do_segment_nounlock()
16678 * use of 0xf here since we only have 11 counters (0 - 0xa) and in rack_do_segment_nounlock()
16686 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_do_segment_nounlock()
16687 tp->tcp_proc_time[ack_val_set] += (crtsc - ts_val); in rack_do_segment_nounlock()
16692 if ((rack->r_wanted_output != 0) || in rack_do_segment_nounlock()
16693 (tp->t_flags & TF_ACKNOW) || in rack_do_segment_nounlock()
16694 (rack->r_fast_output != 0)) { in rack_do_segment_nounlock()
16707 } else if ((nxt_pkt == 0) && (tp->t_flags & TF_ACKNOW)) { in rack_do_segment_nounlock()
16711 (tcp_in_hpts(rack->rc_tp) == 0)) { in rack_do_segment_nounlock()
16721 if ((nxt_pkt == 0) && (tp->t_flags2 & TF2_HPTS_CALLS)) in rack_do_segment_nounlock()
16722 tp->t_flags2 &= ~TF2_HPTS_CALLS; in rack_do_segment_nounlock()
16733 if (SEQ_GEQ(tp->snd_una, rack->r_ctl.roundends) && in rack_do_segment_nounlock()
16734 (rack->rc_new_rnd_needed == 0) && in rack_do_segment_nounlock()
16740 rack_new_round_setup(tp, rack, tp->snd_una); in rack_do_segment_nounlock()
16743 ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) == 0) && in rack_do_segment_nounlock()
16744 (SEQ_GT(tp->snd_max, tp->snd_una) || in rack_do_segment_nounlock()
16745 (tp->t_flags & TF_DELACK) || in rack_do_segment_nounlock()
16746 ((V_tcp_always_keepalive || rack->rc_inp->inp_socket->so_options & SO_KEEPALIVE) && in rack_do_segment_nounlock()
16747 (tp->t_state <= TCPS_CLOSING)))) { in rack_do_segment_nounlock()
16749 if ((tp->snd_max == tp->snd_una) && in rack_do_segment_nounlock()
16750 ((tp->t_flags & TF_DELACK) == 0) && in rack_do_segment_nounlock()
16751 (tcp_in_hpts(rack->rc_tp)) && in rack_do_segment_nounlock()
16752 (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT)) { in rack_do_segment_nounlock()
16758 if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) { in rack_do_segment_nounlock()
16760 if (TSTMP_GT(rack->r_ctl.rc_last_output_to, us_cts)) { in rack_do_segment_nounlock()
16761 rack->r_early = 1; in rack_do_segment_nounlock()
16762 rack->r_ctl.rc_agg_early += (rack->r_ctl.rc_last_output_to - us_cts); in rack_do_segment_nounlock()
16765 rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT; in rack_do_segment_nounlock()
16782 rack_timer_audit(tp, rack, &so->so_snd); in rack_do_segment_nounlock()
16788 rack->r_wanted_output = 0; in rack_do_segment_nounlock()
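
/*
 * Illustrative sketch, not kernel code: the PACE_PKT_OUTPUT block
 * above credits "earliness" when output completes before its
 * scheduled deadline, so the next pacing slot can absorb the slack.
 * The struct and helper below are hypothetical condensations of the
 * r_early / rc_agg_early bookkeeping, assuming microsecond stamps.
 */
#include <stdint.h>

struct pacer_state {
	uint32_t last_output_to;	/* scheduled send deadline (usecs) */
	uint32_t agg_early;		/* accumulated earliness (usecs) */
	uint8_t	 early;			/* 1 = running ahead of schedule */
};

static void
pacer_note_completion(struct pacer_state *ps, uint32_t now)
{
	/* Wraparound-safe "deadline still in the future?" test. */
	if ((int32_t)(ps->last_output_to - now) > 0) {
		ps->early = 1;
		ps->agg_early += (ps->last_output_to - now);
	}
}
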
16804 if (!STAILQ_EMPTY(&tp->t_inqueue)) { in rack_do_segment()
16810 if (m->m_flags & M_TSTMP_LRO) { in rack_do_segment()
16829 /* Return the next guy to be re-transmitted */ in tcp_rack_output()
16830 if (tqhash_empty(rack->r_ctl.tqh)) { in tcp_rack_output()
16833 if (tp->t_flags & TF_SENTFIN) { in tcp_rack_output()
16838 rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap); in tcp_rack_output()
16839 if (rack->r_must_retran && rsm && (rsm->r_flags & RACK_MUST_RXT)) { in tcp_rack_output()
16842 if (rsm && ((rsm->r_flags & RACK_ACKED) == 0)) { in tcp_rack_output()
16850 if (((rack->rc_tp->t_flags & TF_SACK_PERMIT) == 0) && in tcp_rack_output()
16851 (rsm->r_dupack >= DUP_ACK_THRESHOLD)) { in tcp_rack_output()
16858 if (rsm->r_flags & RACK_ACKED) { in tcp_rack_output()
16861 if (((rsm->r_flags & RACK_SACK_PASSED) == 0) && in tcp_rack_output()
16862 (rsm->r_dupack < DUP_ACK_THRESHOLD)) { in tcp_rack_output()
16867 idx = rsm->r_rtr_cnt - 1; in tcp_rack_output()
16868 ts_low = (uint32_t)rsm->r_tim_lastsent[idx]; in tcp_rack_output()
16875 if ((tsused - ts_low) < thresh) { in tcp_rack_output()
16879 if ((rsm->r_dupack >= DUP_ACK_THRESHOLD) || in tcp_rack_output()
16880 ((rsm->r_flags & RACK_SACK_PASSED))) { in tcp_rack_output()
16882 * We have passed the dup-ack threshold <or> in tcp_rack_output()
16885 * it is only the dup-ack threshold that in tcp_rack_output()
16889 rack_log_retran_reason(rack, rsm, (tsused - ts_low), thresh, 1); in tcp_rack_output()
16890 rack->r_fast_output = 0; in tcp_rack_output()
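
/*
 * Illustrative sketch of the eligibility test tcp_rack_output()
 * applies above: a segment is handed back for retransmission only
 * when (a) enough time has elapsed since its last (re)send and (b)
 * it has either crossed the dup-ack threshold or been passed over
 * by a SACK.  Argument names are illustrative, not the kernel's.
 */
#include <stdint.h>
#define DUP_ACK_THRESHOLD 3

static int
rack_rxt_eligible(uint32_t tsused, uint32_t ts_low, uint32_t thresh,
    uint8_t dupacks, int sack_passed)
{
	if ((tsused - ts_low) < thresh)
		return (0);	/* too soon; leave it to the timer */
	return ((dupacks >= DUP_ACK_THRESHOLD) || sack_passed);
}
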
16901 if (tcp_bblogging_on(rack->rc_tp)) { in rack_log_pacing_delay_calc()
16922 log.u_bbr.flex3 = rack->r_ctl.rc_pace_min_segs; in rack_log_pacing_delay_calc()
16923 log.u_bbr.flex4 = rack->r_ctl.rc_pace_max_segs; in rack_log_pacing_delay_calc()
16924 log.u_bbr.flex5 = rack->r_ctl.rack_per_of_gp_ss; in rack_log_pacing_delay_calc()
16925 log.u_bbr.flex6 = rack->r_ctl.rack_per_of_gp_ca; in rack_log_pacing_delay_calc()
16926 log.u_bbr.use_lt_bw = rack->rc_ack_can_sendout_data; in rack_log_pacing_delay_calc()
16928 log.u_bbr.use_lt_bw |= rack->r_late; in rack_log_pacing_delay_calc()
16930 log.u_bbr.use_lt_bw |= rack->r_early; in rack_log_pacing_delay_calc()
16932 log.u_bbr.use_lt_bw |= rack->app_limited_needs_set; in rack_log_pacing_delay_calc()
16934 log.u_bbr.use_lt_bw |= rack->rc_gp_filled; in rack_log_pacing_delay_calc()
16936 log.u_bbr.use_lt_bw |= rack->measure_saw_probe_rtt; in rack_log_pacing_delay_calc()
16938 log.u_bbr.use_lt_bw |= rack->in_probe_rtt; in rack_log_pacing_delay_calc()
16940 log.u_bbr.use_lt_bw |= rack->gp_ready; in rack_log_pacing_delay_calc()
16942 log.u_bbr.epoch = rack->r_ctl.rc_agg_delayed; in rack_log_pacing_delay_calc()
16943 log.u_bbr.lt_epoch = rack->r_ctl.rc_agg_early; in rack_log_pacing_delay_calc()
16944 log.u_bbr.applimited = rack->r_ctl.rack_per_of_gp_rec; in rack_log_pacing_delay_calc()
16947 if (rack->r_ctl.gp_bw == 0) in rack_log_pacing_delay_calc()
16952 log.u_bbr.pkts_out = rack->r_ctl.rc_rack_min_rtt; in rack_log_pacing_delay_calc()
16953 log.u_bbr.lost = rack->r_ctl.rc_probertt_sndmax_atexit; in rack_log_pacing_delay_calc()
16955 if (rack->r_ctl.cwnd_to_use < rack->rc_tp->snd_ssthresh) { in rack_log_pacing_delay_calc()
16964 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_pacing_delay_calc()
16965 log.u_bbr.cwnd_gain = rack->rc_gp_saw_rec; in rack_log_pacing_delay_calc()
16967 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ss; in rack_log_pacing_delay_calc()
16969 log.u_bbr.cwnd_gain |= rack->rc_gp_saw_ca; in rack_log_pacing_delay_calc()
16971 log.u_bbr.cwnd_gain |= rack->use_fixed_rate; in rack_log_pacing_delay_calc()
16973 log.u_bbr.cwnd_gain |= rack->rc_always_pace; in rack_log_pacing_delay_calc()
16975 log.u_bbr.cwnd_gain |= rack->gp_ready; in rack_log_pacing_delay_calc()
16977 log.u_bbr.bbr_state = rack->dgp_on; in rack_log_pacing_delay_calc()
16979 log.u_bbr.bbr_state |= rack->rc_pace_to_cwnd; in rack_log_pacing_delay_calc()
16981 TCP_LOG_EVENTP(rack->rc_tp, NULL, in rack_log_pacing_delay_calc()
16982 &rack->rc_inp->inp_socket->so_rcv, in rack_log_pacing_delay_calc()
16983 &rack->rc_inp->inp_socket->so_snd, in rack_log_pacing_delay_calc()
16994 user_max = rack->rc_user_set_max_segs * mss; in rack_get_pacing_len()
16995 if (rack->rc_force_max_seg) { in rack_get_pacing_len()
16998 if (rack->use_fixed_rate && in rack_get_pacing_len()
16999 ((rack->r_ctl.crte == NULL) || in rack_get_pacing_len()
17000 (bw != rack->r_ctl.crte->rate))) { in rack_get_pacing_len()
17005 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_len()
17010 new_tso = tcp_get_pacing_burst_size_w_divisor(rack->rc_tp, bw, mss, in rack_get_pacing_len()
17011 pace_one, rack->r_ctl.crte, NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_len()
17014 if (rack->rc_hybrid_mode && rack->r_ctl.client_suggested_maxseg) { in rack_get_pacing_len()
17015 if (((uint32_t)rack->r_ctl.client_suggested_maxseg * mss) > new_tso) in rack_get_pacing_len()
17016 new_tso = (uint32_t)rack->r_ctl.client_suggested_maxseg * mss; in rack_get_pacing_len()
17018 if (rack->r_ctl.rc_user_set_min_segs && in rack_get_pacing_len()
17019 ((rack->r_ctl.rc_user_set_min_segs * mss) > new_tso)) in rack_get_pacing_len()
17020 new_tso = rack->r_ctl.rc_user_set_min_segs * mss; in rack_get_pacing_len()
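
/*
 * Illustrative sketch of the clamp ordering visible in
 * rack_get_pacing_len(): start from the divisor-derived burst size,
 * let a hybrid-mode client suggestion and the user-set minimum raise
 * it, and (per the user_max computed at entry) let the user-set
 * maximum cap it.  All parameters here are placeholders.
 */
#include <stdint.h>

static uint32_t
pacing_burst_len(uint32_t base_tso, uint32_t mss, uint32_t client_segs,
    uint32_t user_min_segs, uint32_t user_max_segs)
{
	uint32_t len = base_tso;

	if (client_segs && ((client_segs * mss) > len))
		len = client_segs * mss;	/* client-suggested floor */
	if (user_min_segs && ((user_min_segs * mss) > len))
		len = user_min_segs * mss;	/* user-set floor */
	if (user_max_segs && (len > (user_max_segs * mss)))
		len = user_max_segs * mss;	/* user-set ceiling */
	return (len);
}
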
17033 * nearly zero, maybe because of a time-out? in rack_arrive_at_discounted_rate()
17034 * Let's drop back to the lt-bw. in rack_arrive_at_discounted_rate()
17040 } else if (IN_RECOVERY(rack->rc_tp->t_flags)) { in rack_arrive_at_discounted_rate()
17045 if (rack->rack_hibeta == 0) { in rack_arrive_at_discounted_rate()
17049 reduced_win = window_input * rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
17051 gain = rack->r_ctl.saved_hibeta; in rack_arrive_at_discounted_rate()
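
/*
 * Illustrative sketch: the recovery discount above appears to scale
 * the input window by the saved beta, new-reno style.  The
 * divide-by-100 is an assumption (beta expressed in percent); the
 * exact normalization is not shown in this excerpt.
 */
#include <stdint.h>

static uint64_t
discounted_window(uint64_t window_input, uint32_t beta_percent)
{
	/* e.g. beta_percent = 80 keeps 80% of the input window */
	return ((window_input * beta_percent) / 100);
}
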
17083 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17084 if (ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked) > rack->r_ctl.cwnd_to_use) in pace_to_fill_cwnd()
17086 if ((ctf_outstanding(rack->rc_tp) + (segsiz-1)) > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17088 if (rack->r_ctl.rc_last_us_rtt == 0) in pace_to_fill_cwnd()
17090 if (rack->rc_pace_fill_if_rttin_range && in pace_to_fill_cwnd()
17091 (rack->r_ctl.rc_last_us_rtt >= in pace_to_fill_cwnd()
17092 (get_filter_value_small(&rack->r_ctl.rc_gp_min_rtt) * rack->rtt_limit_mul))) { in pace_to_fill_cwnd()
17096 if (rack->r_ctl.fillcw_cap && *rate_wanted >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17099 * first let's calculate the b/w based on the last us-rtt in pace_to_fill_cwnd()
17102 fill_bw = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in pace_to_fill_cwnd()
17103 if (rack->rc_fillcw_apply_discount) { in pace_to_fill_cwnd()
17112 if (fill_bw > rack->rc_tp->snd_wnd) in pace_to_fill_cwnd()
17113 fill_bw = rack->rc_tp->snd_wnd; in pace_to_fill_cwnd()
17116 fill_bw /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in pace_to_fill_cwnd()
17118 if (rack->r_ctl.fillcw_cap && fill_bw >= rack->r_ctl.fillcw_cap) in pace_to_fill_cwnd()
17119 fill_bw = rack->r_ctl.fillcw_cap; in pace_to_fill_cwnd()
17124 * We want to limit fill-cw to some multiplier in pace_to_fill_cwnd()
17138 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in pace_to_fill_cwnd()
17151 tcp_log_event(rack->rc_tp, NULL, NULL, NULL, in pace_to_fill_cwnd()
17164 rack->r_via_fill_cw = 1; in pace_to_fill_cwnd()
17165 if (rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17166 (rack->r_ctl.crte != NULL)) { in pace_to_fill_cwnd()
17169 high_rate = tcp_hw_highest_rate(rack->r_ctl.crte); in pace_to_fill_cwnd()
17174 rack->r_via_fill_cw = 0; in pace_to_fill_cwnd()
17183 } else if ((rack->r_ctl.crte == NULL) && in pace_to_fill_cwnd()
17184 (rack->rack_hdrw_pacing == 0) && in pace_to_fill_cwnd()
17185 (rack->rack_hdw_pace_ena) && in pace_to_fill_cwnd()
17186 rack->r_rack_hw_rate_caps && in pace_to_fill_cwnd()
17187 (rack->rack_attempt_hdwr_pace == 0) && in pace_to_fill_cwnd()
17188 (rack->rc_inp->inp_route.ro_nh != NULL) && in pace_to_fill_cwnd()
17189 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in pace_to_fill_cwnd()
17196 high_rate = tcp_hw_highest_rate_ifp(rack->rc_inp->inp_route.ro_nh->nh_ifp, rack->rc_inp); in pace_to_fill_cwnd()
17205 if (rack->r_ctl.bw_rate_cap && (fill_bw > rack->r_ctl.bw_rate_cap)) { in pace_to_fill_cwnd()
17206 rack_log_hybrid_bw(rack, rack->rc_tp->snd_max, in pace_to_fill_cwnd()
17208 fill_bw = rack->r_ctl.bw_rate_cap; in pace_to_fill_cwnd()
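
/*
 * Illustrative sketch of the fill-cw rate computed above: "one
 * window per RTT".  Take the lesser of cwnd and the peer's window,
 * convert to bytes/second with the last measured RTT in
 * microseconds, then apply any configured cap.  Assumes the caller
 * already verified a non-zero RTT, as pace_to_fill_cwnd() does.
 */
#include <stdint.h>
#define USECS_PER_SEC 1000000ULL

static uint64_t
fill_cw_bw(uint32_t cwnd, uint32_t snd_wnd, uint32_t last_rtt_us,
    uint64_t cap)
{
	uint64_t bw = cwnd;

	if (last_rtt_us == 0)
		return (0);
	if (bw > snd_wnd)
		bw = snd_wnd;	/* never plan past the peer's window */
	bw = (bw * USECS_PER_SEC) / last_rtt_us;	/* bytes/sec */
	if (cap && (bw > cap))
		bw = cap;
	return (bw);
}
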
17236 (rack->r_ctl.rc_user_set_min_segs == 1)) in rack_get_pacing_delay()
17240 if (rack->rc_always_pace == 0) { in rack_get_pacing_delay()
17256 if (rack->r_ctl.rc_rack_min_rtt) in rack_get_pacing_delay()
17257 srtt = rack->r_ctl.rc_rack_min_rtt; in rack_get_pacing_delay()
17259 srtt = max(tp->t_srtt, 1); in rack_get_pacing_delay()
17260 if (rack->r_ctl.rc_rack_largest_cwnd) in rack_get_pacing_delay()
17261 cwnd = rack->r_ctl.rc_rack_largest_cwnd; in rack_get_pacing_delay()
17263 cwnd = rack->r_ctl.cwnd_to_use; in rack_get_pacing_delay()
17283 pacing_delay -= reduce; in rack_get_pacing_delay()
17289 if (rack->rc_pace_to_cwnd) { in rack_get_pacing_delay()
17293 rack->rc_ack_can_sendout_data = 1; in rack_get_pacing_delay()
17298 /* RRS: We insert non-paced call to stats here for len */ in rack_get_pacing_delay()
17306 if ((rack->r_rr_config == 1) && rsm) { in rack_get_pacing_delay()
17307 return (rack->r_ctl.rc_min_to); in rack_get_pacing_delay()
17309 if (rack->use_fixed_rate) { in rack_get_pacing_delay()
17311 } else if ((rack->r_ctl.init_rate == 0) && in rack_get_pacing_delay()
17312 (rack->r_ctl.gp_bw == 0)) { in rack_get_pacing_delay()
17315 } else if (rack->dgp_on) { in rack_get_pacing_delay()
17321 rate_wanted = min(rack->rc_tp->snd_cwnd, rack->r_ctl.cwnd_to_use); in rack_get_pacing_delay()
17324 if (rate_wanted > rack->rc_tp->snd_wnd) in rack_get_pacing_delay()
17325 rate_wanted = rack->rc_tp->snd_wnd; in rack_get_pacing_delay()
17328 rate_wanted /= (uint64_t)rack->r_ctl.rc_last_us_rtt; in rack_get_pacing_delay()
17331 rack_log_pacing_delay_calc(rack, rack->rc_tp->snd_cwnd, in rack_get_pacing_delay()
17332 rack->r_ctl.cwnd_to_use, in rack_get_pacing_delay()
17334 rack->r_ctl.rc_last_us_rtt, in rack_get_pacing_delay()
17337 if (((bw_est == 0) || (rate_wanted == 0) || (rack->gp_ready == 0)) && in rack_get_pacing_delay()
17338 (rack->use_fixed_rate == 0)) { in rack_get_pacing_delay()
17347 segs = (len + segsiz - 1) / segsiz; in rack_get_pacing_delay()
17349 * We need the diff between 1514 bytes (e-mtu with e-hdr) in rack_get_pacing_delay()
17355 oh = (tp->t_maxseg - segsiz) + sizeof(struct tcphdr); in rack_get_pacing_delay()
17356 if (rack->r_is_v6) { in rack_get_pacing_delay()
17365 /* We add a fixed 14 for the ethernet header */ in rack_get_pacing_delay()
17374 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17379 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17380 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17381 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17382 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17385 if (rack->r_ctl.crte && in rack_get_pacing_delay()
17386 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17392 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17399 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17400 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17401 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17402 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17405 if ((rack->r_ctl.crte != NULL) && (rack->rc_inp->inp_snd_tag == NULL)) { in rack_get_pacing_delay()
17410 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17411 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17412 /* Let's re-allow attempting to set up pacing */ in rack_get_pacing_delay()
17413 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17414 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17419 prev_fill = rack->r_via_fill_cw; in rack_get_pacing_delay()
17420 if ((rack->rc_pace_to_cwnd) && in rack_get_pacing_delay()
17422 (rack->dgp_on == 1) && in rack_get_pacing_delay()
17423 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17424 (rack->in_probe_rtt == 0) && in rack_get_pacing_delay()
17425 (IN_FASTRECOVERY(rack->rc_tp->t_flags) == 0)) { in rack_get_pacing_delay()
17431 /* Re-check to make sure we are not exceeding our max b/w */ in rack_get_pacing_delay()
17432 if ((rack->r_ctl.crte != NULL) && in rack_get_pacing_delay()
17433 (tcp_hw_highest_rate(rack->r_ctl.crte) < rate_wanted)) { in rack_get_pacing_delay()
17439 if (rack->r_rack_hw_rate_caps == 0) { in rack_get_pacing_delay()
17446 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17447 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17448 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17449 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17450 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL); in rack_get_pacing_delay()
17454 if ((rack->rc_inp->inp_route.ro_nh != NULL) && in rack_get_pacing_delay()
17455 (rack->rc_inp->inp_route.ro_nh->nh_ifp != NULL)) { in rack_get_pacing_delay()
17456 if ((rack->rack_hdw_pace_ena) && in rack_get_pacing_delay()
17458 (rack->rack_hdrw_pacing == 0) && in rack_get_pacing_delay()
17459 (rack->rack_attempt_hdwr_pace == 0)) { in rack_get_pacing_delay()
17464 rack->rack_attempt_hdwr_pace = 1; in rack_get_pacing_delay()
17465 rack->r_ctl.crte = tcp_set_pacing_rate(rack->rc_tp, in rack_get_pacing_delay()
17466 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17469 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17470 if (rack->r_ctl.crte) { in rack_get_pacing_delay()
17471 rack->rack_hdrw_pacing = 1; in rack_get_pacing_delay()
17472 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, segsiz, in rack_get_pacing_delay()
17473 pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17474 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17476 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17478 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17482 } else if (rack->rack_hdrw_pacing && in rack_get_pacing_delay()
17483 (rack->r_ctl.last_hw_bw_req != rate_wanted)) { in rack_get_pacing_delay()
17487 if (rack->r_up_only && in rack_get_pacing_delay()
17488 (rate_wanted < rack->r_ctl.crte->rate)) { in rack_get_pacing_delay()
17493 * previous | this-time in rack_get_pacing_delay()
17494 * A) 0 | 0 -- fill_cw not in the picture in rack_get_pacing_delay()
17495 * B) 1 | 0 -- we were doing a fill-cw but now are not in rack_get_pacing_delay()
17496 * C) 1 | 1 -- all rates from fill_cw in rack_get_pacing_delay()
17497 * D) 0 | 1 -- we were doing non-fill and now we are filling in rack_get_pacing_delay()
17504 if (!((prev_fill == 1) && (rack->r_via_fill_cw == 0))) in rack_get_pacing_delay()
17507 if ((rate_wanted > rack->r_ctl.crte->rate) || in rack_get_pacing_delay()
17508 (rate_wanted <= rack->r_ctl.crte_prev_rate)) { in rack_get_pacing_delay()
17516 bw_est, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17518 tcp_rel_pacing_rate(rack->r_ctl.crte, rack->rc_tp); in rack_get_pacing_delay()
17519 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17520 rack->rack_attempt_hdwr_pace = 0; in rack_get_pacing_delay()
17521 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17522 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17525 nrte = tcp_chg_pacing_rate(rack->r_ctl.crte, in rack_get_pacing_delay()
17526 rack->rc_tp, in rack_get_pacing_delay()
17527 rack->rc_inp->inp_route.ro_nh->nh_ifp, in rack_get_pacing_delay()
17530 &err, &rack->r_ctl.crte_prev_rate); in rack_get_pacing_delay()
17536 rack->rack_hdrw_pacing = 0; in rack_get_pacing_delay()
17537 rack->r_ctl.crte = NULL; in rack_get_pacing_delay()
17541 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17543 } else if (nrte != rack->r_ctl.crte) { in rack_get_pacing_delay()
17544 rack->r_ctl.crte = nrte; in rack_get_pacing_delay()
17545 rack->r_ctl.rc_pace_max_segs = tcp_get_pacing_burst_size_w_divisor(tp, rate_wanted, in rack_get_pacing_delay()
17546 segsiz, pace_one, rack->r_ctl.crte, in rack_get_pacing_delay()
17547 NULL, rack->r_ctl.pace_len_divisor); in rack_get_pacing_delay()
17549 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17551 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17555 rack_set_pace_segments(rack->rc_tp, rack, __LINE__, &rate_wanted); in rack_get_pacing_delay()
17557 rate_wanted, rack->r_ctl.crte->rate, __LINE__, in rack_get_pacing_delay()
17559 rack->r_ctl.last_hw_bw_req = rate_wanted; in rack_get_pacing_delay()
17565 (rack->use_fixed_rate == 0) && in rack_get_pacing_delay()
17566 (rack->rack_hdrw_pacing == 0)) { in rack_get_pacing_delay()
17575 * is set, we are doing fixed pacing or hardware pacing. in rack_get_pacing_delay()
17577 if (rack->rc_tp->t_srtt) in rack_get_pacing_delay()
17578 srtt = rack->rc_tp->t_srtt; in rack_get_pacing_delay()
17591 if (rack->r_ctl.crte && (rack->r_ctl.crte->rs_num_enobufs > 0)) { in rack_get_pacing_delay()
17595 * of gas or we are mis-estimating the time in rack_get_pacing_delay()
17601 hw_boost_delay = rack->r_ctl.crte->time_between * rack_enobuf_hw_boost_mult; in rack_get_pacing_delay()
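
/*
 * Illustrative sketch of what rack_get_pacing_delay() ultimately
 * returns: a time, not a rate.  Given a burst of len bytes and a
 * wanted rate in bytes/second, the pacing slot is the burst length
 * scaled to microseconds.  Unit conventions here are assumptions.
 */
#include <stdint.h>
#define USECS_PER_SEC 1000000ULL

static uint32_t
pacing_slot_usecs(uint32_t len, uint64_t rate_bytes_per_sec)
{
	uint64_t slot;

	if (rate_bytes_per_sec == 0)
		return (0);	/* caller falls back to a default slot */
	slot = ((uint64_t)len * USECS_PER_SEC) / rate_bytes_per_sec;
	return ((uint32_t)slot);
}
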
17617 if (tp->t_state < TCPS_ESTABLISHED) { in rack_start_gp_measurement()
17624 if (tp->t_state >= TCPS_FIN_WAIT_1) { in rack_start_gp_measurement()
17631 if (sbavail(&tptosocket(tp)->so_snd) < in rack_start_gp_measurement()
17638 tp->t_flags |= TF_GPUTINPROG; in rack_start_gp_measurement()
17639 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17640 rack->r_ctl.rc_gp_lowrtt = 0xffffffff; in rack_start_gp_measurement()
17641 rack->r_ctl.rc_gp_high_rwnd = rack->rc_tp->snd_wnd; in rack_start_gp_measurement()
17642 tp->gput_seq = startseq; in rack_start_gp_measurement()
17643 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17644 if (rack->in_probe_rtt) in rack_start_gp_measurement()
17645 rack->measure_saw_probe_rtt = 1; in rack_start_gp_measurement()
17646 else if ((rack->measure_saw_probe_rtt) && in rack_start_gp_measurement()
17647 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.rc_probertt_sndmax_atexit))) in rack_start_gp_measurement()
17648 rack->measure_saw_probe_rtt = 0; in rack_start_gp_measurement()
17649 if (rack->rc_gp_filled) in rack_start_gp_measurement()
17650 tp->gput_ts = rack->r_ctl.last_cumack_advance; in rack_start_gp_measurement()
17655 tp->gput_ts = tcp_get_usecs(&tv); in rack_start_gp_measurement()
17656 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17662 * initial-windows worth of data to in rack_start_gp_measurement()
17666 if ((rack->rc_gp_filled == 0) && (rack->r_ctl.init_rate == 0)) { in rack_start_gp_measurement()
17667 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17668 tp->gput_ack = startseq + max(rc_init_window(rack), in rack_start_gp_measurement()
17671 tp->gput_seq, in rack_start_gp_measurement()
17672 tp->gput_ack, in rack_start_gp_measurement()
17674 tp->gput_ts, in rack_start_gp_measurement()
17675 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17679 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17688 if (rack->r_ctl.rc_app_limited_cnt == 0) { in rack_start_gp_measurement()
17691 * the tp->gput_ts is correctly set based on in rack_start_gp_measurement()
17695 my_rsm = tqhash_min(rack->r_ctl.tqh); in rack_start_gp_measurement()
17697 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17702 if (rack->r_ctl.rc_first_appl == NULL) { in rack_start_gp_measurement()
17717 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17721 * after that (after the app-limited). in rack_start_gp_measurement()
17723 my_rsm = tqhash_next(rack->r_ctl.tqh, rack->r_ctl.rc_first_appl); in rack_start_gp_measurement()
17725 if ((my_rsm->r_end - my_rsm->r_start) <= ctf_fixed_maxseg(tp)) in rack_start_gp_measurement()
17727 my_rsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17730 tp->gput_seq = my_rsm->r_start + ctf_fixed_maxseg(tp); in rack_start_gp_measurement()
17735 (my_rsm->r_rtr_cnt != 1)) { in rack_start_gp_measurement()
17738 * the last is the app-limited one. in rack_start_gp_measurement()
17743 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
17745 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
17751 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
17752 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17757 tp->gput_seq = my_rsm->r_end; in rack_start_gp_measurement()
17762 nrsm = tqhash_next(rack->r_ctl.tqh, my_rsm); in rack_start_gp_measurement()
17773 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17774 tp->gput_ack = tp->gput_seq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17775 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17776 if ((rack->r_ctl.cleared_app_ack == 1) && in rack_start_gp_measurement()
17777 (SEQ_GEQ(tp->gput_seq, rack->r_ctl.cleared_app_ack_seq))) { in rack_start_gp_measurement()
17783 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17784 rack->r_ctl.cleared_app_ack = 0; in rack_start_gp_measurement()
17787 tp->gput_seq, in rack_start_gp_measurement()
17788 tp->gput_ack, in rack_start_gp_measurement()
17790 tp->gput_ts, in rack_start_gp_measurement()
17791 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17796 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
17803 * idle or if this is the first-send. Let's in rack_start_gp_measurement()
17808 rack->app_limited_needs_set = 1; in rack_start_gp_measurement()
17809 tp->gput_ack = startseq + rack_get_measure_window(tp, rack); in rack_start_gp_measurement()
17810 rack->r_ctl.rc_gp_cumack_ts = 0; in rack_start_gp_measurement()
17812 my_rsm = tqhash_find(rack->r_ctl.tqh, startseq); in rack_start_gp_measurement()
17814 rack->r_ctl.rc_gp_output_ts = my_rsm->r_tim_lastsent[0]; in rack_start_gp_measurement()
17815 if (my_rsm->r_flags & RACK_ACKED) { in rack_start_gp_measurement()
17820 tp->gput_ts = (uint32_t)my_rsm->r_ack_arrival; in rack_start_gp_measurement()
17821 rack->app_limited_needs_set = 0; in rack_start_gp_measurement()
17823 if (SEQ_LT(my_rsm->r_start, tp->gput_seq)) { in rack_start_gp_measurement()
17825 tp->gput_seq = my_rsm->r_start; in rack_start_gp_measurement()
17829 * TSNH unless we have some send-map limit, in rack_start_gp_measurement()
17836 rack->r_ctl.rc_gp_output_ts = rack_to_usec_ts(&tv); in rack_start_gp_measurement()
17840 tp->gput_seq, in rack_start_gp_measurement()
17841 tp->gput_ack, in rack_start_gp_measurement()
17843 tp->gput_ts, in rack_start_gp_measurement()
17844 (((uint64_t)rack->r_ctl.rc_app_limited_cnt << 32) | (uint64_t)rack->r_ctl.rc_gp_output_ts), in rack_start_gp_measurement()
17846 rack_log_gpset(rack, tp->gput_ack, 0, 0, __LINE__, 1, NULL); in rack_start_gp_measurement()
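
/*
 * Illustrative sketch of the measurement this function arms: once
 * the cumulative ack passes gput_ack, goodput is the bytes between
 * gput_seq and that ack over the time since gput_ts.  A hypothetical
 * helper, assuming microsecond timestamps:
 */
#include <stdint.h>
#define USECS_PER_SEC 1000000ULL

static uint64_t
goodput_bytes_per_sec(uint32_t gput_seq, uint32_t ack,
    uint32_t gput_ts, uint32_t now)
{
	uint32_t bytes = ack - gput_seq;	/* serial-number math */
	uint32_t elapsed = now - gput_ts;

	if (elapsed == 0)
		return (0);
	return (((uint64_t)bytes * USECS_PER_SEC) / elapsed);
}
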
17856 if (tp->snd_wnd > cwnd_to_use) in rack_what_can_we_send()
17859 sendwin = tp->snd_wnd; in rack_what_can_we_send()
17860 if (ctf_outstanding(tp) >= tp->snd_wnd) { in rack_what_can_we_send()
17861 /* We never want to go over our peer's rcv-window */ in rack_what_can_we_send()
17866 flight = ctf_flight_size(tp, rack->r_ctl.rc_sacked); in rack_what_can_we_send()
17871 * >= tp->snd_wnd). in rack_what_can_we_send()
17875 len = sendwin - flight; in rack_what_can_we_send()
17876 if ((len + ctf_outstanding(tp)) > tp->snd_wnd) { in rack_what_can_we_send()
17878 len = tp->snd_wnd - ctf_outstanding(tp); in rack_what_can_we_send()
17885 len = avail - sb_offset; in rack_what_can_we_send()
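
/*
 * Illustrative condensation of rack_what_can_we_send(): never exceed
 * the peer's receive window, never exceed the pacing window, and
 * never send more than the socket buffer holds past the current
 * offset.  Assumes avail >= sb_offset, as the caller guarantees.
 */
#include <stdint.h>

static uint32_t
what_can_we_send(uint32_t snd_wnd, uint32_t sendwin, uint32_t flight,
    uint32_t outstanding, uint32_t avail, uint32_t sb_offset)
{
	uint32_t len;

	if ((outstanding >= snd_wnd) || (flight >= sendwin))
		return (0);
	len = sendwin - flight;
	if ((len + outstanding) > snd_wnd)
		len = snd_wnd - outstanding;	/* clip to peer's window */
	if (len > (avail - sb_offset))
		len = avail - sb_offset;	/* clip to queued data */
	return (len);
}
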
17896 if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) { in rack_log_fsb()
17901 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_fsb()
17906 log.u_bbr.flex5 = tp->rcv_numsacks; in rack_log_fsb()
17907 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_log_fsb()
17909 log.u_bbr.flex8 = rack->r_fsb_inited; in rack_log_fsb()
17910 log.u_bbr.applimited = rack->r_fast_output; in rack_log_fsb()
17918 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_fsb()
17919 tcp_log_event(tp, NULL, &so->so_rcv, &so->so_snd, TCP_LOG_FSB, 0, in rack_log_fsb()
17949 if (hw_tls && (m->m_flags & M_EXTPG)) in rack_fo_base_copym()
17950 tls = m->m_epg_tls; in rack_fo_base_copym()
17964 if (m->m_flags & M_EXTPG) in rack_fo_base_copym()
17965 ntls = m->m_epg_tls; in rack_fo_base_copym()
17981 mlen = min(len, m->m_len - off); in rack_fo_base_copym()
17991 if (m->m_flags & M_EXTPG) { in rack_fo_base_copym()
18012 mlen = (seglimit - frags - 1) * fragsize; in rack_fo_base_copym()
18019 seglimit -= frags; in rack_fo_base_copym()
18023 n = m_get(M_NOWAIT, m->m_type); in rack_fo_base_copym()
18027 n->m_len = mlen; in rack_fo_base_copym()
18029 len_cp += n->m_len; in rack_fo_base_copym()
18030 if (m->m_flags & (M_EXT | M_EXTPG)) { in rack_fo_base_copym()
18031 n->m_data = m->m_data + off; in rack_fo_base_copym()
18035 (u_int)n->m_len); in rack_fo_base_copym()
18037 len -= n->m_len; in rack_fo_base_copym()
18039 m = m->m_next; in rack_fo_base_copym()
18040 np = &n->m_next; in rack_fo_base_copym()
18041 if (len || (soff == smb->m_len)) { in rack_fo_base_copym()
18053 fsb->m = smb; in rack_fo_base_copym()
18054 fsb->off = soff; in rack_fo_base_copym()
18062 fsb->o_m_len = smb->m_len; in rack_fo_base_copym()
18063 fsb->o_t_len = M_TRAILINGROOM(smb); in rack_fo_base_copym()
18073 fsb->o_m_len = 0; in rack_fo_base_copym()
18074 fsb->o_t_len = 0; in rack_fo_base_copym()
18096 m = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18097 if (M_TRAILINGROOM(m) != rack->r_ctl.fsb.o_t_len) { in rack_fo_m_copym()
18104 KASSERT((rack->r_ctl.fsb.o_t_len > M_TRAILINGROOM(m)), in rack_fo_m_copym()
18109 rack->r_ctl.fsb.o_t_len, in rack_fo_m_copym()
18110 rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18111 m->m_len)); in rack_fo_m_copym()
18112 rack->r_ctl.fsb.o_m_len += (rack->r_ctl.fsb.o_t_len - M_TRAILINGROOM(m)); in rack_fo_m_copym()
18113 rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(m); in rack_fo_m_copym()
18115 if (m->m_len < rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18120 KASSERT((rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)), in rack_fo_m_copym()
18122 m, m->m_len, in rack_fo_m_copym()
18123 rack, rack->r_ctl.fsb.o_m_len, in rack_fo_m_copym()
18124 rack->r_ctl.fsb.off)); in rack_fo_m_copym()
18126 if (rack->r_ctl.fsb.off >= (rack->r_ctl.fsb.o_m_len - m->m_len)) in rack_fo_m_copym()
18127 rack->r_ctl.fsb.off -= (rack->r_ctl.fsb.o_m_len - m->m_len); in rack_fo_m_copym()
18129 rack->r_ctl.fsb.off = 0; in rack_fo_m_copym()
18130 rack->r_ctl.fsb.o_m_len = m->m_len; in rack_fo_m_copym()
18132 } else if (m->m_len > rack->r_ctl.fsb.o_m_len) { in rack_fo_m_copym()
18137 soff = rack->r_ctl.fsb.off; in rack_fo_m_copym()
18140 KASSERT(soff < m->m_len, ("%s rack:%p len:%u m:%p m->m_len:%u < off?", in rack_fo_m_copym()
18142 rack, *plen, m, m->m_len)); in rack_fo_m_copym()
18145 *s_mb = rack->r_ctl.fsb.m; in rack_fo_m_copym()
18147 &rack->r_ctl.fsb, in rack_fo_m_copym()
18148 seglimit, segsize, rack->r_ctl.fsb.hw_tls); in rack_fo_m_copym()
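
/*
 * Illustrative sketch of the offset repair in rack_fo_m_copym(): the
 * fast-send block caches the head mbuf's length (o_m_len) at setup;
 * if acks have since trimmed that mbuf via sbdrop, the cached offset
 * must shrink by the same amount before the copy resumes.
 */
#include <stdint.h>

static void
fsb_resync_offset(uint32_t *off, uint32_t *o_m_len, uint32_t m_len)
{
	if (m_len >= *o_m_len)
		return;			/* nothing was trimmed */
	if (*off >= (*o_m_len - m_len))
		*off -= (*o_m_len - m_len);
	else
		*off = 0;		/* defensive fallback, as above */
	*o_m_len = m_len;		/* re-cache for next time */
}
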
18162 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_log_queue_level()
18163 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_log_queue_level()
18166 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_log_queue_level()
18169 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_log_queue_level()
18170 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_log_queue_level()
18171 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_log_queue_level()
18175 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_log_queue_level()
18177 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_log_queue_level()
18193 err = in_pcbquery_txrlevel(rack->rc_inp, &p_queue); in rack_check_queue_level()
18199 err = in_pcbquery_txrtlmt(rack->rc_inp, &p_rate); in rack_check_queue_level()
18220 lentime = (rack->r_ctl.rc_pace_max_segs / segsiz); in rack_check_queue_level()
18225 /* TSNH -- KASSERT? */ in rack_check_queue_level()
18231 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_check_queue_level()
18234 log.u_bbr.flex4 = (uint32_t)rack->r_ctl.crte->using; in rack_check_queue_level()
18235 log.u_bbr.flex5 = (uint32_t)rack->r_ctl.crte->rs_num_enobufs; in rack_check_queue_level()
18236 log.u_bbr.flex6 = rack->r_ctl.crte->time_between; in rack_check_queue_level()
18240 log.u_bbr.delRate = rack->r_ctl.crte->rate; in rack_check_queue_level()
18243 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_check_queue_level()
18286 if (rack->r_is_v6) { in rack_fast_rsm_output()
18287 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18292 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_rsm_output()
18295 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_rsm_output()
18300 rsm->r_flags |= RACK_TLP; in rack_fast_rsm_output()
18303 rsm->r_flags &= ~RACK_TLP; in rack_fast_rsm_output()
18305 startseq = rsm->r_start; in rack_fast_rsm_output()
18306 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_rsm_output()
18307 inp = rack->rc_inp; in rack_fast_rsm_output()
18309 flags = tcp_outflags[tp->t_state]; in rack_fast_rsm_output()
18313 if (rsm->r_flags & RACK_HAS_FIN) { in rack_fast_rsm_output()
18321 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_rsm_output()
18322 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_rsm_output()
18323 to.to_tsecr = tp->ts_recent; in rack_fast_rsm_output()
18327 /* TCP-MD5 (RFC2385). */ in rack_fast_rsm_output()
18328 if (tp->t_flags & TF_SIGNATURE) in rack_fast_rsm_output()
18333 udp = rack->r_ctl.fsb.udp; in rack_fast_rsm_output()
18336 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_rsm_output()
18337 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18338 else if (rack->rc_user_set_max_segs) in rack_fast_rsm_output()
18339 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_rsm_output()
18342 if ((tp->t_flags & TF_TSO) && in rack_fast_rsm_output()
18345 (tp->t_port == 0)) in rack_fast_rsm_output()
18355 m->m_data += max_linkhdr; in rack_fast_rsm_output()
18356 m->m_len = hdrlen; in rack_fast_rsm_output()
18357 th = rack->r_ctl.fsb.th; in rack_fast_rsm_output()
18366 if_hw_tsomax = tp->t_tsomax; in rack_fast_rsm_output()
18367 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_rsm_output()
18368 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_rsm_output()
18375 max_len = (if_hw_tsomax - hdrlen - in rack_fast_rsm_output()
18397 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_rsm_output()
18400 th->th_seq = htonl(rsm->r_start); in rack_fast_rsm_output()
18401 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_rsm_output()
18409 if ((rsm->r_flags & RACK_HAD_PUSH) && in rack_fast_rsm_output()
18410 (len == (rsm->r_end - rsm->r_start))) in rack_fast_rsm_output()
18412 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_rsm_output()
18413 if (th->th_win == 0) { in rack_fast_rsm_output()
18414 tp->t_sndzerowin++; in rack_fast_rsm_output()
18415 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_rsm_output()
18417 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_rsm_output()
18418 if (rsm->r_flags & RACK_TLP) { in rack_fast_rsm_output()
18426 tp->t_sndrexmitpack++; in rack_fast_rsm_output()
18431 stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB, in rack_fast_rsm_output()
18434 if (rsm->m == NULL) in rack_fast_rsm_output()
18436 if (rsm->m && in rack_fast_rsm_output()
18437 ((rsm->orig_m_len != rsm->m->m_len) || in rack_fast_rsm_output()
18438 (M_TRAILINGROOM(rsm->m) != rsm->orig_t_space))) { in rack_fast_rsm_output()
18442 …m->m_next = rack_fo_base_copym(rsm->m, rsm->soff, &len, NULL, if_hw_tsomaxsegcount, if_hw_tsomaxse… in rack_fast_rsm_output()
18452 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_rsm_output()
18456 if (rack->r_is_v6) in rack_fast_rsm_output()
18457 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_rsm_output()
18459 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_rsm_output()
18460 udp->uh_ulen = htons(ulen); in rack_fast_rsm_output()
18462 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_rsm_output()
18463 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_rsm_output()
18464 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_rsm_output()
18466 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_rsm_output()
18467 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_rsm_output()
18468 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_rsm_output()
18470 if (rack->r_is_v6) { in rack_fast_rsm_output()
18471 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_rsm_output()
18472 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_rsm_output()
18477 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_rsm_output()
18478 ip->ip_tos |= ect; in rack_fast_rsm_output()
18481 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18489 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_fast_rsm_output()
18499 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_rsm_output()
18509 if (rack->r_is_v6) { in rack_fast_rsm_output()
18510 if (tp->t_port) { in rack_fast_rsm_output()
18511 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_rsm_output()
18512 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18513 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_rsm_output()
18514 th->th_sum = htons(0); in rack_fast_rsm_output()
18517 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_rsm_output()
18518 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18519 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_rsm_output()
18530 if (tp->t_port) { in rack_fast_rsm_output()
18531 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_rsm_output()
18532 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_rsm_output()
18533 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18534 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_rsm_output()
18535 th->th_sum = htons(0); in rack_fast_rsm_output()
18538 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_rsm_output()
18539 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_rsm_output()
18540 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_rsm_output()
18541 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_rsm_output()
18545 KASSERT(ip->ip_v == IPVERSION, in rack_fast_rsm_output()
18546 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_rsm_output()
18553 * via either fast-path). in rack_fast_rsm_output()
18557 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_rsm_output()
18558 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_rsm_output()
18561 if (rack->r_is_v6) { in rack_fast_rsm_output()
18562 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18563 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_rsm_output()
18564 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_rsm_output()
18565 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18567 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18575 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_rsm_output()
18576 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_rsm_output()
18577 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_rsm_output()
18578 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18579 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_rsm_output()
18580 ip->ip_off |= htons(IP_DF); in rack_fast_rsm_output()
18583 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_rsm_output()
18589 rack->rc_gp_saw_rec = 1; in rack_fast_rsm_output()
18592 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_rsm_output()
18594 rack->rc_gp_saw_ca = 1; in rack_fast_rsm_output()
18597 rack->rc_gp_saw_ss = 1; in rack_fast_rsm_output()
18602 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_rsm_output()
18603 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_rsm_output()
18606 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_rsm_output()
18608 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_rsm_output()
18610 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_rsm_output()
18613 if (rsm->r_flags & RACK_RWND_COLLAPSED) { in rack_fast_rsm_output()
18614 rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm); in rack_fast_rsm_output()
18616 counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start)); in rack_fast_rsm_output()
18619 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_rsm_output()
18620 if (rack->rack_no_prr) in rack_fast_rsm_output()
18623 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_rsm_output()
18624 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_rsm_output()
18625 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_rsm_output()
18628 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_rsm_output()
18629 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_rsm_output()
18631 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_rsm_output()
18638 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_rsm_output()
18640 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_rsm_output()
18641 if (rsm->r_rtr_cnt > 0) { in rack_fast_rsm_output()
18646 log.u_bbr.flex5 = rsm->r_fas; in rack_fast_rsm_output()
18647 log.u_bbr.bbr_substate = rsm->r_bas; in rack_fast_rsm_output()
18654 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_rsm_output()
18656 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_rsm_output()
18659 log.u_bbr.delRate = rsm->r_flags; in rack_fast_rsm_output()
18661 log.u_bbr.delRate |= rack->r_must_retran; in rack_fast_rsm_output()
18669 if ((rack->r_ctl.crte != NULL) && in rack_fast_rsm_output()
18674 if (rack->r_is_v6) { in rack_fast_rsm_output()
18675 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_rsm_output()
18676 &inp->inp_route6, in rack_fast_rsm_output()
18684 &inp->inp_route, in rack_fast_rsm_output()
18690 lgb->tlb_errno = error; in rack_fast_rsm_output()
18694 tp->snd_nxt = tp->snd_max; in rack_fast_rsm_output()
18697 } else if (rack->rc_hw_nobuf && (ip_sendflag != IP_NO_SND_TAG_RL)) { in rack_fast_rsm_output()
18698 rack->rc_hw_nobuf = 0; in rack_fast_rsm_output()
18699 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_rsm_output()
18700 rack->r_early = 0; in rack_fast_rsm_output()
18701 rack->r_late = 0; in rack_fast_rsm_output()
18702 rack->r_ctl.rc_agg_early = 0; in rack_fast_rsm_output()
18704 rack_log_output(tp, &to, len, rsm->r_start, flags, error, rack_to_usec_ts(tv), in rack_fast_rsm_output()
18705 rsm, RACK_SENT_FP, rsm->m, rsm->soff, rsm->r_hw_tls, segsiz); in rack_fast_rsm_output()
18707 rack->rc_tlp_in_progress = 1; in rack_fast_rsm_output()
18708 rack->r_ctl.rc_tlp_cnt_out++; in rack_fast_rsm_output()
18712 tcp_account_for_send(tp, len, 1, doing_tlp, rsm->r_hw_tls); in rack_fast_rsm_output()
18714 rack->rc_last_sent_tlp_past_cumack = 0; in rack_fast_rsm_output()
18715 rack->rc_last_sent_tlp_seq_valid = 1; in rack_fast_rsm_output()
18716 rack->r_ctl.last_sent_tlp_seq = rsm->r_start; in rack_fast_rsm_output()
18717 rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start; in rack_fast_rsm_output()
18719 if (rack->r_ctl.rc_prr_sndcnt >= len) in rack_fast_rsm_output()
18720 rack->r_ctl.rc_prr_sndcnt -= len; in rack_fast_rsm_output()
18722 rack->r_ctl.rc_prr_sndcnt = 0; in rack_fast_rsm_output()
18724 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_rsm_output()
18725 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_rsm_output()
18726 if (IN_FASTRECOVERY(tp->t_flags) && rsm) in rack_fast_rsm_output()
18727 rack->r_ctl.retran_during_recovery += len; in rack_fast_rsm_output()
18733 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_rsm_output()
18737 if (tp->t_rtttime == 0) { in rack_fast_rsm_output()
18738 tp->t_rtttime = ticks; in rack_fast_rsm_output()
18739 tp->t_rtseq = startseq; in rack_fast_rsm_output()
18744 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18745 tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF); in rack_fast_rsm_output()
18746 if (tcp_bblogging_on(rack->rc_tp)) in rack_fast_rsm_output()
18749 tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF); in rack_fast_rsm_output()
18750 pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC); in rack_fast_rsm_output()
18751 if (rack->rc_enobuf < 0x7f) in rack_fast_rsm_output()
18752 rack->rc_enobuf++; in rack_fast_rsm_output()
18755 if (rack->r_ctl.crte != NULL) { in rack_fast_rsm_output()
18757 tcp_rl_log_enobuf(rack->r_ctl.crte); in rack_fast_rsm_output()
18766 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_rsm_output()
18767 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_rsm_output()
18768 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_rsm_output()
18769 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((len + segsiz - 1) / segsiz); in rack_fast_rsm_output()
18777 return (-1); in rack_fast_rsm_output()
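
/*
 * Illustrative sketch of the ENOBUFS path above: every consecutive
 * ENOBUFS stretches the next pacing slot by one more millisecond,
 * and the counter saturates at 0x7f so the delay stays bounded.
 */
#include <stdint.h>
#define USECS_IN_MSEC 1000

static uint32_t
enobuf_backoff_usecs(uint8_t *enobuf_cnt)
{
	uint32_t pacing_delay;

	pacing_delay = (1 + *enobuf_cnt) * USECS_IN_MSEC;
	if (*enobuf_cnt < 0x7f)
		(*enobuf_cnt)++;	/* saturate rather than wrap */
	return (pacing_delay);
}
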
18788 * delay (e.g. trans-continental/oceanic links). Setting the in rack_sndbuf_autoscale()
18810 tp = rack->rc_tp; in rack_sndbuf_autoscale()
18811 so = rack->rc_inp->inp_socket; in rack_sndbuf_autoscale()
18812 sendwin = min(rack->r_ctl.cwnd_to_use, tp->snd_wnd); in rack_sndbuf_autoscale()
18813 if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) { in rack_sndbuf_autoscale()
18814 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && in rack_sndbuf_autoscale()
18815 sbused(&so->so_snd) >= in rack_sndbuf_autoscale()
18816 (so->so_snd.sb_hiwat / 8 * 7) && in rack_sndbuf_autoscale()
18817 sbused(&so->so_snd) < V_tcp_autosndbuf_max && in rack_sndbuf_autoscale()
18818 sendwin >= (sbused(&so->so_snd) - in rack_sndbuf_autoscale()
18819 (tp->snd_max - tp->snd_una))) { in rack_sndbuf_autoscale()
18821 scaleup = (rack_autosndbuf_inc * so->so_snd.sb_hiwat) / 100; in rack_sndbuf_autoscale()
18826 scaleup += so->so_snd.sb_hiwat; in rack_sndbuf_autoscale()
18830 so->so_snd.sb_flags &= ~SB_AUTOSIZE; in rack_sndbuf_autoscale()
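
/*
 * Restating the autoscale gate above as a predicate, for clarity:
 * grow the send buffer only when the peer's window could cover 125%
 * of the buffer, the buffer is at least 7/8 full, we are under the
 * global cap, and the usable window covers the unacked backlog.
 */
#include <stdint.h>

static int
should_grow_sndbuf(uint32_t snd_wnd, uint32_t sb_hiwat, uint32_t sb_used,
    uint32_t autosndbuf_max, uint32_t sendwin, uint32_t unacked)
{
	return (((snd_wnd / 4 * 5) >= sb_hiwat) &&
	    (sb_used >= (sb_hiwat / 8 * 7)) &&
	    (sb_used < autosndbuf_max) &&
	    (sendwin >= (sb_used - unacked)));
}
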
18845 * the max-burst). We have how much to send and all the info we in rack_fast_output()
18875 if (rack->r_is_v6) { in rack_fast_output()
18876 ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
18882 ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr; in rack_fast_output()
18886 if (tp->t_port && (V_tcp_udp_tunneling_port == 0)) { in rack_fast_output()
18890 rack->r_ctl.cwnd_to_use = tp->snd_cwnd; in rack_fast_output()
18891 startseq = tp->snd_max; in rack_fast_output()
18892 segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs); in rack_fast_output()
18893 inp = rack->rc_inp; in rack_fast_output()
18894 len = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
18896 flags = rack->r_ctl.fsb.tcp_flags; in rack_fast_output()
18897 if (tp->t_flags & TF_RCVD_TSTMP) { in rack_fast_output()
18898 to.to_tsval = ms_cts + tp->ts_offset; in rack_fast_output()
18899 to.to_tsecr = tp->ts_recent; in rack_fast_output()
18903 /* TCP-MD5 (RFC2385). */ in rack_fast_output()
18904 if (tp->t_flags & TF_SIGNATURE) in rack_fast_output()
18909 udp = rack->r_ctl.fsb.udp; in rack_fast_output()
18912 if (rack->r_ctl.rc_pace_max_segs) in rack_fast_output()
18913 max_val = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
18914 else if (rack->rc_user_set_max_segs) in rack_fast_output()
18915 max_val = rack->rc_user_set_max_segs * segsiz; in rack_fast_output()
18918 if ((tp->t_flags & TF_TSO) && in rack_fast_output()
18921 (tp->t_port == 0)) in rack_fast_output()
18932 m->m_data += max_linkhdr; in rack_fast_output()
18933 m->m_len = hdrlen; in rack_fast_output()
18934 th = rack->r_ctl.fsb.th; in rack_fast_output()
18943 if_hw_tsomax = tp->t_tsomax; in rack_fast_output()
18944 if_hw_tsomaxsegcount = tp->t_tsomaxsegcount; in rack_fast_output()
18945 if_hw_tsomaxsegsize = tp->t_tsomaxsegsize; in rack_fast_output()
18952 max_len = (if_hw_tsomax - hdrlen - in rack_fast_output()
18974 (len <= MHLEN - hdrlen - max_linkhdr)) { in rack_fast_output()
18977 sb_offset = tp->snd_max - tp->snd_una; in rack_fast_output()
18978 th->th_seq = htonl(tp->snd_max); in rack_fast_output()
18979 th->th_ack = htonl(tp->rcv_nxt); in rack_fast_output()
18980 th->th_win = htons((u_short)(rack->r_ctl.fsb.recwin >> tp->rcv_scale)); in rack_fast_output()
18981 if (th->th_win == 0) { in rack_fast_output()
18982 tp->t_sndzerowin++; in rack_fast_output()
18983 tp->t_flags |= TF_RXWIN0SENT; in rack_fast_output()
18985 tp->t_flags &= ~TF_RXWIN0SENT; in rack_fast_output()
18986 tp->snd_up = tp->snd_una; /* drag it along, it's deprecated */ in rack_fast_output()
18990 stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB, in rack_fast_output()
18993 if (rack->r_ctl.fsb.m == NULL) in rack_fast_output()
18997 m->m_next = rack_fo_m_copym(rack, &len, if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, in rack_fast_output()
19008 if (rack->r_ctl.fsb.rfo_apply_push && in rack_fast_output()
19009 (len == rack->r_ctl.fsb.left_to_send)) { in rack_fast_output()
19013 if ((m->m_next == NULL) || (len <= 0)){ in rack_fast_output()
19017 if (rack->r_is_v6) in rack_fast_output()
19018 ulen = hdrlen + len - sizeof(struct ip6_hdr); in rack_fast_output()
19020 ulen = hdrlen + len - sizeof(struct ip); in rack_fast_output()
19021 udp->uh_ulen = htons(ulen); in rack_fast_output()
19023 m->m_pkthdr.rcvif = (struct ifnet *)0; in rack_fast_output()
19024 if (TCPS_HAVERCVDSYN(tp->t_state) && in rack_fast_output()
19025 (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) { in rack_fast_output()
19027 if ((tp->t_state == TCPS_SYN_RECEIVED) && in rack_fast_output()
19028 (tp->t_flags2 & TF2_ECN_SND_ECE)) in rack_fast_output()
19029 tp->t_flags2 &= ~TF2_ECN_SND_ECE; in rack_fast_output()
19031 if (rack->r_is_v6) { in rack_fast_output()
19032 ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20); in rack_fast_output()
19033 ip6->ip6_flow |= htonl(ect << 20); in rack_fast_output()
19039 ip->ip_tos &= ~IPTOS_ECN_MASK; in rack_fast_output()
19040 ip->ip_tos |= ect; in rack_fast_output()
19045 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() need this */ in rack_fast_output()
19055 (u_char *)(th + 1) + (to.to_signature - opt)) != 0) { in rack_fast_output()
19065 if (rack->r_is_v6) { in rack_fast_output()
19066 if (tp->t_port) { in rack_fast_output()
19067 m->m_pkthdr.csum_flags = CSUM_UDP_IPV6; in rack_fast_output()
19068 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19069 udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0); in rack_fast_output()
19070 th->th_sum = htons(0); in rack_fast_output()
19073 m->m_pkthdr.csum_flags = CSUM_TCP_IPV6; in rack_fast_output()
19074 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19075 th->th_sum = in6_cksum_pseudo(ip6, in rack_fast_output()
19086 if (tp->t_port) { in rack_fast_output()
19087 m->m_pkthdr.csum_flags = CSUM_UDP; in rack_fast_output()
19088 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); in rack_fast_output()
19089 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19090 ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP)); in rack_fast_output()
19091 th->th_sum = htons(0); in rack_fast_output()
19094 m->m_pkthdr.csum_flags = CSUM_TCP; in rack_fast_output()
19095 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); in rack_fast_output()
19096 th->th_sum = in_pseudo(ip->ip_src.s_addr, in rack_fast_output()
19097 ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) + in rack_fast_output()
19101 KASSERT(ip->ip_v == IPVERSION, in rack_fast_output()
19102 ("%s: IP version incorrect: %d", __func__, ip->ip_v)); in rack_fast_output()
19109 * via either fast-path). in rack_fast_output()
19113 m->m_pkthdr.csum_flags |= CSUM_TSO; in rack_fast_output()
19114 m->m_pkthdr.tso_segsz = segsiz; in rack_fast_output()
19117 if (rack->r_is_v6) { in rack_fast_output()
19118 ip6->ip6_hlim = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19119 ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6)); in rack_fast_output()
19120 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) in rack_fast_output()
19121 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19123 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19131 ip->ip_len = htons(m->m_pkthdr.len); in rack_fast_output()
19132 ip->ip_ttl = rack->r_ctl.fsb.hoplimit; in rack_fast_output()
19133 if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) { in rack_fast_output()
19134 tp->t_flags2 |= TF2_PLPMTU_PMTUD; in rack_fast_output()
19135 if (tp->t_port == 0 || len < V_tcp_minmss) { in rack_fast_output()
19136 ip->ip_off |= htons(IP_DF); in rack_fast_output()
19139 tp->t_flags2 &= ~TF2_PLPMTU_PMTUD; in rack_fast_output()
19143 if (tp->snd_cwnd > tp->snd_ssthresh) { in rack_fast_output()
19145 rack->rc_gp_saw_ca = 1; in rack_fast_output()
19148 rack->rc_gp_saw_ss = 1; in rack_fast_output()
19152 memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len); in rack_fast_output()
19153 th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr)); in rack_fast_output()
19156 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; in rack_fast_output()
19158 th->th_off = sizeof(struct tcphdr) >> 2; in rack_fast_output()
19160 if ((rack->r_ctl.crte != NULL) && in rack_fast_output()
19164 if (tcp_bblogging_on(rack->rc_tp)) { in rack_fast_output()
19168 log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp); in rack_fast_output()
19169 if (rack->rack_no_prr) in rack_fast_output()
19172 log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt; in rack_fast_output()
19173 log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs; in rack_fast_output()
19174 log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs; in rack_fast_output()
19177 log.u_bbr.flex6 = rack->r_ctl.rc_agg_early; in rack_fast_output()
19178 log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed; in rack_fast_output()
19180 log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw; in rack_fast_output()
19184 log.u_bbr.pkts_out = tp->t_maxseg; in rack_fast_output()
19186 log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked); in rack_fast_output()
19188 log.u_bbr.lt_epoch = rack->r_ctl.cwnd_to_use; in rack_fast_output()
19189 log.u_bbr.delivered = rack->r_ctl.fsb.left_to_send; in rack_fast_output()
19191 log.u_bbr.delRate = rack->r_must_retran; in rack_fast_output()
19196 log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1)/segsiz); in rack_fast_output()
19202 if (rack->r_is_v6) { in rack_fast_output()
19203 error = ip6_output(m, inp->in6p_outputopts, in rack_fast_output()
19204 &inp->inp_route6, in rack_fast_output()
19214 &inp->inp_route, in rack_fast_output()
19219 lgb->tlb_errno = error; in rack_fast_output()
19226 } else if (rack->rc_hw_nobuf) { in rack_fast_output()
19227 rack->rc_hw_nobuf = 0; in rack_fast_output()
19228 rack->r_ctl.rc_agg_delayed = 0; in rack_fast_output()
19229 rack->r_early = 0; in rack_fast_output()
19230 rack->r_late = 0; in rack_fast_output()
19231 rack->r_ctl.rc_agg_early = 0; in rack_fast_output()
19233 if ((error == 0) && (rack->lt_bw_up == 0)) { in rack_fast_output()
19235 rack->r_ctl.lt_timemark = tcp_tv_to_lusec(tv); in rack_fast_output()
19236 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19237 rack->lt_bw_up = 1; in rack_fast_output()
19239 (((tp->snd_max + len) - rack->r_ctl.lt_seq) > 0x7fffffff)) { in rack_fast_output()
19247 rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq); in rack_fast_output()
19248 rack->r_ctl.lt_seq = tp->snd_una; in rack_fast_output()
19250 if (tmark > rack->r_ctl.lt_timemark) { in rack_fast_output()
19251 rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark); in rack_fast_output()
19252 rack->r_ctl.lt_timemark = tmark; in rack_fast_output()
19255 rack_log_output(tp, &to, len, tp->snd_max, flags, error, rack_to_usec_ts(tv), in rack_fast_output()
19256 NULL, add_flag, s_mb, s_soff, rack->r_ctl.fsb.hw_tls, segsiz); in rack_fast_output()
19257 if (tp->snd_una == tp->snd_max) { in rack_fast_output()
19258 rack->r_ctl.rc_tlp_rxt_last_time = cts; in rack_fast_output()
19260 tp->t_acktime = ticks; in rack_fast_output()
19263 tcp_account_for_send(tp, len, 0, 0, rack->r_ctl.fsb.hw_tls); in rack_fast_output()
19265 rack->forced_ack = 0; /* If we send something zap the FA flag */ in rack_fast_output()
19267 if ((tp->t_flags & TF_GPUTINPROG) == 0) in rack_fast_output()
19268 rack_start_gp_measurement(tp, rack, tp->snd_max, sb_offset); in rack_fast_output()
19269 tp->snd_max += len; in rack_fast_output()
19270 tp->snd_nxt = tp->snd_max; in rack_fast_output()
19271 if (rack->rc_new_rnd_needed) { in rack_fast_output()
19272 rack_new_round_starts(tp, rack, tp->snd_max); in rack_fast_output()
19279 counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1); in rack_fast_output()
19283 if (len <= rack->r_ctl.fsb.left_to_send) in rack_fast_output()
19284 rack->r_ctl.fsb.left_to_send -= len; in rack_fast_output()
19286 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19287 if (rack->r_ctl.fsb.left_to_send < segsiz) { in rack_fast_output()
19288 rack->r_fast_output = 0; in rack_fast_output()
19289 rack->r_ctl.fsb.left_to_send = 0; in rack_fast_output()
19291 SOCK_SENDBUF_LOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19293 SOCK_SENDBUF_UNLOCK(rack->rc_inp->inp_socket); in rack_fast_output()
19295 if (tp->t_rtttime == 0) { in rack_fast_output()
19296 tp->t_rtttime = ticks; in rack_fast_output()
19297 tp->t_rtseq = startseq; in rack_fast_output()
19300 if ((rack->r_ctl.fsb.left_to_send >= segsiz) && in rack_fast_output()
19302 (*tot_len < rack->r_ctl.rc_pace_max_segs) && in rack_fast_output()
19304 max_val -= len; in rack_fast_output()
19306 th = rack->r_ctl.fsb.th; in rack_fast_output()
19312 tp->t_flags &= ~(TF_ACKNOW | TF_DELACK); in rack_fast_output()
19318 if (tp->t_flags2 & TF2_TCP_ACCOUNTING) { in rack_fast_output()
19319 tp->tcp_cnt_counters[SND_OUT_DATA] += cnt_thru; in rack_fast_output()
19320 tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val); in rack_fast_output()
19321 tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((*tot_len + segsiz - 1) / segsiz); in rack_fast_output()
19329 rack->r_fast_output = 0; in rack_fast_output()
19330 return (-1); in rack_fast_output()
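
/*
 * Illustrative sketch of the long-term bandwidth accounting
 * interleaved in rack_fast_output() above: the sampler banks
 * cumulatively-acked bytes and elapsed wall time, restarting the
 * byte window at snd_una each time, so rack_get_lt_bw() can later
 * divide bytes by time.  Field names mirror, but are not, the
 * kernel's.
 */
#include <stdint.h>

struct lt_bw_state {
	uint64_t bytes;		/* banked acked bytes */
	uint64_t time_us;	/* banked elapsed usecs */
	uint64_t timemark;	/* usec stamp of the last banking */
	uint32_t seq;		/* snd_una when the window restarted */
};

static void
lt_bw_bank(struct lt_bw_state *lt, uint32_t snd_una, uint64_t now_us)
{
	lt->bytes += (snd_una - lt->seq);	/* serial subtract */
	lt->seq = snd_una;
	if (now_us > lt->timemark) {
		lt->time_us += (now_us - lt->timemark);
		lt->timemark = now_us;
	}
}
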
	rack->r_fast_output = 1;
	rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
	rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
	rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
	rack->r_ctl.fsb.tcp_flags = flags;
	rack->r_ctl.fsb.left_to_send = orig_len - len;
	if (rack->r_ctl.fsb.left_to_send < pace_max_seg) {
		/* ... */
		rack->r_fast_output = 0;
	/* ... */
	rack->r_ctl.fsb.left_to_send = rounddown(rack->r_ctl.fsb.left_to_send, pace_max_seg);
	/* ... */
		rack->r_ctl.fsb.hw_tls = 1;
	else
		rack->r_ctl.fsb.hw_tls = 0;
	KASSERT((rack->r_ctl.fsb.left_to_send <= (sbavail(sb) - (tp->snd_max - tp->snd_una))),
	    /* ... */
	    rack, rack->r_ctl.fsb.left_to_send, sbavail(sb),
	    (tp->snd_max - tp->snd_una)));
	if (rack->r_ctl.fsb.left_to_send < segsiz)
		rack->r_fast_output = 0;
	/* ... */
	if (rack->r_ctl.fsb.left_to_send == (sbavail(sb) - (tp->snd_max - tp->snd_una)))
		rack->r_ctl.fsb.rfo_apply_push = 1;
	else
		rack->r_ctl.fsb.rfo_apply_push = 0;
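/*
 * rack_get_hpts_pacing_min_for_bw(): given the goodput estimate gp_bw
 * (bytes/sec) and the minimum hpts pacing interval in microseconds,
 * compute the largest burst that can be paced without dropping below the
 * pacer's minimum timer granularity.
 */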
	maxlen = (uint32_t)((rack->r_ctl.gp_bw * min_time) / (uint64_t)HPTS_USEC_IN_SEC);
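/*
 * rack_check_collapsed() inspects the map entry at the last window
 * collapse point. If the entry is gone, was already acked, or would
 * still land outside the peer's window, the collapse point is advanced
 * or invalidated; otherwise a RACK threshold test on the time since the
 * entry's last (re)transmission decides whether to act on it now or
 * keep waiting (log types 6 and 7 below).
 */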
	rsm = tqhash_find(rack->r_ctl.tqh, rack->r_ctl.last_collapse_point);
	if ((rsm == NULL) || ((rsm->r_flags & RACK_RWND_COLLAPSED) == 0)) {
		/* ... */
		rack->r_collapse_point_valid = 0;
	if (rsm->r_end > (rack->rc_tp->snd_una + rack->rc_tp->snd_wnd)) {
		/* ... */
	if (rsm->r_flags & RACK_ACKED) {
		/* ... */
		rack->r_ctl.last_collapse_point = rsm->r_end;
		/* ... */
		if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
		    rack->r_ctl.high_collapse_point)) {
			rack->r_collapse_point_valid = 0;
	thresh = rack_calc_thresh_rack(rack, rack_grab_rtt(rack->rc_tp, rack), cts, __LINE__, 1);
	if ((cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])) > thresh) {
		rack_log_collapse(rack, rsm->r_start,
		    (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
		    thresh, __LINE__, 6, rsm->r_flags, rsm);
	/* ... */
	rack_log_collapse(rack, rsm->r_start,
	    (cts - ((uint32_t)rsm->r_tim_lastsent[(rsm->r_rtr_cnt-1)])),
	    thresh, __LINE__, 7, rsm->r_flags, rsm);
	if ((rack->full_size_rxt == 0) &&
	    (rack->shape_rxt_to_pacing_min == 0) &&
	    /* ... */
	} else if (rack->shape_rxt_to_pacing_min &&
	    rack->gp_ready) {
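/*
 * rack_output() is the stack's single transmit entry point. The
 * prologue below reconciles the hpts pacer state (early/late
 * accounting), decides whether this call is allowed to send at all,
 * picks the pacing segment sizes, and then selects what to send: a
 * timer-queued retransmission, a collapsed-window retransmission, a
 * RACK-detected loss retransmission, a tail loss probe, forced
 * retransmissions after an RTO or MTU change, or new data.
 */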
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	/* ... */
	hpts_calling = !!(tp->t_flags2 & TF2_HPTS_CALLS);
	tp->t_flags2 &= ~TF2_HPTS_CALLS;
	/* ... */
	if (tp->t_flags & TF_TOE) {
		/* ... */
	if (rack->rack_deferred_inited == 0) {
		/* ... */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    (tp->t_state == TCPS_SYN_RECEIVED) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&		/* initial SYN|ACK sent */
	    (rack->r_ctl.rc_resend == NULL)) {		/* not a retransmit */
		/* ... */
	if (rack->r_state)
		isipv6 = rack->r_is_v6;
	else
		isipv6 = (rack->rc_inp->inp_vflag & INP_IPV6) != 0;
	/* ... */
	if (((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) &&
	    tcp_in_hpts(rack->rc_tp)) {
		/* ... */
	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
	    TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
		/* ... */
		delayed = cts - rack->r_ctl.rc_last_output_to;
	/* ... */
	if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
		/* ... */
	if (rack->rc_in_persist) {
		if (tcp_in_hpts(rack->rc_tp) == 0) {
			/* ... */
	if ((rack->rc_ack_required == 1) &&
	    (rack->r_timer_override == 0)) {
		/* ... */
		if (tcp_in_hpts(rack->rc_tp) == 0) {
			/* ... */
	if ((rack->r_timer_override) ||
	    (rack->rc_ack_can_sendout_data) ||
	    /* ... */
	    (tp->t_state < TCPS_ESTABLISHED)) {
		rack->rc_ack_can_sendout_data = 0;
		if (tcp_in_hpts(rack->rc_tp))
			tcp_hpts_remove(rack->rc_tp);
	} else if (tcp_in_hpts(rack->rc_tp)) {
		/* ... */
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			tp->tcp_proc_time[SND_BLOCKED] += (crtsc - ts_val);
			tp->tcp_cnt_counters[SND_BLOCKED]++;
		}
	/* ... */
	if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) &&
	    TSTMP_GT(rack->r_ctl.rc_last_output_to, cts)) {
		early = rack->r_ctl.rc_last_output_to - cts;
	}
	if (delayed && (rack->rc_always_pace == 1)) {
		rack->r_ctl.rc_agg_delayed += delayed;
		rack->r_late = 1;
	} else if (early && (rack->rc_always_pace == 1)) {
		rack->r_ctl.rc_agg_early += early;
		rack->r_early = 1;
	} else if (rack->rc_always_pace == 0) {
		/* Non-paced we are not late */
		rack->r_ctl.rc_agg_delayed = rack->r_ctl.rc_agg_early = 0;
		rack->r_early = rack->r_late = 0;
	}
	/* ... */
	rack->r_ctl.rc_hpts_flags &= ~PACE_PKT_OUTPUT;
	rack->r_wanted_output = 0;
	rack->r_timer_override = 0;
	if ((tp->t_state != rack->r_state) &&
	    TCPS_HAVEESTABLISHED(tp->t_state)) {
		/* ... */
	segsiz = min(ctf_fixed_maxseg(tp), rack->r_ctl.rc_pace_min_segs);
	/* ... */
	if (rack->r_ctl.rc_pace_max_segs == 0)
		pace_max_seg = rack->rc_user_set_max_segs * segsiz;
	else
		pace_max_seg = rack->r_ctl.rc_pace_max_segs;
	if ((rack->r_fast_output) &&
	    /* ... */
	    (tp->rcv_numsacks == 0)) {
		/* ... */
		inp = rack->rc_inp;
		so = inp->inp_socket;
		sb = &so->so_snd;
		/* ... */
	/* We need to re-pin since fast_output un-pinned */
	inp = rack->rc_inp;
	/* ... */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    ((tp->t_state == TCPS_SYN_RECEIVED) ||
	    (tp->t_state == TCPS_SYN_SENT)) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&		/* initial SYN or SYN|ACK sent */
	    (tp->t_rxtshift == 0)) {			/* not a retransmit */
		/* ... */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (tp->t_idle_reduce) {
		if (idle && (TICKS_2_USEC(ticks - tp->t_rcvtime) >= tp->t_rxtcur))
			/* ... */
	}
	tp->t_flags &= ~TF_LASTIDLE;
	if (tp->t_flags & TF_MORETOCOME) {
		tp->t_flags |= TF_LASTIDLE;
		/* ... */
	}
	if ((tp->snd_una == tp->snd_max) &&
	    rack->r_ctl.rc_went_idle_time &&
	    (cts > rack->r_ctl.rc_went_idle_time)) {
		tot_idle = (cts - rack->r_ctl.rc_went_idle_time);
		/* ... */
		if (rack->in_probe_rtt == 0) {
			rack->r_ctl.rc_lower_rtt_us_cts = cts;
			rack->r_ctl.rc_time_probertt_entered = rack->r_ctl.rc_lower_rtt_us_cts;
			rack->r_ctl.rc_time_probertt_starts = rack->r_ctl.rc_lower_rtt_us_cts;
			rack->r_ctl.rc_time_of_last_probertt = rack->r_ctl.rc_lower_rtt_us_cts;
		}
	}
	if (/* ... */
	    (rack->r_ctl.fsb.tcp_ip_hdr) &&
	    (rack->r_fsb_inited == 0) &&
	    (rack->r_state != TCPS_CLOSED))
		rack_init_fsb_block(tp, rack, tcp_outflags[tp->t_state]);
	if (rack->rc_sendvars_notset == 1) {
		rack->rc_sendvars_notset = 0;
		/*
		 * Make sure any TCP timers (keep-alive) are not running.
		 */
	}
	if ((rack->rack_no_prr == 1) &&
	    (rack->rc_always_pace == 0)) {
		/*
		 * ... no-pacing enabled and prr is turned off ...
		 */
		rack->rack_no_prr = 0;
	}
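	/*
	 * Path-capacity-measurement (PCM) bookkeeping: after an idle
	 * period the rounds spent idle (tot_idle / srtt) are credited
	 * towards the every-N-rounds schedule, and pcm_needed is flagged
	 * once the quota is reached. A PCM round sends a larger than
	 * normal burst (pcm_max_seg, at least 10 segments) so that the
	 * ack spacing can be used to estimate path capacity, but only
	 * when both cwnd and the peer's window have room for it.
	 */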
	if ((rack->pcm_enabled == 1) &&
	    (rack->pcm_needed == 0) &&
	    /* ... */
		if (tp->t_srtt)
			rtts_idle = tot_idle / tp->t_srtt;
		/* ... */
		rnds = rack->r_ctl.current_round - rack->r_ctl.last_pcm_round;
		rack->r_ctl.pcm_idle_rounds += rtts_idle;
		if ((rnds + rack->r_ctl.pcm_idle_rounds) >= rack_pcm_every_n_rounds) {
			rack->pcm_needed = 1;
			rack_log_pcm(rack, 8, rack->r_ctl.last_pcm_round, rtts_idle, rack->r_ctl.current_round);
		}
	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (rack->r_ctl.pcm_max_seg == 0)) {
		/* ... */
		rack->r_ctl.pcm_max_seg = rc_init_window(rack);
		if (rack->r_ctl.pcm_max_seg < (ctf_fixed_maxseg(tp) * 10)) {
			/* ... */
			rack->r_ctl.pcm_max_seg = ctf_fixed_maxseg(tp) * 10;
		}
	}
	if ((rack->r_ctl.pcm_max_seg != 0) && (rack->pcm_needed == 1)) {
		if (tp->snd_wnd > ctf_outstanding(tp))
			rw_avail = tp->snd_wnd - ctf_outstanding(tp);
		/* ... */
		if (tp->snd_cwnd > ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked))
			cwa = tp->snd_cwnd - ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		/* ... */
		if ((cwa >= rack->r_ctl.pcm_max_seg) &&
		    (rw_avail > rack->r_ctl.pcm_max_seg)) {
			/* ... */
			pace_max_seg = rack->r_ctl.pcm_max_seg;
			/* ... */
			rack->r_fast_output = 0;
			/* ... */
			    cwa, rack->r_ctl.pcm_max_seg, rw_avail);
	}
	sb_offset = tp->snd_max - tp->snd_una;
	cwnd_to_use = rack->r_ctl.cwnd_to_use = tp->snd_cwnd;
	flags = tcp_outflags[tp->t_state];
	while (rack->rc_free_cnt < rack_free_cache) {
		/* ... */
		so = inp->inp_socket;
		sb = &so->so_snd;
		/* ... */
		TAILQ_INSERT_TAIL(&rack->r_ctl.rc_free, rsm, r_tnext);
		rack->rc_free_cnt++;
	}
	/* ... */
	SOCK_SENDBUF_LOCK(inp->inp_socket);
	so = inp->inp_socket;
	sb = &so->so_snd;
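	/*
	 * Choose what to send, in strict priority order: first anything
	 * the retransmit timer queued (rc_resend), then retransmissions
	 * forced by a collapsed receive window, then a RACK-detected loss
	 * retransmission (the branch that may enter fast recovery below),
	 * and only then a tail loss probe (rc_tlpsend) or new data.
	 */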
	if (rack->r_ctl.rc_resend) {
		/* ... */
		rsm = rack->r_ctl.rc_resend;
		rack->r_ctl.rc_resend = NULL;
		len = rsm->r_end - rsm->r_start;
		/* ... */
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
		    /* ... */
		    rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
	} else if (rack->r_collapse_point_valid &&
	    /* ... */
		tcp_trace_point(rack->rc_tp, TCP_TP_COLLAPSED_RXT);
		rack->r_ctl.last_collapse_point = rsm->r_end;
		/* ... */
		if (SEQ_GEQ(rack->r_ctl.last_collapse_point,
		    rack->r_ctl.high_collapse_point))
			rack->r_collapse_point_valid = 0;
		/* ... */
		len = rsm->r_end - rsm->r_start;
		sb_offset = rsm->r_start - tp->snd_una;
	/* ... */
		if ((!IN_FASTRECOVERY(tp->t_flags)) &&
		    ((rsm->r_flags & RACK_MUST_RXT) == 0) &&
		    ((tp->t_flags & TF_WASFRECOVERY) == 0)) {
			/* Enter recovery if not induced by a time-out */
			rack_cong_signal(tp, CC_NDUPACK, tp->snd_una, __LINE__);
		}
		if (SEQ_LT(rsm->r_start, tp->snd_una)) {
			/* ... */
			    tp, rack, rsm, rsm->r_start, tp->snd_una);
		}
		len = rsm->r_end - rsm->r_start;
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
		    /* ... */
		    rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
	} else if (rack->r_ctl.rc_tlpsend) {
		/* ... */
		rsm = rack->r_ctl.rc_tlpsend;
		/* ... */
		rsm->r_flags |= RACK_TLP;
		rack->r_ctl.rc_tlpsend = NULL;
		/* ... */
		tlen = rsm->r_end - rsm->r_start;
		/* ... */
		KASSERT(SEQ_LEQ(tp->snd_una, rsm->r_start),
		    /* ... */
		    rsm->r_start, tp->snd_una, tp, rack, rsm));
		sb_offset = rsm->r_start - tp->snd_una;
		cwin = min(tp->snd_wnd, tlen);
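	/*
	 * r_must_retran handling: after an RTO on a non-SACK connection,
	 * or after an MTU change, everything outstanding at that point
	 * was marked RACK_MUST_RXT and has to be resent. The checks below
	 * take the head of the transmit map and clear the
	 * must-retransmit state once nothing marked remains.
	 */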
	if (rack->r_must_retran &&
	    /* ... */
	    (SEQ_GT(tp->snd_max, tp->snd_una)) &&
	    /* ... */
		/*
		 * a) This is a non-sack connection, we had a time-out ...
		 */
		sendwin = min(tp->snd_wnd, tp->snd_cwnd);
		flight = ctf_flight_size(tp, rack->r_ctl.rc_out_at_rto);
		/* ... */
		so = inp->inp_socket;
		sb = &so->so_snd;
		/*
		 * ... outstanding/not-acked should be marked.
		 */
		rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
		/* ... */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
			so = inp->inp_socket;
			sb = &so->so_snd;
		/* ... */
		if ((rsm->r_flags & RACK_MUST_RXT) == 0) {
			/* ... */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		}
		/* ... */
		len = rsm->r_end - rsm->r_start;
		sb_offset = rsm->r_start - tp->snd_una;
		/* ... */
		if ((rack->full_size_rxt == 0) &&
		    (rack->shape_rxt_to_pacing_min == 0) &&
		    /* ... */
		else if (rack->shape_rxt_to_pacing_min &&
		    rack->gp_ready) {
	/* ... */
	if (/* ... */
	    (rack->r_ctl.rc_num_maps_alloced >= V_tcp_map_entries_limit)) {
		/* ... */
		if (!rack->alloc_limit_reported) {
			rack->alloc_limit_reported = 1;
		}
		so = inp->inp_socket;
		sb = &so->so_snd;
	}
	if (rsm && (rsm->r_flags & RACK_HAS_FIN)) {
		/* ... */
		len--;
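	/*
	 * When the retransmission candidate is eligible (fsb initialized,
	 * no FIN on the map entry), the fragment below appears to hand it
	 * to the retransmit fast path instead of the slow path that
	 * follows.
	 */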
	if (rsm && rack->r_fsb_inited &&
	    /* ... */
	    ((rsm->r_flags & RACK_HAS_FIN) == 0)) {
		/* ... */
		so = inp->inp_socket;
		sb = &so->so_snd;
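	/*
	 * Shared cwnd: when several connections to the same peer are
	 * allowed to share a congestion window, allocate/activate the
	 * shared state here and replace cwnd_to_use with the updated
	 * shared value.
	 */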
	if ((tp->t_flags2 & TF2_TCP_SCWND_ALLOWED) &&
	    rack->rack_enable_scwnd) {
		/* ... */
		if (rack->gp_ready &&
		    (rack->rack_attempted_scwnd == 0) &&
		    (rack->r_ctl.rc_scw == NULL) &&
		    tp->t_lib) {
			/* ... */
			rack->rack_attempted_scwnd = 1;
			rack->r_ctl.rc_scw = tcp_shared_cwnd_alloc(tp,
			    &rack->r_ctl.rc_scw_index,
			    /* ... */
		if (rack->r_ctl.rc_scw &&
		    (rack->rack_scwnd_is_idle == 1) &&
		    sbavail(&so->so_snd)) {
			/* ... */
			tcp_shared_cwnd_active(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
			rack->rack_scwnd_is_idle = 0;
		}
		if (rack->r_ctl.rc_scw) {
			/* ... */
			rack->r_ctl.cwnd_to_use = cwnd_to_use = tcp_shared_cwnd_update(rack->r_ctl.rc_scw,
			    rack->r_ctl.rc_scw_index,
			    tp->snd_cwnd, tp->snd_wnd, segsiz);
	/* ... */
	if (tp->t_flags & TF_NEEDFIN)
		/* ... */
	if (tp->t_flags & TF_NEEDSYN)
		/* ... */
	end_rsm = TAILQ_LAST_FAST(&rack->r_ctl.rc_tmap, rack_sendmap, r_tnext);
	/* ... */
	    (TCPS_HAVEESTABLISHED(tp->t_state) ||
	    (tp->t_flags & TF_FASTOPEN))) {
		/* ... */
		if (SEQ_GT(tp->snd_max, tp->snd_una) && avail)
			sb_offset = tp->snd_max - tp->snd_una;
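		/*
		 * Sizing new data: a TLP that wants new data
		 * (rc_tlp_new_data) is capped by both what the send buffer
		 * holds and the peer's window; otherwise, outside of
		 * recovery we take what is available, and inside recovery
		 * with PRR the amount is further clipped to rc_prr_sndcnt.
		 */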
		if ((IN_FASTRECOVERY(tp->t_flags) == 0) || rack->rack_no_prr) {
			if (rack->r_ctl.rc_tlp_new_data) {
				/* ... */
				if (rack->r_ctl.rc_tlp_new_data > (uint32_t)(avail - sb_offset)) {
					rack->r_ctl.rc_tlp_new_data = (uint32_t)(avail - sb_offset);
				}
				if ((rack->r_ctl.rc_tlp_new_data + sb_offset) > tp->snd_wnd) {
					if (tp->snd_wnd > sb_offset)
						len = tp->snd_wnd - sb_offset;
					/* ... */
				}
				/* ... */
				len = rack->r_ctl.rc_tlp_new_data;
				/* ... */
				rack->r_ctl.rc_tlp_new_data = 0;
			}
			/* ... */
			if ((rack->r_ctl.crte == NULL) &&
			    IN_FASTRECOVERY(tp->t_flags) &&
			    (rack->full_size_rxt == 0) &&
			    (rack->shape_rxt_to_pacing_min == 0) &&
			    /* ... */
			} else if (rack->shape_rxt_to_pacing_min &&
			    rack->gp_ready) {
			/* ... */
			outstanding = tp->snd_max - tp->snd_una;
			if ((rack->r_ctl.rc_prr_sndcnt + outstanding) > tp->snd_wnd) {
				if (tp->snd_wnd > outstanding) {
					len = tp->snd_wnd - outstanding;
					/* ... */
				}
				/* ... */
				len = avail - sb_offset;
			}
			/* ... */
			len = avail - sb_offset;
			/* ... */
			if (len > rack->r_ctl.rc_prr_sndcnt) {
				len = rack->r_ctl.rc_prr_sndcnt;
			}
			/*
			 * ... let us send a lot as well :-)
			 */
			if (rack->r_ctl.rc_prr_sendalot == 0) {
				/* ... */
		leftinsb = sbavail(sb) - sb_offset;
		/* ... */
	} else if (!TCPS_HAVEESTABLISHED(tp->t_state)) {
		/* ... */
		    !(tp->t_flags & TF_FASTOPEN)) {
		/*
		 * ... SYN-SENT state and if segment contains data and if we
		 * don't know ...
		 */
		/* ... */
		    SEQ_GT(tp->snd_max, tp->snd_una) &&
		    /* ... */
		    (tp->t_rxtshift == 0))) {
		/* ... */
		if ((tp->t_flags & TF_FASTOPEN) &&
		    (tp->t_state == TCPS_SYN_RECEIVED))
			/* ... */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		/* ... */
	/*
	 * ...
	 * - When retransmitting SYN|ACK on a passively-created socket ...
	 * - When retransmitting SYN on an actively created socket ...
	 * - When sending a zero-length cookie (cookie request) on an ...
	 * - When the socket is in the CLOSED state (RST is being sent)
	 */
	if ((tp->t_flags & TF_FASTOPEN) &&
	    (((flags & TH_SYN) && (tp->t_rxtshift > 0)) ||
	    ((tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_tfo_client_cookie_len == 0)) ||
	    /* ... */
	/* Without fast-open there should never be data sent on a SYN */
	if ((flags & TH_SYN) && !(tp->t_flags & TF_FASTOPEN)) {
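	/*
	 * Zero and tiny peer windows: with nothing outstanding and
	 * snd_wnd == 0 we enter persist rather than transmit; with a
	 * window smaller than min(high_rwnd/2, minseg) we likewise avoid
	 * dribbling out tiny segments (classic silly-window avoidance)
	 * while more data remains queued.
	 */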
	if ((tp->snd_wnd == 0) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
	    (tp->snd_una == tp->snd_max) &&
	    /* ... */
		rack_enter_persist(tp, rack, cts, tp->snd_una);
	/* ... */
	if ((tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
	    /* ... */
	    (len < (int)(sbavail(sb) - sb_offset))) {
		/* ... */
		if (tp->snd_max == tp->snd_una) {
			/* ... */
			rack_enter_persist(tp, rack, cts, tp->snd_una);
		}
	} else if (/* ... */
	    (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
	    (len < (int)(sbavail(sb) - sb_offset)) &&
	    /* ... */
	} else if (((tp->snd_wnd - ctf_outstanding(tp)) <
	    min((rack->r_ctl.rc_high_rwnd/2), minseg)) &&
	    (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * segsiz)) &&
	    (len < (int)(sbavail(sb) - sb_offset)) &&
	    (TCPS_HAVEESTABLISHED(tp->t_state))) {
		/* ... */
	} else if ((rack->r_ctl.crte != NULL) &&
	    (tp->snd_wnd >= (pace_max_seg * max(1, rack_hw_rwnd_factor))) &&
	    /* ... */
	    (ctf_flight_size(tp, rack->r_ctl.rc_sacked) >= (2 * segsiz)) &&
	    (len < (int)(sbavail(sb) - sb_offset))) {
		/*
		 * ... defeats the point of hw-pacing (i.e. to help us get ...
		 */
	/*
	 * ... presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP ...
	 */
	/*
	 * Pre-calculate here as we save another lookup into the darknesses ...
	 */
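	/*
	 * TSO eligibility: only plain established-data sends qualify; no
	 * UDP tunneling (t_port), no TCP-MD5 signatures, no IP options,
	 * and more than one segment of payload. The pre-calculated
	 * ipoptlen feeds this check.
	 */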
	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > segsiz &&
	    (tp->t_port == 0) &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    /* ... */
	outstanding = tp->snd_max - tp->snd_una;
	if (tp->t_flags & TF_SENTFIN) {
		/* ... */
		outstanding--;
	}
	/* ... */
	if ((rsm->r_flags & RACK_HAS_FIN) == 0)
		/* ... */
	recwin = lmin(lmax(sbspace(&so->so_rcv), 0),
	    (long)TCP_MAXWIN << tp->rcv_scale);
	/*
	 * ... conditions when len is non-zero:
	 * - We have a full segment (or more with TSO) - This is the last
	 *   ... NODELAY - we've timed out (e.g. persist timer) - we have more
	 *   ... limited the window size) - we need to retransmit
	 */
	if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
	    (idle || (tp->t_flags & TF_NODELAY)) &&
	    /* ... */
	    (tp->t_flags & TF_NOPUSH) == 0) {
		/* ... */
		if ((tp->snd_una == tp->snd_max) && len) {	/* Nothing outstanding */
			/* ... */
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) {
			/* ... */
		if (((tp->snd_wnd - ctf_outstanding(tp)) < segsiz) &&
		    /* ... */
	/*
	 * ... remote end starts to send again the ACK clock takes over and ...
	 */
	/*
	 * ... pending (it will get piggy-backed on it) or the remote side
	 * already has done a half-close and won't send more data. Skip
	 * this if the connection is in T/TCP half-open state.
	 */
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * ... tp->rcv_scale.
		 */
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			/* ... */
			adv -= oldwin;
		}
		/* ... */
		if (oldwin >> tp->rcv_scale >= (adv + oldwin) >> tp->rcv_scale)
			/* ... */
		if (/* ... */
		    (adv >= (int32_t)(so->so_rcv.sb_hiwat / 4) ||
		    recwin <= (int32_t)(so->so_rcv.sb_hiwat / 8) ||
		    so->so_rcv.sb_hiwat <= 8 * segsiz)) {
			/* ... */
		if (2 * adv >= (int32_t) so->so_rcv.sb_hiwat) {
			/* ... */
	/*
	 * ... is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW) {
		/* ... */
	if (((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) {
		/* ... */
	if (/* ... */
	    (tp->snd_max == tp->snd_una)) {
		/* ... */
	if ((tp->t_flags & TF_FASTOPEN) == 0 &&
	    /* ... */
	    (sbused(sb) == (tp->snd_max - tp->snd_una)) &&
	    ((tp->snd_max - tp->snd_una) <= segsiz)) {
		/*
		 * ... the peer wait for the delayed-ack timer to run off ...
		 */
	rack->r_ctl.fsb.recwin = recwin;
	if (/* ... */
	    rack->r_fsb_inited &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    ((IN_RECOVERY(tp->t_flags)) == 0) &&
	    (rack->r_must_retran == 0) &&
	    ((tp->t_flags & TF_NEEDFIN) == 0) &&
	    /* ... */
	    ((orig_len - len) >= segsiz) &&
	    /* ... */
		rack->r_fast_output = 0;
	/* ... */
	if (SEQ_GT(tp->snd_max, tp->snd_nxt))
		tp->snd_nxt = tp->snd_max;
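	/*
	 * App-limited marking: when we stop sending because the
	 * application has no more data (rather than cwnd or rwnd), tag
	 * the last map entry r_just_ret / RACK_APP_LIMITED and link it
	 * into the app-limited list, so the goodput measurement code can
	 * discount these periods.
	 */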
		uint32_t seq = tp->gput_ack;

		rsm = tqhash_max(rack->r_ctl.tqh);
		/*
		 * Mark the last sent that we just-returned (hinting ...)
		 */
		rsm->r_just_ret = 1;
		/* ... */
		rack->r_ctl.rc_agg_delayed = 0;
		rack->r_early = 0;
		rack->r_late = 0;
		rack->r_ctl.rc_agg_early = 0;
		if (/* ... */
		    min(max(segsiz, (rack->r_ctl.rc_high_rwnd/2)),
		    minseg)) >= tp->snd_wnd) {
			/* ... */
			if (IN_FASTRECOVERY(tp->t_flags))
				rack->r_ctl.rc_prr_sndcnt = 0;
		/* We are limited by what's available -- app limited */
			/* ... */
			if (IN_FASTRECOVERY(tp->t_flags))
				rack->r_ctl.rc_prr_sndcnt = 0;
		} else if (/* ... */
		    ((tp->t_flags & TF_NODELAY) == 0) &&
		    /* ... */
			/*
			 * ... don't send. Another app-limited case.
			 */
		} else if (tp->t_flags & TF_NOPUSH) {
			/* ... */
		} else if (IN_FASTRECOVERY(tp->t_flags) &&
		    (rack->rack_no_prr == 0) &&
		    (rack->r_ctl.rc_prr_sndcnt < segsiz)) {
			/* ... */
		}
		/* ... */
		if ((tp->t_flags & TF_GPUTINPROG) &&
		    SEQ_GT(tp->gput_ack, tp->snd_max)) {
			tp->gput_ack = tp->snd_max;
			if ((tp->gput_ack - tp->gput_seq) < (MIN_GP_WIN * segsiz)) {
				/* ... */
				tp->t_flags &= ~TF_GPUTINPROG;
				rack_log_pacing_delay_calc(rack, (tp->gput_ack - tp->gput_seq) /*flex2*/,
				    rack->r_ctl.rc_gp_srtt /*flex1*/,
				    tp->gput_seq,
				    /* ... */
			}
		}
		/* ... */
		rsm = tqhash_max(rack->r_ctl.tqh);
		if (rsm && ((rsm->r_flags & RACK_APP_LIMITED) == 0)) {
			if (rack->r_ctl.rc_app_limited_cnt == 0)
				rack->r_ctl.rc_end_appl = rack->r_ctl.rc_first_appl = rsm;
			/* ... */
				if (rack->r_ctl.rc_end_appl)
					rack->r_ctl.rc_end_appl->r_nseq_appl = rsm->r_start;
				rack->r_ctl.rc_end_appl = rsm;
			/* ... */
			rsm->r_flags |= RACK_APP_LIMITED;
			rack->r_ctl.rc_app_limited_cnt++;
		}
		/* ... */
		    rack->r_ctl.rc_app_limited_cnt, seq,
		    tp->gput_ack, 0, 0, 4, __LINE__, NULL, 0);
	/* ... */
	if ((tp->snd_max == tp->snd_una) &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    /* ... */
	    (sbavail(sb) > tp->snd_wnd) &&
	    (tp->snd_wnd < min((rack->r_ctl.rc_high_rwnd/2), minseg))) {
		/* Yes lets make sure to move to persist before timer-start */
		rack_enter_persist(tp, rack, rack->r_ctl.rc_rcvtime, tp->snd_una);
	}
	if (/* ... */
	    rack->r_ctl.rc_scw) {
		tcp_shared_cwnd_idle(rack->r_ctl.rc_scw, rack->r_ctl.rc_scw_index);
		rack->rack_scwnd_is_idle = 1;
	}
	/* ... */
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[SND_OUT_DATA]++;
		tp->tcp_proc_time[SND_OUT_DATA] += (crtsc - ts_val);
		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
	}
	/* ... */
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[SND_LIMITED]++;
		tp->tcp_proc_time[SND_LIMITED] += (crtsc - ts_val);
	}
	/* ... */
	if ((rack->r_ctl.crte != NULL) &&
	    /* ... */
	    ((rack->rc_hw_nobuf == 1) ||
	    /* ... */
		rack->r_ctl.rc_agg_delayed = 0;
		rack->r_ctl.rc_agg_early = 0;
		rack->r_early = 0;
		rack->r_late = 0;
	/* ... */
	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (sbused(sb) == (tp->snd_max - tp->snd_una)) &&
	    ((tp->snd_max - tp->snd_una) <= segsiz)) {
		/*
		 * ... the peer wait for the delayed-ack timer to run off ...
		 */
	}
	if (/* ... */
	    (rack->pcm_in_progress == 0) &&
	    (rack->r_ctl.pcm_max_seg > 0) &&
	    (len >= rack->r_ctl.pcm_max_seg)) {
		/* ... */
		rack_log_pcm(rack, 5, len, rack->r_ctl.pcm_max_seg, add_flag);
	/* ... */
		rack_log_pcm(rack, 6, len, rack->r_ctl.pcm_max_seg, add_flag);
	}
	/* ... */
		tp->t_flags2 |= TF2_PLPMTU_MAXSEGSNT;
	else
		tp->t_flags2 &= ~TF2_PLPMTU_MAXSEGSNT;
	/*
	 * ... be snd_max-1 else it's snd_max.
	 */
	/* ... */
		rack_seq = tp->iss;
	/* ... */
	    (tp->t_flags & TF_SENTFIN))
		rack_seq = tp->snd_max - 1;
	else
		rack_seq = tp->snd_max;
	/* ... */
		rack_seq = rsm->r_start;
	/*
	 * ... established connection segments. Options for SYN-ACK segments
	 * ...
	 */
	if ((tp->t_flags & TF_NOOPT) == 0) {
		/* ... */
			to.to_mss = tcp_mssopt(&inp->inp_inc);
			if (tp->t_port)
				to.to_mss -= V_tcp_udp_tunneling_overhead;
			/* ... */
			if ((tp->t_flags & TF_FASTOPEN) &&
			    (tp->t_rxtshift == 0)) {
				if (tp->t_state == TCPS_SYN_RECEIVED) {
					/* ... */
					    (u_int8_t *)&tp->t_tfo_cookie.server;
					/* ... */
				} else if (tp->t_state == TCPS_SYN_SENT) {
					/* ... */
					    tp->t_tfo_client_cookie_len;
					/* ... */
					    tp->t_tfo_cookie.client;
					/* ... */
				}
			}
		/* ... */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
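		/*
		 * Timestamp option and receive-path RTT probing: normally
		 * tsval is the current time, but while a receive-path RTT
		 * sample is armed (r_rcvpath_rtt_up) the block below keeps
		 * re-using the ms timestamp recorded at arm time for the
		 * ack-probe. On an otherwise idle connection a new sample
		 * is armed at most once per RCV_PATH_RTT_MS, noting the ms
		 * timestamp and decrementing rack_seq, apparently so the
		 * segment elicits an immediate ack from the peer.
		 */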
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			/* ... */
			if ((rack->r_rcvpath_rtt_up == 1) &&
			    (ms_cts == rack->r_ctl.last_rcv_tstmp_for_rtt)) {
				/*
				 * ... our ack-probe.
				 */
			}
			/* ... */
			to.to_tsval = ts_to_use + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			/* ... */
			if (/* ... */
			    (TCPS_HAVEESTABLISHED(tp->t_state)) &&
			    ((ms_cts - rack->r_ctl.last_rcv_tstmp_for_rtt) > RCV_PATH_RTT_MS) &&
			    (tp->snd_una == tp->snd_max) &&
			    /* ... */
			    (rack->r_ctl.current_round != 0) &&
			    /* ... */
			    (rack->r_rcvpath_rtt_up == 0)) {
				rack->r_ctl.last_rcv_tstmp_for_rtt = ms_cts;
				rack->r_ctl.last_time_of_arm_rcv = cts;
				rack->r_rcvpath_rtt_up = 1;
				/* ... */
				rack_seq--;
			}
		}
		/* ... */
		if (tp->rfbuf_ts == 0 &&
		    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
			tp->rfbuf_ts = ms_cts;
		}
		/* ... */
		if (tp->t_flags & TF_SACK_PERMIT) {
			/* ... */
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    tp->rcv_numsacks > 0) {
				/* ... */
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
		/* TCP-MD5 (RFC2385). */
		if (tp->t_flags & TF_SIGNATURE)
			/* ... */
	if ((tp->t_flags & TF_FASTOPEN) && wanted_cookie &&
	    /* ... */
	if (tp->t_port) {
		/* ... */
			if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
				tp->tcp_cnt_counters[SND_OUT_FAIL]++;
				tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
			}
	/* ... */
	if (inp->inp_options)
		ipoptlen = inp->inp_options->m_len -
		    /* ... */
	if (len + optlen + ipoptlen > tp->t_maxseg) {
		/* ... */
			if_hw_tsomax = tp->t_tsomax;
			if_hw_tsomaxsegcount = tp->t_tsomaxsegcount;
			if_hw_tsomaxsegsize = tp->t_tsomaxsegsize;
			/* ... */
				max_len = (if_hw_tsomax - hdrlen -
				    /* ... */
			/* ... */
				max_len = (tp->t_maxseg - optlen);
			/* ... */
			len -= moff;
		/* ... */
		if (tp->t_flags & TF_NEEDFIN) {
			/* ... */
		if (optlen + ipoptlen >= tp->t_maxseg) {
			/* ... */
			len = tp->t_maxseg - optlen - ipoptlen;
	/* ... */
	if ((sbused(sb) == (tp->snd_max - tp->snd_una)) &&
	    ((tp->snd_max - tp->snd_una) <= segsiz)) {
		/*
		 * ... the peer wait for the delayed-ack timer to run off ...
		 */
	}
	/* ... */
	hw_tls = tp->t_nic_ktls_xmit != 0;
	/* ... */
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
		/* ... */
		if (len <= MHLEN - hdrlen - max_linkhdr && !hw_tls) {
			/* ... */
			m->m_len += len;
		/* ... */
			m->m_next = tcp_m_copym(
			    /* ... */
			if (len <= (tp->t_maxseg - optlen)) {
				/* ... */
			if (m->m_next == NULL) {
				/* ... */
		/* ... */
		if (rsm && (rsm->r_flags & RACK_TLP)) {
			/* ... */
		}
		tp->t_sndrexmitpack++;
		/* ... */
		stats_voi_update_abs_u32(tp->t_stats, VOI_TCP_RETXPB,
		    /* ... */
	/* ... */
		stats_voi_update_abs_u64(tp->t_stats, VOI_TCP_TXPB,
		    /* ... */
	/* ... */
		if (tp->t_flags & TF_ACKNOW)
			/* ... */
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	/* ... */
	m->m_pkthdr.rcvif = (struct ifnet *)0;
	/* ... */
	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
		/* ... */
			ip6 = (struct ip6_hdr *)rack->r_ctl.fsb.tcp_ip_hdr;
		/* ... */
			ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
		th = rack->r_ctl.fsb.th;
		udp = rack->r_ctl.fsb.udp;
		/* ... */
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
		/* ... */
			ulen = hdrlen + len - sizeof(struct ip);
		udp->uh_ulen = htons(ulen);
	/* ... */
		if (tp->t_port) {
			/* ... */
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip6_hdr);
			udp->uh_ulen = htons(ulen);
		}
		/* ... */
		tcpip_fillheaders(inp, tp->t_port, ip6, th);
	/* ... */
		if (tp->t_port) {
			/* ... */
			udp->uh_sport = htons(V_tcp_udp_tunneling_port);
			udp->uh_dport = tp->t_port;
			ulen = hdrlen + len - sizeof(struct ip);
			udp->uh_ulen = htons(ulen);
		}
		/* ... */
		tcpip_fillheaders(inp, tp->t_port, ip, th);
	/* ... */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
		/* ... */
	}
	if (TCPS_HAVERCVDSYN(tp->t_state) &&
	    (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))) {
		/* ... */
		if ((tp->t_state == TCPS_SYN_RECEIVED) &&
		    (tp->t_flags2 & TF2_ECN_SND_ECE))
			tp->t_flags2 &= ~TF2_ECN_SND_ECE;
		/* ... */
			ip6->ip6_flow &= ~htonl(IPTOS_ECN_MASK << 20);
			ip6->ip6_flow |= htonl(ect << 20);
		/* ... */
			ip->ip_tos &= ~IPTOS_ECN_MASK;
			ip->ip_tos |= ect;
	}
	/* ... */
	th->th_seq = htonl(rack_seq);
	th->th_ack = htonl(tp->rcv_nxt);
	/* ... */
	if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
	    /* ... */
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
		    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
			recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
	/* ... */
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	/* ... */
		recwin = roundup2(recwin, 1 << tp->rcv_scale);
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised a 0
	 * ...
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	tp->snd_up = tp->snd_una;	/* drag it along, it's deprecated */
	/* ... */
	if ((ipoptlen == 0) && (rack->r_ctl.fsb.tcp_ip_hdr) && rack->r_fsb_inited) {
		/* ... */
		memcpy(cpto, rack->r_ctl.fsb.tcp_ip_hdr, rack->r_ctl.fsb.tcp_ip_hdr_len);
		/* ... */
		th = (struct tcphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.th - rack->r_ctl.fsb.tcp_ip_hdr));
		/* ... */
		udp = (struct udphdr *)(cpto + ((uint8_t *)rack->r_ctl.fsb.udp - rack->r_ctl.fsb.tcp_ip_hdr));
	/* ... */
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
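	/*
	 * Checksum offload setup: fill m_pkthdr csum_flags/csum_data and
	 * the pseudo-header checksum for TCP or, when UDP tunneling is in
	 * use (t_port != 0), for the encapsulating UDP header, in both
	 * the IPv6 and IPv4 variants. The MD5 signature, when enabled, is
	 * computed first since it covers the final header contents.
	 */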
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
	/* ... */
	if (/* ... */
	    (u_char *)(th + 1) + (to.to_signature - opt)) != 0) {
		/* ... */
	}
	/* ... */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in6_cksum_pseudo(ip6, ulen, IPPROTO_UDP, 0);
			th->th_sum = htons(0);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in6_cksum_pseudo(ip6,
			    /* ... */
		}
	/* ... */
		if (tp->t_port) {
			m->m_pkthdr.csum_flags = CSUM_UDP;
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(ulen + IPPROTO_UDP));
			th->th_sum = htons(0);
		} else {
			m->m_pkthdr.csum_flags = CSUM_TCP;
			m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(sizeof(struct tcphdr) +
			    /* ... */
		}
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	/* ... */
		KASSERT(len > tp->t_maxseg - optlen,
		    /* ... */
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxseg - optlen;
	/* ... */
	if ((rack->r_ctl.crte != NULL) &&
	    (rack->rc_hw_nobuf == 0) &&
	    /* ... */
	if (tcp_bblogging_on(rack->rc_tp)) {
		/* ... */
		log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
		if (rack->rack_no_prr)
			log.u_bbr.flex1 = 0;
		else
			log.u_bbr.flex1 = rack->r_ctl.rc_prr_sndcnt;
		log.u_bbr.flex2 = rack->r_ctl.rc_pace_min_segs;
		log.u_bbr.flex3 = rack->r_ctl.rc_pace_max_segs;
		/* ... */
		log.u_bbr.flex6 = rack->r_ctl.rc_agg_early;
		log.u_bbr.applimited = rack->r_ctl.rc_agg_delayed;
		/* ... */
		log.u_bbr.cur_del_rate = rack->r_ctl.gp_bw;
		/* ... */
			if (rsm->r_flags & RACK_RWND_COLLAPSED) {
				rack_log_collapse(rack, rsm->r_start, rsm->r_end, 0, __LINE__, 5, rsm->r_flags, rsm);
				/* ... */
				counter_u64_add(rack_collapsed_win_rxt_bytes, (rsm->r_end - rsm->r_start));
			}
		/* ... */
		log.u_bbr.pkts_out = tp->t_maxseg;
		/* ... */
		log.u_bbr.inflight = ctf_flight_size(rack->rc_tp, rack->r_ctl.rc_sacked);
		if (rsm && (rsm->r_rtr_cnt > 0)) {
			/* ... */
			log.u_bbr.flex5 = rsm->r_fas;
			log.u_bbr.bbr_substate = rsm->r_bas;
		/* ... */
			log.u_bbr.bbr_substate = (uint8_t)((len + segsiz - 1) / segsiz);
		/* ... */
			log.u_bbr.delRate = rsm->r_flags;
			/* ... */
			log.u_bbr.delRate |= rack->r_must_retran;
		/* ... */
			log.u_bbr.delRate = rack->r_must_retran;
		/* ... */
		lgb = tcp_log_event(tp, th, &so->so_rcv, &so->so_snd, TCP_LOG_OUT, ERRNO_UNK,
		    /* ... */
	}
	/*
	 * m->m_pkthdr.len should have been set before cksum calculation,
	 * ...
	 */
	/* ... */
		rack->r_ctl.fsb.hoplimit = ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		/* ... */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));
		/* ... */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss)
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
		else
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		/* ... */
		if (tp->t_state == TCPS_SYN_SENT)
			/* ... */
		/* ... */
		    inp->in6p_outputopts,
		    &inp->inp_route6,
		    /* ... */
		if (error == EMSGSIZE && inp->inp_route6.ro_nh != NULL)
			mtu = inp->inp_route6.ro_nh->nh_mtu;
	/* ... */
		ip->ip_len = htons(m->m_pkthdr.len);
		/* ... */
		if (inp->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(inp, NULL);
		/* ... */
		rack->r_ctl.fsb.hoplimit = ip->ip_ttl;
		/* ... */
		if (V_path_mtu_discovery && tp->t_maxseg > V_tcp_minmss) {
			tp->t_flags2 |= TF2_PLPMTU_PMTUD;
			if (tp->t_port == 0 || len < V_tcp_minmss) {
				ip->ip_off |= htons(IP_DF);
			}
		} else {
			tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
		}
		/* ... */
		if (tp->t_state == TCPS_SYN_SENT)
			/* ... */
		/* ... */
		    inp->inp_options,
		    /* ... */
		    &inp->inp_route,
		    /* ... */
		if (error == EMSGSIZE && inp->inp_route.ro_nh != NULL)
			mtu = inp->inp_route.ro_nh->nh_mtu;
	/* ... */
		lgb->tlb_errno = error;
	/* ... */
		rack->pcm_in_progress = 1;
		rack->pcm_needed = 0;
		rack_log_pcm(rack, 7, len, rack->r_ctl.pcm_max_seg, add_flag);
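	/*
	 * Long-term bandwidth sampling: lt_bw accumulates bytes and time
	 * from lt_seq/lt_timemark forward. The first send starts the
	 * window; after that, whenever the unsampled span would overflow
	 * an int32 sequence delta (0x7fffffff) the bytes up to snd_una
	 * are folded into the running totals and the window is restarted.
	 */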
	if (rack->lt_bw_up == 0) {
		rack->r_ctl.lt_timemark = tcp_tv_to_lusec(&tv);
		rack->r_ctl.lt_seq = tp->snd_una;
		rack->lt_bw_up = 1;
	} else if (((rack_seq + len) - rack->r_ctl.lt_seq) > 0x7fffffff) {
		/*
		 * ...
		 */
		rack->r_ctl.lt_bw_bytes += (tp->snd_una - rack->r_ctl.lt_seq);
		rack->r_ctl.lt_seq = tp->snd_una;
		/* ... */
		if (tmark > rack->r_ctl.lt_timemark) {
			rack->r_ctl.lt_bw_time += (tmark - rack->r_ctl.lt_timemark);
			rack->r_ctl.lt_timemark = tmark;
		}
	}
	rack->forced_ack = 0;	/* If we send something zap the FA flag */
	/* ... */
		rack->rc_last_sent_tlp_past_cumack = 0;
		rack->rc_last_sent_tlp_seq_valid = 1;
		rack->r_ctl.last_sent_tlp_seq = rsm->r_start;
		rack->r_ctl.last_sent_tlp_len = rsm->r_end - rsm->r_start;
	/* ... */
	if (rack->rc_hw_nobuf) {
		rack->rc_hw_nobuf = 0;
		rack->r_ctl.rc_agg_delayed = 0;
		rack->r_early = 0;
		rack->r_late = 0;
		rack->r_ctl.rc_agg_early = 0;
	}
	/* ... */
		rack->rc_gp_saw_rec = 1;
	/* ... */
		if (cwnd_to_use > tp->snd_ssthresh) {
			/* ... */
			rack->rc_gp_saw_ca = 1;
		/* ... */
			rack->rc_gp_saw_ss = 1;
	/* ... */
	if (TCPS_HAVEESTABLISHED(tp->t_state) &&
	    (tp->t_flags & TF_SACK_PERMIT) &&
	    tp->rcv_numsacks > 0)
		/* ... */
	counter_u64_add(rack_out_size[(TCP_MSS_ACCT_ATIMER-1)], 1);
	/* ... */
	if ((rack->rack_no_prr == 0) &&
	    /* ... */
		if (rack->r_ctl.rc_prr_sndcnt >= len)
			rack->r_ctl.rc_prr_sndcnt -= len;
		else
			rack->r_ctl.rc_prr_sndcnt = 0;
	/* ... */
		rsm->r_flags |= RACK_TLP;
	/* ... */
		rsm->r_flags &= ~RACK_TLP;
	/* ... */
	if (/* ... */
	    (tp->snd_una == tp->snd_max))
		rack->r_ctl.rc_tlp_rxt_last_time = cts;
	/* ... */
		tcp_seq startseq = tp->snd_max;

		/* ... */
		rack->r_ctl.rc_loss_count += rsm->r_end - rsm->r_start;
		/* ... */
			rack->rc_tlp_in_progress = 0;
			rack->r_ctl.rc_tlp_cnt_out = 0;
		/* ... */
			rack->rc_tlp_in_progress = 1;
			rack->r_ctl.rc_tlp_cnt_out++;
	/* ... */
	if ((tp->snd_una == tp->snd_max) && (len > 0)) {
		/* ... */
		tp->t_acktime = ticks;
	}
	/* ... */
	    ((tp->t_flags & TF_SENTSYN) == 0)) {
		tp->snd_max++;
		tp->t_flags |= TF_SENTSYN;
	}
	/* ... */
	    ((tp->t_flags & TF_SENTFIN) == 0)) {
		tp->snd_max++;
		tp->t_flags |= TF_SENTFIN;
	}
	/* ... */
	tp->snd_max += len;
	if (rack->rc_new_rnd_needed) {
		rack_new_round_starts(tp, rack, tp->snd_max);
	}
	/* ... */
	if (tp->t_rtttime == 0) {
		tp->t_rtttime = ticks;
		tp->t_rtseq = startseq;
	}
	/* ... */
	if (/* ... */
	    ((tp->t_flags & TF_GPUTINPROG) == 0))
		/* ... */
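	/*
	 * After a successful send of new data, keep the fast-output path
	 * armed: debit what was just sent from fsb.left_to_send, refresh
	 * the cached mbuf/offset at the new snd_max, and fall back to the
	 * slow path once less than a segment remains. The spins below top
	 * up the current pacing burst while (pace_max_seg - len) still
	 * has room for another segment.
	 */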
	if (rack->r_fast_output && len) {
		if (rack->r_ctl.fsb.left_to_send > len)
			rack->r_ctl.fsb.left_to_send -= len;
		else
			rack->r_ctl.fsb.left_to_send = 0;
		if (rack->r_ctl.fsb.left_to_send < segsiz)
			rack->r_fast_output = 0;
		if (rack->r_fast_output) {
			rack->r_ctl.fsb.m = sbsndmbuf(sb, (tp->snd_max - tp->snd_una), &rack->r_ctl.fsb.off);
			rack->r_ctl.fsb.o_m_len = rack->r_ctl.fsb.m->m_len;
			rack->r_ctl.fsb.o_t_len = M_TRAILINGROOM(rack->r_ctl.fsb.m);
		}
	}
	/* ... */
	if (/* ... */
	    ((pace_max_seg - len) > segsiz)) {
		/* ... */
		n_len = (orig_len - len);
		orig_len -= len;
		pace_max_seg -= len;
		/* ... */
		sb_offset = tp->snd_max - tp->snd_una;
		/* Re-lock for the next spin */
		/* ... */
	}
	if (/* ... */
	    ((orig_len - len) > segsiz)) {
		/* ... */
		n_len = (orig_len - len);
		orig_len -= len;
		/* ... */
		sb_offset = tp->snd_max - tp->snd_una;
		/* Re-lock for the next spin */
		/* ... */
	}
	/* ... */
		rack->r_ctl.rc_agg_delayed = 0;
		rack->r_early = 0;
		rack->r_late = 0;
		rack->r_ctl.rc_agg_early = 0;
	/* ... */
		tp->t_softerror = error;
		/* ... */
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			tp->tcp_cnt_counters[SND_OUT_FAIL]++;
			tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
		}
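	/*
	 * ENOBUFS backoff: each consecutive buffer failure grows
	 * rc_enobuf (capped at 0x7f) and the pacing delay is stretched to
	 * (1 + rc_enobuf) milliseconds, so a congested driver queue is
	 * probed ever more gently. Hardware-pacing failures are traced
	 * separately via TCP_TP_HWENOBUF.
	 */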
		if (rack->r_ctl.crte != NULL) {
			tcp_trace_point(rack->rc_tp, TCP_TP_HWENOBUF);
			if (tcp_bblogging_on(rack->rc_tp))
				/* ... */
		/* ... */
			tcp_trace_point(rack->rc_tp, TCP_TP_ENOBUF);
		pacing_delay = ((1 + rack->rc_enobuf) * HPTS_USEC_IN_MSEC);
		if (rack->rc_enobuf < 0x7f)
			rack->rc_enobuf++;
		/* ... */
		if (rack->r_ctl.crte != NULL) {
			/* ... */
			tcp_rl_log_enobuf(rack->r_ctl.crte);
		}
		/* ... */
			tp->t_flags &= ~TF_TSO;
		/* ... */
			saved_mtu = tp->t_maxseg;
			tcp_mss_update(tp, -1, mtu, NULL, NULL);
			if (saved_mtu > tp->t_maxseg) {
				/* ... */
			}
		/* ... */
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			tp->tcp_cnt_counters[SND_OUT_FAIL]++;
			tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
		}
		/* ... */
		if (TCPS_HAVERCVDSYN(tp->t_state)) {
			tp->t_softerror = error;
			/* ... */
		}
		/* ... */
		if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
			tp->tcp_cnt_counters[SND_OUT_FAIL]++;
			tp->tcp_proc_time[SND_OUT_FAIL] += (crtsc - ts_val);
		}
	/* ... */
	rack->rc_enobuf = 0;
	if (IN_FASTRECOVERY(tp->t_flags) && rsm)
		rack->r_ctl.retran_during_recovery += len;
	/* ... */
	if (recwin > 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	/* ... */
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	/* ... */
	rack->r_ent_rec_ns = 0;
	if (rack->r_must_retran) {
		/* ... */
			rack->r_ctl.rc_out_at_rto -= (rsm->r_end - rsm->r_start);
			if (SEQ_GEQ(rsm->r_end, rack->r_ctl.rc_snd_max_at_rto)) {
				/* ... */
				rack->r_must_retran = 0;
				rack->r_ctl.rc_out_at_rto = 0;
			}
		} else if (SEQ_GEQ(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
			/* ... */
			rack->r_must_retran = 0;
			rack->r_ctl.rc_out_at_rto = 0;
		}
	}
	rack->r_ctl.fsb.recwin = recwin;
	if ((tp->t_flags & (TF_WASCRECOVERY|TF_WASFRECOVERY)) &&
	    SEQ_GT(tp->snd_max, rack->r_ctl.rc_snd_max_at_rto)) {
		/* ... */
		tp->t_flags &= ~(TF_WASCRECOVERY|TF_WASFRECOVERY);
	}
	/* ... */
	if (/* ... */
	    rack->r_fsb_inited &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    ((IN_RECOVERY(tp->t_flags)) == 0) &&
	    (rack->r_must_retran == 0) &&
	    ((tp->t_flags & TF_NEEDFIN) == 0) &&
	    /* ... */
	    ((orig_len - len) >= segsiz) &&
	    /* ... */
		rack->r_fast_output = 0;
	/* ... */
	if (/* ... */
	    (rack->r_must_retran == 0) &&
	    rack->r_fsb_inited &&
	    TCPS_HAVEESTABLISHED(tp->t_state) &&
	    ((IN_RECOVERY(tp->t_flags)) == 0) &&
	    ((tp->t_flags & TF_NEEDFIN) == 0) &&
	    /* ... */
	    ((orig_len - len) >= segsiz) &&
	    /* ... */
		if (rack->r_fast_output) {
			/* ... */
		}
	/* ... */
	if (SEQ_GT(tp->snd_max, tp->snd_nxt))
		tp->snd_nxt = tp->snd_max;
	/* ... */
	crtsc = get_cyclecount() - ts_val;
	/* ... */
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[SND_OUT_DATA]++;
		tp->tcp_proc_time[SND_OUT_DATA] += crtsc;
		tp->tcp_cnt_counters[CNT_OF_MSS_OUT] += ((tot_len_this_send + segsiz - 1) / segsiz);
	}
	/* ... */
	if (tp->t_flags2 & TF2_TCP_ACCOUNTING) {
		tp->tcp_cnt_counters[SND_OUT_ACK]++;
		tp->tcp_proc_time[SND_OUT_ACK] += crtsc;
	}

	orig_val = rack->r_ctl.rc_pace_max_segs;
	rack_set_pace_segments(rack->rc_tp, rack, __LINE__, NULL);
	if (orig_val != rack->r_ctl.rc_pace_max_segs)
		/* ... */
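/*
 * rack_mtu_change(): when the path MTU shrinks, previously sent segments
 * may now exceed it, so every outstanding map entry is flagged
 * RACK_MUST_RXT|RACK_PMTU_CHG, r_must_retran is set, and the sack filter
 * is cleared; rack_output() will then resend everything at the new size.
 */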
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack->r_ctl.rc_pace_min_segs != ctf_fixed_maxseg(tp)) {
		/* ... */
		rack->r_fast_output = 0;
		rack->r_ctl.rc_out_at_rto = ctf_flight_size(tp,
		    rack->r_ctl.rc_sacked);
		rack->r_ctl.rc_snd_max_at_rto = tp->snd_max;
		rack->r_must_retran = 1;
		/* ... */
		TAILQ_FOREACH(rsm, &rack->r_ctl.rc_tmap, r_tnext) {
			rsm->r_flags |= (RACK_MUST_RXT|RACK_PMTU_CHG);
		}
	}
	sack_filter_clear(&rack->r_ctl.rack_sf, tp->snd_una);
	/* ... */
	tp->snd_nxt = tp->snd_max;
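/*
 * rack_set_dgp() switches the connection to dynamic goodput pacing:
 * always-pace with the DGP method, full-size retransmits, compressed
 * acks, shared cwnd enabled, dynamic gp multipliers (CA at 100%,
 * recovery at 90%), pace-to-cwnd fill mode, and PRR disabled. Fixed-rate
 * pacing and DGP are mutually exclusive.
 */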
	if (rack->dgp_on == 1)
		/* ... */
	if ((rack->use_fixed_rate == 1) &&
	    (rack->rc_always_pace == 1)) {
		/* ... */
	}
	if (rack->rc_always_pace == 1) {
		/* ... */
	}
	/* ... */
	rack->r_ctl.pacing_method |= RACK_DGP_PACING;
	rack->rc_fillcw_apply_discount = 0;
	rack->dgp_on = 1;
	rack->rc_always_pace = 1;
	rack->rc_pace_dnd = 1;
	rack->use_fixed_rate = 0;
	if (rack->gp_ready)
		/* ... */
	rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	rack->rack_attempt_hdwr_pace = 0;
	/* ... */
	rack->full_size_rxt = 1;
	rack->shape_rxt_to_pacing_min = 0;
	/* ... */
	rack->r_use_cmp_ack = 1;
	if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state) &&
	    rack->r_use_cmp_ack)
		rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
	/* ... */
	rack->rack_enable_scwnd = 1;
	/* ... */
	rack->rc_gp_dyn_mul = 1;
	/* ... */
	rack->r_ctl.rack_per_of_gp_ca = 100;
	/* ... */
	rack->r_rr_config = 3;
	/* ... */
	rack->r_ctl.rc_no_push_at_mrtt = 2;
	/* ... */
	rack->rc_pace_to_cwnd = 1;
	rack->rc_pace_fill_if_rttin_range = 0;
	rack->rtt_limit_mul = 0;
	/* ... */
	rack->rack_no_prr = 1;
	/* ... */
	rack->r_limit_scw = 1;
	/* ... */
	rack->r_ctl.rack_per_of_gp_rec = 90;
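/*
 * rack_set_profile() applies a canned set of the knobs above. Of note,
 * one profile keeps fill-cw with a discount applied as a DGP replacement
 * (taking the max of the dgp rate and the discounted fillcw rate), and
 * enabling or disabling pacing here must keep the pacing-method
 * accounting (RACK_REG_PACING, rack_hibeta) consistent.
 */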
	/*
	 * ... fill-cw the same settings that profile5 does to replace DGP.
	 * It then gets the max(dgp-rate, fillcw(discounted)).
	 */
	rack->rc_fillcw_apply_discount = 1;
	/* ... */
	if (rack->rc_always_pace == 1) {
		/* ... */
	}
	/* ... */
	rack->dgp_on = 0;
	rack->rc_hybrid_mode = 0;
	rack->use_fixed_rate = 0;
	/* ... */
		rack->rc_pace_to_cwnd = 1;
	else
		rack->rc_pace_to_cwnd = 0;
	/* ... */
		rack->r_ctl.pacing_method |= RACK_REG_PACING;
		rack->rc_always_pace = 1;
		if (rack->rack_hibeta)
			/* ... */
	/* ... */
		rack->rc_always_pace = 0;
	/* ... */
		rack->rc_rack_tmr_std_based = 1;
	/* ... */
		rack->rc_rack_use_dsack = 1;
	/* ... */
		rack->r_use_cmp_ack = 1;
	else
		rack->r_use_cmp_ack = 0;
	/* ... */
		rack->rack_no_prr = 1;
	else
		rack->rack_no_prr = 0;
	/* ... */
		rack->rc_gp_no_rec_chg = 1;
	else
		rack->rc_gp_no_rec_chg = 0;
	if (rack_enable_mqueue_for_nonpaced || rack->r_use_cmp_ack) {
		rack->r_mbuf_queue = 1;
		if (TCPS_HAVEESTABLISHED(rack->rc_tp->t_state))
			rack->rc_tp->t_flags2 |= TF2_MBUF_ACKCMP;
		rack->rc_tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	} else {
		rack->r_mbuf_queue = 0;
		rack->rc_tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
	}
	/* ... */
		rack->rack_enable_scwnd = 1;
	else
		rack->rack_enable_scwnd = 0;
	/* ... */
		rack->rc_gp_dyn_mul = 1;
		/* ... */
		rack->r_ctl.rack_per_of_gp_ca = rack_do_dyn_mul;
	/* ... */
		rack->r_ctl.rack_per_of_gp_ca = rack_per_of_gp_ca;
		rack->rc_gp_dyn_mul = 0;
	/* ... */
	rack->r_rr_config = 0;
	rack->r_ctl.rc_no_push_at_mrtt = 0;
	rack->rc_pace_fill_if_rttin_range = 0;
	rack->rtt_limit_mul = 0;
	/* ... */
		rack->rack_hdw_pace_ena = 1;
	else
		rack->rack_hdw_pace_ena = 0;
	/* ... */
		rack->rack_no_prr = 1;
	else
		rack->rack_no_prr = 0;
	/* ... */
		rack->r_limit_scw = 1;
	else
		rack->r_limit_scw = 0;

	/*
	 * No space, yikes -- fail out.
	 */
	/* ... */
	dol->optname = sopt_name;
	dol->optval = loptval;
	TAILQ_INSERT_TAIL(&rack->r_ctl.opt_list, dol, next);
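/*
 * process_hybrid_pacing() services a TCP_HYBRID_PACING request from the
 * application: it clears any fixed rate, looks up (or allocates) the
 * tcp_req entry covering the byte range starting at the current tail of
 * the send buffer, and either stops hybrid mode or records the caller's
 * cspr/hint_maxseg hints on that entry, moving the connection from DGP
 * to regular pacing accounting as needed.
 */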
/* From process_hybrid_pacing(): */
	/* Make sure no fixed rate is on */
	rack->use_fixed_rate = 0;
	rack->r_ctl.rc_fixed_pacing_rate_rec = 0;
	rack->r_ctl.rc_fixed_pacing_rate_ca = 0;
	rack->r_ctl.rc_fixed_pacing_rate_ss = 0;
	/* ... */
	sft = tcp_req_alloc_req_full(rack->rc_tp, &hybrid->req, tcp_tv_to_lusec(&tv), 0);
	if (sft == NULL) {
		rack->rc_tp->tcp_hybrid_error++;
		/* ... */
	}
	/* ... */
	seq = rack->rc_tp->snd_una + rack->rc_tp->t_inpcb.inp_socket->so_snd.sb_ccc;
	/* ... */
	hybrid->hybrid_flags &= TCP_HYBRID_PACING_USER_MASK;
	/* ... */
	seq = sft->start_seq;
	if ((hybrid->hybrid_flags & TCP_HYBRID_PACING_ENABLE) == 0) {
		/* ... */
		if (rack->rc_hybrid_mode) {
			/* ... */
			rack->rc_tp->tcp_hybrid_stop++;
		}
		/* ... */
	}
	if (rack->dgp_on == 0) {
		/* ... */
		rack->rc_tp->tcp_hybrid_error++;
		/* ... */
	}
	if (rack->rc_hybrid_mode == 0) {
		/* ... */
		rack->r_ctl.pacing_method |= RACK_REG_PACING;
		rack->rc_hybrid_mode = 1;
	}
	/* ... */
	if (rack->r_ctl.pacing_method & RACK_DGP_PACING) {
		/* ... */
		rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
	}
	/* ... */
	sft->hybrid_flags = hybrid->hybrid_flags | TCP_HYBRID_PACING_WASSET;
	if (hybrid->hybrid_flags & TCP_HYBRID_PACING_CSPR)
		sft->cspr = hybrid->cspr;
	else
		sft->cspr = 0;
	if (hybrid->hybrid_flags & TCP_HYBRID_PACING_H_MS)
		sft->hint_maxseg = hybrid->hint_maxseg;
	else
		sft->hint_maxseg = 0;
	/* ... */
	rack->rc_tp->tcp_hybrid_start++;
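/*
 * Hybrid pacing is armed per application request via the
 * TCP_HYBRID_PACING socket option.  A caller sketch; the request
 * structure's name and layout below are assumed from the fields this
 * function touches (req, hybrid_flags, cspr, hint_maxseg) and may not
 * match the real definition exactly:
 *
 *	struct tcp_hybrid_req hr = {0};
 *
 *	hr.hybrid_flags = TCP_HYBRID_PACING_ENABLE | TCP_HYBRID_PACING_CSPR;
 *	hr.cspr = rate;		(client-supplied pacing rate)
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_HYBRID_PACING,
 *	    &hr, sizeof(hr)) == -1)
 *		warn("TCP_HYBRID_PACING");
 *
 * Passing hybrid_flags without TCP_HYBRID_PACING_ENABLE set takes the
 * rc_hybrid_mode teardown path above instead.
 */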
/* From rack_stack_information(): */
	si->bytes_transmitted = tp->t_sndbytes;
	si->bytes_retransmitted = tp->t_snd_rxt_bytes;
/* From rack_process_option(): */
	/* ... */
	rack->rc_rack_tmr_std_based = 1;
	/* ... */
	rack->rc_rack_tmr_std_based = 0;
	/* ... */
	rack->rc_rack_use_dsack = 1;
	/* ... */
	rack->rc_rack_use_dsack = 0;
	/* ... */
	rack->r_ctl.pace_len_divisor = rack_default_pacing_divisor;
	/* ... */
	rack->r_ctl.pace_len_divisor = RL_MIN_DIVISOR;
	/* ... */
	rack->r_ctl.pace_len_divisor = optval;
	/* ... */
	rack->rack_hibeta = 1;
	/* ... */
	rack->r_ctl.saved_hibeta = optval;
	if (rack->rc_pacing_cc_set) {
		/* ... */
	}
	rack->r_ctl.rc_saved_beta = optval;
	/* ... */
	if (rack->rc_pacing_cc_set == 0) {
		/* ... */
	}
	rack->rack_hibeta = 0;
	if (rack->rc_pacing_cc_set) {
		/* ... */
	}
	/* ... */
	rack->r_ctl.timer_slop = optval;
	if (rack->rc_tp->t_srtt) {
		/* ... */
		RACK_TCPT_RANGESET(tp->t_rxtcur, RACK_REXMTVAL(tp),
		    /* ... */
		    rack->r_ctl.timer_slop);
	}
	/* ... */
	if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
		/* ... */
	}
	if (rack->rc_pacing_cc_set) {
		/* ... */
		if (CC_ALGO(tp)->ctl_output != NULL)
			error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
		/* ... */
	}
	/* ... */
	rack->r_ctl.rc_saved_beta_ecn = optval;
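/*
 * Both beta and beta_ecn reach new-reno through the generic
 * congestion-control ctl_output() hook rather than by poking cc_data
 * directly.  The shape of that call, as a sketch (CC_NEWRENO_BETA and
 * struct cc_newreno_opts are assumed names for the new-reno knob; they
 * are not shown in this excerpt):
 *
 *	struct cc_newreno_opts opt;
 *	struct sockopt sopt;
 *
 *	sopt.sopt_valsize = sizeof(struct cc_newreno_opts);
 *	sopt.sopt_dir = SOPT_SET;
 *	opt.name = CC_NEWRENO_BETA;
 *	opt.val = optval;
 *	error = CC_ALGO(tp)->ctl_output(&tp->t_ccv, &sopt, &opt);
 *
 * The strcmp() against CCALGONAME_NEWRENO above rejects the option up
 * front when any other congestion-control module is attached.
 */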
	if (rack->gp_ready) {
		/* ... */
	}
	/* ... */
	rack->defer_options = 1;
	/* ... */
	rack->defer_options = 0;
	/* ... */
	rack->r_ctl.req_measurements = optval;
	/* ... */
	rack->r_use_labc_for_rec = 1;
	/* ... */
	rack->r_use_labc_for_rec = 0;
	/* ... */
	rack->rc_labc = optval;
	/* ... */
	rack->r_up_only = 1;
	/* ... */
	rack->r_up_only = 0;
	/* ... */
	rack->r_ctl.fillcw_cap = loptval;
	/* ... */
	if ((rack->dgp_on == 1) &&
	    (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
		/* ... */
		rack->r_ctl.pacing_method |= RACK_REG_PACING;
		/* ... */
		rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
	}
	rack->r_ctl.bw_rate_cap = loptval;
	/* ... */
	if (rack->r_ctl.side_chan_dis_mask & HYBRID_DIS_MASK) {
		/* ... */
	}
	/* ... */
	rack->r_ctl.side_chan_dis_mask = optval;
	/* ... */
	rack->r_ctl.side_chan_dis_mask = 0;
	/* ... */
	if ((optval == 0) && (tp->t_flags2 & TF2_MBUF_ACKCMP)) {
		/* ... */
	} else if ((optval == 1) && (rack->r_use_cmp_ack == 0)) {
		rack->r_use_cmp_ack = 1;
		rack->r_mbuf_queue = 1;
		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	}
	if (rack->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state))
		tp->t_flags2 |= TF2_MBUF_ACKCMP;
	/* ... */
	rack->r_limit_scw = 1;
	/* ... */
	rack->r_limit_scw = 0;
	/* ... */
	rack->rc_pace_to_cwnd = 0;
	/* ... */
	rack->rc_pace_to_cwnd = 1;
	/* ... */
	rack->rc_pace_fill_if_rttin_range = 1;
	rack->rtt_limit_mul = optval;
	/* ... */
	rack->rc_pace_fill_if_rttin_range = 0;
	rack->rtt_limit_mul = 0;
	/* ... */
	rack->r_ctl.rc_no_push_at_mrtt = 0;
	/* ... */
	rack->r_ctl.rc_no_push_at_mrtt = optval;
	/* ... */
	rack->rack_enable_scwnd = 0;
	/* ... */
	rack->rack_enable_scwnd = 1;
	/* Now do we use the LRO mbuf-queue feature */
	/* ... */
	if (optval || rack->r_use_cmp_ack)
		rack->r_mbuf_queue = 1;
	else
		rack->r_mbuf_queue = 0;
	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	else
		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
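/*
 * Note the invariant maintained here (and re-established in several
 * handlers below): TF2_SUPPORTS_MBUFQ is set while mbuf queueing,
 * pacing, or compressed acks are in use, and is cleared only once all
 * three are off.
 */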
	/* ... */
	rack->rack_rec_nonrxt_use_cr = 0;
	/* ... */
	rack->rack_rec_nonrxt_use_cr = 1;
	/* ... */
	rack->rack_no_prr = 0;
	/* ... */
	rack->rack_no_prr = 1;
	/* ... */
	rack->no_prr_addback = 1;
	/* ... */
	rack->cspr_is_fcc = 1;
	/* ... */
	rack->cspr_is_fcc = 0;
	/* ... */
	rack->rc_gp_dyn_mul = 0;
	/* ... */
	rack->rc_gp_dyn_mul = 1;
	/* ... */
	rack->r_ctl.rack_per_of_gp_ca = optval;
	/* ... */
	rack->rack_tlp_threshold_use = optval;
	/* ... */
	rack->r_ctl.rc_tlp_cwnd_reduce = optval;
	/* ... */
	if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
		/* ... */
	}
	if (rack->rc_always_pace) {
		/* ... */
	}
	/* ... */
	rack->r_ctl.pacing_method |= RACK_REG_PACING;
	rack->rc_always_pace = 1;
	if (rack->rack_hibeta) {
		/* ... */
	}
	/* ... */
	if (rack->rc_always_pace == 1) {
		/* ... */
	}
	/* ... */
	if (rack->r_mbuf_queue || rack->rc_always_pace || rack->r_use_cmp_ack)
		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	else
		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
	/* ... */
	rack->r_ctl.init_rate = val;
	if (rack->rc_always_pace) {
		/* ... */
	}
	/* ... */
	rack->rc_force_max_seg = 1;
	/* ... */
	rack->rc_force_max_seg = 0;
	/* ... */
	rack->r_ctl.rc_user_set_min_segs = (0x0000ffff & optval);
	/* ... */
	if ((rack->dgp_on == 1) &&
	    (rack->r_ctl.pacing_method & RACK_DGP_PACING)) {
		/*
		 * If we set a max-seg and are doing DGP then
		 * ...
		 */
		rack->r_ctl.pacing_method |= RACK_REG_PACING;
		/* ... */
		rack->r_ctl.pacing_method &= ~RACK_DGP_PACING;
	}
	/* ... */
	rack->rc_user_set_max_segs = optval;
	/* ... */
	rack->rc_user_set_max_segs = MAX_USER_SET_SEG;
	/* ... */
	/* Set the fixed pacing rate in bytes per second (recovery) */
	/* ... */
	if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
		/* ... */
	}
	if (rack->dgp_on) {
		/* ... */
	}
	rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
		rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
		rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
	rack->use_fixed_rate = 1;
	if (rack->rack_hibeta) {
		/* ... */
	}
	/* ... elided: log rc_fixed_pacing_rate_ss/_ca/_rec, 0, 0, 8, ... */
	/* ... */
	/* Set the fixed pacing rate in bytes per second (slow start) */
	/* ... */
	if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
		/* ... */
	}
	if (rack->dgp_on) {
		/* ... */
	}
	rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_ca == 0)
		rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
		rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
	rack->use_fixed_rate = 1;
	if (rack->rack_hibeta) {
		/* ... */
	}
	/* ... elided: log rc_fixed_pacing_rate_ss/_ca/_rec, 0, 0, 8, ... */
	/* ... */
	/* Set the fixed pacing rate in bytes per second (congestion avoidance) */
	/* ... */
	if (rack->r_ctl.side_chan_dis_mask & CCSP_DIS_MASK) {
		/* ... */
	}
	if (rack->dgp_on) {
		/* ... */
	}
	rack->r_ctl.rc_fixed_pacing_rate_ca = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_ss == 0)
		rack->r_ctl.rc_fixed_pacing_rate_ss = optval;
	if (rack->r_ctl.rc_fixed_pacing_rate_rec == 0)
		rack->r_ctl.rc_fixed_pacing_rate_rec = optval;
	rack->use_fixed_rate = 1;
	if (rack->rack_hibeta) {
		/* ... */
	}
	/* ... elided: log rc_fixed_pacing_rate_ss/_ca/_rec, 0, 0, 8, ... */
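/*
 * The three fixed-rate handlers above are deliberately symmetric:
 * setting any one of the rec/ss/ca rates also seeds the other two if
 * they are still zero, so a single option call leaves a usable rate
 * for every phase, and use_fixed_rate is latched on in each case.
 */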
	rack->r_ctl.rack_per_of_gp_rec = optval;
	/* ... elided: log rack_per_of_gp_ss/_ca/_rec, 0, 0, 1, ... */
	/* ... */
	rack->r_ctl.rack_per_of_gp_ca = ca;
	/* ... elided: log rack_per_of_gp_ss/_ca/_rec, 0, 0, 1, ... */
	/* ... */
	rack->r_ctl.rack_per_of_gp_ss = ss;
	/* ... elided: log rack_per_of_gp_ss/_ca/_rec, 0, 0, 1, ... */
	/* ... */
	rack->r_rr_config = optval;
	/* ... */
	rack->r_rr_config = 0;
	/* ... */
	rack->rc_pace_dnd = 1;
	/* ... */
	rack->rc_pace_dnd = 0;
	/* ... */
	if (rack->r_rack_hw_rate_caps == 0)
		rack->r_rack_hw_rate_caps = 1;
	/* ... */
	rack->r_rack_hw_rate_caps = 0;
	/* ... */
	rack->r_ctl.rack_per_upper_bound_ca = val;
	/* ... */
	rack->r_ctl.rack_per_upper_bound_ss = val;
	/* ... */
	rack->r_ctl.gp_rnd_thresh = optval & 0x0ff;
	/* ... */
	rack->r_ctl.gate_to_fs = 1;
	/* ... */
	rack->r_ctl.gate_to_fs = 0;
	/* ... */
	rack->r_ctl.use_gp_not_last = 1;
	/* ... */
	rack->r_ctl.use_gp_not_last = 0;
	/* ... */
	rack->r_ctl.gp_gain_req = v;
	/* ... */
	rack->rc_initial_ss_comp = 1;
	rack->r_ctl.gp_rnd_thresh = 0;
	/* ... */
	rack->r_ctl.rc_split_limit = optval;
	/* ... */
	if (rack->rack_hdrw_pacing == 0) {
		rack->rack_hdw_pace_ena = 1;
		rack->rack_attempt_hdwr_pace = 0;
	}
	/* ... */
	rack->rack_hdw_pace_ena = 0;
	/* ... */
	if (rack->r_ctl.crte != NULL) {
		rack->rack_hdrw_pacing = 0;
		rack->rack_attempt_hdwr_pace = 0;
		tcp_rel_pacing_rate(rack->r_ctl.crte, tp);
		rack->r_ctl.crte = NULL;
	}
	/* ... */
	rack->r_ctl.rc_prr_sendalot = optval;
	/* ... */
	/* Minimum time between rack t-o's in ms */
	/* ... */
	rack->r_ctl.rc_min_to = optval;
	/* ... */
	rack->r_ctl.rc_early_recovery_segs = optval;
	/* ... */
	tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
	/* ... */
	tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
	/* ... */
	tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
	/* ... */
	tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
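/*
 * The hystart flags above are layered: CCF_HYSTART_ALLOWED must be
 * granted before the cwnd-shrinking and conservative-ssthresh
 * refinements can be added on top, while disabling clears all three
 * bits in one mask.
 */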
	/* ... */
	rack->r_ctl.rc_reorder_shift = optval;
	/* ... */
	rack->r_ctl.rc_reorder_fade = optval;
	/* ... */
	rack->r_ctl.rc_tlp_threshold = optval;
	/* ... */
	rack->use_rack_rr = 1;
	/* ... */
	rack->use_rack_rr = 0;
	/* ... */
	/* RACK added ms i.e. rack-rtt + reord + N */
	/* ... */
	rack->r_ctl.rc_pkt_delay = optval;
	/* ... */
	tp->t_delayed_ack = 0;
	/* ... */
	tp->t_delayed_ack = 1;
	if (tp->t_flags & TF_DELACK) {
		tp->t_flags &= ~TF_DELACK;
		tp->t_flags |= TF_ACKNOW;
	}
	/* ... */
	rack->r_ctl.rc_rate_sample_method = optval;
	/* ... */
	rack->r_use_hpts_min = 1;
	/*
	 * Must be between 2 - 80% to be a reduction, else
	 * ...
	 */
	rack->r_ctl.max_reduction = optval;
	/* ... */
	rack->r_use_hpts_min = 0;
	/* ... */
	rack->rc_gp_no_rec_chg = 1;
	/* ... */
	rack->rc_gp_no_rec_chg = 0;
	/* ... */
	rack->rc_skip_timely = 1;
	rack->r_ctl.rack_per_of_gp_rec = 90;
	rack->r_ctl.rack_per_of_gp_ca = 100;
	rack->r_ctl.rack_per_of_gp_ss = 250;
	/* ... */
	rack->rc_skip_timely = 0;
	/* ... */
	rack->use_lesser_lt_bw = 0;
	rack->dis_lt_bw = 1;
	/* ... */
	rack->use_lesser_lt_bw = 1;
	rack->dis_lt_bw = 0;
	/* ... */
	rack->use_lesser_lt_bw = 0;
	rack->dis_lt_bw = 0;
	/* ... */
	rack->rc_allow_data_af_clo = 1;
	/* ... */
	rack->rc_allow_data_af_clo = 0;
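/*
 * Every handler above shares one userland calling convention.  For
 * example, deferring option application until the stack has its first
 * goodput measurements (gp_ready):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <err.h>
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_OPTIONS,
 *	    &on, sizeof(on)) == -1)
 *		warn("TCP_DEFER_OPTIONS");
 *
 * (Usage sketch only; rack_set_sockopt() below shows how the value is
 * copied in and routed to this function.)
 */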
/*
 * From rack_inherit(): copy socket-option derived state from the
 * listen parent to a new connection.  Each block below also bumps a
 * change counter (elided in this excerpt) when it copies a field.
 */
	/*
	 * ... apply a read-lock to the parent (we are already
	 * locked on the new connection) ...
	 */
	if (par->t_fb != tp->t_fb) {
		/* ... */
	}
	/* ... */
	dest = (struct tcp_rack *)tp->t_fb_ptr;
	src = (struct tcp_rack *)par->t_fb_ptr;
	/* ... */
	/* Now copy out anything we wish to inherit i.e. things in socket-options */
	/* ... */
	if ((src->dgp_on) && (dest->dgp_on == 0)) {
		/* ... */
	}
	if (dest->full_size_rxt != src->full_size_rxt) {
		dest->full_size_rxt = src->full_size_rxt;
	}
	if (dest->shape_rxt_to_pacing_min != src->shape_rxt_to_pacing_min) {
		dest->shape_rxt_to_pacing_min = src->shape_rxt_to_pacing_min;
	}
	if (dest->rc_rack_tmr_std_based != src->rc_rack_tmr_std_based) {
		dest->rc_rack_tmr_std_based = src->rc_rack_tmr_std_based;
	}
	if (dest->rc_rack_use_dsack != src->rc_rack_use_dsack) {
		dest->rc_rack_use_dsack = src->rc_rack_use_dsack;
	}
	if (dest->r_ctl.pace_len_divisor != src->r_ctl.pace_len_divisor) {
		dest->r_ctl.pace_len_divisor = src->r_ctl.pace_len_divisor;
	}
	if (src->rack_hibeta != dest->rack_hibeta) {
		/* ... */
		if (src->rack_hibeta) {
			dest->r_ctl.rc_saved_beta = src->r_ctl.rc_saved_beta;
			dest->rack_hibeta = 1;
		} else
			dest->rack_hibeta = 0;
	}
	if (dest->r_ctl.timer_slop != src->r_ctl.timer_slop) {
		dest->r_ctl.timer_slop = src->r_ctl.timer_slop;
	}
	if (dest->r_ctl.rc_saved_beta_ecn != src->r_ctl.rc_saved_beta_ecn) {
		dest->r_ctl.rc_saved_beta_ecn = src->r_ctl.rc_saved_beta_ecn;
	}
	if (dest->r_ctl.req_measurements != src->r_ctl.req_measurements) {
		dest->r_ctl.req_measurements = src->r_ctl.req_measurements;
	}
	if (dest->r_up_only != src->r_up_only) {
		dest->r_up_only = src->r_up_only;
	}
	if (dest->r_ctl.fillcw_cap != src->r_ctl.fillcw_cap) {
		dest->r_ctl.fillcw_cap = src->r_ctl.fillcw_cap;
	}
	if (dest->r_ctl.bw_rate_cap != src->r_ctl.bw_rate_cap) {
		dest->r_ctl.bw_rate_cap = src->r_ctl.bw_rate_cap;
	}
	if (dest->r_ctl.side_chan_dis_mask != src->r_ctl.side_chan_dis_mask) {
		dest->r_ctl.side_chan_dis_mask = src->r_ctl.side_chan_dis_mask;
	}
	if (dest->r_limit_scw != src->r_limit_scw) {
		dest->r_limit_scw = src->r_limit_scw;
	}
	if (dest->rc_pace_to_cwnd != src->rc_pace_to_cwnd) {
		dest->rc_pace_to_cwnd = src->rc_pace_to_cwnd;
	}
	if (dest->rc_pace_fill_if_rttin_range != src->rc_pace_fill_if_rttin_range) {
		dest->rc_pace_fill_if_rttin_range = src->rc_pace_fill_if_rttin_range;
	}
	if (dest->rtt_limit_mul != src->rtt_limit_mul) {
		dest->rtt_limit_mul = src->rtt_limit_mul;
	}
	if (dest->r_ctl.rc_no_push_at_mrtt != src->r_ctl.rc_no_push_at_mrtt) {
		dest->r_ctl.rc_no_push_at_mrtt = src->r_ctl.rc_no_push_at_mrtt;
	}
	if (dest->rack_enable_scwnd != src->rack_enable_scwnd) {
		dest->rack_enable_scwnd = src->rack_enable_scwnd;
	}
	if (dest->r_use_cmp_ack != src->r_use_cmp_ack) {
		dest->r_use_cmp_ack = src->r_use_cmp_ack;
	}
	if (dest->r_mbuf_queue != src->r_mbuf_queue) {
		dest->r_mbuf_queue = src->r_mbuf_queue;
	}
	/* ... */
	if (dest->r_mbuf_queue != src->r_mbuf_queue) {
		dest->r_mbuf_queue = src->r_mbuf_queue;
	}
	if (dest->r_mbuf_queue || dest->rc_always_pace || dest->r_use_cmp_ack) {
		tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
	} else {
		tp->t_flags2 &= ~TF2_SUPPORTS_MBUFQ;
	}
	if (dest->r_use_cmp_ack && TCPS_HAVEESTABLISHED(tp->t_state)) {
		tp->t_flags2 |= TF2_MBUF_ACKCMP;
	}
	if (dest->rack_rec_nonrxt_use_cr != src->rack_rec_nonrxt_use_cr) {
		dest->rack_rec_nonrxt_use_cr = src->rack_rec_nonrxt_use_cr;
	}
	if (dest->rack_no_prr != src->rack_no_prr) {
		dest->rack_no_prr = src->rack_no_prr;
	}
	if (dest->no_prr_addback != src->no_prr_addback) {
		dest->no_prr_addback = src->no_prr_addback;
	}
	if (dest->cspr_is_fcc != src->cspr_is_fcc) {
		dest->cspr_is_fcc = src->cspr_is_fcc;
	}
	if (dest->rc_gp_dyn_mul != src->rc_gp_dyn_mul) {
		dest->rc_gp_dyn_mul = src->rc_gp_dyn_mul;
	}
	if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
		dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
	}
	if (dest->rack_tlp_threshold_use != src->rack_tlp_threshold_use) {
		dest->rack_tlp_threshold_use = src->rack_tlp_threshold_use;
	}
	if (dest->r_ctl.init_rate != src->r_ctl.init_rate) {
		dest->r_ctl.init_rate = src->r_ctl.init_rate;
	}
	if (dest->rc_force_max_seg != src->rc_force_max_seg) {
		dest->rc_force_max_seg = src->rc_force_max_seg;
	}
	if (dest->r_ctl.rc_user_set_min_segs != src->r_ctl.rc_user_set_min_segs) {
		dest->r_ctl.rc_user_set_min_segs = src->r_ctl.rc_user_set_min_segs;
	}
	if (dest->r_ctl.rc_fixed_pacing_rate_ca != src->r_ctl.rc_fixed_pacing_rate_ca) {
		dest->r_ctl.rc_fixed_pacing_rate_ca = src->r_ctl.rc_fixed_pacing_rate_ca;
	}
	if (dest->r_ctl.rc_fixed_pacing_rate_ss != src->r_ctl.rc_fixed_pacing_rate_ss) {
		dest->r_ctl.rc_fixed_pacing_rate_ss = src->r_ctl.rc_fixed_pacing_rate_ss;
	}
	if (dest->r_ctl.rc_fixed_pacing_rate_rec != src->r_ctl.rc_fixed_pacing_rate_rec) {
		dest->r_ctl.rc_fixed_pacing_rate_rec = src->r_ctl.rc_fixed_pacing_rate_rec;
	}
	if (dest->r_ctl.rack_per_of_gp_rec != src->r_ctl.rack_per_of_gp_rec) {
		dest->r_ctl.rack_per_of_gp_rec = src->r_ctl.rack_per_of_gp_rec;
	}
	if (dest->r_ctl.rack_per_of_gp_ca != src->r_ctl.rack_per_of_gp_ca) {
		dest->r_ctl.rack_per_of_gp_ca = src->r_ctl.rack_per_of_gp_ca;
	}
	if (dest->r_ctl.rack_per_of_gp_ss != src->r_ctl.rack_per_of_gp_ss) {
		dest->r_ctl.rack_per_of_gp_ss = src->r_ctl.rack_per_of_gp_ss;
	}
	if (dest->r_rr_config != src->r_rr_config) {
		dest->r_rr_config = src->r_rr_config;
	}
	if (dest->rc_pace_dnd != src->rc_pace_dnd) {
		dest->rc_pace_dnd = src->rc_pace_dnd;
	}
	if (dest->r_rack_hw_rate_caps != src->r_rack_hw_rate_caps) {
		dest->r_rack_hw_rate_caps = src->r_rack_hw_rate_caps;
	}
	if (dest->r_ctl.rack_per_upper_bound_ca != src->r_ctl.rack_per_upper_bound_ca) {
		dest->r_ctl.rack_per_upper_bound_ca = src->r_ctl.rack_per_upper_bound_ca;
	}
	if (dest->r_ctl.rack_per_upper_bound_ss != src->r_ctl.rack_per_upper_bound_ss) {
		dest->r_ctl.rack_per_upper_bound_ss = src->r_ctl.rack_per_upper_bound_ss;
	}
	if (dest->r_ctl.gp_rnd_thresh != src->r_ctl.gp_rnd_thresh) {
		dest->r_ctl.gp_rnd_thresh = src->r_ctl.gp_rnd_thresh;
	}
	if (dest->r_ctl.gate_to_fs != src->r_ctl.gate_to_fs) {
		dest->r_ctl.gate_to_fs = src->r_ctl.gate_to_fs;
	}
	if (dest->r_ctl.use_gp_not_last != src->r_ctl.use_gp_not_last) {
		dest->r_ctl.use_gp_not_last = src->r_ctl.use_gp_not_last;
	}
	if (dest->r_ctl.gp_gain_req != src->r_ctl.gp_gain_req) {
		dest->r_ctl.gp_gain_req = src->r_ctl.gp_gain_req;
	}
	if (dest->rack_hdw_pace_ena != src->rack_hdw_pace_ena) {
		dest->rack_hdw_pace_ena = src->rack_hdw_pace_ena;
	}
	if (dest->rack_attempt_hdwr_pace != src->rack_attempt_hdwr_pace) {
		dest->rack_attempt_hdwr_pace = src->rack_attempt_hdwr_pace;
	}
	if (dest->r_ctl.rc_prr_sendalot != src->r_ctl.rc_prr_sendalot) {
		dest->r_ctl.rc_prr_sendalot = src->r_ctl.rc_prr_sendalot;
	}
	if (dest->r_ctl.rc_min_to != src->r_ctl.rc_min_to) {
		dest->r_ctl.rc_min_to = src->r_ctl.rc_min_to;
	}
	if (dest->r_ctl.rc_early_recovery_segs != src->r_ctl.rc_early_recovery_segs) {
		dest->r_ctl.rc_early_recovery_segs = src->r_ctl.rc_early_recovery_segs;
	}
	if (par->t_ccv.flags != tp->t_ccv.flags) {
		/* ... */
		if (par->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			tp->t_ccv.flags |= CCF_HYSTART_ALLOWED;
			/* ... */
			tp->t_ccv.flags |= CCF_HYSTART_CAN_SH_CWND;
			/* ... */
			tp->t_ccv.flags |= CCF_HYSTART_CONS_SSTH;
		} else
			tp->t_ccv.flags &= ~(CCF_HYSTART_ALLOWED|CCF_HYSTART_CAN_SH_CWND|CCF_HYSTART_CONS_SSTH);
	}
	if (dest->r_ctl.rc_reorder_shift != src->r_ctl.rc_reorder_shift) {
		dest->r_ctl.rc_reorder_shift = src->r_ctl.rc_reorder_shift;
	}
	if (dest->r_ctl.rc_reorder_fade != src->r_ctl.rc_reorder_fade) {
		dest->r_ctl.rc_reorder_fade = src->r_ctl.rc_reorder_fade;
	}
	if (dest->r_ctl.rc_tlp_threshold != src->r_ctl.rc_tlp_threshold) {
		dest->r_ctl.rc_tlp_threshold = src->r_ctl.rc_tlp_threshold;
	}
	if (dest->use_rack_rr != src->use_rack_rr) {
		dest->use_rack_rr = src->use_rack_rr;
	}
	if (dest->r_ctl.rc_pkt_delay != src->r_ctl.rc_pkt_delay) {
		dest->r_ctl.rc_pkt_delay = src->r_ctl.rc_pkt_delay;
	}
	if (dest->r_ctl.rc_rate_sample_method != src->r_ctl.rc_rate_sample_method) {
		dest->r_ctl.rc_rate_sample_method = src->r_ctl.rc_rate_sample_method;
	}
	if (dest->r_use_hpts_min != src->r_use_hpts_min) {
		dest->r_use_hpts_min = src->r_use_hpts_min;
	}
	if (dest->r_ctl.max_reduction != src->r_ctl.max_reduction) {
		dest->r_ctl.max_reduction = src->r_ctl.max_reduction;
	}
	if (dest->rc_gp_no_rec_chg != src->rc_gp_no_rec_chg) {
		dest->rc_gp_no_rec_chg = src->rc_gp_no_rec_chg;
	}
	if (dest->rc_skip_timely != src->rc_skip_timely) {
		dest->rc_skip_timely = src->rc_skip_timely;
	}
	if (dest->rc_allow_data_af_clo != src->rc_allow_data_af_clo) {
		dest->rc_allow_data_af_clo = src->rc_allow_data_af_clo;
	}
	if (src->use_lesser_lt_bw != dest->use_lesser_lt_bw) {
		dest->use_lesser_lt_bw = src->use_lesser_lt_bw;
	}
	if (dest->dis_lt_bw != src->dis_lt_bw) {
		dest->dis_lt_bw = src->dis_lt_bw;
	}
/* From rack_apply_deferred_options(): */
	TAILQ_FOREACH_SAFE(dol, &rack->r_ctl.opt_list, next, sdol) {
		TAILQ_REMOVE(&rack->r_ctl.opt_list, dol, next);
		/* ... */
		s_optval = (uint32_t)dol->optval;
		(void)rack_process_option(rack->rc_tp, rack, dol->optname, s_optval, dol->optval, NULL);
		/* ... */
	}
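/*
 * Replay is FIFO: options are re-applied in the order the application
 * issued them, each entry being unlinked from opt_list before it is
 * handed back to rack_process_option() (with no hybrid argument).
 */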
/* From rack_hw_tls_change(): */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	/* ... */
	rack->r_ctl.fsb.hw_tls = 1;
	/* ... */
	rack->r_ctl.fsb.hw_tls = 0;
/* From rack_wake_check(): */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (rack->r_ctl.rc_hpts_flags) {
		/* ... */
		if ((rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == PACE_PKT_OUTPUT) {
			/* ... */
			if (TSTMP_GEQ(cts, rack->r_ctl.rc_last_output_to)) {
				/* ... */
			}
		} else if ((rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) != 0) {
			/* ... */
			if (TSTMP_GEQ(cts, rack->r_ctl.rc_timer_exp)) {
				/* ... */
			}
		}
	}
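/*
 * A wakeup is therefore indicated only once the pacer deadline
 * (rc_last_output_to) or the pending timer (rc_timer_exp) has been
 * reached; TSTMP_GEQ is the wrap-safe "now >= deadline" comparison.
 */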
/*
 * From rack_set_sockopt():
 * ...
 * socket option arguments.  When it re-acquires the lock after the copy, it
 * ...
 */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	/* ... */
	ip = (struct ip *)rack->r_ctl.fsb.tcp_ip_hdr;
	/* ... */
	switch (sopt->sopt_level) {
	/* ... */
		MPASS(inp->inp_vflag & INP_IPV6PROTO);
		switch (sopt->sopt_name) {
		/* ... */
		}
		/* ... */
		switch (sopt->sopt_name) {
		/* ... */
			ip->ip_tos = rack->rc_inp->inp_ip_tos;
			/* ... */
			ip->ip_ttl = rack->rc_inp->inp_ip_ttl;
			/* ... */
		}
		/* ... */
		switch (sopt->sopt_name) {
		case SO_PEERPRIO: /* SC-URL:bs */
			/* ... */
			if (inp->inp_socket) {
				rack->client_bufferlvl = inp->inp_socket->so_peerprio;
			}
			/* ... */
		}
		/* ... */
	}
	/* ... */
	switch (sopt->sopt_name) {
	/* ... */
	case TCP_PACING_RATE_CAP: /* URL:cap -- used by side-channel */
	case TCP_HDWR_UP_ONLY: /* URL:uponly -- hardware pacing boolean */
	/* ... */
	}
	/* ... */
	if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
	    (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
		/*
		 * ... (64-bit copyin elided.)  We truncate it down to 32 bits
		 * for the socket-option trace; this only affects the logged
		 * value, not the rate that is applied.
		 */
		/* ... */
	} else if (sopt->sopt_name == TCP_HYBRID_PACING) {
		/* ... */
	}
	/* ... */
	if (tp->t_fb != &__tcp_rack) {
		/* ... */
	}
	/* ... */
	if (rack->defer_options && (rack->gp_ready == 0) &&
	    (sopt->sopt_name != TCP_DEFER_OPTIONS) &&
	    (sopt->sopt_name != TCP_HYBRID_PACING) &&
	    (sopt->sopt_name != TCP_RACK_SET_RXT_OPTIONS) &&
	    (sopt->sopt_name != TCP_RACK_PACING_BETA_ECN) &&
	    (sopt->sopt_name != TCP_RACK_MEASURE_CNT)) {
		/* ... */
		if (rack_add_deferred_option(rack, sopt->sopt_name, loptval)) {
			/* ... */
		}
		/* ... */
	}
	error = rack_process_option(tp, rack, sopt->sopt_name, optval, loptval, &hybrid);
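/*
 * The two rate-cap options carry 64-bit values, so callers must pass a
 * uint64_t rather than the int used by the other options.  Sketch (the
 * rate value and its interpretation as bytes/sec are assumptions):
 *
 *	#include <stdint.h>
 *
 *	uint64_t cap = 12500000;	(assumed bytes/sec, ~100 Mbit/s)
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_PACING_RATE_CAP,
 *	    &cap, sizeof(cap)) == -1)
 *		warn("TCP_PACING_RATE_CAP");
 */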
/* From rack_fill_info(): */
	ti->tcpi_state = tp->t_state;
	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->t_flags & TF_SACK_PERMIT)
		ti->tcpi_options |= TCPI_OPT_SACK;
	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
		ti->tcpi_options |= TCPI_OPT_WSCALE;
		ti->tcpi_snd_wscale = tp->snd_scale;
		ti->tcpi_rcv_wscale = tp->rcv_scale;
	}
	if (tp->t_flags2 & (TF2_ECN_PERMIT | TF2_ACE_PERMIT))
		ti->tcpi_options |= TCPI_OPT_ECN;
	if (tp->t_flags & TF_FASTOPEN)
		ti->tcpi_options |= TCPI_OPT_TFO;
	/* ... */
	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
	/* ... */
	ti->tcpi_rtt = tp->t_srtt;
	ti->tcpi_rttvar = tp->t_rttvar;
	ti->tcpi_rto = tp->t_rxtcur;
	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
	ti->tcpi_snd_cwnd = tp->snd_cwnd;
	/*
	 * FreeBSD-specific extension fields for tcp_info.
	 */
	ti->tcpi_rcv_space = tp->rcv_wnd;
	ti->tcpi_rcv_nxt = tp->rcv_nxt;
	ti->tcpi_snd_wnd = tp->snd_wnd;
	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
	ti->tcpi_snd_nxt = tp->snd_nxt;
	ti->tcpi_snd_mss = tp->t_maxseg;
	ti->tcpi_rcv_mss = tp->t_maxseg;
	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
	ti->tcpi_total_tlp = tp->t_sndtlppack;
	ti->tcpi_total_tlp_bytes = tp->t_sndtlpbyte;
	ti->tcpi_rttmin = tp->t_rttlow;
	/* ... */
	memcpy(&ti->tcpi_rxsyninfo, &tp->t_rxsyninfo, sizeof(struct tcpsyninfo));
	/* ... */
	if (tp->t_flags & TF_TOE) {
		ti->tcpi_options |= TCPI_OPT_TOE;
		/* ... */
	}
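/*
 * rack_fill_info() services the standard TCP_INFO getsockopt, so the
 * fields above are what a monitoring tool observes.  A minimal reader:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <stdio.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u rto %u cwnd %u\n",
 *		    ti.tcpi_rtt, ti.tcpi_rto, ti.tcpi_snd_cwnd);
 */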
/* From rack_get_sockopt(): */
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	/* ... */
	switch (sopt->sopt_name) {
	/* ... (case labels elided; one handler per value below) */
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
			/* ... */
		} else if (rack->rc_pacing_cc_set == 0) {
			optval = rack->r_ctl.rc_saved_beta;
		} else {
			/* ... */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta;
			/* ... */
		}
		/* ... */
		if (strcmp(tp->t_cc->name, CCALGONAME_NEWRENO) != 0) {
			/* ... */
		} else if (rack->rc_pacing_cc_set == 0) {
			optval = rack->r_ctl.rc_saved_beta_ecn;
		} else {
			/* ... */
			if (tp->t_ccv.cc_data)
				optval = ((struct newreno *)tp->t_ccv.cc_data)->beta_ecn;
			/* ... */
		}
		/* ... */
		if (rack->rc_rack_tmr_std_based) {
			/* ... */
		}
		if (rack->rc_rack_use_dsack) {
			/* ... */
		}
		/* ... */
		if (tp->t_ccv.flags & CCF_HYSTART_ALLOWED) {
			/* ... */
			if (tp->t_ccv.flags & CCF_HYSTART_CAN_SH_CWND) {
				/* ... */
			}
			if (tp->t_ccv.flags & CCF_HYSTART_CONS_SSTH) {
				/* ... */
			}
		}
		/* ... */
		optval = rack->rack_hibeta;
		optval = rack->defer_options;
		optval = rack->r_ctl.req_measurements;
		optval = rack->r_use_labc_for_rec;
		optval = rack->rc_labc;
		optval = rack->r_up_only;
		loptval = rack->r_ctl.fillcw_cap;
		loptval = rack->r_ctl.bw_rate_cap;
		optval = rack->r_ctl.side_chan_dis_mask;
		optval = rack->r_use_cmp_ack;
		optval = rack->rc_pace_to_cwnd;
		optval = rack->r_ctl.rc_no_push_at_mrtt;
		optval = rack->rack_enable_scwnd;
		optval = rack->rack_rec_nonrxt_use_cr;
		if (rack->rack_no_prr == 1) {
			/* ... */
		} else if (rack->no_prr_addback == 1) {
			/* ... */
		}
		/* ... */
		if (rack->dis_lt_bw) {
			/* ... */
		} else if (rack->use_lesser_lt_bw) {
			/* ... */
		}
		/* ... */
		/* Now do we use the LRO mbuf-queue feature */
		optval = rack->r_mbuf_queue;
		optval = rack->cspr_is_fcc;
		optval = rack->rc_gp_dyn_mul;
		optval = rack->r_ctl.rc_tlp_cwnd_reduce;
		val = rack->r_ctl.init_rate;
		optval = rack->rc_force_max_seg;
		optval = rack->r_ctl.rc_user_set_min_segs;
		optval = rack->rc_user_set_max_segs;
		optval = rack->rc_always_pace;
		optval = rack->r_ctl.rc_prr_sendalot;
		/* Minimum time between rack t-o's in ms */
		optval = rack->r_ctl.rc_min_to;
		optval = rack->r_ctl.rc_split_limit;
		optval = rack->r_ctl.rc_early_recovery_segs;
		optval = rack->r_ctl.rc_reorder_shift;
		if (rack->r_ctl.gp_rnd_thresh) {
			/* ... */
			v = rack->r_ctl.gp_gain_req;
			/* ... */
			optval = v | (rack->r_ctl.gp_rnd_thresh & 0xff);
			if (rack->r_ctl.gate_to_fs == 1) {
				/* ... */
			}
		}
		/* ... */
		optval = rack->r_ctl.rc_reorder_fade;
		optval = rack->use_rack_rr;
		optval = rack->r_rr_config;
		optval = rack->r_rack_hw_rate_caps;
		optval = rack->rack_hdw_pace_ena;
		optval = rack->r_ctl.rc_tlp_threshold;
		/* RACK added ms i.e. rack-rtt + reord + N */
		optval = rack->r_ctl.rc_pkt_delay;
		optval = rack->rack_tlp_threshold_use;
		optval = rack->rc_pace_dnd;
		optval = rack->r_ctl.rc_fixed_pacing_rate_ca;
		optval = rack->r_ctl.rc_fixed_pacing_rate_ss;
		optval = rack->r_ctl.rc_fixed_pacing_rate_rec;
		optval = rack->r_ctl.rack_per_upper_bound_ss;
		/* ... */
		optval |= rack->r_ctl.rack_per_upper_bound_ca;
		optval = rack->r_ctl.rack_per_of_gp_ca;
		optval = rack->r_ctl.rack_per_of_gp_ss;
		optval = rack->r_ctl.pace_len_divisor;
		optval = rack->r_ctl.rc_rate_sample_method;
		optval = tp->t_delayed_ack;
		optval = rack->rc_allow_data_af_clo;
		optval = rack->r_limit_scw;
		if (rack->r_use_hpts_min)
			optval = rack->r_ctl.max_reduction;
		/* ... */
		optval = rack->rc_gp_no_rec_chg;
		optval = rack->rc_skip_timely;
		optval = rack->r_ctl.timer_slop;
		/* ... */
	}
	if ((sopt->sopt_name == TCP_PACING_RATE_CAP) ||
	    (sopt->sopt_name == TCP_FILLCW_RATE_CAP)) {
		/* ... these two copy out the 64-bit loptval ... */
	}
	/* ... */
/* From rack_ctloutput(): */
	if (sopt->sopt_dir == SOPT_SET) {
		/* ... */
	} else if (sopt->sopt_dir == SOPT_GET) {
		/* ... */
	} else {
		panic("%s: sopt_dir %d", __func__, sopt->sopt_dir);
	}
24620 printf("Failed to register rack module -- err:%d\n", err); in tcp_addrack()